From 2a1e0a2fbe8e20acd29e119a3ef5e8d7bd27b586 Mon Sep 17 00:00:00 2001 From: Takashi Matsuoka Date: Thu, 4 Jul 2024 20:58:56 +0900 Subject: [PATCH 1/2] Remove network code --- firmware/include/AzureDpsClient.h | 55 --- firmware/include/Config.h | 13 - firmware/include/ConfigurationMode.h | 5 - firmware/include/README | 39 -- firmware/include/Signature.h | 7 - firmware/include/Storage.h | 31 -- firmware/kartben-artificial-nose.json | 40 -- firmware/lib/README | 46 -- .../WioTerminalLib/include/Aziot/AziotDps.h | 24 -- .../WioTerminalLib/include/Aziot/AziotHub.h | 33 -- .../include/Aziot/EasyAziotConfig.h | 15 - .../include/Aziot/EasyAziotDpsClient.h | 51 --- .../include/Aziot/EasyAziotHubClient.h | 48 --- .../include/Network/Certificates.h | 3 - .../include/Network/Signature.h | 7 - .../include/Network/TimeManager.h | 22 - .../include/Network/WiFiManager.h | 15 - firmware/lib/WioTerminalLib/library.json | 9 - .../lib/WioTerminalLib/src/Aziot/AziotDps.cpp | 77 ---- .../lib/WioTerminalLib/src/Aziot/AziotHub.cpp | 118 ------ .../src/Aziot/EasyAziotDpsClient.cpp | 169 -------- .../src/Aziot/EasyAziotHubClient.cpp | 162 ------- .../src/Network/Certificates.cpp | 24 -- .../WioTerminalLib/src/Network/Signature.cpp | 71 ---- .../src/Network/TimeManager.cpp | 28 -- .../src/Network/WiFiManager.cpp | 19 - .../src/artificial_nose_inferencing.h | 3 + firmware/platformio.ini | 14 - ...udio-grove-multichannel-gas-sensor-v2.json | 63 --- firmware/src/AzureDpsClient.cpp | 144 ------- firmware/src/Config.cpp | 4 - firmware/src/ConfigurationMode.cpp | 397 ------------------ firmware/src/Signature.cpp | 71 ---- firmware/src/Storage.cpp | 71 ---- firmware/src/main.cpp | 294 +------------ firmware/test/README | 11 - 36 files changed, 6 insertions(+), 2197 deletions(-) delete mode 100644 firmware/include/AzureDpsClient.h delete mode 100644 firmware/include/Config.h delete mode 100644 firmware/include/ConfigurationMode.h delete mode 100644 firmware/include/README delete mode 100644 firmware/include/Signature.h delete mode 100644 firmware/include/Storage.h delete mode 100644 firmware/kartben-artificial-nose.json delete mode 100644 firmware/lib/README delete mode 100644 firmware/lib/WioTerminalLib/include/Aziot/AziotDps.h delete mode 100644 firmware/lib/WioTerminalLib/include/Aziot/AziotHub.h delete mode 100644 firmware/lib/WioTerminalLib/include/Aziot/EasyAziotConfig.h delete mode 100644 firmware/lib/WioTerminalLib/include/Aziot/EasyAziotDpsClient.h delete mode 100644 firmware/lib/WioTerminalLib/include/Aziot/EasyAziotHubClient.h delete mode 100644 firmware/lib/WioTerminalLib/include/Network/Certificates.h delete mode 100644 firmware/lib/WioTerminalLib/include/Network/Signature.h delete mode 100644 firmware/lib/WioTerminalLib/include/Network/TimeManager.h delete mode 100644 firmware/lib/WioTerminalLib/include/Network/WiFiManager.h delete mode 100644 firmware/lib/WioTerminalLib/library.json delete mode 100644 firmware/lib/WioTerminalLib/src/Aziot/AziotDps.cpp delete mode 100644 firmware/lib/WioTerminalLib/src/Aziot/AziotHub.cpp delete mode 100644 firmware/lib/WioTerminalLib/src/Aziot/EasyAziotDpsClient.cpp delete mode 100644 firmware/lib/WioTerminalLib/src/Aziot/EasyAziotHubClient.cpp delete mode 100644 firmware/lib/WioTerminalLib/src/Network/Certificates.cpp delete mode 100644 firmware/lib/WioTerminalLib/src/Network/Signature.cpp delete mode 100644 firmware/lib/WioTerminalLib/src/Network/TimeManager.cpp delete mode 100644 firmware/lib/WioTerminalLib/src/Network/WiFiManager.cpp delete mode 100644 
firmware/seeedstudio-grove-multichannel-gas-sensor-v2.json delete mode 100644 firmware/src/AzureDpsClient.cpp delete mode 100644 firmware/src/Config.cpp delete mode 100644 firmware/src/ConfigurationMode.cpp delete mode 100644 firmware/src/Signature.cpp delete mode 100644 firmware/src/Storage.cpp delete mode 100644 firmware/test/README diff --git a/firmware/include/AzureDpsClient.h b/firmware/include/AzureDpsClient.h deleted file mode 100644 index 766c580..0000000 --- a/firmware/include/AzureDpsClient.h +++ /dev/null @@ -1,55 +0,0 @@ -#pragma once - -#include -#include -#include - -class AzureDpsClient -{ -public: - AzureDpsClient(); - AzureDpsClient(const AzureDpsClient&) = delete; - AzureDpsClient& operator=(const AzureDpsClient&) = delete; - - std::string GetEndpoint() const { return Endpoint; } - void SetEndpoint(const std::string& endpoint) { Endpoint = endpoint; } - std::string GetIdScope() const { return IdScope; } - void SetIdScope(const std::string& idScope) { IdScope = idScope; } - std::string GetRegistrationId() const { return RegistrationId; } - void SetRegistrationId(const std::string& registrationId) { RegistrationId = registrationId; } - - int Init(const std::string& endpoint, const std::string& idScope, const std::string& registrationId); - - std::vector GetSignature(const uint64_t& expirationEpochTime); - - std::string GetMqttClientId(); - std::string GetMqttUsername(); - std::string GetMqttPassword(const std::string& encryptedSignature, const uint64_t& expirationEpochTime); - - std::string GetRegisterPublishTopic(); - std::string GetRegisterSubscribeTopic() const; - int RegisterSubscribeWork(const std::string& topic, const std::vector& payload); - bool IsRegisterOperationCompleted(); - int GetWaitBeforeQueryStatusSeconds() const; - std::string GetQueryStatusPublishTopic(); - - bool IsAssigned(); - std::string GetHubHost(); - std::string GetDeviceId(); - -private: - std::string Endpoint; - std::string IdScope; - std::string RegistrationId; - - az_iot_provisioning_client ProvClient; - - bool ResponseValid; - std::string ResponseTopic; - std::vector ResponsePayload; - az_iot_provisioning_client_register_response Response; - -private: - static az_iot_provisioning_client_operation_status GetOperationStatus(az_iot_provisioning_client_register_response& response); - -}; diff --git a/firmware/include/Config.h b/firmware/include/Config.h deleted file mode 100644 index 1f910d3..0000000 --- a/firmware/include/Config.h +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -constexpr int DISPLAY_BRIGHTNESS = 127; // 0-255 - -constexpr int TELEMETRY_INTERVAL = 60; // [sec.] - -extern const char DPS_GLOBAL_DEVICE_ENDPOINT_HOST[]; -extern const char MODEL_ID[]; - -constexpr int MQTT_PACKET_SIZE = 1024; -constexpr int TOKEN_LIFESPAN = 1 * 60 * 60; // [sec.] -constexpr float RECONNECT_RATE = 0.85; -constexpr int JSON_MAX_SIZE = 1024; diff --git a/firmware/include/ConfigurationMode.h b/firmware/include/ConfigurationMode.h deleted file mode 100644 index 8ae56a4..0000000 --- a/firmware/include/ConfigurationMode.h +++ /dev/null @@ -1,5 +0,0 @@ -#pragma once - -class Storage; - -[[noreturn]] void ConfigurationMode(Storage& storage); diff --git a/firmware/include/README b/firmware/include/README deleted file mode 100644 index 194dcd4..0000000 --- a/firmware/include/README +++ /dev/null @@ -1,39 +0,0 @@ - -This directory is intended for project header files. - -A header file is a file containing C declarations and macro definitions -to be shared between several project source files. 
You request the use of a -header file in your project source file (C, C++, etc) located in `src` folder -by including it, with the C preprocessing directive `#include'. - -```src/main.c - -#include "header.h" - -int main (void) -{ - ... -} -``` - -Including a header file produces the same results as copying the header file -into each source file that needs it. Such copying would be time-consuming -and error-prone. With a header file, the related declarations appear -in only one place. If they need to be changed, they can be changed in one -place, and programs that include the header file will automatically use the -new version when next recompiled. The header file eliminates the labor of -finding and changing all the copies as well as the risk that a failure to -find one copy will result in inconsistencies within a program. - -In C, the usual convention is to give header files names that end with `.h'. -It is most portable to use only letters, digits, dashes, and underscores in -header file names, and at most one dot. - -Read more about using header files in official GCC documentation: - -* Include Syntax -* Include Operation -* Once-Only Headers -* Computed Includes - -https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html diff --git a/firmware/include/Signature.h b/firmware/include/Signature.h deleted file mode 100644 index 62fcb2e..0000000 --- a/firmware/include/Signature.h +++ /dev/null @@ -1,7 +0,0 @@ -#pragma once - -#include -#include - -std::string GenerateEncryptedSignature(const std::string& symmetricKey, const std::vector& signature); -std::string ComputeDerivedSymmetricKey(const std::string& masterKey, const std::string& registrationId); diff --git a/firmware/include/Storage.h b/firmware/include/Storage.h deleted file mode 100644 index 9c823f9..0000000 --- a/firmware/include/Storage.h +++ /dev/null @@ -1,31 +0,0 @@ -#pragma once - -#include - -namespace ExtFlashLoader -{ - class QSPIFlash; -} - -class Storage -{ -private: - Storage(const Storage&) = delete; - Storage& operator=(const Storage&) = delete; - -public: - std::string WiFiSSID; - std::string WiFiPassword; - std::string IdScope; - std::string RegistrationId; - std::string SymmetricKey; - - Storage(ExtFlashLoader::QSPIFlash& flash); - void Load(); - void Save(); - void Erase(); - -private: - ExtFlashLoader::QSPIFlash& Flash_; - -}; diff --git a/firmware/kartben-artificial-nose.json b/firmware/kartben-artificial-nose.json deleted file mode 100644 index 57a26a0..0000000 --- a/firmware/kartben-artificial-nose.json +++ /dev/null @@ -1,40 +0,0 @@ -{ - "@id": "dtmi:kartben:artificial_nose;1", - "@type": "Interface", - "@context": "dtmi:dtdl:context;2", - "displayName": "Artificial Nose", - "description": { - "en": "Artificial Nose powered by Wio Terminal + Grove Multichannel Gas Sensor v2." 
- }, - "contents": [ - { - "@type": "Component", - "name": "gas_sensor", - "displayName": "Gas Sensor", - "schema": "dtmi:seeedstudio:grove:multichannel_gas_sensor_v2;1" - }, - { - "@type": "Property", - "name": "latestInferenceResult", - "schema": "string", - "displayName": { - "en": "Latest inference result" - } - }, - { - "@type": [ - "Property", - "TimeSpan" - ], - "name": "telemetryInterval", - "unit": "second", - "displayName": { - "en": "Telemetry interval", - "ja": "送信間隔" - }, - "schema": "integer", - "writable": true - } - - ] -} diff --git a/firmware/lib/README b/firmware/lib/README deleted file mode 100644 index 6debab1..0000000 --- a/firmware/lib/README +++ /dev/null @@ -1,46 +0,0 @@ - -This directory is intended for project specific (private) libraries. -PlatformIO will compile them to static libraries and link into executable file. - -The source code of each library should be placed in a an own separate directory -("lib/your_library_name/[here are source files]"). - -For example, see a structure of the following two libraries `Foo` and `Bar`: - -|--lib -| | -| |--Bar -| | |--docs -| | |--examples -| | |--src -| | |- Bar.c -| | |- Bar.h -| | |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html -| | -| |--Foo -| | |- Foo.c -| | |- Foo.h -| | -| |- README --> THIS FILE -| -|- platformio.ini -|--src - |- main.c - -and a contents of `src/main.c`: -``` -#include -#include - -int main (void) -{ - ... -} - -``` - -PlatformIO Library Dependency Finder will find automatically dependent -libraries scanning project source files. - -More information about PlatformIO Library Dependency Finder -- https://docs.platformio.org/page/librarymanager/ldf.html diff --git a/firmware/lib/WioTerminalLib/include/Aziot/AziotDps.h b/firmware/lib/WioTerminalLib/include/Aziot/AziotDps.h deleted file mode 100644 index a8404aa..0000000 --- a/firmware/lib/WioTerminalLib/include/Aziot/AziotDps.h +++ /dev/null @@ -1,24 +0,0 @@ -#pragma once - -#include -#include - -class AziotDps -{ -public: - AziotDps(); - AziotDps(const AziotDps&) = delete; - AziotDps& operator=(const AziotDps&) = delete; - - void SetMqttPacketSize(int size); - - int RegisterDevice(const std::string& endpointHost, const std::string& idScope, const std::string& registrationId, const std::string& symmetricKey, const std::string& modelId, const uint64_t& expirationEpochTime, std::string* hubHost, std::string* deviceId); - -private: - uint16_t MqttPacketSize_; - - static EasyAziotDpsClient DpsClient_; - static unsigned long DpsPublishTimeOfQueryStatus_; - static void MqttSubscribeCallback(char* topic, uint8_t* payload, unsigned int length); - -}; diff --git a/firmware/lib/WioTerminalLib/include/Aziot/AziotHub.h b/firmware/lib/WioTerminalLib/include/Aziot/AziotHub.h deleted file mode 100644 index af81b63..0000000 --- a/firmware/lib/WioTerminalLib/include/Aziot/AziotHub.h +++ /dev/null @@ -1,33 +0,0 @@ -#pragma once - -#include -#include -#include - -class AziotHub -{ -public: - AziotHub(); - AziotHub(const AziotHub&) = delete; - AziotHub& operator=(const AziotHub&) = delete; - - void SetMqttPacketSize(int size); - - void DoWork(); - bool IsConnected(); - int Connect(const std::string& host, const std::string& deviceId, const std::string& symmetricKey, const std::string& modelId, const uint64_t& expirationEpochTime); - void Disconnect(); - void SendTelemetry(const char* payload, char* componentName); - void RequestTwinDocument(const char* requestId); - void SendTwinPatch(const char* 
requestId, const char* payload); - - static std::function ReceivedTwinDocumentCallback; - static std::function ReceivedTwinDesiredPatchCallback; - -private: - uint16_t MqttPacketSize_; - - static EasyAziotHubClient HubClient_; - static void MqttSubscribeCallback(char* topic, uint8_t* payload, unsigned int length); - -}; diff --git a/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotConfig.h b/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotConfig.h deleted file mode 100644 index fdc7102..0000000 --- a/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotConfig.h +++ /dev/null @@ -1,15 +0,0 @@ -#pragma once - -#include - -constexpr size_t SIGNATURE_MAX_SIZE = 256; -constexpr size_t MQTT_USERNAME_MAX_SIZE = 256; -constexpr size_t MQTT_CLIENT_ID_MAX_SIZE = 128; -constexpr size_t MQTT_PASSWORD_MAX_SIZE = 300; - -constexpr size_t REGISTER_PUBLISH_TOPIC_MAX_SIZE = 128; -constexpr size_t QUERY_STATUS_PUBLISH_TOPIC_MAX_SIZE = 256; - -constexpr size_t TELEMETRY_PUBLISH_TOPIC_MAX_SIZE = 128; -constexpr size_t TWIN_DOCUMENT_PUBLISH_TOPIC_MAX_SIZE = 128; -constexpr size_t TWIN_PATCH_PUBLISH_TOPIC_MAX_SIZE = 128; diff --git a/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotDpsClient.h b/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotDpsClient.h deleted file mode 100644 index f835fe2..0000000 --- a/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotDpsClient.h +++ /dev/null @@ -1,51 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -class EasyAziotDpsClient -{ -public: - EasyAziotDpsClient(); - EasyAziotDpsClient(const EasyAziotDpsClient&) = delete; - EasyAziotDpsClient& operator=(const EasyAziotDpsClient&) = delete; - - int Init(const char* endpoint, const char* idScope, const char* registrationId); - int SetSAS(const char* symmetricKey, const uint64_t& expirationEpochTime, std::function& signature)> generateEncryptedSignature); - - const std::string& GetMqttUsername() const; - const std::string& GetMqttClientId() const; - const std::string& GetMqttPassword() const; - - std::string GetRegisterPublishTopic(); - std::string GetRegisterSubscribeTopic() const; - int RegisterSubscribeWork(const char* topic, const std::vector& payload); - bool IsRegisterOperationCompleted(); - int GetWaitBeforeQueryStatusSeconds() const; - std::string GetQueryStatusPublishTopic(); - - bool IsAssigned() const; - std::string GetHubHost(); - std::string GetDeviceId(); - -private: - std::string Endpoint_; - std::string IdScope_; - std::string RegistrationId_; - - az_iot_provisioning_client ProvClient_; - - std::string MqttUsername_; - std::string MqttClientId_; - std::string MqttPassword_; - - bool ResponseValid_; - std::string ResponseTopic_; // DON'T REMOVE THIS CODE - std::vector ResponsePayload_; // DON'T REMOVE THIS CODE - az_iot_provisioning_client_register_response Response_; - - static az_iot_provisioning_client_operation_status GetOperationStatus(const az_iot_provisioning_client_register_response& response); - -}; diff --git a/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotHubClient.h b/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotHubClient.h deleted file mode 100644 index a90a630..0000000 --- a/firmware/lib/WioTerminalLib/include/Aziot/EasyAziotHubClient.h +++ /dev/null @@ -1,48 +0,0 @@ -#pragma once - -#include -#include -#include -#include - -class EasyAziotHubClient -{ -public: - struct TwinResponse - { - std::string RequestId; - az_iot_status Status; - az_iot_hub_client_twin_response_type ResponseType; - std::string Version; - }; - -public: - EasyAziotHubClient(); - 
EasyAziotHubClient(const EasyAziotHubClient&) = delete; - EasyAziotHubClient& operator=(const EasyAziotHubClient&) = delete; - - int Init(const char* host, const char* deviceId, const char* modelId); - int SetSAS(const char* symmetricKey, const uint64_t& expirationEpochTime, std::function& signature)> generateEncryptedSignature); - - const std::string& GetMqttUsername() const; - const std::string& GetMqttClientId() const; - const std::string& GetMqttPassword() const; - - std::string GetTelemetryPublishTopic(char * componentName); - std::string GetTwinDocumentPublishTopic(const char* requestId); - std::string GetTwinPatchPublishTopic(const char* requestId); - - int ParseTwinTopic(const char* topic, TwinResponse& twinResponse); - -private: - std::string Host_; - std::string DeviceId_; - std::string ModelId_; - - az_iot_hub_client HubClient_; - - std::string MqttUsername_; - std::string MqttClientId_; - std::string MqttPassword_; - -}; diff --git a/firmware/lib/WioTerminalLib/include/Network/Certificates.h b/firmware/lib/WioTerminalLib/include/Network/Certificates.h deleted file mode 100644 index 17b9914..0000000 --- a/firmware/lib/WioTerminalLib/include/Network/Certificates.h +++ /dev/null @@ -1,3 +0,0 @@ -#pragma once - -extern const char CERT_BALTIMORE_CYBERTRUST_ROOT_CA[]; diff --git a/firmware/lib/WioTerminalLib/include/Network/Signature.h b/firmware/lib/WioTerminalLib/include/Network/Signature.h deleted file mode 100644 index 62fcb2e..0000000 --- a/firmware/lib/WioTerminalLib/include/Network/Signature.h +++ /dev/null @@ -1,7 +0,0 @@ -#pragma once - -#include -#include - -std::string GenerateEncryptedSignature(const std::string& symmetricKey, const std::vector& signature); -std::string ComputeDerivedSymmetricKey(const std::string& masterKey, const std::string& registrationId); diff --git a/firmware/lib/WioTerminalLib/include/Network/TimeManager.h b/firmware/lib/WioTerminalLib/include/Network/TimeManager.h deleted file mode 100644 index 4e6167d..0000000 --- a/firmware/lib/WioTerminalLib/include/Network/TimeManager.h +++ /dev/null @@ -1,22 +0,0 @@ -#pragma once - -#include -#include - -class TimeManager -{ -public: - TimeManager(); - TimeManager(const TimeManager&) = delete; - TimeManager& operator=(const TimeManager&) = delete; - - bool Update(); - unsigned long GetEpochTime() const; - -private: - WiFiUDP Udp_; - NTPClient Client_; - unsigned long CaptureTime_; - unsigned long EpochTime_; - -}; diff --git a/firmware/lib/WioTerminalLib/include/Network/WiFiManager.h b/firmware/lib/WioTerminalLib/include/Network/WiFiManager.h deleted file mode 100644 index 2fec950..0000000 --- a/firmware/lib/WioTerminalLib/include/Network/WiFiManager.h +++ /dev/null @@ -1,15 +0,0 @@ -#pragma once - -#include - -class WiFiManager -{ -public: - void Connect(const char* ssid, const char* password); - bool IsConnected(bool reconnect = true); - -private: - std::string Ssid_; - std::string Password_; - -}; diff --git a/firmware/lib/WioTerminalLib/library.json b/firmware/lib/WioTerminalLib/library.json deleted file mode 100644 index e7cc578..0000000 --- a/firmware/lib/WioTerminalLib/library.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "name": "WioTerminalLib", - "version": "0.1.0", - "description": "", - "keywords": "", - "license": "MIT", - "homepage": "" - } - \ No newline at end of file diff --git a/firmware/lib/WioTerminalLib/src/Aziot/AziotDps.cpp b/firmware/lib/WioTerminalLib/src/Aziot/AziotDps.cpp deleted file mode 100644 index 35fd702..0000000 --- a/firmware/lib/WioTerminalLib/src/Aziot/AziotDps.cpp +++ 
/dev/null @@ -1,77 +0,0 @@ -#include "Aziot/AziotDps.h" -#include -#include -#include -#include - -static WiFiClientSecure Tcp_; // TODO - -EasyAziotDpsClient AziotDps::DpsClient_; -unsigned long AziotDps::DpsPublishTimeOfQueryStatus_ = 0; - -AziotDps::AziotDps() : - MqttPacketSize_(256) -{ -} - -void AziotDps::SetMqttPacketSize(int size) -{ - MqttPacketSize_ = size; -} - -int AziotDps::RegisterDevice(const std::string& endpointHost, const std::string& idScope, const std::string& registrationId, const std::string& symmetricKey, const std::string& modelId, const uint64_t& expirationEpochTime, std::string* hubHost, std::string* deviceId) -{ - std::string endpointAndPort = endpointHost; - endpointAndPort += ":"; - endpointAndPort += std::to_string(8883); - - if (DpsClient_.Init(endpointAndPort.c_str(), idScope.c_str(), registrationId.c_str()) != 0) return -1; - if (DpsClient_.SetSAS(symmetricKey.c_str(), expirationEpochTime, GenerateEncryptedSignature) != 0) return -2; - - PubSubClient mqtt(Tcp_); - Tcp_.setCACert(CERT_BALTIMORE_CYBERTRUST_ROOT_CA); - mqtt.setBufferSize(MqttPacketSize_); - mqtt.setServer(endpointHost.c_str(), 8883); - mqtt.setCallback(AziotDps::MqttSubscribeCallback); - if (!mqtt.connect(DpsClient_.GetMqttClientId().c_str(), DpsClient_.GetMqttUsername().c_str(), DpsClient_.GetMqttPassword().c_str())) return -3; - - mqtt.subscribe(DpsClient_.GetRegisterSubscribeTopic().c_str()); - - mqtt.publish(DpsClient_.GetRegisterPublishTopic().c_str(), String::format("{payload:{\"modelId\":\"%s\"}}", modelId.c_str()).c_str()); - - while (!DpsClient_.IsRegisterOperationCompleted()) - { - mqtt.loop(); - if (DpsPublishTimeOfQueryStatus_ > 0 && millis() >= DpsPublishTimeOfQueryStatus_) - { - mqtt.publish(DpsClient_.GetQueryStatusPublishTopic().c_str(), ""); - DpsPublishTimeOfQueryStatus_ = 0; - } - } - - if (!DpsClient_.IsAssigned()) return -4; - - mqtt.disconnect(); - - *hubHost = DpsClient_.GetHubHost(); - *deviceId = DpsClient_.GetDeviceId(); - - return 0; -} - -void AziotDps::MqttSubscribeCallback(char* topic, uint8_t* payload, unsigned int length) -{ - if (DpsClient_.RegisterSubscribeWork(topic, std::vector(payload, payload + length)) != 0) - { - Serial.printf("Failed to parse topic and/or payload\n"); - return; - } - - if (!DpsClient_.IsRegisterOperationCompleted()) - { - const int waitSeconds = DpsClient_.GetWaitBeforeQueryStatusSeconds(); - Serial.printf("Querying after %u seconds...\n", waitSeconds); - - DpsPublishTimeOfQueryStatus_ = millis() + waitSeconds * 1000; - } -} diff --git a/firmware/lib/WioTerminalLib/src/Aziot/AziotHub.cpp b/firmware/lib/WioTerminalLib/src/Aziot/AziotHub.cpp deleted file mode 100644 index 8126ac4..0000000 --- a/firmware/lib/WioTerminalLib/src/Aziot/AziotHub.cpp +++ /dev/null @@ -1,118 +0,0 @@ -#include "Aziot/AziotHub.h" -#include -#include -#include -#include - -static WiFiClientSecure Tcp_; // TODO -static PubSubClient Mqtt_(Tcp_); - -std::function AziotHub::ReceivedTwinDocumentCallback; -std::function AziotHub::ReceivedTwinDesiredPatchCallback; -EasyAziotHubClient AziotHub::HubClient_; - -AziotHub::AziotHub() : - MqttPacketSize_(256) -{ -} - -void AziotHub::SetMqttPacketSize(int size) -{ - MqttPacketSize_ = size; -} - -void AziotHub::DoWork() -{ - Mqtt_.loop(); -} - -bool AziotHub::IsConnected() -{ - return Mqtt_.connected(); -} - -int AziotHub::Connect(const std::string& host, const std::string& deviceId, const std::string& symmetricKey, const std::string& modelId, const uint64_t& expirationEpochTime) -{ - if (HubClient_.Init(host.c_str(), 
deviceId.c_str(), modelId.c_str()) != 0) return -1; - if (HubClient_.SetSAS(symmetricKey.c_str(), expirationEpochTime, GenerateEncryptedSignature) != 0) return -2; - - Serial.println("Hub:"); - Serial.print(" Host = "); - Serial.println(host.c_str()); - Serial.print(" Device id = "); - Serial.println(deviceId.c_str()); - Serial.print(" MQTT client id = "); - Serial.println(HubClient_.GetMqttClientId().c_str()); - Serial.print(" MQTT username = "); - Serial.println(HubClient_.GetMqttUsername().c_str()); - - Tcp_.setCACert(CERT_BALTIMORE_CYBERTRUST_ROOT_CA); - Mqtt_.setBufferSize(MqttPacketSize_); - Mqtt_.setServer(host.c_str(), 8883); - Mqtt_.setCallback(MqttSubscribeCallback); - if (!Mqtt_.connect(HubClient_.GetMqttClientId().c_str(), HubClient_.GetMqttUsername().c_str(), HubClient_.GetMqttPassword().c_str())) return -3; - - Mqtt_.subscribe(AZ_IOT_HUB_CLIENT_TWIN_RESPONSE_SUBSCRIBE_TOPIC); - Mqtt_.subscribe(AZ_IOT_HUB_CLIENT_TWIN_PATCH_SUBSCRIBE_TOPIC); - - return 0; -} - -void AziotHub::Disconnect() -{ - Mqtt_.disconnect(); -} - -void AziotHub::SendTelemetry(const char* payload, char* componentName) -{ - std::string telemetryTopic = HubClient_.GetTelemetryPublishTopic(componentName); - - static int sendCount = 0; - if (!Mqtt_.publish(telemetryTopic.c_str(), payload, false)) - { - // Serial.printf("ERROR: Send telemetry %d\n", sendCount); - return; // TODO - } - else - { - ++sendCount; - // Serial.printf("Sent telemetry %d\n", sendCount); - } -} - -void AziotHub::RequestTwinDocument(const char* requestId) -{ - Mqtt_.publish(HubClient_.GetTwinDocumentPublishTopic(requestId).c_str(), nullptr); -} - -void AziotHub::SendTwinPatch(const char* requestId, const char* payload) -{ - Mqtt_.publish(HubClient_.GetTwinPatchPublishTopic(requestId).c_str(), payload); -} - -void AziotHub::MqttSubscribeCallback(char* topic, uint8_t* payload, unsigned int length) -{ - // Serial.printf("Received twin\n"); - // Serial.printf(" topic :%s\n", topic); - // Serial.print("payload:"); - // for (int i = 0; i < static_cast(length); i++) Serial.print(static_cast(payload[i])); - // Serial.println(); - - EasyAziotHubClient::TwinResponse response; - if (HubClient_.ParseTwinTopic(topic, response) == 0) - { - std::string json(reinterpret_cast(payload), reinterpret_cast(payload) + length); - - switch (response.ResponseType) - { - case AZ_IOT_HUB_CLIENT_TWIN_RESPONSE_TYPE_GET: - if (ReceivedTwinDocumentCallback != nullptr) ReceivedTwinDocumentCallback(json.c_str(), response.RequestId.c_str()); - break; - case AZ_IOT_HUB_CLIENT_TWIN_RESPONSE_TYPE_DESIRED_PROPERTIES: - if (ReceivedTwinDesiredPatchCallback != nullptr) ReceivedTwinDesiredPatchCallback(json.c_str(), response.Version.c_str()); - break; - case AZ_IOT_HUB_CLIENT_TWIN_RESPONSE_TYPE_REPORTED_PROPERTIES: - break; - } - } -} diff --git a/firmware/lib/WioTerminalLib/src/Aziot/EasyAziotDpsClient.cpp b/firmware/lib/WioTerminalLib/src/Aziot/EasyAziotDpsClient.cpp deleted file mode 100644 index 84ab936..0000000 --- a/firmware/lib/WioTerminalLib/src/Aziot/EasyAziotDpsClient.cpp +++ /dev/null @@ -1,169 +0,0 @@ -#include "Aziot/EasyAziotConfig.h" -#include "Aziot/EasyAziotDpsClient.h" -#include -#include - -static inline const az_span az_span_create_from_string(const std::string& str) -{ - return az_span_create(reinterpret_cast(const_cast(str.c_str())), str.size()); -} - -EasyAziotDpsClient::EasyAziotDpsClient() : - ResponseValid_(false) -{ -} - -int EasyAziotDpsClient::Init(const char* endpoint, const char* idScope, const char* registrationId) -{ - ResponseValid_ = false; - - 
Endpoint_ = endpoint; - IdScope_ = idScope; - RegistrationId_ = registrationId; - - { - const az_span endpointSpan = az_span_create_from_string(Endpoint_); - const az_span idScopeSpan = az_span_create_from_string(IdScope_); - const az_span registrationIdSpan = az_span_create_from_string(RegistrationId_); - if (az_result_failed(az_iot_provisioning_client_init(&ProvClient_, endpointSpan, idScopeSpan, registrationIdSpan, nullptr))) return -1; // SDK_API - } - - { - char mqttUsername[MQTT_USERNAME_MAX_SIZE]; - if (az_result_failed(az_iot_provisioning_client_get_user_name(&ProvClient_, mqttUsername, sizeof(mqttUsername), nullptr))) return -2; // SDK_API - MqttUsername_ = mqttUsername; - } - - { - char mqttClientId[MQTT_CLIENT_ID_MAX_SIZE]; - if (az_result_failed(az_iot_provisioning_client_get_client_id(&ProvClient_, mqttClientId, sizeof(mqttClientId), nullptr))) return -3; // SDK_API - MqttClientId_ = mqttClientId; - } - - MqttPassword_.clear(); - - return 0; -} - -int EasyAziotDpsClient::SetSAS(const char* symmetricKey, const uint64_t& expirationEpochTime, std::function& signature)> generateEncryptedSignature) -{ - //////////////////// - // SAS auth - - std::string encryptedSignature; - { - std::vector signature; - { - uint8_t signatureBuf[SIGNATURE_MAX_SIZE]; - const az_span signatureSpan = AZ_SPAN_FROM_BUFFER(signatureBuf); - az_span signatureValidSpan; - if (az_result_failed(az_iot_provisioning_client_sas_get_signature(&ProvClient_, expirationEpochTime, signatureSpan, &signatureValidSpan))) return -4; // SDK_API - signature.assign(az_span_ptr(signatureValidSpan), az_span_ptr(signatureValidSpan) + az_span_size(signatureValidSpan)); - } - encryptedSignature = generateEncryptedSignature(symmetricKey, signature); - } - - { - char mqttPassword[MQTT_PASSWORD_MAX_SIZE]; - const az_span encryptedSignatureSpan = az_span_create_from_string(encryptedSignature); - if (az_result_failed(az_iot_provisioning_client_sas_get_password(&ProvClient_, encryptedSignatureSpan, expirationEpochTime, AZ_SPAN_EMPTY, mqttPassword, sizeof(mqttPassword), nullptr))) return -5; // SDK_API - MqttPassword_ = mqttPassword; - } - - return 0; -} - -const std::string& EasyAziotDpsClient::GetMqttUsername() const -{ - return MqttUsername_; -} - -const std::string& EasyAziotDpsClient::GetMqttClientId() const -{ - return MqttClientId_; -} - -const std::string& EasyAziotDpsClient::GetMqttPassword() const -{ - return MqttPassword_; -} - -std::string EasyAziotDpsClient::GetRegisterPublishTopic() -{ - char registerPublishTopic[REGISTER_PUBLISH_TOPIC_MAX_SIZE]; - if (az_result_failed(az_iot_provisioning_client_register_get_publish_topic(&ProvClient_, registerPublishTopic, sizeof(registerPublishTopic), nullptr))) return std::string(); // SDK_API - - return registerPublishTopic; -} - -std::string EasyAziotDpsClient::GetRegisterSubscribeTopic() const -{ - return AZ_IOT_PROVISIONING_CLIENT_REGISTER_SUBSCRIBE_TOPIC; -} - -int EasyAziotDpsClient::RegisterSubscribeWork(const char* topic, const std::vector& payload) -{ - ResponseValid_ = false; - - ResponseTopic_ = topic; // DON'T REMOVE THIS CODE - ResponsePayload_ = payload; // DON'T REMOVE THIS CODE - - if (az_result_failed(az_iot_provisioning_client_parse_received_topic_and_payload(&ProvClient_, az_span_create_from_string(ResponseTopic_), az_span_create(const_cast(&ResponsePayload_[0]), ResponsePayload_.size()), &Response_))) return -6; // SDK_API - - ResponseValid_ = true; - - return 0; -} - -bool EasyAziotDpsClient::IsRegisterOperationCompleted() -{ - if (!ResponseValid_) return false; - - 
// TODO az_iot_provisioning_client_parse_operation_status? - return az_iot_provisioning_client_operation_complete(GetOperationStatus(Response_)); // SDK_API -} - -int EasyAziotDpsClient::GetWaitBeforeQueryStatusSeconds() const -{ - if (!ResponseValid_) return 0; - - return Response_.retry_after_seconds; -} - -std::string EasyAziotDpsClient::GetQueryStatusPublishTopic() -{ - if (!ResponseValid_) return std::string(); - - char queryStatusPublishTopic[QUERY_STATUS_PUBLISH_TOPIC_MAX_SIZE]; - if (az_result_failed(az_iot_provisioning_client_query_status_get_publish_topic(&ProvClient_, Response_.operation_id, queryStatusPublishTopic, sizeof(queryStatusPublishTopic), nullptr))) return std::string(); // SDK_API - - return queryStatusPublishTopic; -} - -bool EasyAziotDpsClient::IsAssigned() const -{ - if (!ResponseValid_) return false; - - return GetOperationStatus(Response_) == AZ_IOT_PROVISIONING_STATUS_ASSIGNED; -} - -std::string EasyAziotDpsClient::GetHubHost() -{ - if (!IsAssigned()) return std::string(); - - const az_span& span = Response_.registration_state.assigned_hub_hostname; - return std::string(az_span_ptr(span), az_span_ptr(span) + az_span_size(span)); -} - -std::string EasyAziotDpsClient::GetDeviceId() -{ - if (!IsAssigned()) return std::string(); - - const az_span& span = Response_.registration_state.device_id; - return std::string(az_span_ptr(span), az_span_ptr(span) + az_span_size(span)); -} - -az_iot_provisioning_client_operation_status EasyAziotDpsClient::GetOperationStatus(const az_iot_provisioning_client_register_response& response) -{ - return response.operation_status; -} diff --git a/firmware/lib/WioTerminalLib/src/Aziot/EasyAziotHubClient.cpp b/firmware/lib/WioTerminalLib/src/Aziot/EasyAziotHubClient.cpp deleted file mode 100644 index 9716c40..0000000 --- a/firmware/lib/WioTerminalLib/src/Aziot/EasyAziotHubClient.cpp +++ /dev/null @@ -1,162 +0,0 @@ -#include "Aziot/EasyAziotConfig.h" -#include "Aziot/EasyAziotHubClient.h" -#include - -static inline const az_span az_span_create_from_string(const std::string& str) -{ - return az_span_create(reinterpret_cast(const_cast(str.c_str())), str.size()); -} - -EasyAziotHubClient::EasyAziotHubClient() -{ -} - -int EasyAziotHubClient::Init(const char* host, const char* deviceId, const char* modelId) -{ - Host_ = host; - DeviceId_ = deviceId; - ModelId_ = modelId; - - { - const az_span hostSpan = az_span_create_from_string(Host_); - const az_span deviceIdSpan = az_span_create_from_string(DeviceId_); - az_iot_hub_client_options options = az_iot_hub_client_options_default(); - options.model_id = az_span_create_from_string(ModelId_); - if (az_result_failed(az_iot_hub_client_init(&HubClient_, hostSpan, deviceIdSpan, &options))) return -1; // SDK_API - } - - { - char mqttUsername[MQTT_USERNAME_MAX_SIZE]; - if (az_result_failed(az_iot_hub_client_get_user_name(&HubClient_, mqttUsername, sizeof(mqttUsername), nullptr))) return -2; // SDK_API - MqttUsername_ = mqttUsername; - } - - { - char mqttClientId[MQTT_CLIENT_ID_MAX_SIZE]; - size_t client_id_length; - if (az_result_failed(az_iot_hub_client_get_client_id(&HubClient_, mqttClientId, sizeof(mqttClientId), &client_id_length))) return -3; // SDK_API - MqttClientId_ = mqttClientId; - } - - MqttPassword_.clear(); - - return 0; -} - -int EasyAziotHubClient::SetSAS(const char* symmetricKey, const uint64_t& expirationEpochTime, std::function& signature)> generateEncryptedSignature) -{ - //////////////////// - // SAS auth - - std::string encryptedSignature; - { - std::vector signature; - { - uint8_t 
signatureBuf[SIGNATURE_MAX_SIZE]; - az_span signatureSpan = AZ_SPAN_FROM_BUFFER(signatureBuf); - az_span signatureValidSpan; - if (az_result_failed(az_iot_hub_client_sas_get_signature(&HubClient_, expirationEpochTime, signatureSpan, &signatureValidSpan))) return -4; // SDK_API - signature.assign(az_span_ptr(signatureValidSpan), az_span_ptr(signatureValidSpan) + az_span_size(signatureValidSpan)); - } - encryptedSignature = generateEncryptedSignature(symmetricKey, signature); - } - - { - char mqttPassword[MQTT_PASSWORD_MAX_SIZE]; - const az_span encryptedSignatureSpan = az_span_create_from_string(encryptedSignature); - if (az_result_failed(az_iot_hub_client_sas_get_password(&HubClient_, expirationEpochTime, encryptedSignatureSpan, AZ_SPAN_EMPTY, mqttPassword, sizeof(mqttPassword), nullptr))) return -5; // SDK_API - MqttPassword_ = mqttPassword; - } - - return 0; -} - -const std::string& EasyAziotHubClient::GetMqttUsername() const -{ - return MqttUsername_; -} - -const std::string& EasyAziotHubClient::GetMqttClientId() const -{ - return MqttClientId_; -} - -const std::string& EasyAziotHubClient::GetMqttPassword() const -{ - return MqttPassword_; -} - - -std::string EasyAziotHubClient::GetTelemetryPublishTopic(char* componentName) -{ - char telemetryPublishTopic[TELEMETRY_PUBLISH_TOPIC_MAX_SIZE]; - az_iot_message_properties pnp_properties; - az_iot_message_properties* properties = &pnp_properties; - - - if (componentName != nullptr) { - az_span componentNameSpan = az_span_create_from_str(componentName); - - static char pnp_properties_buffer[64]; - static az_span const component_telemetry_prop_span = AZ_SPAN_LITERAL_FROM_STR("$.sub"); - - az_iot_message_properties_init(properties, AZ_SPAN_FROM_BUFFER(pnp_properties_buffer), 0); - az_iot_message_properties_append(properties, component_telemetry_prop_span, componentNameSpan); - } - - - if (az_result_failed(az_iot_hub_client_telemetry_get_publish_topic(&HubClient_, componentName != nullptr ? 
properties : NULL, telemetryPublishTopic, sizeof(telemetryPublishTopic), nullptr))) return std::string(); // SDK_API - - return telemetryPublishTopic; -} - -std::string EasyAziotHubClient::GetTwinDocumentPublishTopic(const char* requestId) -{ - char twinDocumentPublishTopic[TWIN_DOCUMENT_PUBLISH_TOPIC_MAX_SIZE]; - if (az_result_failed(az_iot_hub_client_twin_document_get_publish_topic(&HubClient_, az_span_create_from_str(const_cast(requestId)), twinDocumentPublishTopic, sizeof(twinDocumentPublishTopic), nullptr))) return std::string(); // SDK_API - - return twinDocumentPublishTopic; -} - -std::string EasyAziotHubClient::GetTwinPatchPublishTopic(const char* requestId) -{ - char twinPatchPublishTopic[TWIN_PATCH_PUBLISH_TOPIC_MAX_SIZE]; - - if (az_result_failed(az_iot_hub_client_twin_patch_get_publish_topic(&HubClient_, az_span_create_from_str(const_cast(requestId)), twinPatchPublishTopic, sizeof(twinPatchPublishTopic), nullptr))) return std::string(); // SDK_API - - return twinPatchPublishTopic; -} - -int EasyAziotHubClient::ParseTwinTopic(const char* topic, EasyAziotHubClient::TwinResponse& twinResponse) -{ - az_iot_hub_client_twin_response response; - const az_span topicSpan = az_span_create_from_str(const_cast(topic)); - if (az_result_failed(az_iot_hub_client_twin_parse_received_topic(&HubClient_, topicSpan, &response))) return -1; - - if (az_span_size(response.request_id) <= 0) - { - twinResponse.RequestId.clear(); - } - else - { - char requestId[az_span_size(response.request_id) + 1]; - az_span_to_str(requestId, sizeof(requestId), response.request_id); - twinResponse.RequestId = requestId; - } - - twinResponse.Status = response.status; - twinResponse.ResponseType = response.response_type; - - if (az_span_size(response.version) <= 0) - { - twinResponse.Version.clear(); - } - else - { - char version[az_span_size(response.version) + 1]; - az_span_to_str(version, sizeof(version), response.version); - twinResponse.Version = version; - } - - return 0; -} diff --git a/firmware/lib/WioTerminalLib/src/Network/Certificates.cpp b/firmware/lib/WioTerminalLib/src/Network/Certificates.cpp deleted file mode 100644 index 802d95f..0000000 --- a/firmware/lib/WioTerminalLib/src/Network/Certificates.cpp +++ /dev/null @@ -1,24 +0,0 @@ -#include "Network/Certificates.h" - -const char CERT_BALTIMORE_CYBERTRUST_ROOT_CA[] = - "-----BEGIN CERTIFICATE-----\n" - "MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJ\n" - "RTESMBAGA1UEChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYD\n" - "VQQDExlCYWx0aW1vcmUgQ3liZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoX\n" - "DTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMCSUUxEjAQBgNVBAoTCUJhbHRpbW9y\n" - "ZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFsdGltb3JlIEN5YmVy\n" - "VHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKMEuyKr\n" - "mD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjr\n" - "IZ3AQSsBUnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeK\n" - "mpYcqWe4PwzV9/lSEy/CG9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSu\n" - "XmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9XbIGevOF6uvUA65ehD5f/xXtabz5OTZy\n" - "dc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjprl3RjM71oGDHweI12v/ye\n" - "jl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoIVDaGezq1\n" - "BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3\n" - "DQEBBQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT92\n" - "9hkTI7gQCvlYpNRhcL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3Wgx\n" - "jkzSswF07r51XgdIGn9w/xZchMB5hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0\n" - 
"Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsaY71k5h+3zvDyny67G7fyUIhz\n" - "ksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9HRCwBXbsdtTLS\n" - "R9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp\n" - "-----END CERTIFICATE-----"; diff --git a/firmware/lib/WioTerminalLib/src/Network/Signature.cpp b/firmware/lib/WioTerminalLib/src/Network/Signature.cpp deleted file mode 100644 index 315874f..0000000 --- a/firmware/lib/WioTerminalLib/src/Network/Signature.cpp +++ /dev/null @@ -1,71 +0,0 @@ -#include "Network/Signature.h" -#include -#include -#include -#include - -std::string GenerateEncryptedSignature(const std::string& symmetricKey, const std::vector& signature) -{ - unsigned char base64DecodedSymmetricKey[symmetricKey.size() + 1]; - - // Base64-decode device key - // <-- symmetricKey - // --> base64DecodedSymmetricKey - size_t base64DecodedSymmetricKeyLength; - if (mbedtls_base64_decode(base64DecodedSymmetricKey, sizeof(base64DecodedSymmetricKey), &base64DecodedSymmetricKeyLength, reinterpret_cast(&symmetricKey[0]), symmetricKey.size()) != 0) abort(); - if (base64DecodedSymmetricKeyLength == 0) abort(); - - // SHA-256 encrypt - // <-- base64DecodedSymmetricKey - // <-- signature - // --> encryptedSignature - uint8_t encryptedSignature[32]; // SHA-256 - mbedtls_md_context_t ctx; - const mbedtls_md_type_t mdType{ MBEDTLS_MD_SHA256 }; - if (mbedtls_md_setup(&ctx, mbedtls_md_info_from_type(mdType), 1) != 0) abort(); - if (mbedtls_md_hmac_starts(&ctx, base64DecodedSymmetricKey, base64DecodedSymmetricKeyLength) != 0) abort(); - if (mbedtls_md_hmac_update(&ctx, &signature[0], signature.size()) != 0) abort(); - if (mbedtls_md_hmac_finish(&ctx, encryptedSignature) != 0) abort(); - - // Base64 encode encrypted signature - // <-- encryptedSignature - // --> b64encHmacsha256Signature - char b64encHmacsha256Signature[static_cast(sizeof(encryptedSignature) * 1.5f) + 1]; - size_t b64encHmacsha256SignatureLength; - if (mbedtls_base64_encode(reinterpret_cast(b64encHmacsha256Signature), sizeof(b64encHmacsha256Signature), &b64encHmacsha256SignatureLength, encryptedSignature, mbedtls_md_get_size(mbedtls_md_info_from_type(mdType))) != 0) abort(); - - return std::string(b64encHmacsha256Signature, b64encHmacsha256SignatureLength); -} - -std::string ComputeDerivedSymmetricKey(const std::string& masterKey, const std::string& registrationId) -{ - unsigned char base64DecodedMasterKey[masterKey.size() + 1]; - - // Base64-decode device key - // <-- masterKey - // --> base64DecodedMasterKey - size_t base64DecodedMasterKeyLength; - if (mbedtls_base64_decode(base64DecodedMasterKey, sizeof(base64DecodedMasterKey), &base64DecodedMasterKeyLength, reinterpret_cast(&masterKey[0]), masterKey.size()) != 0) abort(); - if (base64DecodedMasterKeyLength == 0) abort(); - - // SHA-256 encrypt - // <-- base64DecodedMasterKey - // <-- registrationId - // --> derivedSymmetricKey - uint8_t derivedSymmetricKey[32]; // SHA-256 - mbedtls_md_context_t ctx; - const mbedtls_md_type_t mdType{ MBEDTLS_MD_SHA256 }; - if (mbedtls_md_setup(&ctx, mbedtls_md_info_from_type(mdType), 1) != 0) abort(); - if (mbedtls_md_hmac_starts(&ctx, base64DecodedMasterKey, base64DecodedMasterKeyLength) != 0) abort(); - if (mbedtls_md_hmac_update(&ctx, reinterpret_cast(®istrationId[0]), registrationId.size()) != 0) abort(); - if (mbedtls_md_hmac_finish(&ctx, derivedSymmetricKey) != 0) abort(); - - // Base64 encode encrypted signature - // <-- derivedSymmetricKey - // --> b64encDerivedSymmetricKey - char b64encDerivedSymmetricKey[static_cast(sizeof(derivedSymmetricKey) * 1.5f) + 1]; - 
size_t b64encDerivedSymmetricKeyLength; - if (mbedtls_base64_encode(reinterpret_cast(b64encDerivedSymmetricKey), sizeof(b64encDerivedSymmetricKey), &b64encDerivedSymmetricKeyLength, derivedSymmetricKey, mbedtls_md_get_size(mbedtls_md_info_from_type(mdType))) != 0) abort(); - - return std::string(b64encDerivedSymmetricKey, b64encDerivedSymmetricKeyLength); -} diff --git a/firmware/lib/WioTerminalLib/src/Network/TimeManager.cpp b/firmware/lib/WioTerminalLib/src/Network/TimeManager.cpp deleted file mode 100644 index 25f790d..0000000 --- a/firmware/lib/WioTerminalLib/src/Network/TimeManager.cpp +++ /dev/null @@ -1,28 +0,0 @@ -#include "Network/TimeManager.h" -#include - -TimeManager::TimeManager() : - Client_(Udp_) -{ -} - -bool TimeManager::Update() -{ - bool result = false; - - Client_.begin(); - if (Client_.forceUpdate()) - { - CaptureTime_ = millis(); - EpochTime_ = Client_.getEpochTime(); - result = true; - } - Client_.end(); - - return result; -} - -unsigned long TimeManager::GetEpochTime() const -{ - return EpochTime_ + (millis() - CaptureTime_) / 1000; -} diff --git a/firmware/lib/WioTerminalLib/src/Network/WiFiManager.cpp b/firmware/lib/WioTerminalLib/src/Network/WiFiManager.cpp deleted file mode 100644 index 0b02628..0000000 --- a/firmware/lib/WioTerminalLib/src/Network/WiFiManager.cpp +++ /dev/null @@ -1,19 +0,0 @@ -#include "Network/WiFiManager.h" -#include - -void WiFiManager::Connect(const char* ssid, const char* password) -{ - Ssid_ = ssid; - Password_ = password; -} - -bool WiFiManager::IsConnected(bool reconnect) -{ - if (Ssid_.empty()) return false; - - if (WiFi.status() == WL_CONNECTED) return true; - - if (reconnect) WiFi.begin(Ssid_.c_str(), Password_.c_str()); - - return false; -} diff --git a/firmware/lib/ei-artificial_nose-arduino/src/artificial_nose_inferencing.h b/firmware/lib/ei-artificial_nose-arduino/src/artificial_nose_inferencing.h index 02aaf37..af75d11 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/artificial_nose_inferencing.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/artificial_nose_inferencing.h @@ -32,6 +32,9 @@ #ifdef max #undef max #endif // max +#ifdef abs +#undef abs +#endif // abs #ifdef round #undef round #endif // round diff --git a/firmware/platformio.ini b/firmware/platformio.ini index b3c55f8..ad6e766 100644 --- a/firmware/platformio.ini +++ b/firmware/platformio.ini @@ -35,17 +35,3 @@ lib_deps = https://github.com/Seeed-Studio/Seeed_Arduino_Linechart#v1.0.0 rlogiacco/CircularBuffer@^1.3.3 https://github.com/bxparks/AceButton#v1.9.2 - ;https://github.com/lovyan03/LovyanGFX#0.3.10 - ; BEGIN Azure IoT dependencies - https://github.com/SeeedJP/pio-azure-sdk-for-c#1.1.0 - https://github.com/Seeed-Studio/Seeed_Arduino_rpcWiFi#v1.0.5 - https://github.com/Seeed-Studio/Seeed_Arduino_rpcUnified#v2.1.4 - https://github.com/Seeed-Studio/Seeed_Arduino_mbedtls#v3.0.1 - https://github.com/Seeed-Studio/Seeed_Arduino_FS#v2.1.1 - https://github.com/Seeed-Studio/Seeed_Arduino_SFUD#v2.0.1 - https://github.com/ciniml/ExtFlashLoader#0.1.2 - arduino-libraries/NTPClient - knolleary/PubSubClient - hideakitai/MsgPack - bblanchon/ArduinoJson@6.20.1 - ; END Azure IoT dependencies \ No newline at end of file diff --git a/firmware/seeedstudio-grove-multichannel-gas-sensor-v2.json b/firmware/seeedstudio-grove-multichannel-gas-sensor-v2.json deleted file mode 100644 index ea02595..0000000 --- a/firmware/seeedstudio-grove-multichannel-gas-sensor-v2.json +++ /dev/null @@ -1,63 +0,0 
@@ -{ - "@id": "dtmi:seeedstudio:grove:multichannel_gas_sensor_v2;1", - "@type": "Interface", - "@context": "dtmi:dtdl:context;2", - "displayName": "Grove - Multichannel Gas Sensor v2", - "description": { - "en": "A sensor that provides stable and reliable gas detection for carbon monoxide, nitrogen dioxide, volatile organic compounds and ethyl alcohol." - }, - "contents": [ - { - "@type": [ - "Telemetry" - ], - "name": "voc", - "displayName": { - "en": "VOC (ppm)" - }, - "description": { - "en": "Volatile Organic Compounds (VOC) Concentration" - }, - "schema": "integer" - }, - { - "@type": [ - "Telemetry" - ], - "name": "no2", - "displayName": { - "en": "NO₂ (ppm)" - }, - "description": { - "en": "Nitrogen Dioxide (NO₂) Concentration" - }, - "schema": "integer" - }, - { - "@type": [ - "Telemetry" - ], - "name": "c2h5oh", - "displayName": { - "en": "C₂H₅OH (ppm)" - }, - "description": { - "en": "Ethyl Alcohol (C₂H₅OH) Concentration" - }, - "schema": "integer" - }, - { - "@type": [ - "Telemetry" - ], - "name": "co", - "displayName": { - "en": "CO (ppm)" - }, - "description": { - "en": "Carbon Monoxide (CO) Concentration" - }, - "schema": "integer" - } - ] -} diff --git a/firmware/src/AzureDpsClient.cpp b/firmware/src/AzureDpsClient.cpp deleted file mode 100644 index 8ea86c0..0000000 --- a/firmware/src/AzureDpsClient.cpp +++ /dev/null @@ -1,144 +0,0 @@ -#include "AzureDpsClient.h" -#include - -static constexpr size_t SignatureMaxSize = 256; -static constexpr size_t MqttClientIdMaxSize = 128; -static constexpr size_t MqttUsernameMaxSize = 128; -static constexpr size_t MqttPasswordMaxSize = 300; -static constexpr size_t RegisterPublishTopicMaxSize = 128; -static constexpr size_t QueryStatusPublishTopicMaxSize = 256; - -AzureDpsClient::AzureDpsClient() : - ResponseValid{ false } -{ -} - -int AzureDpsClient::Init(const std::string& endpoint, const std::string& idScope, const std::string& registrationId) -{ - ResponseValid = false; - - Endpoint = endpoint; - IdScope = idScope; - RegistrationId = registrationId; - - const az_span endpointSpan{ az_span_create((uint8_t*)&Endpoint[0], Endpoint.size()) }; - const az_span idScopeSpan{ az_span_create((uint8_t*)&IdScope[0], IdScope.size()) }; - const az_span registrationIdSpan{ az_span_create((uint8_t*)&RegistrationId[0], RegistrationId.size()) }; - if (az_result_failed(az_iot_provisioning_client_init(&ProvClient, endpointSpan, idScopeSpan, registrationIdSpan, NULL))) return -1; - - return 0; -} - -std::vector AzureDpsClient::GetSignature(const uint64_t& expirationEpochTime) -{ - uint8_t signature[SignatureMaxSize]; - az_span signatureSpan = az_span_create(signature, sizeof(signature)); - az_span signatureValidSpan; - if (az_result_failed(az_iot_provisioning_client_sas_get_signature(&ProvClient, expirationEpochTime, signatureSpan, &signatureValidSpan))) return std::vector(); - - return std::vector(az_span_ptr(signatureValidSpan), az_span_ptr(signatureValidSpan) + az_span_size(signatureValidSpan)); -} - -std::string AzureDpsClient::GetMqttClientId() -{ - char mqttClientId[MqttClientIdMaxSize]; - if (az_result_failed(az_iot_provisioning_client_get_client_id(&ProvClient, mqttClientId, sizeof(mqttClientId), NULL))) return std::string(); - - return mqttClientId; -} - -std::string AzureDpsClient::GetMqttUsername() -{ - char mqttUsername[MqttUsernameMaxSize]; - if (az_result_failed(az_iot_provisioning_client_get_user_name(&ProvClient, mqttUsername, sizeof(mqttUsername), NULL))) return std::string(); - - return mqttUsername; -} - -std::string 
AzureDpsClient::GetMqttPassword(const std::string& encryptedSignature, const uint64_t& expirationEpochTime) -{ - char mqttPassword[MqttPasswordMaxSize]; - az_span encryptedSignatureSpan = az_span_create((uint8_t*)&encryptedSignature[0], encryptedSignature.size()); - if (az_result_failed(az_iot_provisioning_client_sas_get_password(&ProvClient, encryptedSignatureSpan, expirationEpochTime, AZ_SPAN_EMPTY, mqttPassword, sizeof(mqttPassword), NULL))) return std::string(); - - return mqttPassword; -} - -std::string AzureDpsClient::GetRegisterPublishTopic() -{ - char registerPublishTopic[RegisterPublishTopicMaxSize]; - if (az_result_failed(az_iot_provisioning_client_register_get_publish_topic(&ProvClient, registerPublishTopic, sizeof(registerPublishTopic), NULL))) return std::string(); - - return registerPublishTopic; -} - -std::string AzureDpsClient::GetRegisterSubscribeTopic() const -{ - return AZ_IOT_PROVISIONING_CLIENT_REGISTER_SUBSCRIBE_TOPIC; -} - -int AzureDpsClient::RegisterSubscribeWork(const std::string& topic, const std::vector& payload) -{ - ResponseValid = false; - - ResponseTopic = topic; - ResponsePayload = payload; - - if (az_result_failed(az_iot_provisioning_client_parse_received_topic_and_payload(&ProvClient, az_span_create((uint8_t*)&ResponseTopic[0], ResponseTopic.size()), az_span_create((uint8_t*)&ResponsePayload[0], ResponsePayload.size()), &Response))) return -1; - - ResponseValid = true; - - return 0; -} - -bool AzureDpsClient::IsRegisterOperationCompleted() -{ - if (!ResponseValid) return false; - - return az_iot_provisioning_client_operation_complete(GetOperationStatus(Response)); -} - -int AzureDpsClient::GetWaitBeforeQueryStatusSeconds() const -{ - if (!ResponseValid) return 0; - - return Response.retry_after_seconds; -} - -std::string AzureDpsClient::GetQueryStatusPublishTopic() -{ - if (!ResponseValid) return std::string(); - - char queryStatusPublishTopic[QueryStatusPublishTopicMaxSize]; - if (az_result_failed(az_iot_provisioning_client_query_status_get_publish_topic(&ProvClient, Response.operation_id, queryStatusPublishTopic, sizeof(queryStatusPublishTopic), NULL))) return std::string(); - - return queryStatusPublishTopic; -} - -bool AzureDpsClient::IsAssigned() -{ - if (!ResponseValid) return false; - - return GetOperationStatus(Response) == AZ_IOT_PROVISIONING_STATUS_ASSIGNED; -} - -std::string AzureDpsClient::GetHubHost() -{ - if (!IsAssigned()) return std::string(); - - const az_span& span{ Response.registration_state.assigned_hub_hostname }; - return std::string(az_span_ptr(span), az_span_ptr(span) + az_span_size(span)); -} - -std::string AzureDpsClient::GetDeviceId() -{ - if (!IsAssigned()) return std::string(); - - const az_span& span{ Response.registration_state.device_id }; - return std::string(az_span_ptr(span), az_span_ptr(span) + az_span_size(span)); -} - -az_iot_provisioning_client_operation_status AzureDpsClient::GetOperationStatus(az_iot_provisioning_client_register_response& response) -{ - return response.operation_status; -} diff --git a/firmware/src/Config.cpp b/firmware/src/Config.cpp deleted file mode 100644 index 3ff4a38..0000000 --- a/firmware/src/Config.cpp +++ /dev/null @@ -1,4 +0,0 @@ -#include "Config.h" - -const char DPS_GLOBAL_DEVICE_ENDPOINT_HOST[] = "global.azure-devices-provisioning.net"; -const char MODEL_ID[] = "dtmi:kartben:artificial_nose;1"; diff --git a/firmware/src/ConfigurationMode.cpp b/firmware/src/ConfigurationMode.cpp deleted file mode 100644 index a8ea62e..0000000 --- a/firmware/src/ConfigurationMode.cpp +++ /dev/null @@ 
-1,397 +0,0 @@ -#include -#include "ConfigurationMode.h" - -#include "Storage.h" -#include - -#define END_CHAR ('\r') -#define TAB_CHAR ('\t') -#define SPACE_CHAR (' ') -#define BACKSPACE_CHAR (0x08) -#define DEL_CHAR (0x7f) - -#define MAX_CMD_ARG (4) - -#define DLM "\r\n" -#define PROMPT DLM "# " - -#define INBUF_SIZE (1024) - -static Storage* Storage_; - -struct console_command -{ - const char *name; - const char *help; - void (*function) (int argc, char **argv); -}; - -static void help_command(int argc, char** argv); -static void burn_rtl8720_command(int argc, char** argv); -static void reset_factory_settings_command(int argc, char** argv); -static void display_settings_command(int argc, char** argv); -static void wifissid_command(int argc, char** argv); -static void wifipwd_command(int argc, char** argv); -static void az_idscope_command(int argc, char** argv); -static void az_regid_command(int argc, char** argv); -static void az_symkey_command(int argc, char** argv); -static void az_iotc_command(int argc, char** argv); - -static const struct console_command cmds[] = -{ - {"help" , "Help document" , help_command }, - {"burn_rtl8720" , "Enter the Burn RTL8720 Firmware mode" , burn_rtl8720_command }, - {"reset_factory_settings", "Reset factory settings" , reset_factory_settings_command }, - {"show_settings" , "Display settings" , display_settings_command }, - {"set_wifissid" , "Set Wi-Fi SSID" , wifissid_command }, - {"set_wifipwd" , "Set Wi-Fi password" , wifipwd_command }, - {"set_az_idscope" , "Set id scope of Azure IoT DPS" , az_idscope_command }, - {"set_az_regid" , "Set registration id of Azure IoT DPS" , az_regid_command }, - {"set_az_symkey" , "Set symmetric key of Azure IoT DPS" , az_symkey_command }, - {"set_az_iotc" , "Set connection information of Azure IoT Central", az_iotc_command } -}; - -static const int cmd_count = sizeof(cmds) / sizeof(cmds[0]); - -static void EnterBurnRTL8720Mode() -{ - // Switch mode of RTL8720 - pinMode(PIN_SERIAL2_RX, OUTPUT); - pinMode(RTL8720D_CHIP_PU, OUTPUT); - digitalWrite(PIN_SERIAL2_RX, LOW); - digitalWrite(RTL8720D_CHIP_PU, LOW); - delay(500); - pinMode(RTL8720D_CHIP_PU, INPUT); - delay(500); - pinMode(PIN_SERIAL2_RX, INPUT); - - // Initialize UART - Serial.beginWithoutDTR(115200); - auto oldBaud = Serial.baud(); - RTL8720D.begin(oldBaud); - delay(500); - - while (true) - { - // Change baud - const auto baud = Serial.baud(); - if (baud != oldBaud) - { - RTL8720D.begin(baud); - oldBaud = baud; - } - - // USB -> RTL - while (Serial.available()) RTL8720D.write(Serial.read()); - - // RTL -> USB - while (RTL8720D.available()) Serial.write(RTL8720D.read()); - } -} - -static void print_help() -{ - Serial.print("Configuration console:" DLM); - - for (int i = 0; i < cmd_count; i++) - { - Serial.print(String::format(" - %s: %s." DLM, cmds[i].name, cmds[i].help)); - } -} - -static void help_command(int argc, char** argv) -{ - print_help(); -} - -static void burn_rtl8720_command(int argc, char** argv) -{ - Serial.print("Enter the Burn RTL8720 Firmware mode." 
DLM); - Serial.print("[Windows]" DLM); - Serial.print(" ambd_flash_tool.exe erase" DLM); - Serial.print(" ambd_flash_tool.exe flash -d [RTL8720-firmware-path]" DLM); - Serial.print("[macOS/Linux]" DLM); - Serial.print(" python3 ambd_flash_tool.py erase" DLM); - Serial.print(" python3 ambd_flash_tool.py flash -d [RTL8720-firmware-path]" DLM); - delay(1000); - - EnterBurnRTL8720Mode(); -} - -static void reset_factory_settings_command(int argc, char** argv) -{ - Storage_->Erase(); - Storage_->Load(); - - Serial.print("Reset factory settings successfully." DLM); -} - -static void display_settings_command(int argc, char** argv) -{ - Serial.print(String::format("Wi-Fi SSID = %s" DLM, Storage_->WiFiSSID.c_str())); - Serial.print(String::format("Wi-Fi password = %s" DLM, Storage_->WiFiPassword.c_str())); - Serial.print(String::format("Id scope of Azure IoT DPS = %s" DLM, Storage_->IdScope.c_str())); - Serial.print(String::format("Registration id of Azure IoT DPS = %s" DLM, Storage_->RegistrationId.c_str())); - Serial.print(String::format("Symmetric key of Azure IoT DPS = %s" DLM, Storage_->SymmetricKey.c_str())); -} - -static void wifissid_command(int argc, char** argv) -{ - if (argc != 2) - { - Serial.print(String::format("ERROR: Usage: %s . Please provide the SSID of the Wi-Fi." DLM, argv[0])); - return; - } - - Storage_->WiFiSSID = argv[1]; - Storage_->Save(); - - Serial.print("Set Wi-Fi SSID successfully." DLM); -} - -static void wifipwd_command(int argc, char** argv) -{ - if (argc != 2) - { - Serial.print(String::format("ERROR: Usage: %s . Please provide the password of the Wi-Fi." DLM, argv[0])); - return; - } - - Storage_->WiFiPassword = argv[1]; - Storage_->Save(); - - Serial.print("Set Wi-Fi password successfully." DLM); -} - -static void az_idscope_command(int argc, char** argv) -{ - if (argc != 2) - { - Serial.print(String::format("ERROR: Usage: %s . Please provide the id scope of the Azure IoT DPS." DLM, argv[0])); - return; - } - - Storage_->IdScope = argv[1]; - Storage_->Save(); - - Serial.print("Set id scope successfully." DLM); -} - -static void az_regid_command(int argc, char** argv) -{ - if (argc != 2) - { - Serial.print(String::format("ERROR: Usage: %s . Please provide the registraion id of the Azure IoT DPS." DLM, argv[0])); - return; - } - - Storage_->RegistrationId = argv[1]; - Storage_->Save(); - - Serial.print("Set registration id successfully." DLM); -} - -static void az_symkey_command(int argc, char** argv) -{ - if (argc != 2) - { - Serial.print(String::format("ERROR: Usage: %s . Please provide the symmetric key of the Azure IoT DPS." DLM, argv[0])); - return; - } - - Storage_->SymmetricKey = argv[1]; - Storage_->Save(); - - Serial.print("Set symmetric key successfully." DLM); -} - -static void az_iotc_command(int argc, char** argv) -{ - if (argc != 4) - { - Serial.print(String::format("ERROR: Usage: %s ." DLM, argv[0])); - return; - } - - Storage_->IdScope = argv[1]; - Storage_->RegistrationId = argv[3]; - Storage_->SymmetricKey = ComputeDerivedSymmetricKey(argv[2], argv[3]); - Storage_->Save(); - - Serial.print("Set connection information of Azure IoT Central successfully." 
DLM); -} - -static bool CliGetInput(char* inbuf, int* bp) -{ - if (inbuf == nullptr) - { - return false; - } - - while (Serial.available() >= 1) - { - inbuf[*bp] = (char)Serial.read(); - - if (inbuf[*bp] == END_CHAR) - { - /* end of input line */ - inbuf[*bp] = '\0'; - *bp = 0; - return true; - } - else if (inbuf[*bp] == TAB_CHAR) - { - inbuf[*bp] = SPACE_CHAR; - } - else if (inbuf[*bp] == BACKSPACE_CHAR || inbuf[*bp] == DEL_CHAR) - { - // Delete - if (*bp > 0) - { - (*bp)--; - Serial.write(BACKSPACE_CHAR); - Serial.write(SPACE_CHAR); - Serial.write(BACKSPACE_CHAR); - } - continue; - } - else if (inbuf[*bp] < SPACE_CHAR) - { - continue; - } - - // Echo - Serial.write(inbuf[*bp]); - (*bp)++; - - if (*bp >= INBUF_SIZE) - { - Serial.print(DLM "ERROR: Input buffer overflow." DLM); - Serial.print(PROMPT); - *bp = 0; - continue; - } - } - - return false; -} - -static bool CliHandleInput(char* inbuf) -{ - struct - { - unsigned inArg:1; - unsigned inQuote:1; - unsigned done:1; - } stat; - - char* argv[MAX_CMD_ARG]; - int argc = 0; - - int i = 0; - - memset((void*)&argv, 0, sizeof(argv)); - memset(&stat, 0, sizeof(stat)); - - do - { - switch (inbuf[i]) - { - case '\0': - if (stat.inQuote) - { - return false; - } - stat.done = 1; - break; - - case '"': - if (i > 0 && inbuf[i - 1] == '\\' && stat.inArg) - { - memcpy(&inbuf[i - 1], &inbuf[i], strlen(&inbuf[i]) + 1); - - --i; - break; - } - if (!stat.inQuote && stat.inArg) break; - if (stat.inQuote && !stat.inArg) return false; - - if (!stat.inQuote && !stat.inArg) - { - stat.inArg = 1; - stat.inQuote = 1; - argc++; - argv[argc - 1] = &inbuf[i + 1]; - } - else if (stat.inQuote && stat.inArg) - { - stat.inArg = 0; - stat.inQuote = 0; - inbuf[i] = '\0'; - } - break; - - case ' ': - if (i > 0 && inbuf[i - 1] == '\\' && stat.inArg) - { - memcpy(&inbuf[i - 1], &inbuf[i], strlen(&inbuf[i]) + 1); - --i; - break; - } - if (!stat.inQuote && stat.inArg) - { - stat.inArg = 0; - inbuf[i] = '\0'; - } - break; - default: - if (!stat.inArg) - { - stat.inArg = 1; - argc++; - argv[argc - 1] = &inbuf[i]; - } - break; - } - } - while (!stat.done && ++i < INBUF_SIZE && argc <= MAX_CMD_ARG); - - if (stat.inQuote) return false; - if (argc < 1) return true; - - Serial.print(DLM); - - for(int i = 0; i < cmd_count; i++) - { - if(strcmp(cmds[i].name, argv[0]) == 0) - { - cmds[i].function(argc, argv); - return true; - } - } - - Serial.print(String::format("ERROR: Invalid command: %s" DLM, argv[0])); - return true; -} - -[[noreturn]] void ConfigurationMode(Storage& storage) -{ - Storage_ = &storage; - - print_help(); - Serial.print(PROMPT); - - char inbuf[INBUF_SIZE]; - int bp = 0; - while (true) - { - if (!CliGetInput(inbuf, &bp)) continue; - - if (!CliHandleInput(inbuf)) - { - Serial.print("ERROR: Syntax error." 
DLM); - } - - Serial.print(PROMPT); - } -} diff --git a/firmware/src/Signature.cpp b/firmware/src/Signature.cpp deleted file mode 100644 index 36a745c..0000000 --- a/firmware/src/Signature.cpp +++ /dev/null @@ -1,71 +0,0 @@ -#include "Signature.h" -#include -#include -#include -#include - -std::string GenerateEncryptedSignature(const std::string& symmetricKey, const std::vector& signature) -{ - unsigned char base64DecodedSymmetricKey[symmetricKey.size() + 1]; - - // Base64-decode device key - // <-- symmetricKey - // --> base64DecodedSymmetricKey - size_t base64DecodedSymmetricKeyLength; - if (mbedtls_base64_decode(base64DecodedSymmetricKey, sizeof(base64DecodedSymmetricKey), &base64DecodedSymmetricKeyLength, (unsigned char*)&symmetricKey[0], symmetricKey.size()) != 0) abort(); - if (base64DecodedSymmetricKeyLength == 0) abort(); - - // SHA-256 encrypt - // <-- base64DecodedSymmetricKey - // <-- signature - // --> encryptedSignature - uint8_t encryptedSignature[32]; // SHA-256 - mbedtls_md_context_t ctx; - const mbedtls_md_type_t mdType{ MBEDTLS_MD_SHA256 }; - if (mbedtls_md_setup(&ctx, mbedtls_md_info_from_type(mdType), 1) != 0) abort(); - if (mbedtls_md_hmac_starts(&ctx, base64DecodedSymmetricKey, base64DecodedSymmetricKeyLength) != 0) abort(); - if (mbedtls_md_hmac_update(&ctx, &signature[0], signature.size()) != 0) abort(); - if (mbedtls_md_hmac_finish(&ctx, encryptedSignature) != 0) abort(); - - // Base64 encode encrypted signature - // <-- encryptedSignature - // --> b64encHmacsha256Signature - char b64encHmacsha256Signature[(size_t)(sizeof(encryptedSignature) * 1.5f) + 1]; - size_t b64encHmacsha256SignatureLength; - if (mbedtls_base64_encode((unsigned char*)b64encHmacsha256Signature, sizeof(b64encHmacsha256Signature), &b64encHmacsha256SignatureLength, encryptedSignature, mbedtls_md_get_size(mbedtls_md_info_from_type(mdType))) != 0) abort(); - - return std::string(b64encHmacsha256Signature, b64encHmacsha256SignatureLength); -} - -std::string ComputeDerivedSymmetricKey(const std::string& masterKey, const std::string& registrationId) -{ - unsigned char base64DecodedMasterKey[masterKey.size() + 1]; - - // Base64-decode device key - // <-- masterKey - // --> base64DecodedMasterKey - size_t base64DecodedMasterKeyLength; - if (mbedtls_base64_decode(base64DecodedMasterKey, sizeof(base64DecodedMasterKey), &base64DecodedMasterKeyLength, (unsigned char*)&masterKey[0], masterKey.size()) != 0) abort(); - if (base64DecodedMasterKeyLength == 0) abort(); - - // SHA-256 encrypt - // <-- base64DecodedMasterKey - // <-- registrationId - // --> derivedSymmetricKey - uint8_t derivedSymmetricKey[32]; // SHA-256 - mbedtls_md_context_t ctx; - const mbedtls_md_type_t mdType{ MBEDTLS_MD_SHA256 }; - if (mbedtls_md_setup(&ctx, mbedtls_md_info_from_type(mdType), 1) != 0) abort(); - if (mbedtls_md_hmac_starts(&ctx, base64DecodedMasterKey, base64DecodedMasterKeyLength) != 0) abort(); - if (mbedtls_md_hmac_update(&ctx, (const unsigned char*)®istrationId[0], registrationId.size()) != 0) abort(); - if (mbedtls_md_hmac_finish(&ctx, derivedSymmetricKey) != 0) abort(); - - // Base64 encode encrypted signature - // <-- derivedSymmetricKey - // --> b64encDerivedSymmetricKey - char b64encDerivedSymmetricKey[(size_t)(sizeof(derivedSymmetricKey) * 1.5f) + 1]; - size_t b64encDerivedSymmetricKeyLength; - if (mbedtls_base64_encode((unsigned char*)b64encDerivedSymmetricKey, sizeof(b64encDerivedSymmetricKey), &b64encDerivedSymmetricKeyLength, derivedSymmetricKey, mbedtls_md_get_size(mbedtls_md_info_from_type(mdType))) != 0) 
abort(); - - return std::string(b64encDerivedSymmetricKey, b64encDerivedSymmetricKeyLength); -} diff --git a/firmware/src/Storage.cpp b/firmware/src/Storage.cpp deleted file mode 100644 index 5853240..0000000 --- a/firmware/src/Storage.cpp +++ /dev/null @@ -1,71 +0,0 @@ -#include -#include "Storage.h" - -#include -#include - -static auto FlashStartAddress = reinterpret_cast(0x04000000); - -Storage::Storage(ExtFlashLoader::QSPIFlash& flash) : - Flash_(flash) -{ - Flash_.initialize(); - Flash_.reset(); - Flash_.enterToMemoryMode(); -} - -void Storage::Load() -{ - if (memcmp(&FlashStartAddress[0], "AZ01", 4) != 0) - { - WiFiSSID.clear(); - WiFiPassword.clear(); - IdScope.clear(); - RegistrationId.clear(); - SymmetricKey.clear(); - } - else - { - MsgPack::Unpacker unpacker; - unpacker.feed(&FlashStartAddress[8], *(const uint32_t*)&FlashStartAddress[4]); - - MsgPack::str_t str[5]; - unpacker.deserialize(str[0], str[1], str[2], str[3], str[4]); - - WiFiSSID = str[0].c_str(); - WiFiPassword = str[1].c_str(); - IdScope = str[2].c_str(); - RegistrationId = str[3].c_str(); - SymmetricKey = str[4].c_str(); - } -} - -void Storage::Save() -{ - MsgPack::Packer packer; - { - MsgPack::str_t str[5]; - str[0] = WiFiSSID.c_str(); - str[1] = WiFiPassword.c_str(); - str[2] = IdScope.c_str(); - str[3] = RegistrationId.c_str(); - str[4] = SymmetricKey.c_str(); - packer.serialize(str[0], str[1], str[2], str[3], str[4]); - } - - std::vector buf(4 + 4 + packer.size()); - memcpy(&buf[0], "AZ01", 4); - *(uint32_t*)&buf[4] = packer.size(); - memcpy(&buf[8], packer.data(), packer.size()); - - ExtFlashLoader::writeExternalFlash(Flash_, 0, &buf[0], buf.size(), [](std::size_t bytes_processed, std::size_t bytes_total, bool verifying) { return true; }); -} - -void Storage::Erase() -{ - Flash_.exitFromMemoryMode(); - Flash_.writeEnable(); - Flash_.eraseSector(0); - Flash_.waitProgram(0); - Flash_.enterToMemoryMode(); -} diff --git a/firmware/src/main.cpp b/firmware/src/main.cpp index 71d1c76..abd720d 100644 --- a/firmware/src/main.cpp +++ b/firmware/src/main.cpp @@ -1,36 +1,4 @@ #include -#include "Config.h" -#include "ConfigurationMode.h" - -//////////////////////////////////////////////////////////////////////////////// -// Storage - -#include -#include "Storage.h" - -static ExtFlashLoader::QSPIFlash Flash_; -static Storage Storage_(Flash_); - -//////////////////////////////////////////////////////////////////////////////// -// Network - -#include -#include -#include -#include -#include - -static bool isWifiConfigured = false; - -static WiFiManager WifiManager_; -static TimeManager TimeManager_; -static std::string HubHost_; -static std::string DeviceId_; -static AziotHub AziotHub_; - -static unsigned long TelemetryInterval_ = TELEMETRY_INTERVAL; // [sec.] -static unsigned long nextTelemetrySendTime = 0; - /** * @brief Printf function uses vsnprintf and output using Arduino Serial @@ -52,156 +20,6 @@ void ei_printf(const char *format, ...) 
} } -static void ConnectWiFi() -{ - ei_printf("Connecting to SSID: %s\n", Storage_.WiFiSSID.c_str()); - WifiManager_.Connect(Storage_.WiFiSSID.c_str(), Storage_.WiFiPassword.c_str()); - while (!WifiManager_.IsConnected()) - { - ei_printf("."); - delay(500); - } - ei_printf("Connected\n"); -} - -static void SyncTimeServer() -{ - ei_printf("Synchronize time\n"); - while (!TimeManager_.Update()) - { - ei_printf("."); - delay(1000); - } - ei_printf("Synchronized\n"); -} - -static bool DeviceProvisioning() -{ - ei_printf("Device provisioning:\n"); - ei_printf(" Id scope = %s\n", Storage_.IdScope.c_str()); - ei_printf(" Registration id = %s\n", Storage_.RegistrationId.c_str()); - - AziotDps aziotDps; - aziotDps.SetMqttPacketSize(MQTT_PACKET_SIZE); - - if (aziotDps.RegisterDevice(DPS_GLOBAL_DEVICE_ENDPOINT_HOST, Storage_.IdScope, Storage_.RegistrationId, Storage_.SymmetricKey, MODEL_ID, TimeManager_.GetEpochTime() + TOKEN_LIFESPAN, &HubHost_, &DeviceId_) != 0) - { - ei_printf("ERROR: RegisterDevice()\n"); - return false; - } - - ei_printf("Device provisioned:\n"); - ei_printf(" Hub host = %s\n", HubHost_.c_str()); - ei_printf(" Device id = %s\n", DeviceId_.c_str()); - - return true; -} - -static bool AziotIsConnected() -{ - return AziotHub_.IsConnected(); -} - -static void AziotDoWork() -{ - if(!isWifiConfigured) return; - - static unsigned long connectTime = 0; - static unsigned long forceDisconnectTime; - - bool repeat; - do { - repeat = false; - - const auto now = TimeManager_.GetEpochTime(); - if (!AziotHub_.IsConnected()) { - if (now >= connectTime) { - // Serial.printf("Connecting to Azure IoT Hub...\n"); - if (AziotHub_.Connect(HubHost_, - DeviceId_, - Storage_.SymmetricKey, - MODEL_ID, - now + TOKEN_LIFESPAN) != 0) { - // Serial.printf("ERROR: Try again in 5 seconds\n"); - connectTime = TimeManager_.GetEpochTime() + 5; - return; - } - - // Serial.printf("SUCCESS\n"); - forceDisconnectTime = - TimeManager_.GetEpochTime() + - static_cast(TOKEN_LIFESPAN * RECONNECT_RATE); - - AziotHub_.RequestTwinDocument("get_twin"); - } - } else { - if (now >= forceDisconnectTime) { - // Serial.printf("Disconnect\n"); - AziotHub_.Disconnect(); - connectTime = 0; - - repeat = true; - } else { - AziotHub_.DoWork(); - } - } - } while (repeat); -} - -template -static void AziotSendConfirm(const char* requestId, const char* name, T value, int ackCode, int ackVersion) -{ - StaticJsonDocument doc; - doc[name]["value"] = value; - doc[name]["ac"] = ackCode; - doc[name]["av"] = ackVersion; - - char json[JSON_MAX_SIZE]; - serializeJson(doc, json); - - AziotHub_.SendTwinPatch(requestId, json); -} - -template -static bool AziotUpdateWritableProperty(const char* name, T* value, const JsonVariant& desiredVersion, const JsonVariant& desired, const JsonVariant& reported = JsonVariant()) -{ - bool ret = false; - - JsonVariant desiredValue = desired[name]; - JsonVariant reportedProperty = reported[name]; - - if (!desiredValue.isNull()) - { - *value = desiredValue.as(); - ret = true; - } - - if (desiredValue.isNull()) - { - if (reportedProperty.isNull()) - { - AziotSendConfirm("init", name, *value, 200, 1); - } - } - else if (reportedProperty.isNull() || desiredVersion.as() != reportedProperty["av"].as()) - { - AziotSendConfirm("update", name, *value, 200, desiredVersion.as()); - } - - return ret; -} - - -template -static void AziotSendTelemetry(const StaticJsonDocument& jsonDoc, char* componentName) -{ - char json[jsonDoc.capacity()]; - serializeJson(jsonDoc, json, sizeof(json)); - - AziotHub_.SendTelemetry(json, 
componentName); -} - - #include using namespace ace_button; @@ -304,35 +122,6 @@ enum class ButtonId static const int ButtonNumber = 6; static AceButton Buttons[ButtonNumber]; -static void ReceivedTwinDocument(const char* json, const char* requestId) -{ - StaticJsonDocument doc; - if (deserializeJson(doc, json)) return; - - if (doc["desired"]["$version"].isNull()) return; - - if (AziotUpdateWritableProperty("telemetryInterval", &TelemetryInterval_, doc["desired"]["$version"], doc["desired"], doc["reported"])) - { - nextTelemetrySendTime = millis() + TelemetryInterval_; - // ei_printf("New telemetryInterval = %d\n", TelemetryInterval_); - } -} - -static void ReceivedTwinDesiredPatch(const char* json, const char* version) -{ - StaticJsonDocument doc; - if (deserializeJson(doc, json)) return; - - if (doc["$version"].isNull()) return; - - if (AziotUpdateWritableProperty("telemetryInterval", &TelemetryInterval_, doc["$version"], doc.as())) - { - nextTelemetrySendTime = millis() + TelemetryInterval_; - // ei_printf("New telemetryInterval = %d\n", TelemetryInterval_); - } - -} - static void ButtonEventHandler(AceButton *button, uint8_t eventType, uint8_t buttonState) { const uint8_t id = button->getId(); @@ -407,8 +196,6 @@ static void ButtonDoWork() void setup() { - Storage_.Load(); - Serial.begin(115200); pinMode(D0, OUTPUT); @@ -424,14 +211,6 @@ void setup() pinMode(WIO_5S_RIGHT, INPUT_PULLUP); pinMode(WIO_5S_PRESS, INPUT_PULLUP); - if (digitalRead(WIO_KEY_A) == LOW && - digitalRead(WIO_KEY_B) == LOW && - digitalRead(WIO_KEY_C) == LOW ) - { - ei_printf("In configuration mode\r\n"); - ConfigurationMode(Storage_); - } - ButtonInit(); pinMode(D0, OUTPUT); @@ -451,42 +230,6 @@ void setup() spr.setTextColor(TEXT_COLOR); spr.setFreeFont(&FreeSans12pt7b); - - if(! Storage_.WiFiSSID.empty()) { - isWifiConfigured = true ; - - spr.drawString("Wi-Fi", 20, 40); - spr.pushSprite(0,0); - ConnectWiFi(); - spr.drawXBitmap(320 - 24 - 4, 4, icon_wifi, 24, 24, TFT_GREEN, BG_COLOR); - spr.drawString("Wi-Fi ... OK", 20, 40); - spr.pushSprite(0,0); - - spr.drawString("Time sync.", 20, 70); - spr.pushSprite(0,0); - SyncTimeServer(); - spr.drawString("Time sync. ... OK", 20, 70); - spr.pushSprite(0,0); - - spr.drawString("Provisioning", 20, 100); - spr.pushSprite(0,0); - if (!DeviceProvisioning()) abort(); - spr.drawString("Provisioning ... OK", 20, 100); - spr.pushSprite(0,0); - - spr.drawString("Azure IoT Hub", 20, 130); - spr.pushSprite(0,0); - AziotDoWork(); - spr.drawString("Azure IoT Hub ... 
OK", 20, 130); - spr.pushSprite(0,0); - - AziotHub_.SetMqttPacketSize(MQTT_PACKET_SIZE); - - AziotHub_.ReceivedTwinDocumentCallback = ReceivedTwinDocument; - AziotHub_.ReceivedTwinDesiredPatchCallback = ReceivedTwinDesiredPatch; - - } - } int fan = 0; @@ -495,12 +238,7 @@ void loop() { spr.fillSprite(BG_COLOR); - if(isWifiConfigured && WifiManager_.IsConnected()) { - spr.drawXBitmap(320 - 24 - 4, 4, icon_wifi, 24, 24, TFT_GREEN, BG_COLOR); - } - ButtonDoWork(); - AziotDoWork(); if (mode == TRAINING) { strcpy(title_text, "Training mode"); @@ -597,21 +335,6 @@ void loop() } } - if(millis() >= nextTelemetrySendTime) - { - if (AziotIsConnected()) - { - StaticJsonDocument doc; - doc["no2"] = sensors[0].last_val; - doc["co"] = sensors[1].last_val; - doc["c2h5oh"] = sensors[2].last_val; - doc["voc"] = sensors[3].last_val; - AziotSendTelemetry(doc, "gas_sensor"); - - nextTelemetrySendTime = millis() + TelemetryInterval_ * 1000; - } - } - if (mode == TRAINING) { ei_printf("%d,%d,%d,%d\n", sensors[0].last_val, sensors[1].last_val, sensors[2].last_val, sensors[3].last_val); } else { // INFERENCE @@ -709,21 +432,10 @@ void loop() (result.classification[best_prediction].value - latest_inference_confidence_level > .05)) { - if (isWifiConfigured) { - StaticJsonDocument doc; - doc["latestInferenceResult"] = title_text; - - char json[JSON_MAX_SIZE]; - serializeJson(doc, json); - - static int requestId = 444; - char b[12]; - AziotHub_.SendTwinPatch(itoa(requestId++, b, 10), json); - } - latest_inference_idx = best_prediction; - latest_inference_confidence_level = - result.classification[best_prediction].value; + latest_inference_idx = best_prediction; + latest_inference_confidence_level = + result.classification[best_prediction].value; } } } diff --git a/firmware/test/README b/firmware/test/README deleted file mode 100644 index df5066e..0000000 --- a/firmware/test/README +++ /dev/null @@ -1,11 +0,0 @@ - -This directory is intended for PIO Unit Testing and project tests. - -Unit Testing is a software testing method by which individual units of -source code, sets of one or more MCU program modules together with associated -control data, usage procedures, and operating procedures, are tested to -determine whether they are fit for use. Unit testing finds problems early -in the development cycle. 
- -More information about PIO Unit Testing: -- https://docs.platformio.org/page/plus/unit-testing.html From 0ff5b8f84643bb1e4e11e0406b2d31866ddd9f61 Mon Sep 17 00:00:00 2001 From: Takashi Matsuoka Date: Fri, 5 Jul 2024 20:53:29 +0900 Subject: [PATCH 2/2] Merge Coke or Pepsi model --- firmware/include/images/icon_coffee.h | 1066 - firmware/include/images/icon_coke.h | 1074 + firmware/include/images/icon_coke.png | Bin 0 -> 29577 bytes firmware/include/images/icon_pepsi.h | 1074 + firmware/include/images/icon_pepsi.png | Bin 0 -> 26231 bytes firmware/include/images/icon_whiskey.h | 1066 - firmware/include/images/icon_wifi.h | 15 - .../src/edge-impulse-sdk/.gitignore | 4 + .../src/edge-impulse-sdk/.mbedignore | 2 +- .../CMSIS/Core/Include/cachel1_armv7.h | 71 +- .../CMSIS/Core/Include/cmsis_armcc.h | 885 +- .../CMSIS/Core/Include/cmsis_armclang.h | 1648 +- .../CMSIS/Core/Include/cmsis_armclang_ltm.h | 1612 +- .../CMSIS/Core/Include/cmsis_gcc.h | 1818 +- .../CMSIS/Core/Include/cmsis_iccarm.h | 43 +- .../CMSIS/Core/Include/cmsis_version.h | 8 +- .../CMSIS/Core/Include/core_armv81mml.h | 51 +- .../CMSIS/Core/Include/core_armv8mml.h | 88 +- .../CMSIS/Core/Include/core_cm3.h | 18 +- .../CMSIS/Core/Include/core_cm33.h | 40 +- .../CMSIS/Core/Include/core_cm35p.h | 42 +- .../CMSIS/Core/Include/core_cm4.h | 18 +- .../CMSIS/Core/Include/core_cm55.h | 753 +- .../CMSIS/Core/Include/core_cm7.h | 20 +- .../CMSIS/Core/Include/core_cm85.h | 4636 +++ .../CMSIS/Core/Include/core_sc300.h | 16 +- .../CMSIS/Core/Include/core_starmc1.h | 3592 +++ .../CMSIS/Core/Include/mpu_armv8.h | 8 +- .../CMSIS/Core/Include/pac_armv81.h | 206 + .../CMSIS/DSP/Include/arm_common_tables.h | 18 +- .../CMSIS/DSP/Include/arm_common_tables_f16.h | 8 +- .../CMSIS/DSP/Include/arm_const_structs.h | 8 +- .../CMSIS/DSP/Include/arm_const_structs_f16.h | 10 +- .../CMSIS/DSP/Include/arm_helium_utils.h | 16 +- .../CMSIS/DSP/Include/arm_math.h | 185 +- .../CMSIS/DSP/Include/arm_math_f16.h | 7 +- .../CMSIS/DSP/Include/arm_math_memory.h | 90 +- .../CMSIS/DSP/Include/arm_math_types.h | 49 +- .../CMSIS/DSP/Include/arm_math_types_f16.h | 30 +- .../CMSIS/DSP/Include/arm_mve_tables.h | 7 +- .../CMSIS/DSP/Include/arm_mve_tables_f16.h | 11 +- .../CMSIS/DSP/Include/arm_vec_fft.h | 273 +- .../CMSIS/DSP/Include/arm_vec_math.h | 7 +- .../CMSIS/DSP/Include/arm_vec_math_f16.h | 72 +- .../DSP/Include/dsp/basic_math_functions.h | 121 +- .../Include/dsp/basic_math_functions_f16.h | 5 +- .../CMSIS/DSP/Include/dsp/bayes_functions.h | 15 +- .../DSP/Include/dsp/bayes_functions_f16.h | 15 +- .../DSP/Include/dsp/complex_math_functions.h | 55 +- .../Include/dsp/complex_math_functions_f16.h | 5 +- .../DSP/Include/dsp/controller_functions.h | 107 +- .../Include/dsp/controller_functions_f16.h | 5 +- .../CMSIS/DSP/Include/dsp/debug.h | 146 + .../DSP/Include/dsp/distance_functions.h | 49 +- .../DSP/Include/dsp/distance_functions_f16.h | 5 +- .../DSP/Include/dsp/fast_math_functions.h | 170 +- .../DSP/Include/dsp/fast_math_functions_f16.h | 14 +- .../DSP/Include/dsp/filtering_functions.h | 98 +- .../DSP/Include/dsp/filtering_functions_f16.h | 19 +- .../DSP/Include/dsp/interpolation_functions.h | 57 +- .../Include/dsp/interpolation_functions_f16.h | 5 +- .../CMSIS/DSP/Include/dsp/matrix_functions.h | 105 +- .../DSP/Include/dsp/matrix_functions_f16.h | 47 +- .../CMSIS/DSP/Include/dsp/matrix_utils.h | 640 + .../CMSIS/DSP/Include/dsp/none.h | 14 +- .../Include/dsp/quaternion_math_functions.h | 4 + .../DSP/Include/dsp/statistics_functions.h | 524 +- 
.../Include/dsp/statistics_functions_f16.h | 92 +- .../CMSIS/DSP/Include/dsp/support_functions.h | 31 +- .../DSP/Include/dsp/support_functions_f16.h | 63 +- .../CMSIS/DSP/Include/dsp/svm_defines.h | 4 + .../CMSIS/DSP/Include/dsp/svm_functions.h | 7 +- .../CMSIS/DSP/Include/dsp/svm_functions_f16.h | 26 +- .../DSP/Include/dsp/transform_functions.h | 172 +- .../DSP/Include/dsp/transform_functions_f16.h | 56 +- .../CMSIS/DSP/Include/dsp/utils.h | 42 +- .../Source/BasicMathFunctions/arm_abs_f16.c | 29 +- .../Source/BasicMathFunctions/arm_abs_f32.c | 8 +- .../Source/BasicMathFunctions/arm_abs_f64.c | 78 + .../Source/BasicMathFunctions/arm_abs_q15.c | 8 +- .../Source/BasicMathFunctions/arm_abs_q31.c | 8 +- .../Source/BasicMathFunctions/arm_abs_q7.c | 8 +- .../Source/BasicMathFunctions/arm_add_f16.c | 27 +- .../Source/BasicMathFunctions/arm_add_f32.c | 8 +- .../Source/BasicMathFunctions/arm_add_f64.c | 79 + .../Source/BasicMathFunctions/arm_add_q15.c | 16 +- .../Source/BasicMathFunctions/arm_add_q31.c | 8 +- .../Source/BasicMathFunctions/arm_add_q7.c | 10 +- .../Source/BasicMathFunctions/arm_and_u16.c | 8 +- .../Source/BasicMathFunctions/arm_and_u32.c | 8 +- .../Source/BasicMathFunctions/arm_and_u8.c | 8 +- .../Source/BasicMathFunctions/arm_clip_f16.c | 8 +- .../Source/BasicMathFunctions/arm_clip_f32.c | 7 +- .../Source/BasicMathFunctions/arm_clip_q15.c | 7 +- .../Source/BasicMathFunctions/arm_clip_q31.c | 7 +- .../Source/BasicMathFunctions/arm_clip_q7.c | 7 +- .../BasicMathFunctions/arm_dot_prod_f16.c | 20 +- .../BasicMathFunctions/arm_dot_prod_f32.c | 14 +- .../BasicMathFunctions/arm_dot_prod_f64.c | 82 + .../BasicMathFunctions/arm_dot_prod_q15.c | 12 +- .../BasicMathFunctions/arm_dot_prod_q31.c | 8 +- .../BasicMathFunctions/arm_dot_prod_q7.c | 12 +- .../Source/BasicMathFunctions/arm_mult_f16.c | 27 +- .../Source/BasicMathFunctions/arm_mult_f32.c | 8 +- .../Source/BasicMathFunctions/arm_mult_f64.c | 79 + .../Source/BasicMathFunctions/arm_mult_q15.c | 16 +- .../Source/BasicMathFunctions/arm_mult_q31.c | 8 +- .../Source/BasicMathFunctions/arm_mult_q7.c | 8 +- .../BasicMathFunctions/arm_negate_f16.c | 29 +- .../BasicMathFunctions/arm_negate_f32.c | 8 +- .../BasicMathFunctions/arm_negate_f64.c | 77 + .../BasicMathFunctions/arm_negate_q15.c | 12 +- .../BasicMathFunctions/arm_negate_q31.c | 8 +- .../Source/BasicMathFunctions/arm_negate_q7.c | 10 +- .../Source/BasicMathFunctions/arm_not_u16.c | 8 +- .../Source/BasicMathFunctions/arm_not_u32.c | 8 +- .../Source/BasicMathFunctions/arm_not_u8.c | 8 +- .../BasicMathFunctions/arm_offset_f16.c | 30 +- .../BasicMathFunctions/arm_offset_f32.c | 8 +- .../BasicMathFunctions/arm_offset_f64.c | 79 + .../BasicMathFunctions/arm_offset_q15.c | 12 +- .../BasicMathFunctions/arm_offset_q31.c | 8 +- .../Source/BasicMathFunctions/arm_offset_q7.c | 10 +- .../Source/BasicMathFunctions/arm_or_u16.c | 8 +- .../Source/BasicMathFunctions/arm_or_u32.c | 8 +- .../DSP/Source/BasicMathFunctions/arm_or_u8.c | 8 +- .../Source/BasicMathFunctions/arm_scale_f16.c | 43 +- .../Source/BasicMathFunctions/arm_scale_f32.c | 8 +- .../Source/BasicMathFunctions/arm_scale_f64.c | 79 + .../Source/BasicMathFunctions/arm_scale_q15.c | 12 +- .../Source/BasicMathFunctions/arm_scale_q31.c | 8 +- .../Source/BasicMathFunctions/arm_scale_q7.c | 8 +- .../Source/BasicMathFunctions/arm_shift_q15.c | 8 +- .../Source/BasicMathFunctions/arm_shift_q31.c | 8 +- .../Source/BasicMathFunctions/arm_shift_q7.c | 8 +- .../Source/BasicMathFunctions/arm_sub_f16.c | 29 +- .../Source/BasicMathFunctions/arm_sub_f32.c | 8 +- 
.../Source/BasicMathFunctions/arm_sub_f64.c | 79 + .../Source/BasicMathFunctions/arm_sub_q15.c | 16 +- .../Source/BasicMathFunctions/arm_sub_q31.c | 8 +- .../Source/BasicMathFunctions/arm_sub_q7.c | 10 +- .../Source/BasicMathFunctions/arm_xor_u16.c | 8 +- .../Source/BasicMathFunctions/arm_xor_u32.c | 8 +- .../Source/BasicMathFunctions/arm_xor_u8.c | 8 +- .../arm_gaussian_naive_bayes_predict_f16.c | 55 +- .../arm_gaussian_naive_bayes_predict_f32.c | 49 +- .../Source/CommonTables/arm_common_tables.c | 38 +- .../CommonTables/arm_common_tables_f16.c | 8 +- .../Source/CommonTables/arm_const_structs.c | 8 +- .../CommonTables/arm_const_structs_f16.c | 8 +- .../DSP/Source/CommonTables/arm_mve_tables.c | 7277 ++--- .../Source/CommonTables/arm_mve_tables_f16.c | 10883 +++---- .../ComplexMathFunctionsF16.c | 36 - .../ComplexMathFunctions/arm_cmplx_conj_f16.c | 41 +- .../ComplexMathFunctions/arm_cmplx_conj_f32.c | 8 +- .../ComplexMathFunctions/arm_cmplx_conj_q15.c | 18 +- .../ComplexMathFunctions/arm_cmplx_conj_q31.c | 8 +- .../arm_cmplx_dot_prod_f16.c | 33 +- .../arm_cmplx_dot_prod_f32.c | 8 +- .../arm_cmplx_dot_prod_q15.c | 8 +- .../arm_cmplx_dot_prod_q31.c | 8 +- .../ComplexMathFunctions/arm_cmplx_mag_f16.c | 29 +- .../ComplexMathFunctions/arm_cmplx_mag_f32.c | 8 +- .../ComplexMathFunctions/arm_cmplx_mag_f64.c | 82 + .../arm_cmplx_mag_fast_q15.c | 227 + .../ComplexMathFunctions/arm_cmplx_mag_q15.c | 99 +- .../ComplexMathFunctions/arm_cmplx_mag_q31.c | 8 +- .../arm_cmplx_mag_squared_f16.c | 30 +- .../arm_cmplx_mag_squared_f32.c | 8 +- .../arm_cmplx_mag_squared_f64.c | 80 + .../arm_cmplx_mag_squared_q15.c | 18 +- .../arm_cmplx_mag_squared_q31.c | 8 +- .../arm_cmplx_mult_cmplx_f16.c | 29 +- .../arm_cmplx_mult_cmplx_f32.c | 8 +- .../arm_cmplx_mult_cmplx_f64.c | 87 + .../arm_cmplx_mult_cmplx_q15.c | 8 +- .../arm_cmplx_mult_cmplx_q31.c | 8 +- .../arm_cmplx_mult_real_f16.c | 51 +- .../arm_cmplx_mult_real_f32.c | 10 +- .../arm_cmplx_mult_real_q15.c | 22 +- .../arm_cmplx_mult_real_q31.c | 10 +- .../ControllerFunctions/arm_pid_init_f32.c | 8 +- .../ControllerFunctions/arm_pid_init_q15.c | 8 +- .../ControllerFunctions/arm_pid_init_q31.c | 8 +- .../ControllerFunctions/arm_pid_reset_f32.c | 8 +- .../ControllerFunctions/arm_pid_reset_q15.c | 8 +- .../ControllerFunctions/arm_pid_reset_q31.c | 8 +- .../ControllerFunctions/arm_sin_cos_f32.c | 40 +- .../ControllerFunctions/arm_sin_cos_q31.c | 8 +- .../DistanceFunctions/DistanceFunctionsF16.c | 40 - .../DistanceFunctions/arm_boolean_distance.c | 6 +- .../arm_boolean_distance_template.h | 6 +- .../arm_braycurtis_distance_f16.c | 10 +- .../arm_braycurtis_distance_f32.c | 6 +- .../arm_canberra_distance_f16.c | 14 +- .../arm_canberra_distance_f32.c | 6 +- .../arm_chebyshev_distance_f16.c | 12 +- .../arm_chebyshev_distance_f32.c | 6 +- .../arm_chebyshev_distance_f64.c | 80 + .../arm_cityblock_distance_f16.c | 8 +- .../arm_cityblock_distance_f32.c | 6 +- .../arm_cityblock_distance_f64.c | 71 + .../arm_correlation_distance_f16.c | 20 +- .../arm_correlation_distance_f32.c | 6 +- .../arm_cosine_distance_f16.c | 12 +- .../arm_cosine_distance_f32.c | 8 +- .../arm_cosine_distance_f64.c | 74 + .../DistanceFunctions/arm_dice_distance.c | 6 +- .../arm_euclidean_distance_f16.c | 6 +- .../arm_euclidean_distance_f32.c | 6 +- .../arm_euclidean_distance_f64.c | 70 + .../DistanceFunctions/arm_hamming_distance.c | 6 +- .../DistanceFunctions/arm_jaccard_distance.c | 6 +- .../arm_jensenshannon_distance_f16.c | 12 +- .../arm_jensenshannon_distance_f32.c | 6 +- .../arm_kulsinski_distance.c | 
6 +- .../arm_minkowski_distance_f16.c | 17 +- .../arm_minkowski_distance_f32.c | 9 +- .../arm_rogerstanimoto_distance.c | 6 +- .../arm_russellrao_distance.c | 8 +- .../arm_sokalmichener_distance.c | 6 +- .../arm_sokalsneath_distance.c | 6 +- .../DistanceFunctions/arm_yule_distance.c | 6 +- .../FastMathFunctions/FastMathFunctionsF16.c | 35 - .../Source/FastMathFunctions/arm_atan2_f16.c | 175 + .../Source/FastMathFunctions/arm_atan2_f32.c | 187 + .../Source/FastMathFunctions/arm_atan2_q15.c | 239 + .../Source/FastMathFunctions/arm_atan2_q31.c | 240 + .../Source/FastMathFunctions/arm_cos_f32.c | 8 +- .../Source/FastMathFunctions/arm_cos_q15.c | 8 +- .../Source/FastMathFunctions/arm_cos_q31.c | 8 +- .../Source/FastMathFunctions/arm_divide_q15.c | 114 + .../Source/FastMathFunctions/arm_divide_q31.c | 109 + .../Source/FastMathFunctions/arm_sin_f32.c | 8 +- .../Source/FastMathFunctions/arm_sin_q15.c | 8 +- .../Source/FastMathFunctions/arm_sin_q31.c | 8 +- .../Source/FastMathFunctions/arm_sqrt_q15.c | 76 +- .../Source/FastMathFunctions/arm_sqrt_q31.c | 77 +- .../Source/FastMathFunctions/arm_vexp_f16.c | 25 +- .../Source/FastMathFunctions/arm_vexp_f32.c | 33 +- .../Source/FastMathFunctions/arm_vexp_f64.c | 70 + .../FastMathFunctions/arm_vinverse_f16.c | 10 +- .../Source/FastMathFunctions/arm_vlog_f16.c | 164 +- .../Source/FastMathFunctions/arm_vlog_f32.c | 30 +- .../arm_vlog_f64.c} | 43 +- .../Source/FastMathFunctions/arm_vlog_q15.c | 268 + .../Source/FastMathFunctions/arm_vlog_q31.c | 262 + .../FilteringFunctionsF16.c | 39 - .../arm_biquad_cascade_df1_32x64_init_q31.c | 8 +- .../arm_biquad_cascade_df1_32x64_q31.c | 10 +- .../arm_biquad_cascade_df1_f16.c | 11 +- .../arm_biquad_cascade_df1_f32.c | 10 +- .../arm_biquad_cascade_df1_fast_q15.c | 16 +- .../arm_biquad_cascade_df1_fast_q31.c | 8 +- .../arm_biquad_cascade_df1_init_f16.c | 47 +- .../arm_biquad_cascade_df1_init_f32.c | 8 +- .../arm_biquad_cascade_df1_init_q15.c | 8 +- .../arm_biquad_cascade_df1_init_q31.c | 8 +- .../arm_biquad_cascade_df1_q15.c | 8 +- .../arm_biquad_cascade_df1_q31.c | 10 +- .../arm_biquad_cascade_df2T_f16.c | 13 +- .../arm_biquad_cascade_df2T_f32.c | 14 +- .../arm_biquad_cascade_df2T_f64.c | 20 +- .../arm_biquad_cascade_df2T_init_f16.c | 9 +- .../arm_biquad_cascade_df2T_init_f32.c | 203 +- .../arm_biquad_cascade_df2T_init_f64.c | 8 +- .../arm_biquad_cascade_stereo_df2T_f16.c | 15 +- .../arm_biquad_cascade_stereo_df2T_f32.c | 14 +- .../arm_biquad_cascade_stereo_df2T_init_f16.c | 8 +- .../arm_biquad_cascade_stereo_df2T_init_f32.c | 8 +- .../Source/FilteringFunctions/arm_conv_f32.c | 30 +- .../arm_conv_fast_opt_q15.c | 8 +- .../FilteringFunctions/arm_conv_fast_q15.c | 8 +- .../FilteringFunctions/arm_conv_fast_q31.c | 8 +- .../FilteringFunctions/arm_conv_opt_q15.c | 8 +- .../FilteringFunctions/arm_conv_opt_q7.c | 8 +- .../FilteringFunctions/arm_conv_partial_f32.c | 32 +- .../arm_conv_partial_fast_opt_q15.c | 8 +- .../arm_conv_partial_fast_q15.c | 17 +- .../arm_conv_partial_fast_q31.c | 25 +- .../arm_conv_partial_opt_q15.c | 8 +- .../arm_conv_partial_opt_q7.c | 8 +- .../FilteringFunctions/arm_conv_partial_q15.c | 29 +- .../FilteringFunctions/arm_conv_partial_q31.c | 25 +- .../FilteringFunctions/arm_conv_partial_q7.c | 27 +- .../Source/FilteringFunctions/arm_conv_q15.c | 8 +- .../Source/FilteringFunctions/arm_conv_q31.c | 8 +- .../Source/FilteringFunctions/arm_conv_q7.c | 8 +- .../FilteringFunctions/arm_correlate_f16.c | 128 +- .../FilteringFunctions/arm_correlate_f32.c | 33 +- .../FilteringFunctions/arm_correlate_f64.c | 369 + 
.../arm_correlate_fast_opt_q15.c | 8 +- .../arm_correlate_fast_q15.c | 8 +- .../arm_correlate_fast_q31.c | 8 +- .../arm_correlate_opt_q15.c | 8 +- .../FilteringFunctions/arm_correlate_opt_q7.c | 8 +- .../FilteringFunctions/arm_correlate_q15.c | 10 +- .../FilteringFunctions/arm_correlate_q31.c | 10 +- .../FilteringFunctions/arm_correlate_q7.c | 10 +- .../FilteringFunctions/arm_fir_decimate_f32.c | 10 +- .../arm_fir_decimate_fast_q15.c | 8 +- .../arm_fir_decimate_fast_q31.c | 8 +- .../arm_fir_decimate_init_f32.c | 8 +- .../arm_fir_decimate_init_q15.c | 8 +- .../arm_fir_decimate_init_q31.c | 8 +- .../FilteringFunctions/arm_fir_decimate_q15.c | 8 +- .../FilteringFunctions/arm_fir_decimate_q31.c | 8 +- .../Source/FilteringFunctions/arm_fir_f16.c | 17 +- .../Source/FilteringFunctions/arm_fir_f32.c | 31 +- .../Source/FilteringFunctions/arm_fir_f64.c | 133 + .../FilteringFunctions/arm_fir_fast_q15.c | 8 +- .../FilteringFunctions/arm_fir_fast_q31.c | 10 +- .../FilteringFunctions/arm_fir_init_f16.c | 16 +- .../FilteringFunctions/arm_fir_init_f32.c | 16 +- .../FilteringFunctions/arm_fir_init_f64.c | 88 + .../FilteringFunctions/arm_fir_init_q15.c | 16 +- .../FilteringFunctions/arm_fir_init_q31.c | 16 +- .../FilteringFunctions/arm_fir_init_q7.c | 17 +- .../arm_fir_interpolate_f32.c | 14 +- .../arm_fir_interpolate_init_f32.c | 8 +- .../arm_fir_interpolate_init_q15.c | 8 +- .../arm_fir_interpolate_init_q31.c | 8 +- .../arm_fir_interpolate_q15.c | 8 +- .../arm_fir_interpolate_q31.c | 10 +- .../FilteringFunctions/arm_fir_lattice_f32.c | 11 +- .../arm_fir_lattice_init_f32.c | 8 +- .../arm_fir_lattice_init_q15.c | 8 +- .../arm_fir_lattice_init_q31.c | 8 +- .../FilteringFunctions/arm_fir_lattice_q15.c | 8 +- .../FilteringFunctions/arm_fir_lattice_q31.c | 8 +- .../Source/FilteringFunctions/arm_fir_q15.c | 31 +- .../Source/FilteringFunctions/arm_fir_q31.c | 33 +- .../Source/FilteringFunctions/arm_fir_q7.c | 69 +- .../FilteringFunctions/arm_fir_sparse_f32.c | 11 +- .../arm_fir_sparse_init_f32.c | 8 +- .../arm_fir_sparse_init_q15.c | 8 +- .../arm_fir_sparse_init_q31.c | 8 +- .../arm_fir_sparse_init_q7.c | 8 +- .../FilteringFunctions/arm_fir_sparse_q15.c | 8 +- .../FilteringFunctions/arm_fir_sparse_q31.c | 8 +- .../FilteringFunctions/arm_fir_sparse_q7.c | 8 +- .../FilteringFunctions/arm_iir_lattice_f32.c | 8 +- .../arm_iir_lattice_init_f32.c | 8 +- .../arm_iir_lattice_init_q15.c | 8 +- .../arm_iir_lattice_init_q31.c | 8 +- .../FilteringFunctions/arm_iir_lattice_q15.c | 8 +- .../FilteringFunctions/arm_iir_lattice_q31.c | 8 +- .../arm_levinson_durbin_f16.c | 277 + .../arm_levinson_durbin_f32.c | 283 + .../arm_levinson_durbin_q31.c | 380 + .../Source/FilteringFunctions/arm_lms_f32.c | 8 +- .../FilteringFunctions/arm_lms_init_f32.c | 8 +- .../FilteringFunctions/arm_lms_init_q15.c | 8 +- .../FilteringFunctions/arm_lms_init_q31.c | 8 +- .../FilteringFunctions/arm_lms_norm_f32.c | 8 +- .../arm_lms_norm_init_f32.c | 8 +- .../arm_lms_norm_init_q15.c | 8 +- .../arm_lms_norm_init_q31.c | 8 +- .../FilteringFunctions/arm_lms_norm_q15.c | 8 +- .../FilteringFunctions/arm_lms_norm_q31.c | 8 +- .../Source/FilteringFunctions/arm_lms_q15.c | 8 +- .../Source/FilteringFunctions/arm_lms_q31.c | 8 +- .../InterpolationFunctionsF16.c | 37 - .../arm_bilinear_interp_f16.c | 70 +- .../arm_bilinear_interp_f32.c | 6 +- .../arm_bilinear_interp_q15.c | 6 +- .../arm_bilinear_interp_q31.c | 6 +- .../arm_bilinear_interp_q7.c | 6 +- .../arm_linear_interp_f16.c | 46 +- .../arm_linear_interp_f32.c | 6 +- .../arm_linear_interp_q15.c | 8 +- 
.../arm_linear_interp_q31.c | 8 +- .../arm_linear_interp_q7.c | 8 +- .../arm_spline_interp_f32.c | 10 +- .../arm_spline_interp_init_f32.c | 8 +- .../MatrixFunctions/MatrixFunctionsF16.c | 45 - .../MatrixFunctions/arm_householder_f16.c | 125 + .../MatrixFunctions/arm_householder_f32.c | 196 + .../MatrixFunctions/arm_householder_f64.c | 121 + .../Source/MatrixFunctions/arm_mat_add_f16.c | 20 +- .../Source/MatrixFunctions/arm_mat_add_f32.c | 32 +- .../Source/MatrixFunctions/arm_mat_add_q15.c | 10 +- .../Source/MatrixFunctions/arm_mat_add_q31.c | 10 +- .../MatrixFunctions/arm_mat_cholesky_f16.c | 33 +- .../MatrixFunctions/arm_mat_cholesky_f32.c | 35 +- .../MatrixFunctions/arm_mat_cholesky_f64.c | 17 +- .../MatrixFunctions/arm_mat_cmplx_mult_f16.c | 21 +- .../MatrixFunctions/arm_mat_cmplx_mult_f32.c | 12 +- .../MatrixFunctions/arm_mat_cmplx_mult_q15.c | 20 +- .../MatrixFunctions/arm_mat_cmplx_mult_q31.c | 12 +- .../MatrixFunctions/arm_mat_cmplx_trans_f16.c | 8 +- .../MatrixFunctions/arm_mat_cmplx_trans_f32.c | 26 +- .../MatrixFunctions/arm_mat_cmplx_trans_q15.c | 8 +- .../MatrixFunctions/arm_mat_cmplx_trans_q31.c | 8 +- .../Source/MatrixFunctions/arm_mat_init_f16.c | 8 +- .../Source/MatrixFunctions/arm_mat_init_f32.c | 8 +- .../Source/MatrixFunctions/arm_mat_init_q15.c | 8 +- .../Source/MatrixFunctions/arm_mat_init_q31.c | 13 +- .../MatrixFunctions/arm_mat_inverse_f16.c | 751 +- .../MatrixFunctions/arm_mat_inverse_f32.c | 1440 +- .../MatrixFunctions/arm_mat_inverse_f64.c | 495 +- .../Source/MatrixFunctions/arm_mat_ldlt_f32.c | 160 +- .../Source/MatrixFunctions/arm_mat_ldlt_f64.c | 83 +- .../Source/MatrixFunctions/arm_mat_mult_f16.c | 20 +- .../Source/MatrixFunctions/arm_mat_mult_f32.c | 70 +- .../Source/MatrixFunctions/arm_mat_mult_f64.c | 27 +- .../MatrixFunctions/arm_mat_mult_fast_q15.c | 44 +- .../MatrixFunctions/arm_mat_mult_fast_q31.c | 8 +- .../MatrixFunctions/arm_mat_mult_opt_q31.c | 788 + .../Source/MatrixFunctions/arm_mat_mult_q15.c | 597 +- .../Source/MatrixFunctions/arm_mat_mult_q31.c | 12 +- .../Source/MatrixFunctions/arm_mat_mult_q7.c | 8 +- .../Source/MatrixFunctions/arm_mat_qr_f16.c | 784 + .../Source/MatrixFunctions/arm_mat_qr_f32.c | 854 + .../Source/MatrixFunctions/arm_mat_qr_f64.c | 311 + .../MatrixFunctions/arm_mat_scale_f16.c | 18 +- .../MatrixFunctions/arm_mat_scale_f32.c | 25 +- .../MatrixFunctions/arm_mat_scale_q15.c | 12 +- .../MatrixFunctions/arm_mat_scale_q31.c | 8 +- .../arm_mat_solve_lower_triangular_f16.c | 54 +- .../arm_mat_solve_lower_triangular_f32.c | 78 +- .../arm_mat_solve_lower_triangular_f64.c | 30 +- .../arm_mat_solve_upper_triangular_f16.c | 46 +- .../arm_mat_solve_upper_triangular_f32.c | 64 +- .../arm_mat_solve_upper_triangular_f64.c | 26 +- .../Source/MatrixFunctions/arm_mat_sub_f16.c | 20 +- .../Source/MatrixFunctions/arm_mat_sub_f32.c | 33 +- .../Source/MatrixFunctions/arm_mat_sub_f64.c | 18 +- .../Source/MatrixFunctions/arm_mat_sub_q15.c | 14 +- .../Source/MatrixFunctions/arm_mat_sub_q31.c | 10 +- .../MatrixFunctions/arm_mat_trans_f16.c | 8 +- .../MatrixFunctions/arm_mat_trans_f32.c | 26 +- .../MatrixFunctions/arm_mat_trans_f64.c | 15 +- .../MatrixFunctions/arm_mat_trans_q15.c | 12 +- .../MatrixFunctions/arm_mat_trans_q31.c | 8 +- .../Source/MatrixFunctions/arm_mat_trans_q7.c | 12 +- .../MatrixFunctions/arm_mat_vec_mult_f16.c | 32 +- .../MatrixFunctions/arm_mat_vec_mult_f32.c | 18 +- .../MatrixFunctions/arm_mat_vec_mult_q15.c | 34 +- .../MatrixFunctions/arm_mat_vec_mult_q31.c | 16 +- .../MatrixFunctions/arm_mat_vec_mult_q7.c | 30 +- 
.../arm_quaternion2rotation_f32.c | 7 +- .../arm_quaternion_conjugate_f32.c | 7 +- .../arm_quaternion_inverse_f32.c | 7 +- .../arm_quaternion_norm_f32.c | 7 +- .../arm_quaternion_normalize_f32.c | 7 +- .../arm_quaternion_product_f32.c | 7 +- .../arm_quaternion_product_single_f32.c | 4 +- .../arm_rotation2quaternion_f32.c | 65 +- .../DSP/Source/SVMFunctions/SVMFunctionsF16.c | 40 - .../SVMFunctions/arm_svm_linear_init_f16.c | 13 +- .../SVMFunctions/arm_svm_linear_init_f32.c | 4 +- .../SVMFunctions/arm_svm_linear_predict_f16.c | 8 +- .../SVMFunctions/arm_svm_linear_predict_f32.c | 4 +- .../arm_svm_polynomial_init_f16.c | 9 +- .../arm_svm_polynomial_init_f32.c | 4 +- .../arm_svm_polynomial_predict_f16.c | 53 +- .../arm_svm_polynomial_predict_f32.c | 4 +- .../SVMFunctions/arm_svm_rbf_init_f16.c | 9 +- .../SVMFunctions/arm_svm_rbf_init_f32.c | 4 +- .../SVMFunctions/arm_svm_rbf_predict_f16.c | 28 +- .../SVMFunctions/arm_svm_rbf_predict_f32.c | 4 +- .../SVMFunctions/arm_svm_sigmoid_init_f16.c | 9 +- .../SVMFunctions/arm_svm_sigmoid_init_f32.c | 4 +- .../arm_svm_sigmoid_predict_f16.c | 10 +- .../arm_svm_sigmoid_predict_f32.c | 4 +- .../StatisticsFunctionsF16.c | 44 - .../StatisticsFunctions/arm_absmax_f16.c | 278 + .../StatisticsFunctions/arm_absmax_f32.c | 264 + .../StatisticsFunctions/arm_absmax_f64.c | 96 + .../arm_absmax_no_idx_f16.c | 232 + .../arm_absmax_no_idx_f32.c | 229 + .../arm_absmax_no_idx_f64.c | 91 + .../arm_absmax_no_idx_q15.c | 224 + .../arm_absmax_no_idx_q31.c | 224 + .../arm_absmax_no_idx_q7.c | 228 + .../StatisticsFunctions/arm_absmax_q15.c | 240 + .../StatisticsFunctions/arm_absmax_q31.c | 240 + .../StatisticsFunctions/arm_absmax_q7.c | 298 + .../StatisticsFunctions/arm_absmin_f16.c | 280 + .../StatisticsFunctions/arm_absmin_f32.c | 283 + .../StatisticsFunctions/arm_absmin_f64.c | 94 + .../arm_absmin_no_idx_f16.c | 234 + .../arm_absmin_no_idx_f32.c | 230 + .../arm_absmin_no_idx_f64.c | 88 + .../arm_absmin_no_idx_q15.c | 226 + .../arm_absmin_no_idx_q31.c | 225 + .../arm_absmin_no_idx_q7.c | 227 + .../StatisticsFunctions/arm_absmin_q15.c | 273 + .../StatisticsFunctions/arm_absmin_q31.c | 273 + .../StatisticsFunctions/arm_absmin_q7.c | 326 + .../StatisticsFunctions/arm_accumulate_f16.c | 125 + .../StatisticsFunctions/arm_accumulate_f32.c | 213 + .../StatisticsFunctions/arm_accumulate_f64.c | 131 + .../StatisticsFunctions/arm_entropy_f16.c | 8 +- .../StatisticsFunctions/arm_entropy_f32.c | 4 +- .../StatisticsFunctions/arm_entropy_f64.c | 6 +- .../arm_kullback_leibler_f16.c | 10 +- .../arm_kullback_leibler_f32.c | 4 +- .../arm_kullback_leibler_f64.c | 6 +- .../arm_logsumexp_dot_prod_f16.c | 4 +- .../arm_logsumexp_dot_prod_f32.c | 4 +- .../StatisticsFunctions/arm_logsumexp_f16.c | 12 +- .../StatisticsFunctions/arm_logsumexp_f32.c | 6 +- .../Source/StatisticsFunctions/arm_max_f16.c | 18 +- .../Source/StatisticsFunctions/arm_max_f32.c | 8 +- .../Source/StatisticsFunctions/arm_max_f64.c | 94 + .../StatisticsFunctions/arm_max_no_idx_f16.c | 10 +- .../StatisticsFunctions/arm_max_no_idx_f32.c | 6 +- .../StatisticsFunctions/arm_max_no_idx_f64.c | 79 + .../StatisticsFunctions/arm_max_no_idx_q15.c | 146 + .../StatisticsFunctions/arm_max_no_idx_q31.c | 146 + .../StatisticsFunctions/arm_max_no_idx_q7.c | 147 + .../Source/StatisticsFunctions/arm_max_q15.c | 105 +- .../Source/StatisticsFunctions/arm_max_q31.c | 116 +- .../Source/StatisticsFunctions/arm_max_q7.c | 124 +- .../Source/StatisticsFunctions/arm_mean_f16.c | 22 +- .../Source/StatisticsFunctions/arm_mean_f32.c | 8 +- 
.../Source/StatisticsFunctions/arm_mean_f64.c | 79 + .../Source/StatisticsFunctions/arm_mean_q15.c | 12 +- .../Source/StatisticsFunctions/arm_mean_q31.c | 10 +- .../Source/StatisticsFunctions/arm_mean_q7.c | 10 +- .../Source/StatisticsFunctions/arm_min_f16.c | 18 +- .../Source/StatisticsFunctions/arm_min_f32.c | 10 +- .../Source/StatisticsFunctions/arm_min_f64.c | 94 + .../StatisticsFunctions/arm_min_no_idx_f16.c | 148 + .../StatisticsFunctions/arm_min_no_idx_f32.c | 142 + .../StatisticsFunctions/arm_min_no_idx_f64.c | 79 + .../StatisticsFunctions/arm_min_no_idx_q15.c | 146 + .../StatisticsFunctions/arm_min_no_idx_q31.c | 145 + .../StatisticsFunctions/arm_min_no_idx_q7.c | 145 + .../Source/StatisticsFunctions/arm_min_q15.c | 101 +- .../Source/StatisticsFunctions/arm_min_q31.c | 108 +- .../Source/StatisticsFunctions/arm_min_q7.c | 8 +- .../Source/StatisticsFunctions/arm_mse_f16.c | 207 + .../Source/StatisticsFunctions/arm_mse_f32.c | 251 + .../Source/StatisticsFunctions/arm_mse_f64.c | 114 + .../Source/StatisticsFunctions/arm_mse_q15.c | 179 + .../Source/StatisticsFunctions/arm_mse_q31.c | 180 + .../Source/StatisticsFunctions/arm_mse_q7.c | 183 + .../StatisticsFunctions/arm_power_f16.c | 8 +- .../StatisticsFunctions/arm_power_f32.c | 8 +- .../StatisticsFunctions/arm_power_f64.c | 81 + .../StatisticsFunctions/arm_power_q15.c | 12 +- .../StatisticsFunctions/arm_power_q31.c | 8 +- .../Source/StatisticsFunctions/arm_power_q7.c | 10 +- .../Source/StatisticsFunctions/arm_rms_f16.c | 23 +- .../Source/StatisticsFunctions/arm_rms_f32.c | 8 +- .../Source/StatisticsFunctions/arm_rms_q15.c | 12 +- .../Source/StatisticsFunctions/arm_rms_q31.c | 8 +- .../Source/StatisticsFunctions/arm_std_f16.c | 8 +- .../Source/StatisticsFunctions/arm_std_f32.c | 8 +- .../Source/StatisticsFunctions/arm_std_f64.c | 63 + .../Source/StatisticsFunctions/arm_std_q15.c | 12 +- .../Source/StatisticsFunctions/arm_std_q31.c | 8 +- .../Source/StatisticsFunctions/arm_var_f16.c | 55 +- .../Source/StatisticsFunctions/arm_var_f32.c | 8 +- .../Source/StatisticsFunctions/arm_var_f64.c | 104 + .../Source/StatisticsFunctions/arm_var_q15.c | 16 +- .../Source/StatisticsFunctions/arm_var_q31.c | 8 +- .../SupportFunctions/arm_barycenter_f16.c | 32 +- .../SupportFunctions/arm_barycenter_f32.c | 5 +- .../SupportFunctions/arm_bitonic_sort_f32.c | 6 +- .../SupportFunctions/arm_bubble_sort_f32.c | 6 +- .../Source/SupportFunctions/arm_copy_f16.c | 8 +- .../Source/SupportFunctions/arm_copy_f32.c | 8 +- .../Source/SupportFunctions/arm_copy_f64.c | 75 + .../Source/SupportFunctions/arm_copy_q15.c | 12 +- .../Source/SupportFunctions/arm_copy_q31.c | 8 +- .../DSP/Source/SupportFunctions/arm_copy_q7.c | 10 +- .../SupportFunctions/arm_f16_to_float.c | 14 +- .../Source/SupportFunctions/arm_f16_to_q15.c | 10 +- .../Source/SupportFunctions/arm_fill_f16.c | 8 +- .../Source/SupportFunctions/arm_fill_f32.c | 8 +- .../Source/SupportFunctions/arm_fill_f64.c | 75 + .../Source/SupportFunctions/arm_fill_q15.c | 8 +- .../Source/SupportFunctions/arm_fill_q31.c | 8 +- .../DSP/Source/SupportFunctions/arm_fill_q7.c | 8 +- .../SupportFunctions/arm_float_to_f16.c | 14 +- .../SupportFunctions/arm_float_to_q15.c | 10 +- .../SupportFunctions/arm_float_to_q31.c | 8 +- .../Source/SupportFunctions/arm_float_to_q7.c | 12 +- .../SupportFunctions/arm_heap_sort_f32.c | 6 +- .../SupportFunctions/arm_insertion_sort_f32.c | 6 +- .../SupportFunctions/arm_merge_sort_f32.c | 6 +- .../arm_merge_sort_init_f32.c | 6 +- .../Source/SupportFunctions/arm_q15_to_f16.c | 20 +- 
.../SupportFunctions/arm_q15_to_float.c | 18 +- .../Source/SupportFunctions/arm_q15_to_q31.c | 12 +- .../Source/SupportFunctions/arm_q15_to_q7.c | 14 +- .../SupportFunctions/arm_q31_to_float.c | 10 +- .../Source/SupportFunctions/arm_q31_to_q15.c | 10 +- .../Source/SupportFunctions/arm_q31_to_q7.c | 12 +- .../Source/SupportFunctions/arm_q7_to_float.c | 12 +- .../Source/SupportFunctions/arm_q7_to_q15.c | 10 +- .../Source/SupportFunctions/arm_q7_to_q31.c | 10 +- .../SupportFunctions/arm_quick_sort_f32.c | 6 +- .../SupportFunctions/arm_selection_sort_f32.c | 6 +- .../Source/SupportFunctions/arm_sort_f32.c | 6 +- .../SupportFunctions/arm_sort_init_f32.c | 6 +- .../SupportFunctions/arm_weighted_sum_f16.c | 4 +- .../SupportFunctions/arm_weighted_sum_f32.c | 4 +- .../TransformFunctions/arm_bitreversal.c | 8 +- .../TransformFunctions/arm_bitreversal2.c | 6 +- .../TransformFunctions/arm_bitreversal_f16.c | 5 +- .../Source/TransformFunctions/arm_cfft_f16.c | 496 +- .../Source/TransformFunctions/arm_cfft_f32.c | 397 +- .../Source/TransformFunctions/arm_cfft_f64.c | 12 +- .../TransformFunctions/arm_cfft_init_f16.c | 8 +- .../TransformFunctions/arm_cfft_init_f32.c | 8 +- .../TransformFunctions/arm_cfft_init_f64.c | 26 +- .../TransformFunctions/arm_cfft_init_q15.c | 8 +- .../TransformFunctions/arm_cfft_init_q31.c | 8 +- .../Source/TransformFunctions/arm_cfft_q15.c | 270 +- .../Source/TransformFunctions/arm_cfft_q31.c | 227 +- .../TransformFunctions/arm_cfft_radix2_f16.c | 169 +- .../TransformFunctions/arm_cfft_radix2_f32.c | 8 +- .../arm_cfft_radix2_init_f16.c | 7 +- .../arm_cfft_radix2_init_f32.c | 8 +- .../arm_cfft_radix2_init_q15.c | 28 +- .../arm_cfft_radix2_init_q31.c | 28 +- .../TransformFunctions/arm_cfft_radix2_q15.c | 8 +- .../TransformFunctions/arm_cfft_radix2_q31.c | 8 +- .../TransformFunctions/arm_cfft_radix4_f16.c | 513 +- .../TransformFunctions/arm_cfft_radix4_f32.c | 8 +- .../arm_cfft_radix4_init_f16.c | 8 +- .../arm_cfft_radix4_init_f32.c | 8 +- .../arm_cfft_radix4_init_q15.c | 20 +- .../arm_cfft_radix4_init_q31.c | 22 +- .../TransformFunctions/arm_cfft_radix4_q15.c | 41 +- .../TransformFunctions/arm_cfft_radix4_q31.c | 25 +- .../TransformFunctions/arm_cfft_radix8_f16.c | 301 +- .../TransformFunctions/arm_cfft_radix8_f32.c | 8 +- .../Source/TransformFunctions/arm_dct4_f32.c | 16 +- .../TransformFunctions/arm_dct4_init_f32.c | 16 +- .../TransformFunctions/arm_dct4_init_q15.c | 15 +- .../TransformFunctions/arm_dct4_init_q31.c | 15 +- .../Source/TransformFunctions/arm_dct4_q15.c | 16 +- .../Source/TransformFunctions/arm_dct4_q31.c | 15 +- .../Source/TransformFunctions/arm_mfcc_f16.c | 165 + .../Source/TransformFunctions/arm_mfcc_f32.c | 154 + .../TransformFunctions/arm_mfcc_init_f16.c | 114 + .../TransformFunctions/arm_mfcc_init_f32.c | 111 + .../TransformFunctions/arm_mfcc_init_q15.c | 111 + .../TransformFunctions/arm_mfcc_init_q31.c | 111 + .../Source/TransformFunctions/arm_mfcc_q15.c | 203 + .../Source/TransformFunctions/arm_mfcc_q31.c | 202 + .../Source/TransformFunctions/arm_rfft_f32.c | 8 +- .../TransformFunctions/arm_rfft_fast_f16.c | 188 +- .../TransformFunctions/arm_rfft_fast_f32.c | 12 +- .../TransformFunctions/arm_rfft_fast_f64.c | 8 +- .../arm_rfft_fast_init_f16.c | 9 +- .../arm_rfft_fast_init_f32.c | 26 +- .../arm_rfft_fast_init_f64.c | 10 +- .../TransformFunctions/arm_rfft_init_f32.c | 8 +- .../TransformFunctions/arm_rfft_init_q15.c | 8 +- .../TransformFunctions/arm_rfft_init_q31.c | 8 +- .../Source/TransformFunctions/arm_rfft_q15.c | 48 +- 
.../Source/TransformFunctions/arm_rfft_q31.c | 47 +- .../CMSIS/NN/Include/arm_nn_math_types.h | 172 + .../CMSIS/NN/Include/arm_nn_tables.h | 12 +- .../CMSIS/NN/Include/arm_nn_types.h | 15 +- .../CMSIS/NN/Include/arm_nnfunctions.h | 4399 +-- .../CMSIS/NN/Include/arm_nnsupportfunctions.h | 574 +- .../arm_nn_activations_q15.c | 4 + .../arm_nn_activations_q7.c | 25 +- .../Source/ActivationFunctions/arm_relu6_s8.c | 4 + .../Source/ActivationFunctions/arm_relu_q15.c | 24 +- .../Source/ActivationFunctions/arm_relu_q7.c | 26 +- .../arm_elementwise_add_s16.c | 140 + .../arm_elementwise_add_s8.c | 132 +- .../arm_elementwise_mul_s16.c | 126 + .../arm_elementwise_mul_s8.c | 42 +- .../arm_concatenation_s8_w.c | 9 +- .../arm_concatenation_s8_x.c | 9 +- .../arm_concatenation_s8_y.c | 9 +- .../arm_concatenation_s8_z.c | 9 +- .../arm_convolve_1_x_n_s8.c | 160 +- .../arm_convolve_1x1_HWC_q7_fast_nonsquare.c | 96 +- .../arm_convolve_1x1_s8_fast.c | 121 +- .../arm_convolve_HWC_q15_basic.c | 76 +- .../arm_convolve_HWC_q15_fast.c | 87 +- .../arm_convolve_HWC_q15_fast_nonsquare.c | 100 +- .../arm_convolve_HWC_q7_RGB.c | 95 +- .../arm_convolve_HWC_q7_basic.c | 78 +- .../arm_convolve_HWC_q7_basic_nonsquare.c | 81 +- .../arm_convolve_HWC_q7_fast.c | 97 +- .../arm_convolve_HWC_q7_fast_nonsquare.c | 90 +- .../arm_convolve_fast_s16.c | 245 + .../ConvolutionFunctions/arm_convolve_s16.c | 160 + .../ConvolutionFunctions/arm_convolve_s8.c | 187 +- .../arm_convolve_wrapper_s16.c | 134 + .../arm_convolve_wrapper_s8.c | 49 +- .../arm_depthwise_conv_3x3_s8.c | 62 +- .../arm_depthwise_conv_fast_s16.c | 471 + .../arm_depthwise_conv_s16.c | 296 + .../arm_depthwise_conv_s8.c | 266 +- .../arm_depthwise_conv_s8_opt.c | 255 +- .../arm_depthwise_conv_u8_basic_ver1.c | 69 +- .../arm_depthwise_conv_wrapper_s16.c | 125 + .../arm_depthwise_conv_wrapper_s8.c | 43 +- .../arm_depthwise_separable_conv_HWC_q7.c | 116 +- ...epthwise_separable_conv_HWC_q7_nonsquare.c | 118 +- .../arm_nn_depthwise_conv_s8_core.c | 4 + .../arm_nn_mat_mult_kernel_q7_q15.c | 16 +- .../arm_nn_mat_mult_kernel_q7_q15_reordered.c | 16 +- .../arm_nn_mat_mult_kernel_s8_s16.c | 212 +- .../arm_nn_mat_mult_kernel_s8_s16_reordered.c | 201 - .../ConvolutionFunctions/arm_nn_mat_mult_s8.c | 44 +- .../arm_fully_connected_mat_q7_vec_q15.c | 58 +- .../arm_fully_connected_mat_q7_vec_q15_opt.c | 196 +- .../arm_fully_connected_q15.c | 55 +- .../arm_fully_connected_q15_opt.c | 122 +- .../arm_fully_connected_q7.c | 59 +- .../arm_fully_connected_q7_opt.c | 261 +- .../arm_fully_connected_s16.c | 101 + .../arm_fully_connected_s8.c | 41 +- .../arm_nn_accumulate_q7_to_q15.c | 25 +- .../Source/NNSupportFunctions/arm_nn_add_q7.c | 11 +- .../arm_nn_depthwise_conv_nt_t_padded_s8.c | 81 +- .../arm_nn_depthwise_conv_nt_t_s16.c | 175 + .../arm_nn_depthwise_conv_nt_t_s8.c | 83 +- .../arm_nn_mat_mul_core_1x_s8.c | 131 +- .../arm_nn_mat_mul_core_4x_s8.c | 165 +- .../arm_nn_mat_mul_kernel_s16.c | 254 + .../arm_nn_mat_mult_nt_t_s8.c | 38 +- .../NNSupportFunctions/arm_nn_mult_q15.c | 91 +- .../NNSupportFunctions/arm_nn_mult_q7.c | 66 +- .../arm_nn_vec_mat_mult_t_s16.c | 372 + .../arm_nn_vec_mat_mult_t_s8.c | 394 +- .../arm_nn_vec_mat_mult_t_svdf_s8.c | 345 + .../Source/NNSupportFunctions/arm_nntables.c | 4 + .../arm_q7_to_q15_no_shift.c | 25 +- .../arm_q7_to_q15_reordered_no_shift.c | 29 +- .../arm_q7_to_q15_reordered_with_offset.c | 16 +- .../arm_q7_to_q15_with_offset.c | 4 + .../Source/PoolingFunctions/arm_avgpool_s16.c | 311 + .../Source/PoolingFunctions/arm_avgpool_s8.c | 254 +- 
.../PoolingFunctions/arm_max_pool_s16.c | 216 + .../Source/PoolingFunctions/arm_max_pool_s8.c | 43 +- .../Source/PoolingFunctions/arm_pool_q7_HWC.c | 26 +- .../Source/ReshapeFunctions/arm_reshape_s8.c | 17 +- .../NN/Source/SVDFunctions/arm_svdf_s8.c | 270 +- .../SVDFunctions/arm_svdf_state_s16_s8.c | 271 + .../arm_nn_softmax_common_s8.c | 151 + .../Source/SoftmaxFunctions/arm_softmax_q15.c | 18 +- .../Source/SoftmaxFunctions/arm_softmax_q7.c | 19 +- .../Source/SoftmaxFunctions/arm_softmax_s16.c | 126 + .../Source/SoftmaxFunctions/arm_softmax_s8.c | 72 +- .../SoftmaxFunctions/arm_softmax_s8_s16.c | 59 + .../Source/SoftmaxFunctions/arm_softmax_u8.c | 5 +- .../arm_softmax_with_batch_q7.c | 20 +- .../src/edge-impulse-sdk/LICENSE | 1 - .../edge-impulse-sdk/LICENSE-apache-2.0.txt | 2 +- .../src/edge-impulse-sdk/anomaly/anomaly.h | 87 - .../classifier/ei_aligned_malloc.h | 29 +- .../classifier/ei_classifier_config.h | 78 +- .../classifier/ei_classifier_smooth.h | 33 +- .../classifier/ei_classifier_types.h | 275 +- .../classifier/ei_fill_result_struct.h | 1732 + .../classifier/ei_model_types.h | 337 +- .../src/edge-impulse-sdk/classifier/ei_nms.h | 392 + .../classifier/ei_performance_calibration.h | 199 + .../edge-impulse-sdk/classifier/ei_quantize.h | 37 + .../classifier/ei_run_classifier.h | 2107 +- .../classifier/ei_run_classifier_c.cpp | 34 + .../classifier/ei_run_classifier_c.h | 39 + .../classifier/ei_run_classifier_image.h | 17 + .../edge-impulse-sdk/classifier/ei_run_dsp.h | 772 +- .../classifier/ei_signal_with_axes.h | 92 +- .../classifier/ei_signal_with_range.h | 64 +- .../classifier/inferencing_engines/akida.h | 578 + .../classifier/inferencing_engines/anomaly.h | 280 + .../classifier/inferencing_engines/drpai.h | 758 + .../classifier/inferencing_engines/engines.h | 59 + .../classifier/inferencing_engines/memryx.h | 476 + .../inferencing_engines/onnx_tidl.h | 704 + .../inferencing_engines/tensaiflow.h | 235 + .../classifier/inferencing_engines/tensorrt.h | 319 + .../inferencing_engines/tflite_eon.h | 406 + .../inferencing_engines/tflite_full.h | 247 + .../inferencing_engines/tflite_helper.h | 574 + .../inferencing_engines/tflite_micro.h | 470 + .../inferencing_engines/tflite_tidl.h | 396 + .../src/edge-impulse-sdk/cmake/utils.cmake | 18 + .../cmake/zephyr/CMakeLists.txt | 26 +- .../create-arduino-library.sh | 60 - .../src/edge-impulse-sdk/dsp/config.hpp | 37 +- .../edge-impulse-sdk/dsp/dct/fast-dct-fft.cpp | 175 +- .../edge-impulse-sdk/dsp/dct/fast-dct-fft.h | 30 +- .../src/edge-impulse-sdk/dsp/ei_alloc.h | 79 + .../src/edge-impulse-sdk/dsp/ei_dsp_handle.h | 58 + .../src/edge-impulse-sdk/dsp/ei_flatten.h | 198 + .../src/edge-impulse-sdk/dsp/ei_hr.hpp | 96 + .../src/edge-impulse-sdk/dsp/ei_profiler.h | 29 +- .../src/edge-impulse-sdk/dsp/ei_utils.h | 35 +- .../src/edge-impulse-sdk/dsp/ei_vector.h | 32 + .../src/edge-impulse-sdk/dsp/image/image.hpp | 29 +- .../edge-impulse-sdk/dsp/image/processing.cpp | 80 +- .../edge-impulse-sdk/dsp/image/processing.hpp | 72 +- .../dsp/kissfft/_kiss_fft_guts.h | 2 + .../edge-impulse-sdk/dsp/kissfft/kiss_fft.cpp | 2 +- .../dsp/kissfft/kiss_fftr.cpp | 6 +- .../edge-impulse-sdk/dsp/kissfft/kiss_fftr.h | 2 +- .../edge-impulse-sdk/dsp/kissfft/kissfft.h | 12 +- .../edge-impulse-sdk/dsp/kissfft/kissfft.hh | 361 - .../dsp/kissfft/kissfft_i32.hh | 304 - .../src/edge-impulse-sdk/dsp/memory.cpp | 29 +- .../src/edge-impulse-sdk/dsp/memory.hpp | 59 +- .../src/edge-impulse-sdk/dsp/numpy.hpp | 1524 +- .../src/edge-impulse-sdk/dsp/numpy_types.h | 318 +- 
.../src/edge-impulse-sdk/dsp/returntypes.h | 48 + .../src/edge-impulse-sdk/dsp/returntypes.hpp | 39 +- .../edge-impulse-sdk/dsp/spectral/feature.hpp | 624 +- .../edge-impulse-sdk/dsp/spectral/filters.hpp | 29 +- .../dsp/spectral/fir_filter.hpp | 29 +- .../dsp/spectral/processing.hpp | 485 +- .../edge-impulse-sdk/dsp/spectral/signal.hpp | 356 + .../dsp/spectral/spectral.hpp | 29 +- .../edge-impulse-sdk/dsp/spectral/wavelet.hpp | 354 + .../dsp/spectral/wavelet_coeff.hpp | 282 + .../edge-impulse-sdk/dsp/speechpy/feature.hpp | 302 +- .../dsp/speechpy/functions.hpp | 66 +- .../dsp/speechpy/processing.hpp | 136 +- .../dsp/speechpy/speechpy.hpp | 29 +- .../porting/arduino/debug_log.cpp | 29 +- .../porting/arduino/ei_classifier_porting.cpp | 50 +- .../porting/brickml/debug_log.cpp | 33 + .../porting/brickml/ei_classifier_porting.cpp | 178 + .../porting/ei_classifier_porting.h | 304 +- .../src/edge-impulse-sdk/porting/ei_logging.h | 82 + .../porting/espressif/ESP-NN/CMakeLists.txt | 51 + .../porting/espressif/ESP-NN/CONTRIBUTING.md | 38 + .../espressif/ESP-NN/Kconfig.projbuild | 29 + .../porting/espressif/ESP-NN/LICENSE | 202 + .../porting/espressif/ESP-NN/README.md | 1 + .../espressif/ESP-NN/idf_component.yml | 11 + .../porting/espressif/ESP-NN/include/esp_nn.h | 46 + .../espressif/ESP-NN/include/esp_nn_ansi_c.h | 47 + .../ESP-NN/include/esp_nn_ansi_headers.h | 309 + .../espressif/ESP-NN/include/esp_nn_defs.h | 83 + .../espressif/ESP-NN/include/esp_nn_esp32s3.h | 231 + .../ESP-NN/include/esp_nn_generic_opt.h | 47 + .../activation_functions/esp_nn_relu_ansi.c | 34 + .../esp_nn_relu_s8_esp32s3.S | 118 + .../ESP-NN/src/basic_math/esp_nn_add_ansi.c | 101 + .../src/basic_math/esp_nn_add_s8_esp32s3.S | 638 + .../ESP-NN/src/basic_math/esp_nn_mul_ansi.c | 46 + .../src/basic_math/esp_nn_mul_s8_esp32s3.S | 323 + .../ESP-NN/src/common/common_functions.h | 255 + .../common/esp_nn_common_functions_esp32s3.S | 266 + ...sp_nn_multiply_by_quantized_mult_esp32s3.S | 127 + ..._multiply_by_quantized_mult_ver1_esp32s3.S | 163 + .../ESP-NN/src/convolution/esp_nn_conv_ansi.c | 183 + .../src/convolution/esp_nn_conv_esp32s3.c | 273 + .../ESP-NN/src/convolution/esp_nn_conv_opt.c | 183 + .../esp_nn_conv_s16_mult4_1x1_esp32s3.S | 358 + .../esp_nn_conv_s16_mult8_esp32s3.S | 489 + ...v_s8_filter_aligned_input_padded_esp32s3.S | 271 + .../esp_nn_conv_s8_mult8_1x1_esp32s3.S | 497 + .../convolution/esp_nn_depthwise_conv_ansi.c | 104 + .../convolution/esp_nn_depthwise_conv_opt.c | 295 + ..._nn_depthwise_conv_s16_mult1_3x3_esp32s3.S | 403 + ...thwise_conv_s16_mult1_3x3_no_pad_esp32s3.S | 367 + .../esp_nn_depthwise_conv_s16_mult1_esp32s3.S | 345 + .../esp_nn_depthwise_conv_s16_mult4_esp32s3.S | 416 + ..._nn_depthwise_conv_s16_mult8_3x3_esp32s3.S | 458 + .../esp_nn_depthwise_conv_s16_mult8_esp32s3.S | 432 + .../esp_nn_depthwise_conv_s8_esp32s3.c | 547 + ...pthwise_conv_s8_mult1_3x3_padded_esp32s3.S | 512 + .../esp_nn_fully_connected_ansi.c | 54 + .../esp_nn_fully_connected_s8_esp32s3.S | 220 + .../ESP-NN/src/pooling/esp_nn_avg_pool_ansi.c | 76 + .../src/pooling/esp_nn_avg_pool_s8_esp32s3.S | 686 + .../ESP-NN/src/pooling/esp_nn_max_pool_ansi.c | 70 + .../src/pooling/esp_nn_max_pool_s8_esp32s3.S | 449 + .../ESP-NN/src/softmax/esp_nn_softmax_ansi.c | 92 + .../ESP-NN/src/softmax/esp_nn_softmax_opt.c | 112 + .../ESP-NN/src/softmax/softmax_common.h | 104 + .../porting/espressif/debug_log.cpp | 33 + .../espressif/ei_classifier_porting.cpp | 122 + .../porting/ethos-core-driver/CMakeLists.txt | 96 + .../porting/ethos-core-driver/LICENSE.txt 
| 201 + .../porting/ethos-core-driver/README.MD | 271 + .../porting/ethos-core-driver/SECURITY.md | 85 + .../ethos-core-driver/include/ethosu_driver.h | 361 + .../ethos-core-driver/include/ethosu_types.h | 76 + .../ethos-core-driver/include/pmu_ethosu.h | 326 + .../ethos-core-driver/src/ehtosu_config_u65.h | 124 + .../src/ethosu55_interface.h | 26198 ++++++++++++++++ .../src/ethosu65_interface.h | 26061 +++++++++++++++ .../ethos-core-driver/src/ethosu_config_u55.h | 124 + .../ethos-core-driver/src/ethosu_device.h | 142 + .../src/ethosu_device_u55_u65.c | 392 + .../ethos-core-driver/src/ethosu_driver.c | 765 + .../ethos-core-driver/src/ethosu_interface.h} | 27 +- .../ethos-core-driver/src/ethosu_log.h | 72 + .../ethos-core-driver/src/ethosu_pmu.c | 304 + .../porting/ethos-core-driver/version.txt | 1 + .../porting/himax-we2/debug_log.cpp | 33 + .../himax-we2/ei_classifier_porting.cpp | 151 + .../porting/himax/debug_log.cpp | 33 + .../porting/himax/ei_classifier_porting.cpp | 168 + .../porting/iar/debug_log.cpp | 33 + .../porting/iar/ei_classifier_porting.cpp | 89 + .../porting/infineon-psoc62/debug_log.cpp | 33 + .../infineon-psoc62/ei_classifier_porting.cpp | 158 + .../porting/lib/at_base64_lib.h | 81 - .../porting/mbed/debug_log.cpp | 33 + .../porting/mbed/ei_classifier_porting.cpp | 96 + .../porting/mingw32/debug_log.cpp | 33 + .../porting/mingw32/ei_classifier_porting.cpp | 79 + .../porting/particle/debug_log.cpp | 33 + .../particle/ei_classifier_porting.cpp | 102 + .../porting/posix/debug_log.cpp | 32 + .../porting/posix/ei_classifier_porting.cpp | 102 + .../raspberry/ei_classifier_porting.cpp | 126 + .../porting/renesas-ra/debug_log.cpp | 33 + .../renesas-ra/ei_classifier_porting.cpp | 327 + .../porting/seeed-vision-ai/debug_log.cpp | 33 + .../seeed-vision-ai/ei_classifier_porting.cpp | 160 + .../porting/silabs/debug_log.cpp | 33 + .../porting/silabs/ei_classifier_porting.cpp | 106 + .../porting/sony/debug_log.cpp | 33 + .../porting/sony/ei_classifier_porting.cpp | 119 + .../porting/stm32-cubeai/debug_log.cpp | 33 + .../stm32-cubeai/ei_classifier_porting.cpp | 117 + .../porting/synaptics/debug_log.cpp | 33 + .../synaptics/ei_classifier_porting.cpp | 132 + .../edge-impulse-sdk/porting/ti/debug_log.cpp | 33 + .../porting/ti/ei_classifier_porting.cpp | 96 + .../porting/zephyr/debug_log.cpp | 33 + .../porting/zephyr/ei_classifier_porting.cpp | 104 + .../scripts/leak-detection.js | 152 - .../src/edge-impulse-sdk/tensorflow/LICENSE | 46 - .../tensorflow/lite/builtin_op_data.h | 22 + .../tensorflow/lite/builtin_ops.h | 194 + .../tensorflow/lite/c/builtin_op_data.h | 486 +- .../tensorflow/lite/c/c_api_types.h | 75 +- .../tensorflow/lite/c/common.c | 233 +- .../tensorflow/lite/c/common.h | 889 +- .../tensorflow/lite/context_util.h | 54 + .../tensorflow/lite/core/api/common.cc | 354 + .../{error_reporter.cpp => error_reporter.cc} | 0 .../tensorflow/lite/core/api/error_reporter.h | 15 +- ...versions.cpp => flatbuffer_conversions.cc} | 541 +- .../lite/core/api/flatbuffer_conversions.h | 76 +- .../api/{op_resolver.cpp => op_resolver.cc} | 9 +- .../tensorflow/lite/core/api/op_resolver.h | 76 +- .../api/{tensor_utils.cpp => tensor_utils.cc} | 2 +- .../tensorflow/lite/core/api/tensor_utils.h | 2 +- .../tensorflow/lite/core/c/builtin_op_data.h | 537 + .../tensorflow/lite/core/c/c_api_types.h | 168 + .../tensorflow/lite/core/c/common.h | 1170 + .../custom/tree_ensemble_classifier.cc | 192 + .../kernels/custom/tree_ensemble_classifier.h | 31 + .../tensorflow/lite/kernels/internal/common.h | 369 +- 
.../lite/kernels/internal/compatibility.h | 11 +- .../lite/kernels/internal/cppmath.h | 5 +- .../kernels/internal/optimized/neon_check.h | 22 +- .../lite/kernels/internal/portable_tensor.h | 6 +- .../kernels/internal/portable_tensor_utils.cc | 86 + .../kernels/internal/portable_tensor_utils.h | 623 + ...tization_util.cpp => quantization_util.cc} | 39 +- .../lite/kernels/internal/reference/add.h | 90 +- .../lite/kernels/internal/reference/add_n.h | 46 +- .../kernels/internal/reference/batch_matmul.h | 275 + .../internal/reference/binary_function.h | 17 +- .../internal/reference/broadcast_args.h | 56 + .../kernels/internal/reference/broadcast_to.h | 97 + .../kernels/internal/reference/comparisons.h | 2 +- .../internal/reference/concatenation.h | 2 + .../lite/kernels/internal/reference/conv.h | 47 +- .../lite/kernels/internal/reference/cumsum.h | 175 + .../internal/reference/depth_to_space.h | 79 + .../internal/reference/depthwiseconv_uint8.h | 22 + .../lite/kernels/internal/reference/div.h | 28 +- .../kernels/internal/reference/floor_div.h | 35 + .../kernels/internal/reference/floor_mod.h | 44 + .../internal/reference/fully_connected.h | 3 + .../kernels/internal/reference/hard_swish.h | 14 +- .../internal/reference/integer_ops/add.h | 1 + .../internal/reference/integer_ops/add.h'' | 144 - .../internal/reference/integer_ops/conv.h | 38 +- .../reference/integer_ops/depthwise_conv.h | 2 + .../reference/integer_ops/fully_connected.h | 74 +- .../reference/integer_ops/l2normalization.h | 2 + .../internal/reference/integer_ops/logistic.h | 2 + .../internal/reference/integer_ops/mean.h | 2 + .../internal/reference/integer_ops/mul.h | 12 +- .../internal/reference/integer_ops/pooling.h | 10 +- .../internal/reference/integer_ops/tanh.h | 1 + .../reference/integer_ops/transpose_conv.h | 23 +- .../internal/reference/l2normalization.h | 2 +- .../kernels/internal/reference/log_softmax.h | 256 + .../kernels/internal/reference/lstm_cell.h | 422 + .../lite/kernels/internal/reference/mul.h | 52 +- .../lite/kernels/internal/reference/pad.h | 61 +- .../lite/kernels/internal/reference/pooling.h | 10 +- .../lite/kernels/internal/reference/prelu.h | 2 + .../reference/process_broadcast_shapes.h | 2 + .../kernels/internal/reference/quantize.h | 34 + .../lite/kernels/internal/reference/reduce.h | 152 +- .../kernels/internal/reference/requantize.h | 2 + .../internal/reference/resize_bilinear.h | 228 + .../reference/resize_nearest_neighbor.h | 1 + .../lite/kernels/internal/reference/select.h | 151 + .../lite/kernels/internal/reference/slice.h | 80 + .../lite/kernels/internal/reference/softmax.h | 7 +- .../internal/reference/space_to_depth.h | 80 + .../internal/reference/strided_slice.h | 104 +- .../lite/kernels/internal/reference/sub.h | 201 +- .../kernels/internal/reference/transpose.h | 203 + .../internal/reference/transpose_conv.h | 24 +- .../reference_portable_tensor_utils.cc | 809 + .../reference_portable_tensor_utils.h | 333 + .../reference_portable_tensor_utils_impl.h | 244 + .../lite/kernels/internal/runtime_shape.h | 158 + .../kernels/internal/strided_slice_logic.h | 63 + .../lite/kernels/internal/tensor_ctypes.h | 2 +- .../lite/kernels/internal/tensor_utils.cc | 25 + .../tensorflow/lite/kernels/internal/types.h | 350 +- .../tensorflow/lite/kernels/kernel_util.h | 53 +- ...rnel_util_lite.cpp => kernel_util_lite.cc} | 218 +- .../tensorflow/lite/kernels/op_macros.h | 63 +- .../tensorflow/lite/kernels/padding.h | 3 +- ...l_ops_resolver.cpp => all_ops_resolver.cc} | 46 +- .../tensorflow/lite/micro/compatibility.h | 
5 +- .../lite/micro/fake_micro_context.cc | 110 + .../lite/micro/fake_micro_context.h | 56 + .../micro/flatbuffer_conversions_bridge.cc | 34 + .../micro/flatbuffer_conversions_bridge.h | 45 + .../tensorflow/lite/micro/flatbuffer_utils.cc | 85 + .../tensorflow/lite/micro/flatbuffer_utils.h | 65 + .../tensorflow/lite/micro/ibuffer_allocator.h | 100 + .../lite/micro/kernels/activations.cc | 120 + .../lite/micro/kernels/activations.cpp | 288 - .../lite/micro/kernels/activations.h | 63 + .../lite/micro/kernels/activations_common.cc | 158 + .../tensorflow/lite/micro/kernels/add.cc | 1383 + .../tensorflow/lite/micro/kernels/add.cpp | 525 - .../tensorflow/lite/micro/kernels/add.h | 77 + .../lite/micro/kernels/add_common.cc | 106 + .../tensorflow/lite/micro/kernels/add_n.cc | 215 + .../tensorflow/lite/micro/kernels/add_n.cpp | 119 - .../{arg_min_max.cpp => arg_min_max.cc} | 61 +- .../lite/micro/kernels/assign_variable.cc | 101 + .../lite/micro/kernels/batch_matmul.cc | 644 + ...h_to_space_nd.cpp => batch_to_space_nd.cc} | 25 +- .../lite/micro/kernels/broadcast_args.cc | 91 + .../lite/micro/kernels/broadcast_to.cc | 123 + .../lite/micro/kernels/call_once.cc | 88 + .../lite/micro/kernels/{cast.cpp => cast.cc} | 46 +- .../lite/micro/kernels/{ceil.cpp => ceil.cc} | 31 +- .../lite/micro/kernels/circular_buffer.cc | 117 + .../lite/micro/kernels/circular_buffer.cpp | 192 - .../lite/micro/kernels/circular_buffer.h | 48 + .../micro/kernels/circular_buffer_common.cc | 97 + .../{comparisons.cpp => comparisons.cc} | 182 +- .../lite/micro/kernels/complex_abs.cc | 103 + .../{concatenation.cpp => concatenation.cc} | 107 +- .../tensorflow/lite/micro/kernels/conv.cc | 2213 ++ .../tensorflow/lite/micro/kernels/conv.cpp | 990 - .../tensorflow/lite/micro/kernels/conv.h | 41 +- .../{conv_common.cpp => conv_common.cc} | 72 +- .../tensorflow/lite/micro/kernels/conv_test.h | 64 +- .../tensorflow/lite/micro/kernels/cumsum.cc | 175 + .../lite/micro/kernels/depth_to_space.cc | 142 + .../lite/micro/kernels/depthwise_conv.cc | 2106 ++ .../lite/micro/kernels/depthwise_conv.cpp | 1015 - .../lite/micro/kernels/depthwise_conv.h | 28 +- ...nv_common.cpp => depthwise_conv_common.cc} | 65 +- .../kernels/{dequantize.cpp => dequantize.cc} | 95 +- .../lite/micro/kernels/dequantize.h | 38 + .../lite/micro/kernels/dequantize_common.cc | 67 + ...stprocess.cpp => detection_postprocess.cc} | 336 +- .../lite/micro/kernels/{div.cpp => div.cc} | 78 +- .../lite/micro/kernels/elementwise.cc | 430 + .../lite/micro/kernels/elementwise.cpp | 214 - .../lite/micro/kernels/{elu.cpp => elu.cc} | 36 +- .../tensorflow/lite/micro/kernels/ethosu.cc | 214 + .../lite/micro/kernels/{exp.cpp => exp.cc} | 25 +- .../{expand_dims.cpp => expand_dims.cc} | 125 +- .../lite/micro/kernels/{fill.cpp => fill.cc} | 61 +- .../micro/kernels/{floor.cpp => floor.cc} | 21 +- .../lite/micro/kernels/floor_div.cc | 130 + .../lite/micro/kernels/floor_mod.cc | 128 + .../lite/micro/kernels/fully_connected.cc | 1809 ++ .../lite/micro/kernels/fully_connected.cpp | 870 - .../lite/micro/kernels/fully_connected.h | 39 +- ...d_common.cpp => fully_connected_common.cc} | 5 + .../tensorflow/lite/micro/kernels/gather.cc | 226 + .../lite/micro/kernels/gather_nd.cc | 212 + .../lite/micro/kernels/hard_swish.cc | 75 + .../lite/micro/kernels/hard_swish.h | 30 + .../{hard_swish.cpp => hard_swish_common.cc} | 84 +- .../tensorflow/lite/micro/kernels/if.cc | 121 + .../lite/micro/kernels/kernel_runner.cc | 121 + .../lite/micro/kernels/kernel_runner.cpp | 161 - .../lite/micro/kernels/kernel_runner.h | 
44 +- .../lite/micro/kernels/kernel_util.h | 109 +- .../lite/micro/kernels/kernel_util_micro.cc | 280 + .../lite/micro/kernels/kernel_util_micro.cpp | 53 - .../kernels/{l2_pool_2d.cpp => l2_pool_2d.cc} | 39 +- .../micro/kernels/{l2norm.cpp => l2norm.cc} | 41 +- .../kernels/{leaky_relu.cpp => leaky_relu.cc} | 80 +- .../lite/micro/kernels/leaky_relu.h | 43 + .../lite/micro/kernels/leaky_relu_common.cc | 78 + .../lite/micro/kernels/log_softmax.cc | 148 + .../tensorflow/lite/micro/kernels/logical.cc | 44 + .../tensorflow/lite/micro/kernels/logical.h | 35 + .../{logical.cpp => logical_common.cc} | 58 +- .../kernels/{logistic.cpp => logistic.cc} | 107 +- .../tensorflow/lite/micro/kernels/logistic.h | 42 + .../lite/micro/kernels/logistic_common.cc | 119 + .../lite/micro/kernels/lstm_eval.cc | 222 + .../tensorflow/lite/micro/kernels/lstm_eval.h | 417 + .../lite/micro/kernels/lstm_eval_test.h | 817 + .../lite/micro/kernels/lstm_shared.h | 150 + ...maximum_minimum.cpp => maximum_minimum.cc} | 50 +- .../tensorflow/lite/micro/kernels/micro_ops.h | 104 +- .../lite/micro/kernels/micro_tensor_utils.cc | 67 + .../lite/micro/kernels/micro_tensor_utils.h | 56 + .../lite/micro/kernels/mirror_pad.cc | 215 + .../kernels/mli_function_specializations.h | 145 + .../lite/micro/kernels/mli_interface.cc | 160 + .../lite/micro/kernels/mli_interface.h | 80 + .../{mli_slicers.cpp => mli_slicers.cc} | 2 +- .../lite/micro/kernels/mli_slicers.h | 2 +- .../lite/micro/kernels/mli_tf_utils.h | 296 +- .../tensorflow/lite/micro/kernels/mul.cc | 387 + .../tensorflow/lite/micro/kernels/mul.cpp | 470 - .../tensorflow/lite/micro/kernels/mul.h | 74 + .../lite/micro/kernels/mul_common.cc | 213 + .../lite/micro/kernels/{neg.cpp => neg.cc} | 25 +- .../lite/micro/kernels/{pack.cpp => pack.cc} | 27 +- .../lite/micro/kernels/{pad.cpp => pad.cc} | 244 +- .../lite/micro/kernels/{ethosu.cpp => pad.h} | 14 +- .../tensorflow/lite/micro/kernels/pooling.cc | 1567 + .../tensorflow/lite/micro/kernels/pooling.cpp | 1111 - .../tensorflow/lite/micro/kernels/pooling.h | 142 + .../lite/micro/kernels/pooling_common.cc | 128 + .../tensorflow/lite/micro/kernels/prelu.cc | 75 + .../tensorflow/lite/micro/kernels/prelu.h | 39 + .../kernels/{prelu.cpp => prelu_common.cc} | 106 +- .../kernels/{quantize.cpp => quantize.cc} | 10 +- ...quantize_common.cpp => quantize_common.cc} | 106 +- .../lite/micro/kernels/read_variable.cc | 87 + .../tensorflow/lite/micro/kernels/real.cc | 134 + .../tensorflow/lite/micro/kernels/reduce.cc | 86 + .../tensorflow/lite/micro/kernels/reduce.cpp | 342 - .../tensorflow/lite/micro/kernels/reduce.h | 71 + .../lite/micro/kernels/reduce_common.cc | 417 + .../micro/kernels/{reshape.cpp => reshape.cc} | 26 +- .../lite/micro/kernels/resize_bilinear.cc | 116 + ...eighbor.cpp => resize_nearest_neighbor.cc} | 47 +- .../tensorflow/lite/micro/kernels/rfft2d.cc | 207 + .../micro/kernels/{round.cpp => round.cc} | 20 +- .../lite/micro/kernels/scratch_buf_mgr.cc | 397 + .../lite/micro/kernels/scratch_buf_mgr.cpp | 347 - .../lite/micro/kernels/scratch_buf_mgr.h | 98 +- ...scratch_buffers.cpp => scratch_buffers.cc} | 87 +- .../lite/micro/kernels/scratch_buffers.h | 18 +- .../tensorflow/lite/micro/kernels/select.cc | 248 + .../micro/kernels/{shape.cpp => shape.cc} | 14 +- .../tensorflow/lite/micro/kernels/slice.cc | 157 + .../tensorflow/lite/micro/kernels/softmax.cc | 565 + .../tensorflow/lite/micro/kernels/softmax.cpp | 217 - .../tensorflow/lite/micro/kernels/softmax.h | 42 +- .../{softmax_common.cpp => softmax_common.cc} | 133 +- 
...e_to_batch_nd.cpp => space_to_batch_nd.cc} | 24 +- .../lite/micro/kernels/space_to_depth.cc | 127 + .../micro/kernels/{split.cpp => split.cc} | 23 +- .../micro/kernels/{split_v.cpp => split_v.cc} | 30 +- .../lite/micro/kernels/squared_difference.cc | 247 + .../micro/kernels/{squeeze.cpp => squeeze.cc} | 49 +- .../{strided_slice.cpp => strided_slice.cc} | 110 +- .../tensorflow/lite/micro/kernels/sub.cc | 168 + .../tensorflow/lite/micro/kernels/sub.cpp | 256 - .../tensorflow/lite/micro/kernels/sub.h | 60 + .../lite/micro/kernels/sub_common.cc | 109 + .../tensorflow/lite/micro/kernels/svdf.cc | 339 + .../tensorflow/lite/micro/kernels/svdf.cpp | 586 - .../tensorflow/lite/micro/kernels/svdf.h | 52 +- .../{svdf_common.cpp => svdf_common.cc} | 141 +- .../lite/micro/kernels/{tanh.cpp => tanh.cc} | 116 +- .../lite/micro/kernels/transpose.cc | 122 + .../lite/micro/kernels/transpose_conv.cc | 708 + .../lite/micro/kernels/transpose_conv.cpp | 269 - .../micro/kernels/tree_ensemble_classifier.cc | 194 + .../micro/kernels/tree_ensemble_classifier.h | 29 + .../kernels/unidirectional_sequence_lstm.cc | 589 + .../micro/kernels/{unpack.cpp => unpack.cc} | 17 +- .../lite/micro/kernels/var_handle.cc | 93 + .../tensorflow/lite/micro/kernels/while.cc | 133 + .../kernels/{zeros_like.cpp => zeros_like.cc} | 33 +- .../{memory_helpers.cpp => memory_helpers.cc} | 16 +- .../tensorflow/lite/micro/memory_helpers.h | 12 +- ...y_planner.cpp => greedy_memory_planner.cc} | 101 +- .../memory_planner/greedy_memory_planner.h | 44 +- ...y_planner.cpp => linear_memory_planner.cc} | 22 +- .../memory_planner/linear_memory_planner.h | 11 +- .../micro/memory_planner/memory_plan_struct.h | 73 + ...emory_planner.h => micro_memory_planner.h} | 54 +- .../non_persistent_buffer_planner_shim.cc | 66 + .../non_persistent_buffer_planner_shim.h | 129 + .../lite/micro/micro_allocation_info.cc | 375 + .../lite/micro/micro_allocation_info.h | 139 + .../tensorflow/lite/micro/micro_allocator.cc | 941 + .../tensorflow/lite/micro/micro_allocator.cpp | 1158 - .../tensorflow/lite/micro/micro_allocator.h | 212 +- .../lite/micro/micro_arena_constants.h | 28 + .../tensorflow/lite/micro/micro_context.cc | 129 + .../tensorflow/lite/micro/micro_context.h | 161 + .../micro_utils.h => micro_error_reporter.cc} | 45 +- .../lite/micro/micro_error_reporter.h | 18 +- .../tensorflow/lite/micro/micro_graph.cc | 258 + .../tensorflow/lite/micro/micro_graph.h | 110 + .../lite/micro/micro_interpreter.cc | 347 + .../lite/micro/micro_interpreter.cpp | 409 - .../tensorflow/lite/micro/micro_interpreter.h | 140 +- ...{micro_error_reporter.cpp => micro_log.cc} | 23 +- .../tensorflow/lite/micro/micro_log.h | 49 + .../lite/micro/micro_mutable_op_resolver.h | 388 +- .../tensorflow/lite/micro/micro_op_resolver.h | 15 +- .../tensorflow/lite/micro/micro_profiler.cc | 115 + .../tensorflow/lite/micro/micro_profiler.cpp | 58 - .../tensorflow/lite/micro/micro_profiler.h | 54 +- .../lite/micro/micro_profiler_interface.h | 38 + .../lite/micro/micro_resource_variable.cc | 148 + .../lite/micro/micro_resource_variable.h | 87 + .../{micro_string.cpp => micro_string.cc} | 8 + .../micro/{micro_time.cpp => micro_time.cc} | 11 +- .../tensorflow/lite/micro/micro_time.h | 10 +- .../micro/{micro_utils.cpp => micro_utils.cc} | 32 +- .../tensorflow/lite/micro/micro_utils.h | 43 +- .../tensorflow/lite/micro/mock_micro_graph.cc | 66 + .../tensorflow/lite/micro/mock_micro_graph.h | 60 + .../non_persistent_arena_buffer_allocator.cc | 170 + .../non_persistent_arena_buffer_allocator.h | 104 + 
.../lite/micro/op_resolver_bridge.cc | 32 + .../lite/micro/op_resolver_bridge.h | 38 + .../persistent_arena_buffer_allocator.cc | 52 + .../micro/persistent_arena_buffer_allocator.h | 58 + ...cator.cpp => recording_micro_allocator.cc} | 165 +- .../lite/micro/recording_micro_allocator.h | 48 +- .../lite/micro/recording_micro_interpreter.h | 20 +- ...ecording_single_arena_buffer_allocator.cc} | 47 +- ...recording_single_arena_buffer_allocator.h} | 31 +- .../{schema_utils.cpp => schema_utils.cc} | 0 .../lite/micro/simple_memory_allocator.cpp | 149 - .../lite/micro/simple_memory_allocator.h | 112 - .../micro/single_arena_buffer_allocator.cc | 199 + .../micro/single_arena_buffer_allocator.h | 144 + .../{system_setup.cpp => system_setup.cc} | 0 .../lite/micro/test_helper_custom_ops.cc | 112 + .../lite/micro/test_helper_custom_ops.h | 50 + .../tensorflow/lite/micro/test_helpers.cc | 2035 ++ .../tensorflow/lite/micro/test_helpers.cpp | 1079 - .../tensorflow/lite/micro/test_helpers.h | 114 +- .../lite/portable_type_to_tflitetype.h | 3 +- .../tensorflow/lite/schema/schema_generated.h | 16765 +--------- .../lite/schema/schema_generated_full.h | 17601 +++++++++++ .../third_party/arc_mli_package/LICENSE | 13 + .../emsdp_em11d_em9d_dfss/release/libmli.a | Bin 0 -> 2419476 bytes .../include/api/mli_helpers_api.h | 145 + .../include/api/mli_kernels_api.h | 667 + .../include/api/mli_krn_avepool_spec_api.h | 117 + .../include/api/mli_krn_conv2d_spec_api.h | 828 + .../api/mli_krn_depthwise_conv2d_spec_api.h | 786 + .../include/api/mli_krn_maxpool_spec_api.h | 119 + .../arc_mli_package/include/api/mli_mov_api.h | 369 + .../arc_mli_package/include/mli_api.h | 25 + .../arc_mli_package/include/mli_config.h | 115 + .../arc_mli_package/include/mli_types.h | 339 + .../include/flatbuffers/fb_allocator.h | 68 + .../include/flatbuffers/fb_array.h | 243 + .../include/flatbuffers/{base.h => fb_base.h} | 109 +- .../include/flatbuffers/fb_buffer.h | 142 + .../include/flatbuffers/fb_buffer_ref.h | 53 + .../flatbuffers/fb_default_allocator.h | 58 + .../include/flatbuffers/fb_detached_buffer.h | 114 + .../flatbuffers/fb_flatbuffer_builder.h | 1214 + .../include/flatbuffers/fb_stl_emulation.h | 509 + .../include/flatbuffers/fb_string.h | 64 + .../include/flatbuffers/fb_struct.h | 53 + .../include/flatbuffers/fb_table.h | 168 + .../include/flatbuffers/{util.h => fb_util.h} | 69 +- .../include/flatbuffers/fb_vector.h | 389 + .../include/flatbuffers/fb_vector_downward.h | 271 + .../include/flatbuffers/fb_verifier.h | 304 + .../include/flatbuffers/flatbuffers.h | 2627 +- .../include/flatbuffers/flexbuffers.h | 360 +- .../include/flatbuffers/stl_emulation.h | 449 - .../src/model-parameters/anomaly_clusters.h | 49 - .../src/model-parameters/anomaly_metadata.h | 57 + .../src/model-parameters/anomaly_types.h | 36 - .../src/model-parameters/dsp_blocks.h | 45 - .../src/model-parameters/model_metadata.h | 206 +- .../src/model-parameters/model_variables.h | 194 + .../src/tflite-model/tflite-resolver.h | 31 - .../src/tflite-model/tflite-trained.cpp | 22 - .../src/tflite-model/tflite-trained.h | 294 - .../tflite-model/tflite_learn_33_compiled.cpp | 663 + .../tflite-model/tflite_learn_33_compiled.h | 59 + .../tflite-model/trained_model_ops_define.h | 102 + firmware/src/main.cpp | 9 +- 1307 files changed, 212882 insertions(+), 69939 deletions(-) delete mode 100644 firmware/include/images/icon_coffee.h create mode 100644 firmware/include/images/icon_coke.h create mode 100644 firmware/include/images/icon_coke.png create mode 100644 
firmware/include/images/icon_pepsi.h create mode 100644 firmware/include/images/icon_pepsi.png delete mode 100644 firmware/include/images/icon_whiskey.h delete mode 100644 firmware/include/images/icon_wifi.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm85.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_starmc1.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/pac_armv81.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/debug.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f64.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/ComplexMathFunctionsF16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_fast_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f64.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/DistanceFunctionsF16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f64.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/FastMathFunctionsF16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_f16.c create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_divide_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_divide_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f64.c rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/{SupportFunctions/SupportFunctionsF16.c => FastMathFunctions/arm_vlog_f64.c} (55%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_q31.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/FilteringFunctionsF16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_q31.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/InterpolationFunctionsF16.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/MatrixFunctionsF16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_opt_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f64.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/SVMFunctionsF16.c delete mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/StatisticsFunctionsF16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q7.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q7.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q7.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q7.c create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q7.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q7.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q7.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f64.c create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f64.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_f16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_f32.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_q15.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_q31.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_math_types.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_fast_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_fast_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s16.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_kernel_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_svdf_s8.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s16.c create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SVDFunctions/arm_svdf_state_s16_s8.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s16.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8_s16.c delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/anomaly/anomaly.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_fill_result_struct.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_nms.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_performance_calibration.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_quantize.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_c.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_c.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/akida.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/anomaly.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/drpai.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/engines.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/memryx.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_eon.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_full.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_micro.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/create-arduino-library.sh create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_alloc.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_dsp_handle.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_flatten.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_hr.hpp create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_vector.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft.hh delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft_i32.hh create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/returntypes.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/signal.hpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/wavelet.hpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/wavelet_coeff.hpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/brickml/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/brickml/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ei_logging.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/CMakeLists.txt create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/CONTRIBUTING.md create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/Kconfig.projbuild create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/LICENSE create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/README.md create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/idf_component.yml create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_ansi_c.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_ansi_headers.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_defs.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_esp32s3.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_generic_opt.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/activation_functions/esp_nn_relu_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/activation_functions/esp_nn_relu_s8_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_add_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_add_s8_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_mul_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_mul_s8_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/common_functions.h create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_common_functions_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_multiply_by_quantized_mult_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_multiply_by_quantized_mult_ver1_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_esp32s3.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_opt.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s16_mult4_1x1_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s16_mult8_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s8_filter_aligned_input_padded_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s8_mult8_1x1_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_opt.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult4_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult8_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/fully_connected/esp_nn_fully_connected_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/fully_connected/esp_nn_fully_connected_s8_esp32s3.S create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_avg_pool_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_avg_pool_s8_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_max_pool_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_max_pool_s8_esp32s3.S create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/esp_nn_softmax_ansi.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/esp_nn_softmax_opt.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/softmax_common.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/CMakeLists.txt create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/LICENSE.txt create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/README.MD create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/SECURITY.md create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/ethosu_driver.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/ethosu_types.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/pmu_ethosu.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ehtosu_config_u65.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu55_interface.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu65_interface.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_config_u55.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_device.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_device_u55_u65.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_driver.c rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/{CMSIS/DSP/Source/BayesFunctions/BayesFunctionsF16.c => porting/ethos-core-driver/src/ethosu_interface.h} (51%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_log.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_pmu.c create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/version.txt create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax-we2/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax-we2/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/iar/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/iar/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/infineon-psoc62/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/infineon-psoc62/ei_classifier_porting.cpp delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/lib/at_base64_lib.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mbed/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mbed/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mingw32/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mingw32/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/particle/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/particle/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/posix/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/posix/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/raspberry/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/renesas-ra/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/renesas-ra/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/seeed-vision-ai/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/seeed-vision-ai/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/silabs/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/silabs/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/sony/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/sony/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/stm32-cubeai/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/stm32-cubeai/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/synaptics/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/synaptics/ei_classifier_porting.cpp create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ti/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ti/ei_classifier_porting.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/zephyr/debug_log.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/zephyr/ei_classifier_porting.cpp delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/scripts/leak-detection.js create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/builtin_op_data.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/builtin_ops.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/context_util.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/common.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/{error_reporter.cpp => error_reporter.cc} (100%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/{flatbuffer_conversions.cpp => flatbuffer_conversions.cc} (82%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/{op_resolver.cpp => op_resolver.cc} (89%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/{tensor_utils.cpp => tensor_utils.cc} (96%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/c_api_types.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/common.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/{quantization_util.cpp => quantization_util.cc} (93%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/batch_matmul.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_args.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_to.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/cumsum.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depth_to_space.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_div.h create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_mod.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/log_softmax.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/lstm_cell.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_bilinear.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/select.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/slice.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/space_to_depth.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils_impl.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/runtime_shape.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_utils.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/{kernel_util_lite.cpp => kernel_util_lite.cc} (74%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{all_ops_resolver.cpp => all_ops_resolver.cc} (68%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/ibuffer_allocator.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.h create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations_common.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_common.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_n.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_n.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{arg_min_max.cpp => arg_min_max.cc} (67%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/assign_variable.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_matmul.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{batch_to_space_nd.cpp => batch_to_space_nd.cc} (87%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/broadcast_args.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/broadcast_to.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/call_once.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{cast.cpp => cast.cc} (67%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{ceil.cpp => ceil.cc} (80%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer_common.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{comparisons.cpp => comparisons.cc} (78%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/complex_abs.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{concatenation.cpp => concatenation.cc} (77%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{conv_common.cpp => conv_common.cc} (76%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cumsum.cc create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depth_to_space.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{depthwise_conv_common.cpp => depthwise_conv_common.cc} (78%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{dequantize.cpp => dequantize.cc} (52%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize_common.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{tflite_detection_postprocess.cpp => detection_postprocess.cc} (77%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{div.cpp => div.cc} (78%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elementwise.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elementwise.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{elu.cpp => elu.cc} (86%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ethosu.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{exp.cpp => exp.cc} (81%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{expand_dims.cpp => expand_dims.cc} (50%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{fill.cpp => fill.cc} (68%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{floor.cpp => floor.cc} (79%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor_div.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor_mod.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{fully_connected_common.cpp => fully_connected_common.cc} (93%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather_nd.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{hard_swish.cpp => hard_swish_common.cc} (55%) create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/if.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util_micro.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util_micro.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{l2_pool_2d.cpp => l2_pool_2d.cc} (80%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{l2norm.cpp => l2norm.cc} (81%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{leaky_relu.cpp => leaky_relu.cc} (58%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu_common.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/log_softmax.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{logical.cpp => logical_common.cc} (58%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{logistic.cpp => logistic.cc} (51%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic_common.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval_test.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_shared.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{maximum_minimum.cpp => maximum_minimum.cc} (72%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mirror_pad.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_function_specializations.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_interface.cc create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_interface.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{mli_slicers.cpp => mli_slicers.cc} (98%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul_common.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{neg.cpp => neg.cc} (75%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{pack.cpp => pack.cc} (82%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{pad.cpp => pad.cc} (73%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{ethosu.cpp => pad.h} (72%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling_common.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{prelu.cpp => prelu_common.cc} (50%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{quantize.cpp => quantize.cc} (84%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{quantize_common.cpp => quantize_common.cc} (61%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/read_variable.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/real.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce_common.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{reshape.cpp => reshape.cc} (88%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_bilinear.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{resize_nearest_neighbor.cpp => resize_nearest_neighbor.cc} (78%) create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/rfft2d.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{round.cpp => round.cc} (85%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{scratch_buffers.cpp => scratch_buffers.cc} (67%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/select.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{shape.cpp => shape.cc} (84%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/slice.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{softmax_common.cpp => softmax_common.cc} (68%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{space_to_batch_nd.cpp => space_to_batch_nd.cc} (88%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_depth.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{split.cpp => split.cc} (87%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{split_v.cpp => split_v.cc} (86%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squared_difference.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{squeeze.cpp => squeeze.cc} (70%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{strided_slice.cpp => strided_slice.cc} (66%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub_common.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{svdf_common.cpp => svdf_common.cc} (76%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{tanh.cpp => tanh.cc} (56%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose.cc create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose_conv.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose_conv.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{unpack.cpp => unpack.cc} (87%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/var_handle.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/while.cc rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/{zeros_like.cpp => zeros_like.cc} (77%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{memory_helpers.cpp => memory_helpers.cc} (93%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/{greedy_memory_planner.cpp => greedy_memory_planner.cc} (86%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/{linear_memory_planner.cpp => linear_memory_planner.cc} (71%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_plan_struct.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/{memory_planner.h => micro_memory_planner.h} (55%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_arena_constants.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_context.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_context.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{kernels/micro_utils.h => micro_error_reporter.cc} (53%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cpp rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{micro_error_reporter.cpp => micro_log.cc} (72%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_log.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler_interface.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{micro_string.cpp => micro_string.cc} (97%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{micro_time.cpp => micro_time.cc} (87%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{micro_utils.cpp => micro_utils.cc} (69%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{recording_micro_allocator.cpp => recording_micro_allocator.cc} (57%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{recording_simple_memory_allocator.cpp => recording_single_arena_buffer_allocator.cc} (51%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{recording_simple_memory_allocator.h => recording_single_arena_buffer_allocator.h} (57%) rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{schema_utils.cpp => schema_utils.cc} (100%) delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.cpp delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/{system_setup.cpp => system_setup.cc} (100%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.cc create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.cc delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/LICENSE create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/bin/emsdp_em11d_em9d_dfss/release/libmli.a create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/api/mli_helpers_api.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/api/mli_kernels_api.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/api/mli_krn_avepool_spec_api.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/api/mli_krn_conv2d_spec_api.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/api/mli_krn_depthwise_conv2d_spec_api.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/api/mli_krn_maxpool_spec_api.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/api/mli_mov_api.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/mli_api.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/mli_config.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/include/mli_types.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_allocator.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_array.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/{base.h => fb_base.h} (80%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_buffer.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_buffer_ref.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_default_allocator.h create mode 100644 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_detached_buffer.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_flatbuffer_builder.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_stl_emulation.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_string.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_struct.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_table.h rename firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/{util.h => fb_util.h} (93%) create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_vector.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_vector_downward.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/fb_verifier.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/stl_emulation.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/model-parameters/anomaly_clusters.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/model-parameters/anomaly_metadata.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/model-parameters/anomaly_types.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/model-parameters/dsp_blocks.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/model-parameters/model_variables.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/tflite-model/tflite-resolver.h delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/tflite-model/tflite-trained.cpp delete mode 100644 firmware/lib/ei-artificial_nose-arduino/src/tflite-model/tflite-trained.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/tflite-model/tflite_learn_33_compiled.cpp create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/tflite-model/tflite_learn_33_compiled.h create mode 100644 firmware/lib/ei-artificial_nose-arduino/src/tflite-model/trained_model_ops_define.h diff --git a/firmware/include/images/icon_coffee.h b/firmware/include/images/icon_coffee.h deleted file mode 100644 index 22b7211..0000000 --- a/firmware/include/images/icon_coffee.h +++ /dev/null @@ -1,1066 +0,0 @@ -// Generated by : ImageConverter 565 v2.2 -// Generated from: icons_for_nose_coffee.png -// Time generated: 4/21/2021 5:27:51 PM -// Dimensions : 130x130 pixels -// Size : 33,800 Bytes - -#include - -const unsigned short icon_coffee[0x4204] PROGMEM ={ -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0010 (16) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0020 (32) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0030 (48) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0040 (64) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0050 (80) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0060 (96) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0070 (112) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0080 (128) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0090 (144) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00A0 (160) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00B0 (176) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00C0 (192) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00D0 (208) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00E0 (224) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00F0 (240) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0100 (256) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0110 (272) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0120 (288) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0130 (304) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0140 (320) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0150 (336) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0160 (352) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0170 (368) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0180 (384) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0190 (400) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01A0 (416) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01B0 (432) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, // 0x01C0 (448) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01D0 (464) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01E0 (480) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01F0 (496) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0200 (512) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0210 (528) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0220 (544) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0230 (560) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0240 (576) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0250 (592) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0260 (608) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0270 (624) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0280 (640) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0290 (656) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02A0 (672) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02B0 (688) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02C0 (704) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02D0 (720) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02E0 (736) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02F0 (752) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0300 (768) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0310 (784) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0320 (800) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0330 (816) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0340 (832) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0350 (848) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0360 (864) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0370 (880) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0380 (896) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0390 (912) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03A0 (928) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03B0 (944) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03C0 (960) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03D0 (976) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03E0 (992) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03F0 (1008) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0400 (1024) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0410 (1040) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0420 (1056) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0430 (1072) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0440 (1088) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0450 (1104) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0460 (1120) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0470 (1136) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0480 (1152) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0490 (1168) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04A0 (1184) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04B0 (1200) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04C0 (1216) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, // 0x04D0 (1232) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04E0 (1248) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04F0 (1264) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0500 (1280) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0510 (1296) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0520 (1312) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0530 (1328) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0540 (1344) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xA576, 0xA576, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0550 (1360) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0560 (1376) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0570 (1392) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0580 (1408) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0590 (1424) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05A0 (1440) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05B0 (1456) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05C0 (1472) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xA576, 0xA576, 0xF81F, 0xF81F, 0xF81F, // 0x05D0 (1488) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05E0 (1504) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05F0 (1520) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0600 (1536) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0610 (1552) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0620 (1568) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0630 (1584) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0640 (1600) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xA576, 0xA576, 0xA576, 0xF81F, // 0x0650 (1616) -0xF81F, 
[... roughly 500 deleted lines of 16-bit RGB565 bitmap data elided: rows of sixteen pixel values, each with a trailing byte-offset comment running from 0x0660 (1632) to 0x2640 (9792); the image is mostly the 0xF81F key colour with shades 0xA576, 0xBDF8, 0xC659, 0xD6DB, 0xEFBE, 0x7268, 0x8B4A, 0xB48C and 0xFF9B ...]
0x8B4A, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, // 0x2650 (9808) -0xB48C, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x2660 (9824) -0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2670 (9840) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x2680 (9856) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xEFBE, 0xF81F, // 0x2690 (9872) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x26A0 (9888) -0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x26B0 (9904) -0xEFBE, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x26C0 (9920) -0x7268, 0x8B4A, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0x8B4A, // 0x26D0 (9936) -0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x26E0 (9952) -0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x26F0 (9968) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x2700 (9984) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x2710 (10000) -0xD6DB, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2720 (10016) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2730 (10032) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x2740 (10048) -0x7268, 0x8B4A, 0x8B4A, 0x8B4A, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0xB48C, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, // 0x2750 (10064) -0x8B4A, 0x8B4A, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x2760 (10080) -0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2770 (10096) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, // 0x2780 (10112) -0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, // 0x2790 (10128) -0xD6DB, 0xD6DB, 0xD6DB, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x27A0 (10144) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x27B0 (10160) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0x7268, 0x7268, 0x7268, 0x7268, // 0x27C0 (10176) -0x7268, 0x7268, 0x7268, 0x8B4A, 0x8B4A, 
0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x8B4A, 0x7268, 0x7268, 0x7268, 0x7268, // 0x27D0 (10192) -0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x27E0 (10208) -0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x27F0 (10224) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, // 0x2800 (10240) -0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2810 (10256) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2820 (10272) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2830 (10288) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2840 (10304) -0xEFBE, 0xEFBE, 0xEFBE, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x2850 (10320) -0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x2860 (10336) -0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2870 (10352) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2880 (10368) -0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2890 (10384) -0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x28A0 (10400) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x28B0 (10416) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x28C0 (10432) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0x7268, 0x7268, 0x7268, 0x7268, // 0x28D0 (10448) -0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, 0x7268, // 0x28E0 (10464) -0x7268, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x28F0 (10480) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2900 (10496) -0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2910 (10512) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2920 (10528) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2930 (10544) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2940 (10560) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 
0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2950 (10576) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2960 (10592) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2970 (10608) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2980 (10624) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2990 (10640) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x29A0 (10656) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, // 0x29B0 (10672) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x29C0 (10688) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x29D0 (10704) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x29E0 (10720) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x29F0 (10736) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2A00 (10752) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2A10 (10768) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x2A20 (10784) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2A30 (10800) -0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2A40 (10816) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2A50 (10832) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2A60 (10848) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2A70 (10864) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2A80 (10880) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2A90 (10896) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x2AA0 (10912) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2AB0 (10928) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2AC0 (10944) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 
0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2AD0 (10960) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2AE0 (10976) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2AF0 (10992) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2B00 (11008) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2B10 (11024) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x2B20 (11040) -0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2B30 (11056) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2B40 (11072) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2B50 (11088) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2B60 (11104) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2B70 (11120) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2B80 (11136) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2B90 (11152) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, // 0x2BA0 (11168) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2BB0 (11184) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2BC0 (11200) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2BD0 (11216) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2BE0 (11232) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2BF0 (11248) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2C00 (11264) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, // 0x2C10 (11280) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xD6DB, 0xD6DB, // 0x2C20 (11296) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2C30 (11312) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2C40 (11328) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 
0xEFBE, 0xEFBE, // 0x2C50 (11344) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2C60 (11360) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2C70 (11376) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2C80 (11392) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, // 0x2C90 (11408) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xD6DB, // 0x2CA0 (11424) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2CB0 (11440) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2CC0 (11456) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2CD0 (11472) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2CE0 (11488) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2CF0 (11504) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2D00 (11520) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2D10 (11536) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, // 0x2D20 (11552) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2D30 (11568) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2D40 (11584) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2D50 (11600) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2D60 (11616) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2D70 (11632) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2D80 (11648) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2D90 (11664) -0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2DA0 (11680) -0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2DB0 (11696) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2DC0 (11712) -0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2DD0 
(11728) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2DE0 (11744) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2DF0 (11760) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2E00 (11776) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2E10 (11792) -0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2E20 (11808) -0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2E30 (11824) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2E40 (11840) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2E50 (11856) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2E60 (11872) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2E70 (11888) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2E80 (11904) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2E90 (11920) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2EA0 (11936) -0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2EB0 (11952) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2EC0 (11968) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2ED0 (11984) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2EE0 (12000) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2EF0 (12016) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2F00 (12032) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2F10 (12048) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2F20 (12064) -0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, // 0x2F30 (12080) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2F40 (12096) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2F50 (12112) -0xEFBE, 0xEFBE, 
0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2F60 (12128) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2F70 (12144) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2F80 (12160) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2F90 (12176) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2FA0 (12192) -0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, // 0x2FB0 (12208) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2FC0 (12224) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2FD0 (12240) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2FE0 (12256) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x2FF0 (12272) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3000 (12288) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3010 (12304) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3020 (12320) -0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x3030 (12336) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3040 (12352) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, // 0x3050 (12368) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3060 (12384) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3070 (12400) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3080 (12416) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3090 (12432) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x30A0 (12448) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x30B0 (12464) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x30C0 (12480) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x30D0 (12496) -0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 
0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x30E0 (12512) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x30F0 (12528) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3100 (12544) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3110 (12560) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, // 0x3120 (12576) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, // 0x3130 (12592) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3140 (12608) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3150 (12624) -0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3160 (12640) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3170 (12656) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3180 (12672) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3190 (12688) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, // 0x31A0 (12704) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, // 0x31B0 (12720) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x31C0 (12736) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x31D0 (12752) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x31E0 (12768) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x31F0 (12784) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3200 (12800) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3210 (12816) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, // 0x3220 (12832) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x3230 (12848) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3240 (12864) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3250 (12880) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3260 (12896) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3270 (12912) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3280 (12928) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3290 (12944) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x32A0 (12960) -0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x32B0 (12976) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x32C0 (12992) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x32D0 (13008) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x32E0 (13024) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x32F0 (13040) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3300 (13056) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3310 (13072) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3320 (13088) -0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x3330 (13104) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3340 (13120) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3350 (13136) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, // 0x3360 (13152) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3370 (13168) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3380 (13184) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3390 (13200) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x33A0 (13216) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x33B0 (13232) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x33C0 (13248) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x33D0 (13264) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x33E0 (13280) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x33F0 (13296) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3400 (13312) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3410 (13328) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3420 (13344) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, // 0x3430 (13360) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3440 (13376) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3450 (13392) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3460 (13408) -0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3470 (13424) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3480 (13440) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3490 (13456) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x34A0 (13472) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, // 0x34B0 (13488) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x34C0 (13504) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x34D0 (13520) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x34E0 (13536) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x34F0 (13552) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3500 (13568) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3510 (13584) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3520 (13600) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, // 0x3530 (13616) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3540 (13632) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3550 (13648) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, // 0x3560 (13664) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3570 (13680) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3580 (13696) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3590 (13712) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x35A0 (13728) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, // 0x35B0 (13744) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35C0 (13760) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35D0 (13776) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35E0 (13792) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x35F0 (13808) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3600 (13824) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3610 (13840) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3620 (13856) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x3630 (13872) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3640 (13888) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3650 (13904) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3660 (13920) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, // 0x3670 (13936) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3680 (13952) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3690 (13968) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x36A0 (13984) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, // 0x36B0 (14000) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36C0 (14016) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36D0 (14032) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36E0 
(14048) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36F0 (14064) -0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3700 (14080) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3710 (14096) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3720 (14112) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3730 (14128) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3740 (14144) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3750 (14160) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3760 (14176) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3770 (14192) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3780 (14208) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3790 (14224) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x37A0 (14240) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, 0xF81F, // 0x37B0 (14256) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37C0 (14272) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37D0 (14288) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37E0 (14304) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37F0 (14320) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3800 (14336) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3810 (14352) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3820 (14368) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, 0xF81F, // 0x3830 (14384) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3840 (14400) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3850 (14416) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3860 (14432) -0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3870 (14448) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3880 (14464) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3890 (14480) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x38A0 (14496) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xF81F, // 0x38B0 (14512) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38C0 (14528) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38D0 (14544) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38E0 (14560) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38F0 (14576) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xEFBE, 0xEFBE, // 0x3900 (14592) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3910 (14608) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3920 (14624) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, // 0x3930 (14640) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3940 (14656) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3950 (14672) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3960 (14688) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3970 (14704) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3980 (14720) -0xD6DB, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3990 (14736) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x39A0 (14752) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x39B0 (14768) -0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39C0 (14784) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39D0 (14800) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39E0 (14816) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39F0 (14832) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A00 (14848) -0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3A10 (14864) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3A20 (14880) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3A30 (14896) -0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A40 (14912) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A50 (14928) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A60 (14944) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A70 (14960) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A80 (14976) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3A90 (14992) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3AA0 (15008) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3AB0 (15024) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AC0 (15040) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AD0 (15056) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AE0 (15072) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AF0 (15088) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B00 (15104) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3B10 (15120) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3B20 (15136) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3B30 (15152) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B40 (15168) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B50 (15184) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B60 (15200) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B70 (15216) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B80 (15232) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x3B90 (15248) -0xD6DB, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3BA0 (15264) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3BB0 (15280) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BC0 (15296) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BD0 (15312) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BE0 (15328) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BF0 (15344) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C00 (15360) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, // 0x3C10 (15376) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3C20 (15392) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, // 0x3C30 (15408) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C40 (15424) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C50 (15440) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C60 (15456) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C70 (15472) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C80 (15488) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C90 (15504) -0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xEFBE, 0xEFBE, 0xEFBE, // 0x3CA0 (15520) -0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xEFBE, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x3CB0 (15536) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CC0 (15552) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CD0 (15568) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CE0 (15584) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CF0 (15600) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D00 (15616) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D10 (15632) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x3D20 (15648) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x3D30 (15664) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D40 (15680) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D50 (15696) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D60 (15712) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D70 (15728) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D80 (15744) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D90 (15760) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x3DA0 (15776) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x3DB0 (15792) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DC0 (15808) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DD0 (15824) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DE0 (15840) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DF0 (15856) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E00 (15872) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E10 (15888) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, // 0x3E20 (15904) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x3E30 (15920) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E40 (15936) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E50 (15952) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E60 (15968) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, // 0x3E70 (15984) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E80 (16000) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E90 (16016) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EA0 (16032) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, // 0x3EB0 (16048) -0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xD6DB, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EC0 (16064) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3ED0 (16080) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EE0 (16096) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EF0 (16112) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F00 (16128) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F10 (16144) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F20 (16160) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F30 (16176) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F40 (16192) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F50 (16208) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F60 (16224) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F70 (16240) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F80 (16256) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F90 (16272) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FA0 (16288) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FB0 (16304) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FC0 (16320) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FD0 (16336) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FE0 (16352) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FF0 
(16368) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4000 (16384) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4010 (16400) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4020 (16416) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4030 (16432) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4040 (16448) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4050 (16464) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4060 (16480) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4070 (16496) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4080 (16512) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4090 (16528) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40A0 (16544) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40B0 (16560) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40C0 (16576) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40D0 (16592) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40E0 (16608) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40F0 (16624) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4100 (16640) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4110 (16656) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4120 (16672) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4130 (16688) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4140 (16704) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4150 (16720) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4160 (16736) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4170 (16752) -0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4180 (16768) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4190 (16784) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41A0 (16800) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41B0 (16816) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41C0 (16832) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41D0 (16848) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41E0 (16864) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41F0 (16880) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4200 (16896) -0xF81F, 0xF81F, 0xF81F, 0xF81F, }; diff --git a/firmware/include/images/icon_coke.h b/firmware/include/images/icon_coke.h new file mode 100644 index 0000000..bcb225f --- /dev/null +++ b/firmware/include/images/icon_coke.h @@ -0,0 +1,1074 @@ +// Generated by : ImageConverter 565 Online +// Generated from : icon_coke.png +// Time generated : Fri, 05 Jul 24 13:42:37 +0200 (Server timezone: CET) +// Image Size : 130x130 pixels +// Memory usage : 33800 bytes + + +#if defined(__AVR__) + #include +#elif defined(__PIC32MX__) + #define PROGMEM +#elif defined(__arm__) + #define PROGMEM +#endif + +const unsigned short icon_coke[16900] PROGMEM={ +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0010 (16) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0020 (32) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0030 (48) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0040 (64) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0050 (80) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0060 (96) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0070 (112) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0080 (128) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0090 (144) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00A0 (160) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00B0 (176) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF87F, 0xF9DF, 0xF9DF, 0xFA3F, 0xFBBE, 0xFC3D, 0xFC3C, 0xF43B, // 0x00C0 (192) pixels +0xF41A, 0xF41A, 0xF41A, 0xF43A, 0xFC3A, 0xFC3A, 0xF43A, 0xFC7B, 0xFC3C, 0xFC1D, 0xFB5F, 0xFA1F, 0xF9FF, 0xF9DF, 0xF81F, 0xF81F, // 0x00D0 (208) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00E0 (224) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00F0 (240) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0100 (256) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0110 (272) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0120 (288) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0130 (304) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF87F, 0xF93F, 0xFAFF, 0xFC1D, 0xFBFB, 0xF438, 0xEC34, 0xEB50, 0xE28E, 0xE24A, 0xE1A8, // 0x0140 (320) pixels +0xD946, 0xD8A5, 0xD864, 0xD864, 0xD864, 0xD864, 0xD864, 0xD864, 0xD864, 0xD8E5, 0xD946, 0xE1C8, 0xE26B, 0xE2CE, 0xEB91, 0xF495, // 0x0150 (336) pixels +0xF419, 0xFC1C, 0xFBFE, 0xFA1F, 0xF91F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0160 (352) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0170 (368) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0180 (384) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0190 (400) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01A0 (416) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01B0 (432) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF8BF, 0xFAFF, 0xFBDD, 0xF459, 0xEBD4, 0xE30E, 0xE1C9, 0xD884, 0xD822, 0xD823, 0xD823, 0xD823, // 0x01C0 (448) pixels +0xD823, 0xE023, 0xE023, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, // 0x01D0 (464) pixels +0xD823, 0xD823, 0xD822, 0xD8C5, 0xE22A, 0xE350, 0xF435, 0xFC3B, 0xFBBE, 0xFA3F, 0xF87F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01E0 (480) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01F0 (496) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0200 (512) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0210 (528) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0220 (544) 
pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0230 (560) pixels +0xF81F, 0xF81F, 0xF81F, 0xF8BF, 0xFB3F, 0xFC3C, 0xEBF6, 0xE32F, 0xD967, 0xD863, 0xE023, 0xD823, 0xD823, 0xE003, 0xD823, 0xD823, // 0x0240 (576) pixels +0xD823, 0xE023, 0xE003, 0xE003, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, // 0x0250 (592) pixels +0xD803, 0xE003, 0xD803, 0xE003, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD863, 0xD9E9, 0xEB92, 0xF457, 0xFBDD, 0xFADF, 0xF87F, // 0x0260 (608) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0270 (624) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0280 (640) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0290 (656) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02A0 (672) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02B0 (688) pixels +0xF81F, 0xF81F, 0xF81F, 0xFA7F, 0xFBFD, 0xF3F7, 0xE2ED, 0xD905, 0xE023, 0xE023, 0xD823, 0xD823, 0xE003, 0xE003, 0xE003, 0xE023, // 0x02C0 (704) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xD823, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, // 0x02D0 (720) pixels +0xE023, 0xD803, 0xD803, 0xD823, 0xE003, 0xE023, 0xE023, 0xE003, 0xE003, 0xE003, 0xD803, 0xE023, 0xD823, 0xE023, 0xD843, 0xD967, // 0x02E0 (736) pixels +0xEB70, 0xF419, 0xFBBE, 0xFA1F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02F0 (752) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0300 (768) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0310 (784) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0320 (800) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0330 (816) pixels +0xF81F, 0xF81F, 0xF91F, 0xFB3F, 0xF43A, 0xEB91, 0xD987, 0xD843, 0xD823, 0xE023, 0xD823, 0xD803, 0xD823, 0xD803, 0xD803, 0xE023, // 0x0340 (832) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0350 (848) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD823, // 0x0360 (864) pixels +0xE023, 0xE023, 0xD823, 0xD884, 0xDA0A, 0xEBF4, 0xFBFC, 0xFA7F, 0xF87F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0370 (880) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0380 (896) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0390 (912) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03A0 (928) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03B0 (944) pixels +0xF81F, 0xF81F, 0xF8BF, 0xFB7E, 0xF438, 0xE2EE, 0xD8A4, 0xD823, 0xE023, 0xE023, 0xD823, 0xD823, 0xD823, 0xD803, 0xD823, 0xD823, // 0x03C0 (960) pixels +0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x03D0 (976) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, // 0x03E0 (992) pixels +0xD803, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD946, 0xEB70, 0xF41B, 0xFABF, 0xF87F, 0xF81F, 0xF81F, 0xF81F, // 0x03F0 (1008) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0400 (1024) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0410 (1040) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0420 (1056) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0430 (1072) pixels +0xF81F, 0xF81F, 0xF87F, 0xFB3E, 0xF418, 0xE2CC, 0xD843, 0xE023, 0xD803, 0xD803, 0xE023, 0xD823, 0xD823, 0xD823, 0xD803, 0xD803, // 0x0440 (1088) pixels +0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0450 (1104) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, // 0x0460 (1120) pixels +0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xD8C5, 0xE350, 0xFC3B, 0xFADF, // 0x0470 (1136) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0480 (1152) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0490 (1168) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04A0 (1184) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04B0 (1200) pixels +0xF81F, 0xF81F, 0xF81F, 0xFB1F, 0xF439, 0xE2AD, 0xD864, 0xE023, 0xD803, 0xD803, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, // 0x04C0 (1216) pixels +0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x04D0 (1232) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x04E0 (1248) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE003, 0xE023, // 0x04F0 (1264) pixels +0xD8A4, 0xE371, 0xFC3B, 0xFA5F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0500 (1280) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0510 (1296) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0520 (1312) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0530 (1328) pixels +0xF81F, 0xF81F, 0xF81F, 0xF9FF, 0xFC3B, 0xEB50, 0xD8A4, 0xD823, 0xD803, 0xD803, 0xD823, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0540 (1344) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0550 (1360) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0560 (1376) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0570 (1392) pixels +0xE023, 0xD803, 0xD823, 0xD823, 0xD946, 0xEBB4, 0xFBBD, 0xF91F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0580 (1408) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0590 (1424) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05A0 (1440) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05B0 (1456) pixels +0xF81F, 0xF81F, 0xF81F, 0xF8BF, 0xFBBE, 0xF3F5, 0xD967, 0xE023, 0xD823, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, // 0x05C0 (1472) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x05D0 (1488) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x05E0 (1504) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x05F0 (1520) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xE003, 0xE023, 0xD823, 0xDA2A, 0xF418, 0xFABF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0600 (1536) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0610 (1552) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0620 (1568) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0630 (1584) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF9FF, 0xFC1B, 0xE2AD, 0xD823, 0xD823, 0xD803, 0xD823, 0xE023, 0xD803, 0xE023, 0xE023, 0xE023, // 0x0640 (1600) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0650 (1616) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0660 (1632) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0670 (1648) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD803, 0xD823, 0xD884, 0xEB71, 0xFBFD, 0xF8FF, 
0xF81F, // 0x0680 (1664) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0690 (1680) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x06A0 (1696) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x06B0 (1712) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFB5F, 0xEBF5, 0xD926, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, // 0x06C0 (1728) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x06D0 (1744) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x06E0 (1760) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x06F0 (1776) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xE22A, // 0x0700 (1792) pixels +0xF419, 0xFA5F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0710 (1808) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0720 (1824) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0730 (1840) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF8DF, 0xFBFD, 0xEB2F, 0xD843, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, // 0x0740 (1856) pixels +0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0750 (1872) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0760 (1888) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0770 (1904) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, // 0x0780 (1920) pixels +0xD823, 0xE023, 0xD8C5, 0xEBB4, 0xFB7F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0790 (1936) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x07A0 (1952) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x07B0 (1968) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF9BF, 0xFC5A, 0xE20A, 0xD823, 0xD823, 0xD823, 0xE023, 0xD823, 0xD803, 0xD803, // 0x07C0 (1984) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x07D0 (2000) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x07E0 (2016) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x07F0 (2032) pixels 
+0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, // 0x0800 (2048) pixels +0xD803, 0xD823, 0xD823, 0xD823, 0xD823, 0xD843, 0xE34F, 0xFC1D, 0xF8BF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0810 (2064) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0820 (2080) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0830 (2096) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFA7F, 0xF418, 0xD967, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, // 0x0840 (2112) pixels +0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0850 (2128) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0860 (2144) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0870 (2160) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0880 (2176) pixels +0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xD823, 0xE003, 0xD823, 0xE22B, 0xFBFB, 0xF93F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0890 (2192) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x08A0 (2208) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x08B0 (2224) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFABF, 0xEBF5, 0xD8A4, 0xD823, 0xD803, 0xD823, 0xE023, 0xE023, // 0x08C0 (2240) pixels +0xD823, 0xD823, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x08D0 (2256) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x08E0 (2272) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x08F0 (2288) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0900 (2304) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD803, 0xE023, 0xD9A8, 0xF41A, 0xF9FF, 0xF81F, // 0x0910 (2320) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0920 (2336) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0930 (2352) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFADF, 0xEB73, 0xD843, 0xD823, 0xE003, 0xD803, 0xD803, // 0x0940 (2368) pixels +0xD803, 0xE023, 0xD803, 0xE023, 0xD823, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0950 (2384) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0960 (2400) pixels +0xE023, 0xE023, 0xE023, 0xE023, 
0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0970 (2416) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0980 (2432) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xD803, 0xD823, 0xD926, // 0x0990 (2448) pixels +0xF458, 0xF9FF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x09A0 (2464) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x09B0 (2480) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFADF, 0xEB73, 0xD843, 0xD823, 0xD823, 0xD803, // 0x09C0 (2496) pixels +0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x09D0 (2512) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x09E0 (2528) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x09F0 (2544) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0A00 (2560) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0A10 (2576) pixels +0xE003, 0xD823, 0xD926, 0xF437, 0xFABF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0A20 (2592) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0A30 (2608) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFADF, 0xEB73, 0xD843, 0xD823, 0xD803, // 0x0A40 (2624) pixels +0xD803, 0xD803, 0xD823, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0A50 (2640) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0A60 (2656) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0A70 (2672) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0A80 (2688) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0A90 (2704) pixels +0xE023, 0xE023, 0xD823, 0xD803, 0xE023, 0xD8A4, 0xEC16, 0xFA1F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0AA0 (2720) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0AB0 (2736) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFAFF, 0xEB93, 0xE043, 0xD803, // 0x0AC0 (2752) pixels +0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0AD0 (2768) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 
0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0AE0 (2784) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0AF0 (2800) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0B00 (2816) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0B10 (2832) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD905, 0xF438, 0xF9FF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0B20 (2848) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0B30 (2864) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFADF, 0xEBB3, 0xD844, // 0x0B40 (2880) pixels +0xD823, 0xD803, 0xD803, 0xD803, 0xE023, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0B50 (2896) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0B60 (2912) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0B70 (2928) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0B80 (2944) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0B90 (2960) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xE003, 0xE023, 0xD926, 0xF438, 0xF95F, 0xF81F, 0xF81F, // 0x0BA0 (2976) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0BB0 (2992) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFA5F, 0xEBF6, // 0x0BC0 (3008) pixels +0xD884, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0BD0 (3024) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0BE0 (3040) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0BF0 (3056) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0C00 (3072) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0C10 (3088) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xE003, 0xD823, 0xD9A8, 0xF41B, // 0x0C20 (3104) pixels +0xF8DF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0C30 (3120) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF91F, // 0x0C40 (3136) pixels +0xF439, 0xD905, 0xD823, 0xD823, 0xD803, 0xD803, 0xE023, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 
0xE023, 0xE023, 0xE023, 0xE023, // 0x0C50 (3152) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0C60 (3168) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0C70 (3184) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0C80 (3200) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0C90 (3216) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD823, 0xD803, // 0x0CA0 (3232) pixels +0xE023, 0xE20A, 0xFBFC, 0xF87F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0CB0 (3248) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0CC0 (3264) pixels +0xF87F, 0xFC1B, 0xD9A8, 0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0CD0 (3280) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0CE0 (3296) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0CF0 (3312) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0D00 (3328) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0D10 (3344) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0D20 (3360) pixels +0xE023, 0xE003, 0xD803, 0xD823, 0xE2CD, 0xFBDE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0D30 (3376) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0D40 (3392) pixels +0xF81F, 0xF81F, 0xFBBD, 0xE26C, 0xD823, 0xE003, 0xD803, 0xD803, 0xD823, 0xD823, 0xD803, 0xD823, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0D50 (3408) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0D60 (3424) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0D70 (3440) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0D80 (3456) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0D90 (3472) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0DA0 (3488) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xE023, 0xD823, 0xD823, 0xEB92, 0xFABF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0DB0 (3504) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 
0x0DC0 (3520) pixels +0xF81F, 0xF81F, 0xF81F, 0xFA9F, 0xEB92, 0xD823, 0xE023, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0DD0 (3536) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0DE0 (3552) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0DF0 (3568) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0E00 (3584) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0E10 (3600) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0E20 (3616) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD823, 0xD823, 0xD823, 0xD864, 0xF417, 0xF99F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0E30 (3632) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0E40 (3648) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF97F, 0xF418, 0xD884, 0xD823, 0xD823, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0E50 (3664) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0E60 (3680) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0E70 (3696) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0E80 (3712) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0E90 (3728) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0EA0 (3744) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xE003, 0xD823, 0xD823, 0xD823, 0xD9C8, 0xFC1C, 0xF87F, 0xF81F, // 0x0EB0 (3760) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0EC0 (3776) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFC3C, 0xD9A8, 0xD823, 0xD823, 0xD803, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, // 0x0ED0 (3792) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0EE0 (3808) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0EF0 (3824) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0F00 (3840) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0F10 (3856) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0F20 (3872) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xE003, 0xD823, 0xD823, 0xE003, 0xD823, 0xE2EE, // 0x0F30 (3888) pixels +0xFB9E, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0F40 (3904) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFB1F, 0xE34F, 0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD803, 0xD803, // 0x0F50 (3920) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0F60 (3936) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0F70 (3952) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0F80 (3968) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0F90 (3984) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0FA0 (4000) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, // 0x0FB0 (4016) pixels +0xD803, 0xD843, 0xEBF6, 0xF9FF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0FC0 (4032) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF97F, 0xF418, 0xD864, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, // 0x0FD0 (4048) pixels +0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0FE0 (4064) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x0FF0 (4080) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x1000 (4096) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x1010 (4112) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x1020 (4128) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, // 0x1030 (4144) pixels +0xD803, 0xD803, 0xD823, 0xD823, 0xD947, 0xFC1C, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x1040 (4160) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFBDD, 0xDA09, 0xD823, 0xD803, 0xD803, 0xD803, // 0x1050 (4176) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x1060 (4192) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x1070 (4208) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x1080 (4224) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x1090 (4240) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x10A0 (4256) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 
+  /* ... added bitmap array continues: machine-generated 16-bit pixel data (values such as
+     0xE023, 0xD803, 0xF81F), 16 values per source row, rows annotated from 0x10B0 (4272 pixels)
+     through 0x2F00 (12032 pixels), with further rows following below ... */
0xE023, // 0x2F10 (12048) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2F20 (12064) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, // 0x2F30 (12080) pixels +0xD803, 0xD823, 0xF416, 0xF97F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2F40 (12096) pixels +0xF81F, 0xF87F, 0xF43A, 0xD8C5, 0xD803, 0xD803, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2F50 (12112) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2F60 (12128) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2F70 (12144) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2F80 (12160) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2F90 (12176) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2FA0 (12192) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2FB0 (12208) pixels +0xE023, 0xD803, 0xD823, 0xE22A, 0xFBBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2FC0 (12224) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFADF, 0xEB90, 0xD823, 0xD823, 0xD823, 0xD823, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2FD0 (12240) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2FE0 (12256) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x2FF0 (12272) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3000 (12288) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3010 (12304) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3020 (12320) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, // 0x3030 (12336) pixels +0xD803, 0xD823, 0xD823, 0xD803, 0xD823, 0xF416, 0xF97F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3040 (12352) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF87F, 0xFC5B, 0xD906, 0xD823, 0xE003, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, // 0x3050 (12368) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3060 (12384) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3070 (12400) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 
0x3080 (12416) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3090 (12432) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x30A0 (12448) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x30B0 (12464) pixels +0xE023, 0xD803, 0xE023, 0xE023, 0xE023, 0xD823, 0xE28C, 0xFB9E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x30C0 (12480) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFA7F, 0xEBB3, 0xD823, 0xD823, 0xD803, 0xD803, 0xE023, // 0x30D0 (12496) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x30E0 (12512) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x30F0 (12528) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3100 (12544) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3110 (12560) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3120 (12576) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3130 (12592) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xE023, 0xD884, 0xF438, 0xF8BF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3140 (12608) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFBDD, 0xD9E9, 0xE023, 0xE003, // 0x3150 (12624) pixels +0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3160 (12640) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3170 (12656) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3180 (12672) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3190 (12688) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x31A0 (12704) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x31B0 (12720) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD823, 0xE32F, 0xFAFF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x31C0 (12736) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF97F, 0xF457, // 0x31D0 (12752) pixels +0xD864, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x31E0 (12768) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x31F0 
(12784) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3200 (12800) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3210 (12816) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3220 (12832) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3230 (12848) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xD947, 0xFC3B, 0xF87F, 0xF81F, 0xF81F, 0xF81F, // 0x3240 (12864) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3250 (12880) pixels +0xF81F, 0xFB3F, 0xE30F, 0xE023, 0xD823, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3260 (12896) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3270 (12912) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3280 (12928) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3290 (12944) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x32A0 (12960) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x32B0 (12976) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xD803, 0xD843, 0xEBF5, 0xF9DF, 0xF81F, 0xF81F, // 0x32C0 (12992) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x32D0 (13008) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFC1C, 0xD987, 0xD823, 0xE023, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x32E0 (13024) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x32F0 (13040) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3300 (13056) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3310 (13072) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3320 (13088) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3330 (13104) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD823, 0xE023, 0xE2EE, 0xFB9E, 0xF81F, // 0x3340 (13120) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3350 (13136) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF9BF, 0xF416, 0xD863, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, // 0x3360 (13152) 
pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3370 (13168) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3380 (13184) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3390 (13200) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x33A0 (13216) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x33B0 (13232) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD823, 0xD967, 0xFC1B, // 0x33C0 (13248) pixels +0xF87F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x33D0 (13264) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFB1F, 0xE350, 0xD823, 0xD823, 0xD823, 0xD803, 0xD803, // 0x33E0 (13280) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x33F0 (13296) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3400 (13312) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3410 (13328) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3420 (13344) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3430 (13360) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD864, // 0x3440 (13376) pixels +0xF417, 0xF9BF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3450 (13392) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFBDD, 0xE26C, 0xE023, 0xD803, // 0x3460 (13408) pixels +0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3470 (13424) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3480 (13440) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3490 (13456) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x34A0 (13472) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x34B0 (13488) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, // 0x34C0 (13504) pixels +0xD823, 0xEB51, 0xFAFF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x34D0 (13520) pixels 
+0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF8BF, 0xFC3B, // 0x34E0 (13536) pixels +0xD967, 0xE023, 0xD823, 0xD823, 0xD803, 0xD803, 0xE023, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x34F0 (13552) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3500 (13568) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3510 (13584) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3520 (13600) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3530 (13616) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, // 0x3540 (13632) pixels +0xD803, 0xE023, 0xE2AD, 0xFBBE, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3550 (13648) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3560 (13664) pixels +0xF81F, 0xF9BF, 0xF418, 0xD8C5, 0xD823, 0xD803, 0xD803, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3570 (13680) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3580 (13696) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3590 (13712) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x35A0 (13728) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x35B0 (13744) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x35C0 (13760) pixels +0xE023, 0xD803, 0xD823, 0xDA09, 0xFC1C, 0xF87F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35D0 (13776) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35E0 (13792) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFA5F, 0xEBB5, 0xD864, 0xE003, 0xD823, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, // 0x35F0 (13808) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3600 (13824) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3610 (13840) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3620 (13856) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3630 (13872) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3640 (13888) pixels +0xE023, 
0xD803, 0xE023, 0xD823, 0xD946, 0xF439, 0xF93F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3650 (13904) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3660 (13920) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFADF, 0xEB93, 0xD843, 0xD803, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, // 0x3670 (13936) pixels +0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3680 (13952) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3690 (13968) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x36A0 (13984) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x36B0 (14000) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x36C0 (14016) pixels +0xD803, 0xE003, 0xE023, 0xD823, 0xD823, 0xD8A4, 0xF438, 0xF9FF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36D0 (14032) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36E0 (14048) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFB5E, 0xEB91, 0xD843, 0xE003, 0xD823, 0xD803, // 0x36F0 (14064) pixels +0xD803, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3700 (14080) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3710 (14096) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3720 (14112) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3730 (14128) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3740 (14144) pixels +0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD8A4, 0xF3D7, 0xFA3F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3750 (14160) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3760 (14176) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFB5E, 0xEB71, 0xD843, // 0x3770 (14192) pixels +0xE023, 0xD803, 0xD803, 0xD803, 0xE023, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3780 (14208) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3790 (14224) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x37A0 (14240) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x37B0 (14256) pixels +0xE023, 0xE023, 
0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x37C0 (14272) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD823, 0xD823, 0xD8A5, 0xF3F7, 0xFA5F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37D0 (14288) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37E0 (14304) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37F0 (14320) pixels +0xFB7E, 0xE371, 0xE043, 0xD823, 0xE003, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3800 (14336) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3810 (14352) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3820 (14368) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3830 (14384) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3840 (14400) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD823, 0xE023, 0xD8C4, 0xF3D6, 0xFA3F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3850 (14416) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3860 (14432) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3870 (14448) pixels +0xF81F, 0xF81F, 0xF81F, 0xFB7E, 0xEB91, 0xD864, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, // 0x3880 (14464) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3890 (14480) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x38A0 (14496) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x38B0 (14512) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x38C0 (14528) pixels +0xD803, 0xD803, 0xD803, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xD823, 0xD8E5, 0xF3D7, 0xFA1F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38D0 (14544) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38E0 (14560) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38F0 (14576) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFB1F, 0xEBD4, 0xD884, 0xE023, 0xD803, 0xD823, 0xE003, 0xD803, 0xD803, 0xD803, // 0x3900 (14592) pixels +0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3910 (14608) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3920 (14624) pixels +0xE023, 0xE023, 0xE023, 
0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3930 (14640) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3940 (14656) pixels +0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xE003, 0xD823, 0xD823, 0xD967, 0xF439, 0xFA1F, 0xF81F, 0xF81F, 0xF81F, // 0x3950 (14672) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3960 (14688) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3970 (14704) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFA9F, 0xF3F7, 0xD926, 0xE023, 0xD803, 0xD803, 0xD823, // 0x3980 (14720) pixels +0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3990 (14736) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x39A0 (14752) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x39B0 (14768) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x39C0 (14784) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xE22A, 0xFC5B, 0xF9DF, 0xF81F, 0xF81F, // 0x39D0 (14800) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39E0 (14816) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39F0 (14832) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF9FF, 0xFC3A, 0xDA2A, 0xD823, // 0x3A00 (14848) pixels +0xD803, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3A10 (14864) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3A20 (14880) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3A30 (14896) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3A40 (14912) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xE023, 0xD823, 0xD823, 0xD823, 0xE023, 0xE2EE, 0xFBDD, 0xF8FF, 0xF81F, // 0x3A50 (14928) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A60 (14944) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A70 (14960) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF91F, // 0x3A80 (14976) pixels +0xFBDD, 0xE2CE, 0xD843, 0xE023, 0xD823, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3A90 (14992) pixels +0xE023, 0xE023, 0xE023, 0xE023, 
0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3AA0 (15008) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3AB0 (15024) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3AC0 (15040) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xD8C4, 0xEBB3, 0xFB7E, 0xF81F, // 0x3AD0 (15056) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AE0 (15072) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AF0 (15088) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B00 (15104) pixels +0xF81F, 0xF81F, 0xF81F, 0xFB3E, 0xEBD5, 0xD905, 0xD823, 0xD823, 0xD803, 0xD803, 0xD823, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, // 0x3B10 (15120) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3B20 (15136) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3B30 (15152) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3B40 (15168) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xD803, 0xD803, 0xD823, 0xE1C8, 0xF438, 0xFA7F, // 0x3B50 (15184) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B60 (15200) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B70 (15216) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B80 (15232) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFA3F, 0xF41A, 0xE28C, 0xD823, 0xD823, 0xE003, 0xD823, 0xD823, 0xD803, 0xE023, // 0x3B90 (15248) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3BA0 (15264) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3BB0 (15280) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3BC0 (15296) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD803, 0xE023, 0xD864, 0xE330, 0xFBFC, // 0x3BD0 (15312) pixels +0xF95F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BE0 (15328) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BF0 (15344) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C00 (15360) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF8BF, 0xFB9E, 0xEBF4, 0xD906, 0xD823, 0xD823, 0xE003, // 0x3C10 (15376) pixels +0xD803, 0xD823, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3C20 (15392) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3C30 (15408) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3C40 (15424) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xDA0A, 0xF437, // 0x3C50 (15440) pixels +0xFAFF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C60 (15456) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C70 (15472) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C80 (15488) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xFA1F, 0xFC3B, 0xE30F, // 0x3C90 (15504) pixels +0xD884, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xE023, 0xE023, 0xE023, // 0x3CA0 (15520) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3CB0 (15536) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3CC0 (15552) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD823, 0xD823, 0xD906, 0xEBD3, // 0x3CD0 (15568) pixels +0xFBDD, 0xF93F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CE0 (15584) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CF0 (15600) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D00 (15616) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D10 (15632) pixels +0xF87F, 0xFB3F, 0xF418, 0xE26B, 0xD864, 0xD823, 0xE023, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, // 0x3D20 (15648) pixels +0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3D30 (15664) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3D40 (15680) pixels +0xE023, 0xD803, 0xD803, 0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE003, 0xD823, 0xD823, 0xD823, 0xD8E5, 0xE32F, // 0x3D50 (15696) pixels +0xFC3B, 0xFA7F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D60 (15712) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D70 (15728) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D80 (15744) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D90 (15760) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF93F, 0xFB9E, 0xF418, 0xE28B, 0xD864, 0xD823, 0xE023, 0xD823, 0xD823, 0xD803, 0xD803, 0xD803, // 0x3DA0 (15776) pixels +0xD823, 0xD823, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3DB0 (15792) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3DC0 (15808) pixels +0xE023, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xD8C5, 0xE30E, // 0x3DD0 (15824) pixels +0xF43A, 0xFADF, 0xF87F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DE0 (15840) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DF0 (15856) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E00 (15872) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E10 (15888) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF91F, 0xFB7E, 0xF437, 0xE2CD, 0xD863, 0xD823, 0xE023, 0xD803, // 0x3E20 (15904) pixels +0xD823, 0xD823, 0xD823, 0xD803, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3E30 (15920) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3E40 (15936) pixels +0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xD803, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD8C5, 0xE350, // 0x3E50 (15952) pixels +0xF43A, 0xFB3F, 0xF8BF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E60 (15968) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E70 (15984) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E80 (16000) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E90 (16016) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF8FF, 0xFB7E, 0xF45A, 0xE350, // 0x3EA0 (16032) pixels +0xD926, 0xD823, 0xE023, 0xD823, 0xD823, 0xD823, 0xD823, 0xD803, 0xD803, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3EB0 (16048) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xE023, // 0x3EC0 (16064) pixels +0xE023, 0xE023, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xD803, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xE209, 0xEBD3, // 0x3ED0 (16080) pixels +0xFC1B, 0xFADF, 0xF87F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EE0 (16096) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EF0 (16112) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F00 (16128) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F10 (16144) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F20 (16160) pixels +0xF87F, 0xFA9F, 0xFC1C, 0xF436, 0xE2AC, 0xD8E5, 0xD823, 0xD823, 0xD823, 0xD823, 0xD803, 0xD803, 0xE023, 0xD803, 0xD803, 0xD803, // 0x3F30 (16176) pixels +0xD823, 0xE023, 0xD803, 0xD803, 0xD803, 0xE003, 0xE023, 0xD803, 0xE023, 0xE023, 0xE023, 0xD803, 0xD803, 0xE023, 0xE023, 0xD803, // 0x3F40 (16192) pixels +0xD803, 0xD803, 0xD803, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, 0xD803, 0xE023, 0xE023, 0xD823, 0xD823, 0xE167, 0xEB2F, 0xF438, // 0x3F50 (16208) pixels +0xFBBD, 0xF9DF, 0xF87F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F60 (16224) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F70 (16240) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F80 (16256) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F90 (16272) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FA0 (16288) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF97F, 0xFB3F, 0xF43B, 0xF416, 0xE2CD, 0xD947, 0xD843, 0xE023, 0xD823, 0xE023, 0xE023, // 0x3FB0 (16304) pixels +0xE023, 0xD803, 0xD803, 0xE003, 0xD823, 0xD803, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD803, 0xD803, 0xD803, 0xD803, // 0x3FC0 (16320) pixels +0xD803, 0xD803, 0xD803, 0xE003, 0xD803, 0xD823, 0xE023, 0xD823, 0xE023, 0xD823, 0xD823, 0xD884, 0xD988, 0xEB4F, 0xF417, 0xFC3C, // 0x3FD0 (16336) pixels +0xFABF, 0xF8FF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FE0 (16352) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FF0 (16368) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4000 (16384) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4010 (16400) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4020 (16416) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF93F, 0xFADF, 0xFBFD, 0xF438, 0xEBD4, 0xE2CD, // 0x4030 (16432) pixels +0xD926, 0xD884, 0xD823, 0xE023, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, 0xD823, // 0x4040 (16448) pixels +0xE023, 0xD823, 0xD803, 0xD803, 0xD803, 0xD823, 0xD843, 0xD823, 0xD843, 0xD8A4, 0xD988, 0xE30E, 0xEBB5, 0xF459, 0xFBBE, 0xFABF, // 0x4050 (16464) pixels +0xF8BF, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4060 (16480) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4070 (16496) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4080 (16512) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4090 (16528) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40A0 (16544) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF87F, // 0x40B0 (16560) pixels +0xF99F, 0xFB5F, 0xFBBE, 0xFC5A, 0xF456, 0xEB74, 0xE370, 0xE2AB, 0xE1C8, 0xD906, 0xD863, 0xD843, 0xD843, 0xD823, 0xD823, 0xD843, // 0x40C0 (16576) pixels +0xD843, 0xD843, 0xD843, 0xD863, 0xD884, 0xD926, 0xE209, 0xE2EC, 0xE372, 0xEBB4, 0xF457, 0xFC1C, 0xFB9E, 0xFB3F, 0xF93F, 0xF81F, // 0x40D0 (16592) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40E0 (16608) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40F0 (16624) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4100 (16640) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4110 (16656) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4120 (16672) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4130 (16688) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF8BF, 0xF8BF, 0xFA9F, 0xFBBE, 0xFBFD, 0xFC3C, 0xFBDC, 0xFBBC, 0xF3FA, 0xF439, // 0x4140 (16704) pixels +0xF419, 0xF419, 0xF419, 0xF439, 0xFBFB, 0xFBBC, 0xFBDC, 0xFC3D, 0xFBDE, 0xFB9F, 0xF9DF, 0xF8BF, 0xF87F, 0xF81F, 0xF81F, 0xF81F, // 0x4150 (16720) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4160 (16736) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4170 (16752) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4180 (16768) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4190 (16784) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41A0 (16800) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41B0 (16816) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41C0 (16832) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41D0 (16848) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41E0 (16864) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41F0 (16880) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4200 (16896) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F +}; diff --git a/firmware/include/images/icon_coke.png b/firmware/include/images/icon_coke.png new file mode 100644 index 0000000000000000000000000000000000000000..2aae0530812899550358bcfa28883eb2413cb17b GIT binary patch literal 29577 zcmeFYWmH_-wk}$@ySux)d*SXbg}W9`a1ZVpJa{0%-7UBiB)A6;kRaibwbtHy?RU>R z_np?>{kN-?%sIx_zuw0;dLMJn)<(sssmP)r5+VWs02FySpvLQO=qM>G|aIbjdFkRkt~^oEuIy{o+A( z@$3WA-~Sw(mTl*rFHT?w66eS<`lwqpuI|Z+VIZ{T|Pj?jHOOpKjG&eg>(q zENMuM2Tabk=)j3_5w&p?7(#V1C3qKIXU|-~1wQ@StxV<^N|n40h-V0t$^IUsl6rCW zhJ`hd^qnyAPeP4AtP6dG$X>aTWg_Jlk?fwS$!UQHzMx-IPH!T{kisssAG`0!uCAWv z$+l+?mUoYDeoP}RUR?CBoF*#Tf^l1LK^#OfTdxeGk-st}Dxj`?&-~aWKP97g88P49+D_DW?4;%P=nF&5C}( z*r%DX^7Hk)W#fM3CR~a>Kj3p^d4Aw< zcBWd#@qwXp*V&!W`IAXgo%Zva#a~8kJtj?;P4-tTTJ8aMo7J?D_F}9U&~(xc!?8oe zdROmp$O}uBChn6w7CsZ`?{1NNDhA5vH}VVF4(as;NHX&U)NM?sKW3gDxWAaz8a{dK z=VhF~Sp5D@On*G|e8j76mWDm8T^X0Wr(G;(I>}9b`DU%yIoh+?+ZXgpbP2xs;k<2d zXblg4Bp*W}2fW(tOD40n%y6~XkMEfp)AC$&EUWVE|`yzR2+Mt7}lI*m!S}S$jeY6pj&Jhzv|>q z%dJ7{#X;Qo$o9@#ZhKo0Nr&y8uGclj-`P}Ah z7+{f7F%`);Hq#s$wGT|%KJt1QZcrF`fbRTT54&KGMjOlR?(6+x6*jPwCl|_l$+MU@ zann=-Zb@WzVL0$Vg%~!BLAUfOcq;c<8*64U?eo@dSleaz$B58g^}o2qj&9woA>Ht) zp3%QguOi}-DW*yRjMI|ld5pYebtLs%artn6IWf@R$D8q75uMs4%eGtv#m}VL zq_1P^Hr0O8rl{!)D@CXjcoHu7Q{AjZj+u+f0n1*%Wc%W_^U(b;K75&DBCR}okzLL* zx*u-Ev;T_>Jni*Uh8k>0FABrQH|sJp5I2=64sq%9m97q}I=t}RT=;4a^4*N?E(YWS z`gD4_T14Z-$UNp@L`rm@cAWTSrW+pC30mnkS$FNS$&HyDWsc;lya?|lhEdTIu96L? 
zU)s}qvXK?$tPGi0)1?ECB$uh83NlkUiLAvcZdF&D&%u z`rCT`SSsL>y~pD0SHZ#k;}lUok1g)`j0AZm-R{4k(Qr?DKv;gixvMSmdt6yRP zIrV!a;RB6DcOG0}>7545FGxSOP!zh~07&0BhB`Md8eCcu`oR0pD@>`#B`8y}Pg-5J zrvE2XI#t1++>*y|69dGr?p zIR*Y?Er3)Oub>HLo!kv%9MpjnfVjcx4=%^*{VG|*(MC$!YpKUfS2T!XZx_aH5GgPY z0iUxs(*_^nzZUux(-vlSQji!QibZ02ojGiJ0u2Ic!9CZXthh11t=C&wLd^LmO<-6L(LgU1tskqfzn9JOy(JZ)MXjSD<#;DhPQtSBc;x@Sa@ zrx#*Z_$)U^Z`uH@+zl@=;iR$4ZdBhVX@B?an?j-SJBJL1$6rTfp z24wm_I22ij$1u}afNn+jbap*2`F-FA3h(CHV)Te`&U3fMOHcwWfaD)2+Jzd2ee~R6 zAI)*@BntL+ejVgIk6iSa!h4GAkXN5r(C?FSG|x2P#1C*%)T+VS0L}>a-oiN}M;sy> zLp*#}W-g>VoT!yo#;WnCcb8*^FVv<*`4UNL#`5z%KaJK3^#JdvCmU%S7N-*+$27y3J@Y_)L86AYagvX2yTJbQ)P?j{rHq_FjPFrUNVD&XJH16dT z3Lb?!oyq7>88phceCW;7Pzq{56r+-bUz)lIG^ar4@|o?IU`6{+W)3Z2G)}`I+@p33 zlS3cxk5Eaqu~^Z#)Lvr*VvzP`hBQe`3}Z8OKM?I=x-6WRve&IG`fS{J$jYQKF|C5> z0$t7yM_HN)eoGuai7P2PBfS@y)Me6E86q_r8P0>pB*A)DeZ%X9;39T0VV!7>1f2@8 z$MX%U#e3?>qn+tT=QNpTu=2Q#NuuqT8C*o0~)_(8LYac+pycRC$ONhPDVr1<5HY&R-6 zFkE(Tg}&9umN%(LjluyuOIq7khJZhc$G4mbpRQQ7~>kG|{F2}TTT4*acw3I$_ zH0*cmc6FbSueSRLkfAwI5h=)*+BWqKALyqTU@z{Rk}KKq^#gQ{3_sosmREDufkif^ZW?*M;ZblCV649zrif z)j7e3Aa4Wj@JC$6z=BP8&syV8L!hCrCe{7Uh=U`)aL8ot*SpFJFFTo~TglT=o6S%` z;T8sat!`kYFuK(!eoH|);@faW57-UrduP5X;=}p1sH}M%`DY9~-9|&`s+Szn0 zK)*{qd|fe=kBugux4M}Lg6F29jgv?G#)HG`QTo20K~GF>UZ#p*-ff64teQ&<-;hCg zZ;M2{Hmk(T5{C8!fDlE%`@ls@_-5e)v{a2x z90sb}`Mmk}SiW&aCz3^$XrU*gqre!tO@3GvXsTMDxZe0wGwI6IvT|oydnmis99Bb- z&paJtZ}Cx?5QY7}kitsRvM?|&QWxI|u3L+Y((FL_&LALN^JoiDR`5_KHgHF?%WAjQ zWvRHUy>kjx+oG^{#?pbNW*b4g0#H^fePfjAC~9=3iI%9965e)TMe-3$NUR8}-B*;4 zQ4oxjSP(eC|7s>9gS)w?%~u_)*-BaoVocqclv6?1xm6}7(h_A%0`cM*P9rg9rm(${g9{bv1})v<;q_@hmZ2zzKOQ8b zt(Lzf&H=FfjJ<1W*KU()dC$!#;3Y1o1*6!j&^p5l6=?M(nyu{!IykxluH<{`=!VQN z5(j7$9~)uy^Fbs7oE^jh#E_HFQ6ur!L_G3$2acAj-Qf)!G&oqES`N_Vsj<6Io*0dT zKPmC&&Zx(EuBrwgAu4;zKNpWaLN|+U^_BF|ksHueA<4;PSgg;&;R$Ql@($l0HdOJ zQOJO(DUV~w!`qs`Hn=raQyxJmkV~W7{HHuAG+G53#Uq-d^4J8)>=10uTgU-#5OW}a%6SA0VV-E2F zW;3u(gE%kFl>WR#VY%g(Bib;<+B!d30CpK965 z&ZFkeIX)r61&S%yrlDom8Zyjz*V73`{RRmuTQU((Kp~D$gFRoks1r9|1tUqckzlmj zYGDGA&TrP=6i5L+N1yt%Z5smNIgm~Z`{4P_PbIN#5)j2DSg@S>EK@yQr)hK11mBDr zcc0~b>*eAHM(Bl8zuUZ=Vb{x(U_w2iJtFQul&spK#kodw$kR*_{I2b&7b*Z0Aa0rM zTao%f7>1NKun?6RJLo!=Fyvn$pN`RL52 zZb7S%R1^{vyjar5TAWZSjL<>s>& z-+5&2NmUV);wO-ME;G`3bVc;)O>(>tAYKdSd`}&nK%V)V9y=wD?i;}vAqpl#b#tW@ z$aCntEXR0Ky0`eBS5L(6z0yJ$Z{P7PXTXQ+z$!5Do?(MVn2N*V%mWHIgQSFoGO*-m zK#f$jjN&PLEZh6@t%usj1x|$N8nn@K@!8LyAEXAt z%nq9S6DMi+*=ih2{c}!nky53wXS=;5!rEnfAC!gnn7M;SG=5fr4)8OVYxDoQlYb*z%e zT6L>M>Y1-9HyOVc&e`m#L32fJ;Q&EQDc4iw+Fhh!36KD$s$JcNN>??1lE4mYIW%-L zwcJM@3_UoMdNvMf^s)NlHANa(R`6J0E7`WbR}R1B!mv_hrgsbs5I*5=z5&xzv^;4+_CbrWQ-WQm#{UN`+Z%;&Y5 zl&dvaPh;}}ny<^U_&j<3=LqMj! 
zPZsj+4L-7(dmnZy$?WGMEv6X5+&`1D4CNa?I9$ULYIp(NUG$?xFRC7DS9N$;{@vn1 zA0p=m@-!BT0~S!Pt&UaF?Y3{!sj-w8e5^!swi1#>?sQe zLyQ#~nKtYl$wzi+C!P`t7a#vbD@xOX_wzcdv!(4Z#aq&sR7MFv`>LN*1dOy$ea6|cN(f?@RmP{w|OpA_Nc098cQ*W_h^2?>lc3EC;@UBa9?sw#HSC(_bsN=1mT%Wu&T7D6u(PHKQC57 zzd;o*Sm&9ACXlCml;y1s3s9=N3OsTHx-Fc4kV_Fuu7RWE6EEgOzeYz2xeVqglJtllF#)iNSwEw7JtES7G-U*%ZI}U@K|FkUZFrcE<4@P|v!6 ze%{`g08e>yssDq8j{t9A2>up{C2o5$4%0pAQHyx-9sYc|L8>EvVpri1M$uC(xBx&& zmYExjq$=vv1WA3d;*K`Tj@yOf4o0CK-e5dSHj~FKg#k(psZ~Yxwx{a|vv%||woX9{ z6_SNPCKOnOC%15=B=aWuS&q^3!_BdzR5JVPcQVZesjqYJFZ#vmp`_}gU@TPIzx_1z zPCgx}TS0V(446Ep?OSD!^xbLl(qP7$lunyIJQBjKI|;!TR+W-y-x^(6k$cV+vR5I* zfW3Jg7rWsjWu(b%XGz-2cQ%SGA9tx?LnYQslXl)tI3qp8$kQ|0YIOG<&0Z#^73~wi zyV);Dw!j7$dYgfIW^zJD2tJE2N+qa^IAuZpeUHYE(}U!M4Q8ciXDaMkDGNM{@K)@% zY1JQ?igh|FG!IA`kV*#~69+#>}!Gg9^jJT$a%c+a4 zIAh9AIa~6@?c0ORNSc6tWFDV>{Z%*Hste9yq&Ig}h>lhWRk2rvAzn`63|gyG+iA$q z#gtlg^e0{LpVLBYw<7e@#S+J18BnnUOu5io{6USMml|wLEVels3W-B!1l^)< zgY*Lix?N_gG{7th1_=?0B!1z14}x8S|JY^^a~h?iSM)s*F|bX$l8OwpQZJ%3r|0Yhbtn#rHn1X87{4YBv( zqWq%kmP(QgWFcK8&9Y}~saXwg_TOgYA9qWPL{qC9wpc>MANDU-pMxd3r#R;+-lTfa2po$~klTT!=^D4-u||`Q#-%s%~NR<X)%;mbiE-N2jW6aMcEOl@WEjY!JkVCn>(kqY$|c;1?Gne~!OP)aM(>OabQjA@n zH4xC^i-vWd;2U%oP?5TP$_r)XfO`A3r`3uPUTLP%a#}d=MazZfq$2bqY}xTG`-S@t zwR08L({JptLRPYc`@JbMKOj=K0+9nx?O6g)r`Sn-Ipv0r6 zzr#l)&=SNOUlV5yL`y<*=?)66N*)R051i4W!ugYa@0Z82zcsn-7@E9I)P*BO^tC^g z!o{(R>|UCMULZ60&Zvg@!r@fevnx4DGfBOW-%;f>x?U^VNVtF;qQkE_W$5@}IGqt@ z1TE``At|}g7PUG`Wnz{0E4<-t$!4+0WMcAJRGw4HAIs+Dc!XCCA z#en%mB+=Q;qda4TZ#7GHP1Q$Jp}HiPeL2zaLh}v3R;4qM43K_leJrh3%<@F%@{#dW zj{&B>*Y*skxmSDJ)P91X9a&B4ZqD<$+^B3*y#o>z-3tx_@4g~Z+uY*X&oj7U3PRI0OVBh6u*SAs#PdO?9#& zi%N{WrMON=7X}2dut;)5hRW9$qxN&_4<1T=rJBhPZ)Is`wL@Aj0Mvm4l->=8r-x02 z=JZfmjLKN?t9c5(`k#jw5PRw6!Uv&$6tBLFu|sRi{@{fFwi)W%mP``_G&9kA$np75 zY3fidxrGkgCE&*#>O5~VgX;tLMiANcu5^pT%jJaMaW~o@BpMO;HL5%@OQcB@> zxtjPZq!!>-q2ClbQ&YmgC3-|{&vWrK0C8xV2udNrK0-O`UvTQmzBk7%vTbNx)4)TS zl^oRQwr0aWzCDCWWiQthVg6x6(8`zv*L#`3>W9Oj)qxla^8sLi2)$h0g?BuUk^Hkt zo99gDfZwxwq*1eTpM%k%Sa2_V@6JJ>w<*|52{XH&nP5ytJXkjip9j67{Zm<^w|cF$ z6+VK=q&p*O()O{$L*_(}L^-mOm3Er-@O0HfqU{=nVf8>=41~*KvGSya$uRbo!_Y>p z!ESx6U>J8|_*1?|_>vfE59ia{(e z+TUf>d`T(mwHGp}4>DMy9$Hk|l4xo&4Zc|442Gq~&{WwUBMvZDrZ%xfbm)5lJzHak zYD2hyQ0{MQXgcBS-4;Bw8J8F^;Ha@l%xJWIyuZfD3nJVPFV6MGSIb1NTgPh69+)hm zA@C|wA8JfOz~8`4$Y4_mqMvD>9houF?whO+e6_dQvT-mOdV+RU@FRbhopD4!%2BQI z83-waumSOy>uO24lx1`)?XM%vLRt93B$T)f>j@$=*{&bPx2dZF0Xop0Su7f=xXQ7U z(OGNKZenD`eN{l`TehGxdY=6mAwUdjl)tm`@T?{wKJ3x;hi-o>mARb8yOBIzXBtWr zf}XoT@U?DN>5nmF*KO-hQCYo3eMU4+f~idB74kL?%!QzQv*g;4A`5*QB}Q|rYIeAz zx68O10ppqq3e_&0G?sGY(qMQmj0CtwJ0;{QG26_^X2N$1wG5;YmB|IRG-q!ji%PJB zf1*-_6fVCmPN zikw6{EN3tw!MJ*aWvTSPRK28{MaT72ZD)F}ois{`$KQBy6l?bafMAy`mpT|6YAa+I zGLj%Tqux&Q8oZ)N`9w%mC)TNwXNh6P2aKCz{*>K2rTAJ2KbfY+=7o+@6sgFg+*rN& zg+V#^`VUetK6Qez^oa;8tL{S*#S9ArBdPMZV|`5)5FU*d<7ujMJ)%7&Z3%E1PScWb zI6t(Uv$M5Y_K^`$s<=+UxDX}XpUss(bc$7s4DYeBw*}lX97uRpgvWI;N?Ek~W=om;jdBg*-PH5^pF#e^Ri z4WJ!qcoUsrSPJI_rH7w++6d=2fY!jP^6N^@jzX_JJ+%U&0PO;m36rj`>##ap4nYp* zBfC;<3r$k*rJ@W+40cZ6Ce1l7j&&G$`o)~m7QTJa9p6Krt}zTu2#I;S@zjXQ4j=LQh6kNV4#4WGVBvXth(?9B$fg2;aTNHogTu(`i{*=jH0`GBR?`P zQ}em~63*_Bw_pAOF&fd4+X%z%-2_-wxL$oj7Tib@g%oLlc}zSotRj&=nnfS+mQzEP z)uZ73^*efnZo(9`vk_w|{c5|=%Fu`uba;pvt~;-h8ovoa=gp1sE(1aGdC6rnkMLp}$QI~Ev=FcS??!Y5oN^6r4c z%T>!+kB>9&phe4jdhHiyGr{=7if3=9vvCH~_bNZv+cUrM)36CMD?FCRfQ_Ne#jr>* zoW!xK{s6jtF0Xj_KKCwH?(MR9FxC{N^I27*2h8Bh%ufkjlbeU7zL<9eoz2T&ui$VF zT4f>*Ga+ma3yqfME3XIda4!AS=~L9oeC=_;jg`fEcd%X!Ib>G#6Y6R7&4RtY?+wky zlfm@%5AD9m55wf=0u*Q=tMpM;gSebBDvVUFfC3ymDHG-?;9#Yq77k3lz=S1Q_MBN% 
z5q@u?c^1k02v`3z)+~n&ycKM{<{ATpDU-{=_RXsR^kYi^-*?iK<}T)}DT=y-h4XgL zZX($!5w@{(U(ZGY5z;L^l4$WUz$j-@Y3@V(>)VsJ4ks`6ByI}QOJYf&>J}%;NA-nR zUtJZYd-2n{fI~KTHEuECM9NfXOsqnU-2m-fNKGg(AR+{cyld4cwGg8q{NlSgHn=Mrmzw*6<{V#g>BVGB}xm6Jr zr*R8Q)VEr@UOl8XKYQ}b-GY2A5%A~ktd~?Q;asx%Hx`1Faq-kl%CRt*r3%hJYDHY@ z($seYP=1k6j&e>hx>oS*JYFWEB4ay8R!$&|h-eDw+JqMBme&ICGsk3=*t&b2+pzTD z>_r5N&_$#4!O#T^$9_a-xK@|PB)w*{eFxKDm~Z(3d;Dy>KTn8u%jKQ9%Jp%8X$>lQ zeP+r|&7&;<3_2I(u2LJ8CT-L2Z@E$Jxj&<^<&R45%Ory382c!g4Hn-(dBaClTb$9~ zBiWLNW6;PU@cH;^LLIU)?tG2Vkt7(Y{ou;3yZ@%`pfa*(*{m*JFi>q7?6<+-;=8^n zkn_WPni#E484R-mT&{>vV4>K0zdVepS23jm;7=OY2Ddr1Wr?_dZA1tquN?2|E1^P7 zXqNEMgll*}Zky9u^B!y`D5GAB^0lu3pawU;!3x7-I6z&bcp->2@ zDwio}N(pPfT3knPYUo{F^v(&l9ZAs{^j?l=Lyj{{$O#n3_Y|r)%T${^jNf8yLA}5s zzMqMAAlu-$fGw)#$GYocF?PZPSno+lxUk*^NY%+3-)^5idk4Om)a$(#2qLI@>=?m% zdA_f(=b$o=5L6pXtGl<8Cpz!N`ljm7?;>~Y*wn}oJ4^9c$mp(_7p3o;GUXn5wDr9@ z1Fuwm?nAks9(>3ES@@nB+aAe7VW_o_!(Mj`)%A4u1w%wC0Z@I2YCIeKWpM}E`Y@UMR za59?gSB14fBby@a2&~}MlJo{mc@T7(=^`n$STH6Zp+rsSVQN-jfK!69cH&6yw9A@e z`Oz%`Jri`EbY_`P*FDgkh+oXy3ilRhTaF_tp|R5u43*EQxtqWnj2>#ALd69L!l~4D z9bnYSN7neFx5(9RBvUe6z@5D0jZ+;=5BaFv{ptVn9%yJ7^`Ub~8|e`BZPYe0iiv(^9f)w+=+)j8STk zVy{~HDOzC3jf*QEHd9ZC9Ua04+qqCTkh1+$q;a;KpxVPq;J5ju@oU~g5}mn`;mU(8 zMqqtvxFc*J8VO zYE>gQj<7h4k+#sP#)mwNGv^aPmT0hf7nO}>1nMQieSDO!jXV{i>21S@XQOoP*00@C z?HI@_Cq|M%CO8W}FOC={#PCL;_T!*|#cNK--=WqV=wKzcigo+{^+D?-MNE`MNU6!5{ zTaDdV`(lmS(~vMzUIJLj_xYL6AL%hArYnz6!d0XyKP|mS?JcDgy)V+-UM;LyFTPv; zR2A&9dC)`w4X05Zco|u)iqJ-S<&pW|sTM;&R2`|+ltEEzD1+X`Ddp!|#7o?TNw@oc zO7PK?jbpJ|aTTtx>3Ek7+%6?V@?O$>hJn+hruuY3+kE{bz)NtdTZxt&4V1AuE4lp9 zExbkaC>Up{y9FdE#1w0iA}Z@ug<&JNvL(C>mNwMzuSoFo>^2}NV@?%Ki=v@~dhlI~ zivUW&RV6u%2HGxIEcBw{>}X!1T4EPj;L#=j`W*ZD3p;_SBn@-EbYoV>=TrwTlJIR+ zyxZL)dEU-BlUd_vz5IwQYn)HTgmYBKQ0B)T#4{(pvhQPjaq*HLXp72*mD<**Li?zQ z%hSFU!k^akJPZP8_VV3mdJ(=<42maKh{_tlz3_fH!+p8^$nui}1N@>PwBa9ua`6kU zD|7GIFLPuU0Dy?Flaf-Cmy-I&PUF}8#d&X%MdSv=iHA%z6{VR_z0lo8)nNKUE22xZ zKEmTF7#3;6r7HFm%1fo}?9jL35EC9QNJrDXD1pS83mj`!+Gz zf)qZ2Ev%TsEN8tzqCu|Qj49s&{t1N?$p95$=NXt7vNqO^r{T7v`d+R98Co7yVp`m{ zB+TCIOazQq+-^X1p3iunSef!#_^Z`H~kT}qop<5Yfyi9{=1E=yt3LqZGOvWW9Q`hhsAI7-;tIU z|HQd^x;y@Xv9w^band9H^U_0x7P0+uM=XcNl7Ral+fAaqu`X74z5z8OC z3Ibg$Jbyct2Z~Vsj#tpq#lp@~@Q*`l5En0>H6JGnp9LR33pa=l#KO;R#m@q=H0LuH zu(aYd=ivJrmAtbD802hW^_%LIoYn4?hliirnv2hZhlQU9WWmDCZ^6rA&TG!j!ezz5 z!3namwgy>q{f$D^-R`v#K#qTp>Nl0;D;2i@$by%Xmz~9mpNEHqn}X5r#s z7vScywlue7xBi3b_dEznsL6{^aA3UEH-?TpUFxe@9FHoAQsMA{YLvTm+R| zEdDV318imayW;+;IZ2QW+aIUGZ2ukje`C_Lb@6uo|Hkt#=s#H`+`-;1?hdN%s^<1q z7U2J$=f48~lS$)swetYG`^x_x2KE2I3ICa}a<8^7?!JHXuW9A>=h>f2lB3-pQjwGY zu?z@;EdG?=1LS38`NtHz>iB1ug)PY0#_Dyw{j1pi(Qfx&B(fzZH^|D$nwy2wl3jp> zTY$rYg`X2-&LUtbz-?{8Y0hgYVDazl9xm2kZ;-o{gw1OXyw3A$as4sRHw!x-3p%F2R&*F*UC8YL+I zI}tTKU0ogRtla-yr+?(l{|oMK_J8H*|H=ICus^M(TwHx$tJoH->h1jBy8kbLe=;c9 zSy(xHxcqma{~hwDEPuNYy~g}!+v^4H^?J$n&kN>XS@OG({x81%%D(@L9$u;cs;$El{)L@_?VV+tP~V;)OgLOg(xkr5vQhMIwZcjU|1hW2LSJAGX0 z=+Mt}_fhA}LLwd)ZJyQ6iEW8+WE4|-(9&NX^~pwKwXG-AJ{2iQ z@-l?Lo1v$i<{RBlqP+fL6B3k^LOTnARQUthqD}+~j`)=6C=Yi%H*joFYqxG_mn~wO zI=!o!@O#wOb&G}R1ozTnN(DVc{OPa)q?$cQ^TomF9Hp1M7$ry*ST$9ronOvhrww6_ ziRX}ea2!>zHGZCtd!n%UaLMs(VP*^UT^ZpPO8d1!2v$I5Bs+xF*S z*m?|jMDq~J$@n+-Q{Q|wg8e!U8wcsaBVR_?vBg!9WUqVbOoTbEerYX4;v;{=LFShg zWuj?$PKcDymm_d3RqlNst9)koBjH;?wpcG$4~z2SjY^dmQY853cvkJIr3uMYrIRZi z#yl*qcDfi*L!3|Vo&+k)qu$9*BrPNlyxB87#z4AdG6P$e{bFi{?;_O1Q-2Fo5>~v-X7jN^T4l!^L93oMVZ5iM(Q1R>5&L* zZpvEPc5r7mBoH_sQzGsrxm1W#YUwV+Ye>AeS3vB0QqxWq_e!;ahZrV}GDTI;>XfSl zYI5?(*w7r&s6zBThdg#SsJ+k@hauahghKXqLr+n=7fLkh@X<+)gx;{oIrJXd<{B7K 
z@ILcXPpT$M`ol;+^=|H?w&(Pm`urE`;@EXEsvSFzgou!p1Em0c-xftKW<%J)@v)+T$Xx-UDB-gsRGdXO70rHxw8)tV{L7i=bq3I z;uy6<`##t&k6>oeZHbGQyMCMRnf~Pf|hxh`BoWl^rnO zyrd_m-mJWss0JtmVIY^&G2RUG#1WKn5G7FAmg_>VB_Q`Xr<(Hr2(b!KE-~DX^~MijXMy+j@&>$7bt%y zGpu0EyVhiS#`1m{mP1b-(Aa7dOP|KK&hJ4DA%T_>+&N@r^F5TYaEixbqN10>2{j&Q zTZJg5E(p^8btB^E>l)Q-W@=z4ddT_tmTfeDAqA+9Ift2B^?Y49|`;n7+6Un0{Ldu0pI1> zcgL06`}k0#iq~Q92?BIE=j?z#C^i`RE_J ze+cg7-{+du-`9O85DZ-AXv~L!3{Js_TdWNvI;3re9dL%f&eUsE@D6O=*qj=Jpm(zz z#;!hVuk9_Q8|p{ev>G3i2|fR$)xw6)Xf5yL^aG2y5e9!QnO=|Nc}t2$y=?Qzc7we9 zn=TvQ&l}T^&<{i}+PO-WsTZrzoBjivj@MQ|$op{=h1 zRYQ2@o^YDcCo_h~!9Z%14L@NywNFCOV=D#s)~5!>(ZW_^HEPk)6LB!!lCw5p}h|um$|sr8X#q5(9%I<)WGjBSvLi zyXn!hTC8MAFhP>*??-0e%#7Ie2r>B}A)Y6Q98J)HV+lUf;MkuTx>Lu<8M!Jw{Dml0+FrRf~g)4-UX@r zSR->V@ulI|e$(s-6OzU)tx5^gutCZ8m^04#SRk%{tdQpBbI~-C-GvpPKS7wY2YhkV5;8_w8IZ!*!QNPRwiDVCGS zi5}^G=zds4Fk|dISC|@|;jYbR*JrdhG8Xb#Yvx#Mq%=2gYG{~yAUzSX-HZ%;wl4_O z5qnMuhhaCPBXFm?WhC|rb4WxbL|RtLV(v53*_0Y1*T#XBvh7VC;xi_CI@%rlo{dfe zYksA-WONZia(NrcF1LI#SW2epPgkj8%k)c%p6T{W&(XnY0Nl4aR#=SUtILTx#)}{| zv0N$9f)-OUmmBLr;QQ0*=4g6-9T}7|2w7Yc@AUGUbMsGCAeP56`|qunYpL^fozxd` zn#D-C%Trm|Z-51>LKh>~nCR+JxL<$KS)!e|czu(5&&UPybWBH@?llwW>C&lxvw-j6 z8&OdinIko2@3o=hX1(P*)kFlWDt8ojz8=?`Sc>dci0%R?b40(^%-9u zxgxe|+*VIms9_NhFiF&lj#9h$vpPxP>&U^etcqU)rOj6_siok+wCoQ`Y4wt1 zZ?t?*H!5n!yIi!a3*IfV^{5M-giL^R9Vnf3mkG?^>%qp~f7vW`KW$DFWm@Hn0tEDd zN`o>WiK+ce=!d%xZ*JpfG}mL!NIDEv9OJ6l`Sj-*bB{v`3Nx z&#wd@Hr8ajf3({lv1pOlDfQYE4et2fA@B_6wkeyfK)Fr350f% zM~SYdNJ~k31k>LtFe7udB~(xagVObSjv`@E(%sK~=vA zX8q7S=`h@7zH-WB!I0LAz7}|Kj-~T$*k!;)G#*dssG~|_8XQbsgvQF?bJkoY zdh`ODov$Xet!&3b`{~YaZjV`Dv5=TE(yNz^y>P65$tCPq!z`F|7W*xPUHYGxz8SBG zf6u&Sy1@-^j}NV_{i%p3W1c)Jd9=o{8U>Rciz!?>eH&Xu{^K$u9f^#R&)v!PGwW*uL@OG4T9OM>CpEPLvO!b{F^wd(**ZUCr2B)0hHswbXH zjDO2asFRA>xP{26OeY8>zD+;8H(uu?40f}6d{7Q-`pj5=Hztzi*@aKv=j+pp3;;Hi zrM$Fc#GN#AZZcVwNE}LKLPB4C>Dps=-x2?cem9@z{emRvI8~s~(V^`Mxl;%AxO8o) ze7%q~S;DQ+(nOvg`*kJ1PnP^5Ys$^ctR$nqvR=axcw8vRv7L)(O;Cd&kYS#3WV3rm zQc*rwA%e2Xk{fTc-k4?xQ@6I+gU<+lKC9t@Cso8C(*1VNI^o#*>F?v~pX`)zn5@;1 z`VLAkY~ahED$lswd&7kVL$8F#;Bc($`4_SWoak!PUY#NcDhP$G<*TbFz7u}!4PII8 ztVLr!;D+l-vTPaB1W3UUcdgTk+GNHQZBF&6r2F#kzAf#rUSJa{0BL=&?5_LuwdtBF ztLEfOqI0|KYc37+I>aa{04T9?Rq4>1WcnAWlLi23L3-L17iOJ!yxF=1+%74vu6X*n z=)KR9igO^?9+&6Tf3k5|Z z>n^)S<}Bm^eVg4oii`4GIPr)aQU^civ_c=79m5wV|&3fyb#aa8UPkBk#h(m29(O6DA_1+yq9y!;6OBh>QTlj(q)H&9FPUyuNJOr?hwb zW)I&d3l@ZKeSl(iV@zcz~iQWo!JXXOq)L)|8^ zlW%B+rc8_6|1^XhMJWVvAu!^)W}}81ty_{MRb$6h^gpZo;-QI!ON@*xF$^V9gN%WF zi=KJZ*W&kgw;OBk#QvJQSVZ7Y-zyEPvR?10#qd_k5-8>}sqQUI-01%OQWjQxQ z2t*y*xLY<4zBL(!$9>UhMK@i=x%mL>l`BiH9wu`Z8ctN!(gv8P{i9ndv<-~cfRBJqWb8lDBrwdc_WYY7Th*UV1*%eqOKt&4LP>dRpVFg z9>>)V;=q5-^&fbEQLFylY0(!Z1V&z)e$sJp znXs+UjF~mVMnSFYYc4mM7Xge&r1J4+u%_BEUmX7>Gx14%v(7kG-6laHDs{oKhn~RF zbx;O1+9i~jHoN4uk^b)8<8{@sN$*)-e9smuF*`>DHDcg6)D%!K%rj36oO}`)tRjhJ zt7`6hoZ_NxBQX5(+|y64c=FlAxVL{h0PDR+jx!=|rCfl4X9}bwM1*!lg~P6+oJ;~iNyQ4j@s|DiG$+2Gsr4m=X(K-7 zO>4~CAQ2%}R#r-RF;jQQl!j0RLs}IVUv-%g2nYc&t9s0fI)6Ese79Vgci!3MFTNbP z?-{nysFm0Idwz=tQz589kpqJ*Qd852LCP%yK!^~y?0mCDOGh`dsK)Kuv9I9gb)u*~tyet>>lZs%kcUV1Sy0;G`g@|uSqM>HzrVig5Q zNDX9MJ;c?iBe4Nh6^;KeujZbI$xar0VGmctstH zKqK%cZ_&g^y0@F;ISU)4d$h_t_6V>+DLIOM|9#}8H%ZCS3Ji9gJ0PuZFJcxD>T4?= ze1xm(Rg{;N#OD13OR8UfE&R!>v}Yc1z8i&ORnLvlk{WfY%@B*)+B~~p#Bgp|q{OhL z?v1yt*WOb|#x<9Ex^)p^!%9@W_Lk0Gz`2>;K_}V^7so#QmYG~99+uVbIASANx;q@H ze0(&Q)RK}Is+m4d|KqZIbTm52d4;jruzj)lpvMJsBm9( z$#u6F%(mZ^J>*i#$pI5oNwi`52k{B-kthtf1|FSr*~Q+yyTI#)h`(2lGDh^CG06fq z5M|=M+GXEzeW3Th;P)YJ`Fi!FK%i>y6>O26>n;4xg_My=AX^i4Z%&r)7Y2r3o_pTe zm1D<6?syzU=@~a&o7w+(%E};+@5nxBeR_M_w}ID1qQPl1DnI%h)@G!kvAnwi@kYf$ 
z>+dP5;_Ma43}hV98$_UFnfUmUpF^)t;-ony%C>LtAhUBva*8`QG*pit&lagdOv)U7 zwYOc{>W80lwQCbN_&_Ft3dQ1e?@Z#DQ+G*8RSgc@bZO>*6Jfd((709e(kr^E+)nr8 zUVkkY6@W-X;fm2OP+5cjx{EUh^s5^CV*Ks*74V&Zif`X;Dd!06((;<8o;NJOVBgJ` z7e9KR_rPxCG9$CUseI&_#Jcj#Mw zc5Nvu0Al~CC;AUMKn;kBy<$!6*q1UNxHbR6b1TP<58d%7I`39AZgke*(S3fEVF}sJHlXL3H)f3-|PMDJY=$#tyJ4Lvy)oY@! zPl2kbs(_f}y8QI~A(yJxOGF}KEnF0S=}iy?ZW!Y4-2=>INyA4oDp%&R$7o88&1tLmVRsVfE|DN0Z zojRA^^HB8Z@w8XRqGulV?AbxcpisE-vC$0=jm~-ePPp6zRVz{Z=DYgs0+mhshBhh{ z4o<<4VuLiX6x8;1KbX?}DeKBcOZ+6r58c0jJvqkGZn{5VTe9pk! z3(khi1%*`BR6g>g`@%DdhTjmL^JC4hkw(~7g8RgyJe_w3DLKxgy87S?FM(Kl?p1Kv zMezHCU@iW+`r2V!8B~HNCtDgCNCoUN&+b<^;ug-!RaR?FS@7vGoKONAd3n+Dby^qp zUUP}7V>>XbiiE;Bg9dOsUUK~~>#hH3`xXU{-sSGxNeL8+R6h1}_^u~h$MwoQ`Y0u+ z!dmo8{i9E?5Mj%tpj7)0+kt@zf%H`I3#TTS3R!6f?gKC!8wng+wo-mrnzA8OVR?3I z#hHO5waj3D$M$vod!_Y0JbPfj_|j$TuD+Q{YbcOr`uvV%ODYJ(msd1tN$_u-0|S>G@X=q09ggiJb^8U6OYEQF-T`8T(~<)s45%x++ysk2`a~ z32?i~u@w{zU(S~A7b{7@6_-+rW<*Ms)V}^EFDnN~(mkFodo+CTDbzHM=}~mY%|>3f z7(^Da+fAkmreU?p_qS_X^TtGUXpwtze-@oHtLADLP?g%6 z>K9+FzxQ#YHmuC)!>&&2eWp@p;&>fB}HQ zAAeeL;~iM=OVR@$3dL8iadq0=vD1yboT58!0!YIR70-?h-}PA9MQ0_wnof;GmpeFf zR$8|%$jV9<5mh_%&36@7-bC|$VSy4<)PHbyWM(E0Oo&$1L?%sR)xdGbAU6{PP1w=J zOLWm{7E!-DL_t()r+rlOpPTI4Dss;r zD%(&4u0coTo^zVIU8HI+TUB-aZB!ev zy0t61@EkQ6fL2sjKlwZ*#L%tq<5-alSD9vtwYS(XGmF<}87dKo3-b)GcfH1rMr3UX z#JOp!I!!N@tPFnmpE~o#yFxe4uY{G7M2W z{`2yhJMS|a!l4`QUU$Q7?iR(Io8vAnPVaLl73K>O6!FDNBX7Q=3Sy=XE%UFt78w~J zLNr|Y{5Z^CuF&kuE-_oR05RF9n>@vy@wK=NZIus?XA3|$)kYNT3iwmbj32?>n$w2P zbz;&cISvvGOL=8dl{-E^D57@lwYSIT{p9PmPk8=Mq4(dX@0P-elp<+gsgQp7^{O+^ z%RG62r&EW-idEq+zKqTIf~!K#)8bP;D!pO2caIKMup#u>Y<%(!D_7h!JTRc2p{SoV zTi*Y|5Qaiheq%9RW>JBt5V2F{FFfxjQa1MAiwkq(KmAlW{I2->Uob(Tl%1Y?{grlU zX=v*E#bZaqWdO=?t7`OETC~@OBFaL-j?_qbefa({^<)`p z5EGPXV@jn0#NYi~|Lxrh0f=FMM2QH*V6{2=;>4&bDUp&2sS4&U4Ss*GfK4W~6eR1J zN32xb%oWHagB_MH#bWkPOTg;hXRpNEpGt<_$cxt`Be4dj-+WbCx4ld6eITvhaqhOQ z)ifa*nKwWB#w1oHWzFyuUOB|7uC0IadCtrBb=qA4vMs5u4Uc;RVt~bS&cMv0`+!N6 zw4$>5spk#LwhGeouDqNwGe}jV@tPN2#_}@PMT0VrJfiH8r)Xt~%4W0HoJ}fOemC;g zGc$~WOiuG@eH2PW0zj6nvm1h@HSQM5WoII5)8kB5xxiW?g_3pcO z=}ouMvUPz=&ZV>fh!hl=H(!^pVg_|>mp9~6@})V6Qw^Walo?-wD0lev?k;;0liC{o z@~g<0*SYt8g+s3h&HgI(^mu~fT-prdgct;5=kv^q#(mJxL@H-LlbLDs&Bc=yWc+Uwrv!w5Df-Hlio9kluhr@!8vCSs+}@5 zKK>mwP0v1Ek}dsgguecoZ9{#Ob?Xggo7P}bRn#}sj(-CNJm(C|95etfQxvGJt$O@9 zZIRpT?vaVgs+!^VXf#1eTO!4)Ldjj&jKZ!a(om5_#f7@OjMa8vV3>7pPWB)AFZCKA zQgB+&gKJMeUS6ICkreGo*V8^41;l_b*b-%;mIY?d{au|qrFH7$>AVN$WRTyl%)|ud z7i#|;AQ1DDW|HtCY3`;rQJ|(3W1@j>i2nj z9n>)OV_2e4gE?Wsf9Y8T*I%WP=-QiZrA5pAXPv5n05L&eM`Id^ic9_1UX(XSZcN*@$RHL{vMCvCRS%_)>CGOw3kHD* zD)B`>m*0J_O!&Ze*un0udqh{QOF!(ehIv1;&6{o8$c(P-J+2lT=QsGWip`F#W&STv z8!5w|75c@3vVj-)Pwy8v{upnYRz_Yfd|tI|4TYp3m?*0ZmX%qvz6;O%4692~8*z9g zMp8>_(z?;6u5=QB1*i^%Z@zc!_j8*+e2*QASB$tj_U3dB$5M1M4iQyndEH$*r42f{ z;Ht|i0)BbzUA7%-P9q~8FE1<29CWI;LmT*g8V)B`mqh0+sD9xk`g#E+bbUIqaP5;X z`nq)S9k{PrR^q3htH+Itzw|E0EQen09IN{C+M3t?n{(co$j(VDTTwIq^~h5%yArX= z7hf*8atIgXSj$({yge!O*cfh>9b2%-+o_XOR|jW&QhncJ#*$S~5|2UA7&9g6s+&}X zT+g;h8jz$!!fiYA_u1=%amhbCMb;Qs+Y=|MNYiYeB;o|yB}VgRD9EFnG{kMGu0>4{ zr8Sy}laN6y07QNr+1O%!1k^=oH(VZHv{>Hy&?Z$I8_>Q9L<|z_*)H?ABkCu;rwdo6 zi2VRVGQ8Zj1!V-ZE+lJ9sj}9INH_5%5CcNCM9{7ob!aW6v)M+Ed;$fppdl;rQXFgfU zQo+A&Fy{~yGH!n$XVPi-W+NE(LCwGkwYObK;FN+FNF6q2(j0a!KB}t10tGV@AX({DRiCPMs*Zh3?7SaNW4|O5SU|*0*tZ}6 z23Xk^!G?Cs`W-_GM+@50NMMH&n7|CRNd#aao8b}zBm&zZohuPL(K0YWlC*Pd3`1(x zUZeQR|9S50_^V9Nx$uYgR-b1R`JI|v8i>LcN(d|)mQ#*coSnDrX$#JV6~|dC;(VE8 zy=gSrJ1oRXupyRGRl^1t^|LN}X-vVKjQk`~tUd^fPtwGrcuEX2kQ3f8SZ}e@F<bS+2Qfx72>g&Nt zeQoD$PGh@NoH&`up2}+HU&Y!8Bm^W}lu^(F-)|-_8f%b|eP~_4W7fxz^o?!0f8HO= 
z=G^G*TYg&dUuo-)+RAgV^`o;Xcugr^n|ty$O$4bGM~3yZ4qu5#Sl?Tmx4iqD$ySo~)0f#>tfo9*(m#VTTxImi8P?cUXK zaBu9`+BLR>K%FaSIyH}AMBY`+|2wnp7>5P(hhPBUn%pYq#2eL1f=v{>-8D9i z1(9@aTd?~T^t^wofBf4DeSU-V)qQ+tUJN27(OqJ&6#!I8)o11nxWzTO4H~eWU;t>0 zyCCyo^Qgm_K*jl$NuhC$M^YqPy>=Fu73u*J9S zGrpq0m;cH<=WZ*_O~5WCv9bA1g0{uYjv8f-Z-H&-+Wk8^D7{D7t4_$d;AY#+*dUMO zpQt9BE(1t*kypt{&p+z{?|XY=>9#%RcQvwl&BzX)mUi(_agouaevLyJ`KLkQ@XCY; zIcfP9JeD@ICuVN*De*h|iP6)(g2W~17he^3uTOTD{JQCA?@3z)I7R=Eh(F)h- z;F!maP-2q~*#95$k=kCV)Yirl(kxn-lXK<+ncp0NkADA5|0oOqaBiZy_>H>JcW_-) zRFW0>n#kGCjzm*3c9KGyWZM!twJJR2A>W4w;*&o{W7O>u+U#!!yM9>y>doegvc}4x zfI2+RogaoI=RTFVq67h%#$i1Q4h$$EdJJEf{@<4Zzr8+gyTag1;Noaa?&R86@1nI;$viipsbv9eOn1M>S^k+!G@ zz3ko*AGgC`a7G`E_xYu2*_h}%Z*b63ku4lbTTZI~b2LHY%VbtL5nAP?54<#s`WqAT zuw;kb0(D1o@dCi*(l8L z9M?Z@x3k<|cEqF|yFqq549@5PJt5MN`AzNjV-hnyLnLl!3Jz&Zt8$b$YtxocJLts! z)#4`;d07Fr^8((Zj>&0%u6JQ)Ofr5;6mutm!5Oy$oomAu%}tWs)v0Gd zEU>`DySMcobV%B6$NFkI8=q%mKK^6?{%RN+M_1tBM6~s?xV0p>XnNwuxw?D}Tec{% zlk}yM^5&2@1woqB?Mf!=Hi-kSpPN%SC7JK+w5S9a8t`bFR%r(v>dWu#cXu@B7GV}| z7oWt=3_}w&j{R(V_nL_3r)d4E#Om3RZ@)oh6*VLbr;L#>)(Z!v+7wYEbC;9FP*M*x z09M$B7>W!xwQS|tyPGG!hd;Nqw|oy;;m4>+_^C!DAXNk8@o`?RSzO4?i;bL4E_Z<|zaTB%fva*T z;=v5pwg!s-sW5Cb3IG6yh_;QXH>#ePgc7jT_BN;|1|ir`0!%d`eoFY*Wb(VH!ACI< o&GKLw5cz9P{QqAt`~$}S0k^PD_FL`P%K!iX07*qoM6N<$f{w$Gx&QzG literal 0 HcmV?d00001 diff --git a/firmware/include/images/icon_pepsi.h b/firmware/include/images/icon_pepsi.h new file mode 100644 index 0000000..1899fac --- /dev/null +++ b/firmware/include/images/icon_pepsi.h @@ -0,0 +1,1074 @@ +// Generated by : ImageConverter 565 Online +// Generated from : icon_pepsi.png +// Time generated : Fri, 05 Jul 24 13:43:11 +0200 (Server timezone: CET) +// Image Size : 130x130 pixels +// Memory usage : 33800 bytes + + +#if defined(__AVR__) + #include +#elif defined(__PIC32MX__) + #define PROGMEM +#elif defined(__arm__) + #define PROGMEM +#endif + +const unsigned short icon_pepsi[16900] PROGMEM={ +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0010 (16) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0020 (32) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0030 (48) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE81D, 0xD01A, 0xC018, 0xA815, 0x9813, 0x8010, 0x700E, 0x580B, 0x4008, 0x3006, // 0x0040 (64) pixels +0x1803, 0x1803, 0x3006, 0x4008, 0x580B, 0x700E, 0x8010, 0x9813, 0xA815, 0xC018, 0xD01A, 0xE81D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0050 (80) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0060 (96) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0070 (112) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0080 (128) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0090 (144) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00A0 (160) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, // 0x00B0 (176) pixels +0xF81F, 0xF81F, 0xF81F, 0xE81D, 0xD01A, 0xA815, 0x8010, 0x3807, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x00C0 (192) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3807, 0x8010, // 0x00D0 (208) pixels +0xA815, 0xD01A, 0xE81D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00E0 (224) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00F0 (240) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0100 (256) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0110 (272) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0120 (288) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0130 (304) pixels +0xF81F, 0xF81F, 0xD01A, 0xA014, 0x580B, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2104, 0x528A, 0x7BEF, 0x8C71, // 0x0140 (320) pixels +0xA514, 0xAD75, 0xB596, 0xBDD7, 0xC618, 0xC618, 0xBDD7, 0xB596, 0xAD75, 0xA514, 0x8C71, 0x7BEF, 0x528A, 0x18E3, 0x0000, 0x0000, // 0x0150 (336) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x600C, 0xA815, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0160 (352) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0170 (368) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0180 (384) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0190 (400) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01A0 (416) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01B0 (432) pixels +0xF81F, 0xD01A, 0x9012, 0x3006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4208, 0x8430, 0xB596, 0xD69A, 0xEF5D, 0xFFDF, 0xFFFF, // 0x01C0 (448) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, // 0x01D0 (464) pixels +0xEF5D, 0xD69A, 0xB596, 0x8430, 0x39E7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3807, 0x9012, 0xD01A, 0xF81F, 0xF81F, 0xF81F, // 0x01E0 (480) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01F0 (496) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0200 (512) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0210 (528) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0220 (544) pixels +0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0230 (560) pixels +0xE01C, 0xA014, 0x4008, 0x0000, 0x0000, 0x0000, 0x0000, 0x4228, 0x94B2, 0xC638, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0240 (576) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0250 (592) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0xC638, 0x94B2, 0x4228, 0x0000, 0x0000, 0x0000, 0x0000, 0x4008, // 0x0260 (608) pixels +0xA815, 0xE01C, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0270 (624) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0280 (640) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0290 (656) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02A0 (672) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02B0 (688) pixels +0xC819, 0x780F, 0x0801, 0x0000, 0x0000, 0x0000, 0x52AA, 0xAD55, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x02C0 (704) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x02D0 (720) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0xAD55, 0x52AA, // 0x02E0 (736) pixels +0x0000, 0x0000, 0x0000, 0x0801, 0x8010, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02F0 (752) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0300 (768) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0310 (784) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0320 (800) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0330 (816) pixels +0xC018, 0x580B, 0x0000, 0x0000, 0x0000, 0x2965, 0x9CF3, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0340 (832) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xFF7D, 0xFF5D, 0xFF5D, 0xFF5D, 0xFF5D, 0xFF5D, 0xFF5D, // 0x0350 (848) pixels +0xFF7D, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0360 (864) pixels +0xFFFF, 0xFFFF, 0xDEDB, 0x9CF3, 0x2965, 0x0000, 0x0000, 0x0000, 0x600C, 0xC819, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0370 (880) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0380 (896) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0390 (912) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03A0 (928) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03B0 (944) pixels +0xC819, 0x580B, 0x0000, 0x0000, 0x0000, 0x5AEB, 0xC618, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x03C0 (960) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFBE, 0xFEDB, 0xFE18, 0xFD34, 0xFC71, 0xFB6D, 0xFA07, 0xF8C1, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x03D0 (976) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8C0, 0xF985, 0xFB0B, 0xFC50, 0xFD34, 0xFE18, 0xFEDB, 0xFF9E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x03E0 (992) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0xC618, 0x5AEB, 0x0000, 0x0000, 0x0000, 0x600C, 0xC819, 0xF81F, 0xF81F, // 0x03F0 (1008) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0400 (1024) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0410 (1040) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0420 (1056) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0430 (1072) pixels +0xD81B, 0x680D, 0x0000, 0x0000, 0x0000, 0x738E, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0440 (1088) pixels +0xFFFF, 0xFF9E, 0xFE59, 0xFD14, 0xFB2C, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0450 (1104) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFB2C, 0xFD14, 0xFE59, // 0x0460 (1120) pixels +0xFF9D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x738E, 0x0000, 0x0000, 0x0000, // 0x0470 (1136) pixels +0x700E, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0480 (1152) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0490 (1168) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04A0 (1184) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04B0 (1200) pixels +0xE81D, 0x9012, 0x0000, 0x0000, 0x0000, 0x6B6D, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x04C0 (1216) pixels +0xFF3C, 0xFDB6, 0xFB6D, 0xF8C0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x04D0 (1232) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x04E0 (1248) pixels +0xF8A0, 0xF8A0, 0xF8C0, 0xFB6D, 0xFDB6, 0xFF3C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, // 0x04F0 (1264) pixels +0x6B4D, 0x0000, 0x0000, 0x0801, 0x9813, 0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0500 (1280) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, // 0x0510 (1296) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0520 (1312) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0530 (1328) pixels +0xF81F, 0xC018, 0x3807, 0x0000, 0x0000, 0x4A69, 0xC638, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF9E, // 0x0540 (1344) pixels +0xFDD7, 0xFB2C, 0xF880, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0550 (1360) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0560 (1376) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF880, 0xFB2C, 0xFDD7, 0xFF9D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0570 (1392) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xC638, 0x4A69, 0x0000, 0x0000, 0x4008, 0xC018, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0580 (1408) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0590 (1424) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05A0 (1440) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x05B0 (1456) pixels +0xF81F, 0xE81D, 0x780F, 0x0000, 0x0000, 0x18E3, 0xAD75, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFEFB, // 0x05C0 (1472) pixels +0xFC92, 0xF923, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x05D0 (1488) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x05E0 (1504) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF923, 0xFC92, 0xFEFB, 0xFFFF, // 0x05F0 (1520) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0xAD75, 0x18C3, 0x0000, 0x0000, 0x8010, 0xE81D, 0xF81F, 0xF81F, 0xF81F, // 0x0600 (1536) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0610 (1552) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0620 (1568) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0630 (1584) pixels +0xF81F, 0xF81F, 0xC819, 0x3807, 0x0000, 0x0000, 0x7BCF, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFE9A, // 0x0640 (1600) pixels +0xFBAE, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0650 (1616) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0660 (1632) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0670 (1648) pixels +0xF8A0, 0xFBAE, 0xFE9A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73C, 0x7BCF, 0x0000, 0x0000, 0x4008, 0xD01A, // 0x0680 
(1664) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0690 (1680) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x06A0 (1696) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x06B0 (1712) pixels +0xF81F, 0xF81F, 0xF81F, 0xA014, 0x0000, 0x0000, 0x2104, 0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFEBA, // 0x06C0 (1728) pixels +0xFB8D, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x06D0 (1744) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x06E0 (1760) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x06F0 (1776) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFB6D, 0xFEBA, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDD7, 0x2104, // 0x0700 (1792) pixels +0x0000, 0x0801, 0xA815, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0710 (1808) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0720 (1824) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0730 (1840) pixels +0xF81F, 0xF81F, 0xF81F, 0xE81D, 0x700E, 0x0000, 0x0000, 0x632C, 0xE71C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF3C, // 0x0740 (1856) pixels +0xFC30, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0750 (1872) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0760 (1888) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0770 (1904) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFC10, 0xFF3C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0780 (1920) pixels +0xFFFF, 0xE71C, 0x632C, 0x0000, 0x0000, 0x8010, 0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0790 (1936) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x07A0 (1952) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x07B0 (1968) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE01C, 0x4809, 0x0000, 0x0000, 0x8C71, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, // 0x07C0 (1984) pixels +0xFD55, 0xF923, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x07D0 (2000) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x07E0 (2016) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x07F0 (2032) pixels +0xF8A0, 0xF8A0, 
0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF903, 0xFD34, 0xFFBE, 0xFFFF, // 0x0800 (2048) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0x9492, 0x0000, 0x0000, 0x580B, 0xE81D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0810 (2064) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0820 (2080) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0830 (2096) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD01A, 0x3006, 0x0000, 0x0861, 0xB5B6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0840 (2112) pixels +0xFEFB, 0xFAEB, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0850 (2128) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0860 (2144) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0870 (2160) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0880 (2176) pixels +0xFAAA, 0xFEBA, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDD7, 0x10A2, 0x0000, 0x4008, 0xD81B, 0xF81F, 0xF81F, 0xF81F, // 0x0890 (2192) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x08A0 (2208) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x08B0 (2224) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x2004, 0x0000, 0x2124, 0xCE79, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x08C0 (2240) pixels +0xFFDF, 0xFD55, 0xF8E2, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x08D0 (2256) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x08E0 (2272) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x08F0 (2288) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0900 (2304) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8C0, 0xFD13, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x3186, 0x0000, 0x2805, 0xD01A, // 0x0910 (2320) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0920 (2336) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0930 (2352) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC018, 0x0801, 0x0000, 0x4208, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0940 (2368) pixels +0xFFFF, 0xFF5D, 0xFBAE, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0950 (2384) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0960 (2400) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 
0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0970 (2416) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0980 (2432) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFB4C, 0xFF3C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE71C, 0x4A49, // 0x0990 (2448) pixels +0x0000, 0x2004, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x09A0 (2464) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x09B0 (2480) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC018, 0x0801, 0x0000, 0x4A69, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, // 0x09C0 (2496) pixels +0xFFFF, 0xFFFF, 0xFEBA, 0xFA07, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x09D0 (2512) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x09E0 (2528) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x09F0 (2544) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0A00 (2560) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF9A5, 0xFE79, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0A10 (2576) pixels +0xFFFF, 0xEF5D, 0x528A, 0x0000, 0x1803, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0A20 (2592) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0A30 (2608) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x0801, 0x0000, 0x5ACB, 0xEF5D, 0xFFFF, 0xFFFF, // 0x0A40 (2624) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFDD7, 0xF8E1, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0A50 (2640) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0A60 (2656) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0A70 (2672) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0A80 (2688) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFD95, 0xFFFF, // 0x0A90 (2704) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0x5ACB, 0x0000, 0x1803, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0AA0 (2720) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0AB0 (2736) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD01A, 0x1803, 0x0000, 0x528A, 0xEF5D, 0xFFFF, // 0x0AC0 (2752) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFCF3, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0AD0 (2768) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 
0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0AE0 (2784) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0AF0 (2800) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0B00 (2816) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0B10 (2832) pixels +0xF8A0, 0xFCD3, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0x4A69, 0x0000, 0x2805, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0B20 (2848) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0B30 (2864) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE01C, 0x3006, 0x0000, 0x4228, 0xE73C, // 0x0B40 (2880) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xFC71, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0B50 (2896) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0B60 (2912) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0B70 (2928) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0B80 (2944) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0B90 (2960) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFC51, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73C, 0x4228, 0x0000, 0x4008, 0xE81D, 0xF81F, // 0x0BA0 (2976) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0BB0 (2992) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE81D, 0x4809, 0x0000, 0x2965, // 0x0BC0 (3008) pixels +0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xFC30, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0BD0 (3024) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0BE0 (3040) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0BF0 (3056) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0C00 (3072) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0C10 (3088) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFC10, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x2945, 0x0000, // 0x0C20 (3104) pixels +0x600C, 0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0C30 (3120) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x700E, 0x0000, // 0x0C40 (3136) pixels +0x1082, 0xCE79, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xFC30, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 
0xF8A0, 0xF8A0, // 0x0C50 (3152) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0C60 (3168) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0C70 (3184) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0C80 (3200) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0C90 (3216) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFC0F, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0CA0 (3232) pixels +0xCE79, 0x0861, 0x0000, 0x8010, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0CB0 (3248) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x9813, // 0x0CC0 (3264) pixels +0x0000, 0x0000, 0xB5B6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xFC71, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0CD0 (3280) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0CE0 (3296) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0CF0 (3312) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0D00 (3328) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0D10 (3344) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFC51, 0xFFDF, 0xFFFF, // 0x0D20 (3360) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xB5B6, 0x0000, 0x0000, 0xB016, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0D30 (3376) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0D40 (3392) pixels +0xC819, 0x0000, 0x0000, 0x8C71, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFD13, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0D50 (3408) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0D60 (3424) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0D70 (3440) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0D80 (3456) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0D90 (3472) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0DA0 (3488) pixels +0xFCD3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8C71, 0x0000, 0x0801, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0DB0 (3504) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0DC0 (3520) 
pixels +0xF81F, 0xE81D, 0x3807, 0x0000, 0x630C, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFDF7, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0DD0 (3536) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0DE0 (3552) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0DF0 (3568) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0E00 (3584) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0E10 (3600) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0E20 (3616) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xFD96, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0x630C, 0x0000, 0x4809, 0xF01E, 0xF81F, 0xF81F, 0xF81F, // 0x0E30 (3632) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0E40 (3648) pixels +0xF81F, 0xF81F, 0xF81F, 0x780F, 0x0000, 0x18E3, 0xE71C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFEBA, 0xF902, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0E50 (3664) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0E60 (3680) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0E70 (3696) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0E80 (3712) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0E90 (3728) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0EA0 (3744) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFE79, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE71C, 0x18E3, 0x0000, 0x9012, 0xF81F, // 0x0EB0 (3760) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0EC0 (3776) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xB817, 0x0000, 0x0000, 0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF7D, 0xFA27, 0xF8A0, 0xF8A0, // 0x0ED0 (3792) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0EE0 (3808) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0EF0 (3824) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0F00 (3840) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0F10 (3856) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0F20 (3872) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF9A6, 0xFF3C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB5B6, 0x0000, // 0x0F30 (3888) pixels +0x0000, 0xC819, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0F40 (3904) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE81D, 0x3006, 0x0000, 0x73AE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xFBAE, 0xF8A0, // 0x0F50 (3920) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0F60 (3936) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0F70 (3952) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0F80 (3968) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0F90 (3984) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0FA0 (4000) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFB4D, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x0FB0 (4016) pixels +0xFFFF, 0x73AE, 0x0000, 0x4809, 0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0FC0 (4032) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x8010, 0x0000, 0x18E3, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFD75, // 0x0FD0 (4048) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0FE0 (4064) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x0FF0 (4080) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1000 (4096) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1010 (4112) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1020 (4128) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFD34, 0xFFFF, // 0x1030 (4144) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xE73C, 0x18E3, 0x0000, 0x9813, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x1040 (4160) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD01A, 0x0000, 0x0000, 0xAD75, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1050 (4176) pixels +0xFEFB, 0xF902, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1060 (4192) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1070 (4208) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1080 (4224) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1090 (4240) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x10A0 (4256) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 
0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x10B0 (4272) pixels +0xF8C1, 0xFEBA, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD75, 0x0000, 0x1002, 0xE01C, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x10C0 (4288) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x600C, 0x0000, 0x528A, 0xFFDF, 0xFFFF, 0xFFFF, // 0x10D0 (4304) pixels +0xFFFF, 0xFFDF, 0xFB2C, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x10E0 (4320) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x10F0 (4336) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1100 (4352) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1110 (4368) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1120 (4384) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1130 (4400) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xFACA, 0xFFBE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0x528A, 0x0000, 0x780F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x1140 (4416) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC018, 0x0000, 0x0000, 0xCE59, 0xFFFF, // 0x1150 (4432) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFD95, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1160 (4448) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1170 (4464) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1180 (4480) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1190 (4496) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x11A0 (4512) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x11B0 (4528) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFD55, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xCE59, 0x0000, 0x0000, 0xD01A, 0xF81F, // 0x11C0 (4544) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x4809, 0x0000, 0x6B6D, // 0x11D0 (4560) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF5D, 0xF944, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x11E0 (4576) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x11F0 (4592) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1200 (4608) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1210 (4624) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 
0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1220 (4640) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1230 (4656) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF923, 0xFF3C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x6B6D, 0x0000, // 0x1240 (4672) pixels +0x680D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xB817, 0x0000, // 0x1250 (4688) pixels +0x0000, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFC71, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1260 (4704) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1270 (4720) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1280 (4736) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1290 (4752) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x12A0 (4768) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x12B0 (4784) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFC30, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x12C0 (4800) pixels +0xD69A, 0x0000, 0x0000, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x12D0 (4816) pixels +0x500A, 0x0000, 0x738E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFEDB, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x12E0 (4832) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x12F0 (4848) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1300 (4864) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1310 (4880) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1320 (4896) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1330 (4912) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xFEBA, 0xFFFF, // 0x1340 (4928) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0x6B6D, 0x0000, 0x700E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x1350 (4944) pixels +0xF81F, 0xC819, 0x0000, 0x0000, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFBCF, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1360 (4960) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1370 (4976) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1380 (4992) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 
0xF8A0, // 0x1390 (5008) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x13A0 (5024) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8E2, 0xF985, 0xFA89, 0xFB8D, 0xFBCF, 0xFC0F, 0xFC50, 0xFC71, 0xFCB2, 0xFCF3, 0xFCB2, 0xFC50, // 0x13B0 (5040) pixels +0xFBEF, 0xFBAE, 0xFAAA, 0xF944, 0xF8C1, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x13C0 (5056) pixels +0xFB8E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x0000, 0x0000, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x13D0 (5072) pixels +0xF81F, 0xF81F, 0xF81F, 0x680D, 0x0000, 0x5ACB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFEDA, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x13E0 (5088) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x13F0 (5104) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1400 (5120) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1410 (5136) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF902, 0xFA68, 0xFBEF, 0xFCD2, // 0x1420 (5152) pixels +0xFD75, 0xFE17, 0xFE99, 0xFEDB, 0xFF3C, 0xFF9D, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1430 (5168) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFBE, 0xFF5D, 0xFEDB, 0xFE38, 0xFD75, 0xFC71, 0xFA48, 0xF8C1, 0xF8A0, 0xF8A0, // 0x1440 (5184) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xFE9A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x5ACB, 0x0000, 0x8811, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x1450 (5200) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD81B, 0x0000, 0x0000, 0xBDF7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFC0F, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1460 (5216) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1470 (5232) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1480 (5248) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1490 (5264) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF880, 0xF8E2, 0xFA68, 0xFC0F, 0xFD34, 0xFDF7, 0xFEBA, 0xFF3C, 0xFFBE, 0xFFFF, // 0x14A0 (5280) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x14B0 (5296) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF7D, // 0x14C0 (5312) pixels +0xFEBA, 0xFDB6, 0xFBEF, 0xF985, 0xF8A0, 0xFBAE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDF7, 0x0000, 0x1002, 0xE81D, 0xF81F, 0xF81F, // 0x14D0 (5328) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x9813, 0x0000, 0x2945, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF3C, 0xF8A0, 0xF8A0, // 0x14E0 (5344) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x14F0 (5360) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1500 (5376) pixels 
+0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1510 (5392) pixels +0xF8A0, 0xF8A0, 0xF880, 0xF8E1, 0xFAAA, 0xFC71, 0xFD95, 0xFE59, 0xFF1C, 0xFFBE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1520 (5408) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1530 (5424) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1540 (5440) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xFEDB, 0xFD54, 0xFF3C, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0x2945, 0x0000, 0xB016, // 0x1550 (5456) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF01E, 0x2805, 0x0000, 0x9CD3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1560 (5472) pixels +0xFF1C, 0xFD75, 0xFB6D, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1570 (5488) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1580 (5504) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF880, // 0x1590 (5520) pixels +0xFA48, 0xFBEF, 0xFD34, 0xFE38, 0xFF1B, 0xFFBE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x15A0 (5536) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x15B0 (5552) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x15C0 (5568) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CD3, // 0x15D0 (5584) pixels +0x0000, 0x500A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x0000, 0x0000, 0xDEDB, 0xFFFF, 0xFFFF, 0xFFFF, // 0x15E0 (5600) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF7D, 0xFE38, 0xFCD2, 0xFAAA, 0xF880, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x15F0 (5616) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1600 (5632) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF880, 0xFA07, 0xFBCF, 0xFD14, 0xFDF7, // 0x1610 (5648) pixels +0xFEDB, 0xFF9D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1620 (5664) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1630 (5680) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1640 (5696) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1650 (5712) pixels +0xFFFF, 0xDEDB, 0x0000, 0x0000, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x8010, 0x0000, 0x528A, 0xFFFF, 0xFFFF, // 0x1660 (5728) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF5D, 0xFE79, 0xFD55, 0xFC0F, 0xF9A5, // 0x1670 (5744) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 
0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, // 0x1680 (5760) pixels +0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF8A0, 0xF902, 0xFAEB, 0xFC51, 0xFD55, 0xFE38, 0xFEDB, 0xFF9E, 0xFFFF, 0xFFFF, // 0x1690 (5776) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x16A0 (5792) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x16B0 (5808) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x16C0 (5824) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x16D0 (5840) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A69, 0x0000, 0xA014, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF01E, 0x2004, 0x0000, 0xAD55, // 0x16E0 (5856) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x16F0 (5872) pixels +0xFFFF, 0xFFFF, 0xFF5D, 0xFEBA, 0xFE18, 0xFD55, 0xFCB2, 0xFBEF, 0xFB6D, 0xFA07, 0xF943, 0xF923, 0xF923, 0xF944, 0xF985, 0xF9E6, // 0x1700 (5888) pixels +0xFACA, 0xFBCE, 0xFC30, 0xFCF3, 0xFD55, 0xFDF7, 0xFE58, 0xFEDB, 0xFF5D, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1710 (5904) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1720 (5920) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1730 (5936) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1740 (5952) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1750 (5968) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA534, 0x0000, 0x4809, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x0000, // 0x1760 (5984) pixels +0x0000, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1770 (6000) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1780 (6016) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1790 (6032) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x17A0 (6048) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x17B0 (6064) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x17C0 (6080) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x17D0 (6096) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0xE01C, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x17E0 (6112) pixels +0x9813, 0x0000, 0x4208, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x17F0 (6128) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1800 (6144) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1810 (6160) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1820 (6176) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1830 (6192) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1840 (6208) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1850 (6224) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4208, 0x0000, 0xB016, 0xF81F, 0xF81F, // 0x1860 (6240) pixels +0xF81F, 0xF81F, 0x4809, 0x0000, 0x94B2, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1870 (6256) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1880 (6272) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1890 (6288) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x18A0 (6304) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x18B0 (6320) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x18C0 (6336) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x18D0 (6352) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9492, 0x0000, 0x780F, // 0x18E0 (6368) pixels +0xF81F, 0xF81F, 0xF81F, 0xE01C, 0x0000, 0x0000, 0xC638, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x18F0 (6384) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1900 (6400) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1910 (6416) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1920 (6432) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1930 (6448) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1940 (6464) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1950 (6480) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 
0xFFFF, 0xFFFF, 0xFFFF, 0xC638, // 0x1960 (6496) pixels +0x0000, 0x2004, 0xF01E, 0xF81F, 0xF81F, 0xC018, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1970 (6512) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1980 (6528) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1990 (6544) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x19A0 (6560) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x19B0 (6576) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x19C0 (6592) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x19D0 (6608) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x19E0 (6624) pixels +0xFFFF, 0xEF5D, 0x0000, 0x0000, 0xD81B, 0xF81F, 0xF81F, 0x9813, 0x0000, 0x4228, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x19F0 (6640) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1A00 (6656) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1A10 (6672) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1A20 (6688) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1A30 (6704) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1A40 (6720) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1A50 (6736) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1A60 (6752) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4228, 0x0000, 0xB817, 0xF81F, 0xF81F, 0x680D, 0x0000, 0x8C51, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1A70 (6768) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x1A80 (6784) pixels +0xEF5D, 0xEF5D, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF79E, 0xEF5D, 0xEF5D, 0xEF5D, // 0x1A90 (6800) pixels +0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x1AA0 (6816) pixels +0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x1AB0 (6832) pixels +0xEF5D, 0xEF5D, 0xEF5D, 0xEF7D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1AC0 (6848) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xF79E, 0xD6BA, 0xBDF7, 0xA534, 0x9CF3, 0x9CD3, 0x9CF3, 0xAD55, 0xBDF7, 0xD6BA, 0xF79E, 0xFFFF, 0xFFFF, // 
0x1AD0 (6864) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0xDEDB, 0xFFFF, // 0x1AE0 (6880) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8C51, 0x0000, 0x9012, 0xF81F, 0xF81F, 0x1002, 0x0000, 0xB596, 0xFFFF, 0xFFFF, // 0x1AF0 (6896) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1B00 (6912) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x5AEB, 0x9CF3, 0xD6BA, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, // 0x1B10 (6928) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1B20 (6944) pixels +0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1B30 (6960) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2124, 0x73AE, 0xB596, 0xEF7D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1B40 (6976) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xCE79, 0x8410, 0x0861, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0861, // 0x1B50 (6992) pixels +0x8410, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF79E, 0xC618, 0x73AE, 0x0000, // 0x1B60 (7008) pixels +0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB596, 0x0000, 0x580B, 0xF81F, 0xE01C, 0x0000, 0x0000, 0xCE79, // 0x1B70 (7024) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1B80 (7040) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7BCF, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1B90 (7056) pixels +0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1BA0 (7072) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1BB0 (7088) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x18E3, 0xAD55, 0xFFDF, 0xFFFF, 0xFFFF, // 0x1BC0 (7104) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0x738E, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1BD0 (7120) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x73AE, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xD69A, 0x8C71, 0x2104, 0x0000, // 0x1BE0 (7136) pixels +0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x0000, 0x0801, 0xF01E, 0xC819, 0x0000, // 0x1BF0 (7152) pixels +0x0000, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1C00 (7168) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4228, 0xE73C, 0xFFFF, // 0x1C10 (7184) pixels +0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1C20 (7200) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1C30 (7216) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8C51, // 0x1C40 (7232) pixels +0xFFDF, 
0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0x4A69, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1C50 (7248) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4228, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xD6BA, 0x39E7, 0x0000, 0x0000, // 0x1C60 (7264) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73C, 0x0000, 0x0000, 0xE01C, // 0x1C70 (7280) pixels +0xB817, 0x0000, 0x10A2, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1C80 (7296) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1C90 (7312) pixels +0x5AEB, 0xFFDF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1CA0 (7328) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, // 0x1CB0 (7344) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1CC0 (7360) pixels +0x0000, 0x0000, 0xAD75, 0xFFFF, 0xFFFF, 0xFFFF, 0x738E, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1CD0 (7376) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x738E, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0000, // 0x1CE0 (7392) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0x1082, // 0x1CF0 (7408) pixels +0x0000, 0xD01A, 0xA014, 0x0000, 0x4A49, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, // 0x1D00 (7424) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1D10 (7440) pixels +0x0000, 0x0000, 0x0000, 0xBDF7, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1D20 (7456) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, // 0x1D30 (7472) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1D40 (7488) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x18E3, 0xEF7D, 0xFFFF, 0xD69A, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1D50 (7504) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xD6BA, 0xFFFF, 0xFFFF, // 0x1D60 (7520) pixels +0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1D70 (7536) pixels +0xFFFF, 0x4A49, 0x0000, 0xB817, 0x8811, 0x0000, 0x73AE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, // 0x1D80 (7552) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8410, 0x8C71, 0x8C71, 0x8C71, 0x8410, 0x2104, 0x0000, 0x0000, 0x0000, // 0x1D90 (7568) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x6B6D, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1DA0 (7584) pixels +0x6B4D, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1DB0 (7600) pixels +0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x39E7, 0x8C71, 0x8C71, 0x8C71, 0x8C71, 0x6B4D, 0x0000, 0x0000, // 0x1DC0 (7616) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC618, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1DD0 (7632) pixels +0x0000, 0x528A, 0xBDD7, 0xD6BA, 0xDEFB, 0xD6BA, 0xAD75, 0x4208, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1082, 0x8430, 0xDEFB, // 0x1DE0 (7648) pixels +0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1DF0 (7664) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0x73AE, 0x0000, 0xA815, 0x700E, 0x0000, 0x8C71, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1E00 (7680) pixels +0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0x4A49, // 0x1E10 (7696) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2124, 0xFFDF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1E20 (7712) pixels +0x0000, 0x0000, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1E30 (7728) pixels +0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x73AE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1E40 (7744) pixels +0xBDF7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x9CF3, 0xFFFF, 0x73AE, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1E50 (7760) pixels +0x0000, 0x0000, 0x0000, 0xE71C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF79E, 0x632C, 0x0000, 0x0020, 0x6B6D, 0xBDF7, 0xEF7D, // 0x1E60 (7776) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1E70 (7792) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8C51, 0x0000, 0x9813, 0x500A, 0x0000, 0xA514, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1E80 (7808) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1E90 (7824) pixels +0xFFFF, 0xBDD7, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0841, 0xF79E, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, // 0x1EA0 (7840) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1EB0 (7856) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x73AE, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1EC0 (7872) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0x630C, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x94B2, 0xFFFF, 0x738E, 0x0000, 0x0000, // 0x1ED0 (7888) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0xAD75, 0xE73C, 0xFFFF, // 0x1EE0 (7904) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, // 0x1EF0 (7920) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA514, 0x0000, 0x8010, 0x2004, 0x0000, 0xAD75, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1F00 (7936) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, // 0x1F10 (7952) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xCE59, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x10A2, 0xF7BE, 0xFFFF, 0x9CF3, 0x0000, // 0x1F20 (7968) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD75, 0xE71C, 0xE71C, 
0xE71C, 0xE71C, 0xE71C, 0xE71C, 0xE71C, 0xE71C, 0xE71C, // 0x1F30 (7984) pixels +0xEF7D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x73AE, 0xFFFF, // 0x1F40 (8000) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8430, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x9CD3, 0xFFFF, 0x9CD3, // 0x1F50 (8016) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x6B6D, 0xB596, 0xD6BA, 0xEF7D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x1F60 (8032) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1F70 (8048) pixels +0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD75, 0x0000, 0x700E, 0x0000, 0x0000, 0xB5B6, 0xFFFF, // 0x1F80 (8064) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xEF5D, // 0x1F90 (8080) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA534, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4228, 0xFFFF, 0xFFFF, // 0x1FA0 (8096) pixels +0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1FB0 (8112) pixels +0x0000, 0x0000, 0xB596, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1FC0 (8128) pixels +0x73AE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0x4208, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xAD75, // 0x1FD0 (8144) pixels +0xFFFF, 0xD6BA, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0020, 0x52AA, 0x94B2, // 0x1FE0 (8160) pixels +0xC638, 0xEF7D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x1FF0 (8176) pixels +0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB596, 0x0000, 0x680D, 0x0000, 0x0000, // 0x2000 (8192) pixels +0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2010 (8208) pixels +0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x18E3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x94B2, // 0x2020 (8224) pixels +0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2030 (8240) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0xB596, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2040 (8256) pixels +0x0000, 0x0000, 0x73AE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0x9CD3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2050 (8272) pixels +0x0000, 0xD6BA, 0xFFFF, 0xFFFF, 0x8410, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2060 (8288) pixels +0x0000, 0x0000, 0x0000, 0x0841, 0x6B4D, 0xBDF7, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, // 0x2070 (8304) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDD7, 0x0000, 0x500A, // 0x2080 (8320) pixels +0x0000, 0x0000, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2090 (8336) pixels +0x0000, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xE71C, 0x8C71, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, // 0x20A0 (8352) pixels +0x0020, 0xDEFB, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x20B0 (8368) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB596, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, // 0x20C0 (8384) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x738E, 0xFFFF, 0xFFFF, 0xFFDF, 0xCE59, 0x5AEB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x20D0 (8400) pixels +0x0000, 0x0000, 0x630C, 0xFFFF, 0xFFFF, 0xFFFF, 0xF79E, 0x6B4D, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x20E0 (8416) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4A69, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0000, // 0x20F0 (8432) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, // 0x2100 (8448) pixels +0x0000, 0x3006, 0x1002, 0x0000, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, // 0x2110 (8464) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xDEFB, 0xC618, 0x6B6D, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2120 (8480) pixels +0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2130 (8496) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB596, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, // 0x2140 (8512) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x738E, 0xE73C, 0xAD55, 0x4208, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2150 (8528) pixels +0x0000, 0x0000, 0x0000, 0x2104, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xBDD7, 0x39E7, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2160 (8544) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0841, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2170 (8560) pixels +0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2180 (8576) pixels +0xFFFF, 0xC618, 0x0000, 0x1002, 0x2805, 0x0000, 0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, // 0x2190 (8592) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x1082, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x21A0 (8608) pixels +0x0000, 0x0000, 0x0000, 0x9CD3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x21B0 (8624) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB596, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x21C0 (8640) pixels +0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0861, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x21D0 (8656) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x2945, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0xC618, 0x73AE, // 0x21E0 (8672) pixels +0x18C3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x10A2, 0xDEFB, // 0x21F0 (8688) pixels +0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2200 (8704) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xBDD7, 0x0000, 0x2805, 0x4008, 0x0000, 0xB5B6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2210 
(8720) pixels +0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2220 (8736) pixels +0x0000, 0x0000, 0x0000, 0x39C7, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2230 (8752) pixels +0x0000, 0x0000, 0x8410, 0xAD55, 0xAD55, 0xAD55, 0xAD55, 0xAD55, 0xAD55, 0xAD55, 0xAD55, 0xAD55, 0xD6BA, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2240 (8768) pixels +0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2250 (8784) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x738E, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2260 (8800) pixels +0xFFFF, 0xFFFF, 0xF79E, 0xD69A, 0xAD75, 0x7BCF, 0x2945, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2270 (8816) pixels +0x0000, 0x8C51, 0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2280 (8832) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB596, 0x0000, 0x4008, 0x580B, 0x0000, 0xAD75, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2290 (8848) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x22A0 (8864) pixels +0x0000, 0x0000, 0x0000, 0x3186, 0xA534, 0xF79E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, // 0x22B0 (8880) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x22C0 (8896) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x22D0 (8912) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x5AEB, 0xCE59, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x22E0 (8928) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xD6BA, 0x9CD3, 0x2104, 0x0000, 0x0000, 0x0000, 0x0000, // 0x22F0 (8944) pixels +0x0000, 0x0000, 0x0000, 0x3186, 0xFFDF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, // 0x2300 (8960) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD75, 0x0000, 0x580B, 0x700E, 0x0000, 0xA514, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2310 (8976) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2320 (8992) pixels +0x0000, 0x0000, 0x0000, 0x6B4D, 0xC618, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, // 0x2330 (9008) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2340 (9024) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2350 (9040) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x18C3, 0x9492, 0xD6BA, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2360 (9056) pixels +0xFFFF, 0xFFDF, 0xCE79, 0x8C51, 0xCE59, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEDB, 0x0000, 0x0000, // 0x2370 (9072) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xFFDF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2380 (9088) pixels +0xA534, 0xFFFF, 
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA514, 0x0000, 0x700E, 0x8010, 0x0000, 0x8C71, 0xFFFF, // 0x2390 (9104) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x23A0 (9120) pixels +0x0000, 0x0000, 0x39C7, 0x9CF3, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x23B0 (9136) pixels +0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xDEDB, 0xDEDB, 0xDEDB, 0xDEDB, 0xDEDB, 0xDEDB, 0xDEDB, // 0x23C0 (9152) pixels +0xDEDB, 0xDEDB, 0xDEDB, 0xDEDB, 0xDEDB, 0xF7BE, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x23D0 (9168) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x630C, 0xB5B6, 0xEF7D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x23E0 (9184) pixels +0xFFFF, 0xDEFB, 0xA514, 0x39C7, 0x0000, 0x0000, 0x18E3, 0xC618, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF7D, // 0x23F0 (9200) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x18E3, 0xFFDF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2400 (9216) pixels +0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8C51, 0x0000, 0x8010, 0x9813, 0x0000, // 0x2410 (9232) pixels +0x73AE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2420 (9248) pixels +0x0000, 0x2124, 0x9492, 0xD69A, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2430 (9264) pixels +0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2440 (9280) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xDEDB, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2450 (9296) pixels +0x0000, 0x0000, 0x0000, 0x528A, 0xAD75, 0xE71C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2460 (9312) pixels +0xFFFF, 0xDEDB, 0x5ACB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x5AEB, 0xAD75, 0xD69A, 0xDEFB, 0xDEFB, 0xD6BA, // 0x2470 (9328) pixels +0xBDD7, 0x5ACB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x6B4D, 0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, // 0x2480 (9344) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x73AE, 0x0000, 0x9813, // 0x2490 (9360) pixels +0xA815, 0x0000, 0x4A49, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, // 0x24A0 (9376) pixels +0x0000, 0x0000, 0x0000, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x24B0 (9392) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x24C0 (9408) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xDEDB, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, // 0x24D0 (9424) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x6B4D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x24E0 (9440) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xEF7D, 0x2945, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x24F0 (9456) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xB5B6, 0xFFFF, 0xFFFF, 0xC618, 0x0000, // 0x2500 (9472) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, // 0x2510 (9488) pixels +0x0000, 0xB016, 0xC018, 0x0000, 0x10A2, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, 0x0000, 0x0000, // 0x2520 (9504) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2530 (9520) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2540 (9536) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xDEDB, 0xFFFF, 0xFFFF, 0xDEFB, 0x0000, // 0x2550 (9552) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x738E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2560 (9568) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0020, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2570 (9584) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x5ACB, 0xF7BE, 0xFFFF, 0xFFFF, // 0x2580 (9600) pixels +0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2590 (9616) pixels +0xF7BE, 0x10A2, 0x0000, 0xC018, 0xD01A, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4A49, // 0x25A0 (9632) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x25B0 (9648) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x25C0 (9664) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xDEDB, 0xFFFF, 0xFFFF, // 0x25D0 (9680) pixels +0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x738E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x25E0 (9696) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC638, 0x3186, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x25F0 (9712) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4228, 0xE73C, 0xFFFF, // 0x2600 (9728) pixels +0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2610 (9744) pixels +0xFFFF, 0xFFFF, 0xEF5D, 0x0000, 0x0000, 0xD81B, 0xE81D, 0x0000, 0x0000, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2620 (9760) pixels +0xFFFF, 0x4A49, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2630 (9776) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CF3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x2640 (9792) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xDEDB, // 0x2650 (9808) pixels +0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x738E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2660 (9824) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 
0xFFFF, 0xFFFF, 0xEF5D, 0x94B2, 0x18C3, 0x0000, // 0x2670 (9840) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x7BEF, 0xEF5D, // 0x2680 (9856) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC618, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0xA534, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2690 (9872) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x0000, 0x0000, 0xE81D, 0xF81F, 0x3807, 0x0000, 0xB596, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x26A0 (9888) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xAD75, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xF79E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x26B0 (9904) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xCE59, 0xA534, 0xA534, 0xA534, // 0x26C0 (9920) pixels +0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, // 0x26D0 (9936) pixels +0xA534, 0xEF5D, 0xFFFF, 0xFFFF, 0xEF5D, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, // 0x26E0 (9952) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x26F0 (9968) pixels +0xEF7D, 0xBDD7, 0x7BCF, 0x18E3, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x3186, 0x9492, 0xD69A, // 0x2700 (9984) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEDB, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xA534, 0xCE79, 0xFFFF, // 0x2710 (10000) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xB596, 0x0000, 0x4008, 0xF81F, 0xF81F, 0x780F, 0x0000, 0x8C51, 0xFFFF, 0xFFFF, // 0x2720 (10016) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2730 (10032) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2740 (10048) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2750 (10064) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2760 (10080) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2770 (10096) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xE71C, 0xD69A, 0xBDF7, 0xBDD7, 0xBDD7, 0xBDD7, 0xC618, 0xD69A, 0xE71C, 0xFFDF, // 0x2780 (10112) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2790 (10128) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8C51, 0x0000, 0x8010, 0xF81F, 0xF81F, 0xA815, 0x0000, 0x4228, // 0x27A0 (10144) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x27B0 (10160) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x27C0 (10176) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x27D0 (10192) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 
0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x27E0 (10208) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x27F0 (10224) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2800 (10240) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2810 (10256) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x4228, 0x0000, 0xA815, 0xF81F, 0xF81F, 0xC819, // 0x2820 (10272) pixels +0x0000, 0x0000, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2830 (10288) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2840 (10304) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2850 (10320) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2860 (10336) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2870 (10352) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2880 (10368) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2890 (10384) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0x0000, 0x0000, 0xD01A, 0xF81F, // 0x28A0 (10400) pixels +0xF81F, 0xE81D, 0x0000, 0x0000, 0xC638, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x28B0 (10416) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x28C0 (10432) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x28D0 (10448) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x28E0 (10464) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x28F0 (10480) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2900 (10496) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2910 (10512) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC638, 0x0000, 0x0000, // 0x2920 (10528) pixels +0xF01E, 0xF81F, 0xF81F, 0xF81F, 0x580B, 0x0000, 0x9492, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2930 (10544) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2940 (10560) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 
0xFFFF, 0xFFFF, 0xFFFF, // 0x2950 (10576) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2960 (10592) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2970 (10608) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2980 (10624) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2990 (10640) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9492, // 0x29A0 (10656) pixels +0x0000, 0x680D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xA014, 0x0000, 0x4208, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x29B0 (10672) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x29C0 (10688) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x29D0 (10704) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x29E0 (10720) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x29F0 (10736) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A00 (10752) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A10 (10768) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A20 (10784) pixels +0xFFFF, 0x4208, 0x0000, 0xA815, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD01A, 0x0000, 0x0000, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A30 (10800) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A40 (10816) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A50 (10832) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A60 (10848) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A70 (10864) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A80 (10880) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2A90 (10896) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2AA0 (10912) pixels +0xFFFF, 0xFFFF, 0xDEFB, 0x0000, 0x0000, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF01E, 0x3006, 0x0000, 0xAD55, 0xFFFF, 0xFFFF, // 0x2AB0 (10928) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 
0xFFFF, 0xFFFF, // 0x2AC0 (10944) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2AD0 (10960) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2AE0 (10976) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2AF0 (10992) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF7E, 0xDEFD, 0xCE7C, 0xBDFA, 0xAD79, // 0x2B00 (11008) pixels +0x9CD8, 0x8437, 0x7BD6, 0x5AD4, 0x39F4, 0x3193, 0x2953, 0x2133, 0x2133, 0x2973, 0x4214, 0x6B75, 0x7BF6, 0x9CD8, 0xB599, 0xC63B, // 0x2B10 (11024) pixels +0xDEDD, 0xF79E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2B20 (11040) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD55, 0x0000, 0x3807, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x9012, 0x0000, 0x528A, // 0x2B30 (11056) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2B40 (11072) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2B50 (11088) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2B60 (11104) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2B70 (11120) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF79E, 0xDEDD, 0xC63B, 0xAD59, 0x8C57, 0x5AF5, 0x2113, 0x0872, 0x0872, 0x0872, // 0x2B80 (11136) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2B90 (11152) pixels +0x0872, 0x0872, 0x0872, 0x1092, 0x4234, 0x8416, 0xAD79, 0xCE7C, 0xF79E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2BA0 (11168) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x528A, 0x0000, 0x9813, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD01A, // 0x2BB0 (11184) pixels +0x0000, 0x0000, 0xDEDB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2BC0 (11200) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2BD0 (11216) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2BE0 (11232) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2BF0 (11248) pixels +0xFFFF, 0xFFFF, 0xF79E, 0xDEDC, 0xC61A, 0xA518, 0x7BD6, 0x4214, 0x0852, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2C00 (11264) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2C10 (11280) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x5AF5, 0x9CF8, 0xCE7C, 0xEF7E, 0xFFFF, // 0x2C20 (11296) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEDB, 0x0000, 0x0000, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, // 0x2C30 (11312) pixels +0xF81F, 0xF81F, 0x3807, 0x0000, 0x9CD3, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2C40 (11328) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2C50 (11344) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2C60 (11360) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BF, 0xE71D, // 0x2C70 (11376) pixels +0xC63B, 0xAD59, 0x7BF6, 0x4234, 0x0852, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2C80 (11392) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2C90 (11408) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2CA0 (11424) pixels +0x10B2, 0x73B6, 0xB5BA, 0xE73D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x9CD3, 0x0000, 0x4809, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2CB0 (11440) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xA014, 0x0000, 0x2965, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5E, 0xAD59, 0xDEDC, 0xFFDF, 0xFFFF, // 0x2CC0 (11456) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2CD0 (11472) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2CE0 (11488) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BF, 0xE71D, 0xCE5B, 0xB599, 0x8C77, 0x52B4, // 0x2CF0 (11504) pixels +0x18D3, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2D00 (11520) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2D10 (11536) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2D20 (11552) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0xDEFD, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0x2945, 0x0000, 0xA815, 0xF81F, 0xF81F, // 0x2D30 (11568) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE01C, 0x0000, 0x0000, 0xBDF7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x7BF6, 0x0872, // 0x2D40 (11584) pixels +0x2973, 0x7BF6, 0xB59A, 0xD6BC, 0xEF7E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2D50 (11600) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2D60 (11616) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BF, 0xE73E, 0xD6BC, 0xBDFA, 0xA539, 0x8416, 0x4A54, 0x18F3, 0x0872, 0x0872, 0x0872, // 0x2D70 (11632) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2D80 (11648) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2D90 (11664) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 
0x2DA0 (11680) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x73B6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDF7, 0x0000, 0x0801, 0xE01C, // 0x2DB0 (11696) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x700E, 0x0000, 0x5ACB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2DC0 (11712) pixels +0xD6BC, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x10B2, 0x4A54, 0x8C57, 0xAD59, 0xC63B, 0xDEDC, 0xEF5E, 0xF7BF, 0xFFFF, 0xFFFF, // 0x2DD0 (11728) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BF, 0xEF7E, // 0x2DE0 (11744) pixels +0xDEFD, 0xD6BC, 0xC61B, 0xB599, 0x9CF8, 0x7BF6, 0x4A54, 0x18F3, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2DF0 (11760) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2E00 (11776) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2E10 (11792) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2E20 (11808) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0xD69C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x5ACB, 0x0000, // 0x2E30 (11824) pixels +0x8010, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x0000, 0x0000, 0xD69A, 0xFFFF, // 0x2E40 (11840) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0x73B6, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x10B2, // 0x2E50 (11856) pixels +0x2133, 0x52B4, 0x73B6, 0x7BF6, 0x8C57, 0x94B7, 0x9CD8, 0x94B7, 0x8C77, 0x8437, 0x8416, 0x7BD6, 0x73B5, 0x6315, 0x4214, 0x2133, // 0x2E60 (11872) pixels +0x10B2, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2E70 (11888) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2E80 (11904) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2E90 (11920) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2EA0 (11936) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x6B75, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, // 0x2EB0 (11952) pixels +0x0000, 0x0000, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x580B, 0x0000, // 0x2EC0 (11968) pixels +0x738E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEDC, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2ED0 (11984) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2EE0 (12000) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2EF0 (12016) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2F00 (12032) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2F10 
(12048) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2F20 (12064) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0xD6BC, 0xFFFF, 0xFFFF, 0xFFFF, // 0x2F30 (12080) pixels +0xFFFF, 0x738E, 0x0000, 0x680D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2F40 (12096) pixels +0xC018, 0x0000, 0x0000, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x8C57, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2F50 (12112) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2F60 (12128) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2F70 (12144) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2F80 (12160) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2F90 (12176) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2FA0 (12192) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x8416, 0xFFFF, 0xFFFF, // 0x2FB0 (12208) pixels +0xFFFF, 0xFFFF, 0xD69A, 0x0000, 0x0000, 0xC819, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2FC0 (12224) pixels +0xF81F, 0xF81F, 0xF81F, 0x580B, 0x0000, 0x6B6D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73E, 0x2133, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2FD0 (12240) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2FE0 (12256) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x2FF0 (12272) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3000 (12288) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3010 (12304) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3020 (12320) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x2113, 0xE73D, // 0x3030 (12336) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x6B6D, 0x0000, 0x600C, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3040 (12352) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC018, 0x0000, 0x0000, 0xCE59, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD59, 0x0872, 0x0872, // 0x3050 (12368) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3060 (12384) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3070 (12400) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3080 (12416) 
pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3090 (12432) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x30A0 (12448) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x30B0 (12464) pixels +0xAD59, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xCE59, 0x0000, 0x0000, 0xC819, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x30C0 (12480) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x680D, 0x0000, 0x528A, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BF, // 0x30D0 (12496) pixels +0x5AD4, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x30E0 (12512) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x30F0 (12528) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3100 (12544) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3110 (12560) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3120 (12576) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3130 (12592) pixels +0x0872, 0x5AD4, 0xF7BF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0x528A, 0x0000, 0x700E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3140 (12608) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD81B, 0x0000, 0x0000, 0xAD75, 0xFFFF, 0xFFFF, // 0x3150 (12624) pixels +0xFFFF, 0xFFFF, 0xDEDC, 0x10B2, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3160 (12640) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3170 (12656) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3180 (12672) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3190 (12688) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x31A0 (12704) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x31B0 (12720) pixels +0x0872, 0x0872, 0x10B2, 0xD6BC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xAD75, 0x0000, 0x0801, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x31C0 (12736) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x8811, 0x0000, 0x18E3, // 0x31D0 (12752) pixels +0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xA539, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x31E0 (12768) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x31F0 (12784) pixels 
+0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3200 (12800) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3210 (12816) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3220 (12832) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3230 (12848) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0xA539, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73C, 0x18E3, 0x0000, 0x9813, 0xF81F, 0xF81F, 0xF81F, // 0x3240 (12864) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE81D, // 0x3250 (12880) pixels +0x3006, 0x0000, 0x73AE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0x6B75, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3260 (12896) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3270 (12912) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3280 (12928) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3290 (12944) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x32A0 (12960) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x32B0 (12976) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x6B75, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0x73AE, 0x0000, 0x4008, 0xF01E, 0xF81F, // 0x32C0 (12992) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x32D0 (13008) pixels +0xF81F, 0xF81F, 0xC018, 0x0000, 0x0000, 0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5E, 0x39D3, 0x0872, 0x0872, 0x0872, 0x0872, // 0x32E0 (13024) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x32F0 (13040) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3300 (13056) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3310 (13072) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3320 (13088) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3330 (13104) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x39D3, 0xEF5E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDD7, 0x0000, 0x0000, 0xC819, // 0x3340 (13120) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3350 (13136) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0x8010, 0x0000, 0x18E3, 0xE71C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69C, 0x10B2, 0x0872, // 0x3360 (13152) pixels +0x0872, 
0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3370 (13168) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3380 (13184) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3390 (13200) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x33A0 (13216) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x33B0 (13232) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x1092, 0xCE7C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE71C, 0x18E3, 0x0000, // 0x33C0 (13248) pixels +0x8811, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x33D0 (13264) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE81D, 0x3807, 0x0000, 0x630C, 0xF7BE, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x33E0 (13280) pixels +0xB5BA, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x33F0 (13296) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3400 (13312) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3410 (13328) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3420 (13344) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3430 (13360) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0xB599, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0x630C, // 0x3440 (13376) pixels +0x0000, 0x4809, 0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3450 (13392) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x0000, 0x0000, 0x8C71, 0xFFFF, 0xFFFF, // 0x3460 (13408) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0x9CF8, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3470 (13424) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3480 (13440) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3490 (13456) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x34A0 (13472) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x34B0 (13488) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x9CD8, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x34C0 (13504) pixels +0x8C71, 0x0000, 0x0801, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x34D0 (13520) pixels +0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xA014, 0x0000, 0x0000, // 0x34E0 (13536) pixels +0xB5B6, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0x8C77, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x34F0 (13552) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3500 (13568) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3510 (13584) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3520 (13600) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3530 (13616) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x8437, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3540 (13632) pixels +0xFFFF, 0xB5B6, 0x0000, 0x0000, 0xA815, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3550 (13648) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3560 (13664) pixels +0x700E, 0x0000, 0x1082, 0xCE79, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0x8436, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3570 (13680) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3580 (13696) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3590 (13712) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x35A0 (13728) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x35B0 (13744) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x7BF6, 0xFFDF, 0xFFFF, 0xFFFF, // 0x35C0 (13760) pixels +0xFFFF, 0xFFFF, 0xCE79, 0x1082, 0x0000, 0x8010, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35D0 (13776) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35E0 (13792) pixels +0xF81F, 0xF81F, 0xE81D, 0x500A, 0x0000, 0x2965, 0xE71C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0x8437, 0x0872, 0x0872, 0x0872, // 0x35F0 (13808) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3600 (13824) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3610 (13840) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3620 (13856) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3630 (13872) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x7BF6, 0xF7BF, 0xFFFF, // 0x3640 (13888) pixels +0xFFFF, 0xFFFF, 0xFFFF, 
0xE71C, 0x2965, 0x0000, 0x580B, 0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3650 (13904) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3660 (13920) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE01C, 0x3006, 0x0000, 0x4228, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0x8C77, // 0x3670 (13936) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3680 (13952) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3690 (13968) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x36A0 (13984) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x36B0 (14000) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x8437, 0xFFDF, // 0x36C0 (14016) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73C, 0x4228, 0x0000, 0x4008, 0xE81D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36D0 (14032) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36E0 (14048) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD01A, 0x1803, 0x0000, 0x528A, 0xEF5D, 0xFFFF, 0xFFFF, 0xFFFF, // 0x36F0 (14064) pixels +0xFFFF, 0xFFFF, 0x9CF8, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3700 (14080) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3710 (14096) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3720 (14112) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3730 (14128) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x9CD8, // 0x3740 (14144) pixels +0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0x528A, 0x0000, 0x2805, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3750 (14160) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3760 (14176) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x0801, 0x0000, 0x5ACB, 0xEF5D, // 0x3770 (14192) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDDA, 0x10B2, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3780 (14208) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3790 (14224) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x37A0 (14240) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x37B0 (14256) pixels +0x0872, 0x0872, 0x0872, 0x0872, 
0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x1092, // 0x37C0 (14272) pixels +0xB599, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5D, 0x5ACB, 0x0000, 0x1803, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37D0 (14288) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37E0 (14304) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC018, 0x0801, // 0x37F0 (14320) pixels +0x0000, 0x528A, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69C, 0x39F3, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3800 (14336) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3810 (14352) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3820 (14368) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3830 (14384) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3840 (14400) pixels +0x31B3, 0xCE7C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73C, 0x528A, 0x0000, 0x1803, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3850 (14416) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3860 (14432) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3870 (14448) pixels +0xF81F, 0xC018, 0x0801, 0x0000, 0x4228, 0xE71C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF5E, 0x7395, 0x0872, 0x0872, 0x0872, // 0x3880 (14464) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3890 (14480) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x38A0 (14496) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x38B0 (14512) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x38C0 (14528) pixels +0x0872, 0x6B55, 0xE73E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE71C, 0x4228, 0x0000, 0x2004, 0xD01A, 0xF81F, 0xF81F, 0xF81F, // 0x38D0 (14544) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38E0 (14560) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38F0 (14576) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x1803, 0x0000, 0x2965, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xAD59, // 0x3900 (14592) pixels +0x10B2, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3910 (14608) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3920 (14624) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 
0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3930 (14640) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3940 (14656) pixels +0x0872, 0x1092, 0xA518, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x2965, 0x0000, 0x3006, 0xD01A, 0xF81F, 0xF81F, // 0x3950 (14672) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3960 (14688) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3970 (14704) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xD01A, 0x3006, 0x0000, 0x10A2, 0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3980 (14720) pixels +0xFFFF, 0xFFFF, 0xDEDC, 0x5AD4, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3990 (14736) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x39A0 (14752) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x39B0 (14768) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x39C0 (14784) pixels +0x0872, 0x0872, 0x5294, 0xD6BC, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDD7, 0x10A2, 0x0000, 0x4008, 0xD81B, 0xF81F, // 0x39D0 (14800) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39E0 (14816) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39F0 (14832) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE01C, 0x4809, 0x0000, 0x0000, 0x9492, 0xF7BE, // 0x3A00 (14848) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BF, 0xAD59, 0x2113, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3A10 (14864) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3A20 (14880) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3A30 (14896) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3A40 (14912) pixels +0x0872, 0x0872, 0x18F3, 0xA518, 0xF7BF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0x9492, 0x0000, 0x0000, 0x580B, 0xE81D, // 0x3A50 (14928) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A60 (14944) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A70 (14960) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE81D, 0x700E, 0x0000, // 0x3A80 (14976) pixels +0x0000, 0x632C, 0xE71C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73D, 0x8416, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3A90 (14992) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 
0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3AA0 (15008) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3AB0 (15024) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3AC0 (15040) pixels +0x0872, 0x0872, 0x0872, 0x7BF6, 0xE71D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE71C, 0x632C, 0x0000, 0x0000, 0x8010, // 0x3AD0 (15056) pixels +0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AE0 (15072) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AF0 (15088) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B00 (15104) pixels +0xF81F, 0x9813, 0x0000, 0x0000, 0x2104, 0xBDD7, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD6BC, 0x7395, 0x0872, // 0x3B10 (15120) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3B20 (15136) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3B30 (15152) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3B40 (15168) pixels +0x0872, 0x0872, 0x0872, 0x6B55, 0xD69C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xBDD7, 0x2104, 0x0000, 0x0801, // 0x3B50 (15184) pixels +0xA815, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B60 (15200) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B70 (15216) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B80 (15232) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x3807, 0x0000, 0x0000, 0x7BCF, 0xE73C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3B90 (15248) pixels +0xFFFF, 0xD69C, 0x73B6, 0x1092, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3BA0 (15264) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3BB0 (15280) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3BC0 (15296) pixels +0x0872, 0x0872, 0x0872, 0x7395, 0xD69C, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73C, 0x7BCF, 0x0000, 0x0000, // 0x3BD0 (15312) pixels +0x4809, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BE0 (15328) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BF0 (15344) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C00 (15360) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xE81D, 0x780F, 0x0000, 0x0000, 0x2104, 0xB596, 0xFFDF, 0xFFFF, 0xFFFF, // 0x3C10 (15376) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFD, 0x94B8, 0x2133, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3C20 (15392) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3C30 (15408) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3C40 (15424) pixels +0x0872, 0x0872, 0x2113, 0x8C77, 0xDEDD, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xB596, 0x2104, 0x0000, // 0x3C50 (15440) pixels +0x0000, 0x8811, 0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C60 (15456) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C70 (15472) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C80 (15488) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xB817, 0x3006, 0x0000, 0x0000, 0x528A, // 0x3C90 (15504) pixels +0xC638, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF79E, 0xBDFA, 0x6B55, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3CA0 (15520) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3CB0 (15536) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3CC0 (15552) pixels +0x0872, 0x0872, 0x6335, 0xBDDA, 0xEF7E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xC638, 0x528A, 0x0000, // 0x3CD0 (15568) pixels +0x0000, 0x4008, 0xC819, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CE0 (15584) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CF0 (15600) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D00 (15616) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE81D, 0x9012, // 0x3D10 (15632) pixels +0x0000, 0x0000, 0x0000, 0x6B4D, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xE73E, 0xB5BA, // 0x3D20 (15648) pixels +0x6B75, 0x10B2, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3D30 (15664) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3D40 (15680) pixels +0x1092, 0x6B55, 0xB59A, 0xE73D, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x6B4D, 0x0000, // 0x3D50 (15696) pixels +0x0000, 0x0801, 0x9813, 0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D60 (15712) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D70 (15728) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D80 (15744) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D90 (15760) pixels +0xF81F, 0xF81F, 0xD01A, 0x680D, 0x0000, 0x0000, 0x0000, 0x6B6D, 0xD69A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3DA0 (15776) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xF79E, 0xCE5B, 0xA518, 0x6335, 0x1092, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, // 0x3DB0 (15792) pixels +0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x1092, 0x5AF5, // 0x3DC0 (15808) pixels +0x9CF8, 0xCE5B, 0xEF7E, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xD69A, 0x6B6D, 0x0000, // 0x3DD0 (15824) pixels +0x0000, 0x0000, 0x700E, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DE0 (15840) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DF0 (15856) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E00 (15872) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E10 (15888) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC018, 0x500A, 0x0000, 0x0000, 0x0000, 0x5ACB, 0xC618, 0xF7BE, 0xFFFF, 0xFFFF, // 0x3E20 (15904) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BF, 0xDEFD, 0xC63B, 0xA539, 0x8C57, 0x6315, 0x3193, // 0x3E30 (15920) pixels +0x1092, 0x1092, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x0872, 0x1092, 0x2973, 0x6315, 0x8437, 0xA539, 0xC61B, 0xDEDD, // 0x3E40 (15936) pixels +0xF7BF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF7BE, 0xC618, 0x5ACB, 0x0000, // 0x3E50 (15952) pixels +0x0000, 0x0000, 0x600C, 0xC819, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E60 (15968) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E70 (15984) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E80 (16000) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E90 (16016) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC018, 0x580B, 0x0000, 0x0000, 0x0000, 0x2965, // 0x3EA0 (16032) pixels +0x9CF3, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3EB0 (16048) pixels +0xFFFF, 0xFFFF, 0xFFDF, 0xEF7E, 0xEF5E, 0xE73D, 0xE71D, 0xE71D, 0xE73D, 0xEF5E, 0xEF7E, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3EC0 (16064) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0x9CF3, 0x2965, 0x0000, // 0x3ED0 (16080) pixels +0x0000, 0x0000, 0x680D, 0xC819, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EE0 (16096) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EF0 (16112) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F00 (16128) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F10 (16144) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x700E, // 0x3F20 (16160) pixels +0x0801, 0x0000, 0x0000, 0x0000, 0x5ACB, 0xAD75, 0xDEFB, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3F30 (16176) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3F40 (16192) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xDEFB, 0xAD75, 0x52AA, 0x0000, 0x0000, // 0x3F50 (16208) pixels +0x0000, 0x0801, 0x8010, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F60 (16224) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F70 (16240) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F80 (16256) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F90 (16272) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FA0 (16288) pixels +0xF81F, 0xF81F, 0xE01C, 0xA014, 0x3807, 0x0000, 0x0000, 0x0000, 0x0000, 0x4228, 0x94B2, 0xCE59, 0xEF7D, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3FB0 (16304) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x3FC0 (16320) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xEF7D, 0xCE59, 0x94B2, 0x4228, 0x0000, 0x0000, 0x0000, // 0x3FD0 (16336) pixels +0x0000, 0x4809, 0xA815, 0xE81D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FE0 (16352) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FF0 (16368) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4000 (16384) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4010 (16400) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4020 (16416) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xC819, 0x9012, 0x3006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4A69, // 0x4030 (16432) pixels +0x9492, 0xB5B6, 0xD69A, 0xEF5D, 0xFFDF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, // 0x4040 (16448) pixels +0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFDF, 0xEF5D, 0xD69A, 0xB5B6, 0x9492, 0x4A69, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x4050 (16464) pixels +0x3807, 0x9813, 0xD01A, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4060 (16480) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4070 (16496) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4080 (16512) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4090 (16528) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40A0 (16544) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF01E, 0xD01A, 0xA014, 0x580B, 0x0000, // 0x40B0 (16560) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x2104, 0x528A, 0x7BEF, 0x9492, 0xAD55, 0xB596, 0xBDD7, 0xBDF7, 0xC638, 0xC638, // 0x40C0 (16576) pixels +0xBDF7, 0xBDD7, 0xB596, 0xAD55, 0x9492, 0x7BEF, 0x528A, 0x2104, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x600C, // 0x40D0 (16592) pixels +0xA815, 0xD81B, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40E0 (16608) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40F0 (16624) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4100 (16640) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4110 (16656) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4120 (16672) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4130 (16688) pixels +0xF81F, 0xE81D, 0xC819, 0xA815, 0x780F, 0x3006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, // 0x4140 (16704) pixels +0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x4008, 0x8010, 0xB016, 0xD01A, // 0x4150 (16720) pixels +0xF01E, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4160 (16736) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4170 (16752) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4180 (16768) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4190 (16784) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41A0 (16800) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41B0 (16816) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xE81D, 0xD01A, 0xC018, 0xA815, 0x9813, 0x8010, 0x700E, 0x500A, // 0x41C0 (16832) pixels +0x4008, 0x3006, 0x1803, 0x1803, 0x3807, 0x4008, 0x580B, 0x700E, 0x8811, 0x9813, 0xB016, 
0xC018, 0xD81B, 0xE81D, 0xF81F, 0xF81F, // 0x41D0 (16848) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41E0 (16864) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41F0 (16880) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4200 (16896) pixels +0xF81F, 0xF81F, 0xF81F, 0xF81F +}; diff --git a/firmware/include/images/icon_pepsi.png b/firmware/include/images/icon_pepsi.png new file mode 100644 index 0000000000000000000000000000000000000000..38548d6f55d05241c1f0605b2d53e49d1d321e29 GIT binary patch literal 26231 zcmeFYWmIHKwl12uyL;m9?(XhdxNG84xVuA9xN89=P`En<6z=XWh5M!U-rc?X-Sf_U zXN-6Mb&iplYejtXn=`(cGh(eAxuR5*q>8*?+b5JsL?r;g zj_aeL)FZZ24gBV^6R3H2eg|#(&ssfto+x*9}{S9pcp7K8R zC(D;d1*l)ur;pvnN>#@0&4#UFJ@Tu(KwsS=bojqq&kl0-nRN-gO2w>cX1*wM-RZ4$ zEcBi2bp6z2sQ?Ocb#QSF@g%hJ)!BgX}^_Vf#n>(eFHDDj-p>AmtdpEJkX+*`{J zQgq9%_e~W>4ZqsA;?#?-g|zPU?}ab@`~vs`vPk^V_?GS^r4;@5N!bRti$C|ka%bPV zhxJjGdYSIn#nLanuV%$DpuZ%_uz6_)nbkWk;hTJp#M;`qd(`XZcM2%eFF?EHKOe*8 z(NQsYemHu0>UsUK7Qj2+hQEaB`zWYopsgX{ikHdz{G%l!5fsp$is$YW*>e{3a!I22 zi8bUN$#n}lB5#e<5;>qZ-6l8f`D4dWzyi_X3ok&tw}~AGBNp$RQU{sx4&MwZ$k~46 z=lZa0Gf#*<={6y^HeQOZiOV4P*r=7Q*I68|KXXW~g2Z>RLM|$67F}A4?y}O|$buxs zvTWBO&W7bn8m&~5l4≀u+h@rgS~Ks;m$dUj1jge}3xYR^7cTd&%WAtht^e4PK|P8_W2T z0nTp72?6Ogoqdn37MU|!n9bn}m+LE^)2rp|_d(+}y!X2MtNA`Hh3#nfPOBVoDEGG*Ja@H51M3;& zA9xSZ8FK4j7+lmpHOvuNG5FWKYqEyEOm7w6KJ?=a>e6M*TO8YvNY{4sOG^K7Xc~O@ ziD{`>zG!hc`r@=lCz}&*CHe7l`jmnLx3XcbSqz-X%X>4qh7!w$9YU((!`(*y`HLo& z-VjehChoa<+}as+2>qzEB{81tAm?~@=STl^E^jbvq9EPM)0&S?R;#cJe7n&hUCR~m zgp0ZSD?(b}L*I5eCBYA}v*h2+4HH!^> zPUGo2dCNi{i~ZpnXU<&^I?^q+>CGyx3pH1ff9f&ul>dl{i;4p7#PspW(Kcd{ce7{lZ;;DP;L`W+oorJhjZ zmOXGAir?*;=;-H$GN`LQ+>o>GN5r_>+0ekUoJXwnJej#po3Pd{*^>I*)G<+uWP)Nk zxpBqPS+a0Wp_Hpvw_~Hsy}U4R)0NA6>CR|*LHTZWd+XK6o7SUWxbI-60sG55&U0SZ zmXv8*YePap|?0NkygmiXQ?Jfb?6bKWtdV7rx&phy-Ozac|gc2#m+tdFCYXD%gYn^oLmX zfNNL3N$1;^J#f*$%G|aI3pf4~EkM!G+?{LyG%|nn8BIP8sm7^~&8T**Ptwb?+L%u0DzY%(Zy90L)1oJp-b& zY{Ymy9EEyMtCl9Jt9{oRWNWb+>)x-u$N+XdV~7@sMb&PtQ?~0k`h)TBo48OI{QJ^E zjl(NmjvLXJG7%=YVB1XPLMXC<;}qVwGek&G<8vIz#TaAb^!L!YwGk&r+Y)ZFOdBarwgK`D(l;)c#W3a=bE*r4ul@nZ?(#US9pHkS}{ zM9{!iKQ?ozfzS;}6wP}!(LsbsV>6ygapo`qBj|^BUo=q7*GCY~-*FY}f%pZgvtb8u zw);a@GnhAXxp$`X8(EhtiV|;d(r(OW!RB{t6V=z}ZAKWZW$kwr4+FdWmCaRit*;T& z?-8<3*m+g7jDKp388<%Rm;1W)OG)%Xg|Jhg?cSsu)h*i}gW+h^e6#o4Fouleg<4%T zquPz?Yysof>^_EEZ9|gT72b!~;kARYM7M@y{0=^B5ge|*;W%ghPz#N0ay^0Qv_rA| zXqL!_;}_x*c_QoKPe9#!9_-IW({#iLqIBSABHdJ_kNN~(CW$poZ< z`9pG@6Zou=cgx21mF1!aBuTl@-PH|GodlRayWqy6uFnVNJ`n;(zKgb^GL<_EOOP<` zaQ=eS|BQ-z8D$MSz$wuM}d%w{}bm%(7N zWPYHa{(+cuRSy!|7lnAf{Dq-mB!jIu_OaxDhXwogA z!i`3cXAWgjcnBibD0(HwN)rGc9Gms@?S+%WaE5syo+{8InB%m8!mxQ7Kwc7W9vx-@x$9V z@({}RMzTGN5N#=h4g$%?T;KDdr$Md#RK&b^6xRbG=R2a+l~b5z&%>!oSx_&2Ze0vcF7P_c@0_;0kWMWxLr7;L=>OyQ|PIyqg1TE2+MDnWR21y z7qwd8jjga7Zu3D&{iTUykuO#Fc$ZQFfJPI>ZB}Ub2*oE-6;^mHNGOAxd;s+BE6MMfbV0K5no*bG8T7C30W_UK}qywckS72uq)jk}R_6y{Tlg5~32a3eg~r5Evspmea_%c z1v8676tj*#SH19(cLBi;e}i{qQ4NUCYgz@i^-|R~qfNYq(hH$dkhPV;ORErX%e0$1 z(=9}ZcKza@B#h{vAWd>+hlqnr(Olh%B~URb{c+%ep^qLuzR{V;+tWu=@S~b`K2jnZ zMdo7IFoX5WP3JiEIcy#Y-g_w}Fs=#fW$+s9_PXBBMsS!tJtA&t4up#dTN(RG*^MczO0J|uG&t>Vji%upx-5qq*Mn)U-<5I39 z(DS|P)QH%8+1`}pH3Q=W83^lh+heoGh+EwPYCTpf8=_8Jn=wb>=!nl0`Ykw>%iG*wYjaA|tIHiS)khT&iC~bhzTDCLmJMs-KjudGWWlL3b#wC48&|*Xdc~w8teyLtIS~Hj;lDvJL 
diff --git a/firmware/include/images/icon_pepsi.png b/firmware/include/images/icon_pepsi.png
new file mode 100644
index 0000000000000000000000000000000000000000..38548d6f55d05241c1f0605b2d53e49d1d321e29
GIT binary patch
literal 26231
[base85-encoded binary PNG data for icon_pepsi.png - not human-readable]
literal 0
HcmV?d00001

diff --git a/firmware/include/images/icon_whiskey.h b/firmware/include/images/icon_whiskey.h
deleted file mode 100644
index fd5ee7a..0000000
--- a/firmware/include/images/icon_whiskey.h
+++ /dev/null
@@ -1,1066 +0,0 @@
-// Generated by : ImageConverter 565 v2.2
-// Generated from: icons_for_nose_whiskey.png
-// Time generated: 4/21/2021 5:27:14 PM
-// Dimensions : 130x130 pixels
-// Size : 33,800 Bytes
-
-#include
-
-const unsigned short icon_whiskey[0x4204] PROGMEM ={
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0010 (16)
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0020 (32)
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0030 (48)
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0040 (64)
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0050 (80)
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0060 (96)
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0070 (112)
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0080 (128)
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F,
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0090 (144) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00A0 (160) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00B0 (176) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00C0 (192) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00D0 (208) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00E0 (224) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x00F0 (240) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0100 (256) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0110 (272) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0120 (288) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0130 (304) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0140 (320) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0150 (336) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0160 (352) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0170 (368) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0180 (384) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0190 (400) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01A0 (416) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01B0 (432) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01C0 (448) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01D0 (464) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01E0 (480) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x01F0 (496) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0200 (512) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, // 0x0210 (528) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0220 (544) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0230 (560) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0240 (576) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0250 (592) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0260 (608) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0270 (624) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0280 (640) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0290 (656) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02A0 (672) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02B0 (688) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02C0 (704) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02D0 (720) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02E0 (736) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x02F0 (752) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0300 (768) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0310 (784) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0320 (800) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0330 (816) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0340 (832) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0350 (848) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0360 (864) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0370 (880) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0380 (896) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0390 (912) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03A0 (928) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03B0 (944) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03C0 (960) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03D0 (976) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03E0 (992) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x03F0 (1008) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0400 (1024) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0410 (1040) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0420 (1056) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0430 (1072) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0440 (1088) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0450 (1104) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0460 (1120) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0470 (1136) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0480 (1152) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0490 (1168) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04A0 (1184) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04B0 (1200) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04C0 (1216) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04D0 (1232) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04E0 (1248) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x04F0 (1264) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0500 (1280) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0510 (1296) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xEF5D, // 0x0520 (1312) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0530 (1328) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0540 (1344) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0550 (1360) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0560 (1376) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0570 (1392) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0580 (1408) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0590 (1424) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, // 0x05A0 (1440) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x05B0 (1456) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x05C0 (1472) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x05D0 (1488) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x05E0 (1504) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x05F0 (1520) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0600 (1536) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, // 0x0610 (1552) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0620 (1568) -0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0630 (1584) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0640 (1600) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0650 (1616) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0660 (1632) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0670 (1648) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0680 (1664) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0690 (1680) -0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x06A0 (1696) 
-0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x06B0 (1712) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x06C0 (1728) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x06D0 (1744) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x06E0 (1760) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x06F0 (1776) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0700 (1792) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0710 (1808) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0720 (1824) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0730 (1840) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0740 (1856) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0750 (1872) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0760 (1888) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0770 (1904) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0780 (1920) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0790 (1936) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x07A0 (1952) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x07B0 (1968) -0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x07C0 (1984) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x07D0 (2000) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x07E0 (2016) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x07F0 (2032) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0800 (2048) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0810 (2064) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0820 (2080) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0830 (2096) -0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0840 (2112) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0850 (2128) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0860 (2144) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0870 (2160) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0880 (2176) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0890 (2192) -0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x08A0 (2208) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x08B0 (2224) -0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x08C0 (2240) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x08D0 (2256) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x08E0 (2272) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x08F0 (2288) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0900 (2304) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0910 (2320) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0920 (2336) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0930 (2352) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0940 (2368) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0950 (2384) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0960 (2400) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0970 (2416) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, // 0x0980 (2432) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0990 (2448) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, // 0x09A0 (2464) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xEF5D, 0xEF5D, // 0x09B0 (2480) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x09C0 (2496) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x09D0 (2512) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x09E0 (2528) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x09F0 (2544) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0A00 (2560) -0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0A10 (2576) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0A20 (2592) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0A30 (2608) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, // 0x0A40 (2624) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0A50 (2640) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0A60 (2656) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0A70 (2672) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0A80 (2688) -0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0A90 (2704) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0AA0 (2720) -0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0AB0 (2736) -0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0AC0 (2752) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0AD0 (2768) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0AE0 (2784) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0AF0 (2800) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0B00 (2816) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0B10 (2832) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0B20 (2848) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0B30 (2864) 
-0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0B40 (2880) -0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0B50 (2896) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0B60 (2912) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0B70 (2928) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0B80 (2944) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0B90 (2960) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, // 0x0BA0 (2976) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0BB0 (2992) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, // 0x0BC0 (3008) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0BD0 (3024) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0BE0 (3040) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0BF0 (3056) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0C00 (3072) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0C10 (3088) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0C20 (3104) -0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0C30 (3120) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0C40 (3136) -0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0C50 (3152) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0C60 (3168) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0C70 (3184) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0C80 (3200) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0C90 (3216) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0CA0 (3232) -0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0CB0 (3248) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0CC0 (3264) -0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0CD0 (3280) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0CE0 (3296) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0CF0 (3312) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0D00 (3328) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0D10 (3344) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0D20 (3360) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0D30 (3376) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0D40 (3392) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0D50 (3408) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0D60 (3424) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0D70 (3440) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0D80 (3456) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, // 0x0D90 (3472) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0DA0 (3488) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, // 0x0DB0 (3504) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, // 0x0DC0 (3520) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0DD0 (3536) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0DE0 (3552) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0DF0 (3568) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0E00 (3584) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0E10 (3600) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0E20 (3616) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, // 0x0E30 (3632) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0E40 (3648) -0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, // 0x0E50 (3664) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0E60 (3680) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0E70 (3696) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0E80 (3712) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0E90 (3728) -0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0EA0 (3744) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0EB0 (3760) -0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0EC0 (3776) -0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0ED0 (3792) -0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0EE0 (3808) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0EF0 (3824) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0F00 (3840) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0F10 (3856) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0F20 (3872) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0F30 (3888) -0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0F40 (3904) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, // 0x0F50 (3920) -0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0F60 (3936) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0F70 (3952) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0F80 (3968) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x0F90 (3984) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, // 0x0FA0 (4000) -0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xF7BE, 0xEF5D, 0xEF5D, // 0x0FB0 (4016) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x0FC0 (4032) 
[… deleted pixel-data rows omitted: several thousand more RGB565 16-bit values for an embedded bitmap image, covering offsets 0x0FD0 (4048) through 0x2E10 (11792), removed along with the rest of this array …]
0x2E20 (11808) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2E30 (11824) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2E40 (11840) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2E50 (11856) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2E60 (11872) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2E70 (11888) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2E80 (11904) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2E90 (11920) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2EA0 (11936) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2EB0 (11952) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2EC0 (11968) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2ED0 (11984) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2EE0 (12000) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2EF0 (12016) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2F00 (12032) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2F10 (12048) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2F20 (12064) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2F30 (12080) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2F40 (12096) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, // 0x2F50 (12112) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x2F60 (12128) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x2F70 (12144) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x2F80 (12160) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2F90 (12176) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, // 0x2FA0 (12192) -0xB618, 
0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, // 0x2FB0 (12208) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x2FC0 (12224) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x2FD0 (12240) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x2FE0 (12256) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x2FF0 (12272) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3000 (12288) -0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3010 (12304) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3020 (12320) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, // 0x3030 (12336) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3040 (12352) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3050 (12368) -0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3060 (12384) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3070 (12400) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3080 (12416) -0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3090 (12432) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x30A0 (12448) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x30B0 (12464) -0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x30C0 (12480) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, // 0x30D0 (12496) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x30E0 (12512) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x30F0 (12528) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3100 (12544) -0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3110 (12560) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3120 (12576) -0xB618, 0xB618, 0xB618, 0xB618, 
0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, // 0x3130 (12592) -0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3140 (12608) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3150 (12624) -0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3160 (12640) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3170 (12656) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3180 (12672) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3190 (12688) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x31A0 (12704) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x31B0 (12720) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x31C0 (12736) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x31D0 (12752) -0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x31E0 (12768) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x31F0 (12784) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3200 (12800) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3210 (12816) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3220 (12832) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3230 (12848) -0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3240 (12864) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3250 (12880) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3260 (12896) -0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3270 (12912) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3280 (12928) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3290 (12944) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x32A0 (12960) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 
0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x32B0 (12976) -0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x32C0 (12992) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x32D0 (13008) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, // 0x32E0 (13024) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x32F0 (13040) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3300 (13056) -0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3310 (13072) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3320 (13088) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3330 (13104) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3340 (13120) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3350 (13136) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, // 0x3360 (13152) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3370 (13168) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3380 (13184) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3390 (13200) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x33A0 (13216) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x33B0 (13232) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x33C0 (13248) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x33D0 (13264) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x33E0 (13280) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, // 0x33F0 (13296) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3400 (13312) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3410 (13328) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3420 (13344) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 
0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3430 (13360) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, // 0x3440 (13376) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3450 (13392) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, // 0x3460 (13408) -0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3470 (13424) -0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3480 (13440) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3490 (13456) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x34A0 (13472) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x34B0 (13488) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x34C0 (13504) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x34D0 (13520) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x34E0 (13536) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x34F0 (13552) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3500 (13568) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3510 (13584) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3520 (13600) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3530 (13616) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3540 (13632) -0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3550 (13648) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3560 (13664) -0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3570 (13680) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, // 0x3580 (13696) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3590 (13712) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x35A0 (13728) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 
0xB618, 0xB618, 0xB618, // 0x35B0 (13744) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, // 0x35C0 (13760) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35D0 (13776) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x35E0 (13792) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, // 0x35F0 (13808) -0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3600 (13824) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3610 (13840) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3620 (13856) -0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3630 (13872) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, // 0x3640 (13888) -0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3650 (13904) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3660 (13920) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, // 0x3670 (13936) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3680 (13952) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3690 (13968) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x36A0 (13984) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x36B0 (14000) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x36C0 (14016) -0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36D0 (14032) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x36E0 (14048) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, // 0x36F0 (14064) -0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3700 (14080) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3710 (14096) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3720 (14112) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 
0x3730 (14128) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3740 (14144) -0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3750 (14160) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3760 (14176) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, // 0x3770 (14192) -0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, // 0x3780 (14208) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3790 (14224) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x37A0 (14240) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x37B0 (14256) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x37C0 (14272) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37D0 (14288) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x37E0 (14304) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, // 0x37F0 (14320) -0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3800 (14336) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3810 (14352) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3820 (14368) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3830 (14384) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3840 (14400) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, // 0x3850 (14416) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3860 (14432) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, // 0x3870 (14448) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3880 (14464) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, // 0x3890 (14480) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x38A0 (14496) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x38B0 (14512) -0xB618, 
0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x38C0 (14528) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, // 0x38D0 (14544) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38E0 (14560) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x38F0 (14576) -0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3900 (14592) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3910 (14608) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3920 (14624) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3930 (14640) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3940 (14656) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3950 (14672) -0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3960 (14688) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3970 (14704) -0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3980 (14720) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3990 (14736) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, // 0x39A0 (14752) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x39B0 (14768) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x39C0 (14784) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, // 0x39D0 (14800) -0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39E0 (14816) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x39F0 (14832) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, // 0x3A00 (14848) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3A10 (14864) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, // 0x3A20 (14880) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3A30 (14896) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 
0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3A40 (14912) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3A50 (14928) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A60 (14944) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3A70 (14960) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3A80 (14976) -0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3A90 (14992) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3AA0 (15008) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3AB0 (15024) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3AC0 (15040) -0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, // 0x3AD0 (15056) -0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AE0 (15072) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3AF0 (15088) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, // 0x3B00 (15104) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3B10 (15120) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3B20 (15136) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3B30 (15152) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3B40 (15168) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3B50 (15184) -0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B60 (15200) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3B70 (15216) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3B80 (15232) -0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3B90 (15248) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3BA0 (15264) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3BB0 (15280) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 
0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3BC0 (15296) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3BD0 (15312) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BE0 (15328) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3BF0 (15344) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3C00 (15360) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3C10 (15376) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3C20 (15392) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3C30 (15408) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3C40 (15424) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xB618, // 0x3C50 (15440) -0xB618, 0xB618, 0xB618, 0xB618, 0xB618, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, // 0x3C60 (15456) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C70 (15472) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3C80 (15488) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3C90 (15504) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3CA0 (15520) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3CB0 (15536) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3CC0 (15552) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3CD0 (15568) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xF81F, 0xF81F, // 0x3CE0 (15584) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3CF0 (15600) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D00 (15616) -0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3D10 (15632) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3D20 (15648) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3D30 (15664) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 
0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3D40 (15680) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3D50 (15696) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3D60 (15712) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D70 (15728) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3D80 (15744) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3D90 (15760) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3DA0 (15776) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3DB0 (15792) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3DC0 (15808) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3DD0 (15824) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3DE0 (15840) -0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3DF0 (15856) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E00 (15872) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3E10 (15888) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3E20 (15904) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3E30 (15920) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3E40 (15936) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3E50 (15952) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3E60 (15968) -0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E70 (15984) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3E80 (16000) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3E90 (16016) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3EA0 (16032) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3EB0 (16048) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 
0xEF5D, 0xEF5D, 0xEF5D, // 0x3EC0 (16064) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3ED0 (16080) -0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, 0xEF5D, // 0x3EE0 (16096) -0xEF5D, 0xEF5D, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3EF0 (16112) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F00 (16128) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F10 (16144) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F20 (16160) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F30 (16176) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F40 (16192) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F50 (16208) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F60 (16224) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F70 (16240) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F80 (16256) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3F90 (16272) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FA0 (16288) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FB0 (16304) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FC0 (16320) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FD0 (16336) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FE0 (16352) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x3FF0 (16368) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4000 (16384) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4010 (16400) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4020 (16416) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4030 (16432) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 
0x4040 (16448) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4050 (16464) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4060 (16480) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4070 (16496) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4080 (16512) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4090 (16528) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40A0 (16544) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40B0 (16560) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40C0 (16576) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40D0 (16592) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40E0 (16608) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x40F0 (16624) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4100 (16640) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4110 (16656) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4120 (16672) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4130 (16688) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4140 (16704) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4150 (16720) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4160 (16736) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4170 (16752) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4180 (16768) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4190 (16784) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41A0 (16800) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41B0 (16816) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41C0 (16832) -0xF81F, 
0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41D0 (16848) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41E0 (16864) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x41F0 (16880) -0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, 0xF81F, // 0x4200 (16896) -0xF81F, 0xF81F, 0xF81F, 0xF81F, }; diff --git a/firmware/include/images/icon_wifi.h b/firmware/include/images/icon_wifi.h deleted file mode 100644 index a01aaa6..0000000 --- a/firmware/include/images/icon_wifi.h +++ /dev/null @@ -1,15 +0,0 @@ -// Generated by : ImageConverter 565 v2.2 -// Generated from: wifi (2).png -// Time generated: 4/28/2021 4:31:05 PM -// Dimensions : 24x24 pixels -// Size : 1,152 Bytes - -#include - -const uint8_t icon_wifi[] PROGMEM = { - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0xc0, 0xff, 0x03, 0xf0, 0xff, 0x0f, 0xfc, 0xff, 0x3f, 0x7e, 0x00, 0x7e, - 0x1f, 0x00, 0xf8, 0x0e, 0x00, 0x70, 0x00, 0xff, 0x00, 0xc0, 0xff, 0x03, - 0xe0, 0xff, 0x07, 0xe0, 0x81, 0x07, 0x40, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x3c, 0x00, 0x00, 0x7e, 0x00, 0x00, 0x3c, 0x00, 0x00, 0x18, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/.gitignore b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/.gitignore index b071295..ce7014e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/.gitignore +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/.gitignore @@ -7,3 +7,7 @@ utensor.lib utensor/libutensor.a *.o *.d +doc/ +node_modules/ +package-lock.json +package.json \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/.mbedignore b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/.mbedignore index 0c18f9c..08e33ca 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/.mbedignore +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/.mbedignore @@ -1,7 +1,7 @@ utensor/CMakeFiles/ tensorflow/lite/micro/mbed/ porting/arduino/ -porting/ecm3532/ +porting/espressif/ porting/himax/ porting/posix/ porting/silabs/ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cachel1_armv7.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cachel1_armv7.h index d2c3e22..e8f4002 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cachel1_armv7.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cachel1_armv7.h @@ -1,11 +1,11 @@ /****************************************************************************** * @file cachel1_armv7.h * @brief CMSIS Level 1 Cache API for Armv7-M and later - * @version V1.0.0 - * @date 03. March 2020 + * @version V1.0.2 + * @date 22. June 2022 ******************************************************************************/ /* - * Copyright (c) 2020 Arm Limited. All rights reserved. + * Copyright (c) 2020-2021 Arm Limited. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -48,7 +48,7 @@ #ifndef __SCB_ICACHE_LINE_SIZE #define __SCB_ICACHE_LINE_SIZE 32U /*!< Cortex-M7 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ -#endif +#endif /** \brief Enable I-Cache @@ -112,7 +112,7 @@ __STATIC_FORCEINLINE void SCB_InvalidateICache (void) \param[in] addr address \param[in] isize size of memory block (in number of bytes) */ -__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize) +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (volatile void *addr, int32_t isize) { #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) if ( isize > 0 ) { @@ -181,9 +181,15 @@ __STATIC_FORCEINLINE void SCB_EnableDCache (void) __STATIC_FORCEINLINE void SCB_DisableDCache (void) { #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - uint32_t ccsidr; - uint32_t sets; - uint32_t ways; + struct { + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + } locals + #if ((defined(__GNUC__) || defined(__clang__)) && !defined(__OPTIMIZE__)) + __ALIGNED(__SCB_DCACHE_LINE_SIZE) + #endif + ; SCB->CSSELR = 0U; /* select Level 1 data cache */ __DSB(); @@ -191,20 +197,37 @@ __STATIC_FORCEINLINE void SCB_DisableDCache (void) SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ __DSB(); - ccsidr = SCB->CCSIDR; + #if ((defined(__GNUC__) || defined(__clang__)) && !defined(__OPTIMIZE__)) + /* + * For the endless loop issue with GCC and clang with O0. + * More details, see https://github.com/ARM-software/CMSIS_5/issues/620 + * + * The issue only happens when local variables are in stack (GCC/clang O0). If + * local variables are saved in general purpose register, then the function + * is OK. + * + * When local variables are in stack, after disabling the cache, flush the + * local variables cache line for data consistency. + */ + /* Clean and invalidate the local variable cache. 
*/ + SCB->DCCIMVAC = (uint32_t)&locals; + __DSB(); + __ISB(); + #endif + locals.ccsidr = SCB->CCSIDR; /* clean & invalidate D-Cache */ - sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + locals.sets = (uint32_t)(CCSIDR_SETS(locals.ccsidr)); do { - ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + locals.ways = (uint32_t)(CCSIDR_WAYS(locals.ccsidr)); do { - SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | - ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + SCB->DCCISW = (((locals.sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((locals.ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); #if defined ( __CC_ARM ) __schedule_barrier(); #endif - } while (ways-- != 0U); - } while(sets-- != 0U); + } while (locals.ways-- != 0U); + } while(locals.sets-- != 0U); __DSB(); __ISB(); @@ -325,13 +348,13 @@ __STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) \param[in] addr address \param[in] dsize size of memory block (in number of bytes) */ -__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize) +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (volatile void *addr, int32_t dsize) { #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - if ( dsize > 0 ) { + if ( dsize > 0 ) { int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; - + __DSB(); do { @@ -355,13 +378,13 @@ __STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsiz \param[in] addr address \param[in] dsize size of memory block (in number of bytes) */ -__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (volatile void *addr, int32_t dsize) { #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - if ( dsize > 0 ) { + if ( dsize > 0 ) { int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; - + __DSB(); do { @@ -385,13 +408,13 @@ __STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize \param[in] addr address (aligned to 32-byte boundary) \param[in] dsize size of memory block (in number of bytes) */ -__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (volatile void *addr, int32_t dsize) { #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) - if ( dsize > 0 ) { + if ( dsize > 0 ) { int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; - + __DSB(); do { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armcc.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armcc.h index ced0a2c..a955d47 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armcc.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armcc.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_armcc.h * @brief CMSIS compiler ARMCC (Arm Compiler 5) header file - * @version V5.3.0 - * @date 19. February 2021 + * @version V5.3.2 + * @date 27. May 2021 ******************************************************************************/ /* * Copyright (c) 2009-2021 Arm Limited. 
All rights reserved. @@ -131,672 +131,673 @@ #define __VECTOR_TABLE_ATTRIBUTE __attribute__((used, section("RESET"))) #endif -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions @{ - */ +*/ /** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -/* intrinsic void __enable_irq(); */ +#define __NOP __nop /** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -/* intrinsic void __disable_irq(); */ +#define __WFI __wfi + /** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -__STATIC_INLINE uint32_t __get_CONTROL(void) -{ - register uint32_t __regControl __ASM("control"); - return(__regControl); -} +#define __WFE __wfe /** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_INLINE void __set_CONTROL(uint32_t control) -{ - register uint32_t __regControl __ASM("control"); - __regControl = control; -} +#define __SEV __sev /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. */ -__STATIC_INLINE uint32_t __get_IPSR(void) -{ - register uint32_t __regIPSR __ASM("ipsr"); - return(__regIPSR); -} +#define __ISB() __isb(0xF) +/** + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. + */ +#define __DSB() __dsb(0xF) /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. */ -__STATIC_INLINE uint32_t __get_APSR(void) -{ - register uint32_t __regAPSR __ASM("apsr"); - return(__regAPSR); -} +#define __DMB() __dmb(0xF) /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. 
+ \param [in] value Value to reverse + \return Reversed value */ -__STATIC_INLINE uint32_t __get_xPSR(void) -{ - register uint32_t __regXPSR __ASM("xpsr"); - return(__regXPSR); -} +#define __REV __rev /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_INLINE uint32_t __get_PSP(void) +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value) { - register uint32_t __regProcessStackPointer __ASM("psp"); - return(__regProcessStackPointer); + rev16 r0, r0 + bx lr } +#endif /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value) { - register uint32_t __regProcessStackPointer __ASM("psp"); - __regProcessStackPointer = topOfProcStack; + revsh r0, r0 + bx lr } +#endif /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_INLINE uint32_t __get_MSP(void) -{ - register uint32_t __regMainStackPointer __ASM("msp"); - return(__regMainStackPointer); -} +#define __ROR __ror /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. */ -__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) -{ - register uint32_t __regMainStackPointer __ASM("msp"); - __regMainStackPointer = topOfMainStack; -} +#define __BKPT(value) __breakpoint(value) /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief Reverse bit order of value + \details Reverses the bit order of the given value. 
+ \param [in] value Value to reverse + \return Reversed value */ -__STATIC_INLINE uint32_t __get_PRIMASK(void) +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + #define __RBIT __rbit +#else +__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value) { - register uint32_t __regPriMask __ASM("primask"); - return(__regPriMask); + uint32_t result; + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ + return result; } +#endif /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) -{ - register uint32_t __regPriMask __ASM("primask"); - __regPriMask = (priMask); -} +#define __CLZ __clz #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -#define __enable_fault_irq __enable_fiq +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr)) +#else + #define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop") +#endif /** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -#define __disable_fault_irq __disable_fiq +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr)) +#else + #define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop") +#endif /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_INLINE uint32_t __get_BASEPRI(void) -{ - register uint32_t __regBasePri __ASM("basepri"); - return(__regBasePri); -} +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr)) +#else + #define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop") +#endif /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) -{ - register uint32_t __regBasePri __ASM("basepri"); - __regBasePri = (basePri & 0xFFU); -} +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXB(value, ptr) __strex(value, ptr) +#else + #define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set - */ -__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - register uint32_t __regBasePriMax __ASM("basepri_max"); - __regBasePriMax = (basePri & 0xFFU); -} + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXH(value, ptr) __strex(value, ptr) +#else + #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_INLINE uint32_t __get_FAULTMASK(void) -{ - register uint32_t __regFaultMask __ASM("faultmask"); - return(__regFaultMask); -} +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) + #define __STREXW(value, ptr) __strex(value, ptr) +#else + #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") +#endif /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. */ -__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) -{ - register uint32_t __regFaultMask __ASM("faultmask"); - __regFaultMask = (faultMask & (uint32_t)1U); -} - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ +#define __CLREX __clrex /** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_INLINE uint32_t __get_FPSCR(void) -{ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) - register uint32_t __regfpscr __ASM("fpscr"); - return(__regfpscr); -#else - return(0U); -#endif -} +#define __SSAT __ssat /** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. 
- \param [in] fpscr Floating Point Status/Control value to set + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) -{ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) - register uint32_t __regfpscr __ASM("fpscr"); - __regfpscr = (fpscr); -#else - (void)fpscr; -#endif -} - - -/*@} end of CMSIS_Core_RegAccFunctions */ - +#define __USAT __usat -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ /** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -#define __NOP __nop +#ifndef __NO_EMBEDDED_ASM +__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +{ + rrx r0, r0 + bx lr +} +#endif /** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -#define __WFI __wfi +#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) /** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -#define __WFE __wfe +#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) /** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -#define __SEV __sev +#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -#define __ISB() __isb(0xF) +#define __STRBT(value, ptr) __strt(value, ptr) -/** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. - */ -#define __DSB() __dsb(0xF) /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -#define __DMB() __dmb(0xF) +#define __STRHT(value, ptr) __strt(value, ptr) /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -#define __REV __rev +#define __STRT(value, ptr) __strt(value, ptr) +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -#ifndef __NO_EMBEDDED_ASM -__attribute__((section(".rev16_text"))) __STATIC_INLINE __ASM uint32_t __REV16(uint32_t value) +__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) { - rev16 r0, r0 - bx lr + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; } -#endif - /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -#ifndef __NO_EMBEDDED_ASM -__attribute__((section(".revsh_text"))) __STATIC_INLINE __ASM int16_t __REVSH(int16_t value) +__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) { - revsh r0, r0 - bx lr + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; } -#endif +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ -/** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value - */ -#define __ROR __ror +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ -/** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. 
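/* Small usage sketch for the __SSAT/__USAT intrinsics implemented above,
 * assuming a CMSIS device header is included; the function and variable
 * names are illustrative only. */
#include <stdint.h>

static int16_t clamp_q15(int32_t acc)
{
  return (int16_t)__SSAT(acc, 16);   /* saturate to -32768..32767 */
}

static uint8_t clamp_u8(int32_t level)
{
  return (uint8_t)__USAT(level, 8);  /* saturate to 0..255 */
}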
+/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ */ -#define __BKPT(value) __breakpoint(value) - /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) - #define __RBIT __rbit -#else -__attribute__((always_inline)) __STATIC_INLINE uint32_t __RBIT(uint32_t value) -{ - uint32_t result; - uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ - - result = value; /* r will be reversed bits of v; first get LSB of v */ - for (value >>= 1U; value != 0U; value >>= 1U) - { - result <<= 1U; - result |= value & 1U; - s--; - } - result <<= s; /* shift when v's highest bits are zero */ - return result; -} -#endif +/* intrinsic void __enable_irq(); */ /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -#define __CLZ __clz - - -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) +/* intrinsic void __disable_irq(); */ /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __LDREXB(ptr) ((uint8_t ) __ldrex(ptr)) -#else - #define __LDREXB(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint8_t ) __ldrex(ptr)) _Pragma("pop") -#endif +__STATIC_INLINE uint32_t __get_CONTROL(void) +{ + register uint32_t __regControl __ASM("control"); + return(__regControl); +} /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __LDREXH(ptr) ((uint16_t) __ldrex(ptr)) -#else - #define __LDREXH(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint16_t) __ldrex(ptr)) _Pragma("pop") -#endif +__STATIC_INLINE void __set_CONTROL(uint32_t control) +{ + register uint32_t __regControl __ASM("control"); + __regControl = control; + __ISB(); +} /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Get IPSR Register + \details Returns the content of the IPSR Register. 
+ \return IPSR Register value */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __LDREXW(ptr) ((uint32_t ) __ldrex(ptr)) -#else - #define __LDREXW(ptr) _Pragma("push") _Pragma("diag_suppress 3731") ((uint32_t ) __ldrex(ptr)) _Pragma("pop") -#endif +__STATIC_INLINE uint32_t __get_IPSR(void) +{ + register uint32_t __regIPSR __ASM("ipsr"); + return(__regIPSR); +} /** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __STREXB(value, ptr) __strex(value, ptr) -#else - #define __STREXB(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") -#endif +__STATIC_INLINE uint32_t __get_APSR(void) +{ + register uint32_t __regAPSR __ASM("apsr"); + return(__regAPSR); +} /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __STREXH(value, ptr) __strex(value, ptr) -#else - #define __STREXH(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") -#endif +__STATIC_INLINE uint32_t __get_xPSR(void) +{ + register uint32_t __regXPSR __ASM("xpsr"); + return(__regXPSR); +} /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION < 5060020) - #define __STREXW(value, ptr) __strex(value, ptr) -#else - #define __STREXW(value, ptr) _Pragma("push") _Pragma("diag_suppress 3731") __strex(value, ptr) _Pragma("pop") -#endif +__STATIC_INLINE uint32_t __get_PSP(void) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + return(__regProcessStackPointer); +} /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -#define __CLREX __clrex +__STATIC_INLINE void __set_PSP(uint32_t topOfProcStack) +{ + register uint32_t __regProcessStackPointer __ASM("psp"); + __regProcessStackPointer = topOfProcStack; +} /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). 
+ \return MSP Register value */ -#define __SSAT __ssat +__STATIC_INLINE uint32_t __get_MSP(void) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + return(__regMainStackPointer); +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -#define __USAT __usat +__STATIC_INLINE void __set_MSP(uint32_t topOfMainStack) +{ + register uint32_t __regMainStackPointer __ASM("msp"); + __regMainStackPointer = topOfMainStack; +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -#ifndef __NO_EMBEDDED_ASM -__attribute__((section(".rrx_text"))) __STATIC_INLINE __ASM uint32_t __RRX(uint32_t value) +__STATIC_INLINE uint32_t __get_PRIMASK(void) { - rrx r0, r0 - bx lr + register uint32_t __regPriMask __ASM("primask"); + return(__regPriMask); } -#endif /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -#define __LDRBT(ptr) ((uint8_t ) __ldrt(ptr)) +__STATIC_INLINE void __set_PRIMASK(uint32_t priMask) +{ + register uint32_t __regPriMask __ASM("primask"); + __regPriMask = (priMask); +} +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) + /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __LDRHT(ptr) ((uint16_t) __ldrt(ptr)) +#define __enable_fault_irq __enable_fiq /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __LDRT(ptr) ((uint32_t ) __ldrt(ptr)) +#define __disable_fault_irq __disable_fiq /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -#define __STRBT(value, ptr) __strt(value, ptr) +__STATIC_INLINE uint32_t __get_BASEPRI(void) +{ + register uint32_t __regBasePri __ASM("basepri"); + return(__regBasePri); +} /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. 
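/* A minimal sketch of a nesting-safe critical section built from the
 * PRIMASK accessors above, assuming a CMSIS device header so that
 * __get_PRIMASK/__set_PRIMASK/__disable_irq are available; the shared
 * variable is hypothetical. */
#include <stdint.h>

static volatile uint32_t shared_counter;

static void counter_increment(void)
{
  uint32_t primask = __get_PRIMASK(); /* remember current mask state   */
  __disable_irq();                    /* mask IRQs by setting PRIMASK  */
  shared_counter++;                   /* short critical section        */
  __set_PRIMASK(primask);             /* restore the previous state    */
}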
- \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -#define __STRHT(value, ptr) __strt(value, ptr) +__STATIC_INLINE void __set_BASEPRI(uint32_t basePri) +{ + register uint32_t __regBasePri __ASM("basepri"); + __regBasePri = (basePri & 0xFFU); +} /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set */ -#define __STRT(value, ptr) __strt(value, ptr) +__STATIC_INLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + register uint32_t __regBasePriMax __ASM("basepri_max"); + __regBasePriMax = (basePri & 0xFFU); +} -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -__attribute__((always_inline)) __STATIC_INLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_INLINE uint32_t __get_FAULTMASK(void) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; + register uint32_t __regFaultMask __ASM("faultmask"); + return(__regFaultMask); } + /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set */ -__attribute__((always_inline)) __STATIC_INLINE uint32_t __USAT(int32_t val, uint32_t sat) +__STATIC_INLINE void __set_FAULTMASK(uint32_t faultMask) { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + register uint32_t __regFaultMask __ASM("faultmask"); + __regFaultMask = (faultMask & (uint32_t)1U); } #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__) && (__ARM_ARCH_7EM__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + +/** + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value + */ +__STATIC_INLINE uint32_t __get_FPSCR(void) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + return(__regfpscr); +#else + return(0U); +#endif +} + + +/** + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set + */ +__STATIC_INLINE void __set_FPSCR(uint32_t fpscr) +{ +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) + register uint32_t __regfpscr __ASM("fpscr"); + __regfpscr = (fpscr); +#else + (void)fpscr; +#endif +} + + +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang.h index b14038c..b4a1200 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_armclang.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file - * @version V5.4.0 - * @date 19. February 2020 + * @version V5.4.4 + * @date 30. May 2022 ******************************************************************************/ /* - * Copyright (c) 2009-2021 Arm Limited. All rights reserved. + * Copyright (c) 2009-2022 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -29,10 +29,6 @@ #pragma clang system_header /* treat file as system include file */ -#ifndef __ARM_COMPAT_H -#include /* Compatibility header for Arm Compiler 5 intrinsics */ -#endif - /* CMSIS compiler specific defines */ #ifndef __ASM #define __ASM __asm @@ -156,456 +152,423 @@ __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { #endif -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions @{ - */ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif /** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -/* intrinsic void __enable_irq(); see arm_compat.h */ - +#define __NOP __builtin_arm_nop /** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -/* intrinsic void __disable_irq(); see arm_compat.h */ +#define __WFI __builtin_arm_wfi /** - \brief Get Control Register - \details Returns the content of the Control Register. 
- \return Control Register value + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); -} +#define __WFE __builtin_arm_wfe -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; +#define __SEV __builtin_arm_sev - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); -} -#endif +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF) /** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} +#define __DSB() __builtin_arm_dsb(0xF) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); -} -#endif +#define __DMB() __builtin_arm_dmb(0xF) /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); -} +#define __REV(value) __builtin_bswap32(value) /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); -} +#define __REV16(value) __ROR(__REV(value), 16) /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. 
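/* Minimal sketch using the __REV mapping above, assuming a little-endian
 * Cortex-M and a CMSIS device header: convert a 32-bit value to network
 * (big-endian) byte order; the function name is hypothetical. */
#include <stdint.h>

static uint32_t to_network_u32(uint32_t host_value)
{
  return __REV(host_value);  /* e.g. 0x12345678 -> 0x78563412 */
}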
- \return xPSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); -} +#define __REVSH(value) (int16_t)__builtin_bswap16(value) /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); -} -#endif +#define __BKPT(value) __ASM volatile ("bkpt "#value) /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); -} - +#define __RBIT __builtin_arm_rbit -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) { - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". 
+ ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#endif +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); -} +#define __LDREXB (uint8_t)__builtin_arm_ldrex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); -} -#endif +#define __LDREXH (uint16_t)__builtin_arm_ldrex /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} -#endif - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); -} +#define __LDREXW (uint32_t)__builtin_arm_ldrex /** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. 
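/* Illustrative sketch of the classic LDREX/STREX retry loop using the
 * exclusives defined above, assuming an Armv7-M/Armv8-M core and a CMSIS
 * device header; 'counter' is a hypothetical shared variable. */
#include <stdint.h>

static volatile uint32_t counter;

static uint32_t atomic_increment(void)
{
  uint32_t newval;
  do {
    newval = __LDREXW(&counter) + 1U;           /* load-exclusive        */
  } while (__STREXW(newval, &counter) != 0U);   /* retry if monitor lost */
  return newval;
}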
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif +#define __STREXB (uint32_t)__builtin_arm_strex /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); -} +#define __STREXH (uint32_t)__builtin_arm_strex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return(result); -} -#endif +#define __STREXW (uint32_t)__builtin_arm_strex /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) -{ - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); -} - +#define __CLREX __builtin_arm_clrex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} -#endif +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) + /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Signed Saturate + \details Saturates a signed value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ +#define __SSAT __builtin_arm_ssat /** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ +#define __USAT __builtin_arm_usat /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return(result); -} -#endif - - -/** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) -{ - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) { - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} -#endif - + uint32_t result; -/** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. 
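/* A sketch of priority-threshold masking with the BASEPRI accessors
 * referenced above, assuming 4 implemented priority bits and a CMSIS
 * device header; the threshold value is illustrative. Unlike PRIMASK,
 * this leaves higher-urgency (numerically lower) priorities running. */
#include <stdint.h>

static void lock_low_priority_irqs(void)
{
  /* Mask priorities 5..15; __set_BASEPRI_MAX never lowers the mask. */
  __set_BASEPRI_MAX(5U << (8U - 4U));
}

static void unlock_low_priority_irqs(void)
{
  __set_BASEPRI(0U);  /* 0 disables BASEPRI masking entirely */
}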
- \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ } /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#endif /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); } -#endif -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ + +/** + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value + */ +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +{ + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; +} + +/** + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value + */ +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +{ + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; +} + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ @@ -613,631 +576,615 @@ __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. 
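For context on the __SSAT/__USAT pair handled in this hunk (builtin on Mainline cores, C fallback otherwise), a minimal usage sketch in plain C; the helper names and the Q15 framing are illustrative only, and the device's CMSIS core header is assumed to be included.

#include <stdint.h>

/* Clamp a widened accumulator back into a narrow sample format.
 * __SSAT(x, 16) saturates to the signed 16-bit range -32768..32767,
 * __USAT(x, 8) saturates to the unsigned 8-bit range 0..255.
 * Assumes the CMSIS core header defining __SSAT/__USAT is included. */
static inline int16_t clamp_q15(int32_t acc)
{
    return (int16_t)__SSAT(acc, 16);
}

static inline uint8_t clamp_u8(int32_t x)
{
    return (uint8_t)__USAT(x, 8);
}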
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); } -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); } -#endif /** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
- \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) { -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) + */ +#define __LDAEXB (uint8_t)__builtin_arm_ldaex - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
- \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + +/** + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif -} +#define __LDAEXH (uint16_t)__builtin_arm_ldaex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) -{ -#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif +#define __STLEXB (uint32_t)__builtin_arm_stlex -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr -#else -#define __get_FPSCR() ((uint32_t)0U) -#endif +#define __STLEXH (uint32_t)__builtin_arm_stlex + /** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
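The load-acquire/store-release intrinsics introduced in this hunk (__LDA/__STL and their exclusive forms, ARMv8-M only) are typically paired for a one-way flag handoff; a hedged sketch, with illustrative names and the CMSIS core header assumed to be included.

#include <stdint.h>

/* Single-producer, single-consumer handoff on one ARMv8-M core or a
 * shareable memory region: the producer writes the payload, then
 * store-releases the flag; the consumer load-acquires the flag before
 * reading the payload, so no separate __DMB() is needed. */
static volatile uint32_t payload;
static volatile uint32_t ready;

void publish(uint32_t value)
{
    payload = value;
    __STL(1U, &ready);              /* store-release orders the write above */
    __SEV();                        /* wake a consumer parked in __WFE()    */
}

uint32_t consume(void)
{
    while (__LDA(&ready) == 0U)     /* load-acquire orders the read below  */
    {
        __WFE();                    /* event register catches a racing SEV */
    }
    return payload;
}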
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __set_FPSCR __builtin_arm_set_fpscr -#else -#define __set_FPSCR(x) ((void)(x)) -#endif +#define __STLEX (uint32_t)__builtin_arm_stlex +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ -/*@} end of CMSIS_Core_RegAccFunctions */ +/** @}*/ /* end of group CMSIS_Core_InstructionInterface */ -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions @{ -*/ + */ -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif + + +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} #endif + /** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -#define __NOP __builtin_arm_nop +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value */ -#define __WFI __builtin_arm_wfi +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. + \brief Set Control Register + \details Writes the given value to the Control Register. 
+ \param [in] control Control Register value to set */ -#define __WFE __builtin_arm_wfe +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set */ -#define __SEV __builtin_arm_sev +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -#define __ISB() __builtin_arm_isb(0xF) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + /** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -#define __DSB() __builtin_arm_dsb(0xF) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -#define __DMB() __builtin_arm_dmb(0xF) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -#define __REV(value) __builtin_bswap32(value) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. 
+ \return PSP Register value */ -#define __REV16(value) __ROR(__REV(value), 16) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -#define __REVSH(value) (int16_t)__builtin_bswap16(value) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } +#endif /** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value */ -#define __RBIT __builtin_arm_rbit +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). 
+ \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif /** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. 
+ \return Priority Mask value */ -#define __STREXB (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. + \return Priority Mask value */ -#define __STREXH (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -#define __STREXW (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask */ -#define __CLREX __builtin_arm_clrex - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) - /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __SSAT __builtin_arm_ssat +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
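The PRIMASK accessors in this hunk are most often used for a nesting-safe critical section: save the current mask, disable IRQs, restore afterwards. A short sketch (illustrative counter, CMSIS core header assumed to be included):

#include <stdint.h>

static volatile uint32_t event_count;

void event_count_increment(void)
{
    uint32_t primask = __get_PRIMASK();  /* non-zero if IRQs were already masked */
    __disable_irq();                     /* CPSID i */
    event_count++;
    __set_PRIMASK(primask);              /* restore the caller's mask state */
}

Restoring the saved value rather than calling __enable_irq() unconditionally keeps the helper safe to call from code that already runs with interrupts masked.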
*/ -#define __USAT __builtin_arm_usat +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) { uint32_t result; - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) { uint32_t result; - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); } +#endif /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. 
+ \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) { - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); } /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) { - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) { - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); } +#endif -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } -/** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value - */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) -{ - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
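BASEPRI, by contrast, masks only interrupts at or below a chosen priority level; a hedged example of a bounded critical section using __set_BASEPRI_MAX and __set_BASEPRI (Mainline profiles only; __NVIC_PRIO_BITS comes from the device header and is assumed to be defined, the threshold value is illustrative):

#include <stdint.h>

#define MASK_PRIO_LEVEL  4U   /* illustrative threshold; lower number = higher priority */

void timing_sensitive_update(volatile uint32_t *reg, uint32_t value)
{
    uint32_t saved = __get_BASEPRI();

    /* BASEPRI holds the level in its upper implemented bits, hence the shift.
     * __set_BASEPRI_MAX only ever raises the masking level, never lowers it. */
    __set_BASEPRI_MAX(MASK_PRIO_LEVEL << (8U - __NVIC_PRIO_BITS));
    *reg = value;                 /* IRQs with priority numerically below the threshold still run */
    __set_BASEPRI(saved);         /* restore; 0 means no BASEPRI masking */
}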
+ \param [in] faultMask Fault Mask value to set + */ +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +{ + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); } +#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -1250,150 +1197,217 @@ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) /** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif } - +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) { +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) { - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif } +#endif /** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. 
+ \return MSPLIM Register value */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) { - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. 
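The PSPLIM/MSPLIM accessors in this hunk exist to arm hardware stack-overflow guards on ARMv8-M Mainline (the writes are silently ignored elsewhere, as the comments note); a speculative setup sketch with an illustrative task stack, intended to run before switching execution onto the process stack:

#include <stdint.h>

#define TASK_STACK_WORDS  256U

/* 8-byte alignment keeps the PSP/PSPLIM values architecturally valid. */
static uint32_t task_stack[TASK_STACK_WORDS] __attribute__((aligned(8)));

void task_stack_guard_init(void)
{
    /* Stacks grow downwards: the limit is the lowest valid address.
     * A push below PSPLIM raises a STKOF UsageFault instead of silently
     * corrupting whatever sits under the stack. */
    __set_PSPLIM((uint32_t)&task_stack[0]);
    __set_PSP((uint32_t)&task_stack[TASK_STACK_WORDS]);
}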
+ \param [in] MainStackPtrLimit Main Stack Pointer value to set */ -#define __STLEXB (uint32_t)__builtin_arm_stlex +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__ ) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) ) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ + (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ /** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif /** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set */ -#define __STLEX (uint32_t)__builtin_arm_stlex +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(fpscr) ((void)(fpscr)) +#endif -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) || \ - (defined (__ARM_ARCH_8_1M_MAIN__) && (__ARM_ARCH_8_1M_MAIN__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/** @} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ @@ -1483,7 +1497,7 @@ __STATIC_FORCEINLINE int32_t __SMMLA (int32_t op1, int32_t op2, int32_t op3) } #endif /* (__ARM_FEATURE_DSP == 1) */ -/*@} end of group CMSIS_SIMD_intrinsics */ +/** @} end of group CMSIS_SIMD_intrinsics */ #endif /* __CMSIS_ARMCLANG_H */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang_ltm.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang_ltm.h index 3972d01..1e255d5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang_ltm.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_armclang_ltm.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_armclang_ltm.h * @brief CMSIS compiler armclang (Arm Compiler 6) header file - * @version V1.5.0 - * @date 19. February 2021 + * @version V1.5.3 + * @date 27. 
May 2021 ******************************************************************************/ /* * Copyright (c) 2018-2021 Arm Limited. All rights reserved. @@ -29,10 +29,6 @@ #pragma clang system_header /* treat file as system include file */ -#ifndef __ARM_COMPAT_H -#include /* Compatibility header for Arm Compiler 5 intrinsics */ -#endif - /* CMSIS compiler specific defines */ #ifndef __ASM #define __ASM __asm @@ -156,1069 +152,1027 @@ __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { #endif -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions @{ - */ +*/ + +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) +#endif /** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -/* intrinsic void __enable_irq(); see arm_compat.h */ - +#define __NOP __builtin_arm_nop /** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -/* intrinsic void __disable_irq(); see arm_compat.h */ +#define __WFI __builtin_arm_wfi /** - \brief Get Control Register - \details Returns the content of the Control Register. - \return Control Register value + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); -} +#define __WFE __builtin_arm_wfe -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; +#define __SEV __builtin_arm_sev - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); -} -#endif +/** + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. + */ +#define __ISB() __builtin_arm_isb(0xF) /** - \brief Set Control Register - \details Writes the given value to the Control Register. 
- \param [in] control Control Register value to set + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} +#define __DSB() __builtin_arm_dsb(0xF) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) -{ - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); -} -#endif +#define __DMB() __builtin_arm_dmb(0xF) /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); -} +#define __REV(value) __builtin_bswap32(value) /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); -} +#define __REV16(value) __ROR(__REV(value), 16) /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); -} +#define __REVSH(value) (int16_t)__builtin_bswap16(value) /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - uint32_t result; - - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. 
- \return PSP Register value + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. + \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) -{ - uint32_t result; +#define __BKPT(value) __ASM volatile ("bkpt "#value) - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); -} -#endif +/** + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value + */ +#define __RBIT __builtin_arm_rbit /** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) { - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); -} -#endif +#define __LDREXB (uint8_t)__builtin_arm_ldrex /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); -} +#define __LDREXH (uint16_t)__builtin_arm_ldrex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. 
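__CLZ as defined in this hunk (with the explicit __builtin_clz(0) guard returning 32) is handy for bit arithmetic such as integer log2 or power-of-two rounding; an illustrative sketch, inputs above 2^31 not handled:

#include <stdint.h>

/* Index of the highest set bit; wraps to 0xFFFFFFFF for value == 0
 * because __CLZ(0) returns 32 in the fallback shown above. */
static inline uint32_t highest_set_bit(uint32_t value)
{
    return 31U - __CLZ(value);
}

/* Round up to the next power of two, e.g. when sizing a ring buffer. */
static inline uint32_t next_pow2(uint32_t value)
{
    return (value <= 1U) ? 1U : (uint32_t)(1UL << (32U - __CLZ(value - 1U)));
}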
- \return MSP Register value + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); -} -#endif +#define __LDREXW (uint32_t)__builtin_arm_ldrex /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); -} - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) -{ - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} -#endif - - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value - */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); -} - - -/** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) -{ - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); -} -#endif +#define __STREXB (uint32_t)__builtin_arm_strex /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); -} +#define __STREXH (uint32_t)__builtin_arm_strex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. - \return Priority Mask value + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return(result); -} -#endif +#define __STREXW (uint32_t)__builtin_arm_strex /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) -{ - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); -} - +#define __CLREX __builtin_arm_clrex -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask - */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) -{ - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); -} -#endif +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -#define __enable_fault_irq __enable_fiq /* see arm_compat.h */ +#define __SSAT __builtin_arm_ssat /** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -#define __disable_fault_irq __disable_fiq /* see arm_compat.h */ +#define __USAT __builtin_arm_usat /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. 
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return(result); -} -#endif - - -/** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) -{ - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint8_t) result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) { - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); -} -#endif - + uint32_t result; -/** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set - */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) -{ - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); + return ((uint16_t) result); /* Add explicit type cast here */ } /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { uint32_t result; - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#endif /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); } -#endif -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ - -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) - /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value + \brief Signed Saturate + \details Saturates a signed value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; } -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value + \brief Unsigned Saturate + \details Saturates an unsigned value. 
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; } -#endif +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ -/** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +/** + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); } -#endif /** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. 
- - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ /** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr -#else -#define __get_FPSCR() ((uint32_t)0U) -#endif +#define __LDAEXB (uint8_t)__builtin_arm_ldaex + /** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#define __set_FPSCR __builtin_arm_set_fpscr -#else -#define __set_FPSCR(x) ((void)(x)) -#endif +#define __LDAEXH (uint16_t)__builtin_arm_ldaex -/*@} end of CMSIS_Core_RegAccFunctions */ +/** + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) + */ +#define __LDAEX (uint32_t)__builtin_arm_ldaex + + +/** + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXB (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEXH (uint32_t)__builtin_arm_stlex + + +/** + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +#define __STLEX (uint32_t)__builtin_arm_stlex + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ + +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ + */ + +/** + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __enable_irq(void) +{ + __ASM volatile ("cpsie i" : : : "memory"); +} +#endif -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) +/** + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. + */ +#ifndef __ARM_COMPAT_H +__STATIC_FORCEINLINE void __disable_irq(void) +{ + __ASM volatile ("cpsid i" : : : "memory"); +} #endif + /** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -#define __NOP __builtin_arm_nop +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); +} + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value */ -#define __WFI __builtin_arm_wfi +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -#define __WFE __builtin_arm_wfe +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. 
+ \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. + \param [in] control Control Register value to set */ -#define __SEV __builtin_arm_sev +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); +} +#endif /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -#define __ISB() __builtin_arm_isb(0xF) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); +} + /** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. - It completes when all explicit memory accesses before this instruction complete. + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -#define __DSB() __builtin_arm_dsb(0xF) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -#define __DMB() __builtin_arm_dmb(0xF) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); +} /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -#define __REV(value) __builtin_bswap32(value) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value */ -#define __REV16(value) __ROR(__REV(value), 16) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). 
+ \param [in] topOfProcStack Process Stack Pointer value to set */ -#define __REVSH(value) (int16_t)__builtin_bswap16(value) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) +{ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } +#endif /** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value */ -#define __RBIT __builtin_arm_rbit +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); +} +#endif + /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. - This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM Compiler 6.10 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. 
- */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. + \param [in] topOfMainStack Main Stack Pointer value to set */ -#define __LDREXB (uint8_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +{ + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); +} +#endif +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value */ -#define __LDREXH (uint16_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set */ -#define __LDREXW (uint32_t)__builtin_arm_ldrex +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif /** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -#define __STREXB (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask" : "=r" (result) ); + return(result); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
+ \return Priority Mask value */ -#define __STREXH (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); +} +#endif /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -#define __STREXW (uint32_t)__builtin_arm_strex +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +{ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask */ -#define __CLREX __builtin_arm_clrex - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +{ + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); +} +#endif #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) - /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __SSAT __builtin_arm_ssat +__STATIC_FORCEINLINE void __enable_fault_irq(void) +{ + __ASM volatile ("cpsie f" : : : "memory"); +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -#define __USAT __builtin_arm_usat +__STATIC_FORCEINLINE void __disable_fault_irq(void) +{ + __ASM volatile ("cpsid f" : : : "memory"); +} /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Get Base Priority + \details Returns the current value of the Base Priority register. 
+ \return Base Priority register value */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) { uint32_t result; - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) { uint32_t result; - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) { - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); } +#endif /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) { - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); } /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. 
+ \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) { - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) { - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); + uint32_t result; + + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); } +#endif -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); } +#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -1227,150 +1181,210 @@ __STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) + /** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. 
Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif } - +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) { +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else uint32_t result; - - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { - uint32_t result; - - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) { - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif } +#endif /** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) { - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) - */ -#define __LDAEXB (uint8_t)__builtin_arm_ldaex - + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -/** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). 
+ \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set */ -#define __LDAEXH (uint16_t)__builtin_arm_ldaex +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) - */ -#define __LDAEX (uint32_t)__builtin_arm_ldaex - + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. -/** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set */ -#define __STLEXB (uint32_t)__builtin_arm_stlex +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif +} +#endif +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ /** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value */ -#define __STLEXH (uint32_t)__builtin_arm_stlex - +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __get_FPSCR (uint32_t)__builtin_arm_get_fpscr +#else +#define __get_FPSCR() ((uint32_t)0U) +#endif /** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. 
+ \param [in] fpscr Floating Point Status/Control value to set */ -#define __STLEX (uint32_t)__builtin_arm_stlex +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#define __set_FPSCR __builtin_arm_set_fpscr +#else +#define __set_FPSCR(x) ((void)(x)) +#endif -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_gcc.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_gcc.h index edc9f86..bf7cd11 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_gcc.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_gcc.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file cmsis_gcc.h * @brief CMSIS compiler GCC header file - * @version V5.3.2 - * @date 25. January 2021 + * @version V5.4.1 + * @date 27. May 2021 ******************************************************************************/ /* * Copyright (c) 2009-2021 Arm Limited. All rights reserved. @@ -202,468 +202,549 @@ __STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { #endif -/* ########################### Core Function Access ########################### */ -/** \ingroup CMSIS_Core_FunctionInterface - \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions +/* ########################## Core Instruction Access ######################### */ +/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface + Access to dedicated instructions @{ - */ +*/ -/** - \brief Enable IRQ Interrupts - \details Enables IRQ interrupts by clearing the I-bit in the CPSR. - Can only be executed in Privileged modes. - */ -// Patched by Edge Impulse, fix for targets that already have __enable_irq -#ifndef __enable_irq -__STATIC_FORCEINLINE void __enable_irq(void) -{ - __ASM volatile ("cpsie i" : : : "memory"); -} +/* Define macros for porting to both thumb1 and thumb2. + * For thumb1, use low register (r0-r7), specified by constraint "l" + * Otherwise, use general registers, specified by constraint "r" */ +#if defined (__thumb__) && !defined (__thumb2__) +#define __CMSIS_GCC_OUT_REG(r) "=l" (r) +#define __CMSIS_GCC_RW_REG(r) "+l" (r) +#define __CMSIS_GCC_USE_REG(r) "l" (r) +#else +#define __CMSIS_GCC_OUT_REG(r) "=r" (r) +#define __CMSIS_GCC_RW_REG(r) "+r" (r) +#define __CMSIS_GCC_USE_REG(r) "r" (r) #endif - /** - \brief Disable IRQ Interrupts - \details Disables IRQ interrupts by setting the I-bit in the CPSR. - Can only be executed in Privileged modes. + \brief No Operation + \details No Operation does nothing. This instruction can be used for code alignment purposes. */ -// Patched by Edge Impulse, fix for targets that already have __disable_irq -#ifndef __disable_irq -__STATIC_FORCEINLINE void __disable_irq(void) -{ - __ASM volatile ("cpsid i" : : : "memory"); -} -#endif - +#define __NOP() __ASM volatile ("nop") /** - \brief Get Control Register - \details Returns the content of the Control Register. 
- \return Control Register value + \brief Wait For Interrupt + \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. */ -__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control" : "=r" (result) ); - return(result); -} +#define __WFI() __ASM volatile ("wfi":::"memory") -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Control Register (non-secure) - \details Returns the content of the non-secure Control Register when in secure mode. - \return non-secure Control Register value + \brief Wait For Event + \details Wait For Event is a hint instruction that permits the processor to enter + a low-power state until one of a number of events occurs. */ -__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); - return(result); -} -#endif +#define __WFE() __ASM volatile ("wfe":::"memory") /** - \brief Set Control Register - \details Writes the given value to the Control Register. - \param [in] control Control Register value to set + \brief Send Event + \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. */ -__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) -{ - __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); -} +#define __SEV() __ASM volatile ("sev") -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Control Register (non-secure) - \details Writes the given value to the non-secure Control Register when in secure state. - \param [in] control Control Register value to set + \brief Instruction Synchronization Barrier + \details Instruction Synchronization Barrier flushes the pipeline in the processor, + so that all instructions following the ISB are fetched from cache or memory, + after the instruction has been completed. */ -__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +__STATIC_FORCEINLINE void __ISB(void) { - __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ASM volatile ("isb 0xF":::"memory"); } -#endif /** - \brief Get IPSR Register - \details Returns the content of the IPSR Register. - \return IPSR Register value + \brief Data Synchronization Barrier + \details Acts as a special kind of Data Memory Barrier. + It completes when all explicit memory accesses before this instruction complete. */ -__STATIC_FORCEINLINE uint32_t __get_IPSR(void) +__STATIC_FORCEINLINE void __DSB(void) { - uint32_t result; - - __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); - return(result); + __ASM volatile ("dsb 0xF":::"memory"); } /** - \brief Get APSR Register - \details Returns the content of the APSR Register. - \return APSR Register value + \brief Data Memory Barrier + \details Ensures the apparent order of the explicit memory operations before + and after the instruction, without ensuring their completion. */ -__STATIC_FORCEINLINE uint32_t __get_APSR(void) +__STATIC_FORCEINLINE void __DMB(void) { - uint32_t result; - - __ASM volatile ("MRS %0, apsr" : "=r" (result) ); - return(result); + __ASM volatile ("dmb 0xF":::"memory"); } /** - \brief Get xPSR Register - \details Returns the content of the xPSR Register. - \return xPSR Register value + \brief Reverse byte order (32 bit) + \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. 
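/* Illustrative usage sketch (not part of the CMSIS header): a minimal idle routine built
   on the hint/barrier intrinsics defined above; assumes the device CMSIS header is in scope. */
static void idle_until_interrupt(void)
{
    __DSB();   /* complete all outstanding memory accesses before sleeping */
    __WFI();   /* suspend execution until the next interrupt */
    __ISB();   /* refill the pipeline before executing code after wake-up */
}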
+ \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_xPSR(void) +__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) { +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) + return __builtin_bswap32(value); +#else uint32_t result; - __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); - return(result); + __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; +#endif } /** - \brief Get Process Stack Pointer - \details Returns the current value of the Process Stack Pointer (PSP). - \return PSP Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __get_PSP(void) +__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, psp" : "=r" (result) ); - return(result); + __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Process Stack Pointer (non-secure) - \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. - \return PSP Register value + \brief Reverse byte order (16 bit) + \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) +__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) { - uint32_t result; +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + return (int16_t)__builtin_bswap16(value); +#else + int16_t result; - __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); - return(result); -} + __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + return result; #endif - - -/** - \brief Set Process Stack Pointer - \details Assigns the given value to the Process Stack Pointer (PSP). - \param [in] topOfProcStack Process Stack Pointer value to set - */ -__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) -{ - __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. - \param [in] topOfProcStack Process Stack Pointer value to set + \brief Rotate Right in unsigned value (32 bit) + \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. + \param [in] op1 Value to rotate + \param [in] op2 Number of Bits to rotate + \return Rotated value */ -__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) +__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { - __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); + op2 %= 32U; + if (op2 == 0U) + { + return op1; + } + return (op1 >> op2) | (op1 << (32U - op2)); } -#endif /** - \brief Get Main Stack Pointer - \details Returns the current value of the Main Stack Pointer (MSP). - \return MSP Register value + \brief Breakpoint + \details Causes the processor to enter Debug state. + Debug tools can use this to investigate system state when the instruction at a particular address is reached. 
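/* Illustrative usage sketch (not part of the CMSIS header): host/network byte-order
   conversion and a fixed rotate, using __REV and __ROR as defined above. */
static uint32_t be32_to_host(uint32_t big_endian_word)
{
    return __REV(big_endian_word);   /* e.g. 0x12345678 -> 0x78563412 on a little-endian core */
}

static uint32_t rotate_right8(uint32_t value)
{
    return __ROR(value, 8U);         /* the C fallback above also handles a rotate count of 0 */
}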
+ \param [in] value is ignored by the processor. + If required, a debugger can use it to store additional information about the breakpoint. */ -__STATIC_FORCEINLINE uint32_t __get_MSP(void) -{ - uint32_t result; - - __ASM volatile ("MRS %0, msp" : "=r" (result) ); - return(result); -} +#define __BKPT(value) __ASM volatile ("bkpt "#value) -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer (non-secure) - \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. - \return MSP Register value + \brief Reverse bit order of value + \details Reverses the bit order of the given value. + \param [in] value Value to reverse + \return Reversed value */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) +__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); - return(result); -} +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) + __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); +#else + uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ + + result = value; /* r will be reversed bits of v; first get LSB of v */ + for (value >>= 1U; value != 0U; value >>= 1U) + { + result <<= 1U; + result |= value & 1U; + s--; + } + result <<= s; /* shift when v's highest bits are zero */ #endif + return result; +} /** - \brief Set Main Stack Pointer - \details Assigns the given value to the Main Stack Pointer (MSP). - \param [in] topOfMainStack Main Stack Pointer value to set + \brief Count leading zeros + \details Counts the number of leading zeros of a data value. + \param [in] value Value to count the leading zeros + \return number of leading zeros in value */ -__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) +__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) { - __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); + /* Even though __builtin_clz produces a CLZ instruction on ARM, formally + __builtin_clz(0) is undefined behaviour, so handle this case specially. + This guarantees ARM-compatible results if happening to compile on a non-ARM + target, and ensures the compiler doesn't decide to activate any + optimisations using the logic "value was passed to __builtin_clz, so it + is non-zero". + ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a + single CLZ instruction. + */ + if (value == 0U) + { + return 32U; + } + return __builtin_clz(value); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Set Main Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. - \param [in] topOfMainStack Main Stack Pointer value to set + \brief LDR Exclusive (8 bit) + \details Executes a exclusive LDR instruction for 8 bit value. 
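/* Illustrative usage sketch (not part of the CMSIS header): deriving "count trailing zeros"
   from __RBIT and __CLZ, e.g. to find the lowest pending bit in an event mask. */
static uint8_t lowest_set_bit(uint32_t mask)
{
    /* For mask == 0 this returns 32, because __CLZ(0) is defined as 32 above. */
    return __CLZ(__RBIT(mask));
}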
+ \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) +__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) { - __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); -} + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); #endif + return ((uint8_t) result); /* Add explicit type cast here */ +} -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Stack Pointer (non-secure) - \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. - \return SP Register value + \brief LDR Exclusive (16 bit) + \details Executes a exclusive LDR instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); - return(result); +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ } /** - \brief Set Stack Pointer (non-secure) - \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. - \param [in] topOfStack Stack Pointer value to set + \brief LDR Exclusive (32 bit) + \details Executes a exclusive LDR instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) { - __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); + uint32_t result; + + __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); + return(result); } -#endif /** - \brief Get Priority Mask - \details Returns the current state of the priority mask bit from the Priority Mask Register. - \return Priority Mask value + \brief STR Exclusive (8 bit) + \details Executes a exclusive STR instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) +__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, primask" : "=r" (result) ); - return(result); + __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Priority Mask (non-secure) - \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
- \return Priority Mask value + \brief STR Exclusive (16 bit) + \details Executes a exclusive STR instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) +__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); + return(result); } -#endif /** - \brief Set Priority Mask - \details Assigns the given value to the Priority Mask Register. - \param [in] priMask Priority Mask + \brief STR Exclusive (32 bit) + \details Executes a exclusive STR instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) +__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) { - __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); + uint32_t result; + + __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Priority Mask (non-secure) - \details Assigns the given value to the non-secure Priority Mask Register when in secure state. - \param [in] priMask Priority Mask + \brief Remove the exclusive lock + \details Removes the exclusive lock which is created by LDREX. */ -__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) +__STATIC_FORCEINLINE void __CLREX(void) { - __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); + __ASM volatile ("clrex" ::: "memory"); } -#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ #if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief Enable FIQ - \details Enables FIQ interrupts by clearing the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Signed Saturate + \details Saturates a signed value. + \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE void __enable_fault_irq(void) -{ - __ASM volatile ("cpsie f" : : : "memory"); -} +#define __SSAT(ARG1, ARG2) \ +__extension__ \ +({ \ + int32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) /** - \brief Disable FIQ - \details Disables FIQ interrupts by setting the F-bit in the CPSR. - Can only be executed in Privileged modes. + \brief Unsigned Saturate + \details Saturates an unsigned value. 
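/* Illustrative usage sketch (not part of the CMSIS header): a lock-free counter increment
   using the exclusive-access intrinsics above (available on ARMv7-M / ARMv8-M parts only). */
static uint32_t atomic_increment(volatile uint32_t *counter)
{
    uint32_t value;
    do {
        value = __LDREXW(counter) + 1U;        /* load-exclusive and compute the new value   */
    } while (__STREXW(value, counter) != 0U);  /* retry if the exclusive reservation was lost */
    return value;
}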
+ \param [in] ARG1 Value to be saturated + \param [in] ARG2 Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE void __disable_fault_irq(void) -{ - __ASM volatile ("cpsid f" : : : "memory"); -} +#define __USAT(ARG1, ARG2) \ +__extension__ \ +({ \ + uint32_t __RES, __ARG1 = (ARG1); \ + __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ + __RES; \ + }) /** - \brief Get Base Priority - \details Returns the current value of the Base Priority register. - \return Base Priority register value + \brief Rotate Right with Extend (32 bit) + \details Moves each bit of a bitstring right by one bit. + The carry input is shifted in at the left end of the bitstring. + \param [in] value Value to rotate + \return Rotated value */ -__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) +__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) { uint32_t result; - __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Base Priority (non-secure) - \details Returns the current value of the non-secure Base Priority register when in secure state. - \return Base Priority register value + \brief LDRT Unprivileged (8 bit) + \details Executes a Unprivileged LDRT instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) +__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) { - uint32_t result; + uint32_t result; - __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); - return(result); -} +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); #endif + return ((uint8_t) result); /* Add explicit type cast here */ +} /** - \brief Set Base Priority - \details Assigns the given value to the Base Priority register. - \param [in] basePri Base Priority value to set + \brief LDRT Unprivileged (16 bit) + \details Executes a Unprivileged LDRT instruction for 16 bit values. + \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) +__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) { - __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); + uint32_t result; + +#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) + __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); +#else + /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not + accepted by assembler. So has to use following less efficient pattern. + */ + __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); +#endif + return ((uint16_t) result); /* Add explicit type cast here */ } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Base Priority (non-secure) - \details Assigns the given value to the non-secure Base Priority register when in secure state. - \param [in] basePri Base Priority value to set + \brief LDRT Unprivileged (32 bit) + \details Executes a Unprivileged LDRT instruction for 32 bit values. 
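/* Illustrative usage sketch (not part of the CMSIS header): clamping a scaled sample to the
   int16_t range with __SSAT. The hypothetical gain is in Q8 fixed point; note that the
   saturation width must be a compile-time constant for the inline-assembly variant above. */
static int16_t apply_gain_q8(int16_t sample, int32_t gain_q8)
{
    int32_t scaled = ((int32_t)sample * gain_q8) >> 8;
    return (int16_t)__SSAT(scaled, 16);   /* saturate to [-32768, 32767] */
}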
+ \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) +__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) { - __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); + uint32_t result; + + __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); + return(result); } -#endif /** - \brief Set Base Priority with condition - \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, - or the new value increases the BASEPRI priority level. - \param [in] basePri Base Priority value to set + \brief STRT Unprivileged (8 bit) + \details Executes a Unprivileged STRT instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) { - __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); + __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } /** - \brief Get Fault Mask - \details Returns the current value of the Fault Mask register. - \return Fault Mask register value + \brief STRT Unprivileged (16 bit) + \details Executes a Unprivileged STRT instruction for 16 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) +__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); - return(result); + __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Fault Mask (non-secure) - \details Returns the current value of the non-secure Fault Mask register when in secure state. - \return Fault Mask register value + \brief STRT Unprivileged (32 bit) + \details Executes a Unprivileged STRT instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) +__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) { - uint32_t result; - - __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); - return(result); + __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); } -#endif +#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ /** - \brief Set Fault Mask - \details Assigns the given value to the Fault Mask register. - \param [in] faultMask Fault Mask value to set + \brief Signed Saturate + \details Saturates a signed value. 
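/* Illustrative usage sketch (not part of the CMSIS header): a privileged service accessing a
   caller-supplied buffer with the unprivileged *T intrinsics above, so the MPU permissions of
   the unprivileged caller still apply to the access. */
static uint8_t read_user_byte(volatile uint8_t *user_ptr)
{
    return __LDRBT(user_ptr);
}

static void write_user_byte(uint8_t value, volatile uint8_t *user_ptr)
{
    __STRBT(value, user_ptr);
}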
+ \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (1..32) + \return Saturated value */ -__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) +__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) { - __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); + if ((sat >= 1U) && (sat <= 32U)) + { + const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); + const int32_t min = -1 - max ; + if (val > max) + { + return max; + } + else if (val < min) + { + return min; + } + } + return val; } - -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Fault Mask (non-secure) - \details Assigns the given value to the non-secure Fault Mask register when in secure state. - \param [in] faultMask Fault Mask value to set + \brief Unsigned Saturate + \details Saturates an unsigned value. + \param [in] value Value to be saturated + \param [in] sat Bit position to saturate to (0..31) + \return Saturated value */ -__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) +__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) { - __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); + if (sat <= 31U) + { + const uint32_t max = ((1U << sat) - 1U); + if (val > (int32_t)max) + { + return max; + } + else if (val < 0) + { + return 0U; + } + } + return (uint32_t)val; } -#endif #endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ @@ -672,968 +753,889 @@ __STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) #if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) - /** - \brief Get Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). - \return PSPLIM Register value + \brief Load-Acquire (8 bit) + \details Executes a LDAB instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) +__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim" : "=r" (result) ); - return result; -#endif + uint32_t result; + + __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); } -#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) -/** - \brief Get Process Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \return PSPLIM Register value +/** + \brief Load-Acquire (16 bit) + \details Executes a LDAH instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) +__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); - return result; -#endif + uint32_t result; + + __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); } -#endif /** - \brief Set Process Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Load-Acquire (32 bit) + \details Executes a LDA instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Process Stack Pointer (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. - \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + \brief Store-Release (8 bit) + \details Executes a STLB instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure PSPLIM is RAZ/WI - (void)ProcStackPtrLimit; -#else - __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); -#endif + __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Get Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always in non-secure - mode. - - \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). - \return MSPLIM Register value + \brief Store-Release (16 bit) + \details Executes a STLH instruction for 16 bit values. 
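/* Illustrative usage sketch (not part of the CMSIS header): publishing a value to another
   context with release/acquire ordering instead of explicit __DMB() calls (ARMv8-M only).
   'message' and 'message_ready' are hypothetical shared variables. */
static volatile uint32_t message;
static volatile uint8_t  message_ready;

static void publish(uint32_t value)
{
    message = value;
    __STLB(1U, &message_ready);        /* store-release: 'message' becomes visible first */
}

static int try_consume(uint32_t *out)
{
    if (__LDAB(&message_ready) == 0U)  /* load-acquire: later reads are ordered after the flag */
    {
        return 0;
    }
    *out = message;
    return 1;
}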
+ \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) +__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Get Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence zero is returned always. - - \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. - \return MSPLIM Register value + \brief Store-Release (32 bit) + \details Executes a STL instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location */ -__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - return 0U; -#else - uint32_t result; - __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); - return result; -#endif + __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); } -#endif /** - \brief Set Main Stack Pointer Limit - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored in non-secure - mode. - - \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). - \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set + \brief Load-Acquire Exclusive (8 bit) + \details Executes a LDAB exclusive instruction for 8 bit value. + \param [in] ptr Pointer to data + \return value of type uint8_t at (*ptr) */ -__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ - (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); -#endif + uint32_t result; + + __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint8_t) result); } -#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Set Main Stack Pointer Limit (non-secure) - Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure - Stack Pointer Limit register hence the write is silently ignored. - - \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. - \param [in] MainStackPtrLimit Main Stack Pointer value to set + \brief Load-Acquire Exclusive (16 bit) + \details Executes a LDAH exclusive instruction for 16 bit values. 
+ \param [in] ptr Pointer to data + \return value of type uint16_t at (*ptr) */ -__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) { -#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) - // without main extensions, the non-secure MSPLIM is RAZ/WI - (void)MainStackPtrLimit; -#else - __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); -#endif -} -#endif + uint32_t result; -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ + __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return ((uint16_t) result); +} /** - \brief Get FPSCR - \details Returns the current value of the Floating Point Status/Control register. - \return Floating Point Status/Control register value + \brief Load-Acquire Exclusive (32 bit) + \details Executes a LDA exclusive instruction for 32 bit values. + \param [in] ptr Pointer to data + \return value of type uint32_t at (*ptr) */ -__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) +__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) { -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#if __has_builtin(__builtin_arm_get_fpscr) -// Re-enable using built-in when GCC has been fixed -// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - return __builtin_arm_get_fpscr(); -#else - uint32_t result; + uint32_t result; - __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); - return(result); -#endif -#else - return(0U); -#endif + __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); + return(result); } /** - \brief Set FPSCR - \details Assigns the given value to the Floating Point Status/Control register. - \param [in] fpscr Floating Point Status/Control value to set + \brief Store-Release Exclusive (8 bit) + \details Executes a STLB exclusive instruction for 8 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) +__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) { -#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ - (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) -#if __has_builtin(__builtin_arm_set_fpscr) -// Re-enable using built-in when GCC has been fixed -// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) - /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ - __builtin_arm_set_fpscr(fpscr); -#else - __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); -#endif -#else - (void)fpscr; -#endif -} + uint32_t result; + __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} -/*@} end of CMSIS_Core_RegAccFunctions */ +/** + \brief Store-Release Exclusive (16 bit) + \details Executes a STLH exclusive instruction for 16 bit values. 
+ \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed + */ +__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) +{ + uint32_t result; -/* ########################## Core Instruction Access ######################### */ -/** \defgroup CMSIS_Core_InstructionInterface CMSIS Core Instruction Interface - Access to dedicated instructions - @{ -*/ + __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} -/* Define macros for porting to both thumb1 and thumb2. - * For thumb1, use low register (r0-r7), specified by constraint "l" - * Otherwise, use general registers, specified by constraint "r" */ -#if defined (__thumb__) && !defined (__thumb2__) -#define __CMSIS_GCC_OUT_REG(r) "=l" (r) -#define __CMSIS_GCC_RW_REG(r) "+l" (r) -#define __CMSIS_GCC_USE_REG(r) "l" (r) -#else -#define __CMSIS_GCC_OUT_REG(r) "=r" (r) -#define __CMSIS_GCC_RW_REG(r) "+r" (r) -#define __CMSIS_GCC_USE_REG(r) "r" (r) -#endif /** - \brief No Operation - \details No Operation does nothing. This instruction can be used for code alignment purposes. + \brief Store-Release Exclusive (32 bit) + \details Executes a STL exclusive instruction for 32 bit values. + \param [in] value Value to store + \param [in] ptr Pointer to location + \return 0 Function succeeded + \return 1 Function failed */ -#define __NOP() __ASM volatile ("nop") +__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +{ + uint32_t result; -/** - \brief Wait For Interrupt - \details Wait For Interrupt is a hint instruction that suspends execution until one of a number of events occurs. - */ -#define __WFI() __ASM volatile ("wfi":::"memory") + __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + return(result); +} +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ -/** - \brief Wait For Event - \details Wait For Event is a hint instruction that permits the processor to enter - a low-power state until one of a number of events occurs. - */ -#define __WFE() __ASM volatile ("wfe":::"memory") +/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ -/** - \brief Send Event - \details Send Event is a hint instruction. It causes an event to be signaled to the CPU. +/* ########################### Core Function Access ########################### */ +/** \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_RegAccFunctions CMSIS Core Register Access Functions + @{ */ -#define __SEV() __ASM volatile ("sev") - /** - \brief Instruction Synchronization Barrier - \details Instruction Synchronization Barrier flushes the pipeline in the processor, - so that all instructions following the ISB are fetched from cache or memory, - after the instruction has been completed. + \brief Enable IRQ Interrupts + \details Enables IRQ interrupts by clearing special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -__STATIC_FORCEINLINE void __ISB(void) +// Patched by Edge Impulse, fix for targets that already have __enable_irq +#ifndef __enable_irq +__STATIC_FORCEINLINE void __enable_irq(void) { - __ASM volatile ("isb 0xF":::"memory"); + __ASM volatile ("cpsie i" : : : "memory"); } +#endif /** - \brief Data Synchronization Barrier - \details Acts as a special kind of Data Memory Barrier. 
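/* Illustrative usage sketch (not part of the CMSIS header): a minimal spinlock built on the
   load-acquire-exclusive / store-release intrinsics above (ARMv8-M only). */
static void spin_lock(volatile uint32_t *lock)
{
    do {
        while (__LDAEX(lock) != 0U) { /* spin while the lock is held */ }
    } while (__STLEX(1U, lock) != 0U);   /* claim it; retry if the exclusive store failed */
}

static void spin_unlock(volatile uint32_t *lock)
{
    __STL(0U, lock);                     /* release with store-release semantics */
}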
- It completes when all explicit memory accesses before this instruction complete. + \brief Disable IRQ Interrupts + \details Disables IRQ interrupts by setting special-purpose register PRIMASK. + Can only be executed in Privileged modes. */ -__STATIC_FORCEINLINE void __DSB(void) +// Patched by Edge Impulse, fix for targets that already have __disable_irq +#ifndef __disable_irq +__STATIC_FORCEINLINE void __disable_irq(void) { - __ASM volatile ("dsb 0xF":::"memory"); + __ASM volatile ("cpsid i" : : : "memory"); } +#endif /** - \brief Data Memory Barrier - \details Ensures the apparent order of the explicit memory operations before - and after the instruction, without ensuring their completion. + \brief Get Control Register + \details Returns the content of the Control Register. + \return Control Register value */ -__STATIC_FORCEINLINE void __DMB(void) +__STATIC_FORCEINLINE uint32_t __get_CONTROL(void) { - __ASM volatile ("dmb 0xF":::"memory"); + uint32_t result; + + __ASM volatile ("MRS %0, control" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse byte order (32 bit) - \details Reverses the byte order in unsigned integer value. For example, 0x12345678 becomes 0x78563412. - \param [in] value Value to reverse - \return Reversed value + \brief Get Control Register (non-secure) + \details Returns the content of the non-secure Control Register when in secure mode. + \return non-secure Control Register value */ -__STATIC_FORCEINLINE uint32_t __REV(uint32_t value) +__STATIC_FORCEINLINE uint32_t __TZ_get_CONTROL_NS(void) { -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5) - return __builtin_bswap32(value); -#else uint32_t result; - __ASM ("rev %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -#endif + __ASM volatile ("MRS %0, control_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order within each halfword of a word. For example, 0x12345678 becomes 0x34127856. - \param [in] value Value to reverse - \return Reversed value + \brief Set Control Register + \details Writes the given value to the Control Register. + \param [in] control Control Register value to set */ -__STATIC_FORCEINLINE uint32_t __REV16(uint32_t value) +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) { - uint32_t result; - - __ASM ("rev16 %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; + __ASM volatile ("MSR control, %0" : : "r" (control) : "memory"); + __ISB(); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Reverse byte order (16 bit) - \details Reverses the byte order in a 16-bit value and returns the signed 16-bit result. For example, 0x0080 becomes 0x8000. - \param [in] value Value to reverse - \return Reversed value + \brief Set Control Register (non-secure) + \details Writes the given value to the non-secure Control Register when in secure state. 
+ \param [in] control Control Register value to set */ -__STATIC_FORCEINLINE int16_t __REVSH(int16_t value) +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) { -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - return (int16_t)__builtin_bswap16(value); -#else - int16_t result; - - __ASM ("revsh %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); - return result; -#endif + __ASM volatile ("MSR control_ns, %0" : : "r" (control) : "memory"); + __ISB(); } +#endif /** - \brief Rotate Right in unsigned value (32 bit) - \details Rotate Right (immediate) provides the value of the contents of a register rotated by a variable number of bits. - \param [in] op1 Value to rotate - \param [in] op2 Number of Bits to rotate - \return Rotated value + \brief Get IPSR Register + \details Returns the content of the IPSR Register. + \return IPSR Register value */ -__STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) +__STATIC_FORCEINLINE uint32_t __get_IPSR(void) { - op2 %= 32U; - if (op2 == 0U) - { - return op1; - } - return (op1 >> op2) | (op1 << (32U - op2)); + uint32_t result; + + __ASM volatile ("MRS %0, ipsr" : "=r" (result) ); + return(result); } /** - \brief Breakpoint - \details Causes the processor to enter Debug state. - Debug tools can use this to investigate system state when the instruction at a particular address is reached. - \param [in] value is ignored by the processor. - If required, a debugger can use it to store additional information about the breakpoint. + \brief Get APSR Register + \details Returns the content of the APSR Register. + \return APSR Register value */ -#define __BKPT(value) __ASM volatile ("bkpt "#value) +__STATIC_FORCEINLINE uint32_t __get_APSR(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, apsr" : "=r" (result) ); + return(result); +} /** - \brief Reverse bit order of value - \details Reverses the bit order of the given value. - \param [in] value Value to reverse - \return Reversed value + \brief Get xPSR Register + \details Returns the content of the xPSR Register. + \return xPSR Register value */ -__STATIC_FORCEINLINE uint32_t __RBIT(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_xPSR(void) { uint32_t result; -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) - __ASM ("rbit %0, %1" : "=r" (result) : "r" (value) ); -#else - uint32_t s = (4U /*sizeof(v)*/ * 8U) - 1U; /* extra shift needed at end */ - - result = value; /* r will be reversed bits of v; first get LSB of v */ - for (value >>= 1U; value != 0U; value >>= 1U) - { - result <<= 1U; - result |= value & 1U; - s--; - } - result <<= s; /* shift when v's highest bits are zero */ -#endif - return result; + __ASM volatile ("MRS %0, xpsr" : "=r" (result) ); + return(result); } /** - \brief Count leading zeros - \details Counts the number of leading zeros of a data value. - \param [in] value Value to count the leading zeros - \return number of leading zeros in value + \brief Get Process Stack Pointer + \details Returns the current value of the Process Stack Pointer (PSP). + \return PSP Register value */ -__STATIC_FORCEINLINE uint8_t __CLZ(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_PSP(void) { - /* Even though __builtin_clz produces a CLZ instruction on ARM, formally - __builtin_clz(0) is undefined behaviour, so handle this case specially. 
- This guarantees ARM-compatible results if happening to compile on a non-ARM - target, and ensures the compiler doesn't decide to activate any - optimisations using the logic "value was passed to __builtin_clz, so it - is non-zero". - ARM GCC 7.3 and possibly earlier will optimise this test away, leaving a - single CLZ instruction. - */ - if (value == 0U) - { - return 32U; - } - return __builtin_clz(value); + uint32_t result; + + __ASM volatile ("MRS %0, psp" : "=r" (result) ); + return(result); } -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (8 bit) - \details Executes a exclusive LDR instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Process Stack Pointer (non-secure) + \details Returns the current value of the non-secure Process Stack Pointer (PSP) when in secure state. + \return PSP Register value */ -__STATIC_FORCEINLINE uint8_t __LDREXB(volatile uint8_t *addr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSP_NS(void) { - uint32_t result; + uint32_t result; -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexb %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexb %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, psp_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief LDR Exclusive (16 bit) - \details Executes a exclusive LDR instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Process Stack Pointer + \details Assigns the given value to the Process Stack Pointer (PSP). + \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint16_t __LDREXH(volatile uint16_t *addr) +__STATIC_FORCEINLINE void __set_PSP(uint32_t topOfProcStack) { - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrexh %0, %1" : "=r" (result) : "Q" (*addr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrexh %0, [%1]" : "=r" (result) : "r" (addr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR psp, %0" : : "r" (topOfProcStack) : ); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDR Exclusive (32 bit) - \details Executes a exclusive LDR instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Process Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Process Stack Pointer (PSP) when in secure state. 
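/* Illustrative usage sketch (not part of the CMSIS header): RTOS-style switch of thread mode
   onto the process stack. 'thread_stack' is a hypothetical, 8-byte aligned stack area. */
static uint32_t thread_stack[256] __attribute__((aligned(8)));

static void use_process_stack(void)
{
    __set_PSP((uint32_t)&thread_stack[256]);   /* PSP grows downwards from the top of the area */
    __set_CONTROL(__get_CONTROL() | 0x02U);    /* SPSEL = 1: thread mode now uses the PSP      */
    /* no extra __ISB() needed here: the V5.4.1 __set_CONTROL above already issues one */
}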
+ \param [in] topOfProcStack Process Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __LDREXW(volatile uint32_t *addr) +__STATIC_FORCEINLINE void __TZ_set_PSP_NS(uint32_t topOfProcStack) { - uint32_t result; - - __ASM volatile ("ldrex %0, %1" : "=r" (result) : "Q" (*addr) ); - return(result); + __ASM volatile ("MSR psp_ns, %0" : : "r" (topOfProcStack) : ); } +#endif /** - \brief STR Exclusive (8 bit) - \details Executes a exclusive STR instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Main Stack Pointer + \details Returns the current value of the Main Stack Pointer (MSP). + \return MSP Register value */ -__STATIC_FORCEINLINE uint32_t __STREXB(uint8_t value, volatile uint8_t *addr) +__STATIC_FORCEINLINE uint32_t __get_MSP(void) { - uint32_t result; + uint32_t result; - __ASM volatile ("strexb %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("MRS %0, msp" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief STR Exclusive (16 bit) - \details Executes a exclusive STR instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get Main Stack Pointer (non-secure) + \details Returns the current value of the non-secure Main Stack Pointer (MSP) when in secure state. + \return MSP Register value */ -__STATIC_FORCEINLINE uint32_t __STREXH(uint16_t value, volatile uint16_t *addr) +__STATIC_FORCEINLINE uint32_t __TZ_get_MSP_NS(void) { - uint32_t result; + uint32_t result; - __ASM volatile ("strexh %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" ((uint32_t)value) ); - return(result); + __ASM volatile ("MRS %0, msp_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief STR Exclusive (32 bit) - \details Executes a exclusive STR instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Main Stack Pointer + \details Assigns the given value to the Main Stack Pointer (MSP). + \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE uint32_t __STREXW(uint32_t value, volatile uint32_t *addr) +__STATIC_FORCEINLINE void __set_MSP(uint32_t topOfMainStack) { - uint32_t result; - - __ASM volatile ("strex %0, %2, %1" : "=&r" (result), "=Q" (*addr) : "r" (value) ); - return(result); + __ASM volatile ("MSR msp, %0" : : "r" (topOfMainStack) : ); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Remove the exclusive lock - \details Removes the exclusive lock which is created by LDREX. + \brief Set Main Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Main Stack Pointer (MSP) when in secure state. 
+ \param [in] topOfMainStack Main Stack Pointer value to set */ -__STATIC_FORCEINLINE void __CLREX(void) +__STATIC_FORCEINLINE void __TZ_set_MSP_NS(uint32_t topOfMainStack) { - __ASM volatile ("clrex" ::: "memory"); + __ASM volatile ("MSR msp_ns, %0" : : "r" (topOfMainStack) : ); } - -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ +#endif -#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (1..32) - \return Saturated value + \brief Get Stack Pointer (non-secure) + \details Returns the current value of the non-secure Stack Pointer (SP) when in secure state. + \return SP Register value */ -#define __SSAT(ARG1, ARG2) \ -__extension__ \ -({ \ - int32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("ssat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) +__STATIC_FORCEINLINE uint32_t __TZ_get_SP_NS(void) +{ + uint32_t result; + + __ASM volatile ("MRS %0, sp_ns" : "=r" (result) ); + return(result); +} /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] ARG1 Value to be saturated - \param [in] ARG2 Bit position to saturate to (0..31) - \return Saturated value + \brief Set Stack Pointer (non-secure) + \details Assigns the given value to the non-secure Stack Pointer (SP) when in secure state. + \param [in] topOfStack Stack Pointer value to set */ -#define __USAT(ARG1, ARG2) \ -__extension__ \ -({ \ - uint32_t __RES, __ARG1 = (ARG1); \ - __ASM volatile ("usat %0, %1, %2" : "=r" (__RES) : "I" (ARG2), "r" (__ARG1) : "cc" ); \ - __RES; \ - }) +__STATIC_FORCEINLINE void __TZ_set_SP_NS(uint32_t topOfStack) +{ + __ASM volatile ("MSR sp_ns, %0" : : "r" (topOfStack) : ); +} +#endif /** - \brief Rotate Right with Extend (32 bit) - \details Moves each bit of a bitstring right by one bit. - The carry input is shifted in at the left end of the bitstring. - \param [in] value Value to rotate - \return Rotated value + \brief Get Priority Mask + \details Returns the current state of the priority mask bit from the Priority Mask Register. + \return Priority Mask value */ -__STATIC_FORCEINLINE uint32_t __RRX(uint32_t value) +__STATIC_FORCEINLINE uint32_t __get_PRIMASK(void) { uint32_t result; - __ASM volatile ("rrx %0, %1" : __CMSIS_GCC_OUT_REG (result) : __CMSIS_GCC_USE_REG (value) ); + __ASM volatile ("MRS %0, primask" : "=r" (result) ); return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (8 bit) - \details Executes a Unprivileged LDRT instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Priority Mask (non-secure) + \details Returns the current state of the non-secure priority mask bit from the Priority Mask Register when in secure state. 
+ \return Priority Mask value */ -__STATIC_FORCEINLINE uint8_t __LDRBT(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PRIMASK_NS(void) { - uint32_t result; + uint32_t result; -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrbt %0, %1" : "=r" (result) : "Q" (*ptr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrbt %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); -#endif - return ((uint8_t) result); /* Add explicit type cast here */ + __ASM volatile ("MRS %0, primask_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief LDRT Unprivileged (16 bit) - \details Executes a Unprivileged LDRT instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Priority Mask + \details Assigns the given value to the Priority Mask Register. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE uint16_t __LDRHT(volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_PRIMASK(uint32_t priMask) { - uint32_t result; - -#if (__GNUC__ > 4) || (__GNUC__ == 4 && __GNUC_MINOR__ >= 8) - __ASM volatile ("ldrht %0, %1" : "=r" (result) : "Q" (*ptr) ); -#else - /* Prior to GCC 4.8, "Q" will be expanded to [rx, #0] which is not - accepted by assembler. So has to use following less efficient pattern. - */ - __ASM volatile ("ldrht %0, [%1]" : "=r" (result) : "r" (ptr) : "memory" ); -#endif - return ((uint16_t) result); /* Add explicit type cast here */ + __ASM volatile ("MSR primask, %0" : : "r" (priMask) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief LDRT Unprivileged (32 bit) - \details Executes a Unprivileged LDRT instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Set Priority Mask (non-secure) + \details Assigns the given value to the non-secure Priority Mask Register when in secure state. + \param [in] priMask Priority Mask */ -__STATIC_FORCEINLINE uint32_t __LDRT(volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_PRIMASK_NS(uint32_t priMask) { - uint32_t result; - - __ASM volatile ("ldrt %0, %1" : "=r" (result) : "Q" (*ptr) ); - return(result); + __ASM volatile ("MSR primask_ns, %0" : : "r" (priMask) : "memory"); } +#endif +#if ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) /** - \brief STRT Unprivileged (8 bit) - \details Executes a Unprivileged STRT instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Enable FIQ + \details Enables FIQ interrupts by clearing special-purpose register FAULTMASK. + Can only be executed in Privileged modes. */ -__STATIC_FORCEINLINE void __STRBT(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __enable_fault_irq(void) { - __ASM volatile ("strbt %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("cpsie f" : : : "memory"); } /** - \brief STRT Unprivileged (16 bit) - \details Executes a Unprivileged STRT instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Disable FIQ + \details Disables FIQ interrupts by setting special-purpose register FAULTMASK. + Can only be executed in Privileged modes. 
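/* Illustrative usage sketch (not part of the CMSIS header): a nesting-safe critical section
   that restores the previous PRIMASK state rather than unconditionally re-enabling IRQs. */
static uint32_t critical_enter(void)
{
    uint32_t primask = __get_PRIMASK();
    __disable_irq();
    return primask;
}

static void critical_exit(uint32_t primask)
{
    __set_PRIMASK(primask);
}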
*/ -__STATIC_FORCEINLINE void __STRHT(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __disable_fault_irq(void) { - __ASM volatile ("strht %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) ); + __ASM volatile ("cpsid f" : : : "memory"); } /** - \brief STRT Unprivileged (32 bit) - \details Executes a Unprivileged STRT instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Base Priority + \details Returns the current value of the Base Priority register. + \return Base Priority register value */ -__STATIC_FORCEINLINE void __STRT(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_BASEPRI(void) { - __ASM volatile ("strt %1, %0" : "=Q" (*ptr) : "r" (value) ); + uint32_t result; + + __ASM volatile ("MRS %0, basepri" : "=r" (result) ); + return(result); } -#else /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Signed Saturate - \details Saturates a signed value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (1..32) - \return Saturated value + \brief Get Base Priority (non-secure) + \details Returns the current value of the non-secure Base Priority register when in secure state. + \return Base Priority register value */ -__STATIC_FORCEINLINE int32_t __SSAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE uint32_t __TZ_get_BASEPRI_NS(void) { - if ((sat >= 1U) && (sat <= 32U)) - { - const int32_t max = (int32_t)((1U << (sat - 1U)) - 1U); - const int32_t min = -1 - max ; - if (val > max) - { - return max; - } - else if (val < min) - { - return min; - } - } - return val; + uint32_t result; + + __ASM volatile ("MRS %0, basepri_ns" : "=r" (result) ); + return(result); } +#endif + /** - \brief Unsigned Saturate - \details Saturates an unsigned value. - \param [in] value Value to be saturated - \param [in] sat Bit position to saturate to (0..31) - \return Saturated value + \brief Set Base Priority + \details Assigns the given value to the Base Priority register. + \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint32_t __USAT(int32_t val, uint32_t sat) +__STATIC_FORCEINLINE void __set_BASEPRI(uint32_t basePri) { - if (sat <= 31U) - { - const uint32_t max = ((1U << sat) - 1U); - if (val > (int32_t)max) - { - return max; - } - else if (val < 0) - { - return 0U; - } - } - return (uint32_t)val; + __ASM volatile ("MSR basepri, %0" : : "r" (basePri) : "memory"); } -#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ - (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ - (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ - -#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire (8 bit) - \details Executes a LDAB instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Set Base Priority (non-secure) + \details Assigns the given value to the non-secure Base Priority register when in secure state. 
+ \param [in] basePri Base Priority value to set */ -__STATIC_FORCEINLINE uint8_t __LDAB(volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_BASEPRI_NS(uint32_t basePri) { - uint32_t result; + __ASM volatile ("MSR basepri_ns, %0" : : "r" (basePri) : "memory"); +} +#endif - __ASM volatile ("ldab %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); + +/** + \brief Set Base Priority with condition + \details Assigns the given value to the Base Priority register only if BASEPRI masking is disabled, + or the new value increases the BASEPRI priority level. + \param [in] basePri Base Priority value to set + */ +__STATIC_FORCEINLINE void __set_BASEPRI_MAX(uint32_t basePri) +{ + __ASM volatile ("MSR basepri_max, %0" : : "r" (basePri) : "memory"); } /** - \brief Load-Acquire (16 bit) - \details Executes a LDAH instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Get Fault Mask + \details Returns the current value of the Fault Mask register. + \return Fault Mask register value */ -__STATIC_FORCEINLINE uint16_t __LDAH(volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FAULTMASK(void) { - uint32_t result; + uint32_t result; - __ASM volatile ("ldah %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); + __ASM volatile ("MRS %0, faultmask" : "=r" (result) ); + return(result); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire (32 bit) - \details Executes a LDA instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Get Fault Mask (non-secure) + \details Returns the current value of the non-secure Fault Mask register when in secure state. + \return Fault Mask register value */ -__STATIC_FORCEINLINE uint32_t __LDA(volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_FAULTMASK_NS(void) { - uint32_t result; + uint32_t result; - __ASM volatile ("lda %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); + __ASM volatile ("MRS %0, faultmask_ns" : "=r" (result) ); + return(result); } +#endif /** - \brief Store-Release (8 bit) - \details Executes a STLB instruction for 8 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Fault Mask + \details Assigns the given value to the Fault Mask register. + \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE void __STLB(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __set_FAULTMASK(uint32_t faultMask) { - __ASM volatile ("stlb %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + __ASM volatile ("MSR faultmask, %0" : : "r" (faultMask) : "memory"); } +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Store-Release (16 bit) - \details Executes a STLH instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Set Fault Mask (non-secure) + \details Assigns the given value to the non-secure Fault Mask register when in secure state. 
+ \param [in] faultMask Fault Mask value to set */ -__STATIC_FORCEINLINE void __STLH(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __TZ_set_FAULTMASK_NS(uint32_t faultMask) { - __ASM volatile ("stlh %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); + __ASM volatile ("MSR faultmask_ns, %0" : : "r" (faultMask) : "memory"); } +#endif + +#endif /* ((defined (__ARM_ARCH_7M__ ) && (__ARM_ARCH_7M__ == 1)) || \ + (defined (__ARM_ARCH_7EM__ ) && (__ARM_ARCH_7EM__ == 1)) || \ + (defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) ) */ + +#if ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) /** - \brief Store-Release (32 bit) - \details Executes a STL instruction for 32 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location + \brief Get Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Process Stack Pointer Limit (PSPLIM). + \return PSPLIM Register value */ -__STATIC_FORCEINLINE void __STL(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_PSPLIM(void) { - __ASM volatile ("stl %1, %0" : "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim" : "=r" (result) ); + return result; +#endif } - +#if (defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3)) /** - \brief Load-Acquire Exclusive (8 bit) - \details Executes a LDAB exclusive instruction for 8 bit value. - \param [in] ptr Pointer to data - \return value of type uint8_t at (*ptr) + \brief Get Process Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \return PSPLIM Register value */ -__STATIC_FORCEINLINE uint8_t __LDAEXB(volatile uint8_t *ptr) +__STATIC_FORCEINLINE uint32_t __TZ_get_PSPLIM_NS(void) { - uint32_t result; - - __ASM volatile ("ldaexb %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint8_t) result); +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, psplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Load-Acquire Exclusive (16 bit) - \details Executes a LDAH exclusive instruction for 16 bit values. - \param [in] ptr Pointer to data - \return value of type uint16_t at (*ptr) + \brief Set Process Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Process Stack Pointer Limit (PSPLIM). 
+ \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE uint16_t __LDAEXH(volatile uint16_t *ptr) +__STATIC_FORCEINLINE void __set_PSPLIM(uint32_t ProcStackPtrLimit) { - uint32_t result; +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim, %0" : : "r" (ProcStackPtrLimit)); +#endif +} - __ASM volatile ("ldaexh %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return ((uint16_t) result); + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Process Stack Pointer (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Process Stack Pointer Limit (PSPLIM) when in secure state. + \param [in] ProcStackPtrLimit Process Stack Pointer Limit value to set + */ +__STATIC_FORCEINLINE void __TZ_set_PSPLIM_NS(uint32_t ProcStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure PSPLIM is RAZ/WI + (void)ProcStackPtrLimit; +#else + __ASM volatile ("MSR psplim_ns, %0\n" : : "r" (ProcStackPtrLimit)); +#endif } +#endif /** - \brief Load-Acquire Exclusive (32 bit) - \details Executes a LDA exclusive instruction for 32 bit values. - \param [in] ptr Pointer to data - \return value of type uint32_t at (*ptr) + \brief Get Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always in non-secure + mode. + + \details Returns the current value of the Main Stack Pointer Limit (MSPLIM). + \return MSPLIM Register value */ -__STATIC_FORCEINLINE uint32_t __LDAEX(volatile uint32_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_MSPLIM(void) { - uint32_t result; +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim" : "=r" (result) ); + return result; +#endif +} - __ASM volatile ("ldaex %0, %1" : "=r" (result) : "Q" (*ptr) : "memory" ); - return(result); + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Get Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence zero is returned always. + + \details Returns the current value of the non-secure Main Stack Pointer Limit(MSPLIM) when in secure state. + \return MSPLIM Register value + */ +__STATIC_FORCEINLINE uint32_t __TZ_get_MSPLIM_NS(void) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + return 0U; +#else + uint32_t result; + __ASM volatile ("MRS %0, msplim_ns" : "=r" (result) ); + return result; +#endif } +#endif /** - \brief Store-Release Exclusive (8 bit) - \details Executes a STLB exclusive instruction for 8 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set Main Stack Pointer Limit + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored in non-secure + mode. + + \details Assigns the given value to the Main Stack Pointer Limit (MSPLIM). + \param [in] MainStackPtrLimit Main Stack Pointer Limit value to set */ -__STATIC_FORCEINLINE uint32_t __STLEXB(uint8_t value, volatile uint8_t *ptr) +__STATIC_FORCEINLINE void __set_MSPLIM(uint32_t MainStackPtrLimit) { - uint32_t result; +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) && \ + (!defined (__ARM_FEATURE_CMSE) || (__ARM_FEATURE_CMSE < 3))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim, %0" : : "r" (MainStackPtrLimit)); +#endif +} - __ASM volatile ("stlexb %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return(result); + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) +/** + \brief Set Main Stack Pointer Limit (non-secure) + Devices without ARMv8-M Main Extensions (i.e. Cortex-M23) lack the non-secure + Stack Pointer Limit register hence the write is silently ignored. + + \details Assigns the given value to the non-secure Main Stack Pointer Limit (MSPLIM) when in secure state. + \param [in] MainStackPtrLimit Main Stack Pointer value to set + */ +__STATIC_FORCEINLINE void __TZ_set_MSPLIM_NS(uint32_t MainStackPtrLimit) +{ +#if (!(defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1))) + // without main extensions, the non-secure MSPLIM is RAZ/WI + (void)MainStackPtrLimit; +#else + __ASM volatile ("MSR msplim_ns, %0" : : "r" (MainStackPtrLimit)); +#endif } +#endif + +#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ + (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ /** - \brief Store-Release Exclusive (16 bit) - \details Executes a STLH exclusive instruction for 16 bit values. - \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Get FPSCR + \details Returns the current value of the Floating Point Status/Control register. + \return Floating Point Status/Control register value */ -__STATIC_FORCEINLINE uint32_t __STLEXH(uint16_t value, volatile uint16_t *ptr) +__STATIC_FORCEINLINE uint32_t __get_FPSCR(void) { - uint32_t result; +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_get_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + return __builtin_arm_get_fpscr(); +#else + uint32_t result; - __ASM volatile ("stlexh %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return(result); + __ASM volatile ("VMRS %0, fpscr" : "=r" (result) ); + return(result); +#endif +#else + return(0U); +#endif } /** - \brief Store-Release Exclusive (32 bit) - \details Executes a STL exclusive instruction for 32 bit values. 
- \param [in] value Value to store - \param [in] ptr Pointer to location - \return 0 Function succeeded - \return 1 Function failed + \brief Set FPSCR + \details Assigns the given value to the Floating Point Status/Control register. + \param [in] fpscr Floating Point Status/Control value to set */ -__STATIC_FORCEINLINE uint32_t __STLEX(uint32_t value, volatile uint32_t *ptr) +__STATIC_FORCEINLINE void __set_FPSCR(uint32_t fpscr) { - uint32_t result; - - __ASM volatile ("stlex %0, %2, %1" : "=&r" (result), "=Q" (*ptr) : "r" ((uint32_t)value) : "memory" ); - return(result); +#if ((defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U)) && \ + (defined (__FPU_USED ) && (__FPU_USED == 1U)) ) +#if __has_builtin(__builtin_arm_set_fpscr) +// Re-enable using built-in when GCC has been fixed +// || (__GNUC__ > 7) || (__GNUC__ == 7 && __GNUC_MINOR__ >= 2) + /* see https://gcc.gnu.org/ml/gcc-patches/2017-04/msg00443.html */ + __builtin_arm_set_fpscr(fpscr); +#else + __ASM volatile ("VMSR fpscr, %0" : : "r" (fpscr) : "vfpcc", "memory"); +#endif +#else + (void)fpscr; +#endif } -#endif /* ((defined (__ARM_ARCH_8M_MAIN__ ) && (__ARM_ARCH_8M_MAIN__ == 1)) || \ - (defined (__ARM_ARCH_8M_BASE__ ) && (__ARM_ARCH_8M_BASE__ == 1)) ) */ -/*@}*/ /* end of group CMSIS_Core_InstructionInterface */ +/*@} end of CMSIS_Core_RegAccFunctions */ /* ################### Compiler specific Intrinsics ########################### */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_iccarm.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_iccarm.h index 45e90af..65b824b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_iccarm.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_iccarm.h @@ -1,14 +1,14 @@ /**************************************************************************//** * @file cmsis_iccarm.h * @brief CMSIS compiler ICCARM (IAR Compiler for Arm) header file - * @version V5.2.0 - * @date 28. January 2020 + * @version V5.3.0 + * @date 14. April 2021 ******************************************************************************/ //------------------------------------------------------------------------------ // -// Copyright (c) 2017-2020 IAR Systems -// Copyright (c) 2017-2019 Arm Limited. All rights reserved. +// Copyright (c) 2017-2021 IAR Systems +// Copyright (c) 2017-2021 Arm Limited. All rights reserved. 
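/*
 * Illustrative usage sketch (editor's note, not part of the CMSIS headers above):
 * a save/restore critical section built from the register-access intrinsics
 * shown in the cmsis_gcc.h hunk above (__get_PRIMASK / __set_PRIMASK). Assumes
 * the device's CMSIS core header has already been included so these inlines
 * and uint32_t are available; names below are only examples.
 */
static volatile uint32_t shared_counter;

static void increment_shared(void)
{
  uint32_t primask = __get_PRIMASK();  /* remember the current mask state     */
  __set_PRIMASK(1U);                   /* mask all configurable interrupts    */
  shared_counter++;                    /* critical section: read-modify-write */
  __set_PRIMASK(primask);              /* restore the caller's mask state     */
}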
// // SPDX-License-Identifier: Apache-2.0 // @@ -267,6 +267,24 @@ __packed struct __iar_u32 { uint32_t v; }; #define __VECTOR_TABLE_ATTRIBUTE @".intvec" #endif +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +#ifndef __STACK_SEAL +#define __STACK_SEAL STACKSEAL$$Base +#endif + +#ifndef __TZ_STACK_SEAL_SIZE +#define __TZ_STACK_SEAL_SIZE 8U +#endif + +#ifndef __TZ_STACK_SEAL_VALUE +#define __TZ_STACK_SEAL_VALUE 0xFEF5EDA5FEF5EDA5ULL +#endif + +__STATIC_FORCEINLINE void __TZ_set_STACKSEAL_S (uint32_t* stackTop) { + *((uint64_t *)stackTop) = __TZ_STACK_SEAL_VALUE; +} +#endif + #ifndef __ICCARM_INTRINSICS_VERSION__ #define __ICCARM_INTRINSICS_VERSION__ 0 #endif @@ -337,7 +355,13 @@ __packed struct __iar_u32 { uint32_t v; }; #define __set_BASEPRI(VALUE) (__arm_wsr("BASEPRI", (VALUE))) #define __set_BASEPRI_MAX(VALUE) (__arm_wsr("BASEPRI_MAX", (VALUE))) - #define __set_CONTROL(VALUE) (__arm_wsr("CONTROL", (VALUE))) + +__STATIC_FORCEINLINE void __set_CONTROL(uint32_t control) +{ + __arm_wsr("CONTROL", control); + __iar_builtin_ISB(); +} + #define __set_FAULTMASK(VALUE) (__arm_wsr("FAULTMASK", (VALUE))) #define __set_MSP(VALUE) (__arm_wsr("MSP", (VALUE))) @@ -359,7 +383,13 @@ __packed struct __iar_u32 { uint32_t v; }; #endif #define __TZ_get_CONTROL_NS() (__arm_rsr("CONTROL_NS")) - #define __TZ_set_CONTROL_NS(VALUE) (__arm_wsr("CONTROL_NS", (VALUE))) + +__STATIC_FORCEINLINE void __TZ_set_CONTROL_NS(uint32_t control) +{ + __arm_wsr("CONTROL_NS", control); + __iar_builtin_ISB(); +} + #define __TZ_get_PSP_NS() (__arm_rsr("PSP_NS")) #define __TZ_set_PSP_NS(VALUE) (__arm_wsr("PSP_NS", (VALUE))) #define __TZ_get_MSP_NS() (__arm_rsr("MSP_NS")) @@ -681,6 +711,7 @@ __packed struct __iar_u32 { uint32_t v; }; __IAR_FT void __TZ_set_CONTROL_NS(uint32_t value) { __asm volatile("MSR CONTROL_NS,%0" :: "r" (value)); + __iar_builtin_ISB(); } __IAR_FT uint32_t __TZ_get_PSP_NS(void) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_version.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_version.h index 2f048e4..8b4765f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_version.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/cmsis_version.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file cmsis_version.h * @brief CMSIS Core(M) Version definitions - * @version V5.0.4 - * @date 23. July 2019 + * @version V5.0.5 + * @date 02. February 2022 ******************************************************************************/ /* - * Copyright (c) 2009-2019 ARM Limited. All rights reserved. + * Copyright (c) 2009-2022 ARM Limited. All rights reserved. 
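/*
 * Illustrative usage sketch (editor's note, not part of the CMSIS headers above):
 * secure startup code can seal the top of the secure main stack with the
 * __TZ_set_STACKSEAL_S helper introduced in the cmsis_iccarm.h hunk above.
 * The __STACK_SEAL symbol below is an assumption; the actual seal location
 * depends on the toolchain's linker configuration and startup code.
 */
#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U)
extern uint64_t __STACK_SEAL;                      /* assumed linker-provided seal location   */

static void seal_secure_stack(void)
{
  /* Writes the 64-bit seal value 0xFEF5EDA5FEF5EDA5 at the seal location. */
  __TZ_set_STACKSEAL_S((uint32_t *)&__STACK_SEAL);
}
#endif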
* * SPDX-License-Identifier: Apache-2.0 * @@ -33,7 +33,7 @@ /* CMSIS Version definitions */ #define __CM_CMSIS_VERSION_MAIN ( 5U) /*!< [31:16] CMSIS Core(M) main version */ -#define __CM_CMSIS_VERSION_SUB ( 4U) /*!< [15:0] CMSIS Core(M) sub version */ +#define __CM_CMSIS_VERSION_SUB ( 6U) /*!< [15:0] CMSIS Core(M) sub version */ #define __CM_CMSIS_VERSION ((__CM_CMSIS_VERSION_MAIN << 16U) | \ __CM_CMSIS_VERSION_SUB ) /*!< CMSIS Core(M) version number */ #endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_armv81mml.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_armv81mml.h index 18bcb04..fa1afb8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_armv81mml.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_armv81mml.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_armv81mml.h * @brief CMSIS Armv8.1-M Mainline Core Peripheral Access Layer Header File - * @version V1.4.0 - * @date 15. April 2020 + * @version V1.4.2 + * @date 13. October 2021 ******************************************************************************/ /* - * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -210,14 +210,14 @@ #define __FPU_PRESENT 0U #warning "__FPU_PRESENT not defined in device header file; using default!" #endif - + #if __FPU_PRESENT != 0U #ifndef __FPU_DP #define __FPU_DP 0U #warning "__FPU_DP not defined in device header file; using default!" #endif #endif - + #ifndef __MPU_PRESENT #define __MPU_PRESENT 0U #warning "__MPU_PRESENT not defined in device header file; using default!" @@ -232,7 +232,7 @@ #define __DCACHE_PRESENT 0U #warning "__DCACHE_PRESENT not defined in device header file; using default!" #endif - + #ifndef __PMU_PRESENT #define __PMU_PRESENT 0U #warning "__PMU_PRESENT not defined in device header file; using default!" @@ -261,7 +261,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" 
@@ -526,7 +526,7 @@ typedef struct __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ - __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ @@ -535,7 +535,10 @@ typedef struct __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ - uint32_t RESERVED3[92U]; + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ uint32_t RESERVED4[14U]; @@ -766,22 +769,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault 
Status Register (part of SCB Configurable Fault Status Register) */ @@ -1490,15 +1493,14 @@ typedef struct uint32_t RESERVED11[108]; __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ - uint32_t RESERVED12[4]; + uint32_t RESERVED12[3]; __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ uint32_t RESERVED13[3]; __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ - __IOM uint32_t PIDR1; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 1 */ - __IOM uint32_t PIDR2; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 2 */ - __IOM uint32_t PIDR3; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 3 */ - uint32_t RESERVED14[3]; + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ @@ -3158,6 +3160,15 @@ typedef struct /*@} */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + /******************************************************************************* * Hardware Abstraction Layer diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_armv8mml.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_armv8mml.h index 0632732..ede72ec 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_armv8mml.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_armv8mml.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_armv8mml.h * @brief CMSIS Armv8-M Mainline Core Peripheral Access Layer Header File - * @version V5.2.1 - * @date 19. August 2020 + * @version V5.2.4 + * @date 30. May 2022 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2022 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -254,7 +254,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -287,7 +287,7 @@ #define __OM volatile /*! Defines 'write only' structure member permissions */ #define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ -/*@} end of group ARMv8MML */ +/** @} end of group ARMv8MML */ @@ -452,7 +452,7 @@ typedef union #define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ #define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ -/*@} end of group CMSIS_CORE */ +/** @} end of group CMSIS_CORE */ /** @@ -488,7 +488,7 @@ typedef struct #define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ #define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ -/*@} end of group CMSIS_NVIC */ +/** @} end of group CMSIS_NVIC */ /** @@ -519,7 +519,7 @@ typedef struct __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ - __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ @@ -528,7 +528,10 @@ typedef struct __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ - uint32_t RESERVED3[92U]; + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ uint32_t RESERVED4[15U]; __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ @@ -746,22 +749,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ 
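/*
 * Illustrative usage sketch (editor's note, not part of the CMSIS headers above):
 * the MMFSR position/mask macros updated in this hunk keep the same numeric
 * values (SCB_CFSR_MEMFAULTSR_Pos and SCB_SHCSR_MEMFAULTACT_Pos are both 0);
 * only the base macro they are derived from changes. Typical use is decoding
 * a MemManage fault. Assumes the device's CMSIS core header has been included
 * (it defines SCB and these macros); the handler body is only an example.
 */
void MemManage_Handler(void)
{
  uint32_t cfsr = SCB->CFSR;

  if (cfsr & SCB_CFSR_MMARVALID_Msk)       /* MMFAR holds a valid fault address */
  {
    uint32_t fault_addr = SCB->MMFAR;
    (void)fault_addr;                      /* e.g. log it over SWO or UART      */
  }
  if (cfsr & SCB_CFSR_DACCVIOL_Msk)  { /* data access violation        */ }
  if (cfsr & SCB_CFSR_IACCVIOL_Msk)  { /* instruction access violation */ }
  if (cfsr & SCB_CFSR_MSTKERR_Msk)   { /* fault on exception stacking  */ }

  SCB->CFSR = cfsr;                        /* MMFSR bits are write-1-to-clear   */

  for (;;) { /* halt here; recovery is application-specific */ }
}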
-#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -921,7 +924,7 @@ typedef struct #define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ #define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ -/*@} end of group CMSIS_SCB */ +/** @} end of group CMSIS_SCB */ /** @@ -946,7 +949,7 @@ typedef struct #define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ #define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ -/*@} end of group CMSIS_SCnotSCB */ +/** @} end of group CMSIS_SCnotSCB */ /** @@ -998,7 +1001,7 @@ typedef struct #define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ #define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ -/*@} end of group CMSIS_SysTick */ +/** @} end of group CMSIS_SysTick */ /** @@ -1098,7 +1101,7 @@ typedef struct #define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ #define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ -/*@}*/ /* end of group CMSIS_ITM */ +/** @}*/ /* end of group CMSIS_ITM */ /** @@ -1284,7 +1287,7 @@ typedef struct #define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ #define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ -/*@}*/ /* end of group CMSIS_DWT */ +/** @}*/ /* end of group CMSIS_DWT */ /** @@ -1382,7 +1385,7 @@ typedef struct #define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ #define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ -/*@}*/ /* end of group CMSIS_TPI */ +/** @}*/ /* end of group CMSIS_TPI */ #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) @@ -1494,7 +1497,7 @@ typedef struct #define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ #define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ -/*@} end of group CMSIS_MPU */ +/** @} end of group CMSIS_MPU */ #endif @@ -1581,7 +1584,7 @@ typedef struct #define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ #define SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ -/*@} end of group CMSIS_SAU */ +/** @} end of group CMSIS_SAU */ #endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ @@ -1717,7 +1720,7 @@ typedef struct #define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ #define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ -/*@} end of group CMSIS_FPU */ +/** @} end of group CMSIS_FPU */ /* CoreDebug is deprecated. 
replaced by DCB (Debug Control Block) */ /** @@ -1851,7 +1854,7 @@ typedef struct #define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ #define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ -/*@} end of group CMSIS_CoreDebug */ +/** @} end of group CMSIS_CoreDebug */ /** @@ -2007,7 +2010,7 @@ typedef struct #define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ #define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ -/*@} end of group CMSIS_DCB */ +/** @} end of group CMSIS_DCB */ @@ -2081,7 +2084,7 @@ typedef struct #define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ -/*@} end of group CMSIS_DIB */ +/** @} end of group CMSIS_DIB */ /** @@ -2107,7 +2110,7 @@ typedef struct */ #define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) -/*@} end of group CMSIS_core_bitfield */ +/** @} end of group CMSIS_core_bitfield */ /** @@ -2179,8 +2182,17 @@ typedef struct #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ #endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ -/*@} */ +/** @} */ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ /******************************************************************************* @@ -2838,7 +2850,7 @@ __STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) } #endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ -/*@} end of CMSIS_Core_NVICFunctions */ +/** @} end of CMSIS_Core_NVICFunctions */ /* ########################## MPU functions #################################### */ @@ -2884,7 +2896,7 @@ __STATIC_INLINE uint32_t SCB_GetFPUType(void) } -/*@} end of CMSIS_Core_FpuFunctions */ +/** @} end of CMSIS_Core_FpuFunctions */ /* ########################## Cache functions #################################### */ @@ -2927,7 +2939,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void) #endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ -/*@} end of CMSIS_Core_SAUFunctions */ +/** @} end of CMSIS_Core_SAUFunctions */ @@ -2940,7 +2952,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void) @{ */ - + /** \brief Set Debug Authentication Control Register \details writes to Debug Authentication Control register. @@ -2994,7 +3006,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) } #endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ -/*@} end of CMSIS_Core_DCBFunctions */ +/** @} end of CMSIS_Core_DCBFunctions */ @@ -3007,7 +3019,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) @{ */ - + /** \brief Get Debug Authentication Status Register \details Reads Debug Authentication Status register. 
@@ -3031,7 +3043,7 @@ __STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) } #endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ -/*@} end of CMSIS_Core_DCBFunctions */ +/** @} end of CMSIS_Core_DCBFunctions */ @@ -3105,7 +3117,7 @@ __STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) #endif -/*@} end of CMSIS_Core_SysTickFunctions */ +/** @} end of CMSIS_Core_SysTickFunctions */ @@ -3183,7 +3195,7 @@ __STATIC_INLINE int32_t ITM_CheckChar (void) } } -/*@} end of CMSIS_core_DebugFunctions */ +/** @} end of CMSIS_core_DebugFunctions */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm3.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm3.h index 33c0f57..b73615f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm3.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm3.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm3.h * @brief CMSIS Cortex-M3 Core Peripheral Access Layer Header File - * @version V5.1.1 - * @date 27. March 2020 + * @version V5.1.2 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -146,7 +146,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -565,19 +565,19 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< 
SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm33.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm33.h index 6294184..f964b15 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm33.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm33.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm33.h * @brief CMSIS Cortex-M33 Core Peripheral Access Layer Header File - * @version V5.2.1 - * @date 19. August 2020 + * @version V5.2.3 + * @date 13. October 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -254,7 +254,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -519,7 +519,7 @@ typedef struct __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ - __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ @@ -528,7 +528,10 @@ typedef struct __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ - uint32_t RESERVED3[92U]; + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ uint32_t RESERVED4[15U]; __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ @@ -746,22 +749,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ 
#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -2257,6 +2260,15 @@ typedef struct /*@} */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + /******************************************************************************* * Hardware Abstraction Layer @@ -3008,7 +3020,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void) @{ */ - + /** \brief Set Debug Authentication Control Register \details writes to Debug Authentication Control register. @@ -3075,7 +3087,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) @{ */ - + /** \brief Get Debug Authentication Status Register \details Reads Debug Authentication Status register. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm35p.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm35p.h index a1e51ad..c8bfddd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm35p.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm35p.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm35p.h * @brief CMSIS Cortex-M35P Core Peripheral Access Layer Header File - * @version V1.1.1 - * @date 19. August 2020 + * @version V1.1.3 + * @date 13. October 2021 ******************************************************************************/ /* - * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * Copyright (c) 2018-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -249,12 +249,12 @@ #define __DSP_PRESENT 0U #warning "__DSP_PRESENT not defined in device header file; using default!" #endif - + #ifndef __VTOR_PRESENT #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" 
#endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -519,7 +519,7 @@ typedef struct __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ - __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ @@ -528,7 +528,10 @@ typedef struct __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ - uint32_t RESERVED3[92U]; + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ uint32_t RESERVED4[15U]; __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ @@ -746,22 +749,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB 
CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -2257,6 +2260,15 @@ typedef struct /*@} */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ +/*@} */ + /******************************************************************************* * Hardware Abstraction Layer @@ -3008,7 +3020,7 @@ __STATIC_INLINE void TZ_SAU_Disable(void) @{ */ - + /** \brief Set Debug Authentication Control Register \details writes to Debug Authentication Control register. @@ -3075,7 +3087,7 @@ __STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) @{ */ - + /** \brief Get Debug Authentication Status Register \details Reads Debug Authentication Status register. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm4.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm4.h index dfdc41a..a347f36 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm4.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm4.h @@ -1,8 +1,8 @@ /**************************************************************************//** * @file core_cm4.h * @brief CMSIS Cortex-M4 Core Peripheral Access Layer Header File - * @version V5.1.1 - * @date 27. March 2020 + * @version V5.1.2 + * @date 04. June 2021 ******************************************************************************/ /* * Copyright (c) 2009-2020 Arm Limited. All rights reserved. @@ -198,7 +198,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" 
@@ -623,22 +623,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm55.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm55.h index 03c1aa5..2f40d61 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm55.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm55.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm55.h * @brief CMSIS Cortex-M55 Core Peripheral Access Layer Header File - * @version V1.1.0 - * @date 15. April 2020 + * @version V1.2.5 + * @date 12. May 2022 ******************************************************************************/ /* - * Copyright (c) 2018-2020 Arm Limited. All rights reserved. + * Copyright (c) 2018-2022 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -58,7 +58,7 @@ * CMSIS definitions ******************************************************************************/ /** - \ingroup Cortex_CM55 + \ingroup Cortex_M55 @{ */ @@ -210,7 +210,7 @@ #define __FPU_PRESENT 0U #warning "__FPU_PRESENT not defined in device header file; using default!" 
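The __FPU_PRESENT / __VTOR_PRESENT / __NVIC_PRIO_BITS warnings visible in the surrounding context lines fire only when the device header fails to define those feature macros before pulling in the core header. A hypothetical device-header fragment, illustrative only and not taken from this patch, would provide them like this:

/* Hypothetical <device>.h fragment; the values are examples and it is assumed
   that the IRQn_Type enumeration and the other device-specific items are
   defined earlier in the same header, as CMSIS device headers normally do.  */
#define __CM4_REV              0x0001U  /* core revision                                  */
#define __MPU_PRESENT          1U       /* device implements an MPU                       */
#define __FPU_PRESENT          1U       /* device implements the single-precision FPU     */
#define __VTOR_PRESENT         1U       /* vector table offset register is implemented    */
#define __NVIC_PRIO_BITS       3U       /* priority bits implemented in the NVIC          */
#define __Vendor_SysTickConfig 0U       /* use the standard CMSIS SysTick_Config()        */

#include "core_cm4.h"                   /* the defaults guarded by the #warning
                                           directives are then never used                 */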
#endif - + #if __FPU_PRESENT != 0U #ifndef __FPU_DP #define __FPU_DP 0U @@ -232,12 +232,12 @@ #define __DCACHE_PRESENT 0U #warning "__DCACHE_PRESENT not defined in device header file; using default!" #endif - + #ifndef __VTOR_PRESENT #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __PMU_PRESENT #define __PMU_PRESENT 0U #warning "__PMU_PRESENT not defined in device header file; using default!" @@ -303,9 +303,11 @@ Core Register contain: - Core Register - Core NVIC Register + - Core EWIC Register - Core SCB Register - Core SysTick Register - Core Debug Register + - Core PMU Register - Core MPU Register - Core SAU Register - Core FPU Register @@ -526,7 +528,7 @@ typedef struct __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ - __IM uint32_t ID_ADR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ @@ -535,7 +537,10 @@ typedef struct __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ - uint32_t RESERVED3[92U]; + uint32_t RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ uint32_t RESERVED4[14U]; @@ -766,22 +771,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define 
SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ @@ -987,13 +992,13 @@ typedef struct /** \ingroup CMSIS_core_register - \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) - \brief Type definitions for the System Control and ID Register not in the SCB + \defgroup CMSIS_ICB Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register @{ */ /** - \brief Structure type to access the System Control and ID Register not in the SCB. + \brief Structure type to access the Implementation Control Block (ICB). */ typedef struct { @@ -1001,13 +1006,56 @@ typedef struct __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ -} SCnSCB_Type; +} ICB_Type; + +/* Auxiliary Control Register Definitions */ +#define ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISDI_Pos 16U /*!< ACTLR: DISDI Position */ +#define ICB_ACTLR_DISDI_Msk (3UL << ICB_ACTLR_DISDI_Pos) /*!< ACTLR: DISDI Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +#define ICB_ACTLR_DISOLAP_Pos 7U /*!< ACTLR: DISOLAP Position */ +#define ICB_ACTLR_DISOLAP_Msk (1UL << ICB_ACTLR_DISOLAP_Pos) /*!< ACTLR: DISOLAP Mask */ + +#define ICB_ACTLR_DISOLAPS_Pos 6U /*!< ACTLR: DISOLAPS Position */ +#define ICB_ACTLR_DISOLAPS_Msk (1UL << ICB_ACTLR_DISOLAPS_Pos) /*!< ACTLR: DISOLAPS Mask */ + +#define ICB_ACTLR_DISLOBR_Pos 5U /*!< ACTLR: DISLOBR Position */ +#define ICB_ACTLR_DISLOBR_Msk (1UL 
<< ICB_ACTLR_DISLOBR_Pos) /*!< ACTLR: DISLOBR Mask */ + +#define ICB_ACTLR_DISLO_Pos 4U /*!< ACTLR: DISLO Position */ +#define ICB_ACTLR_DISLO_Msk (1UL << ICB_ACTLR_DISLO_Pos) /*!< ACTLR: DISLO Mask */ + +#define ICB_ACTLR_DISLOLEP_Pos 3U /*!< ACTLR: DISLOLEP Position */ +#define ICB_ACTLR_DISLOLEP_Msk (1UL << ICB_ACTLR_DISLOLEP_Pos) /*!< ACTLR: DISLOLEP Mask */ + +#define ICB_ACTLR_DISFOLD_Pos 2U /*!< ACTLR: DISFOLD Position */ +#define ICB_ACTLR_DISFOLD_Msk (1UL << ICB_ACTLR_DISFOLD_Pos) /*!< ACTLR: DISFOLD Mask */ /* Interrupt Controller Type Register Definitions */ -#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ -#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ -/*@} end of group CMSIS_SCnotSCB */ +/*@} end of group CMSIS_ICB */ /** @@ -1086,13 +1134,15 @@ typedef struct __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ uint32_t RESERVED2[15U]; __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ - uint32_t RESERVED3[32U]; - uint32_t RESERVED4[43U]; - __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ - __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED3[27U]; + __IM uint32_t ITREAD; /*!< Offset: 0xEF0 (R/ ) ITM Integration Read Register */ + uint32_t RESERVED4[1U]; + __OM uint32_t ITWRITE; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ uint32_t RESERVED5[1U]; + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED6[46U]; __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ - uint32_t RESERVED6[3U]; + uint32_t RESERVED7[3U]; __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ @@ -1150,15 +1200,23 @@ typedef struct #define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ #define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ -/* ITM Lock Status Register Definitions */ -#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ -#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ +/* ITM Integration Read Register Definitions */ +#define ITM_ITREAD_AFVALID_Pos 1U /*!< ITM ITREAD: AFVALID Position */ +#define ITM_ITREAD_AFVALID_Msk (0x1UL << ITM_ITREAD_AFVALID_Pos) /*!< ITM ITREAD: AFVALID Mask */ + +#define ITM_ITREAD_ATREADY_Pos 0U /*!< ITM ITREAD: ATREADY Position */ +#define ITM_ITREAD_ATREADY_Msk (0x1UL /*<< ITM_ITREAD_ATREADY_Pos*/) /*!< ITM ITREAD: ATREADY Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_ITWRITE_AFVALID_Pos 1U /*!< ITM ITWRITE: AFVALID Position */ +#define ITM_ITWRITE_AFVALID_Msk (0x1UL << ITM_ITWRITE_AFVALID_Pos) /*!< ITM ITWRITE: AFVALID Mask */ -#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ -#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ +#define ITM_ITWRITE_ATREADY_Pos 0U /*!< ITM ITWRITE: ATREADY Position */ +#define ITM_ITWRITE_ATREADY_Msk (0x1UL /*<< ITM_ITWRITE_ATREADY_Pos*/) /*!< ITM ITWRITE: ATREADY Mask */ -#define 
ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ -#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ +/* ITM Integration Mode Control Register Definitions */ +#define ITM_ITCTRL_IME_Pos 0U /*!< ITM ITCTRL: IME Position */ +#define ITM_ITCTRL_IME_Msk (0x1UL /*<< ITM_ITCTRL_IME_Pos*/) /*!< ITM ITCTRL: IME Mask */ /*@}*/ /* end of group CMSIS_ITM */ @@ -1190,66 +1248,34 @@ typedef struct __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ uint32_t RESERVED3[1U]; __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ - uint32_t RESERVED4[1U]; + __IOM uint32_t VMASK1; /*!< Offset: 0x03C (R/W) Comparator Value Mask 1 */ __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ - uint32_t RESERVED5[1U]; + uint32_t RESERVED4[1U]; __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ - uint32_t RESERVED6[1U]; + uint32_t RESERVED5[1U]; __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ - uint32_t RESERVED7[1U]; + uint32_t RESERVED6[1U]; __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ - uint32_t RESERVED8[1U]; + __IOM uint32_t VMASK3; /*!< Offset: 0x05C (R/W) Comparator Value Mask 3 */ __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ - uint32_t RESERVED9[1U]; + uint32_t RESERVED7[1U]; __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ - uint32_t RESERVED10[1U]; + uint32_t RESERVED8[1U]; __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ - uint32_t RESERVED11[1U]; + uint32_t RESERVED9[1U]; __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ - uint32_t RESERVED12[1U]; + uint32_t RESERVED10[1U]; __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ - uint32_t RESERVED13[1U]; + uint32_t RESERVED11[1U]; __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ - uint32_t RESERVED14[1U]; + uint32_t RESERVED12[1U]; __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ - uint32_t RESERVED15[1U]; + uint32_t RESERVED13[1U]; __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ - uint32_t RESERVED16[1U]; - __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ - uint32_t RESERVED17[1U]; - __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ - uint32_t RESERVED18[1U]; - __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ - uint32_t RESERVED19[1U]; - __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ - uint32_t RESERVED20[1U]; - __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ - uint32_t RESERVED21[1U]; - __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ - uint32_t RESERVED22[1U]; - __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ - uint32_t RESERVED23[1U]; - __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ - uint32_t RESERVED24[1U]; - __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ - uint32_t RESERVED25[1U]; - __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ - uint32_t RESERVED26[1U]; - __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ - uint32_t RESERVED27[1U]; - __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ - uint32_t RESERVED28[1U]; - __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 
14 */ - uint32_t RESERVED29[1U]; - __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ - uint32_t RESERVED30[1U]; - __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ - uint32_t RESERVED31[1U]; - __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ - uint32_t RESERVED32[934U]; - __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ - uint32_t RESERVED33[1U]; - __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ + uint32_t RESERVED14[968U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Architecture Register */ + uint32_t RESERVED15[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ } DWT_Type; /* DWT Control Register Definitions */ @@ -1341,7 +1367,7 @@ typedef struct #define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ #define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ -#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ #define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ #define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ @@ -1349,6 +1375,456 @@ typedef struct /*@}*/ /* end of group CMSIS_DWT */ +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). + */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + __IOM uint32_t PFCR; /*!< Offset: 0x004 (R/W) Prefetcher Control Register */ + uint32_t RESERVED1[2U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/* MEMSYSCTL Memory System Control Register (MSCR) Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (0x1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (0x1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL 
MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_TECCCHKDIS_Pos 4U /*!< MEMSYSCTL MSCR: TECCCHKDIS Position */ +#define MEMSYSCTL_MSCR_TECCCHKDIS_Msk (0x1UL << MEMSYSCTL_MSCR_TECCCHKDIS_Pos) /*!< MEMSYSCTL MSCR: TECCCHKDIS Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (0x1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (0x1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (0x1UL << MEMSYSCTL_MSCR_ECCEN_Pos) /*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/* MEMSYSCTL Prefetcher Control Register (PFCR) Register Definitions */ +#define MEMSYSCTL_PFCR_MAX_OS_Pos 7U /*!< MEMSYSCTL PFCR: MAX_OS Position */ +#define MEMSYSCTL_PFCR_MAX_OS_Msk (0x7UL << MEMSYSCTL_PFCR_MAX_OS_Pos) /*!< MEMSYSCTL PFCR: MAX_OS Mask */ + +#define MEMSYSCTL_PFCR_MAX_LA_Pos 4U /*!< MEMSYSCTL PFCR: MAX_LA Position */ +#define MEMSYSCTL_PFCR_MAX_LA_Msk (0x7UL << MEMSYSCTL_PFCR_MAX_LA_Pos) /*!< MEMSYSCTL PFCR: MAX_LA Mask */ + +#define MEMSYSCTL_PFCR_MIN_LA_Pos 1U /*!< MEMSYSCTL PFCR: MIN_LA Position */ +#define MEMSYSCTL_PFCR_MIN_LA_Msk (0x7UL << MEMSYSCTL_PFCR_MIN_LA_Pos) /*!< MEMSYSCTL PFCR: MIN_LA Mask */ + +#define MEMSYSCTL_PFCR_ENABLE_Pos 0U /*!< MEMSYSCTL PFCR: ENABLE Position */ +#define MEMSYSCTL_PFCR_ENABLE_Msk (0x1UL /*<< MEMSYSCTL_PFCR_ENABLE_Pos*/) /*!< MEMSYSCTL PFCR: ENABLE Mask */ + +/* MEMSYSCTL ITCM Control Register (ITCMCR) Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/* MEMSYSCTL DTCM Control Register (DTCMCR) Register Definitions */ +#define MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/* MEMSYSCTL P-AHB Control Register (PAHBCR) Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (0x1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/* MEMSYSCTL ITGU Control Register (ITGU_CTRL) Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/* 
MEMSYSCTL ITGU Configuration Register (ITGU_CFG) Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/* MEMSYSCTL DTGU Control Registers (DTGU_CTRL) Register Definitions */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL DTGU Configuration Register (DTGU_CFG) Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */ + + +/*@}*/ /* end of group MemSysCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). 
+ */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; /*!< Offset: 0x000 (R/W) Core Power Domain Low Power State Register */ + __IOM uint32_t DPDLPSTATE; /*!< Offset: 0x004 (R/W) Debug Power Domain Low Power State Register */ +} PwrModCtl_Type; + +/* PWRMODCTL Core Power Domain Low Power State (CPDLPSTATE) Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/) /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */ + +/* PWRMODCTL Debug Power Domain Low Power State (DPDLPSTATE) Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/) /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */ + +/*@}*/ /* end of group PwrModCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_Type External Wakeup Interrupt Controller Registers + \brief Type definitions for the External Wakeup Interrupt Controller Registers (EWIC) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller Registers (EWIC). + */ +typedef struct +{ + __OM uint32_t EVENTSPR; /*!< Offset: 0x000 ( /W) Event Set Pending Register */ + uint32_t RESERVED0[31U]; + __IM uint32_t EVENTMASKA; /*!< Offset: 0x080 (R/W) Event Mask A Register */ + __IM uint32_t EVENTMASK[15]; /*!< Offset: 0x084 (R/W) Event Mask Register */ +} EWIC_Type; + +/* EWIC External Wakeup Interrupt Controller (EVENTSPR) Register Definitions */ +#define EWIC_EVENTSPR_EDBGREQ_Pos 2U /*!< EWIC EVENTSPR: EDBGREQ Position */ +#define EWIC_EVENTSPR_EDBGREQ_Msk (0x1UL << EWIC_EVENTSPR_EDBGREQ_Pos) /*!< EWIC EVENTSPR: EDBGREQ Mask */ + +#define EWIC_EVENTSPR_NMI_Pos 1U /*!< EWIC EVENTSPR: NMI Position */ +#define EWIC_EVENTSPR_NMI_Msk (0x1UL << EWIC_EVENTSPR_NMI_Pos) /*!< EWIC EVENTSPR: NMI Mask */ + +#define EWIC_EVENTSPR_EVENT_Pos 0U /*!< EWIC EVENTSPR: EVENT Position */ +#define EWIC_EVENTSPR_EVENT_Msk (0x1UL /*<< EWIC_EVENTSPR_EVENT_Pos*/) /*!< EWIC EVENTSPR: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASKA) Register Definitions */ +#define EWIC_EVENTMASKA_EDBGREQ_Pos 2U /*!< EWIC EVENTMASKA: EDBGREQ Position */ +#define EWIC_EVENTMASKA_EDBGREQ_Msk (0x1UL << EWIC_EVENTMASKA_EDBGREQ_Pos) /*!< EWIC EVENTMASKA: EDBGREQ Mask */ + +#define EWIC_EVENTMASKA_NMI_Pos 1U /*!< EWIC EVENTMASKA: NMI Position */ +#define EWIC_EVENTMASKA_NMI_Msk (0x1UL << EWIC_EVENTMASKA_NMI_Pos) /*!< EWIC EVENTMASKA: NMI Mask */ + +#define EWIC_EVENTMASKA_EVENT_Pos 0U /*!< EWIC EVENTMASKA: EVENT Position */ +#define EWIC_EVENTMASKA_EVENT_Msk (0x1UL /*<< EWIC_EVENTMASKA_EVENT_Pos*/) /*!< EWIC EVENTMASKA: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASK) Register Definitions */ +#define EWIC_EVENTMASK_IRQ_Pos 0U /*!< EWIC EVENTMASKA: IRQ Position */ +#define EWIC_EVENTMASK_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EVENTMASKA_IRQ_Pos*/) /*!< EWIC EVENTMASKA: IRQ Mask */ + +/*@}*/ /* end of group 
EWIC_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup ErrBnk_Type Error Banking Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Error Banking Registers (ERRBNK) + @{ + */ + +/** + \brief Structure type to access the Error Banking Registers (ERRBNK). + */ +typedef struct +{ + __IOM uint32_t IEBR0; /*!< Offset: 0x000 (R/W) Instruction Cache Error Bank Register 0 */ + __IOM uint32_t IEBR1; /*!< Offset: 0x004 (R/W) Instruction Cache Error Bank Register 1 */ + uint32_t RESERVED0[2U]; + __IOM uint32_t DEBR0; /*!< Offset: 0x010 (R/W) Data Cache Error Bank Register 0 */ + __IOM uint32_t DEBR1; /*!< Offset: 0x014 (R/W) Data Cache Error Bank Register 1 */ + uint32_t RESERVED1[2U]; + __IOM uint32_t TEBR0; /*!< Offset: 0x020 (R/W) TCM Error Bank Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t TEBR1; /*!< Offset: 0x028 (R/W) TCM Error Bank Register 1 */ +} ErrBnk_Type; + +/* ERRBNK Instruction Cache Error Bank Register 0 (IEBR0) Register Definitions */ +#define ERRBNK_IEBR0_SWDEF_Pos 30U /*!< ERRBNK IEBR0: SWDEF Position */ +#define ERRBNK_IEBR0_SWDEF_Msk (0x3UL << ERRBNK_IEBR0_SWDEF_Pos) /*!< ERRBNK IEBR0: SWDEF Mask */ + +#define ERRBNK_IEBR0_BANK_Pos 16U /*!< ERRBNK IEBR0: BANK Position */ +#define ERRBNK_IEBR0_BANK_Msk (0x1UL << ERRBNK_IEBR0_BANK_Pos) /*!< ERRBNK IEBR0: BANK Mask */ + +#define ERRBNK_IEBR0_LOCATION_Pos 2U /*!< ERRBNK IEBR0: LOCATION Position */ +#define ERRBNK_IEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos) /*!< ERRBNK IEBR0: LOCATION Mask */ + +#define ERRBNK_IEBR0_LOCKED_Pos 1U /*!< ERRBNK IEBR0: LOCKED Position */ +#define ERRBNK_IEBR0_LOCKED_Msk (0x1UL << ERRBNK_IEBR0_LOCKED_Pos) /*!< ERRBNK IEBR0: LOCKED Mask */ + +#define ERRBNK_IEBR0_VALID_Pos 0U /*!< ERRBNK IEBR0: VALID Position */ +#define ERRBNK_IEBR0_VALID_Msk (0x1UL << /*ERRBNK_IEBR0_VALID_Pos*/) /*!< ERRBNK IEBR0: VALID Mask */ + +/* ERRBNK Instruction Cache Error Bank Register 1 (IEBR1) Register Definitions */ +#define ERRBNK_IEBR1_SWDEF_Pos 30U /*!< ERRBNK IEBR1: SWDEF Position */ +#define ERRBNK_IEBR1_SWDEF_Msk (0x3UL << ERRBNK_IEBR1_SWDEF_Pos) /*!< ERRBNK IEBR1: SWDEF Mask */ + +#define ERRBNK_IEBR1_BANK_Pos 16U /*!< ERRBNK IEBR1: BANK Position */ +#define ERRBNK_IEBR1_BANK_Msk (0x1UL << ERRBNK_IEBR1_BANK_Pos) /*!< ERRBNK IEBR1: BANK Mask */ + +#define ERRBNK_IEBR1_LOCATION_Pos 2U /*!< ERRBNK IEBR1: LOCATION Position */ +#define ERRBNK_IEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos) /*!< ERRBNK IEBR1: LOCATION Mask */ + +#define ERRBNK_IEBR1_LOCKED_Pos 1U /*!< ERRBNK IEBR1: LOCKED Position */ +#define ERRBNK_IEBR1_LOCKED_Msk (0x1UL << ERRBNK_IEBR1_LOCKED_Pos) /*!< ERRBNK IEBR1: LOCKED Mask */ + +#define ERRBNK_IEBR1_VALID_Pos 0U /*!< ERRBNK IEBR1: VALID Position */ +#define ERRBNK_IEBR1_VALID_Msk (0x1UL << /*ERRBNK_IEBR1_VALID_Pos*/) /*!< ERRBNK IEBR1: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 0 (DEBR0) Register Definitions */ +#define ERRBNK_DEBR0_SWDEF_Pos 30U /*!< ERRBNK DEBR0: SWDEF Position */ +#define ERRBNK_DEBR0_SWDEF_Msk (0x3UL << ERRBNK_DEBR0_SWDEF_Pos) /*!< ERRBNK DEBR0: SWDEF Mask */ + +#define ERRBNK_DEBR0_TYPE_Pos 17U /*!< ERRBNK DEBR0: TYPE Position */ +#define ERRBNK_DEBR0_TYPE_Msk (0x1UL << ERRBNK_DEBR0_TYPE_Pos) /*!< ERRBNK DEBR0: TYPE Mask */ + +#define ERRBNK_DEBR0_BANK_Pos 16U /*!< ERRBNK DEBR0: BANK Position */ +#define ERRBNK_DEBR0_BANK_Msk (0x1UL << ERRBNK_DEBR0_BANK_Pos) /*!< ERRBNK DEBR0: BANK Mask */ + +#define ERRBNK_DEBR0_LOCATION_Pos 2U /*!< ERRBNK DEBR0: LOCATION Position */ +#define 
ERRBNK_DEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos) /*!< ERRBNK DEBR0: LOCATION Mask */ + +#define ERRBNK_DEBR0_LOCKED_Pos 1U /*!< ERRBNK DEBR0: LOCKED Position */ +#define ERRBNK_DEBR0_LOCKED_Msk (0x1UL << ERRBNK_DEBR0_LOCKED_Pos) /*!< ERRBNK DEBR0: LOCKED Mask */ + +#define ERRBNK_DEBR0_VALID_Pos 0U /*!< ERRBNK DEBR0: VALID Position */ +#define ERRBNK_DEBR0_VALID_Msk (0x1UL << /*ERRBNK_DEBR0_VALID_Pos*/) /*!< ERRBNK DEBR0: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 1 (DEBR1) Register Definitions */ +#define ERRBNK_DEBR1_SWDEF_Pos 30U /*!< ERRBNK DEBR1: SWDEF Position */ +#define ERRBNK_DEBR1_SWDEF_Msk (0x3UL << ERRBNK_DEBR1_SWDEF_Pos) /*!< ERRBNK DEBR1: SWDEF Mask */ + +#define ERRBNK_DEBR1_TYPE_Pos 17U /*!< ERRBNK DEBR1: TYPE Position */ +#define ERRBNK_DEBR1_TYPE_Msk (0x1UL << ERRBNK_DEBR1_TYPE_Pos) /*!< ERRBNK DEBR1: TYPE Mask */ + +#define ERRBNK_DEBR1_BANK_Pos 16U /*!< ERRBNK DEBR1: BANK Position */ +#define ERRBNK_DEBR1_BANK_Msk (0x1UL << ERRBNK_DEBR1_BANK_Pos) /*!< ERRBNK DEBR1: BANK Mask */ + +#define ERRBNK_DEBR1_LOCATION_Pos 2U /*!< ERRBNK DEBR1: LOCATION Position */ +#define ERRBNK_DEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos) /*!< ERRBNK DEBR1: LOCATION Mask */ + +#define ERRBNK_DEBR1_LOCKED_Pos 1U /*!< ERRBNK DEBR1: LOCKED Position */ +#define ERRBNK_DEBR1_LOCKED_Msk (0x1UL << ERRBNK_DEBR1_LOCKED_Pos) /*!< ERRBNK DEBR1: LOCKED Mask */ + +#define ERRBNK_DEBR1_VALID_Pos 0U /*!< ERRBNK DEBR1: VALID Position */ +#define ERRBNK_DEBR1_VALID_Msk (0x1UL << /*ERRBNK_DEBR1_VALID_Pos*/) /*!< ERRBNK DEBR1: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 0 (TEBR0) Register Definitions */ +#define ERRBNK_TEBR0_SWDEF_Pos 30U /*!< ERRBNK TEBR0: SWDEF Position */ +#define ERRBNK_TEBR0_SWDEF_Msk (0x3UL << ERRBNK_TEBR0_SWDEF_Pos) /*!< ERRBNK TEBR0: SWDEF Mask */ + +#define ERRBNK_TEBR0_POISON_Pos 28U /*!< ERRBNK TEBR0: POISON Position */ +#define ERRBNK_TEBR0_POISON_Msk (0x1UL << ERRBNK_TEBR0_POISON_Pos) /*!< ERRBNK TEBR0: POISON Mask */ + +#define ERRBNK_TEBR0_TYPE_Pos 27U /*!< ERRBNK TEBR0: TYPE Position */ +#define ERRBNK_TEBR0_TYPE_Msk (0x1UL << ERRBNK_TEBR0_TYPE_Pos) /*!< ERRBNK TEBR0: TYPE Mask */ + +#define ERRBNK_TEBR0_BANK_Pos 24U /*!< ERRBNK TEBR0: BANK Position */ +#define ERRBNK_TEBR0_BANK_Msk (0x3UL << ERRBNK_TEBR0_BANK_Pos) /*!< ERRBNK TEBR0: BANK Mask */ + +#define ERRBNK_TEBR0_LOCATION_Pos 2U /*!< ERRBNK TEBR0: LOCATION Position */ +#define ERRBNK_TEBR0_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos) /*!< ERRBNK TEBR0: LOCATION Mask */ + +#define ERRBNK_TEBR0_LOCKED_Pos 1U /*!< ERRBNK TEBR0: LOCKED Position */ +#define ERRBNK_TEBR0_LOCKED_Msk (0x1UL << ERRBNK_TEBR0_LOCKED_Pos) /*!< ERRBNK TEBR0: LOCKED Mask */ + +#define ERRBNK_TEBR0_VALID_Pos 0U /*!< ERRBNK TEBR0: VALID Position */ +#define ERRBNK_TEBR0_VALID_Msk (0x1UL << /*ERRBNK_TEBR0_VALID_Pos*/) /*!< ERRBNK TEBR0: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 1 (TEBR1) Register Definitions */ +#define ERRBNK_TEBR1_SWDEF_Pos 30U /*!< ERRBNK TEBR1: SWDEF Position */ +#define ERRBNK_TEBR1_SWDEF_Msk (0x3UL << ERRBNK_TEBR1_SWDEF_Pos) /*!< ERRBNK TEBR1: SWDEF Mask */ + +#define ERRBNK_TEBR1_POISON_Pos 28U /*!< ERRBNK TEBR1: POISON Position */ +#define ERRBNK_TEBR1_POISON_Msk (0x1UL << ERRBNK_TEBR1_POISON_Pos) /*!< ERRBNK TEBR1: POISON Mask */ + +#define ERRBNK_TEBR1_TYPE_Pos 27U /*!< ERRBNK TEBR1: TYPE Position */ +#define ERRBNK_TEBR1_TYPE_Msk (0x1UL << ERRBNK_TEBR1_TYPE_Pos) /*!< ERRBNK TEBR1: TYPE Mask */ + +#define ERRBNK_TEBR1_BANK_Pos 24U /*!< ERRBNK TEBR1: BANK 
Position */ +#define ERRBNK_TEBR1_BANK_Msk (0x3UL << ERRBNK_TEBR1_BANK_Pos) /*!< ERRBNK TEBR1: BANK Mask */ + +#define ERRBNK_TEBR1_LOCATION_Pos 2U /*!< ERRBNK TEBR1: LOCATION Position */ +#define ERRBNK_TEBR1_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos) /*!< ERRBNK TEBR1: LOCATION Mask */ + +#define ERRBNK_TEBR1_LOCKED_Pos 1U /*!< ERRBNK TEBR1: LOCKED Position */ +#define ERRBNK_TEBR1_LOCKED_Msk (0x1UL << ERRBNK_TEBR1_LOCKED_Pos) /*!< ERRBNK TEBR1: LOCKED Mask */ + +#define ERRBNK_TEBR1_VALID_Pos 0U /*!< ERRBNK TEBR1: VALID Position */ +#define ERRBNK_TEBR1_VALID_Msk (0x1UL << /*ERRBNK_TEBR1_VALID_Pos*/) /*!< ERRBNK TEBR1: VALID Mask */ + +/*@}*/ /* end of group ErrBnk_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PrcCfgInf_Type Processor Configuration Information Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Processor Configuration Information Registerss (PRCCFGINF) + @{ + */ + +/** + \brief Structure type to access the Processor Configuration Information Registerss (PRCCFGINF). + */ +typedef struct +{ + __OM uint32_t CFGINFOSEL; /*!< Offset: 0x000 ( /W) Processor Configuration Information Selection Register */ + __IM uint32_t CFGINFORD; /*!< Offset: 0x004 (R/ ) Processor Configuration Information Read Data Register */ +} PrcCfgInf_Type; + +/* PRCCFGINF Processor Configuration Information Selection Register (CFGINFOSEL) Definitions */ + +/* PRCCFGINF Processor Configuration Information Read Data Register (CFGINFORD) Definitions */ + +/*@}*/ /* end of group PrcCfgInf_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup STL_Type Software Test Library Observation Registers + \brief Type definitions for the Software Test Library Observation Registerss (STL) + @{ + */ + +/** + \brief Structure type to access the Software Test Library Observation Registerss (STL). 
+ */ +typedef struct +{ + __IM uint32_t STLNVICPENDOR; /*!< Offset: 0x000 (R/ ) NVIC Pending Priority Tree Register */ + __IM uint32_t STLNVICACTVOR; /*!< Offset: 0x004 (R/ ) NVIC Active Priority Tree Register */ + uint32_t RESERVED0[2U]; + __OM uint32_t STLIDMPUSR; /*!< Offset: 0x010 ( /W) MPU Sanple Register */ + __IM uint32_t STLIMPUOR; /*!< Offset: 0x014 (R/ ) MPU Region Hit Register */ + __IM uint32_t STLD0MPUOR; /*!< Offset: 0x018 (R/ ) MPU Memory Attributes Register 0 */ + __IM uint32_t STLD1MPUOR; /*!< Offset: 0x01C (R/ ) MPU Memory Attributes Register 1 */ + +} STL_Type; + +/* STL Software Test Library Observation Register (STLNVICPENDOR) Definitions */ +#define STL_STLNVICPENDOR_VALID_Pos 18U /*!< STL STLNVICPENDOR: VALID Position */ +#define STL_STLNVICPENDOR_VALID_Msk (0x1UL << STL_STLNVICPENDOR_VALID_Pos) /*!< STL STLNVICPENDOR: VALID Mask */ + +#define STL_STLNVICPENDOR_TARGET_Pos 17U /*!< STL STLNVICPENDOR: TARGET Position */ +#define STL_STLNVICPENDOR_TARGET_Msk (0x1UL << STL_STLNVICPENDOR_TARGET_Pos) /*!< STL STLNVICPENDOR: TARGET Mask */ + +#define STL_STLNVICPENDOR_PRIORITY_Pos 9U /*!< STL STLNVICPENDOR: PRIORITY Position */ +#define STL_STLNVICPENDOR_PRIORITY_Msk (0xFFUL << STL_STLNVICPENDOR_PRIORITY_Pos) /*!< STL STLNVICPENDOR: PRIORITY Mask */ + +#define STL_STLNVICPENDOR_INTNUM_Pos 0U /*!< STL STLNVICPENDOR: INTNUM Position */ +#define STL_STLNVICPENDOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICPENDOR_INTNUM_Pos*/) /*!< STL STLNVICPENDOR: INTNUM Mask */ + +/* STL Software Test Library Observation Register (STLNVICACTVOR) Definitions */ +#define STL_STLNVICACTVOR_VALID_Pos 18U /*!< STL STLNVICACTVOR: VALID Position */ +#define STL_STLNVICACTVOR_VALID_Msk (0x1UL << STL_STLNVICACTVOR_VALID_Pos) /*!< STL STLNVICACTVOR: VALID Mask */ + +#define STL_STLNVICACTVOR_TARGET_Pos 17U /*!< STL STLNVICACTVOR: TARGET Position */ +#define STL_STLNVICACTVOR_TARGET_Msk (0x1UL << STL_STLNVICACTVOR_TARGET_Pos) /*!< STL STLNVICACTVOR: TARGET Mask */ + +#define STL_STLNVICACTVOR_PRIORITY_Pos 9U /*!< STL STLNVICACTVOR: PRIORITY Position */ +#define STL_STLNVICACTVOR_PRIORITY_Msk (0xFFUL << STL_STLNVICACTVOR_PRIORITY_Pos) /*!< STL STLNVICACTVOR: PRIORITY Mask */ + +#define STL_STLNVICACTVOR_INTNUM_Pos 0U /*!< STL STLNVICACTVOR: INTNUM Position */ +#define STL_STLNVICACTVOR_INTNUM_Msk (0x1FFUL /*<< STL_STLNVICACTVOR_INTNUM_Pos*/) /*!< STL STLNVICACTVOR: INTNUM Mask */ + +/* STL Software Test Library Observation Register (STLIDMPUSR) Definitions */ +#define STL_STLIDMPUSR_ADDR_Pos 5U /*!< STL STLIDMPUSR: ADDR Position */ +#define STL_STLIDMPUSR_ADDR_Msk (0x7FFFFFFUL << STL_STLIDMPUSR_ADDR_Pos) /*!< STL STLIDMPUSR: ADDR Mask */ + +#define STL_STLIDMPUSR_INSTR_Pos 2U /*!< STL STLIDMPUSR: INSTR Position */ +#define STL_STLIDMPUSR_INSTR_Msk (0x1UL << STL_STLIDMPUSR_INSTR_Pos) /*!< STL STLIDMPUSR: INSTR Mask */ + +#define STL_STLIDMPUSR_DATA_Pos 1U /*!< STL STLIDMPUSR: DATA Position */ +#define STL_STLIDMPUSR_DATA_Msk (0x1UL << STL_STLIDMPUSR_DATA_Pos) /*!< STL STLIDMPUSR: DATA Mask */ + +/* STL Software Test Library Observation Register (STLIMPUOR) Definitions */ +#define STL_STLIMPUOR_HITREGION_Pos 9U /*!< STL STLIMPUOR: HITREGION Position */ +#define STL_STLIMPUOR_HITREGION_Msk (0xFFUL << STL_STLIMPUOR_HITREGION_Pos) /*!< STL STLIMPUOR: HITREGION Mask */ + +#define STL_STLIMPUOR_ATTR_Pos 0U /*!< STL STLIMPUOR: ATTR Position */ +#define STL_STLIMPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLIMPUOR_ATTR_Pos*/) /*!< STL STLIMPUOR: ATTR Mask */ + +/* STL Software Test Library Observation Register (STLD0MPUOR) 
Definitions */ +#define STL_STLD0MPUOR_HITREGION_Pos 9U /*!< STL STLD0MPUOR: HITREGION Position */ +#define STL_STLD0MPUOR_HITREGION_Msk (0xFFUL << STL_STLD0MPUOR_HITREGION_Pos) /*!< STL STLD0MPUOR: HITREGION Mask */ + +#define STL_STLD0MPUOR_ATTR_Pos 0U /*!< STL STLD0MPUOR: ATTR Position */ +#define STL_STLD0MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD0MPUOR_ATTR_Pos*/) /*!< STL STLD0MPUOR: ATTR Mask */ + +/* STL Software Test Library Observation Register (STLD1MPUOR) Definitions */ +#define STL_STLD1MPUOR_HITREGION_Pos 9U /*!< STL STLD1MPUOR: HITREGION Position */ +#define STL_STLD1MPUOR_HITREGION_Msk (0xFFUL << STL_STLD1MPUOR_HITREGION_Pos) /*!< STL STLD1MPUOR: HITREGION Mask */ + +#define STL_STLD1MPUOR_ATTR_Pos 0U /*!< STL STLD1MPUOR: ATTR Position */ +#define STL_STLD1MPUOR_ATTR_Msk (0x1FFUL /*<< STL_STLD1MPUOR_ATTR_Pos*/) /*!< STL STLD1MPUOR: ATTR Mask */ + +/*@}*/ /* end of group STL_Type */ + + /** \ingroup CMSIS_core_register \defgroup CMSIS_TPI Trace Port Interface (TPI) @@ -1490,15 +1966,14 @@ typedef struct uint32_t RESERVED11[108]; __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ - uint32_t RESERVED12[4]; + uint32_t RESERVED12[3]; __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ uint32_t RESERVED13[3]; __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ - __IOM uint32_t PIDR1; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 1 */ - __IOM uint32_t PIDR2; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 2 */ - __IOM uint32_t PIDR3; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 3 */ - uint32_t RESERVED14[3]; + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ @@ -2983,27 +3458,13 @@ typedef struct */ typedef struct { - __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ - __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + uint32_t RESERVED0[2U]; __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ - __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ + uint32_t RESERVED1[3U]; + __IM uint32_t DDEVTYPE; /*!< Offset: 0x01C (R/ ) SCS Device Type Register */ } DIB_Type; -/* DLAR, SCS Software Lock Access Register Definitions */ -#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ -#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ - -/* DLSR, SCS Software Lock Status Register Definitions */ -#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ -#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ - -#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock 
status Position */ -#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ - -#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ -#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ - /* DAUTHSTATUS, Debug Authentication Status Register Definitions */ #define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ #define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ @@ -3093,6 +3554,12 @@ typedef struct #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define STL_BASE (0xE001E800UL) /*!< Software Test Library Base Address */ #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ @@ -3101,13 +3568,19 @@ typedef struct #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ - #define SCnSCB ((SCnSCB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + #define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define STL ((STL_Type *) STL_BASE ) /*!< Software Test Library configuration struct */ #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ @@ -3139,7 +3612,7 @@ typedef struct #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ - #define SCnSCB_NS ((SCnSCB_Type *) SCS_BASE_NS ) /*!< System control Register not in 
SCB(non-secure address space) */ + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ @@ -3159,6 +3632,69 @@ typedef struct /*@} */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. + @{ + */ +#define ID_ADR (ID_AFR) /*!< SCB Auxiliary Feature Register */ + +/* 'SCnSCB' is deprecated and replaced by 'ICB' */ +typedef ICB_Type SCnSCB_Type; + +/* Auxiliary Control Register Definitions */ +#define SCnSCB_ACTLR_DISCRITAXIRUW_Pos (ICB_ACTLR_DISCRITAXIRUW_Pos) +#define SCnSCB_ACTLR_DISCRITAXIRUW_Msk (ICB_ACTLR_DISCRITAXIRUW_Msk) + +#define SCnSCB_ACTLR_DISDI_Pos (ICB_ACTLR_DISDI_Pos) +#define SCnSCB_ACTLR_DISDI_Msk (ICB_ACTLR_DISDI_Msk) + +#define SCnSCB_ACTLR_DISCRITAXIRUR_Pos (ICB_ACTLR_DISCRITAXIRUR_Pos) +#define SCnSCB_ACTLR_DISCRITAXIRUR_Msk (ICB_ACTLR_DISCRITAXIRUR_Msk) + +#define SCnSCB_ACTLR_EVENTBUSEN_Pos (ICB_ACTLR_EVENTBUSEN_Pos) +#define SCnSCB_ACTLR_EVENTBUSEN_Msk (ICB_ACTLR_EVENTBUSEN_Msk) + +#define SCnSCB_ACTLR_EVENTBUSEN_S_Pos (ICB_ACTLR_EVENTBUSEN_S_Pos) +#define SCnSCB_ACTLR_EVENTBUSEN_S_Msk (ICB_ACTLR_EVENTBUSEN_S_Msk) + +#define SCnSCB_ACTLR_DISITMATBFLUSH_Pos (ICB_ACTLR_DISITMATBFLUSH_Pos) +#define SCnSCB_ACTLR_DISITMATBFLUSH_Msk (ICB_ACTLR_DISITMATBFLUSH_Msk) + +#define SCnSCB_ACTLR_DISNWAMODE_Pos (ICB_ACTLR_DISNWAMODE_Pos) +#define SCnSCB_ACTLR_DISNWAMODE_Msk (ICB_ACTLR_DISNWAMODE_Msk) + +#define SCnSCB_ACTLR_FPEXCODIS_Pos (ICB_ACTLR_FPEXCODIS_Pos) +#define SCnSCB_ACTLR_FPEXCODIS_Msk (ICB_ACTLR_FPEXCODIS_Msk) + +#define SCnSCB_ACTLR_DISOLAP_Pos (ICB_ACTLR_DISOLAP_Pos) +#define SCnSCB_ACTLR_DISOLAP_Msk (ICB_ACTLR_DISOLAP_Msk) + +#define SCnSCB_ACTLR_DISOLAPS_Pos (ICB_ACTLR_DISOLAPS_Pos) +#define SCnSCB_ACTLR_DISOLAPS_Msk (ICB_ACTLR_DISOLAPS_Msk) + +#define SCnSCB_ACTLR_DISLOBR_Pos (ICB_ACTLR_DISLOBR_Pos) +#define SCnSCB_ACTLR_DISLOBR_Msk (ICB_ACTLR_DISLOBR_Msk) + +#define SCnSCB_ACTLR_DISLO_Pos (ICB_ACTLR_DISLO_Pos) +#define SCnSCB_ACTLR_DISLO_Msk (ICB_ACTLR_DISLO_Msk) + +#define SCnSCB_ACTLR_DISLOLEP_Pos (ICB_ACTLR_DISLOLEP_Pos) +#define SCnSCB_ACTLR_DISLOLEP_Msk (ICB_ACTLR_DISLOLEP_Msk) + +#define SCnSCB_ACTLR_DISFOLD_Pos (ICB_ACTLR_DISFOLD_Pos) +#define SCnSCB_ACTLR_DISFOLD_Msk (ICB_ACTLR_DISFOLD_Msk) + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos (ICB_ICTR_INTLINESNUM_Pos) +#define SCnSCB_ICTR_INTLINESNUM_Msk (ICB_ICTR_INTLINESNUM_Msk) + +#define SCnSCB (ICB) +#define SCnSCB_NS (ICB_NS) + +/*@} */ + /******************************************************************************* * Hardware Abstraction Layer @@ -3852,6 +4388,9 @@ __STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) #define ARMCM55_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ #define ARMCM55_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ #define ARMCM55_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM55_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access to the P-AHB write interface */ +#define 
ARMCM55_PMU_AXI_WRITE_ACCESS 0xC302 /*!< Any beat access to M-AXI write interface */ +#define ARMCM55_PMU_AXI_READ_ACCESS 0xC303 /*!< Any beat access to M-AXI read interface */ #define ARMCM55_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ #define ARMCM55_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm7.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm7.h index a82367a..649894a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm7.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm7.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_cm7.h * @brief CMSIS Cortex-M7 Core Peripheral Access Layer Header File - * @version V5.1.5 - * @date 03. November 2020 + * @version V5.1.6 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -213,7 +213,7 @@ #define __VTOR_PRESENT 1U #warning "__VTOR_PRESENT not defined in device header file; using default!" #endif - + #ifndef __NVIC_PRIO_BITS #define __NVIC_PRIO_BITS 3U #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" @@ -677,22 +677,22 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MLSPERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ #define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos 
(SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm85.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm85.h new file mode 100644 index 0000000..acb2eb1 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_cm85.h @@ -0,0 +1,4636 @@ +/**************************************************************************//** + * @file core_cm85.h + * @brief CMSIS Cortex-M85 Core Peripheral Access Layer Header File + * @version V1.0.5 + * @date 12. May 2022 + ******************************************************************************/ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_CM85_H_GENERIC +#define __CORE_CM85_H_GENERIC + +#include + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup Cortex_M85 + @{ + */ + +#include "cmsis_version.h" + +/* CMSIS CM85 definitions */ + +#define __CORTEX_M (85U) /*!< Cortex-M Core */ + +#if defined ( __CC_ARM ) + #error Legacy Arm Compiler does not support Armv8.1-M target architecture. +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined __ARM_FP + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined __ARMVFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined(__ARM_FEATURE_DSP) + #if defined(__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined __TI_VFP_SUPPORT__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined __FPU_VFP__ + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* 
__CORE_CM85_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_CM85_H_DEPENDANT +#define __CORE_CM85_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __CM85_REV + #define __CM85_REV 0x0001U + #warning "__CM85_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #if __FPU_PRESENT != 0U + #ifndef __FPU_DP + #define __FPU_DP 0U + #warning "__FPU_DP not defined in device header file; using default!" + #endif + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __VTOR_PRESENT + #define __VTOR_PRESENT 1U + #warning "__VTOR_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __PMU_PRESENT + #define __PMU_PRESENT 0U + #warning "__PMU_PRESENT not defined in device header file; using default!" + #endif + + #if __PMU_PRESENT != 0U + #ifndef __PMU_NUM_EVENTCNT + #define __PMU_NUM_EVENTCNT 8U + #warning "__PMU_NUM_EVENTCNT not defined in device header file; using default!" + #elif (__PMU_NUM_EVENTCNT > 8 || __PMU_NUM_EVENTCNT < 2) + #error "__PMU_NUM_EVENTCNT is out of range in device header file!" */ + #endif + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group Cortex_M85 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core EWIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core PMU Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for Cortex-M processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:1; /*!< bit: 20 Reserved */ + uint32_t B:1; /*!< bit: 21 BTI active (read 0) */ + uint32_t _reserved2:2; /*!< bit: 22..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_B_Pos 21U /*!< xPSR: B Position */ +#define xPSR_B_Msk (1UL << xPSR_B_Pos) /*!< xPSR: B Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). 
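+
+   Illustrative usage sketch (a minimal example, assuming the __get_CONTROL()
+   core-register accessor provided by the CMSIS compiler abstraction headers):
+   \code
+   CONTROL_Type ctrl;
+   ctrl.w = __get_CONTROL();        // read the current CONTROL register value
+   if (ctrl.b.nPRIV != 0U)
+   {
+     // Thread mode code is running unprivileged
+   }
+   \endcode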
+ */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t BTI_EN:1; /*!< bit: 4 Privileged branch target identification enable */ + uint32_t UBTI_EN:1; /*!< bit: 5 Unprivileged branch target identification enable */ + uint32_t PAC_EN:1; /*!< bit: 6 Privileged pointer authentication enable */ + uint32_t UPAC_EN:1; /*!< bit: 7 Unprivileged pointer authentication enable */ + uint32_t _reserved1:24; /*!< bit: 8..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_UPAC_EN_Pos 7U /*!< CONTROL: UPAC_EN Position */ +#define CONTROL_UPAC_EN_Msk (1UL << CONTROL_UPAC_EN_Pos) /*!< CONTROL: UPAC_EN Mask */ + +#define CONTROL_PAC_EN_Pos 6U /*!< CONTROL: PAC_EN Position */ +#define CONTROL_PAC_EN_Msk (1UL << CONTROL_PAC_EN_Pos) /*!< CONTROL: PAC_EN Mask */ + +#define CONTROL_UBTI_EN_Pos 5U /*!< CONTROL: UBTI_EN Position */ +#define CONTROL_UBTI_EN_Msk (1UL << CONTROL_UBTI_EN_Pos) /*!< CONTROL: UBTI_EN Mask */ + +#define CONTROL_BTI_EN_Pos 4U /*!< CONTROL: BTI_EN Position */ +#define CONTROL_BTI_EN_Msk (1UL << CONTROL_BTI_EN_Pos) /*!< CONTROL: BTI_EN Mask */ + +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
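+
+   Illustrative usage sketch (a minimal example, assuming the NVIC instance pointer
+   and the device-specific IRQn_Type value are defined elsewhere; this mirrors the
+   pattern used by the __NVIC_EnableIRQ() helper):
+   \code
+   void enable_irq_raw(IRQn_Type IRQn)
+   {
+     // one enable bit per interrupt, 32 interrupts per ISER word
+     NVIC->ISER[((uint32_t)IRQn) >> 5UL] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL));
+   }
+   \endcode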
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[6U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control Register */ + uint32_t 
RESERVED7[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: 0x200 ( /W) Software Triggered Interrupt Register */ + __IOM uint32_t RFSR; /*!< Offset: 0x204 (R/W) RAS Fault Status Register */ + uint32_t RESERVED4[14U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ + __OM uint32_t BPIALL; /*!< Offset: 0x278 ( /W) Branch Predictor Invalidate All */ +} SCB_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position 
*/ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_IESB_Pos 5U /*!< SCB AIRCR: Implicit ESB Enable Position */ +#define SCB_AIRCR_IESB_Msk (1UL << SCB_AIRCR_IESB_Pos) /*!< SCB AIRCR: Implicit ESB Enable Mask */ + +#define SCB_AIRCR_DIT_Pos 4U /*!< SCB AIRCR: Data Independent Timing Position */ +#define SCB_AIRCR_DIT_Msk (1UL << SCB_AIRCR_DIT_Pos) /*!< SCB AIRCR: Data Independent Timing Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define 
SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_TRD_Pos 20U /*!< SCB CCR: TRD Position */ +#define SCB_CCR_TRD_Msk (1UL << SCB_CCR_TRD_Pos) /*!< SCB CCR: TRD Mask */ + +#define SCB_CCR_LOB_Pos 19U /*!< SCB CCR: LOB Position */ +#define SCB_CCR_LOB_Msk (1UL << SCB_CCR_LOB_Pos) /*!< SCB CCR: LOB Mask */ + +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: 
MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status 
Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos 
(SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_PMU_Pos 5U /*!< SCB DFSR: PMU Position */ +#define SCB_DFSR_PMU_Msk (1UL << SCB_DFSR_PMU_Pos) /*!< SCB DFSR: PMU Mask */ + +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CP7_Pos 7U /*!< SCB NSACR: CP7 Position */ +#define SCB_NSACR_CP7_Msk (1UL << SCB_NSACR_CP7_Pos) /*!< SCB NSACR: CP7 Mask */ + +#define SCB_NSACR_CP6_Pos 6U /*!< SCB NSACR: CP6 Position */ +#define SCB_NSACR_CP6_Msk (1UL << SCB_NSACR_CP6_Pos) /*!< SCB NSACR: CP6 Mask */ + +#define SCB_NSACR_CP5_Pos 5U /*!< SCB NSACR: CP5 Position */ +#define SCB_NSACR_CP5_Msk (1UL << SCB_NSACR_CP5_Pos) /*!< SCB NSACR: CP5 Mask */ + +#define SCB_NSACR_CP4_Pos 4U /*!< SCB NSACR: CP4 Position */ +#define SCB_NSACR_CP4_Msk (1UL << SCB_NSACR_CP4_Pos) /*!< SCB NSACR: CP4 Mask */ + +#define SCB_NSACR_CP3_Pos 3U /*!< SCB NSACR: CP3 Position */ +#define SCB_NSACR_CP3_Msk (1UL << SCB_NSACR_CP3_Pos) /*!< SCB NSACR: CP3 Mask */ + +#define SCB_NSACR_CP2_Pos 2U /*!< SCB NSACR: CP2 Position */ +#define SCB_NSACR_CP2_Msk (1UL << SCB_NSACR_CP2_Pos) /*!< SCB NSACR: CP2 Mask */ + +#define 
SCB_NSACR_CP1_Pos 1U /*!< SCB NSACR: CP1 Position */ +#define SCB_NSACR_CP1_Msk (1UL << SCB_NSACR_CP1_Pos) /*!< SCB NSACR: CP1 Mask */ + +#define SCB_NSACR_CP0_Pos 0U /*!< SCB NSACR: CP0 Position */ +#define SCB_NSACR_CP0_Msk (1UL /*<< SCB_NSACR_CP0_Pos*/) /*!< SCB NSACR: CP0 Mask */ + +/* SCB Debug Feature Register 0 Definitions */ +#define SCB_ID_DFR_UDE_Pos 28U /*!< SCB ID_DFR: UDE Position */ +#define SCB_ID_DFR_UDE_Msk (0xFUL << SCB_ID_DFR_UDE_Pos) /*!< SCB ID_DFR: UDE Mask */ + +#define SCB_ID_DFR_MProfDbg_Pos 20U /*!< SCB ID_DFR: MProfDbg Position */ +#define SCB_ID_DFR_MProfDbg_Msk (0xFUL << SCB_ID_DFR_MProfDbg_Pos) /*!< SCB ID_DFR: MProfDbg Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position */ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< 
SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB RAS Fault Status Register Definitions */ +#define SCB_RFSR_V_Pos 31U /*!< SCB RFSR: V Position */ +#define SCB_RFSR_V_Msk (1UL << SCB_RFSR_V_Pos) /*!< SCB RFSR: V Mask */ + +#define SCB_RFSR_IS_Pos 16U /*!< SCB RFSR: IS Position */ +#define SCB_RFSR_IS_Msk (0x7FFFUL << SCB_RFSR_IS_Pos) /*!< SCB RFSR: IS Mask */ + +#define SCB_RFSR_UET_Pos 0U /*!< SCB RFSR: UET Position */ +#define SCB_RFSR_UET_Msk (3UL /*<< SCB_RFSR_UET_Pos*/) /*!< SCB RFSR: UET Mask */ + +/* SCB D-Cache Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0x1FFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean by Set-way Register Definitions */ +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0x1FFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0x1FFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ICB Implementation Control Block register (ICB) + \brief Type definitions for the Implementation Control Block Register + @{ + */ + +/** + \brief Structure type to access the Implementation Control Block (ICB). 
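+
+   Illustrative usage sketch (a minimal example, assuming the ICB instance pointer
+   defined elsewhere in this file):
+   \code
+   // ICTR.INTLINESNUM reports the implemented interrupt lines in granules of 32
+   uint32_t granules = (ICB->ICTR & ICB_ICTR_INTLINESNUM_Msk) >> ICB_ICTR_INTLINESNUM_Pos;
+   uint32_t max_irq_lines = 32U * (granules + 1U);
+   \endcode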
+ */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} ICB_Type; + +/* Auxiliary Control Register Definitions */ +#define ICB_ACTLR_DISCRITAXIRUW_Pos 27U /*!< ACTLR: DISCRITAXIRUW Position */ +#define ICB_ACTLR_DISCRITAXIRUW_Msk (1UL << ICB_ACTLR_DISCRITAXIRUW_Pos) /*!< ACTLR: DISCRITAXIRUW Mask */ + +#define ICB_ACTLR_DISCRITAXIRUR_Pos 15U /*!< ACTLR: DISCRITAXIRUR Position */ +#define ICB_ACTLR_DISCRITAXIRUR_Msk (1UL << ICB_ACTLR_DISCRITAXIRUR_Pos) /*!< ACTLR: DISCRITAXIRUR Mask */ + +#define ICB_ACTLR_EVENTBUSEN_Pos 14U /*!< ACTLR: EVENTBUSEN Position */ +#define ICB_ACTLR_EVENTBUSEN_Msk (1UL << ICB_ACTLR_EVENTBUSEN_Pos) /*!< ACTLR: EVENTBUSEN Mask */ + +#define ICB_ACTLR_EVENTBUSEN_S_Pos 13U /*!< ACTLR: EVENTBUSEN_S Position */ +#define ICB_ACTLR_EVENTBUSEN_S_Msk (1UL << ICB_ACTLR_EVENTBUSEN_S_Pos) /*!< ACTLR: EVENTBUSEN_S Mask */ + +#define ICB_ACTLR_DISITMATBFLUSH_Pos 12U /*!< ACTLR: DISITMATBFLUSH Position */ +#define ICB_ACTLR_DISITMATBFLUSH_Msk (1UL << ICB_ACTLR_DISITMATBFLUSH_Pos) /*!< ACTLR: DISITMATBFLUSH Mask */ + +#define ICB_ACTLR_DISNWAMODE_Pos 11U /*!< ACTLR: DISNWAMODE Position */ +#define ICB_ACTLR_DISNWAMODE_Msk (1UL << ICB_ACTLR_DISNWAMODE_Pos) /*!< ACTLR: DISNWAMODE Mask */ + +#define ICB_ACTLR_FPEXCODIS_Pos 10U /*!< ACTLR: FPEXCODIS Position */ +#define ICB_ACTLR_FPEXCODIS_Msk (1UL << ICB_ACTLR_FPEXCODIS_Pos) /*!< ACTLR: FPEXCODIS Mask */ + +/* Interrupt Controller Type Register Definitions */ +#define ICB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define ICB_ICTR_INTLINESNUM_Msk (0xFUL /*<< ICB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_ICB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick). 
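+
+   Illustrative usage sketch (a minimal example, assuming the SysTick instance pointer
+   defined elsewhere in this file and a SystemCoreClock variable provided by the
+   device's system file):
+   \code
+   SysTick->LOAD = (SystemCoreClock / 1000U) - 1U;   // reload value for a 1 ms tick
+   SysTick->VAL  = 0U;                               // clear the current counter value
+   SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk |      // count the processor clock
+                   SysTick_CTRL_TICKINT_Msk   |      // enable the SysTick exception
+                   SysTick_CTRL_ENABLE_Msk;          // start the counter
+   \endcode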
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
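+
+   Illustrative usage sketch (a minimal example following the usual ITM_SendChar()
+   pattern, assuming the ITM instance pointer is defined elsewhere and the trace
+   infrastructure has already been enabled by the debugger):
+   \code
+   if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) &&   // ITM enabled
+       ((ITM->TER & 1UL)                != 0UL))     // stimulus port 0 enabled
+   {
+     while (ITM->PORT[0U].u32 == 0UL) { __NOP(); }   // wait until the port can accept data
+     ITM->PORT[0U].u8 = (uint8_t)'A';                // write one character
+   }
+   \endcode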
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[27U]; + __IM uint32_t ITREAD; /*!< Offset: 0xEF0 (R/ ) ITM Integration Read Register */ + uint32_t RESERVED4[1U]; + __OM uint32_t ITWRITE; /*!< Offset: 0xEF8 ( /W) ITM Integration Write Register */ + uint32_t RESERVED5[1U]; + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) ITM Integration Mode Control Register */ + uint32_t RESERVED6[46U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED7[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) ITM Device Type Register */ + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define 
ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Integration Read Register Definitions */ +#define ITM_ITREAD_AFVALID_Pos 1U /*!< ITM ITREAD: AFVALID Position */ +#define ITM_ITREAD_AFVALID_Msk (0x1UL << ITM_ITREAD_AFVALID_Pos) /*!< ITM ITREAD: AFVALID Mask */ + +#define ITM_ITREAD_ATREADY_Pos 0U /*!< ITM ITREAD: ATREADY Position */ +#define ITM_ITREAD_ATREADY_Msk (0x1UL /*<< ITM_ITREAD_ATREADY_Pos*/) /*!< ITM ITREAD: ATREADY Mask */ + +/* ITM Integration Write Register Definitions */ +#define ITM_ITWRITE_AFVALID_Pos 1U /*!< ITM ITWRITE: AFVALID Position */ +#define ITM_ITWRITE_AFVALID_Msk (0x1UL << ITM_ITWRITE_AFVALID_Pos) /*!< ITM ITWRITE: AFVALID Mask */ + +#define ITM_ITWRITE_ATREADY_Pos 0U /*!< ITM ITWRITE: ATREADY Position */ +#define ITM_ITWRITE_ATREADY_Msk (0x1UL /*<< ITM_ITWRITE_ATREADY_Pos*/) /*!< ITM ITWRITE: ATREADY Mask */ + +/* ITM Integration Mode Control Register Definitions */ +#define ITM_ITCTRL_IME_Pos 0U /*!< ITM ITCTRL: IME Position */ +#define ITM_ITCTRL_IME_Msk (0x1UL /*<< ITM_ITCTRL_IME_Pos*/) /*!< ITM ITCTRL: IME Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). 
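+
+   Illustrative usage sketch (a minimal cycle-counting example, assuming the DWT
+   instance pointer is defined elsewhere and trace has already been enabled through
+   the debug control block):
+   \code
+   DWT->CYCCNT = 0U;                                 // reset the cycle counter
+   DWT->CTRL  |= DWT_CTRL_CYCCNTENA_Msk;             // start counting core cycles
+   // ... code under measurement ...
+   uint32_t cycles = DWT->CYCCNT;                    // elapsed core clock cycles
+   \endcode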
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + __IOM uint32_t VMASK1; /*!< Offset: 0x03C (R/W) Comparator Value Mask 1 */ + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + __IOM uint32_t VMASK3; /*!< Offset: 0x05C (R/W) Comparator Value Mask 3 */ + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator Register 6 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED14[968U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Type Architecture Register */ + uint32_t RESERVED15[3U]; + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define 
DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define 
DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x3UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup MemSysCtl_Type Memory System Control Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Memory System Control Registers (MEMSYSCTL) + @{ + */ + +/** + \brief Structure type to access the Memory System Control Registers (MEMSYSCTL). + */ +typedef struct +{ + __IOM uint32_t MSCR; /*!< Offset: 0x000 (R/W) Memory System Control Register */ + __IOM uint32_t PFCR; /*!< Offset: 0x004 (R/W) Prefetcher Control Register */ + uint32_t RESERVED1[2U]; + __IOM uint32_t ITCMCR; /*!< Offset: 0x010 (R/W) ITCM Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x014 (R/W) DTCM Control Register */ + __IOM uint32_t PAHBCR; /*!< Offset: 0x018 (R/W) P-AHB Control Register */ + uint32_t RESERVED2[313U]; + __IOM uint32_t ITGU_CTRL; /*!< Offset: 0x500 (R/W) ITGU Control Register */ + __IOM uint32_t ITGU_CFG; /*!< Offset: 0x504 (R/W) ITGU Configuration Register */ + uint32_t RESERVED3[2U]; + __IOM uint32_t ITGU_LUT[16U]; /*!< Offset: 0x510 (R/W) ITGU Look Up Table Register */ + uint32_t RESERVED4[44U]; + __IOM uint32_t DTGU_CTRL; /*!< Offset: 0x600 (R/W) DTGU Control Registers */ + __IOM uint32_t DTGU_CFG; /*!< Offset: 0x604 (R/W) DTGU Configuration Register */ + uint32_t RESERVED5[2U]; + __IOM uint32_t DTGU_LUT[16U]; /*!< Offset: 0x610 (R/W) DTGU Look Up Table Register */ +} MemSysCtl_Type; + +/* MEMSYSCTL Memory System Control Register (MSCR) Register Definitions */ +#define MEMSYSCTL_MSCR_CPWRDN_Pos 17U /*!< MEMSYSCTL MSCR: CPWRDN Position */ +#define MEMSYSCTL_MSCR_CPWRDN_Msk (0x1UL << MEMSYSCTL_MSCR_CPWRDN_Pos) /*!< MEMSYSCTL MSCR: CPWRDN Mask */ + +#define MEMSYSCTL_MSCR_DCCLEAN_Pos 16U /*!< MEMSYSCTL MSCR: DCCLEAN Position */ +#define MEMSYSCTL_MSCR_DCCLEAN_Msk (0x1UL << MEMSYSCTL_MSCR_DCCLEAN_Pos) /*!< MEMSYSCTL MSCR: DCCLEAN Mask */ + +#define MEMSYSCTL_MSCR_ICACTIVE_Pos 13U /*!< MEMSYSCTL MSCR: ICACTIVE Position */ +#define MEMSYSCTL_MSCR_ICACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_ICACTIVE_Pos) /*!< MEMSYSCTL MSCR: ICACTIVE Mask */ + +#define MEMSYSCTL_MSCR_DCACTIVE_Pos 12U /*!< MEMSYSCTL MSCR: DCACTIVE Position */ +#define MEMSYSCTL_MSCR_DCACTIVE_Msk (0x1UL << MEMSYSCTL_MSCR_DCACTIVE_Pos) /*!< MEMSYSCTL MSCR: DCACTIVE Mask */ + +#define MEMSYSCTL_MSCR_EVECCFAULT_Pos 3U /*!< MEMSYSCTL MSCR: EVECCFAULT Position */ +#define MEMSYSCTL_MSCR_EVECCFAULT_Msk (0x1UL << MEMSYSCTL_MSCR_EVECCFAULT_Pos) /*!< MEMSYSCTL MSCR: EVECCFAULT Mask */ + +#define MEMSYSCTL_MSCR_FORCEWT_Pos 2U /*!< MEMSYSCTL MSCR: FORCEWT Position */ +#define MEMSYSCTL_MSCR_FORCEWT_Msk (0x1UL << MEMSYSCTL_MSCR_FORCEWT_Pos) /*!< MEMSYSCTL MSCR: FORCEWT Mask */ + +#define 
MEMSYSCTL_MSCR_ECCEN_Pos 1U /*!< MEMSYSCTL MSCR: ECCEN Position */ +#define MEMSYSCTL_MSCR_ECCEN_Msk (0x1UL << MEMSYSCTL_MSCR_ECCEN_Pos) /*!< MEMSYSCTL MSCR: ECCEN Mask */ + +/* MEMSYSCTL Prefetcher Control Register (PFCR) Register Definitions */ +#define MEMSYSCTL_PFCR_DIS_NLP_Pos 7U /*!< MEMSYSCTL PFCR: DIS_NLP Position */ +#define MEMSYSCTL_PFCR_DIS_NLP_Msk (0x1UL << MEMSYSCTL_PFCR_DIS_NLP_Pos) /*!< MEMSYSCTL PFCR: DIS_NLP Mask */ + +#define MEMSYSCTL_PFCR_ENABLE_Pos 0U /*!< MEMSYSCTL PFCR: ENABLE Position */ +#define MEMSYSCTL_PFCR_ENABLE_Msk (0x1UL /*<< MEMSYSCTL_PFCR_ENABLE_Pos*/) /*!< MEMSYSCTL PFCR: ENABLE Mask */ + +/* MEMSYSCTL ITCM Control Register (ITCMCR) Register Definitions */ +#define MEMSYSCTL_ITCMCR_SZ_Pos 3U /*!< MEMSYSCTL ITCMCR: SZ Position */ +#define MEMSYSCTL_ITCMCR_SZ_Msk (0xFUL << MEMSYSCTL_ITCMCR_SZ_Pos) /*!< MEMSYSCTL ITCMCR: SZ Mask */ + +#define MEMSYSCTL_ITCMCR_EN_Pos 0U /*!< MEMSYSCTL ITCMCR: EN Position */ +#define MEMSYSCTL_ITCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_ITCMCR_EN_Pos*/) /*!< MEMSYSCTL ITCMCR: EN Mask */ + +/* MEMSYSCTL DTCM Control Register (DTCMCR) Register Definitions */ +#define MEMSYSCTL_DTCMCR_SZ_Pos 3U /*!< MEMSYSCTL DTCMCR: SZ Position */ +#define MEMSYSCTL_DTCMCR_SZ_Msk (0xFUL << MEMSYSCTL_DTCMCR_SZ_Pos) /*!< MEMSYSCTL DTCMCR: SZ Mask */ + +#define MEMSYSCTL_DTCMCR_EN_Pos 0U /*!< MEMSYSCTL DTCMCR: EN Position */ +#define MEMSYSCTL_DTCMCR_EN_Msk (0x1UL /*<< MEMSYSCTL_DTCMCR_EN_Pos*/) /*!< MEMSYSCTL DTCMCR: EN Mask */ + +/* MEMSYSCTL P-AHB Control Register (PAHBCR) Register Definitions */ +#define MEMSYSCTL_PAHBCR_SZ_Pos 1U /*!< MEMSYSCTL PAHBCR: SZ Position */ +#define MEMSYSCTL_PAHBCR_SZ_Msk (0x7UL << MEMSYSCTL_PAHBCR_SZ_Pos) /*!< MEMSYSCTL PAHBCR: SZ Mask */ + +#define MEMSYSCTL_PAHBCR_EN_Pos 0U /*!< MEMSYSCTL PAHBCR: EN Position */ +#define MEMSYSCTL_PAHBCR_EN_Msk (0x1UL /*<< MEMSYSCTL_PAHBCR_EN_Pos*/) /*!< MEMSYSCTL PAHBCR: EN Mask */ + +/* MEMSYSCTL ITGU Control Register (ITGU_CTRL) Register Definitions */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL ITGU_CTRL: DEREN Position */ +#define MEMSYSCTL_ITGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_ITGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL ITGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL ITGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_ITGU_CTRL_DBFEN_Msk (0x1UL /*<< MEMSYSCTL_ITGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL ITGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL ITGU Configuration Register (ITGU_CFG) Register Definitions */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL ITGU_CFG: PRESENT Position */ +#define MEMSYSCTL_ITGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_ITGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL ITGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_ITGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_ITGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL ITGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL ITGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_ITGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_ITGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL ITGU_CFG: BLKSZ Mask */ + +/* MEMSYSCTL DTGU Control Registers (DTGU_CTRL) Register Definitions */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Pos 1U /*!< MEMSYSCTL DTGU_CTRL: DEREN Position */ +#define MEMSYSCTL_DTGU_CTRL_DEREN_Msk (0x1UL << MEMSYSCTL_DTGU_CTRL_DEREN_Pos) /*!< MEMSYSCTL DTGU_CTRL: DEREN Mask */ + +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Pos 0U /*!< MEMSYSCTL DTGU_CTRL: DBFEN Position */ +#define MEMSYSCTL_DTGU_CTRL_DBFEN_Msk (0x1UL /*<< 
MEMSYSCTL_DTGU_CTRL_DBFEN_Pos*/) /*!< MEMSYSCTL DTGU_CTRL: DBFEN Mask */ + +/* MEMSYSCTL DTGU Configuration Register (DTGU_CFG) Register Definitions */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Pos 31U /*!< MEMSYSCTL DTGU_CFG: PRESENT Position */ +#define MEMSYSCTL_DTGU_CFG_PRESENT_Msk (0x1UL << MEMSYSCTL_DTGU_CFG_PRESENT_Pos) /*!< MEMSYSCTL DTGU_CFG: PRESENT Mask */ + +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos 8U /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Position */ +#define MEMSYSCTL_DTGU_CFG_NUMBLKS_Msk (0xFUL << MEMSYSCTL_DTGU_CFG_NUMBLKS_Pos) /*!< MEMSYSCTL DTGU_CFG: NUMBLKS Mask */ + +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Pos 0U /*!< MEMSYSCTL DTGU_CFG: BLKSZ Position */ +#define MEMSYSCTL_DTGU_CFG_BLKSZ_Msk (0xFUL /*<< MEMSYSCTL_DTGU_CFG_BLKSZ_Pos*/) /*!< MEMSYSCTL DTGU_CFG: BLKSZ Mask */ + + +/*@}*/ /* end of group MemSysCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PwrModCtl_Type Power Mode Control Registers + \brief Type definitions for the Power Mode Control Registers (PWRMODCTL) + @{ + */ + +/** + \brief Structure type to access the Power Mode Control Registers (PWRMODCTL). + */ +typedef struct +{ + __IOM uint32_t CPDLPSTATE; /*!< Offset: 0x000 (R/W) Core Power Domain Low Power State Register */ + __IOM uint32_t DPDLPSTATE; /*!< Offset: 0x004 (R/W) Debug Power Domain Low Power State Register */ +} PwrModCtl_Type; + +/* PWRMODCTL Core Power Domain Low Power State (CPDLPSTATE) Register Definitions */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos 8U /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_RLPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_RLPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: RLPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos 4U /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_ELPSTATE_Msk (0x3UL << PWRMODCTL_CPDLPSTATE_ELPSTATE_Pos) /*!< PWRMODCTL CPDLPSTATE: ELPSTATE Mask */ + +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos 0U /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Position */ +#define PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk (0x3UL /*<< PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos*/) /*!< PWRMODCTL CPDLPSTATE: CLPSTATE Mask */ + +/* PWRMODCTL Debug Power Domain Low Power State (DPDLPSTATE) Register Definitions */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos 0U /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Position */ +#define PWRMODCTL_DPDLPSTATE_DLPSTATE_Msk (0x3UL /*<< PWRMODCTL_DPDLPSTATE_DLPSTATE_Pos*/) /*!< PWRMODCTL DPDLPSTATE: DLPSTATE Mask */ + +/*@}*/ /* end of group PwrModCtl_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup EWIC_Type External Wakeup Interrupt Controller Registers + \brief Type definitions for the External Wakeup Interrupt Controller Registers (EWIC) + @{ + */ + +/** + \brief Structure type to access the External Wakeup Interrupt Controller Registers (EWIC). 
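Editor's note on the PWRMODCTL block above: CPDLPSTATE/DPDLPSTATE are written with these Pos/Msk pairs before entering sleep. A minimal sketch, assuming a "PWRMODCTL" base pointer is provided further down this header (as in stock CMSIS) and that the 2-bit low-power state encodings have been checked against the core's TRM (the function name and core_lpstate parameter are placeholders):

static void enter_low_power(uint32_t core_lpstate)
{
  uint32_t val = PWRMODCTL->CPDLPSTATE;
  /* Replace only the core power domain field, leaving RLPSTATE/ELPSTATE untouched. */
  val &= ~PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk;
  val |= (core_lpstate << PWRMODCTL_CPDLPSTATE_CLPSTATE_Pos) & PWRMODCTL_CPDLPSTATE_CLPSTATE_Msk;
  PWRMODCTL->CPDLPSTATE = val;
  __DSB();
  __WFI();
}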
+ */ +typedef struct +{ + __OM uint32_t EVENTSPR; /*!< Offset: 0x000 ( /W) Event Set Pending Register */ + uint32_t RESERVED0[31U]; + __IM uint32_t EVENTMASKA; /*!< Offset: 0x080 (R/W) Event Mask A Register */ + __IM uint32_t EVENTMASK[15]; /*!< Offset: 0x084 (R/W) Event Mask Register */ +} EWIC_Type; + +/* EWIC External Wakeup Interrupt Controller (EVENTSPR) Register Definitions */ +#define EWIC_EVENTSPR_EDBGREQ_Pos 2U /*!< EWIC EVENTSPR: EDBGREQ Position */ +#define EWIC_EVENTSPR_EDBGREQ_Msk (0x1UL << EWIC_EVENTSPR_EDBGREQ_Pos) /*!< EWIC EVENTSPR: EDBGREQ Mask */ + +#define EWIC_EVENTSPR_NMI_Pos 1U /*!< EWIC EVENTSPR: NMI Position */ +#define EWIC_EVENTSPR_NMI_Msk (0x1UL << EWIC_EVENTSPR_NMI_Pos) /*!< EWIC EVENTSPR: NMI Mask */ + +#define EWIC_EVENTSPR_EVENT_Pos 0U /*!< EWIC EVENTSPR: EVENT Position */ +#define EWIC_EVENTSPR_EVENT_Msk (0x1UL /*<< EWIC_EVENTSPR_EVENT_Pos*/) /*!< EWIC EVENTSPR: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASKA) Register Definitions */ +#define EWIC_EVENTMASKA_EDBGREQ_Pos 2U /*!< EWIC EVENTMASKA: EDBGREQ Position */ +#define EWIC_EVENTMASKA_EDBGREQ_Msk (0x1UL << EWIC_EVENTMASKA_EDBGREQ_Pos) /*!< EWIC EVENTMASKA: EDBGREQ Mask */ + +#define EWIC_EVENTMASKA_NMI_Pos 1U /*!< EWIC EVENTMASKA: NMI Position */ +#define EWIC_EVENTMASKA_NMI_Msk (0x1UL << EWIC_EVENTMASKA_NMI_Pos) /*!< EWIC EVENTMASKA: NMI Mask */ + +#define EWIC_EVENTMASKA_EVENT_Pos 0U /*!< EWIC EVENTMASKA: EVENT Position */ +#define EWIC_EVENTMASKA_EVENT_Msk (0x1UL /*<< EWIC_EVENTMASKA_EVENT_Pos*/) /*!< EWIC EVENTMASKA: EVENT Mask */ + +/* EWIC External Wakeup Interrupt Controller (EVENTMASK) Register Definitions */ +#define EWIC_EVENTMASK_IRQ_Pos 0U /*!< EWIC EVENTMASKA: IRQ Position */ +#define EWIC_EVENTMASK_IRQ_Msk (0xFFFFFFFFUL /*<< EWIC_EVENTMASKA_IRQ_Pos*/) /*!< EWIC EVENTMASKA: IRQ Mask */ + +/*@}*/ /* end of group EWIC_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup ErrBnk_Type Error Banking Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Error Banking Registers (ERRBNK) + @{ + */ + +/** + \brief Structure type to access the Error Banking Registers (ERRBNK). 
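Editor's note on the EWIC block above: it is mostly driven by the power controller, not by application code; the mask registers are read-only here, so about the only thing firmware does with these definitions is inspect them. A hedged sketch, assuming an "EWIC" base pointer is defined later in this header (verify against the device support package before relying on it; the helper name and bank parameter are illustrative):

static uint32_t ewic_maskable_irqs(uint32_t bank)
{
  /* Read which IRQs (32 per bank, 15 banks) the EWIC treats as wakeup sources. */
  return (bank < 15U) ? (EWIC->EVENTMASK[bank] & EWIC_EVENTMASK_IRQ_Msk) : 0UL;
}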
+ */ +typedef struct +{ + __IOM uint32_t IEBR0; /*!< Offset: 0x000 (R/W) Instruction Cache Error Bank Register 0 */ + __IOM uint32_t IEBR1; /*!< Offset: 0x004 (R/W) Instruction Cache Error Bank Register 1 */ + uint32_t RESERVED0[2U]; + __IOM uint32_t DEBR0; /*!< Offset: 0x010 (R/W) Data Cache Error Bank Register 0 */ + __IOM uint32_t DEBR1; /*!< Offset: 0x014 (R/W) Data Cache Error Bank Register 1 */ + uint32_t RESERVED1[2U]; + __IOM uint32_t TEBR0; /*!< Offset: 0x020 (R/W) TCM Error Bank Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t TEBR1; /*!< Offset: 0x028 (R/W) TCM Error Bank Register 1 */ +} ErrBnk_Type; + +/* ERRBNK Instruction Cache Error Bank Register 0 (IEBR0) Register Definitions */ +#define ERRBNK_IEBR0_SWDEF_Pos 30U /*!< ERRBNK IEBR0: SWDEF Position */ +#define ERRBNK_IEBR0_SWDEF_Msk (0x3UL << ERRBNK_IEBR0_SWDEF_Pos) /*!< ERRBNK IEBR0: SWDEF Mask */ + +#define ERRBNK_IEBR0_BANK_Pos 16U /*!< ERRBNK IEBR0: BANK Position */ +#define ERRBNK_IEBR0_BANK_Msk (0x1UL << ERRBNK_IEBR0_BANK_Pos) /*!< ERRBNK IEBR0: BANK Mask */ + +#define ERRBNK_IEBR0_LOCATION_Pos 2U /*!< ERRBNK IEBR0: LOCATION Position */ +#define ERRBNK_IEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR0_LOCATION_Pos) /*!< ERRBNK IEBR0: LOCATION Mask */ + +#define ERRBNK_IEBR0_LOCKED_Pos 1U /*!< ERRBNK IEBR0: LOCKED Position */ +#define ERRBNK_IEBR0_LOCKED_Msk (0x1UL << ERRBNK_IEBR0_LOCKED_Pos) /*!< ERRBNK IEBR0: LOCKED Mask */ + +#define ERRBNK_IEBR0_VALID_Pos 0U /*!< ERRBNK IEBR0: VALID Position */ +#define ERRBNK_IEBR0_VALID_Msk (0x1UL << /*ERRBNK_IEBR0_VALID_Pos*/) /*!< ERRBNK IEBR0: VALID Mask */ + +/* ERRBNK Instruction Cache Error Bank Register 1 (IEBR1) Register Definitions */ +#define ERRBNK_IEBR1_SWDEF_Pos 30U /*!< ERRBNK IEBR1: SWDEF Position */ +#define ERRBNK_IEBR1_SWDEF_Msk (0x3UL << ERRBNK_IEBR1_SWDEF_Pos) /*!< ERRBNK IEBR1: SWDEF Mask */ + +#define ERRBNK_IEBR1_BANK_Pos 16U /*!< ERRBNK IEBR1: BANK Position */ +#define ERRBNK_IEBR1_BANK_Msk (0x1UL << ERRBNK_IEBR1_BANK_Pos) /*!< ERRBNK IEBR1: BANK Mask */ + +#define ERRBNK_IEBR1_LOCATION_Pos 2U /*!< ERRBNK IEBR1: LOCATION Position */ +#define ERRBNK_IEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_IEBR1_LOCATION_Pos) /*!< ERRBNK IEBR1: LOCATION Mask */ + +#define ERRBNK_IEBR1_LOCKED_Pos 1U /*!< ERRBNK IEBR1: LOCKED Position */ +#define ERRBNK_IEBR1_LOCKED_Msk (0x1UL << ERRBNK_IEBR1_LOCKED_Pos) /*!< ERRBNK IEBR1: LOCKED Mask */ + +#define ERRBNK_IEBR1_VALID_Pos 0U /*!< ERRBNK IEBR1: VALID Position */ +#define ERRBNK_IEBR1_VALID_Msk (0x1UL << /*ERRBNK_IEBR1_VALID_Pos*/) /*!< ERRBNK IEBR1: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 0 (DEBR0) Register Definitions */ +#define ERRBNK_DEBR0_SWDEF_Pos 30U /*!< ERRBNK DEBR0: SWDEF Position */ +#define ERRBNK_DEBR0_SWDEF_Msk (0x3UL << ERRBNK_DEBR0_SWDEF_Pos) /*!< ERRBNK DEBR0: SWDEF Mask */ + +#define ERRBNK_DEBR0_TYPE_Pos 17U /*!< ERRBNK DEBR0: TYPE Position */ +#define ERRBNK_DEBR0_TYPE_Msk (0x1UL << ERRBNK_DEBR0_TYPE_Pos) /*!< ERRBNK DEBR0: TYPE Mask */ + +#define ERRBNK_DEBR0_BANK_Pos 16U /*!< ERRBNK DEBR0: BANK Position */ +#define ERRBNK_DEBR0_BANK_Msk (0x1UL << ERRBNK_DEBR0_BANK_Pos) /*!< ERRBNK DEBR0: BANK Mask */ + +#define ERRBNK_DEBR0_LOCATION_Pos 2U /*!< ERRBNK DEBR0: LOCATION Position */ +#define ERRBNK_DEBR0_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR0_LOCATION_Pos) /*!< ERRBNK DEBR0: LOCATION Mask */ + +#define ERRBNK_DEBR0_LOCKED_Pos 1U /*!< ERRBNK DEBR0: LOCKED Position */ +#define ERRBNK_DEBR0_LOCKED_Msk (0x1UL << ERRBNK_DEBR0_LOCKED_Pos) /*!< ERRBNK DEBR0: LOCKED Mask */ + +#define 
ERRBNK_DEBR0_VALID_Pos 0U /*!< ERRBNK DEBR0: VALID Position */ +#define ERRBNK_DEBR0_VALID_Msk (0x1UL << /*ERRBNK_DEBR0_VALID_Pos*/) /*!< ERRBNK DEBR0: VALID Mask */ + +/* ERRBNK Data Cache Error Bank Register 1 (DEBR1) Register Definitions */ +#define ERRBNK_DEBR1_SWDEF_Pos 30U /*!< ERRBNK DEBR1: SWDEF Position */ +#define ERRBNK_DEBR1_SWDEF_Msk (0x3UL << ERRBNK_DEBR1_SWDEF_Pos) /*!< ERRBNK DEBR1: SWDEF Mask */ + +#define ERRBNK_DEBR1_TYPE_Pos 17U /*!< ERRBNK DEBR1: TYPE Position */ +#define ERRBNK_DEBR1_TYPE_Msk (0x1UL << ERRBNK_DEBR1_TYPE_Pos) /*!< ERRBNK DEBR1: TYPE Mask */ + +#define ERRBNK_DEBR1_BANK_Pos 16U /*!< ERRBNK DEBR1: BANK Position */ +#define ERRBNK_DEBR1_BANK_Msk (0x1UL << ERRBNK_DEBR1_BANK_Pos) /*!< ERRBNK DEBR1: BANK Mask */ + +#define ERRBNK_DEBR1_LOCATION_Pos 2U /*!< ERRBNK DEBR1: LOCATION Position */ +#define ERRBNK_DEBR1_LOCATION_Msk (0x3FFFUL << ERRBNK_DEBR1_LOCATION_Pos) /*!< ERRBNK DEBR1: LOCATION Mask */ + +#define ERRBNK_DEBR1_LOCKED_Pos 1U /*!< ERRBNK DEBR1: LOCKED Position */ +#define ERRBNK_DEBR1_LOCKED_Msk (0x1UL << ERRBNK_DEBR1_LOCKED_Pos) /*!< ERRBNK DEBR1: LOCKED Mask */ + +#define ERRBNK_DEBR1_VALID_Pos 0U /*!< ERRBNK DEBR1: VALID Position */ +#define ERRBNK_DEBR1_VALID_Msk (0x1UL << /*ERRBNK_DEBR1_VALID_Pos*/) /*!< ERRBNK DEBR1: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 0 (TEBR0) Register Definitions */ +#define ERRBNK_TEBR0_SWDEF_Pos 30U /*!< ERRBNK TEBR0: SWDEF Position */ +#define ERRBNK_TEBR0_SWDEF_Msk (0x3UL << ERRBNK_TEBR0_SWDEF_Pos) /*!< ERRBNK TEBR0: SWDEF Mask */ + +#define ERRBNK_TEBR0_POISON_Pos 28U /*!< ERRBNK TEBR0: POISON Position */ +#define ERRBNK_TEBR0_POISON_Msk (0x1UL << ERRBNK_TEBR0_POISON_Pos) /*!< ERRBNK TEBR0: POISON Mask */ + +#define ERRBNK_TEBR0_TYPE_Pos 27U /*!< ERRBNK TEBR0: TYPE Position */ +#define ERRBNK_TEBR0_TYPE_Msk (0x1UL << ERRBNK_TEBR0_TYPE_Pos) /*!< ERRBNK TEBR0: TYPE Mask */ + +#define ERRBNK_TEBR0_BANK_Pos 24U /*!< ERRBNK TEBR0: BANK Position */ +#define ERRBNK_TEBR0_BANK_Msk (0x3UL << ERRBNK_TEBR0_BANK_Pos) /*!< ERRBNK TEBR0: BANK Mask */ + +#define ERRBNK_TEBR0_LOCATION_Pos 2U /*!< ERRBNK TEBR0: LOCATION Position */ +#define ERRBNK_TEBR0_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR0_LOCATION_Pos) /*!< ERRBNK TEBR0: LOCATION Mask */ + +#define ERRBNK_TEBR0_LOCKED_Pos 1U /*!< ERRBNK TEBR0: LOCKED Position */ +#define ERRBNK_TEBR0_LOCKED_Msk (0x1UL << ERRBNK_TEBR0_LOCKED_Pos) /*!< ERRBNK TEBR0: LOCKED Mask */ + +#define ERRBNK_TEBR0_VALID_Pos 0U /*!< ERRBNK TEBR0: VALID Position */ +#define ERRBNK_TEBR0_VALID_Msk (0x1UL << /*ERRBNK_TEBR0_VALID_Pos*/) /*!< ERRBNK TEBR0: VALID Mask */ + +/* ERRBNK TCM Error Bank Register 1 (TEBR1) Register Definitions */ +#define ERRBNK_TEBR1_SWDEF_Pos 30U /*!< ERRBNK TEBR1: SWDEF Position */ +#define ERRBNK_TEBR1_SWDEF_Msk (0x3UL << ERRBNK_TEBR1_SWDEF_Pos) /*!< ERRBNK TEBR1: SWDEF Mask */ + +#define ERRBNK_TEBR1_POISON_Pos 28U /*!< ERRBNK TEBR1: POISON Position */ +#define ERRBNK_TEBR1_POISON_Msk (0x1UL << ERRBNK_TEBR1_POISON_Pos) /*!< ERRBNK TEBR1: POISON Mask */ + +#define ERRBNK_TEBR1_TYPE_Pos 27U /*!< ERRBNK TEBR1: TYPE Position */ +#define ERRBNK_TEBR1_TYPE_Msk (0x1UL << ERRBNK_TEBR1_TYPE_Pos) /*!< ERRBNK TEBR1: TYPE Mask */ + +#define ERRBNK_TEBR1_BANK_Pos 24U /*!< ERRBNK TEBR1: BANK Position */ +#define ERRBNK_TEBR1_BANK_Msk (0x3UL << ERRBNK_TEBR1_BANK_Pos) /*!< ERRBNK TEBR1: BANK Mask */ + +#define ERRBNK_TEBR1_LOCATION_Pos 2U /*!< ERRBNK TEBR1: LOCATION Position */ +#define ERRBNK_TEBR1_LOCATION_Msk (0x3FFFFFUL << ERRBNK_TEBR1_LOCATION_Pos) /*!< ERRBNK TEBR1: LOCATION 
Mask */ + +#define ERRBNK_TEBR1_LOCKED_Pos 1U /*!< ERRBNK TEBR1: LOCKED Position */ +#define ERRBNK_TEBR1_LOCKED_Msk (0x1UL << ERRBNK_TEBR1_LOCKED_Pos) /*!< ERRBNK TEBR1: LOCKED Mask */ + +#define ERRBNK_TEBR1_VALID_Pos 0U /*!< ERRBNK TEBR1: VALID Position */ +#define ERRBNK_TEBR1_VALID_Msk (0x1UL << /*ERRBNK_TEBR1_VALID_Pos*/) /*!< ERRBNK TEBR1: VALID Mask */ + +/*@}*/ /* end of group ErrBnk_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup PrcCfgInf_Type Processor Configuration Information Registers (IMPLEMENTATION DEFINED) + \brief Type definitions for the Processor Configuration Information Registerss (PRCCFGINF) + @{ + */ + +/** + \brief Structure type to access the Processor Configuration Information Registerss (PRCCFGINF). + */ +typedef struct +{ + __OM uint32_t CFGINFOSEL; /*!< Offset: 0x000 ( /W) Processor Configuration Information Selection Register */ + __IM uint32_t CFGINFORD; /*!< Offset: 0x004 (R/ ) Processor Configuration Information Read Data Register */ +} PrcCfgInf_Type; + +/* PRCCFGINF Processor Configuration Information Selection Register (CFGINFOSEL) Definitions */ + +/* PRCCFGINF Processor Configuration Information Read Data Register (CFGINFORD) Definitions */ + +/*@}*/ /* end of group PrcCfgInf_Type */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Sizes Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Sizes Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[809U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) Software Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) Software Lock Status Register */ + uint32_t RESERVED4[4U]; + __IM uint32_t TYPE; /*!< Offset: 0xFC8 (R/ ) Device Identifier Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_SWOSCALER_Pos 0U /*!< TPI ACPR: SWOSCALER Position */ +#define TPI_ACPR_SWOSCALER_Msk (0xFFFFUL /*<< TPI_ACPR_SWOSCALER_Pos*/) /*!< TPI ACPR: SWOSCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define 
TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFmt_Pos 0U /*!< TPI FFCR: EnFmt Position */ +#define TPI_FFCR_EnFmt_Msk (0x3UL << /*TPI_FFCR_EnFmt_Pos*/) /*!< TPI FFCR: EnFmt Mask */ + +/* TPI Periodic Synchronization Control Register Definitions */ +#define TPI_PSCR_PSCount_Pos 0U /*!< TPI PSCR: PSCount Position */ +#define TPI_PSCR_PSCount_Msk (0x1FUL /*<< TPI_PSCR_PSCount_Pos*/) /*!< TPI PSCR: TPSCount Mask */ + +/* TPI Software Lock Status Register Definitions */ +#define TPI_LSR_nTT_Pos 1U /*!< TPI LSR: Not thirty-two bit. Position */ +#define TPI_LSR_nTT_Msk (0x1UL << TPI_LSR_nTT_Pos) /*!< TPI LSR: Not thirty-two bit. Mask */ + +#define TPI_LSR_SLK_Pos 1U /*!< TPI LSR: Software Lock status Position */ +#define TPI_LSR_SLK_Msk (0x1UL << TPI_LSR_SLK_Pos) /*!< TPI LSR: Software Lock status Mask */ + +#define TPI_LSR_SLI_Pos 0U /*!< TPI LSR: Software Lock implemented Position */ +#define TPI_LSR_SLI_Msk (0x1UL /*<< TPI_LSR_SLI_Pos*/) /*!< TPI LSR: Software Lock implemented Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFO depth Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFO depth Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_PMU Performance Monitoring Unit (PMU) + \brief Type definitions for the Performance Monitoring Unit (PMU) + @{ + */ + +/** + \brief Structure type to access the Performance Monitoring Unit (PMU). 
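Editor's note on the TPI definitions above: this is the block SWO setup code pokes. A rough sketch of the usual SWO (NRZ) configuration, assuming the standard CMSIS "TPI" base pointer defined later in this header; trace_clk_hz and swo_baud_hz are placeholders for values that come from the clock tree and the debug probe:

static void swo_configure(uint32_t trace_clk_hz, uint32_t swo_baud_hz)
{
  TPI->SPPR = 2UL;                                  /* selected pin protocol: 2 = NRZ ("UART"-style SWO) */
  TPI->ACPR = (trace_clk_hz / swo_baud_hz) - 1UL;   /* SWO baud = trace clock / (SWOSCALER + 1)          */
}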
+ */ +typedef struct +{ + __IOM uint32_t EVCNTR[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x0 (R/W) PMU Event Counter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED0[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCNTR; /*!< Offset: 0x7C (R/W) PMU Cycle Counter Register */ + uint32_t RESERVED1[224]; + __IOM uint32_t EVTYPER[__PMU_NUM_EVENTCNT]; /*!< Offset: 0x400 (R/W) PMU Event Type and Filter Registers */ +#if __PMU_NUM_EVENTCNT<31 + uint32_t RESERVED2[31U-__PMU_NUM_EVENTCNT]; +#endif + __IOM uint32_t CCFILTR; /*!< Offset: 0x47C (R/W) PMU Cycle Counter Filter Register */ + uint32_t RESERVED3[480]; + __IOM uint32_t CNTENSET; /*!< Offset: 0xC00 (R/W) PMU Count Enable Set Register */ + uint32_t RESERVED4[7]; + __IOM uint32_t CNTENCLR; /*!< Offset: 0xC20 (R/W) PMU Count Enable Clear Register */ + uint32_t RESERVED5[7]; + __IOM uint32_t INTENSET; /*!< Offset: 0xC40 (R/W) PMU Interrupt Enable Set Register */ + uint32_t RESERVED6[7]; + __IOM uint32_t INTENCLR; /*!< Offset: 0xC60 (R/W) PMU Interrupt Enable Clear Register */ + uint32_t RESERVED7[7]; + __IOM uint32_t OVSCLR; /*!< Offset: 0xC80 (R/W) PMU Overflow Flag Status Clear Register */ + uint32_t RESERVED8[7]; + __IOM uint32_t SWINC; /*!< Offset: 0xCA0 (R/W) PMU Software Increment Register */ + uint32_t RESERVED9[7]; + __IOM uint32_t OVSSET; /*!< Offset: 0xCC0 (R/W) PMU Overflow Flag Status Set Register */ + uint32_t RESERVED10[79]; + __IOM uint32_t TYPE; /*!< Offset: 0xE00 (R/W) PMU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0xE04 (R/W) PMU Control Register */ + uint32_t RESERVED11[108]; + __IOM uint32_t AUTHSTATUS; /*!< Offset: 0xFB8 (R/W) PMU Authentication Status Register */ + __IOM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/W) PMU Device Architecture Register */ + uint32_t RESERVED12[3]; + __IOM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/W) PMU Device Type Register */ + __IOM uint32_t PIDR4; /*!< Offset: 0xFD0 (R/W) PMU Peripheral Identification Register 4 */ + uint32_t RESERVED13[3]; + __IOM uint32_t PIDR0; /*!< Offset: 0xFE0 (R/W) PMU Peripheral Identification Register 0 */ + __IOM uint32_t PIDR1; /*!< Offset: 0xFE4 (R/W) PMU Peripheral Identification Register 1 */ + __IOM uint32_t PIDR2; /*!< Offset: 0xFE8 (R/W) PMU Peripheral Identification Register 2 */ + __IOM uint32_t PIDR3; /*!< Offset: 0xFEC (R/W) PMU Peripheral Identification Register 3 */ + __IOM uint32_t CIDR0; /*!< Offset: 0xFF0 (R/W) PMU Component Identification Register 0 */ + __IOM uint32_t CIDR1; /*!< Offset: 0xFF4 (R/W) PMU Component Identification Register 1 */ + __IOM uint32_t CIDR2; /*!< Offset: 0xFF8 (R/W) PMU Component Identification Register 2 */ + __IOM uint32_t CIDR3; /*!< Offset: 0xFFC (R/W) PMU Component Identification Register 3 */ +} PMU_Type; + +/** \brief PMU Event Counter Registers (0-30) Definitions */ + +#define PMU_EVCNTR_CNT_Pos 0U /*!< PMU EVCNTR: Counter Position */ +#define PMU_EVCNTR_CNT_Msk (0xFFFFUL /*<< PMU_EVCNTRx_CNT_Pos*/) /*!< PMU EVCNTR: Counter Mask */ + +/** \brief PMU Event Type and Filter Registers (0-30) Definitions */ + +#define PMU_EVTYPER_EVENTTOCNT_Pos 0U /*!< PMU EVTYPER: Event to Count Position */ +#define PMU_EVTYPER_EVENTTOCNT_Msk (0xFFFFUL /*<< EVTYPERx_EVENTTOCNT_Pos*/) /*!< PMU EVTYPER: Event to Count Mask */ + +/** \brief PMU Count Enable Set Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENSET: Event Counter 0 Enable Set Position */ +#define PMU_CNTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENSET_CNT0_ENABLE_Pos*/) /*!< PMU CNTENSET: Event Counter 0 Enable Set Mask */ + +#define 
PMU_CNTENSET_CNT1_ENABLE_Pos 1U /*!< PMU CNTENSET: Event Counter 1 Enable Set Position */ +#define PMU_CNTENSET_CNT1_ENABLE_Msk (1UL << PMU_CNTENSET_CNT1_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 1 Enable Set Mask */ + +#define PMU_CNTENSET_CNT2_ENABLE_Pos 2U /*!< PMU CNTENSET: Event Counter 2 Enable Set Position */ +#define PMU_CNTENSET_CNT2_ENABLE_Msk (1UL << PMU_CNTENSET_CNT2_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 2 Enable Set Mask */ + +#define PMU_CNTENSET_CNT3_ENABLE_Pos 3U /*!< PMU CNTENSET: Event Counter 3 Enable Set Position */ +#define PMU_CNTENSET_CNT3_ENABLE_Msk (1UL << PMU_CNTENSET_CNT3_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 3 Enable Set Mask */ + +#define PMU_CNTENSET_CNT4_ENABLE_Pos 4U /*!< PMU CNTENSET: Event Counter 4 Enable Set Position */ +#define PMU_CNTENSET_CNT4_ENABLE_Msk (1UL << PMU_CNTENSET_CNT4_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 4 Enable Set Mask */ + +#define PMU_CNTENSET_CNT5_ENABLE_Pos 5U /*!< PMU CNTENSET: Event Counter 5 Enable Set Position */ +#define PMU_CNTENSET_CNT5_ENABLE_Msk (1UL << PMU_CNTENSET_CNT5_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 5 Enable Set Mask */ + +#define PMU_CNTENSET_CNT6_ENABLE_Pos 6U /*!< PMU CNTENSET: Event Counter 6 Enable Set Position */ +#define PMU_CNTENSET_CNT6_ENABLE_Msk (1UL << PMU_CNTENSET_CNT6_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 6 Enable Set Mask */ + +#define PMU_CNTENSET_CNT7_ENABLE_Pos 7U /*!< PMU CNTENSET: Event Counter 7 Enable Set Position */ +#define PMU_CNTENSET_CNT7_ENABLE_Msk (1UL << PMU_CNTENSET_CNT7_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 7 Enable Set Mask */ + +#define PMU_CNTENSET_CNT8_ENABLE_Pos 8U /*!< PMU CNTENSET: Event Counter 8 Enable Set Position */ +#define PMU_CNTENSET_CNT8_ENABLE_Msk (1UL << PMU_CNTENSET_CNT8_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 8 Enable Set Mask */ + +#define PMU_CNTENSET_CNT9_ENABLE_Pos 9U /*!< PMU CNTENSET: Event Counter 9 Enable Set Position */ +#define PMU_CNTENSET_CNT9_ENABLE_Msk (1UL << PMU_CNTENSET_CNT9_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 9 Enable Set Mask */ + +#define PMU_CNTENSET_CNT10_ENABLE_Pos 10U /*!< PMU CNTENSET: Event Counter 10 Enable Set Position */ +#define PMU_CNTENSET_CNT10_ENABLE_Msk (1UL << PMU_CNTENSET_CNT10_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 10 Enable Set Mask */ + +#define PMU_CNTENSET_CNT11_ENABLE_Pos 11U /*!< PMU CNTENSET: Event Counter 11 Enable Set Position */ +#define PMU_CNTENSET_CNT11_ENABLE_Msk (1UL << PMU_CNTENSET_CNT11_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 11 Enable Set Mask */ + +#define PMU_CNTENSET_CNT12_ENABLE_Pos 12U /*!< PMU CNTENSET: Event Counter 12 Enable Set Position */ +#define PMU_CNTENSET_CNT12_ENABLE_Msk (1UL << PMU_CNTENSET_CNT12_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 12 Enable Set Mask */ + +#define PMU_CNTENSET_CNT13_ENABLE_Pos 13U /*!< PMU CNTENSET: Event Counter 13 Enable Set Position */ +#define PMU_CNTENSET_CNT13_ENABLE_Msk (1UL << PMU_CNTENSET_CNT13_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 13 Enable Set Mask */ + +#define PMU_CNTENSET_CNT14_ENABLE_Pos 14U /*!< PMU CNTENSET: Event Counter 14 Enable Set Position */ +#define PMU_CNTENSET_CNT14_ENABLE_Msk (1UL << PMU_CNTENSET_CNT14_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 14 Enable Set Mask */ + +#define PMU_CNTENSET_CNT15_ENABLE_Pos 15U /*!< PMU CNTENSET: Event Counter 15 Enable Set Position */ +#define PMU_CNTENSET_CNT15_ENABLE_Msk (1UL << PMU_CNTENSET_CNT15_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 15 Enable Set Mask */ + +#define PMU_CNTENSET_CNT16_ENABLE_Pos 16U /*!< PMU CNTENSET: 
Event Counter 16 Enable Set Position */ +#define PMU_CNTENSET_CNT16_ENABLE_Msk (1UL << PMU_CNTENSET_CNT16_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 16 Enable Set Mask */ + +#define PMU_CNTENSET_CNT17_ENABLE_Pos 17U /*!< PMU CNTENSET: Event Counter 17 Enable Set Position */ +#define PMU_CNTENSET_CNT17_ENABLE_Msk (1UL << PMU_CNTENSET_CNT17_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 17 Enable Set Mask */ + +#define PMU_CNTENSET_CNT18_ENABLE_Pos 18U /*!< PMU CNTENSET: Event Counter 18 Enable Set Position */ +#define PMU_CNTENSET_CNT18_ENABLE_Msk (1UL << PMU_CNTENSET_CNT18_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 18 Enable Set Mask */ + +#define PMU_CNTENSET_CNT19_ENABLE_Pos 19U /*!< PMU CNTENSET: Event Counter 19 Enable Set Position */ +#define PMU_CNTENSET_CNT19_ENABLE_Msk (1UL << PMU_CNTENSET_CNT19_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 19 Enable Set Mask */ + +#define PMU_CNTENSET_CNT20_ENABLE_Pos 20U /*!< PMU CNTENSET: Event Counter 20 Enable Set Position */ +#define PMU_CNTENSET_CNT20_ENABLE_Msk (1UL << PMU_CNTENSET_CNT20_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 20 Enable Set Mask */ + +#define PMU_CNTENSET_CNT21_ENABLE_Pos 21U /*!< PMU CNTENSET: Event Counter 21 Enable Set Position */ +#define PMU_CNTENSET_CNT21_ENABLE_Msk (1UL << PMU_CNTENSET_CNT21_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 21 Enable Set Mask */ + +#define PMU_CNTENSET_CNT22_ENABLE_Pos 22U /*!< PMU CNTENSET: Event Counter 22 Enable Set Position */ +#define PMU_CNTENSET_CNT22_ENABLE_Msk (1UL << PMU_CNTENSET_CNT22_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 22 Enable Set Mask */ + +#define PMU_CNTENSET_CNT23_ENABLE_Pos 23U /*!< PMU CNTENSET: Event Counter 23 Enable Set Position */ +#define PMU_CNTENSET_CNT23_ENABLE_Msk (1UL << PMU_CNTENSET_CNT23_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 23 Enable Set Mask */ + +#define PMU_CNTENSET_CNT24_ENABLE_Pos 24U /*!< PMU CNTENSET: Event Counter 24 Enable Set Position */ +#define PMU_CNTENSET_CNT24_ENABLE_Msk (1UL << PMU_CNTENSET_CNT24_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 24 Enable Set Mask */ + +#define PMU_CNTENSET_CNT25_ENABLE_Pos 25U /*!< PMU CNTENSET: Event Counter 25 Enable Set Position */ +#define PMU_CNTENSET_CNT25_ENABLE_Msk (1UL << PMU_CNTENSET_CNT25_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 25 Enable Set Mask */ + +#define PMU_CNTENSET_CNT26_ENABLE_Pos 26U /*!< PMU CNTENSET: Event Counter 26 Enable Set Position */ +#define PMU_CNTENSET_CNT26_ENABLE_Msk (1UL << PMU_CNTENSET_CNT26_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 26 Enable Set Mask */ + +#define PMU_CNTENSET_CNT27_ENABLE_Pos 27U /*!< PMU CNTENSET: Event Counter 27 Enable Set Position */ +#define PMU_CNTENSET_CNT27_ENABLE_Msk (1UL << PMU_CNTENSET_CNT27_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 27 Enable Set Mask */ + +#define PMU_CNTENSET_CNT28_ENABLE_Pos 28U /*!< PMU CNTENSET: Event Counter 28 Enable Set Position */ +#define PMU_CNTENSET_CNT28_ENABLE_Msk (1UL << PMU_CNTENSET_CNT28_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 28 Enable Set Mask */ + +#define PMU_CNTENSET_CNT29_ENABLE_Pos 29U /*!< PMU CNTENSET: Event Counter 29 Enable Set Position */ +#define PMU_CNTENSET_CNT29_ENABLE_Msk (1UL << PMU_CNTENSET_CNT29_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 29 Enable Set Mask */ + +#define PMU_CNTENSET_CNT30_ENABLE_Pos 30U /*!< PMU CNTENSET: Event Counter 30 Enable Set Position */ +#define PMU_CNTENSET_CNT30_ENABLE_Msk (1UL << PMU_CNTENSET_CNT30_ENABLE_Pos) /*!< PMU CNTENSET: Event Counter 30 Enable Set Mask */ + +#define PMU_CNTENSET_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENSET: 
Cycle Counter Enable Set Position */ +#define PMU_CNTENSET_CCNTR_ENABLE_Msk (1UL << PMU_CNTENSET_CCNTR_ENABLE_Pos) /*!< PMU CNTENSET: Cycle Counter Enable Set Mask */ + +/** \brief PMU Count Enable Clear Register Definitions */ + +#define PMU_CNTENSET_CNT0_ENABLE_Pos 0U /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Position */ +#define PMU_CNTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_CNTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU CNTENCLR: Event Counter 0 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU CNTENCLR: Event Counter 1 Enable Clear Position */ +#define PMU_CNTENCLR_CNT1_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT1_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 1 Enable Clear */ + +#define PMU_CNTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Position */ +#define PMU_CNTENCLR_CNT2_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT2_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 2 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Position */ +#define PMU_CNTENCLR_CNT3_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT3_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 3 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Position */ +#define PMU_CNTENCLR_CNT4_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT4_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 4 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Position */ +#define PMU_CNTENCLR_CNT5_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT5_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 5 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Position */ +#define PMU_CNTENCLR_CNT6_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT6_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 6 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Position */ +#define PMU_CNTENCLR_CNT7_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT7_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 7 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Position */ +#define PMU_CNTENCLR_CNT8_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT8_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 8 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Position */ +#define PMU_CNTENCLR_CNT9_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT9_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 9 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Position */ +#define PMU_CNTENCLR_CNT10_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT10_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 10 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Position */ +#define PMU_CNTENCLR_CNT11_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT11_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 11 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Position */ +#define PMU_CNTENCLR_CNT12_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT12_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 12 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Position */ +#define PMU_CNTENCLR_CNT13_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT13_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 13 Enable Clear Mask */ + +#define 
PMU_CNTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Position */ +#define PMU_CNTENCLR_CNT14_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT14_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 14 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Position */ +#define PMU_CNTENCLR_CNT15_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT15_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 15 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Position */ +#define PMU_CNTENCLR_CNT16_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT16_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 16 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Position */ +#define PMU_CNTENCLR_CNT17_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT17_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 17 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Position */ +#define PMU_CNTENCLR_CNT18_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT18_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 18 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Position */ +#define PMU_CNTENCLR_CNT19_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT19_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 19 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Position */ +#define PMU_CNTENCLR_CNT20_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT20_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 20 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Position */ +#define PMU_CNTENCLR_CNT21_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT21_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 21 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Position */ +#define PMU_CNTENCLR_CNT22_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT22_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 22 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Position */ +#define PMU_CNTENCLR_CNT23_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT23_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 23 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Position */ +#define PMU_CNTENCLR_CNT24_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT24_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 24 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Position */ +#define PMU_CNTENCLR_CNT25_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT25_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 25 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Position */ +#define PMU_CNTENCLR_CNT26_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT26_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 26 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Position */ +#define PMU_CNTENCLR_CNT27_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT27_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 27 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU CNTENCLR: Event Counter 28 Enable Clear Position */ +#define PMU_CNTENCLR_CNT28_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT28_ENABLE_Pos) /*!< 
PMU CNTENCLR: Event Counter 28 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Position */ +#define PMU_CNTENCLR_CNT29_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT29_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 29 Enable Clear Mask */ + +#define PMU_CNTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Position */ +#define PMU_CNTENCLR_CNT30_ENABLE_Msk (1UL << PMU_CNTENCLR_CNT30_ENABLE_Pos) /*!< PMU CNTENCLR: Event Counter 30 Enable Clear Mask */ + +#define PMU_CNTENCLR_CCNTR_ENABLE_Pos 31U /*!< PMU CNTENCLR: Cycle Counter Enable Clear Position */ +#define PMU_CNTENCLR_CCNTR_ENABLE_Msk (1UL << PMU_CNTENCLR_CCNTR_ENABLE_Pos) /*!< PMU CNTENCLR: Cycle Counter Enable Clear Mask */ + +/** \brief PMU Interrupt Enable Set Register Definitions */ + +#define PMU_INTENSET_CNT0_ENABLE_Pos 0U /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENSET_CNT0_ENABLE_Pos*/) /*!< PMU INTENSET: Event Counter 0 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT1_ENABLE_Pos 1U /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT1_ENABLE_Msk (1UL << PMU_INTENSET_CNT1_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 1 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT2_ENABLE_Pos 2U /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT2_ENABLE_Msk (1UL << PMU_INTENSET_CNT2_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 2 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT3_ENABLE_Pos 3U /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT3_ENABLE_Msk (1UL << PMU_INTENSET_CNT3_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 3 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT4_ENABLE_Pos 4U /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT4_ENABLE_Msk (1UL << PMU_INTENSET_CNT4_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 4 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT5_ENABLE_Pos 5U /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT5_ENABLE_Msk (1UL << PMU_INTENSET_CNT5_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 5 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT6_ENABLE_Pos 6U /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT6_ENABLE_Msk (1UL << PMU_INTENSET_CNT6_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 6 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT7_ENABLE_Pos 7U /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT7_ENABLE_Msk (1UL << PMU_INTENSET_CNT7_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 7 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT8_ENABLE_Pos 8U /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT8_ENABLE_Msk (1UL << PMU_INTENSET_CNT8_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 8 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT9_ENABLE_Pos 9U /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT9_ENABLE_Msk (1UL << PMU_INTENSET_CNT9_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 9 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT10_ENABLE_Pos 10U /*!< PMU INTENSET: Event Counter 10 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT10_ENABLE_Msk (1UL << PMU_INTENSET_CNT10_ENABLE_Pos) /*!< PMU 
INTENSET: Event Counter 10 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT11_ENABLE_Pos 11U /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT11_ENABLE_Msk (1UL << PMU_INTENSET_CNT11_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 11 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT12_ENABLE_Pos 12U /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT12_ENABLE_Msk (1UL << PMU_INTENSET_CNT12_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 12 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT13_ENABLE_Pos 13U /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT13_ENABLE_Msk (1UL << PMU_INTENSET_CNT13_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 13 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT14_ENABLE_Pos 14U /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT14_ENABLE_Msk (1UL << PMU_INTENSET_CNT14_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 14 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT15_ENABLE_Pos 15U /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT15_ENABLE_Msk (1UL << PMU_INTENSET_CNT15_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 15 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT16_ENABLE_Pos 16U /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT16_ENABLE_Msk (1UL << PMU_INTENSET_CNT16_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 16 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT17_ENABLE_Pos 17U /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT17_ENABLE_Msk (1UL << PMU_INTENSET_CNT17_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 17 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT18_ENABLE_Pos 18U /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT18_ENABLE_Msk (1UL << PMU_INTENSET_CNT18_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 18 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT19_ENABLE_Pos 19U /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT19_ENABLE_Msk (1UL << PMU_INTENSET_CNT19_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 19 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT20_ENABLE_Pos 20U /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT20_ENABLE_Msk (1UL << PMU_INTENSET_CNT20_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 20 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT21_ENABLE_Pos 21U /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT21_ENABLE_Msk (1UL << PMU_INTENSET_CNT21_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 21 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT22_ENABLE_Pos 22U /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT22_ENABLE_Msk (1UL << PMU_INTENSET_CNT22_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 22 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT23_ENABLE_Pos 23U /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT23_ENABLE_Msk (1UL << PMU_INTENSET_CNT23_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 23 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT24_ENABLE_Pos 24U /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT24_ENABLE_Msk (1UL 
<< PMU_INTENSET_CNT24_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 24 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT25_ENABLE_Pos 25U /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT25_ENABLE_Msk (1UL << PMU_INTENSET_CNT25_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 25 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT26_ENABLE_Pos 26U /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT26_ENABLE_Msk (1UL << PMU_INTENSET_CNT26_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 26 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT27_ENABLE_Pos 27U /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT27_ENABLE_Msk (1UL << PMU_INTENSET_CNT27_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 27 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT28_ENABLE_Pos 28U /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT28_ENABLE_Msk (1UL << PMU_INTENSET_CNT28_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 28 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT29_ENABLE_Pos 29U /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT29_ENABLE_Msk (1UL << PMU_INTENSET_CNT29_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 29 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CNT30_ENABLE_Pos 30U /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Position */ +#define PMU_INTENSET_CNT30_ENABLE_Msk (1UL << PMU_INTENSET_CNT30_ENABLE_Pos) /*!< PMU INTENSET: Event Counter 30 Interrupt Enable Set Mask */ + +#define PMU_INTENSET_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Position */ +#define PMU_INTENSET_CYCCNT_ENABLE_Msk (1UL << PMU_INTENSET_CYCCNT_ENABLE_Pos) /*!< PMU INTENSET: Cycle Counter Interrupt Enable Set Mask */ + +/** \brief PMU Interrupt Enable Clear Register Definitions */ + +#define PMU_INTENCLR_CNT0_ENABLE_Pos 0U /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT0_ENABLE_Msk (1UL /*<< PMU_INTENCLR_CNT0_ENABLE_Pos*/) /*!< PMU INTENCLR: Event Counter 0 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT1_ENABLE_Pos 1U /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT1_ENABLE_Msk (1UL << PMU_INTENCLR_CNT1_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 1 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT2_ENABLE_Pos 2U /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT2_ENABLE_Msk (1UL << PMU_INTENCLR_CNT2_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 2 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT3_ENABLE_Pos 3U /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT3_ENABLE_Msk (1UL << PMU_INTENCLR_CNT3_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 3 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT4_ENABLE_Pos 4U /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT4_ENABLE_Msk (1UL << PMU_INTENCLR_CNT4_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 4 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT5_ENABLE_Pos 5U /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT5_ENABLE_Msk (1UL << PMU_INTENCLR_CNT5_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 5 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT6_ENABLE_Pos 6U /*!< PMU INTENCLR:
Event Counter 6 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT6_ENABLE_Msk (1UL << PMU_INTENCLR_CNT6_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 6 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT7_ENABLE_Pos 7U /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT7_ENABLE_Msk (1UL << PMU_INTENCLR_CNT7_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 7 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT8_ENABLE_Pos 8U /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT8_ENABLE_Msk (1UL << PMU_INTENCLR_CNT8_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 8 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT9_ENABLE_Pos 9U /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT9_ENABLE_Msk (1UL << PMU_INTENCLR_CNT9_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 9 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT10_ENABLE_Pos 10U /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT10_ENABLE_Msk (1UL << PMU_INTENCLR_CNT10_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 10 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT11_ENABLE_Pos 11U /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT11_ENABLE_Msk (1UL << PMU_INTENCLR_CNT11_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 11 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT12_ENABLE_Pos 12U /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT12_ENABLE_Msk (1UL << PMU_INTENCLR_CNT12_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 12 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT13_ENABLE_Pos 13U /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT13_ENABLE_Msk (1UL << PMU_INTENCLR_CNT13_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 13 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT14_ENABLE_Pos 14U /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT14_ENABLE_Msk (1UL << PMU_INTENCLR_CNT14_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 14 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT15_ENABLE_Pos 15U /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT15_ENABLE_Msk (1UL << PMU_INTENCLR_CNT15_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 15 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT16_ENABLE_Pos 16U /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT16_ENABLE_Msk (1UL << PMU_INTENCLR_CNT16_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 16 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT17_ENABLE_Pos 17U /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT17_ENABLE_Msk (1UL << PMU_INTENCLR_CNT17_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 17 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT18_ENABLE_Pos 18U /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT18_ENABLE_Msk (1UL << PMU_INTENCLR_CNT18_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 18 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT19_ENABLE_Pos 19U /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT19_ENABLE_Msk (1UL << PMU_INTENCLR_CNT19_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 19 Interrupt Enable 
Clear Mask */ + +#define PMU_INTENCLR_CNT20_ENABLE_Pos 20U /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT20_ENABLE_Msk (1UL << PMU_INTENCLR_CNT20_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 20 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT21_ENABLE_Pos 21U /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT21_ENABLE_Msk (1UL << PMU_INTENCLR_CNT21_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 21 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT22_ENABLE_Pos 22U /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT22_ENABLE_Msk (1UL << PMU_INTENCLR_CNT22_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 22 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT23_ENABLE_Pos 23U /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT23_ENABLE_Msk (1UL << PMU_INTENCLR_CNT23_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 23 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT24_ENABLE_Pos 24U /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT24_ENABLE_Msk (1UL << PMU_INTENCLR_CNT24_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 24 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT25_ENABLE_Pos 25U /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT25_ENABLE_Msk (1UL << PMU_INTENCLR_CNT25_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 25 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT26_ENABLE_Pos 26U /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT26_ENABLE_Msk (1UL << PMU_INTENCLR_CNT26_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 26 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT27_ENABLE_Pos 27U /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT27_ENABLE_Msk (1UL << PMU_INTENCLR_CNT27_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 27 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT28_ENABLE_Pos 28U /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT28_ENABLE_Msk (1UL << PMU_INTENCLR_CNT28_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 28 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT29_ENABLE_Pos 29U /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT29_ENABLE_Msk (1UL << PMU_INTENCLR_CNT29_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 29 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CNT30_ENABLE_Pos 30U /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CNT30_ENABLE_Msk (1UL << PMU_INTENCLR_CNT30_ENABLE_Pos) /*!< PMU INTENCLR: Event Counter 30 Interrupt Enable Clear Mask */ + +#define PMU_INTENCLR_CYCCNT_ENABLE_Pos 31U /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Position */ +#define PMU_INTENCLR_CYCCNT_ENABLE_Msk (1UL << PMU_INTENCLR_CYCCNT_ENABLE_Pos) /*!< PMU INTENCLR: Cycle Counter Interrupt Enable Clear Mask */ + +/** \brief PMU Overflow Flag Status Set Register Definitions */ + +#define PMU_OVSSET_CNT0_STATUS_Pos 0U /*!< PMU OVSSET: Event Counter 0 Overflow Set Position */ +#define PMU_OVSSET_CNT0_STATUS_Msk (1UL /*<< PMU_OVSSET_CNT0_STATUS_Pos*/) /*!< PMU OVSSET: Event Counter 0 Overflow Set Mask */ + +#define PMU_OVSSET_CNT1_STATUS_Pos 1U /*!< PMU OVSSET: Event Counter 1 Overflow Set Position */ +#define 
PMU_OVSSET_CNT1_STATUS_Msk (1UL << PMU_OVSSET_CNT1_STATUS_Pos) /*!< PMU OVSSET: Event Counter 1 Overflow Set Mask */ + +#define PMU_OVSSET_CNT2_STATUS_Pos 2U /*!< PMU OVSSET: Event Counter 2 Overflow Set Position */ +#define PMU_OVSSET_CNT2_STATUS_Msk (1UL << PMU_OVSSET_CNT2_STATUS_Pos) /*!< PMU OVSSET: Event Counter 2 Overflow Set Mask */ + +#define PMU_OVSSET_CNT3_STATUS_Pos 3U /*!< PMU OVSSET: Event Counter 3 Overflow Set Position */ +#define PMU_OVSSET_CNT3_STATUS_Msk (1UL << PMU_OVSSET_CNT3_STATUS_Pos) /*!< PMU OVSSET: Event Counter 3 Overflow Set Mask */ + +#define PMU_OVSSET_CNT4_STATUS_Pos 4U /*!< PMU OVSSET: Event Counter 4 Overflow Set Position */ +#define PMU_OVSSET_CNT4_STATUS_Msk (1UL << PMU_OVSSET_CNT4_STATUS_Pos) /*!< PMU OVSSET: Event Counter 4 Overflow Set Mask */ + +#define PMU_OVSSET_CNT5_STATUS_Pos 5U /*!< PMU OVSSET: Event Counter 5 Overflow Set Position */ +#define PMU_OVSSET_CNT5_STATUS_Msk (1UL << PMU_OVSSET_CNT5_STATUS_Pos) /*!< PMU OVSSET: Event Counter 5 Overflow Set Mask */ + +#define PMU_OVSSET_CNT6_STATUS_Pos 6U /*!< PMU OVSSET: Event Counter 6 Overflow Set Position */ +#define PMU_OVSSET_CNT6_STATUS_Msk (1UL << PMU_OVSSET_CNT6_STATUS_Pos) /*!< PMU OVSSET: Event Counter 6 Overflow Set Mask */ + +#define PMU_OVSSET_CNT7_STATUS_Pos 7U /*!< PMU OVSSET: Event Counter 7 Overflow Set Position */ +#define PMU_OVSSET_CNT7_STATUS_Msk (1UL << PMU_OVSSET_CNT7_STATUS_Pos) /*!< PMU OVSSET: Event Counter 7 Overflow Set Mask */ + +#define PMU_OVSSET_CNT8_STATUS_Pos 8U /*!< PMU OVSSET: Event Counter 8 Overflow Set Position */ +#define PMU_OVSSET_CNT8_STATUS_Msk (1UL << PMU_OVSSET_CNT8_STATUS_Pos) /*!< PMU OVSSET: Event Counter 8 Overflow Set Mask */ + +#define PMU_OVSSET_CNT9_STATUS_Pos 9U /*!< PMU OVSSET: Event Counter 9 Overflow Set Position */ +#define PMU_OVSSET_CNT9_STATUS_Msk (1UL << PMU_OVSSET_CNT9_STATUS_Pos) /*!< PMU OVSSET: Event Counter 9 Overflow Set Mask */ + +#define PMU_OVSSET_CNT10_STATUS_Pos 10U /*!< PMU OVSSET: Event Counter 10 Overflow Set Position */ +#define PMU_OVSSET_CNT10_STATUS_Msk (1UL << PMU_OVSSET_CNT10_STATUS_Pos) /*!< PMU OVSSET: Event Counter 10 Overflow Set Mask */ + +#define PMU_OVSSET_CNT11_STATUS_Pos 11U /*!< PMU OVSSET: Event Counter 11 Overflow Set Position */ +#define PMU_OVSSET_CNT11_STATUS_Msk (1UL << PMU_OVSSET_CNT11_STATUS_Pos) /*!< PMU OVSSET: Event Counter 11 Overflow Set Mask */ + +#define PMU_OVSSET_CNT12_STATUS_Pos 12U /*!< PMU OVSSET: Event Counter 12 Overflow Set Position */ +#define PMU_OVSSET_CNT12_STATUS_Msk (1UL << PMU_OVSSET_CNT12_STATUS_Pos) /*!< PMU OVSSET: Event Counter 12 Overflow Set Mask */ + +#define PMU_OVSSET_CNT13_STATUS_Pos 13U /*!< PMU OVSSET: Event Counter 13 Overflow Set Position */ +#define PMU_OVSSET_CNT13_STATUS_Msk (1UL << PMU_OVSSET_CNT13_STATUS_Pos) /*!< PMU OVSSET: Event Counter 13 Overflow Set Mask */ + +#define PMU_OVSSET_CNT14_STATUS_Pos 14U /*!< PMU OVSSET: Event Counter 14 Overflow Set Position */ +#define PMU_OVSSET_CNT14_STATUS_Msk (1UL << PMU_OVSSET_CNT14_STATUS_Pos) /*!< PMU OVSSET: Event Counter 14 Overflow Set Mask */ + +#define PMU_OVSSET_CNT15_STATUS_Pos 15U /*!< PMU OVSSET: Event Counter 15 Overflow Set Position */ +#define PMU_OVSSET_CNT15_STATUS_Msk (1UL << PMU_OVSSET_CNT15_STATUS_Pos) /*!< PMU OVSSET: Event Counter 15 Overflow Set Mask */ + +#define PMU_OVSSET_CNT16_STATUS_Pos 16U /*!< PMU OVSSET: Event Counter 16 Overflow Set Position */ +#define PMU_OVSSET_CNT16_STATUS_Msk (1UL << PMU_OVSSET_CNT16_STATUS_Pos) /*!< PMU OVSSET: Event Counter 16 Overflow Set Mask */ + +#define 
PMU_OVSSET_CNT17_STATUS_Pos 17U /*!< PMU OVSSET: Event Counter 17 Overflow Set Position */ +#define PMU_OVSSET_CNT17_STATUS_Msk (1UL << PMU_OVSSET_CNT17_STATUS_Pos) /*!< PMU OVSSET: Event Counter 17 Overflow Set Mask */ + +#define PMU_OVSSET_CNT18_STATUS_Pos 18U /*!< PMU OVSSET: Event Counter 18 Overflow Set Position */ +#define PMU_OVSSET_CNT18_STATUS_Msk (1UL << PMU_OVSSET_CNT18_STATUS_Pos) /*!< PMU OVSSET: Event Counter 18 Overflow Set Mask */ + +#define PMU_OVSSET_CNT19_STATUS_Pos 19U /*!< PMU OVSSET: Event Counter 19 Overflow Set Position */ +#define PMU_OVSSET_CNT19_STATUS_Msk (1UL << PMU_OVSSET_CNT19_STATUS_Pos) /*!< PMU OVSSET: Event Counter 19 Overflow Set Mask */ + +#define PMU_OVSSET_CNT20_STATUS_Pos 20U /*!< PMU OVSSET: Event Counter 20 Overflow Set Position */ +#define PMU_OVSSET_CNT20_STATUS_Msk (1UL << PMU_OVSSET_CNT20_STATUS_Pos) /*!< PMU OVSSET: Event Counter 20 Overflow Set Mask */ + +#define PMU_OVSSET_CNT21_STATUS_Pos 21U /*!< PMU OVSSET: Event Counter 21 Overflow Set Position */ +#define PMU_OVSSET_CNT21_STATUS_Msk (1UL << PMU_OVSSET_CNT21_STATUS_Pos) /*!< PMU OVSSET: Event Counter 21 Overflow Set Mask */ + +#define PMU_OVSSET_CNT22_STATUS_Pos 22U /*!< PMU OVSSET: Event Counter 22 Overflow Set Position */ +#define PMU_OVSSET_CNT22_STATUS_Msk (1UL << PMU_OVSSET_CNT22_STATUS_Pos) /*!< PMU OVSSET: Event Counter 22 Overflow Set Mask */ + +#define PMU_OVSSET_CNT23_STATUS_Pos 23U /*!< PMU OVSSET: Event Counter 23 Overflow Set Position */ +#define PMU_OVSSET_CNT23_STATUS_Msk (1UL << PMU_OVSSET_CNT23_STATUS_Pos) /*!< PMU OVSSET: Event Counter 23 Overflow Set Mask */ + +#define PMU_OVSSET_CNT24_STATUS_Pos 24U /*!< PMU OVSSET: Event Counter 24 Overflow Set Position */ +#define PMU_OVSSET_CNT24_STATUS_Msk (1UL << PMU_OVSSET_CNT24_STATUS_Pos) /*!< PMU OVSSET: Event Counter 24 Overflow Set Mask */ + +#define PMU_OVSSET_CNT25_STATUS_Pos 25U /*!< PMU OVSSET: Event Counter 25 Overflow Set Position */ +#define PMU_OVSSET_CNT25_STATUS_Msk (1UL << PMU_OVSSET_CNT25_STATUS_Pos) /*!< PMU OVSSET: Event Counter 25 Overflow Set Mask */ + +#define PMU_OVSSET_CNT26_STATUS_Pos 26U /*!< PMU OVSSET: Event Counter 26 Overflow Set Position */ +#define PMU_OVSSET_CNT26_STATUS_Msk (1UL << PMU_OVSSET_CNT26_STATUS_Pos) /*!< PMU OVSSET: Event Counter 26 Overflow Set Mask */ + +#define PMU_OVSSET_CNT27_STATUS_Pos 27U /*!< PMU OVSSET: Event Counter 27 Overflow Set Position */ +#define PMU_OVSSET_CNT27_STATUS_Msk (1UL << PMU_OVSSET_CNT27_STATUS_Pos) /*!< PMU OVSSET: Event Counter 27 Overflow Set Mask */ + +#define PMU_OVSSET_CNT28_STATUS_Pos 28U /*!< PMU OVSSET: Event Counter 28 Overflow Set Position */ +#define PMU_OVSSET_CNT28_STATUS_Msk (1UL << PMU_OVSSET_CNT28_STATUS_Pos) /*!< PMU OVSSET: Event Counter 28 Overflow Set Mask */ + +#define PMU_OVSSET_CNT29_STATUS_Pos 29U /*!< PMU OVSSET: Event Counter 29 Overflow Set Position */ +#define PMU_OVSSET_CNT29_STATUS_Msk (1UL << PMU_OVSSET_CNT29_STATUS_Pos) /*!< PMU OVSSET: Event Counter 29 Overflow Set Mask */ + +#define PMU_OVSSET_CNT30_STATUS_Pos 30U /*!< PMU OVSSET: Event Counter 30 Overflow Set Position */ +#define PMU_OVSSET_CNT30_STATUS_Msk (1UL << PMU_OVSSET_CNT30_STATUS_Pos) /*!< PMU OVSSET: Event Counter 30 Overflow Set Mask */ + +#define PMU_OVSSET_CYCCNT_STATUS_Pos 31U /*!< PMU OVSSET: Cycle Counter Overflow Set Position */ +#define PMU_OVSSET_CYCCNT_STATUS_Msk (1UL << PMU_OVSSET_CYCCNT_STATUS_Pos) /*!< PMU OVSSET: Cycle Counter Overflow Set Mask */ + +/** \brief PMU Overflow Flag Status Clear Register Definitions */ + +#define 
PMU_OVSCLR_CNT0_STATUS_Pos 0U /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Position */ +#define PMU_OVSCLR_CNT0_STATUS_Msk (1UL /*<< PMU_OVSCLR_CNT0_STATUS_Pos*/) /*!< PMU OVSCLR: Event Counter 0 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT1_STATUS_Pos 1U /*!< PMU OVSCLR: Event Counter 1 Overflow Clear Position */ +#define PMU_OVSCLR_CNT1_STATUS_Msk (1UL << PMU_OVSCLR_CNT1_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 1 Overflow Clear */ + +#define PMU_OVSCLR_CNT2_STATUS_Pos 2U /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Position */ +#define PMU_OVSCLR_CNT2_STATUS_Msk (1UL << PMU_OVSCLR_CNT2_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 2 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT3_STATUS_Pos 3U /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Position */ +#define PMU_OVSCLR_CNT3_STATUS_Msk (1UL << PMU_OVSCLR_CNT3_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 3 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT4_STATUS_Pos 4U /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Position */ +#define PMU_OVSCLR_CNT4_STATUS_Msk (1UL << PMU_OVSCLR_CNT4_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 4 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT5_STATUS_Pos 5U /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Position */ +#define PMU_OVSCLR_CNT5_STATUS_Msk (1UL << PMU_OVSCLR_CNT5_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 5 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT6_STATUS_Pos 6U /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Position */ +#define PMU_OVSCLR_CNT6_STATUS_Msk (1UL << PMU_OVSCLR_CNT6_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 6 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT7_STATUS_Pos 7U /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Position */ +#define PMU_OVSCLR_CNT7_STATUS_Msk (1UL << PMU_OVSCLR_CNT7_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 7 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT8_STATUS_Pos 8U /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Position */ +#define PMU_OVSCLR_CNT8_STATUS_Msk (1UL << PMU_OVSCLR_CNT8_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 8 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT9_STATUS_Pos 9U /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Position */ +#define PMU_OVSCLR_CNT9_STATUS_Msk (1UL << PMU_OVSCLR_CNT9_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 9 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT10_STATUS_Pos 10U /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Position */ +#define PMU_OVSCLR_CNT10_STATUS_Msk (1UL << PMU_OVSCLR_CNT10_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 10 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT11_STATUS_Pos 11U /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Position */ +#define PMU_OVSCLR_CNT11_STATUS_Msk (1UL << PMU_OVSCLR_CNT11_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 11 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT12_STATUS_Pos 12U /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Position */ +#define PMU_OVSCLR_CNT12_STATUS_Msk (1UL << PMU_OVSCLR_CNT12_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 12 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT13_STATUS_Pos 13U /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Position */ +#define PMU_OVSCLR_CNT13_STATUS_Msk (1UL << PMU_OVSCLR_CNT13_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 13 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT14_STATUS_Pos 14U /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Position */ +#define PMU_OVSCLR_CNT14_STATUS_Msk (1UL << PMU_OVSCLR_CNT14_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 14 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT15_STATUS_Pos 15U /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Position 
*/ +#define PMU_OVSCLR_CNT15_STATUS_Msk (1UL << PMU_OVSCLR_CNT15_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 15 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT16_STATUS_Pos 16U /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Position */ +#define PMU_OVSCLR_CNT16_STATUS_Msk (1UL << PMU_OVSCLR_CNT16_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 16 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT17_STATUS_Pos 17U /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Position */ +#define PMU_OVSCLR_CNT17_STATUS_Msk (1UL << PMU_OVSCLR_CNT17_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 17 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT18_STATUS_Pos 18U /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Position */ +#define PMU_OVSCLR_CNT18_STATUS_Msk (1UL << PMU_OVSCLR_CNT18_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 18 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT19_STATUS_Pos 19U /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Position */ +#define PMU_OVSCLR_CNT19_STATUS_Msk (1UL << PMU_OVSCLR_CNT19_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 19 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT20_STATUS_Pos 20U /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Position */ +#define PMU_OVSCLR_CNT20_STATUS_Msk (1UL << PMU_OVSCLR_CNT20_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 20 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT21_STATUS_Pos 21U /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Position */ +#define PMU_OVSCLR_CNT21_STATUS_Msk (1UL << PMU_OVSCLR_CNT21_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 21 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT22_STATUS_Pos 22U /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Position */ +#define PMU_OVSCLR_CNT22_STATUS_Msk (1UL << PMU_OVSCLR_CNT22_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 22 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT23_STATUS_Pos 23U /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Position */ +#define PMU_OVSCLR_CNT23_STATUS_Msk (1UL << PMU_OVSCLR_CNT23_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 23 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT24_STATUS_Pos 24U /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Position */ +#define PMU_OVSCLR_CNT24_STATUS_Msk (1UL << PMU_OVSCLR_CNT24_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 24 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT25_STATUS_Pos 25U /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Position */ +#define PMU_OVSCLR_CNT25_STATUS_Msk (1UL << PMU_OVSCLR_CNT25_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 25 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT26_STATUS_Pos 26U /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Position */ +#define PMU_OVSCLR_CNT26_STATUS_Msk (1UL << PMU_OVSCLR_CNT26_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 26 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT27_STATUS_Pos 27U /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Position */ +#define PMU_OVSCLR_CNT27_STATUS_Msk (1UL << PMU_OVSCLR_CNT27_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 27 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT28_STATUS_Pos 28U /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Position */ +#define PMU_OVSCLR_CNT28_STATUS_Msk (1UL << PMU_OVSCLR_CNT28_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 28 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT29_STATUS_Pos 29U /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Position */ +#define PMU_OVSCLR_CNT29_STATUS_Msk (1UL << PMU_OVSCLR_CNT29_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 29 Overflow Clear Mask */ + +#define PMU_OVSCLR_CNT30_STATUS_Pos 30U /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Position */ +#define 
PMU_OVSCLR_CNT30_STATUS_Msk (1UL << PMU_OVSCLR_CNT30_STATUS_Pos) /*!< PMU OVSCLR: Event Counter 30 Overflow Clear Mask */ + +#define PMU_OVSCLR_CYCCNT_STATUS_Pos 31U /*!< PMU OVSCLR: Cycle Counter Overflow Clear Position */ +#define PMU_OVSCLR_CYCCNT_STATUS_Msk (1UL << PMU_OVSCLR_CYCCNT_STATUS_Pos) /*!< PMU OVSCLR: Cycle Counter Overflow Clear Mask */ + +/** \brief PMU Software Increment Counter */ + +#define PMU_SWINC_CNT0_Pos 0U /*!< PMU SWINC: Event Counter 0 Software Increment Position */ +#define PMU_SWINC_CNT0_Msk (1UL /*<< PMU_SWINC_CNT0_Pos */) /*!< PMU SWINC: Event Counter 0 Software Increment Mask */ + +#define PMU_SWINC_CNT1_Pos 1U /*!< PMU SWINC: Event Counter 1 Software Increment Position */ +#define PMU_SWINC_CNT1_Msk (1UL << PMU_SWINC_CNT1_Pos) /*!< PMU SWINC: Event Counter 1 Software Increment Mask */ + +#define PMU_SWINC_CNT2_Pos 2U /*!< PMU SWINC: Event Counter 2 Software Increment Position */ +#define PMU_SWINC_CNT2_Msk (1UL << PMU_SWINC_CNT2_Pos) /*!< PMU SWINC: Event Counter 2 Software Increment Mask */ + +#define PMU_SWINC_CNT3_Pos 3U /*!< PMU SWINC: Event Counter 3 Software Increment Position */ +#define PMU_SWINC_CNT3_Msk (1UL << PMU_SWINC_CNT3_Pos) /*!< PMU SWINC: Event Counter 3 Software Increment Mask */ + +#define PMU_SWINC_CNT4_Pos 4U /*!< PMU SWINC: Event Counter 4 Software Increment Position */ +#define PMU_SWINC_CNT4_Msk (1UL << PMU_SWINC_CNT4_Pos) /*!< PMU SWINC: Event Counter 4 Software Increment Mask */ + +#define PMU_SWINC_CNT5_Pos 5U /*!< PMU SWINC: Event Counter 5 Software Increment Position */ +#define PMU_SWINC_CNT5_Msk (1UL << PMU_SWINC_CNT5_Pos) /*!< PMU SWINC: Event Counter 5 Software Increment Mask */ + +#define PMU_SWINC_CNT6_Pos 6U /*!< PMU SWINC: Event Counter 6 Software Increment Position */ +#define PMU_SWINC_CNT6_Msk (1UL << PMU_SWINC_CNT6_Pos) /*!< PMU SWINC: Event Counter 6 Software Increment Mask */ + +#define PMU_SWINC_CNT7_Pos 7U /*!< PMU SWINC: Event Counter 7 Software Increment Position */ +#define PMU_SWINC_CNT7_Msk (1UL << PMU_SWINC_CNT7_Pos) /*!< PMU SWINC: Event Counter 7 Software Increment Mask */ + +#define PMU_SWINC_CNT8_Pos 8U /*!< PMU SWINC: Event Counter 8 Software Increment Position */ +#define PMU_SWINC_CNT8_Msk (1UL << PMU_SWINC_CNT8_Pos) /*!< PMU SWINC: Event Counter 8 Software Increment Mask */ + +#define PMU_SWINC_CNT9_Pos 9U /*!< PMU SWINC: Event Counter 9 Software Increment Position */ +#define PMU_SWINC_CNT9_Msk (1UL << PMU_SWINC_CNT9_Pos) /*!< PMU SWINC: Event Counter 9 Software Increment Mask */ + +#define PMU_SWINC_CNT10_Pos 10U /*!< PMU SWINC: Event Counter 10 Software Increment Position */ +#define PMU_SWINC_CNT10_Msk (1UL << PMU_SWINC_CNT10_Pos) /*!< PMU SWINC: Event Counter 10 Software Increment Mask */ + +#define PMU_SWINC_CNT11_Pos 11U /*!< PMU SWINC: Event Counter 11 Software Increment Position */ +#define PMU_SWINC_CNT11_Msk (1UL << PMU_SWINC_CNT11_Pos) /*!< PMU SWINC: Event Counter 11 Software Increment Mask */ + +#define PMU_SWINC_CNT12_Pos 12U /*!< PMU SWINC: Event Counter 12 Software Increment Position */ +#define PMU_SWINC_CNT12_Msk (1UL << PMU_SWINC_CNT12_Pos) /*!< PMU SWINC: Event Counter 12 Software Increment Mask */ + +#define PMU_SWINC_CNT13_Pos 13U /*!< PMU SWINC: Event Counter 13 Software Increment Position */ +#define PMU_SWINC_CNT13_Msk (1UL << PMU_SWINC_CNT13_Pos) /*!< PMU SWINC: Event Counter 13 Software Increment Mask */ + +#define PMU_SWINC_CNT14_Pos 14U /*!< PMU SWINC: Event Counter 14 Software Increment Position */ +#define PMU_SWINC_CNT14_Msk (1UL << PMU_SWINC_CNT14_Pos) /*!< PMU 
SWINC: Event Counter 14 Software Increment Mask */ + +#define PMU_SWINC_CNT15_Pos 15U /*!< PMU SWINC: Event Counter 15 Software Increment Position */ +#define PMU_SWINC_CNT15_Msk (1UL << PMU_SWINC_CNT15_Pos) /*!< PMU SWINC: Event Counter 15 Software Increment Mask */ + +#define PMU_SWINC_CNT16_Pos 16U /*!< PMU SWINC: Event Counter 16 Software Increment Position */ +#define PMU_SWINC_CNT16_Msk (1UL << PMU_SWINC_CNT16_Pos) /*!< PMU SWINC: Event Counter 16 Software Increment Mask */ + +#define PMU_SWINC_CNT17_Pos 17U /*!< PMU SWINC: Event Counter 17 Software Increment Position */ +#define PMU_SWINC_CNT17_Msk (1UL << PMU_SWINC_CNT17_Pos) /*!< PMU SWINC: Event Counter 17 Software Increment Mask */ + +#define PMU_SWINC_CNT18_Pos 18U /*!< PMU SWINC: Event Counter 18 Software Increment Position */ +#define PMU_SWINC_CNT18_Msk (1UL << PMU_SWINC_CNT18_Pos) /*!< PMU SWINC: Event Counter 18 Software Increment Mask */ + +#define PMU_SWINC_CNT19_Pos 19U /*!< PMU SWINC: Event Counter 19 Software Increment Position */ +#define PMU_SWINC_CNT19_Msk (1UL << PMU_SWINC_CNT19_Pos) /*!< PMU SWINC: Event Counter 19 Software Increment Mask */ + +#define PMU_SWINC_CNT20_Pos 20U /*!< PMU SWINC: Event Counter 20 Software Increment Position */ +#define PMU_SWINC_CNT20_Msk (1UL << PMU_SWINC_CNT20_Pos) /*!< PMU SWINC: Event Counter 20 Software Increment Mask */ + +#define PMU_SWINC_CNT21_Pos 21U /*!< PMU SWINC: Event Counter 21 Software Increment Position */ +#define PMU_SWINC_CNT21_Msk (1UL << PMU_SWINC_CNT21_Pos) /*!< PMU SWINC: Event Counter 21 Software Increment Mask */ + +#define PMU_SWINC_CNT22_Pos 22U /*!< PMU SWINC: Event Counter 22 Software Increment Position */ +#define PMU_SWINC_CNT22_Msk (1UL << PMU_SWINC_CNT22_Pos) /*!< PMU SWINC: Event Counter 22 Software Increment Mask */ + +#define PMU_SWINC_CNT23_Pos 23U /*!< PMU SWINC: Event Counter 23 Software Increment Position */ +#define PMU_SWINC_CNT23_Msk (1UL << PMU_SWINC_CNT23_Pos) /*!< PMU SWINC: Event Counter 23 Software Increment Mask */ + +#define PMU_SWINC_CNT24_Pos 24U /*!< PMU SWINC: Event Counter 24 Software Increment Position */ +#define PMU_SWINC_CNT24_Msk (1UL << PMU_SWINC_CNT24_Pos) /*!< PMU SWINC: Event Counter 24 Software Increment Mask */ + +#define PMU_SWINC_CNT25_Pos 25U /*!< PMU SWINC: Event Counter 25 Software Increment Position */ +#define PMU_SWINC_CNT25_Msk (1UL << PMU_SWINC_CNT25_Pos) /*!< PMU SWINC: Event Counter 25 Software Increment Mask */ + +#define PMU_SWINC_CNT26_Pos 26U /*!< PMU SWINC: Event Counter 26 Software Increment Position */ +#define PMU_SWINC_CNT26_Msk (1UL << PMU_SWINC_CNT26_Pos) /*!< PMU SWINC: Event Counter 26 Software Increment Mask */ + +#define PMU_SWINC_CNT27_Pos 27U /*!< PMU SWINC: Event Counter 27 Software Increment Position */ +#define PMU_SWINC_CNT27_Msk (1UL << PMU_SWINC_CNT27_Pos) /*!< PMU SWINC: Event Counter 27 Software Increment Mask */ + +#define PMU_SWINC_CNT28_Pos 28U /*!< PMU SWINC: Event Counter 28 Software Increment Position */ +#define PMU_SWINC_CNT28_Msk (1UL << PMU_SWINC_CNT28_Pos) /*!< PMU SWINC: Event Counter 28 Software Increment Mask */ + +#define PMU_SWINC_CNT29_Pos 29U /*!< PMU SWINC: Event Counter 29 Software Increment Position */ +#define PMU_SWINC_CNT29_Msk (1UL << PMU_SWINC_CNT29_Pos) /*!< PMU SWINC: Event Counter 29 Software Increment Mask */ + +#define PMU_SWINC_CNT30_Pos 30U /*!< PMU SWINC: Event Counter 30 Software Increment Position */ +#define PMU_SWINC_CNT30_Msk (1UL << PMU_SWINC_CNT30_Pos) /*!< PMU SWINC: Event Counter 30 Software Increment Mask */ + +/** \brief PMU Control 
Register Definitions */ + +#define PMU_CTRL_ENABLE_Pos 0U /*!< PMU CTRL: ENABLE Position */ +#define PMU_CTRL_ENABLE_Msk (1UL /*<< PMU_CTRL_ENABLE_Pos*/) /*!< PMU CTRL: ENABLE Mask */ + +#define PMU_CTRL_EVENTCNT_RESET_Pos 1U /*!< PMU CTRL: Event Counter Reset Position */ +#define PMU_CTRL_EVENTCNT_RESET_Msk (1UL << PMU_CTRL_EVENTCNT_RESET_Pos) /*!< PMU CTRL: Event Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_RESET_Pos 2U /*!< PMU CTRL: Cycle Counter Reset Position */ +#define PMU_CTRL_CYCCNT_RESET_Msk (1UL << PMU_CTRL_CYCCNT_RESET_Pos) /*!< PMU CTRL: Cycle Counter Reset Mask */ + +#define PMU_CTRL_CYCCNT_DISABLE_Pos 5U /*!< PMU CTRL: Disable Cycle Counter Position */ +#define PMU_CTRL_CYCCNT_DISABLE_Msk (1UL << PMU_CTRL_CYCCNT_DISABLE_Pos) /*!< PMU CTRL: Disable Cycle Counter Mask */ + +#define PMU_CTRL_FRZ_ON_OV_Pos 9U /*!< PMU CTRL: Freeze-on-overflow Position */ +#define PMU_CTRL_FRZ_ON_OV_Msk (1UL << PMU_CTRL_FRZ_ON_OV_Pos) /*!< PMU CTRL: Freeze-on-overflow Mask */ + +#define PMU_CTRL_TRACE_ON_OV_Pos 11U /*!< PMU CTRL: Trace-on-overflow Position */ +#define PMU_CTRL_TRACE_ON_OV_Msk (1UL << PMU_CTRL_TRACE_ON_OV_Pos) /*!< PMU CTRL: Trace-on-overflow Mask */ + +/** \brief PMU Type Register Definitions */ + +#define PMU_TYPE_NUM_CNTS_Pos 0U /*!< PMU TYPE: Number of Counters Position */ +#define PMU_TYPE_NUM_CNTS_Msk (0xFFUL /*<< PMU_TYPE_NUM_CNTS_Pos*/) /*!< PMU TYPE: Number of Counters Mask */ + +#define PMU_TYPE_SIZE_CNTS_Pos 8U /*!< PMU TYPE: Size of Counters Position */ +#define PMU_TYPE_SIZE_CNTS_Msk (0x3FUL << PMU_TYPE_SIZE_CNTS_Pos) /*!< PMU TYPE: Size of Counters Mask */ + +#define PMU_TYPE_CYCCNT_PRESENT_Pos 14U /*!< PMU TYPE: Cycle Counter Present Position */ +#define PMU_TYPE_CYCCNT_PRESENT_Msk (1UL << PMU_TYPE_CYCCNT_PRESENT_Pos) /*!< PMU TYPE: Cycle Counter Present Mask */ + +#define PMU_TYPE_FRZ_OV_SUPPORT_Pos 21U /*!< PMU TYPE: Freeze-on-overflow Support Position */ +#define PMU_TYPE_FRZ_OV_SUPPORT_Msk (1UL << PMU_TYPE_FRZ_OV_SUPPORT_Pos) /*!< PMU TYPE: Freeze-on-overflow Support Mask */ + +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos 23U /*!< PMU TYPE: Trace-on-overflow Support Position */ +#define PMU_TYPE_TRACE_ON_OV_SUPPORT_Msk (1UL << PMU_TYPE_TRACE_ON_OV_SUPPORT_Pos) /*!< PMU TYPE: Trace-on-overflow Support Mask */ + +/** \brief PMU Authentication Status Register Definitions */ + +#define PMU_AUTHSTATUS_NSID_Pos 0U /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSID_Msk (0x3UL /*<< PMU_AUTHSTATUS_NSID_Pos*/) /*!< PMU AUTHSTATUS: Non-secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSNID_Pos 2U /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSNID_Msk (0x3UL << PMU_AUTHSTATUS_NSNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SID_Pos 4U /*!< PMU AUTHSTATUS: Secure Invasive Debug Position */ +#define PMU_AUTHSTATUS_SID_Msk (0x3UL << PMU_AUTHSTATUS_SID_Pos) /*!< PMU AUTHSTATUS: Secure Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SNID_Pos 6U /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SNID_Msk (0x3UL << PMU_AUTHSTATUS_SNID_Pos) /*!< PMU AUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUID_Pos 16U /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUID_Msk (0x3UL << PMU_AUTHSTATUS_NSUID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_NSUNID_Pos 18U /*!< PMU AUTHSTATUS: Non-secure
Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_NSUNID_Msk (0x3UL << PMU_AUTHSTATUS_NSUNID_Pos) /*!< PMU AUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUID_Pos 20U /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Position */ +#define PMU_AUTHSTATUS_SUID_Msk (0x3UL << PMU_AUTHSTATUS_SUID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Invasive Debug Mask */ + +#define PMU_AUTHSTATUS_SUNID_Pos 22U /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Position */ +#define PMU_AUTHSTATUS_SUNID_Msk (0x3UL << PMU_AUTHSTATUS_SUNID_Pos) /*!< PMU AUTHSTATUS: Secure Unprivileged Non-invasive Debug Mask */ + + +/*@} end of group CMSIS_PMU */ +#endif + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< 
MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ +#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_PXN_Pos 4U /*!< MPU RLAR: PXN Position */ +#define MPU_RLAR_PXN_Msk (1UL << MPU_RLAR_PXN_Pos) /*!< MPU RLAR: PXN Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +#define FPU_FPDSCR_FZ16_Pos 19U /*!< FPDSCR: FZ16 bit Position */ +#define FPU_FPDSCR_FZ16_Msk (1UL << FPU_FPDSCR_FZ16_Pos) /*!< FPDSCR: FZ16 bit Mask */ + +#define FPU_FPDSCR_LTPSIZE_Pos 16U /*!< FPDSCR: LTPSIZE bit Position */ +#define FPU_FPDSCR_LTPSIZE_Msk (7UL << FPU_FPDSCR_LTPSIZE_Pos) /*!< FPDSCR: LTPSIZE bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FPRound_Pos 28U /*!< MVFR0: FPRound bits Position */ +#define FPU_MVFR0_FPRound_Msk (0xFUL << FPU_MVFR0_FPRound_Pos) /*!< MVFR0: FPRound bits Mask */ + +#define FPU_MVFR0_FPSqrt_Pos 20U /*!< MVFR0: FPSqrt bits Position */ +#define FPU_MVFR0_FPSqrt_Msk (0xFUL << FPU_MVFR0_FPSqrt_Pos) /*!< MVFR0: FPSqrt bits Mask */ + +#define FPU_MVFR0_FPDivide_Pos 16U /*!< MVFR0: FPDivide bits Position */ +#define FPU_MVFR0_FPDivide_Msk (0xFUL << FPU_MVFR0_FPDivide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FPDP_Pos 8U /*!< MVFR0: FPDP bits Position */ +#define FPU_MVFR0_FPDP_Msk (0xFUL << FPU_MVFR0_FPDP_Pos) /*!< MVFR0: FPDP bits Mask */ + +#define FPU_MVFR0_FPSP_Pos 4U /*!< MVFR0: FPSP bits Position */ +#define FPU_MVFR0_FPSP_Msk (0xFUL << FPU_MVFR0_FPSP_Pos) /*!< MVFR0: FPSP bits Mask */ + +#define FPU_MVFR0_SIMDReg_Pos 0U /*!< MVFR0: SIMDReg bits Position */ +#define FPU_MVFR0_SIMDReg_Msk (0xFUL /*<< FPU_MVFR0_SIMDReg_Pos*/) /*!< MVFR0: SIMDReg bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FMAC_Pos 28U /*!< MVFR1: FMAC bits Position */ +#define FPU_MVFR1_FMAC_Msk (0xFUL << FPU_MVFR1_FMAC_Pos) /*!< MVFR1: FMAC bits Mask */ + +#define FPU_MVFR1_FPHP_Pos 24U /*!< MVFR1: FPHP bits Position */ +#define FPU_MVFR1_FPHP_Msk (0xFUL << FPU_MVFR1_FPHP_Pos) /*!< MVFR1: FPHP bits Mask */ + +#define FPU_MVFR1_FP16_Pos 20U /*!< MVFR1: FP16 bits Position */ +#define FPU_MVFR1_FP16_Msk (0xFUL << FPU_MVFR1_FP16_Pos) /*!< MVFR1: FP16 bits Mask */ + +#define FPU_MVFR1_MVE_Pos 8U /*!< MVFR1: MVE bits Position */ +#define FPU_MVFR1_MVE_Msk (0xFUL << FPU_MVFR1_MVE_Pos) /*!< MVFR1: MVE bits Mask */ + +#define FPU_MVFR1_FPDNaN_Pos 4U /*!< MVFR1: FPDNaN bits Position */ +#define FPU_MVFR1_FPDNaN_Msk (0xFUL << FPU_MVFR1_FPDNaN_Pos) /*!< MVFR1: FPDNaN bits Mask */ + +#define FPU_MVFR1_FPFtZ_Pos 0U /*!< MVFR1: FPFtZ bits Position */ +#define FPU_MVFR1_FPFtZ_Msk (0xFUL 
/*<< FPU_MVFR1_FPFtZ_Pos*/) /*!< MVFR1: FPFtZ bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + +/* CoreDebug is deprecated. replaced by DCB (Debug Control Block) */ +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CoreDebug Core Debug Registers (CoreDebug) + \brief Type definitions for the Core Debug Registers + @{ + */ + +/** + \brief \deprecated Structure type to access the Core Debug Register (CoreDebug). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} CoreDebug_Type; + +/* Debug Halting Control and Status Register Definitions */ +#define CoreDebug_DHCSR_DBGKEY_Pos 16U /*!< \deprecated CoreDebug DHCSR: DBGKEY Position */ +#define CoreDebug_DHCSR_DBGKEY_Msk (0xFFFFUL << CoreDebug_DHCSR_DBGKEY_Pos) /*!< \deprecated CoreDebug DHCSR: DBGKEY Mask */ + +#define CoreDebug_DHCSR_S_RESTART_ST_Pos 26U /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Position */ +#define CoreDebug_DHCSR_S_RESTART_ST_Msk (1UL << CoreDebug_DHCSR_S_RESTART_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESTART_ST Mask */ + +#define CoreDebug_DHCSR_S_RESET_ST_Pos 25U /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Position */ +#define CoreDebug_DHCSR_S_RESET_ST_Msk (1UL << CoreDebug_DHCSR_S_RESET_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RESET_ST Mask */ + +#define CoreDebug_DHCSR_S_RETIRE_ST_Pos 24U /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Position */ +#define CoreDebug_DHCSR_S_RETIRE_ST_Msk (1UL << CoreDebug_DHCSR_S_RETIRE_ST_Pos) /*!< \deprecated CoreDebug DHCSR: S_RETIRE_ST Mask */ + +#define CoreDebug_DHCSR_S_FPD_Pos 23U /*!< \deprecated CoreDebug DHCSR: S_FPD Position */ +#define CoreDebug_DHCSR_S_FPD_Msk (1UL << CoreDebug_DHCSR_S_FPD_Pos) /*!< \deprecated CoreDebug DHCSR: S_FPD Mask */ + +#define CoreDebug_DHCSR_S_SUIDE_Pos 22U /*!< \deprecated CoreDebug DHCSR: S_SUIDE Position */ +#define CoreDebug_DHCSR_S_SUIDE_Msk (1UL << CoreDebug_DHCSR_S_SUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SUIDE Mask */ + +#define CoreDebug_DHCSR_S_NSUIDE_Pos 21U /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Position */ +#define CoreDebug_DHCSR_S_NSUIDE_Msk (1UL << CoreDebug_DHCSR_S_NSUIDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_NSUIDE Mask */ + +#define CoreDebug_DHCSR_S_SDE_Pos 20U /*!< \deprecated CoreDebug DHCSR: S_SDE Position */ +#define CoreDebug_DHCSR_S_SDE_Msk (1UL << CoreDebug_DHCSR_S_SDE_Pos) /*!< \deprecated CoreDebug DHCSR: S_SDE Mask */ + +#define CoreDebug_DHCSR_S_LOCKUP_Pos 19U /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Position */ +#define CoreDebug_DHCSR_S_LOCKUP_Msk (1UL << CoreDebug_DHCSR_S_LOCKUP_Pos) /*!< \deprecated CoreDebug DHCSR: S_LOCKUP Mask */ + +#define CoreDebug_DHCSR_S_SLEEP_Pos 18U /*!< \deprecated CoreDebug DHCSR: S_SLEEP Position */ +#define 
CoreDebug_DHCSR_S_SLEEP_Msk (1UL << CoreDebug_DHCSR_S_SLEEP_Pos) /*!< \deprecated CoreDebug DHCSR: S_SLEEP Mask */ + +#define CoreDebug_DHCSR_S_HALT_Pos 17U /*!< \deprecated CoreDebug DHCSR: S_HALT Position */ +#define CoreDebug_DHCSR_S_HALT_Msk (1UL << CoreDebug_DHCSR_S_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: S_HALT Mask */ + +#define CoreDebug_DHCSR_S_REGRDY_Pos 16U /*!< \deprecated CoreDebug DHCSR: S_REGRDY Position */ +#define CoreDebug_DHCSR_S_REGRDY_Msk (1UL << CoreDebug_DHCSR_S_REGRDY_Pos) /*!< \deprecated CoreDebug DHCSR: S_REGRDY Mask */ + +#define CoreDebug_DHCSR_C_PMOV_Pos 6U /*!< \deprecated CoreDebug DHCSR: C_PMOV Position */ +#define CoreDebug_DHCSR_C_PMOV_Msk (1UL << CoreDebug_DHCSR_C_PMOV_Pos) /*!< \deprecated CoreDebug DHCSR: C_PMOV Mask */ + +#define CoreDebug_DHCSR_C_SNAPSTALL_Pos 5U /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Position */ +#define CoreDebug_DHCSR_C_SNAPSTALL_Msk (1UL << CoreDebug_DHCSR_C_SNAPSTALL_Pos) /*!< \deprecated CoreDebug DHCSR: C_SNAPSTALL Mask */ + +#define CoreDebug_DHCSR_C_MASKINTS_Pos 3U /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Position */ +#define CoreDebug_DHCSR_C_MASKINTS_Msk (1UL << CoreDebug_DHCSR_C_MASKINTS_Pos) /*!< \deprecated CoreDebug DHCSR: C_MASKINTS Mask */ + +#define CoreDebug_DHCSR_C_STEP_Pos 2U /*!< \deprecated CoreDebug DHCSR: C_STEP Position */ +#define CoreDebug_DHCSR_C_STEP_Msk (1UL << CoreDebug_DHCSR_C_STEP_Pos) /*!< \deprecated CoreDebug DHCSR: C_STEP Mask */ + +#define CoreDebug_DHCSR_C_HALT_Pos 1U /*!< \deprecated CoreDebug DHCSR: C_HALT Position */ +#define CoreDebug_DHCSR_C_HALT_Msk (1UL << CoreDebug_DHCSR_C_HALT_Pos) /*!< \deprecated CoreDebug DHCSR: C_HALT Mask */ + +#define CoreDebug_DHCSR_C_DEBUGEN_Pos 0U /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Position */ +#define CoreDebug_DHCSR_C_DEBUGEN_Msk (1UL /*<< CoreDebug_DHCSR_C_DEBUGEN_Pos*/) /*!< \deprecated CoreDebug DHCSR: C_DEBUGEN Mask */ + +/* Debug Core Register Selector Register Definitions */ +#define CoreDebug_DCRSR_REGWnR_Pos 16U /*!< \deprecated CoreDebug DCRSR: REGWnR Position */ +#define CoreDebug_DCRSR_REGWnR_Msk (1UL << CoreDebug_DCRSR_REGWnR_Pos) /*!< \deprecated CoreDebug DCRSR: REGWnR Mask */ + +#define CoreDebug_DCRSR_REGSEL_Pos 0U /*!< \deprecated CoreDebug DCRSR: REGSEL Position */ +#define CoreDebug_DCRSR_REGSEL_Msk (0x1FUL /*<< CoreDebug_DCRSR_REGSEL_Pos*/) /*!< \deprecated CoreDebug DCRSR: REGSEL Mask */ + +/* Debug Exception and Monitor Control Register Definitions */ +#define CoreDebug_DEMCR_TRCENA_Pos 24U /*!< \deprecated CoreDebug DEMCR: TRCENA Position */ +#define CoreDebug_DEMCR_TRCENA_Msk (1UL << CoreDebug_DEMCR_TRCENA_Pos) /*!< \deprecated CoreDebug DEMCR: TRCENA Mask */ + +#define CoreDebug_DEMCR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DEMCR: MON_REQ Position */ +#define CoreDebug_DEMCR_MON_REQ_Msk (1UL << CoreDebug_DEMCR_MON_REQ_Pos) /*!< \deprecated CoreDebug DEMCR: MON_REQ Mask */ + +#define CoreDebug_DEMCR_MON_STEP_Pos 18U /*!< \deprecated CoreDebug DEMCR: MON_STEP Position */ +#define CoreDebug_DEMCR_MON_STEP_Msk (1UL << CoreDebug_DEMCR_MON_STEP_Pos) /*!< \deprecated CoreDebug DEMCR: MON_STEP Mask */ + +#define CoreDebug_DEMCR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DEMCR: MON_PEND Position */ +#define CoreDebug_DEMCR_MON_PEND_Msk (1UL << CoreDebug_DEMCR_MON_PEND_Pos) /*!< \deprecated CoreDebug DEMCR: MON_PEND Mask */ + +#define CoreDebug_DEMCR_MON_EN_Pos 16U /*!< \deprecated CoreDebug DEMCR: MON_EN Position */ +#define CoreDebug_DEMCR_MON_EN_Msk (1UL << CoreDebug_DEMCR_MON_EN_Pos) /*!< \deprecated CoreDebug 
DEMCR: MON_EN Mask */ + +#define CoreDebug_DEMCR_VC_HARDERR_Pos 10U /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Position */ +#define CoreDebug_DEMCR_VC_HARDERR_Msk (1UL << CoreDebug_DEMCR_VC_HARDERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_HARDERR Mask */ + +#define CoreDebug_DEMCR_VC_INTERR_Pos 9U /*!< \deprecated CoreDebug DEMCR: VC_INTERR Position */ +#define CoreDebug_DEMCR_VC_INTERR_Msk (1UL << CoreDebug_DEMCR_VC_INTERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_INTERR Mask */ + +#define CoreDebug_DEMCR_VC_BUSERR_Pos 8U /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Position */ +#define CoreDebug_DEMCR_VC_BUSERR_Msk (1UL << CoreDebug_DEMCR_VC_BUSERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_BUSERR Mask */ + +#define CoreDebug_DEMCR_VC_STATERR_Pos 7U /*!< \deprecated CoreDebug DEMCR: VC_STATERR Position */ +#define CoreDebug_DEMCR_VC_STATERR_Msk (1UL << CoreDebug_DEMCR_VC_STATERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_STATERR Mask */ + +#define CoreDebug_DEMCR_VC_CHKERR_Pos 6U /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Position */ +#define CoreDebug_DEMCR_VC_CHKERR_Msk (1UL << CoreDebug_DEMCR_VC_CHKERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_CHKERR Mask */ + +#define CoreDebug_DEMCR_VC_NOCPERR_Pos 5U /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Position */ +#define CoreDebug_DEMCR_VC_NOCPERR_Msk (1UL << CoreDebug_DEMCR_VC_NOCPERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_NOCPERR Mask */ + +#define CoreDebug_DEMCR_VC_MMERR_Pos 4U /*!< \deprecated CoreDebug DEMCR: VC_MMERR Position */ +#define CoreDebug_DEMCR_VC_MMERR_Msk (1UL << CoreDebug_DEMCR_VC_MMERR_Pos) /*!< \deprecated CoreDebug DEMCR: VC_MMERR Mask */ + +#define CoreDebug_DEMCR_VC_CORERESET_Pos 0U /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Position */ +#define CoreDebug_DEMCR_VC_CORERESET_Msk (1UL /*<< CoreDebug_DEMCR_VC_CORERESET_Pos*/) /*!< \deprecated CoreDebug DEMCR: VC_CORERESET Mask */ + +/* Debug Set Clear Exception and Monitor Control Register Definitions */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_CLR_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_CLR_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: CLR_MON_PEND, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_REQ_Pos 3U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Position */ +#define CoreDebug_DSCEMCR_SET_MON_REQ_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_REQ_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_REQ, Mask */ + +#define CoreDebug_DSCEMCR_SET_MON_PEND_Pos 1U /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Position */ +#define CoreDebug_DSCEMCR_SET_MON_PEND_Msk (1UL << CoreDebug_DSCEMCR_SET_MON_PEND_Pos) /*!< \deprecated CoreDebug DSCEMCR: SET_MON_PEND, Mask */ + +/* Debug Authentication Control Register Definitions */ +#define CoreDebug_DAUTHCTRL_UIDEN_Pos 10U /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_UIDAPEN_Pos 9U /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Position */ +#define CoreDebug_DAUTHCTRL_UIDAPEN_Msk (1UL << CoreDebug_DAUTHCTRL_UIDAPEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: UIDAPEN, Mask */ + +#define 
CoreDebug_DAUTHCTRL_FSDMA_Pos 8U /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Position */ +#define CoreDebug_DAUTHCTRL_FSDMA_Msk (1UL << CoreDebug_DAUTHCTRL_FSDMA_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: FSDMA, Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Position */ +#define CoreDebug_DAUTHCTRL_INTSPNIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPNIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPNIDEN, Mask */ + +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPNIDENSEL_Msk (1UL << CoreDebug_DAUTHCTRL_SPNIDENSEL_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: SPNIDENSEL Mask */ + +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Position */ +#define CoreDebug_DAUTHCTRL_INTSPIDEN_Msk (1UL << CoreDebug_DAUTHCTRL_INTSPIDEN_Pos) /*!< \deprecated CoreDebug DAUTHCTRL: INTSPIDEN Mask */ + +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Position */ +#define CoreDebug_DAUTHCTRL_SPIDENSEL_Msk (1UL /*<< CoreDebug_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< \deprecated CoreDebug DAUTHCTRL: SPIDENSEL Mask */ + +/* Debug Security Control and Status Register Definitions */ +#define CoreDebug_DSCSR_CDS_Pos 16U /*!< \deprecated CoreDebug DSCSR: CDS Position */ +#define CoreDebug_DSCSR_CDS_Msk (1UL << CoreDebug_DSCSR_CDS_Pos) /*!< \deprecated CoreDebug DSCSR: CDS Mask */ + +#define CoreDebug_DSCSR_SBRSEL_Pos 1U /*!< \deprecated CoreDebug DSCSR: SBRSEL Position */ +#define CoreDebug_DSCSR_SBRSEL_Msk (1UL << CoreDebug_DSCSR_SBRSEL_Pos) /*!< \deprecated CoreDebug DSCSR: SBRSEL Mask */ + +#define CoreDebug_DSCSR_SBRSELEN_Pos 0U /*!< \deprecated CoreDebug DSCSR: SBRSELEN Position */ +#define CoreDebug_DSCSR_SBRSELEN_Msk (1UL /*<< CoreDebug_DSCSR_SBRSELEN_Pos*/) /*!< \deprecated CoreDebug DSCSR: SBRSELEN Mask */ + +/*@} end of group CMSIS_CoreDebug */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). 
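+  \par Usage sketch (illustrative, not part of the original header): application code
+       typically touches the DCB only to set DEMCR.TRCENA before using the trace
+       components (DWT, ITM), via the DCB instance defined further below.
+  \code
+  DCB->DEMCR |= DCB_DEMCR_TRCENA_Msk;              // enable the trace and debug blocks
+  \endcode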
+ */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + __OM uint32_t DSCEMCR; /*!< Offset: 0x010 ( /W) Debug Set Clear Exception and Monitor Control Register */ + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_FPD_Pos 23U /*!< DCB DHCSR: Floating-point registers Debuggable Position */ +#define DCB_DHCSR_S_FPD_Msk (0x1UL << DCB_DHCSR_S_FPD_Pos) /*!< DCB DHCSR: Floating-point registers Debuggable Mask */ + +#define DCB_DHCSR_S_SUIDE_Pos 22U /*!< DCB DHCSR: Secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_SUIDE_Msk (0x1UL << DCB_DHCSR_S_SUIDE_Pos) /*!< DCB DHCSR: Secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_NSUIDE_Pos 21U /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Position */ +#define DCB_DHCSR_S_NSUIDE_Msk (0x1UL << DCB_DHCSR_S_NSUIDE_Pos) /*!< DCB DHCSR: Non-secure unprivileged halting debug enabled Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_PMOV_Pos 6U /*!< DCB DHCSR: Halt on PMU overflow control Position */ +#define DCB_DHCSR_C_PMOV_Msk (0x1UL << DCB_DHCSR_C_PMOV_Pos) /*!< DCB DHCSR: Halt on PMU overflow control Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << 
DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask 
*/ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DSCEMCR, Debug Set Clear Exception and Monitor Control Register Definitions */ +#define DCB_DSCEMCR_CLR_MON_REQ_Pos 19U /*!< DCB DSCEMCR: Clear monitor request Position */ +#define DCB_DSCEMCR_CLR_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_REQ_Pos) /*!< DCB DSCEMCR: Clear monitor request Mask */ + +#define DCB_DSCEMCR_CLR_MON_PEND_Pos 17U /*!< DCB DSCEMCR: Clear monitor pend Position */ +#define DCB_DSCEMCR_CLR_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_CLR_MON_PEND_Pos) /*!< DCB DSCEMCR: Clear monitor pend Mask */ + +#define DCB_DSCEMCR_SET_MON_REQ_Pos 3U /*!< DCB DSCEMCR: Set monitor request Position */ +#define DCB_DSCEMCR_SET_MON_REQ_Msk (0x1UL << DCB_DSCEMCR_SET_MON_REQ_Pos) /*!< DCB DSCEMCR: Set monitor request Mask */ + +#define DCB_DSCEMCR_SET_MON_PEND_Pos 1U /*!< DCB DSCEMCR: Set monitor pend Position */ +#define DCB_DSCEMCR_SET_MON_PEND_Msk (0x1UL << DCB_DSCEMCR_SET_MON_PEND_Pos) /*!< DCB DSCEMCR: Set monitor pend Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_UIDEN_Pos 10U /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Position */ +#define DCB_DAUTHCTRL_UIDEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive Debug Enable Mask */ + +#define DCB_DAUTHCTRL_UIDAPEN_Pos 9U /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Position */ +#define DCB_DAUTHCTRL_UIDAPEN_Msk (0x1UL << DCB_DAUTHCTRL_UIDAPEN_Pos) /*!< DCB DAUTHCTRL: Unprivileged Invasive DAP Access Enable Mask */ + +#define DCB_DAUTHCTRL_FSDMA_Pos 8U /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Position */ +#define DCB_DAUTHCTRL_FSDMA_Msk (0x1UL << DCB_DAUTHCTRL_FSDMA_Pos) /*!< DCB DAUTHCTRL: Force Secure DebugMonitor Allowed Mask */ + +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< 
DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + uint32_t RESERVED0[2U]; + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + uint32_t RESERVED1[3U]; + __IM uint32_t DDEVTYPE; /*!< Offset: 0x01C (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SUNID_Pos 22U /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUNID_Msk (0x3UL << DIB_DAUTHSTATUS_SUNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Non-invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SUID_Pos 20U /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_SUID_Msk (0x3UL << DIB_DAUTHSTATUS_SUID_Pos ) /*!< DIB DAUTHSTATUS: Secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_NSUNID_Pos 18U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Position */ +#define DIB_DAUTHSTATUS_NSUNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Non-invasive Debug Allo Mask */ + +#define DIB_DAUTHSTATUS_NSUID_Pos 16U /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Position */ +#define DIB_DAUTHSTATUS_NSUID_Msk (0x3UL << DIB_DAUTHSTATUS_NSUID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Unprivileged Invasive Debug Allowed Mask */ + +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define 
DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define MEMSYSCTL_BASE (0xE001E000UL) /*!< Memory System Control Base Address */ + #define ERRBNK_BASE (0xE001E100UL) /*!< Error Banking Base Address */ + #define PWRMODCTL_BASE (0xE001E300UL) /*!< Power Mode Control Base Address */ + #define EWIC_BASE (0xE001E400UL) /*!< External Wakeup Interrupt Controller Base Address */ + #define PRCCFGINF_BASE (0xE001E700UL) /*!< Processor Configuration Information Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define CoreDebug_BASE (0xE000EDF0UL) /*!< \deprecated Core Debug Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define SysTick_BASE (SCS_BASE + 0x0010UL) /*!< SysTick Base Address */ + #define NVIC_BASE (SCS_BASE + 0x0100UL) /*!< NVIC Base Address */ + #define SCB_BASE (SCS_BASE + 0x0D00UL) /*!< System Control Block Base Address */ + + #define ICB ((ICB_Type *) SCS_BASE ) /*!< System control Register not in SCB */ + #define SCB ((SCB_Type *) SCB_BASE ) /*!< SCB configuration struct */ + #define SysTick ((SysTick_Type *) SysTick_BASE ) /*!< SysTick configuration struct */ + #define NVIC ((NVIC_Type *) NVIC_BASE ) /*!< NVIC configuration struct */ + #define ITM ((ITM_Type *) ITM_BASE ) /*!< ITM configuration struct */ + #define DWT ((DWT_Type *) DWT_BASE ) /*!< DWT configuration struct */ + #define TPI ((TPI_Type *) TPI_BASE ) /*!< TPI configuration struct */ + #define MEMSYSCTL ((MemSysCtl_Type *) MEMSYSCTL_BASE ) /*!< Memory System Control configuration struct */ + #define ERRBNK ((ErrBnk_Type *) ERRBNK_BASE ) /*!< Error Banking configuration struct */ + #define PWRMODCTL ((PwrModCtl_Type *) PWRMODCTL_BASE ) /*!< Power Mode Control configuration struct */ + #define EWIC ((EWIC_Type *) EWIC_BASE ) /*!< EWIC configuration struct */ + #define 
PRCCFGINF ((PrcCfgInf_Type *) PRCCFGINF_BASE ) /*!< Processor Configuration Information configuration struct */ + #define CoreDebug ((CoreDebug_Type *) CoreDebug_BASE ) /*!< \deprecated Core Debug configuration struct */ + #define DCB ((DCB_Type *) DCB_BASE ) /*!< DCB configuration struct */ + #define DIB ((DIB_Type *) DIB_BASE ) /*!< DIB configuration struct */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE (SCS_BASE + 0x0D90UL) /*!< Memory Protection Unit */ + #define MPU ((MPU_Type *) MPU_BASE ) /*!< Memory Protection Unit */ + #endif + + #if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + #define PMU_BASE (0xE0003000UL) /*!< PMU Base Address */ + #define PMU ((PMU_Type *) PMU_BASE ) /*!< PMU configuration struct */ + #endif + + #if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SAU_BASE (SCS_BASE + 0x0DD0UL) /*!< Security Attribution Unit */ + #define SAU ((SAU_Type *) SAU_BASE ) /*!< Security Attribution Unit */ + #endif + + #define FPU_BASE (SCS_BASE + 0x0F30UL) /*!< Floating Point Unit */ + #define FPU ((FPU_Type *) FPU_BASE ) /*!< Floating Point Unit */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + #define SCS_BASE_NS (0xE002E000UL) /*!< System Control Space Base Address (non-secure address space) */ + #define CoreDebug_BASE_NS (0xE002EDF0UL) /*!< \deprecated Core Debug Base Address (non-secure address space) */ + #define DCB_BASE_NS (0xE002EDF0UL) /*!< DCB Base Address (non-secure address space) */ + #define DIB_BASE_NS (0xE002EFB0UL) /*!< DIB Base Address (non-secure address space) */ + #define SysTick_BASE_NS (SCS_BASE_NS + 0x0010UL) /*!< SysTick Base Address (non-secure address space) */ + #define NVIC_BASE_NS (SCS_BASE_NS + 0x0100UL) /*!< NVIC Base Address (non-secure address space) */ + #define SCB_BASE_NS (SCS_BASE_NS + 0x0D00UL) /*!< System Control Block Base Address (non-secure address space) */ + + #define ICB_NS ((ICB_Type *) SCS_BASE_NS ) /*!< System control Register not in SCB(non-secure address space) */ + #define SCB_NS ((SCB_Type *) SCB_BASE_NS ) /*!< SCB configuration struct (non-secure address space) */ + #define SysTick_NS ((SysTick_Type *) SysTick_BASE_NS ) /*!< SysTick configuration struct (non-secure address space) */ + #define NVIC_NS ((NVIC_Type *) NVIC_BASE_NS ) /*!< NVIC configuration struct (non-secure address space) */ + #define CoreDebug_NS ((CoreDebug_Type *) CoreDebug_BASE_NS) /*!< \deprecated Core Debug configuration struct (non-secure address space) */ + #define DCB_NS ((DCB_Type *) DCB_BASE_NS ) /*!< DCB configuration struct (non-secure address space) */ + #define DIB_NS ((DIB_Type *) DIB_BASE_NS ) /*!< DIB configuration struct (non-secure address space) */ + + #if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + #define MPU_BASE_NS (SCS_BASE_NS + 0x0D90UL) /*!< Memory Protection Unit (non-secure address space) */ + #define MPU_NS ((MPU_Type *) MPU_BASE_NS ) /*!< Memory Protection Unit (non-secure address space) */ + #endif + + #define FPU_BASE_NS (SCS_BASE_NS + 0x0F30UL) /*!< Floating Point Unit (non-secure address space) */ + #define FPU_NS ((FPU_Type *) FPU_BASE_NS ) /*!< Floating Point Unit (non-secure address space) */ + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ +/*@} */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_register_aliases Backwards Compatibility Aliases + \brief Register alias definitions for backwards compatibility. 
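+  \note Illustrative use of the _VAL2FLD / _FLD2VAL bit-field helpers defined above
+        (not part of the original header), applied to the AIRCR fields used later in this file:
+  \code
+  uint32_t prigroup = _FLD2VAL(SCB_AIRCR_PRIGROUP, SCB->AIRCR);   // extract PRIGROUP
+  uint32_t keyfield = _VAL2FLD(SCB_AIRCR_VECTKEY, 0x5FAUL);       // position the write key
+  \endcode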
+ @{ + */ + +/*@} */ + + +/******************************************************************************* + * Hardware Abstraction Layer + Core Function Interface contains: + - Core NVIC Functions + - Core SysTick Functions + - Core Debug Functions + - Core Register Access Functions + ******************************************************************************/ +/** + \defgroup CMSIS_Core_FunctionInterface Functions and Instructions Reference +*/ + + + +/* ########################## NVIC functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_NVICFunctions NVIC Functions + \brief Functions that manage interrupts and exceptions via the NVIC. + @{ + */ + +#ifdef CMSIS_NVIC_VIRTUAL + #ifndef CMSIS_NVIC_VIRTUAL_HEADER_FILE + #define CMSIS_NVIC_VIRTUAL_HEADER_FILE "cmsis_nvic_virtual.h" + #endif + #include CMSIS_NVIC_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetPriorityGrouping __NVIC_SetPriorityGrouping + #define NVIC_GetPriorityGrouping __NVIC_GetPriorityGrouping + #define NVIC_EnableIRQ __NVIC_EnableIRQ + #define NVIC_GetEnableIRQ __NVIC_GetEnableIRQ + #define NVIC_DisableIRQ __NVIC_DisableIRQ + #define NVIC_GetPendingIRQ __NVIC_GetPendingIRQ + #define NVIC_SetPendingIRQ __NVIC_SetPendingIRQ + #define NVIC_ClearPendingIRQ __NVIC_ClearPendingIRQ + #define NVIC_GetActive __NVIC_GetActive + #define NVIC_SetPriority __NVIC_SetPriority + #define NVIC_GetPriority __NVIC_GetPriority + #define NVIC_SystemReset __NVIC_SystemReset +#endif /* CMSIS_NVIC_VIRTUAL */ + +#ifdef CMSIS_VECTAB_VIRTUAL + #ifndef CMSIS_VECTAB_VIRTUAL_HEADER_FILE + #define CMSIS_VECTAB_VIRTUAL_HEADER_FILE "cmsis_vectab_virtual.h" + #endif + #include CMSIS_VECTAB_VIRTUAL_HEADER_FILE +#else + #define NVIC_SetVector __NVIC_SetVector + #define NVIC_GetVector __NVIC_GetVector +#endif /* (CMSIS_VECTAB_VIRTUAL) */ + +#define NVIC_USER_IRQ_OFFSET 16 + + +/* Special LR values for Secure/Non-Secure call handling and exception handling */ + +/* Function Return Payload (from ARMv8-M Architecture Reference Manual) LR value on entry from Secure BLXNS */ +#define FNC_RETURN (0xFEFFFFFFUL) /* bit [0] ignored when processing a branch */ + +/* The following EXC_RETURN mask values are used to evaluate the LR on exception entry */ +#define EXC_RETURN_PREFIX (0xFF000000UL) /* bits [31:24] set to indicate an EXC_RETURN value */ +#define EXC_RETURN_S (0x00000040UL) /* bit [6] stack used to push registers: 0=Non-secure 1=Secure */ +#define EXC_RETURN_DCRS (0x00000020UL) /* bit [5] stacking rules for called registers: 0=skipped 1=saved */ +#define EXC_RETURN_FTYPE (0x00000010UL) /* bit [4] allocate stack for floating-point context: 0=done 1=skipped */ +#define EXC_RETURN_MODE (0x00000008UL) /* bit [3] processor mode for return: 0=Handler mode 1=Thread mode */ +#define EXC_RETURN_SPSEL (0x00000004UL) /* bit [2] stack pointer used to restore context: 0=MSP 1=PSP */ +#define EXC_RETURN_ES (0x00000001UL) /* bit [0] security state exception was taken to: 0=Non-secure 1=Secure */ + +/* Integrity Signature (from ARMv8-M Architecture Reference Manual) for exception context stacking */ +#if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) /* Value for processors with floating-point extension: */ +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125AUL) /* bit [0] SFTC must match LR bit[4] EXC_RETURN_FTYPE */ +#else +#define EXC_INTEGRITY_SIGNATURE (0xFEFA125BUL) /* Value for processors without floating-point extension */ +#endif + + +/** + \brief Set Priority Grouping + \details Sets the priority grouping 
field using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
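+  \par Usage sketch (illustrative; TIMER0_IRQn stands in for a device-specific interrupt
+       number and is not defined by this header):
+  \code
+  NVIC_SetPendingIRQ(TIMER0_IRQn);                 // raise the interrupt from software
+  if (NVIC_GetPendingIRQ(TIMER0_IRQn) != 0U)       // still pending, e.g. while masked?
+  {
+    NVIC_ClearPendingIRQ(TIMER0_IRQn);             // withdraw the request again
+  }
+  \endcode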
+ */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. 
+ The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). 
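+  \par Usage sketch (illustrative; TIMER0_IRQn stands in for a device-specific interrupt
+       number and is not defined by this header):
+  \code
+  uint32_t prio, preempt, sub;
+  NVIC_SetPriorityGrouping(3U);                    // preempt/sub split depends on __NVIC_PRIO_BITS
+  prio = NVIC_EncodePriority(3U, 1U, 0U);          // preemptive priority 1, subpriority 0
+  NVIC_SetPriority(TIMER0_IRQn, prio);
+  NVIC_DecodePriority(NVIC_GetPriority(TIMER0_IRQn), 3U, &preempt, &sub);
+  \endcode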
+ */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. + */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses included + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. 
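+  \par Usage sketch (illustrative Secure-state code; TIMER0_IRQn stands in for a
+       device-specific interrupt assigned to the Non-secure state):
+  \code
+  TZ_NVIC_SetPriorityGrouping_NS(3U);              // configure the Non-secure PRIGROUP
+  TZ_NVIC_SetPriority_NS(TIMER0_IRQn, 2U);
+  TZ_NVIC_EnableIRQ_NS(TIMER0_IRQn);
+  \endcode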
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. + */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) &&(__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## PMU functions and events #################################### */ + +#if defined (__PMU_PRESENT) && (__PMU_PRESENT == 1U) + +#include "pmu_armv8.h" + +/** + \brief Cortex-M85 PMU events + \note Architectural PMU events can be found in pmu_armv8.h +*/ + +#define ARMCM85_PMU_ECC_ERR 0xC000 /*!< One or more Error Correcting Code (ECC) errors detected */ +#define ARMCM85_PMU_ECC_ERR_MBIT 0xC001 /*!< One or more multi-bit ECC errors detected */ +#define ARMCM85_PMU_ECC_ERR_DCACHE 0xC010 /*!< One or more ECC errors in the data cache */ +#define ARMCM85_PMU_ECC_ERR_ICACHE 0xC011 /*!< One or more ECC errors in the instruction cache */ +#define ARMCM85_PMU_ECC_ERR_MBIT_DCACHE 0xC012 /*!< One or more multi-bit ECC errors in the data cache */ +#define ARMCM85_PMU_ECC_ERR_MBIT_ICACHE 0xC013 /*!< One or more multi-bit ECC errors in the instruction cache */ +#define ARMCM85_PMU_ECC_ERR_DTCM 0xC020 /*!< One or more ECC errors in the Data Tightly Coupled Memory (DTCM) */ +#define ARMCM85_PMU_ECC_ERR_ITCM 0xC021 /*!< One or more ECC errors in the Instruction Tightly Coupled Memory (ITCM) */ +#define ARMCM85_PMU_ECC_ERR_MBIT_DTCM 0xC022 /*!< One or more multi-bit ECC errors in the DTCM */ +#define ARMCM85_PMU_ECC_ERR_MBIT_ITCM 0xC023 /*!< One or more multi-bit ECC errors in the ITCM */ +#define ARMCM85_PMU_PF_LINEFILL 0xC100 /*!< The prefetcher starts a line-fill */ +#define ARMCM85_PMU_PF_CANCEL 0xC101 /*!< The prefetcher stops prefetching */ +#define ARMCM85_PMU_PF_DROP_LINEFILL 0xC102 /*!< A linefill triggered by a prefetcher has been dropped because of lack of buffering */ +#define ARMCM85_PMU_NWAMODE_ENTER 0xC200 /*!< No write-allocate mode entry */ +#define ARMCM85_PMU_NWAMODE 0xC201 /*!< Write-allocate store is not allocated into the data cache due to no-write-allocate mode */ +#define ARMCM85_PMU_SAHB_ACCESS 0xC300 /*!< Read or write access on the S-AHB interface to the TCM */ +#define ARMCM85_PMU_PAHB_ACCESS 0xC301 /*!< Read or write access on the P-AHB write interface */ +#define ARMCM85_PMU_AXI_WRITE_ACCESS 0xC302 /*!< Any beat access to M-AXI write interface */ +#define ARMCM85_PMU_AXI_READ_ACCESS 0xC303 /*!< Any beat access to M-AXI read interface */ +#define ARMCM85_PMU_DOSTIMEOUT_DOUBLE 0xC400 /*!< Denial of Service timeout has fired twice and caused buffers to drain to allow forward progress */ +#define ARMCM85_PMU_DOSTIMEOUT_TRIPLE 0xC401 /*!< Denial of Service timeout has fired three times and blocked the LSU to force forward progress */ + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. 
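+  \note Illustrative capability check (not part of the original header):
+  \code
+  if (SCB_GetFPUType() == 2U)                      // double + single precision FPU present
+  {
+    __NOP();                                       // select double-precision code paths here
+  }
+  if (SCB_GetMVEType() != 0U)                      // MVE-I or MVE-F present
+  {
+    __NOP();                                       // select Helium (MVE) optimised kernels here
+  }
+  \endcode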
+ @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_FPSP_Msk | FPU_MVFR0_FPDP_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + +/* ########################## MVE functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_MveFunctions MVE Functions + \brief Function that provides MVE type. + @{ + */ + +/** + \brief get MVE type + \details returns the MVE type + \returns + - \b 0: No Vector Extension (MVE) + - \b 1: Integer Vector Extension (MVE-I) + - \b 2: Floating-point Vector Extension (MVE-F) + */ +__STATIC_INLINE uint32_t SCB_GetMVEType(void) +{ + const uint32_t mvfr1 = FPU->MVFR1; + if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x2U << FPU_MVFR1_MVE_Pos)) + { + return 2U; + } + else if ((mvfr1 & FPU_MVFR1_MVE_Msk) == (0x1U << FPU_MVFR1_MVE_Pos)) + { + return 1U; + } + else + { + return 0U; + } +} + + +/*@} end of CMSIS_Core_MveFunctions */ + + +/* ########################## Cache functions #################################### */ + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) +#include "cachel1_armv7.h" +#endif + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + +/* ################### PAC Key functions ########################### */ + +#if (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) +#include "pac_armv81.h" +#endif + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be writen. + */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. 
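+  \par Usage sketch (illustrative read-modify-write of DAUTHCTRL):
+  \code
+  uint32_t authctrl = DCB_GetAuthCtrl();
+  authctrl |= DCB_DAUTHCTRL_INTSPIDEN_Msk;         // allow internal Secure invasive debug
+  DCB_SetAuthCtrl(authctrl);
+  \endcode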
+ */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details Writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be written + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DIBFunctions */ + + + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function.
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. 
+ */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_CM85_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_sc300.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_sc300.h index 03a02cc..f6c3bfd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_sc300.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_sc300.h @@ -1,11 +1,11 @@ /**************************************************************************//** * @file core_sc300.h * @brief CMSIS SC300 Core Peripheral Access Layer Header File - * @version V5.0.9 - * @date 27. March 2020 + * @version V5.0.10 + * @date 04. June 2021 ******************************************************************************/ /* - * Copyright (c) 2009-2020 Arm Limited. All rights reserved. + * Copyright (c) 2009-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -562,19 +562,19 @@ typedef struct #define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ /* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ -#define SCB_CFSR_MMARVALID_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ #define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ -#define SCB_CFSR_MSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ #define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ -#define SCB_CFSR_MUNSTKERR_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ #define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ -#define SCB_CFSR_DACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ #define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ -#define SCB_CFSR_IACCVIOL_Pos (SCB_SHCSR_MEMFAULTACT_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ #define SCB_CFSR_IACCVIOL_Msk (1UL /*<< 
SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ /* BusFault Status Register (part of SCB Configurable Fault Status Register) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_starmc1.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_starmc1.h new file mode 100644 index 0000000..a6a399d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/core_starmc1.h @@ -0,0 +1,3592 @@ +/**************************************************************************//** + * @file core_starmc1.h + * @brief CMSIS ArmChina STAR-MC1 Core Peripheral Access Layer Header File + * @version V1.0.2 + * @date 07. April 2022 + ******************************************************************************/ +/* + * Copyright (c) 2009-2018 Arm Limited. + * Copyright (c) 2018-2022 Arm China. + * All rights reserved. + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#elif defined ( __GNUC__ ) + #pragma GCC diagnostic ignored "-Wpedantic" /* disable pedantic warning due to unnamed structs/unions */ +#endif + +#ifndef __CORE_STAR_H_GENERIC +#define __CORE_STAR_H_GENERIC + +#include <stdint.h> + +#ifdef __cplusplus + extern "C" { +#endif + +/** + \page CMSIS_MISRA_Exceptions MISRA-C:2004 Compliance Exceptions + CMSIS violates the following MISRA-C:2004 rules: + + \li Required Rule 8.5, object/function definition in header file.
+ Function definitions in header files are used to allow 'inlining'. + + \li Required Rule 18.4, declaration of union type or object of union type: '{...}'.
+ Unions are used for effective representation of core registers. + + \li Advisory Rule 19.7, Function-like macro defined.
+ Function-like macros are used to allow more efficient code. + */ + + +/******************************************************************************* + * CMSIS definitions + ******************************************************************************/ +/** + \ingroup STAR-MC1 + @{ + */ + +#include "cmsis_version.h" + +/* Macro Define for STAR-MC1 */ +#define __STAR_MC (1U) /*!< STAR-MC Core */ + +/** __FPU_USED indicates whether an FPU is used or not. + For this, __FPU_PRESENT has to be checked prior to making use of FPU specific registers and functions. +*/ +#if defined ( __CC_ARM ) + #if defined (__TARGET_FPU_VFP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + #if defined (__ARM_FP) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #warning "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __GNUC__ ) + #if defined (__VFP_FP__) && !defined(__SOFTFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __ICCARM__ ) + #if defined (__ARMVFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + + #if defined (__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1U) + #if defined (__DSP_PRESENT) && (__DSP_PRESENT == 1U) + #define __DSP_USED 1U + #else + #error "Compiler generates DSP (SIMD) instructions for a devices without DSP extensions (check __DSP_PRESENT)" + #define __DSP_USED 0U + #endif + #else + #define __DSP_USED 0U + #endif + +#elif defined ( __TI_ARM__ ) + #if defined (__TI_VFP_SUPPORT__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define 
__FPU_USED 0U + #endif + +#elif defined ( __TASKING__ ) + #if defined (__FPU_VFP__) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#elif defined ( __CSMC__ ) + #if ( __CSMC__ & 0x400U) + #if defined (__FPU_PRESENT) && (__FPU_PRESENT == 1U) + #define __FPU_USED 1U + #else + #error "Compiler generates FPU instructions for a device without an FPU (check __FPU_PRESENT)" + #define __FPU_USED 0U + #endif + #else + #define __FPU_USED 0U + #endif + +#endif + +#include "edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h" /* CMSIS compiler specific defines */ + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_STAR_H_GENERIC */ + +#ifndef __CMSIS_GENERIC + +#ifndef __CORE_STAR_H_DEPENDANT +#define __CORE_STAR_H_DEPENDANT + +#ifdef __cplusplus + extern "C" { +#endif + +/* check device defines and use defaults */ +#if defined __CHECK_DEVICE_DEFINES + #ifndef __STAR_REV + #define __STAR_REV 0x0000U + #warning "__STAR_REV not defined in device header file; using default!" + #endif + + #ifndef __FPU_PRESENT + #define __FPU_PRESENT 0U + #warning "__FPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __MPU_PRESENT + #define __MPU_PRESENT 0U + #warning "__MPU_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __SAUREGION_PRESENT + #define __SAUREGION_PRESENT 0U + #warning "__SAUREGION_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DSP_PRESENT + #define __DSP_PRESENT 0U + #warning "__DSP_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __ICACHE_PRESENT + #define __ICACHE_PRESENT 0U + #warning "__ICACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DCACHE_PRESENT + #define __DCACHE_PRESENT 0U + #warning "__DCACHE_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __DTCM_PRESENT + #define __DTCM_PRESENT 0U + #warning "__DTCM_PRESENT not defined in device header file; using default!" + #endif + + #ifndef __NVIC_PRIO_BITS + #define __NVIC_PRIO_BITS 3U + #warning "__NVIC_PRIO_BITS not defined in device header file; using default!" + #endif + + #ifndef __Vendor_SysTickConfig + #define __Vendor_SysTickConfig 0U + #warning "__Vendor_SysTickConfig not defined in device header file; using default!" + #endif +#endif + +/* IO definitions (access restrictions to peripheral registers) */ +/** + \defgroup CMSIS_glob_defs CMSIS Global Defines + + IO Type Qualifiers are used + \li to specify the access to peripheral variables. + \li for automatic generation of peripheral register debug information. +*/ +#ifdef __cplusplus + #define __I volatile /*!< Defines 'read only' permissions */ +#else + #define __I volatile const /*!< Defines 'read only' permissions */ +#endif +#define __O volatile /*!< Defines 'write only' permissions */ +#define __IO volatile /*!< Defines 'read / write' permissions */ + +/* following defines should be used for structure members */ +#define __IM volatile const /*! Defines 'read only' structure member permissions */ +#define __OM volatile /*! Defines 'write only' structure member permissions */ +#define __IOM volatile /*! 
Defines 'read / write' structure member permissions */ + +/*@} end of group STAR-MC1 */ + + + +/******************************************************************************* + * Register Abstraction + Core Register contain: + - Core Register + - Core NVIC Register + - Core SCB Register + - Core SysTick Register + - Core Debug Register + - Core MPU Register + - Core SAU Register + - Core FPU Register + ******************************************************************************/ +/** + \defgroup CMSIS_core_register Defines and Type Definitions + \brief Type definitions and defines for STAR-MC1 processor based devices. +*/ + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_CORE Status and Control Registers + \brief Core Register type definitions. + @{ + */ + +/** + \brief Union type to access the Application Program Status Register (APSR). + */ +typedef union +{ + struct + { + uint32_t _reserved0:16; /*!< bit: 0..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:7; /*!< bit: 20..26 Reserved */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} APSR_Type; + +/* APSR Register Definitions */ +#define APSR_N_Pos 31U /*!< APSR: N Position */ +#define APSR_N_Msk (1UL << APSR_N_Pos) /*!< APSR: N Mask */ + +#define APSR_Z_Pos 30U /*!< APSR: Z Position */ +#define APSR_Z_Msk (1UL << APSR_Z_Pos) /*!< APSR: Z Mask */ + +#define APSR_C_Pos 29U /*!< APSR: C Position */ +#define APSR_C_Msk (1UL << APSR_C_Pos) /*!< APSR: C Mask */ + +#define APSR_V_Pos 28U /*!< APSR: V Position */ +#define APSR_V_Msk (1UL << APSR_V_Pos) /*!< APSR: V Mask */ + +#define APSR_Q_Pos 27U /*!< APSR: Q Position */ +#define APSR_Q_Msk (1UL << APSR_Q_Pos) /*!< APSR: Q Mask */ + +#define APSR_GE_Pos 16U /*!< APSR: GE Position */ +#define APSR_GE_Msk (0xFUL << APSR_GE_Pos) /*!< APSR: GE Mask */ + + +/** + \brief Union type to access the Interrupt Program Status Register (IPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 8 Exception number */ + uint32_t _reserved0:23; /*!< bit: 9..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} IPSR_Type; + +/* IPSR Register Definitions */ +#define IPSR_ISR_Pos 0U /*!< IPSR: ISR Position */ +#define IPSR_ISR_Msk (0x1FFUL /*<< IPSR_ISR_Pos*/) /*!< IPSR: ISR Mask */ + + +/** + \brief Union type to access the Special-Purpose Program Status Registers (xPSR). + */ +typedef union +{ + struct + { + uint32_t ISR:9; /*!< bit: 0.. 
8 Exception number */ + uint32_t _reserved0:7; /*!< bit: 9..15 Reserved */ + uint32_t GE:4; /*!< bit: 16..19 Greater than or Equal flags */ + uint32_t _reserved1:4; /*!< bit: 20..23 Reserved */ + uint32_t T:1; /*!< bit: 24 Thumb bit (read 0) */ + uint32_t IT:2; /*!< bit: 25..26 saved IT state (read 0) */ + uint32_t Q:1; /*!< bit: 27 Saturation condition flag */ + uint32_t V:1; /*!< bit: 28 Overflow condition code flag */ + uint32_t C:1; /*!< bit: 29 Carry condition code flag */ + uint32_t Z:1; /*!< bit: 30 Zero condition code flag */ + uint32_t N:1; /*!< bit: 31 Negative condition code flag */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} xPSR_Type; + +/* xPSR Register Definitions */ +#define xPSR_N_Pos 31U /*!< xPSR: N Position */ +#define xPSR_N_Msk (1UL << xPSR_N_Pos) /*!< xPSR: N Mask */ + +#define xPSR_Z_Pos 30U /*!< xPSR: Z Position */ +#define xPSR_Z_Msk (1UL << xPSR_Z_Pos) /*!< xPSR: Z Mask */ + +#define xPSR_C_Pos 29U /*!< xPSR: C Position */ +#define xPSR_C_Msk (1UL << xPSR_C_Pos) /*!< xPSR: C Mask */ + +#define xPSR_V_Pos 28U /*!< xPSR: V Position */ +#define xPSR_V_Msk (1UL << xPSR_V_Pos) /*!< xPSR: V Mask */ + +#define xPSR_Q_Pos 27U /*!< xPSR: Q Position */ +#define xPSR_Q_Msk (1UL << xPSR_Q_Pos) /*!< xPSR: Q Mask */ + +#define xPSR_IT_Pos 25U /*!< xPSR: IT Position */ +#define xPSR_IT_Msk (3UL << xPSR_IT_Pos) /*!< xPSR: IT Mask */ + +#define xPSR_T_Pos 24U /*!< xPSR: T Position */ +#define xPSR_T_Msk (1UL << xPSR_T_Pos) /*!< xPSR: T Mask */ + +#define xPSR_GE_Pos 16U /*!< xPSR: GE Position */ +#define xPSR_GE_Msk (0xFUL << xPSR_GE_Pos) /*!< xPSR: GE Mask */ + +#define xPSR_ISR_Pos 0U /*!< xPSR: ISR Position */ +#define xPSR_ISR_Msk (0x1FFUL /*<< xPSR_ISR_Pos*/) /*!< xPSR: ISR Mask */ + + +/** + \brief Union type to access the Control Registers (CONTROL). + */ +typedef union +{ + struct + { + uint32_t nPRIV:1; /*!< bit: 0 Execution privilege in Thread mode */ + uint32_t SPSEL:1; /*!< bit: 1 Stack-pointer select */ + uint32_t FPCA:1; /*!< bit: 2 Floating-point context active */ + uint32_t SFPA:1; /*!< bit: 3 Secure floating-point active */ + uint32_t _reserved1:28; /*!< bit: 4..31 Reserved */ + } b; /*!< Structure used for bit access */ + uint32_t w; /*!< Type used for word access */ +} CONTROL_Type; + +/* CONTROL Register Definitions */ +#define CONTROL_SFPA_Pos 3U /*!< CONTROL: SFPA Position */ +#define CONTROL_SFPA_Msk (1UL << CONTROL_SFPA_Pos) /*!< CONTROL: SFPA Mask */ + +#define CONTROL_FPCA_Pos 2U /*!< CONTROL: FPCA Position */ +#define CONTROL_FPCA_Msk (1UL << CONTROL_FPCA_Pos) /*!< CONTROL: FPCA Mask */ + +#define CONTROL_SPSEL_Pos 1U /*!< CONTROL: SPSEL Position */ +#define CONTROL_SPSEL_Msk (1UL << CONTROL_SPSEL_Pos) /*!< CONTROL: SPSEL Mask */ + +#define CONTROL_nPRIV_Pos 0U /*!< CONTROL: nPRIV Position */ +#define CONTROL_nPRIV_Msk (1UL /*<< CONTROL_nPRIV_Pos*/) /*!< CONTROL: nPRIV Mask */ + +/*@} end of group CMSIS_CORE */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_NVIC Nested Vectored Interrupt Controller (NVIC) + \brief Type definitions for the NVIC Registers + @{ + */ + +/** + \brief Structure type to access the Nested Vectored Interrupt Controller (NVIC). 
+ */ +typedef struct +{ + __IOM uint32_t ISER[16U]; /*!< Offset: 0x000 (R/W) Interrupt Set Enable Register */ + uint32_t RESERVED0[16U]; + __IOM uint32_t ICER[16U]; /*!< Offset: 0x080 (R/W) Interrupt Clear Enable Register */ + uint32_t RSERVED1[16U]; + __IOM uint32_t ISPR[16U]; /*!< Offset: 0x100 (R/W) Interrupt Set Pending Register */ + uint32_t RESERVED2[16U]; + __IOM uint32_t ICPR[16U]; /*!< Offset: 0x180 (R/W) Interrupt Clear Pending Register */ + uint32_t RESERVED3[16U]; + __IOM uint32_t IABR[16U]; /*!< Offset: 0x200 (R/W) Interrupt Active bit Register */ + uint32_t RESERVED4[16U]; + __IOM uint32_t ITNS[16U]; /*!< Offset: 0x280 (R/W) Interrupt Non-Secure State Register */ + uint32_t RESERVED5[16U]; + __IOM uint8_t IPR[496U]; /*!< Offset: 0x300 (R/W) Interrupt Priority Register (8Bit wide) */ + uint32_t RESERVED6[580U]; + __OM uint32_t STIR; /*!< Offset: 0xE00 ( /W) Software Trigger Interrupt Register */ +} NVIC_Type; + +/* Software Triggered Interrupt Register Definitions */ +#define NVIC_STIR_INTID_Pos 0U /*!< STIR: INTLINESNUM Position */ +#define NVIC_STIR_INTID_Msk (0x1FFUL /*<< NVIC_STIR_INTID_Pos*/) /*!< STIR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_NVIC */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCB System Control Block (SCB) + \brief Type definitions for the System Control Block Registers + @{ + */ + +/** + \brief Structure type to access the System Control Block (SCB). + */ +typedef struct +{ + __IM uint32_t CPUID; /*!< Offset: 0x000 (R/ ) CPUID Base Register */ + __IOM uint32_t ICSR; /*!< Offset: 0x004 (R/W) Interrupt Control and State Register */ + __IOM uint32_t VTOR; /*!< Offset: 0x008 (R/W) Vector Table Offset Register */ + __IOM uint32_t AIRCR; /*!< Offset: 0x00C (R/W) Application Interrupt and Reset Control Register */ + __IOM uint32_t SCR; /*!< Offset: 0x010 (R/W) System Control Register */ + __IOM uint32_t CCR; /*!< Offset: 0x014 (R/W) Configuration Control Register */ + __IOM uint8_t SHPR[12U]; /*!< Offset: 0x018 (R/W) System Handlers Priority Registers (4-7, 8-11, 12-15) */ + __IOM uint32_t SHCSR; /*!< Offset: 0x024 (R/W) System Handler Control and State Register */ + __IOM uint32_t CFSR; /*!< Offset: 0x028 (R/W) Configurable Fault Status Register */ + __IOM uint32_t HFSR; /*!< Offset: 0x02C (R/W) HardFault Status Register */ + __IOM uint32_t DFSR; /*!< Offset: 0x030 (R/W) Debug Fault Status Register */ + __IOM uint32_t MMFAR; /*!< Offset: 0x034 (R/W) MemManage Fault Address Register */ + __IOM uint32_t BFAR; /*!< Offset: 0x038 (R/W) BusFault Address Register */ + __IOM uint32_t AFSR; /*!< Offset: 0x03C (R/W) Auxiliary Fault Status Register */ + __IM uint32_t ID_PFR[2U]; /*!< Offset: 0x040 (R/ ) Processor Feature Register */ + __IM uint32_t ID_DFR; /*!< Offset: 0x048 (R/ ) Debug Feature Register */ + __IM uint32_t ID_AFR; /*!< Offset: 0x04C (R/ ) Auxiliary Feature Register */ + __IM uint32_t ID_MMFR[4U]; /*!< Offset: 0x050 (R/ ) Memory Model Feature Register */ + __IM uint32_t ID_ISAR[5U]; /*!< Offset: 0x060 (R/ ) Instruction Set Attributes Register */ + uint32_t RESERVED0[1U]; + __IM uint32_t CLIDR; /*!< Offset: 0x078 (R/ ) Cache Level ID register */ + __IM uint32_t CTR; /*!< Offset: 0x07C (R/ ) Cache Type register */ + __IM uint32_t CCSIDR; /*!< Offset: 0x080 (R/ ) Cache Size ID Register */ + __IOM uint32_t CSSELR; /*!< Offset: 0x084 (R/W) Cache Size Selection Register */ + __IOM uint32_t CPACR; /*!< Offset: 0x088 (R/W) Coprocessor Access Control Register */ + __IOM uint32_t NSACR; /*!< Offset: 0x08C (R/W) Non-Secure Access Control 
Register */ + uint32_t RESERVED_ADD1[21U]; + __IOM uint32_t SFSR; /*!< Offset: 0x0E4 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x0E8 (R/W) Secure Fault Address Register */ + uint32_t RESERVED3[69U]; + __OM uint32_t STIR; /*!< Offset: F00-D00=0x200 ( /W) Software Triggered Interrupt Register */ + uint32_t RESERVED4[15U]; + __IM uint32_t MVFR0; /*!< Offset: 0x240 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x244 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x248 (R/ ) Media and VFP Feature Register 2 */ + uint32_t RESERVED5[1U]; + __OM uint32_t ICIALLU; /*!< Offset: 0x250 ( /W) I-Cache Invalidate All to PoU */ + uint32_t RESERVED6[1U]; + __OM uint32_t ICIMVAU; /*!< Offset: 0x258 ( /W) I-Cache Invalidate by MVA to PoU */ + __OM uint32_t DCIMVAC; /*!< Offset: 0x25C ( /W) D-Cache Invalidate by MVA to PoC */ + __OM uint32_t DCISW; /*!< Offset: 0x260 ( /W) D-Cache Invalidate by Set-way */ + __OM uint32_t DCCMVAU; /*!< Offset: 0x264 ( /W) D-Cache Clean by MVA to PoU */ + __OM uint32_t DCCMVAC; /*!< Offset: 0x268 ( /W) D-Cache Clean by MVA to PoC */ + __OM uint32_t DCCSW; /*!< Offset: 0x26C ( /W) D-Cache Clean by Set-way */ + __OM uint32_t DCCIMVAC; /*!< Offset: 0x270 ( /W) D-Cache Clean and Invalidate by MVA to PoC */ + __OM uint32_t DCCISW; /*!< Offset: 0x274 ( /W) D-Cache Clean and Invalidate by Set-way */ +} SCB_Type; + +typedef struct +{ + __IOM uint32_t CACR; /*!< Offset: 0x0 (R/W) L1 Cache Control Register */ + __IOM uint32_t ITCMCR; /*!< Offset: 0x10 (R/W) Instruction Tightly-Coupled Memory Control Register */ + __IOM uint32_t DTCMCR; /*!< Offset: 0x14 (R/W) Data Tightly-Coupled Memory Control Registers */ +}EMSS_Type; + +/* SCB CPUID Register Definitions */ +#define SCB_CPUID_IMPLEMENTER_Pos 24U /*!< SCB CPUID: IMPLEMENTER Position */ +#define SCB_CPUID_IMPLEMENTER_Msk (0xFFUL << SCB_CPUID_IMPLEMENTER_Pos) /*!< SCB CPUID: IMPLEMENTER Mask */ + +#define SCB_CPUID_VARIANT_Pos 20U /*!< SCB CPUID: VARIANT Position */ +#define SCB_CPUID_VARIANT_Msk (0xFUL << SCB_CPUID_VARIANT_Pos) /*!< SCB CPUID: VARIANT Mask */ + +#define SCB_CPUID_ARCHITECTURE_Pos 16U /*!< SCB CPUID: ARCHITECTURE Position */ +#define SCB_CPUID_ARCHITECTURE_Msk (0xFUL << SCB_CPUID_ARCHITECTURE_Pos) /*!< SCB CPUID: ARCHITECTURE Mask */ + +#define SCB_CPUID_PARTNO_Pos 4U /*!< SCB CPUID: PARTNO Position */ +#define SCB_CPUID_PARTNO_Msk (0xFFFUL << SCB_CPUID_PARTNO_Pos) /*!< SCB CPUID: PARTNO Mask */ + +#define SCB_CPUID_REVISION_Pos 0U /*!< SCB CPUID: REVISION Position */ +#define SCB_CPUID_REVISION_Msk (0xFUL /*<< SCB_CPUID_REVISION_Pos*/) /*!< SCB CPUID: REVISION Mask */ + +/* SCB Interrupt Control State Register Definitions */ +#define SCB_ICSR_PENDNMISET_Pos 31U /*!< SCB ICSR: PENDNMISET Position */ +#define SCB_ICSR_PENDNMISET_Msk (1UL << SCB_ICSR_PENDNMISET_Pos) /*!< SCB ICSR: PENDNMISET Mask */ + +#define SCB_ICSR_NMIPENDSET_Pos SCB_ICSR_PENDNMISET_Pos /*!< SCB ICSR: NMIPENDSET Position, backward compatibility */ +#define SCB_ICSR_NMIPENDSET_Msk SCB_ICSR_PENDNMISET_Msk /*!< SCB ICSR: NMIPENDSET Mask, backward compatibility */ + +#define SCB_ICSR_PENDNMICLR_Pos 30U /*!< SCB ICSR: PENDNMICLR Position */ +#define SCB_ICSR_PENDNMICLR_Msk (1UL << SCB_ICSR_PENDNMICLR_Pos) /*!< SCB ICSR: PENDNMICLR Mask */ + +#define SCB_ICSR_PENDSVSET_Pos 28U /*!< SCB ICSR: PENDSVSET Position */ +#define SCB_ICSR_PENDSVSET_Msk (1UL << SCB_ICSR_PENDSVSET_Pos) /*!< SCB ICSR: PENDSVSET Mask */ + +#define SCB_ICSR_PENDSVCLR_Pos 27U /*!< SCB ICSR: 
PENDSVCLR Position */ +#define SCB_ICSR_PENDSVCLR_Msk (1UL << SCB_ICSR_PENDSVCLR_Pos) /*!< SCB ICSR: PENDSVCLR Mask */ + +#define SCB_ICSR_PENDSTSET_Pos 26U /*!< SCB ICSR: PENDSTSET Position */ +#define SCB_ICSR_PENDSTSET_Msk (1UL << SCB_ICSR_PENDSTSET_Pos) /*!< SCB ICSR: PENDSTSET Mask */ + +#define SCB_ICSR_PENDSTCLR_Pos 25U /*!< SCB ICSR: PENDSTCLR Position */ +#define SCB_ICSR_PENDSTCLR_Msk (1UL << SCB_ICSR_PENDSTCLR_Pos) /*!< SCB ICSR: PENDSTCLR Mask */ + +#define SCB_ICSR_STTNS_Pos 24U /*!< SCB ICSR: STTNS Position (Security Extension) */ +#define SCB_ICSR_STTNS_Msk (1UL << SCB_ICSR_STTNS_Pos) /*!< SCB ICSR: STTNS Mask (Security Extension) */ + +#define SCB_ICSR_ISRPREEMPT_Pos 23U /*!< SCB ICSR: ISRPREEMPT Position */ +#define SCB_ICSR_ISRPREEMPT_Msk (1UL << SCB_ICSR_ISRPREEMPT_Pos) /*!< SCB ICSR: ISRPREEMPT Mask */ + +#define SCB_ICSR_ISRPENDING_Pos 22U /*!< SCB ICSR: ISRPENDING Position */ +#define SCB_ICSR_ISRPENDING_Msk (1UL << SCB_ICSR_ISRPENDING_Pos) /*!< SCB ICSR: ISRPENDING Mask */ + +#define SCB_ICSR_VECTPENDING_Pos 12U /*!< SCB ICSR: VECTPENDING Position */ +#define SCB_ICSR_VECTPENDING_Msk (0x1FFUL << SCB_ICSR_VECTPENDING_Pos) /*!< SCB ICSR: VECTPENDING Mask */ + +#define SCB_ICSR_RETTOBASE_Pos 11U /*!< SCB ICSR: RETTOBASE Position */ +#define SCB_ICSR_RETTOBASE_Msk (1UL << SCB_ICSR_RETTOBASE_Pos) /*!< SCB ICSR: RETTOBASE Mask */ + +#define SCB_ICSR_VECTACTIVE_Pos 0U /*!< SCB ICSR: VECTACTIVE Position */ +#define SCB_ICSR_VECTACTIVE_Msk (0x1FFUL /*<< SCB_ICSR_VECTACTIVE_Pos*/) /*!< SCB ICSR: VECTACTIVE Mask */ + +/* SCB Vector Table Offset Register Definitions */ +#define SCB_VTOR_TBLOFF_Pos 7U /*!< SCB VTOR: TBLOFF Position */ +#define SCB_VTOR_TBLOFF_Msk (0x1FFFFFFUL << SCB_VTOR_TBLOFF_Pos) /*!< SCB VTOR: TBLOFF Mask */ + +/* SCB Application Interrupt and Reset Control Register Definitions */ +#define SCB_AIRCR_VECTKEY_Pos 16U /*!< SCB AIRCR: VECTKEY Position */ +#define SCB_AIRCR_VECTKEY_Msk (0xFFFFUL << SCB_AIRCR_VECTKEY_Pos) /*!< SCB AIRCR: VECTKEY Mask */ + +#define SCB_AIRCR_VECTKEYSTAT_Pos 16U /*!< SCB AIRCR: VECTKEYSTAT Position */ +#define SCB_AIRCR_VECTKEYSTAT_Msk (0xFFFFUL << SCB_AIRCR_VECTKEYSTAT_Pos) /*!< SCB AIRCR: VECTKEYSTAT Mask */ + +#define SCB_AIRCR_ENDIANESS_Pos 15U /*!< SCB AIRCR: ENDIANESS Position */ +#define SCB_AIRCR_ENDIANESS_Msk (1UL << SCB_AIRCR_ENDIANESS_Pos) /*!< SCB AIRCR: ENDIANESS Mask */ + +#define SCB_AIRCR_PRIS_Pos 14U /*!< SCB AIRCR: PRIS Position */ +#define SCB_AIRCR_PRIS_Msk (1UL << SCB_AIRCR_PRIS_Pos) /*!< SCB AIRCR: PRIS Mask */ + +#define SCB_AIRCR_BFHFNMINS_Pos 13U /*!< SCB AIRCR: BFHFNMINS Position */ +#define SCB_AIRCR_BFHFNMINS_Msk (1UL << SCB_AIRCR_BFHFNMINS_Pos) /*!< SCB AIRCR: BFHFNMINS Mask */ + +#define SCB_AIRCR_PRIGROUP_Pos 8U /*!< SCB AIRCR: PRIGROUP Position */ +#define SCB_AIRCR_PRIGROUP_Msk (7UL << SCB_AIRCR_PRIGROUP_Pos) /*!< SCB AIRCR: PRIGROUP Mask */ + +#define SCB_AIRCR_SYSRESETREQS_Pos 3U /*!< SCB AIRCR: SYSRESETREQS Position */ +#define SCB_AIRCR_SYSRESETREQS_Msk (1UL << SCB_AIRCR_SYSRESETREQS_Pos) /*!< SCB AIRCR: SYSRESETREQS Mask */ + +#define SCB_AIRCR_SYSRESETREQ_Pos 2U /*!< SCB AIRCR: SYSRESETREQ Position */ +#define SCB_AIRCR_SYSRESETREQ_Msk (1UL << SCB_AIRCR_SYSRESETREQ_Pos) /*!< SCB AIRCR: SYSRESETREQ Mask */ + +#define SCB_AIRCR_VECTCLRACTIVE_Pos 1U /*!< SCB AIRCR: VECTCLRACTIVE Position */ +#define SCB_AIRCR_VECTCLRACTIVE_Msk (1UL << SCB_AIRCR_VECTCLRACTIVE_Pos) /*!< SCB AIRCR: VECTCLRACTIVE Mask */ + +/* SCB System Control Register Definitions */ +#define SCB_SCR_SEVONPEND_Pos 4U /*!< SCB SCR: 
SEVONPEND Position */ +#define SCB_SCR_SEVONPEND_Msk (1UL << SCB_SCR_SEVONPEND_Pos) /*!< SCB SCR: SEVONPEND Mask */ + +#define SCB_SCR_SLEEPDEEPS_Pos 3U /*!< SCB SCR: SLEEPDEEPS Position */ +#define SCB_SCR_SLEEPDEEPS_Msk (1UL << SCB_SCR_SLEEPDEEPS_Pos) /*!< SCB SCR: SLEEPDEEPS Mask */ + +#define SCB_SCR_SLEEPDEEP_Pos 2U /*!< SCB SCR: SLEEPDEEP Position */ +#define SCB_SCR_SLEEPDEEP_Msk (1UL << SCB_SCR_SLEEPDEEP_Pos) /*!< SCB SCR: SLEEPDEEP Mask */ + +#define SCB_SCR_SLEEPONEXIT_Pos 1U /*!< SCB SCR: SLEEPONEXIT Position */ +#define SCB_SCR_SLEEPONEXIT_Msk (1UL << SCB_SCR_SLEEPONEXIT_Pos) /*!< SCB SCR: SLEEPONEXIT Mask */ + +/* SCB Configuration Control Register Definitions */ +#define SCB_CCR_BP_Pos 18U /*!< SCB CCR: BP Position */ +#define SCB_CCR_BP_Msk (1UL << SCB_CCR_BP_Pos) /*!< SCB CCR: BP Mask */ + +#define SCB_CCR_IC_Pos 17U /*!< SCB CCR: IC Position */ +#define SCB_CCR_IC_Msk (1UL << SCB_CCR_IC_Pos) /*!< SCB CCR: IC Mask */ + +#define SCB_CCR_DC_Pos 16U /*!< SCB CCR: DC Position */ +#define SCB_CCR_DC_Msk (1UL << SCB_CCR_DC_Pos) /*!< SCB CCR: DC Mask */ + +#define SCB_CCR_STKOFHFNMIGN_Pos 10U /*!< SCB CCR: STKOFHFNMIGN Position */ +#define SCB_CCR_STKOFHFNMIGN_Msk (1UL << SCB_CCR_STKOFHFNMIGN_Pos) /*!< SCB CCR: STKOFHFNMIGN Mask */ + +#define SCB_CCR_BFHFNMIGN_Pos 8U /*!< SCB CCR: BFHFNMIGN Position */ +#define SCB_CCR_BFHFNMIGN_Msk (1UL << SCB_CCR_BFHFNMIGN_Pos) /*!< SCB CCR: BFHFNMIGN Mask */ + +#define SCB_CCR_DIV_0_TRP_Pos 4U /*!< SCB CCR: DIV_0_TRP Position */ +#define SCB_CCR_DIV_0_TRP_Msk (1UL << SCB_CCR_DIV_0_TRP_Pos) /*!< SCB CCR: DIV_0_TRP Mask */ + +#define SCB_CCR_UNALIGN_TRP_Pos 3U /*!< SCB CCR: UNALIGN_TRP Position */ +#define SCB_CCR_UNALIGN_TRP_Msk (1UL << SCB_CCR_UNALIGN_TRP_Pos) /*!< SCB CCR: UNALIGN_TRP Mask */ + +#define SCB_CCR_USERSETMPEND_Pos 1U /*!< SCB CCR: USERSETMPEND Position */ +#define SCB_CCR_USERSETMPEND_Msk (1UL << SCB_CCR_USERSETMPEND_Pos) /*!< SCB CCR: USERSETMPEND Mask */ + +/* SCB System Handler Control and State Register Definitions */ +#define SCB_SHCSR_HARDFAULTPENDED_Pos 21U /*!< SCB SHCSR: HARDFAULTPENDED Position */ +#define SCB_SHCSR_HARDFAULTPENDED_Msk (1UL << SCB_SHCSR_HARDFAULTPENDED_Pos) /*!< SCB SHCSR: HARDFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTPENDED_Pos 20U /*!< SCB SHCSR: SECUREFAULTPENDED Position */ +#define SCB_SHCSR_SECUREFAULTPENDED_Msk (1UL << SCB_SHCSR_SECUREFAULTPENDED_Pos) /*!< SCB SHCSR: SECUREFAULTPENDED Mask */ + +#define SCB_SHCSR_SECUREFAULTENA_Pos 19U /*!< SCB SHCSR: SECUREFAULTENA Position */ +#define SCB_SHCSR_SECUREFAULTENA_Msk (1UL << SCB_SHCSR_SECUREFAULTENA_Pos) /*!< SCB SHCSR: SECUREFAULTENA Mask */ + +#define SCB_SHCSR_USGFAULTENA_Pos 18U /*!< SCB SHCSR: USGFAULTENA Position */ +#define SCB_SHCSR_USGFAULTENA_Msk (1UL << SCB_SHCSR_USGFAULTENA_Pos) /*!< SCB SHCSR: USGFAULTENA Mask */ + +#define SCB_SHCSR_BUSFAULTENA_Pos 17U /*!< SCB SHCSR: BUSFAULTENA Position */ +#define SCB_SHCSR_BUSFAULTENA_Msk (1UL << SCB_SHCSR_BUSFAULTENA_Pos) /*!< SCB SHCSR: BUSFAULTENA Mask */ + +#define SCB_SHCSR_MEMFAULTENA_Pos 16U /*!< SCB SHCSR: MEMFAULTENA Position */ +#define SCB_SHCSR_MEMFAULTENA_Msk (1UL << SCB_SHCSR_MEMFAULTENA_Pos) /*!< SCB SHCSR: MEMFAULTENA Mask */ + +#define SCB_SHCSR_SVCALLPENDED_Pos 15U /*!< SCB SHCSR: SVCALLPENDED Position */ +#define SCB_SHCSR_SVCALLPENDED_Msk (1UL << SCB_SHCSR_SVCALLPENDED_Pos) /*!< SCB SHCSR: SVCALLPENDED Mask */ + +#define SCB_SHCSR_BUSFAULTPENDED_Pos 14U /*!< SCB SHCSR: BUSFAULTPENDED Position */ +#define SCB_SHCSR_BUSFAULTPENDED_Msk (1UL << 
SCB_SHCSR_BUSFAULTPENDED_Pos) /*!< SCB SHCSR: BUSFAULTPENDED Mask */ + +#define SCB_SHCSR_MEMFAULTPENDED_Pos 13U /*!< SCB SHCSR: MEMFAULTPENDED Position */ +#define SCB_SHCSR_MEMFAULTPENDED_Msk (1UL << SCB_SHCSR_MEMFAULTPENDED_Pos) /*!< SCB SHCSR: MEMFAULTPENDED Mask */ + +#define SCB_SHCSR_USGFAULTPENDED_Pos 12U /*!< SCB SHCSR: USGFAULTPENDED Position */ +#define SCB_SHCSR_USGFAULTPENDED_Msk (1UL << SCB_SHCSR_USGFAULTPENDED_Pos) /*!< SCB SHCSR: USGFAULTPENDED Mask */ + +#define SCB_SHCSR_SYSTICKACT_Pos 11U /*!< SCB SHCSR: SYSTICKACT Position */ +#define SCB_SHCSR_SYSTICKACT_Msk (1UL << SCB_SHCSR_SYSTICKACT_Pos) /*!< SCB SHCSR: SYSTICKACT Mask */ + +#define SCB_SHCSR_PENDSVACT_Pos 10U /*!< SCB SHCSR: PENDSVACT Position */ +#define SCB_SHCSR_PENDSVACT_Msk (1UL << SCB_SHCSR_PENDSVACT_Pos) /*!< SCB SHCSR: PENDSVACT Mask */ + +#define SCB_SHCSR_MONITORACT_Pos 8U /*!< SCB SHCSR: MONITORACT Position */ +#define SCB_SHCSR_MONITORACT_Msk (1UL << SCB_SHCSR_MONITORACT_Pos) /*!< SCB SHCSR: MONITORACT Mask */ + +#define SCB_SHCSR_SVCALLACT_Pos 7U /*!< SCB SHCSR: SVCALLACT Position */ +#define SCB_SHCSR_SVCALLACT_Msk (1UL << SCB_SHCSR_SVCALLACT_Pos) /*!< SCB SHCSR: SVCALLACT Mask */ + +#define SCB_SHCSR_NMIACT_Pos 5U /*!< SCB SHCSR: NMIACT Position */ +#define SCB_SHCSR_NMIACT_Msk (1UL << SCB_SHCSR_NMIACT_Pos) /*!< SCB SHCSR: NMIACT Mask */ + +#define SCB_SHCSR_SECUREFAULTACT_Pos 4U /*!< SCB SHCSR: SECUREFAULTACT Position */ +#define SCB_SHCSR_SECUREFAULTACT_Msk (1UL << SCB_SHCSR_SECUREFAULTACT_Pos) /*!< SCB SHCSR: SECUREFAULTACT Mask */ + +#define SCB_SHCSR_USGFAULTACT_Pos 3U /*!< SCB SHCSR: USGFAULTACT Position */ +#define SCB_SHCSR_USGFAULTACT_Msk (1UL << SCB_SHCSR_USGFAULTACT_Pos) /*!< SCB SHCSR: USGFAULTACT Mask */ + +#define SCB_SHCSR_HARDFAULTACT_Pos 2U /*!< SCB SHCSR: HARDFAULTACT Position */ +#define SCB_SHCSR_HARDFAULTACT_Msk (1UL << SCB_SHCSR_HARDFAULTACT_Pos) /*!< SCB SHCSR: HARDFAULTACT Mask */ + +#define SCB_SHCSR_BUSFAULTACT_Pos 1U /*!< SCB SHCSR: BUSFAULTACT Position */ +#define SCB_SHCSR_BUSFAULTACT_Msk (1UL << SCB_SHCSR_BUSFAULTACT_Pos) /*!< SCB SHCSR: BUSFAULTACT Mask */ + +#define SCB_SHCSR_MEMFAULTACT_Pos 0U /*!< SCB SHCSR: MEMFAULTACT Position */ +#define SCB_SHCSR_MEMFAULTACT_Msk (1UL /*<< SCB_SHCSR_MEMFAULTACT_Pos*/) /*!< SCB SHCSR: MEMFAULTACT Mask */ + +/* SCB Configurable Fault Status Register Definitions */ +#define SCB_CFSR_USGFAULTSR_Pos 16U /*!< SCB CFSR: Usage Fault Status Register Position */ +#define SCB_CFSR_USGFAULTSR_Msk (0xFFFFUL << SCB_CFSR_USGFAULTSR_Pos) /*!< SCB CFSR: Usage Fault Status Register Mask */ + +#define SCB_CFSR_BUSFAULTSR_Pos 8U /*!< SCB CFSR: Bus Fault Status Register Position */ +#define SCB_CFSR_BUSFAULTSR_Msk (0xFFUL << SCB_CFSR_BUSFAULTSR_Pos) /*!< SCB CFSR: Bus Fault Status Register Mask */ + +#define SCB_CFSR_MEMFAULTSR_Pos 0U /*!< SCB CFSR: Memory Manage Fault Status Register Position */ +#define SCB_CFSR_MEMFAULTSR_Msk (0xFFUL /*<< SCB_CFSR_MEMFAULTSR_Pos*/) /*!< SCB CFSR: Memory Manage Fault Status Register Mask */ + +/* MemManage Fault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_MMARVALID_Pos (SCB_CFSR_MEMFAULTSR_Pos + 7U) /*!< SCB CFSR (MMFSR): MMARVALID Position */ +#define SCB_CFSR_MMARVALID_Msk (1UL << SCB_CFSR_MMARVALID_Pos) /*!< SCB CFSR (MMFSR): MMARVALID Mask */ + +#define SCB_CFSR_MLSPERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 5U) /*!< SCB CFSR (MMFSR): MLSPERR Position */ +#define SCB_CFSR_MLSPERR_Msk (1UL << SCB_CFSR_MLSPERR_Pos) /*!< SCB CFSR (MMFSR): MLSPERR Mask */ + +#define 
SCB_CFSR_MSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 4U) /*!< SCB CFSR (MMFSR): MSTKERR Position */ +#define SCB_CFSR_MSTKERR_Msk (1UL << SCB_CFSR_MSTKERR_Pos) /*!< SCB CFSR (MMFSR): MSTKERR Mask */ + +#define SCB_CFSR_MUNSTKERR_Pos (SCB_CFSR_MEMFAULTSR_Pos + 3U) /*!< SCB CFSR (MMFSR): MUNSTKERR Position */ +#define SCB_CFSR_MUNSTKERR_Msk (1UL << SCB_CFSR_MUNSTKERR_Pos) /*!< SCB CFSR (MMFSR): MUNSTKERR Mask */ + +#define SCB_CFSR_DACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 1U) /*!< SCB CFSR (MMFSR): DACCVIOL Position */ +#define SCB_CFSR_DACCVIOL_Msk (1UL << SCB_CFSR_DACCVIOL_Pos) /*!< SCB CFSR (MMFSR): DACCVIOL Mask */ + +#define SCB_CFSR_IACCVIOL_Pos (SCB_CFSR_MEMFAULTSR_Pos + 0U) /*!< SCB CFSR (MMFSR): IACCVIOL Position */ +#define SCB_CFSR_IACCVIOL_Msk (1UL /*<< SCB_CFSR_IACCVIOL_Pos*/) /*!< SCB CFSR (MMFSR): IACCVIOL Mask */ + +/* BusFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_BFARVALID_Pos (SCB_CFSR_BUSFAULTSR_Pos + 7U) /*!< SCB CFSR (BFSR): BFARVALID Position */ +#define SCB_CFSR_BFARVALID_Msk (1UL << SCB_CFSR_BFARVALID_Pos) /*!< SCB CFSR (BFSR): BFARVALID Mask */ + +#define SCB_CFSR_LSPERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 5U) /*!< SCB CFSR (BFSR): LSPERR Position */ +#define SCB_CFSR_LSPERR_Msk (1UL << SCB_CFSR_LSPERR_Pos) /*!< SCB CFSR (BFSR): LSPERR Mask */ + +#define SCB_CFSR_STKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 4U) /*!< SCB CFSR (BFSR): STKERR Position */ +#define SCB_CFSR_STKERR_Msk (1UL << SCB_CFSR_STKERR_Pos) /*!< SCB CFSR (BFSR): STKERR Mask */ + +#define SCB_CFSR_UNSTKERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 3U) /*!< SCB CFSR (BFSR): UNSTKERR Position */ +#define SCB_CFSR_UNSTKERR_Msk (1UL << SCB_CFSR_UNSTKERR_Pos) /*!< SCB CFSR (BFSR): UNSTKERR Mask */ + +#define SCB_CFSR_IMPRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 2U) /*!< SCB CFSR (BFSR): IMPRECISERR Position */ +#define SCB_CFSR_IMPRECISERR_Msk (1UL << SCB_CFSR_IMPRECISERR_Pos) /*!< SCB CFSR (BFSR): IMPRECISERR Mask */ + +#define SCB_CFSR_PRECISERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 1U) /*!< SCB CFSR (BFSR): PRECISERR Position */ +#define SCB_CFSR_PRECISERR_Msk (1UL << SCB_CFSR_PRECISERR_Pos) /*!< SCB CFSR (BFSR): PRECISERR Mask */ + +#define SCB_CFSR_IBUSERR_Pos (SCB_CFSR_BUSFAULTSR_Pos + 0U) /*!< SCB CFSR (BFSR): IBUSERR Position */ +#define SCB_CFSR_IBUSERR_Msk (1UL << SCB_CFSR_IBUSERR_Pos) /*!< SCB CFSR (BFSR): IBUSERR Mask */ + +/* UsageFault Status Register (part of SCB Configurable Fault Status Register) */ +#define SCB_CFSR_DIVBYZERO_Pos (SCB_CFSR_USGFAULTSR_Pos + 9U) /*!< SCB CFSR (UFSR): DIVBYZERO Position */ +#define SCB_CFSR_DIVBYZERO_Msk (1UL << SCB_CFSR_DIVBYZERO_Pos) /*!< SCB CFSR (UFSR): DIVBYZERO Mask */ + +#define SCB_CFSR_UNALIGNED_Pos (SCB_CFSR_USGFAULTSR_Pos + 8U) /*!< SCB CFSR (UFSR): UNALIGNED Position */ +#define SCB_CFSR_UNALIGNED_Msk (1UL << SCB_CFSR_UNALIGNED_Pos) /*!< SCB CFSR (UFSR): UNALIGNED Mask */ + +#define SCB_CFSR_STKOF_Pos (SCB_CFSR_USGFAULTSR_Pos + 4U) /*!< SCB CFSR (UFSR): STKOF Position */ +#define SCB_CFSR_STKOF_Msk (1UL << SCB_CFSR_STKOF_Pos) /*!< SCB CFSR (UFSR): STKOF Mask */ + +#define SCB_CFSR_NOCP_Pos (SCB_CFSR_USGFAULTSR_Pos + 3U) /*!< SCB CFSR (UFSR): NOCP Position */ +#define SCB_CFSR_NOCP_Msk (1UL << SCB_CFSR_NOCP_Pos) /*!< SCB CFSR (UFSR): NOCP Mask */ + +#define SCB_CFSR_INVPC_Pos (SCB_CFSR_USGFAULTSR_Pos + 2U) /*!< SCB CFSR (UFSR): INVPC Position */ +#define SCB_CFSR_INVPC_Msk (1UL << SCB_CFSR_INVPC_Pos) /*!< SCB CFSR (UFSR): INVPC Mask */ + +#define SCB_CFSR_INVSTATE_Pos (SCB_CFSR_USGFAULTSR_Pos + 1U) /*!< SCB CFSR (UFSR): INVSTATE 
Position */ +#define SCB_CFSR_INVSTATE_Msk (1UL << SCB_CFSR_INVSTATE_Pos) /*!< SCB CFSR (UFSR): INVSTATE Mask */ + +#define SCB_CFSR_UNDEFINSTR_Pos (SCB_CFSR_USGFAULTSR_Pos + 0U) /*!< SCB CFSR (UFSR): UNDEFINSTR Position */ +#define SCB_CFSR_UNDEFINSTR_Msk (1UL << SCB_CFSR_UNDEFINSTR_Pos) /*!< SCB CFSR (UFSR): UNDEFINSTR Mask */ + +/* SCB Hard Fault Status Register Definitions */ +#define SCB_HFSR_DEBUGEVT_Pos 31U /*!< SCB HFSR: DEBUGEVT Position */ +#define SCB_HFSR_DEBUGEVT_Msk (1UL << SCB_HFSR_DEBUGEVT_Pos) /*!< SCB HFSR: DEBUGEVT Mask */ + +#define SCB_HFSR_FORCED_Pos 30U /*!< SCB HFSR: FORCED Position */ +#define SCB_HFSR_FORCED_Msk (1UL << SCB_HFSR_FORCED_Pos) /*!< SCB HFSR: FORCED Mask */ + +#define SCB_HFSR_VECTTBL_Pos 1U /*!< SCB HFSR: VECTTBL Position */ +#define SCB_HFSR_VECTTBL_Msk (1UL << SCB_HFSR_VECTTBL_Pos) /*!< SCB HFSR: VECTTBL Mask */ + +/* SCB Debug Fault Status Register Definitions */ +#define SCB_DFSR_EXTERNAL_Pos 4U /*!< SCB DFSR: EXTERNAL Position */ +#define SCB_DFSR_EXTERNAL_Msk (1UL << SCB_DFSR_EXTERNAL_Pos) /*!< SCB DFSR: EXTERNAL Mask */ + +#define SCB_DFSR_VCATCH_Pos 3U /*!< SCB DFSR: VCATCH Position */ +#define SCB_DFSR_VCATCH_Msk (1UL << SCB_DFSR_VCATCH_Pos) /*!< SCB DFSR: VCATCH Mask */ + +#define SCB_DFSR_DWTTRAP_Pos 2U /*!< SCB DFSR: DWTTRAP Position */ +#define SCB_DFSR_DWTTRAP_Msk (1UL << SCB_DFSR_DWTTRAP_Pos) /*!< SCB DFSR: DWTTRAP Mask */ + +#define SCB_DFSR_BKPT_Pos 1U /*!< SCB DFSR: BKPT Position */ +#define SCB_DFSR_BKPT_Msk (1UL << SCB_DFSR_BKPT_Pos) /*!< SCB DFSR: BKPT Mask */ + +#define SCB_DFSR_HALTED_Pos 0U /*!< SCB DFSR: HALTED Position */ +#define SCB_DFSR_HALTED_Msk (1UL /*<< SCB_DFSR_HALTED_Pos*/) /*!< SCB DFSR: HALTED Mask */ + +/* SCB Non-Secure Access Control Register Definitions */ +#define SCB_NSACR_CP11_Pos 11U /*!< SCB NSACR: CP11 Position */ +#define SCB_NSACR_CP11_Msk (1UL << SCB_NSACR_CP11_Pos) /*!< SCB NSACR: CP11 Mask */ + +#define SCB_NSACR_CP10_Pos 10U /*!< SCB NSACR: CP10 Position */ +#define SCB_NSACR_CP10_Msk (1UL << SCB_NSACR_CP10_Pos) /*!< SCB NSACR: CP10 Mask */ + +#define SCB_NSACR_CPn_Pos 0U /*!< SCB NSACR: CPn Position */ +#define SCB_NSACR_CPn_Msk (1UL /*<< SCB_NSACR_CPn_Pos*/) /*!< SCB NSACR: CPn Mask */ + +/* SCB Cache Level ID Register Definitions */ +#define SCB_CLIDR_LOUU_Pos 27U /*!< SCB CLIDR: LoUU Position */ +#define SCB_CLIDR_LOUU_Msk (7UL << SCB_CLIDR_LOUU_Pos) /*!< SCB CLIDR: LoUU Mask */ + +#define SCB_CLIDR_LOC_Pos 24U /*!< SCB CLIDR: LoC Position */ +#define SCB_CLIDR_LOC_Msk (7UL << SCB_CLIDR_LOC_Pos) /*!< SCB CLIDR: LoC Mask */ + +#define SCB_CLIDR_IC_Pos 0U /*!< SCB CLIDR: IC Position */ +#define SCB_CLIDR_IC_Msk (1UL << SCB_CLIDR_IC_Pos) /*!< SCB CLIDR: IC Mask */ + +#define SCB_CLIDR_DC_Pos 1U /*!< SCB CLIDR: DC Position */ +#define SCB_CLIDR_DC_Msk (1UL << SCB_CLIDR_DC_Pos) /*!< SCB CLIDR: DC Mask */ + + + +/* SCB Cache Type Register Definitions */ +#define SCB_CTR_FORMAT_Pos 29U /*!< SCB CTR: Format Position */ +#define SCB_CTR_FORMAT_Msk (7UL << SCB_CTR_FORMAT_Pos) /*!< SCB CTR: Format Mask */ + +#define SCB_CTR_CWG_Pos 24U /*!< SCB CTR: CWG Position */ +#define SCB_CTR_CWG_Msk (0xFUL << SCB_CTR_CWG_Pos) /*!< SCB CTR: CWG Mask */ + +#define SCB_CTR_ERG_Pos 20U /*!< SCB CTR: ERG Position */ +#define SCB_CTR_ERG_Msk (0xFUL << SCB_CTR_ERG_Pos) /*!< SCB CTR: ERG Mask */ + +#define SCB_CTR_DMINLINE_Pos 16U /*!< SCB CTR: DminLine Position */ +#define SCB_CTR_DMINLINE_Msk (0xFUL << SCB_CTR_DMINLINE_Pos) /*!< SCB CTR: DminLine Mask */ + +#define SCB_CTR_IMINLINE_Pos 0U /*!< SCB CTR: ImInLine Position 
*/ +#define SCB_CTR_IMINLINE_Msk (0xFUL /*<< SCB_CTR_IMINLINE_Pos*/) /*!< SCB CTR: ImInLine Mask */ + +/* SCB Cache Size ID Register Definitions */ +#define SCB_CCSIDR_WT_Pos 31U /*!< SCB CCSIDR: WT Position */ +#define SCB_CCSIDR_WT_Msk (1UL << SCB_CCSIDR_WT_Pos) /*!< SCB CCSIDR: WT Mask */ + +#define SCB_CCSIDR_WB_Pos 30U /*!< SCB CCSIDR: WB Position */ +#define SCB_CCSIDR_WB_Msk (1UL << SCB_CCSIDR_WB_Pos) /*!< SCB CCSIDR: WB Mask */ + +#define SCB_CCSIDR_RA_Pos 29U /*!< SCB CCSIDR: RA Position */ +#define SCB_CCSIDR_RA_Msk (1UL << SCB_CCSIDR_RA_Pos) /*!< SCB CCSIDR: RA Mask */ + +#define SCB_CCSIDR_WA_Pos 28U /*!< SCB CCSIDR: WA Position */ +#define SCB_CCSIDR_WA_Msk (1UL << SCB_CCSIDR_WA_Pos) /*!< SCB CCSIDR: WA Mask */ + +#define SCB_CCSIDR_NUMSETS_Pos 13U /*!< SCB CCSIDR: NumSets Position */ +#define SCB_CCSIDR_NUMSETS_Msk (0x7FFFUL << SCB_CCSIDR_NUMSETS_Pos) /*!< SCB CCSIDR: NumSets Mask */ + +#define SCB_CCSIDR_ASSOCIATIVITY_Pos 3U /*!< SCB CCSIDR: Associativity Position */ +#define SCB_CCSIDR_ASSOCIATIVITY_Msk (0x3FFUL << SCB_CCSIDR_ASSOCIATIVITY_Pos) /*!< SCB CCSIDR: Associativity Mask */ + +#define SCB_CCSIDR_LINESIZE_Pos 0U /*!< SCB CCSIDR: LineSize Position */ +#define SCB_CCSIDR_LINESIZE_Msk (7UL /*<< SCB_CCSIDR_LINESIZE_Pos*/) /*!< SCB CCSIDR: LineSize Mask */ + +/* SCB Cache Size Selection Register Definitions */ +#define SCB_CSSELR_LEVEL_Pos 1U /*!< SCB CSSELR: Level Position */ +#define SCB_CSSELR_LEVEL_Msk (7UL << SCB_CSSELR_LEVEL_Pos) /*!< SCB CSSELR: Level Mask */ + +#define SCB_CSSELR_IND_Pos 0U /*!< SCB CSSELR: InD Position */ +#define SCB_CSSELR_IND_Msk (1UL /*<< SCB_CSSELR_IND_Pos*/) /*!< SCB CSSELR: InD Mask */ + +/* SCB Software Triggered Interrupt Register Definitions */ +#define SCB_STIR_INTID_Pos 0U /*!< SCB STIR: INTID Position */ +#define SCB_STIR_INTID_Msk (0x1FFUL /*<< SCB_STIR_INTID_Pos*/) /*!< SCB STIR: INTID Mask */ + +/* SCB D-Cache line Invalidate by Set-way Register Definitions */ +#define SCB_DCISW_LEVEL_Pos 1U /*!< SCB DCISW: Level Position */ +#define SCB_DCISW_LEVEL_Msk (7UL << SCB_DCISW_LEVEL_Pos) /*!< SCB DCISW: Level Mask */ + +#define SCB_DCISW_WAY_Pos 30U /*!< SCB DCISW: Way Position */ +#define SCB_DCISW_WAY_Msk (3UL << SCB_DCISW_WAY_Pos) /*!< SCB DCISW: Way Mask */ + +#define SCB_DCISW_SET_Pos 5U /*!< SCB DCISW: Set Position */ +#define SCB_DCISW_SET_Msk (0xFFUL << SCB_DCISW_SET_Pos) /*!< SCB DCISW: Set Mask */ + +/* SCB D-Cache Clean line by Set-way Register Definitions */ +#define SCB_DCCSW_LEVEL_Pos 1U /*!< SCB DCCSW: Level Position */ +#define SCB_DCCSW_LEVEL_Msk (7UL << SCB_DCCSW_LEVEL_Pos) /*!< SCB DCCSW: Level Mask */ + +#define SCB_DCCSW_WAY_Pos 30U /*!< SCB DCCSW: Way Position */ +#define SCB_DCCSW_WAY_Msk (3UL << SCB_DCCSW_WAY_Pos) /*!< SCB DCCSW: Way Mask */ + +#define SCB_DCCSW_SET_Pos 5U /*!< SCB DCCSW: Set Position */ +#define SCB_DCCSW_SET_Msk (0xFFUL << SCB_DCCSW_SET_Pos) /*!< SCB DCCSW: Set Mask */ + +/* SCB D-Cache Clean and Invalidate by Set-way Register Definitions */ +#define SCB_DCCISW_LEVEL_Pos 1U /*!< SCB DCCISW: Level Position */ +#define SCB_DCCISW_LEVEL_Msk (7UL << SCB_DCCISW_LEVEL_Pos) /*!< SCB DCCISW: Level Mask */ + +#define SCB_DCCISW_WAY_Pos 30U /*!< SCB DCCISW: Way Position */ +#define SCB_DCCISW_WAY_Msk (3UL << SCB_DCCISW_WAY_Pos) /*!< SCB DCCISW: Way Mask */ + +#define SCB_DCCISW_SET_Pos 5U /*!< SCB DCCISW: Set Position */ +#define SCB_DCCISW_SET_Msk (0xFFUL << SCB_DCCISW_SET_Pos) /*!< SCB DCCISW: Set Mask */ + +/* ArmChina: Implementation Defined */ +/* Instruction Tightly-Coupled Memory Control Register 
Definitions */ +#define SCB_ITCMCR_SZ_Pos 3U /*!< SCB ITCMCR: SZ Position */ +#define SCB_ITCMCR_SZ_Msk (0xFUL << SCB_ITCMCR_SZ_Pos) /*!< SCB ITCMCR: SZ Mask */ + +#define SCB_ITCMCR_EN_Pos 0U /*!< SCB ITCMCR: EN Position */ +#define SCB_ITCMCR_EN_Msk (1UL /*<< SCB_ITCMCR_EN_Pos*/) /*!< SCB ITCMCR: EN Mask */ + +/* Data Tightly-Coupled Memory Control Register Definitions */ +#define SCB_DTCMCR_SZ_Pos 3U /*!< SCB DTCMCR: SZ Position */ +#define SCB_DTCMCR_SZ_Msk (0xFUL << SCB_DTCMCR_SZ_Pos) /*!< SCB DTCMCR: SZ Mask */ + +#define SCB_DTCMCR_EN_Pos 0U /*!< SCB DTCMCR: EN Position */ +#define SCB_DTCMCR_EN_Msk (1UL /*<< SCB_DTCMCR_EN_Pos*/) /*!< SCB DTCMCR: EN Mask */ + +/* L1 Cache Control Register Definitions */ +#define SCB_CACR_DCCLEAN_Pos 16U /*!< SCB CACR: DCCLEAN Position */ +#define SCB_CACR_DCCLEAN_Msk (1UL << SCB_CACR_DCCLEAN_Pos) /*!< SCB CACR: DCCLEAN Mask */ + +#define SCB_CACR_ICACTIVE_Pos 13U /*!< SCB CACR: ICACTIVE Position */ +#define SCB_CACR_ICACTIVE_Msk (1UL << SCB_CACR_ICACTIVE_Pos) /*!< SCB CACR: ICACTIVE Mask */ + +#define SCB_CACR_DCACTIVE_Pos 12U /*!< SCB CACR: DCACTIVE Position */ +#define SCB_CACR_DCACTIVE_Msk (1UL << SCB_CACR_DCACTIVE_Pos) /*!< SCB CACR: DCACTIVE Mask */ + +#define SCB_CACR_FORCEWT_Pos 2U /*!< SCB CACR: FORCEWT Position */ +#define SCB_CACR_FORCEWT_Msk (1UL << SCB_CACR_FORCEWT_Pos) /*!< SCB CACR: FORCEWT Mask */ + +/*@} end of group CMSIS_SCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SCnSCB System Controls not in SCB (SCnSCB) + \brief Type definitions for the System Control and ID Register not in the SCB + @{ + */ + +/** + \brief Structure type to access the System Control and ID Register not in the SCB. + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IM uint32_t ICTR; /*!< Offset: 0x004 (R/ ) Interrupt Controller Type Register */ + __IOM uint32_t ACTLR; /*!< Offset: 0x008 (R/W) Auxiliary Control Register */ + __IOM uint32_t CPPWR; /*!< Offset: 0x00C (R/W) Coprocessor Power Control Register */ +} SCnSCB_Type; + +/* Interrupt Controller Type Register Definitions */ +#define SCnSCB_ICTR_INTLINESNUM_Pos 0U /*!< ICTR: INTLINESNUM Position */ +#define SCnSCB_ICTR_INTLINESNUM_Msk (0xFUL /*<< SCnSCB_ICTR_INTLINESNUM_Pos*/) /*!< ICTR: INTLINESNUM Mask */ + +/*@} end of group CMSIS_SCnotSCB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SysTick System Tick Timer (SysTick) + \brief Type definitions for the System Timer Registers. + @{ + */ + +/** + \brief Structure type to access the System Timer (SysTick).
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SysTick Control and Status Register */ + __IOM uint32_t LOAD; /*!< Offset: 0x004 (R/W) SysTick Reload Value Register */ + __IOM uint32_t VAL; /*!< Offset: 0x008 (R/W) SysTick Current Value Register */ + __IM uint32_t CALIB; /*!< Offset: 0x00C (R/ ) SysTick Calibration Register */ +} SysTick_Type; + +/* SysTick Control / Status Register Definitions */ +#define SysTick_CTRL_COUNTFLAG_Pos 16U /*!< SysTick CTRL: COUNTFLAG Position */ +#define SysTick_CTRL_COUNTFLAG_Msk (1UL << SysTick_CTRL_COUNTFLAG_Pos) /*!< SysTick CTRL: COUNTFLAG Mask */ + +#define SysTick_CTRL_CLKSOURCE_Pos 2U /*!< SysTick CTRL: CLKSOURCE Position */ +#define SysTick_CTRL_CLKSOURCE_Msk (1UL << SysTick_CTRL_CLKSOURCE_Pos) /*!< SysTick CTRL: CLKSOURCE Mask */ + +#define SysTick_CTRL_TICKINT_Pos 1U /*!< SysTick CTRL: TICKINT Position */ +#define SysTick_CTRL_TICKINT_Msk (1UL << SysTick_CTRL_TICKINT_Pos) /*!< SysTick CTRL: TICKINT Mask */ + +#define SysTick_CTRL_ENABLE_Pos 0U /*!< SysTick CTRL: ENABLE Position */ +#define SysTick_CTRL_ENABLE_Msk (1UL /*<< SysTick_CTRL_ENABLE_Pos*/) /*!< SysTick CTRL: ENABLE Mask */ + +/* SysTick Reload Register Definitions */ +#define SysTick_LOAD_RELOAD_Pos 0U /*!< SysTick LOAD: RELOAD Position */ +#define SysTick_LOAD_RELOAD_Msk (0xFFFFFFUL /*<< SysTick_LOAD_RELOAD_Pos*/) /*!< SysTick LOAD: RELOAD Mask */ + +/* SysTick Current Register Definitions */ +#define SysTick_VAL_CURRENT_Pos 0U /*!< SysTick VAL: CURRENT Position */ +#define SysTick_VAL_CURRENT_Msk (0xFFFFFFUL /*<< SysTick_VAL_CURRENT_Pos*/) /*!< SysTick VAL: CURRENT Mask */ + +/* SysTick Calibration Register Definitions */ +#define SysTick_CALIB_NOREF_Pos 31U /*!< SysTick CALIB: NOREF Position */ +#define SysTick_CALIB_NOREF_Msk (1UL << SysTick_CALIB_NOREF_Pos) /*!< SysTick CALIB: NOREF Mask */ + +#define SysTick_CALIB_SKEW_Pos 30U /*!< SysTick CALIB: SKEW Position */ +#define SysTick_CALIB_SKEW_Msk (1UL << SysTick_CALIB_SKEW_Pos) /*!< SysTick CALIB: SKEW Mask */ + +#define SysTick_CALIB_TENMS_Pos 0U /*!< SysTick CALIB: TENMS Position */ +#define SysTick_CALIB_TENMS_Msk (0xFFFFFFUL /*<< SysTick_CALIB_TENMS_Pos*/) /*!< SysTick CALIB: TENMS Mask */ + +/*@} end of group CMSIS_SysTick */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_ITM Instrumentation Trace Macrocell (ITM) + \brief Type definitions for the Instrumentation Trace Macrocell (ITM) + @{ + */ + +/** + \brief Structure type to access the Instrumentation Trace Macrocell Register (ITM). 
+ */ +typedef struct +{ + __OM union + { + __OM uint8_t u8; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 8-bit */ + __OM uint16_t u16; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 16-bit */ + __OM uint32_t u32; /*!< Offset: 0x000 ( /W) ITM Stimulus Port 32-bit */ + } PORT [32U]; /*!< Offset: 0x000 ( /W) ITM Stimulus Port Registers */ + uint32_t RESERVED0[864U]; + __IOM uint32_t TER; /*!< Offset: 0xE00 (R/W) ITM Trace Enable Register */ + uint32_t RESERVED1[15U]; + __IOM uint32_t TPR; /*!< Offset: 0xE40 (R/W) ITM Trace Privilege Register */ + uint32_t RESERVED2[15U]; + __IOM uint32_t TCR; /*!< Offset: 0xE80 (R/W) ITM Trace Control Register */ + uint32_t RESERVED3[32U]; + uint32_t RESERVED4[43U]; + __OM uint32_t LAR; /*!< Offset: 0xFB0 ( /W) ITM Lock Access Register */ + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R/ ) ITM Lock Status Register */ + uint32_t RESERVED5[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) ITM Device Architecture Register */ + uint32_t RESERVED6[4U]; + __IM uint32_t PID4; /*!< Offset: 0xFD0 (R/ ) ITM Peripheral Identification Register #4 */ + __IM uint32_t PID5; /*!< Offset: 0xFD4 (R/ ) ITM Peripheral Identification Register #5 */ + __IM uint32_t PID6; /*!< Offset: 0xFD8 (R/ ) ITM Peripheral Identification Register #6 */ + __IM uint32_t PID7; /*!< Offset: 0xFDC (R/ ) ITM Peripheral Identification Register #7 */ + __IM uint32_t PID0; /*!< Offset: 0xFE0 (R/ ) ITM Peripheral Identification Register #0 */ + __IM uint32_t PID1; /*!< Offset: 0xFE4 (R/ ) ITM Peripheral Identification Register #1 */ + __IM uint32_t PID2; /*!< Offset: 0xFE8 (R/ ) ITM Peripheral Identification Register #2 */ + __IM uint32_t PID3; /*!< Offset: 0xFEC (R/ ) ITM Peripheral Identification Register #3 */ + __IM uint32_t CID0; /*!< Offset: 0xFF0 (R/ ) ITM Component Identification Register #0 */ + __IM uint32_t CID1; /*!< Offset: 0xFF4 (R/ ) ITM Component Identification Register #1 */ + __IM uint32_t CID2; /*!< Offset: 0xFF8 (R/ ) ITM Component Identification Register #2 */ + __IM uint32_t CID3; /*!< Offset: 0xFFC (R/ ) ITM Component Identification Register #3 */ +} ITM_Type; + +/* ITM Stimulus Port Register Definitions */ +#define ITM_STIM_DISABLED_Pos 1U /*!< ITM STIM: DISABLED Position */ +#define ITM_STIM_DISABLED_Msk (0x1UL << ITM_STIM_DISABLED_Pos) /*!< ITM STIM: DISABLED Mask */ + +#define ITM_STIM_FIFOREADY_Pos 0U /*!< ITM STIM: FIFOREADY Position */ +#define ITM_STIM_FIFOREADY_Msk (0x1UL /*<< ITM_STIM_FIFOREADY_Pos*/) /*!< ITM STIM: FIFOREADY Mask */ + +/* ITM Trace Privilege Register Definitions */ +#define ITM_TPR_PRIVMASK_Pos 0U /*!< ITM TPR: PRIVMASK Position */ +#define ITM_TPR_PRIVMASK_Msk (0xFFFFFFFFUL /*<< ITM_TPR_PRIVMASK_Pos*/) /*!< ITM TPR: PRIVMASK Mask */ + +/* ITM Trace Control Register Definitions */ +#define ITM_TCR_BUSY_Pos 23U /*!< ITM TCR: BUSY Position */ +#define ITM_TCR_BUSY_Msk (1UL << ITM_TCR_BUSY_Pos) /*!< ITM TCR: BUSY Mask */ + +#define ITM_TCR_TRACEBUSID_Pos 16U /*!< ITM TCR: ATBID Position */ +#define ITM_TCR_TRACEBUSID_Msk (0x7FUL << ITM_TCR_TRACEBUSID_Pos) /*!< ITM TCR: ATBID Mask */ + +#define ITM_TCR_GTSFREQ_Pos 10U /*!< ITM TCR: Global timestamp frequency Position */ +#define ITM_TCR_GTSFREQ_Msk (3UL << ITM_TCR_GTSFREQ_Pos) /*!< ITM TCR: Global timestamp frequency Mask */ + +#define ITM_TCR_TSPRESCALE_Pos 8U /*!< ITM TCR: TSPRESCALE Position */ +#define ITM_TCR_TSPRESCALE_Msk (3UL << ITM_TCR_TSPRESCALE_Pos) /*!< ITM TCR: TSPRESCALE Mask */ + +#define ITM_TCR_STALLENA_Pos 5U /*!< ITM TCR: STALLENA Position */ +#define ITM_TCR_STALLENA_Msk (1UL << 
ITM_TCR_STALLENA_Pos) /*!< ITM TCR: STALLENA Mask */ + +#define ITM_TCR_SWOENA_Pos 4U /*!< ITM TCR: SWOENA Position */ +#define ITM_TCR_SWOENA_Msk (1UL << ITM_TCR_SWOENA_Pos) /*!< ITM TCR: SWOENA Mask */ + +#define ITM_TCR_DWTENA_Pos 3U /*!< ITM TCR: DWTENA Position */ +#define ITM_TCR_DWTENA_Msk (1UL << ITM_TCR_DWTENA_Pos) /*!< ITM TCR: DWTENA Mask */ + +#define ITM_TCR_SYNCENA_Pos 2U /*!< ITM TCR: SYNCENA Position */ +#define ITM_TCR_SYNCENA_Msk (1UL << ITM_TCR_SYNCENA_Pos) /*!< ITM TCR: SYNCENA Mask */ + +#define ITM_TCR_TSENA_Pos 1U /*!< ITM TCR: TSENA Position */ +#define ITM_TCR_TSENA_Msk (1UL << ITM_TCR_TSENA_Pos) /*!< ITM TCR: TSENA Mask */ + +#define ITM_TCR_ITMENA_Pos 0U /*!< ITM TCR: ITM Enable bit Position */ +#define ITM_TCR_ITMENA_Msk (1UL /*<< ITM_TCR_ITMENA_Pos*/) /*!< ITM TCR: ITM Enable bit Mask */ + +/* ITM Lock Status Register Definitions */ +#define ITM_LSR_ByteAcc_Pos 2U /*!< ITM LSR: ByteAcc Position */ +#define ITM_LSR_ByteAcc_Msk (1UL << ITM_LSR_ByteAcc_Pos) /*!< ITM LSR: ByteAcc Mask */ + +#define ITM_LSR_Access_Pos 1U /*!< ITM LSR: Access Position */ +#define ITM_LSR_Access_Msk (1UL << ITM_LSR_Access_Pos) /*!< ITM LSR: Access Mask */ + +#define ITM_LSR_Present_Pos 0U /*!< ITM LSR: Present Position */ +#define ITM_LSR_Present_Msk (1UL /*<< ITM_LSR_Present_Pos*/) /*!< ITM LSR: Present Mask */ + +/*@}*/ /* end of group CMSIS_ITM */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DWT Data Watchpoint and Trace (DWT) + \brief Type definitions for the Data Watchpoint and Trace (DWT) + @{ + */ + +/** + \brief Structure type to access the Data Watchpoint and Trace Register (DWT). + */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) Control Register */ + __IOM uint32_t CYCCNT; /*!< Offset: 0x004 (R/W) Cycle Count Register */ + __IOM uint32_t CPICNT; /*!< Offset: 0x008 (R/W) CPI Count Register */ + __IOM uint32_t EXCCNT; /*!< Offset: 0x00C (R/W) Exception Overhead Count Register */ + __IOM uint32_t SLEEPCNT; /*!< Offset: 0x010 (R/W) Sleep Count Register */ + __IOM uint32_t LSUCNT; /*!< Offset: 0x014 (R/W) LSU Count Register */ + __IOM uint32_t FOLDCNT; /*!< Offset: 0x018 (R/W) Folded-instruction Count Register */ + __IM uint32_t PCSR; /*!< Offset: 0x01C (R/ ) Program Counter Sample Register */ + __IOM uint32_t COMP0; /*!< Offset: 0x020 (R/W) Comparator Register 0 */ + uint32_t RESERVED1[1U]; + __IOM uint32_t FUNCTION0; /*!< Offset: 0x028 (R/W) Function Register 0 */ + uint32_t RESERVED2[1U]; + __IOM uint32_t COMP1; /*!< Offset: 0x030 (R/W) Comparator Register 1 */ + uint32_t RESERVED3[1U]; + __IOM uint32_t FUNCTION1; /*!< Offset: 0x038 (R/W) Function Register 1 */ + uint32_t RESERVED4[1U]; + __IOM uint32_t COMP2; /*!< Offset: 0x040 (R/W) Comparator Register 2 */ + uint32_t RESERVED5[1U]; + __IOM uint32_t FUNCTION2; /*!< Offset: 0x048 (R/W) Function Register 2 */ + uint32_t RESERVED6[1U]; + __IOM uint32_t COMP3; /*!< Offset: 0x050 (R/W) Comparator Register 3 */ + uint32_t RESERVED7[1U]; + __IOM uint32_t FUNCTION3; /*!< Offset: 0x058 (R/W) Function Register 3 */ + uint32_t RESERVED8[1U]; + __IOM uint32_t COMP4; /*!< Offset: 0x060 (R/W) Comparator Register 4 */ + uint32_t RESERVED9[1U]; + __IOM uint32_t FUNCTION4; /*!< Offset: 0x068 (R/W) Function Register 4 */ + uint32_t RESERVED10[1U]; + __IOM uint32_t COMP5; /*!< Offset: 0x070 (R/W) Comparator Register 5 */ + uint32_t RESERVED11[1U]; + __IOM uint32_t FUNCTION5; /*!< Offset: 0x078 (R/W) Function Register 5 */ + uint32_t RESERVED12[1U]; + __IOM uint32_t COMP6; /*!< Offset: 0x080 (R/W) Comparator 
Register 6 */ + uint32_t RESERVED13[1U]; + __IOM uint32_t FUNCTION6; /*!< Offset: 0x088 (R/W) Function Register 6 */ + uint32_t RESERVED14[1U]; + __IOM uint32_t COMP7; /*!< Offset: 0x090 (R/W) Comparator Register 7 */ + uint32_t RESERVED15[1U]; + __IOM uint32_t FUNCTION7; /*!< Offset: 0x098 (R/W) Function Register 7 */ + uint32_t RESERVED16[1U]; + __IOM uint32_t COMP8; /*!< Offset: 0x0A0 (R/W) Comparator Register 8 */ + uint32_t RESERVED17[1U]; + __IOM uint32_t FUNCTION8; /*!< Offset: 0x0A8 (R/W) Function Register 8 */ + uint32_t RESERVED18[1U]; + __IOM uint32_t COMP9; /*!< Offset: 0x0B0 (R/W) Comparator Register 9 */ + uint32_t RESERVED19[1U]; + __IOM uint32_t FUNCTION9; /*!< Offset: 0x0B8 (R/W) Function Register 9 */ + uint32_t RESERVED20[1U]; + __IOM uint32_t COMP10; /*!< Offset: 0x0C0 (R/W) Comparator Register 10 */ + uint32_t RESERVED21[1U]; + __IOM uint32_t FUNCTION10; /*!< Offset: 0x0C8 (R/W) Function Register 10 */ + uint32_t RESERVED22[1U]; + __IOM uint32_t COMP11; /*!< Offset: 0x0D0 (R/W) Comparator Register 11 */ + uint32_t RESERVED23[1U]; + __IOM uint32_t FUNCTION11; /*!< Offset: 0x0D8 (R/W) Function Register 11 */ + uint32_t RESERVED24[1U]; + __IOM uint32_t COMP12; /*!< Offset: 0x0E0 (R/W) Comparator Register 12 */ + uint32_t RESERVED25[1U]; + __IOM uint32_t FUNCTION12; /*!< Offset: 0x0E8 (R/W) Function Register 12 */ + uint32_t RESERVED26[1U]; + __IOM uint32_t COMP13; /*!< Offset: 0x0F0 (R/W) Comparator Register 13 */ + uint32_t RESERVED27[1U]; + __IOM uint32_t FUNCTION13; /*!< Offset: 0x0F8 (R/W) Function Register 13 */ + uint32_t RESERVED28[1U]; + __IOM uint32_t COMP14; /*!< Offset: 0x100 (R/W) Comparator Register 14 */ + uint32_t RESERVED29[1U]; + __IOM uint32_t FUNCTION14; /*!< Offset: 0x108 (R/W) Function Register 14 */ + uint32_t RESERVED30[1U]; + __IOM uint32_t COMP15; /*!< Offset: 0x110 (R/W) Comparator Register 15 */ + uint32_t RESERVED31[1U]; + __IOM uint32_t FUNCTION15; /*!< Offset: 0x118 (R/W) Function Register 15 */ + uint32_t RESERVED32[934U]; + __IM uint32_t LSR; /*!< Offset: 0xFB4 (R ) Lock Status Register */ + uint32_t RESERVED33[1U]; + __IM uint32_t DEVARCH; /*!< Offset: 0xFBC (R/ ) Device Architecture Register */ +} DWT_Type; + +/* DWT Control Register Definitions */ +#define DWT_CTRL_NUMCOMP_Pos 28U /*!< DWT CTRL: NUMCOMP Position */ +#define DWT_CTRL_NUMCOMP_Msk (0xFUL << DWT_CTRL_NUMCOMP_Pos) /*!< DWT CTRL: NUMCOMP Mask */ + +#define DWT_CTRL_NOTRCPKT_Pos 27U /*!< DWT CTRL: NOTRCPKT Position */ +#define DWT_CTRL_NOTRCPKT_Msk (0x1UL << DWT_CTRL_NOTRCPKT_Pos) /*!< DWT CTRL: NOTRCPKT Mask */ + +#define DWT_CTRL_NOEXTTRIG_Pos 26U /*!< DWT CTRL: NOEXTTRIG Position */ +#define DWT_CTRL_NOEXTTRIG_Msk (0x1UL << DWT_CTRL_NOEXTTRIG_Pos) /*!< DWT CTRL: NOEXTTRIG Mask */ + +#define DWT_CTRL_NOCYCCNT_Pos 25U /*!< DWT CTRL: NOCYCCNT Position */ +#define DWT_CTRL_NOCYCCNT_Msk (0x1UL << DWT_CTRL_NOCYCCNT_Pos) /*!< DWT CTRL: NOCYCCNT Mask */ + +#define DWT_CTRL_NOPRFCNT_Pos 24U /*!< DWT CTRL: NOPRFCNT Position */ +#define DWT_CTRL_NOPRFCNT_Msk (0x1UL << DWT_CTRL_NOPRFCNT_Pos) /*!< DWT CTRL: NOPRFCNT Mask */ + +#define DWT_CTRL_CYCDISS_Pos 23U /*!< DWT CTRL: CYCDISS Position */ +#define DWT_CTRL_CYCDISS_Msk (0x1UL << DWT_CTRL_CYCDISS_Pos) /*!< DWT CTRL: CYCDISS Mask */ + +#define DWT_CTRL_CYCEVTENA_Pos 22U /*!< DWT CTRL: CYCEVTENA Position */ +#define DWT_CTRL_CYCEVTENA_Msk (0x1UL << DWT_CTRL_CYCEVTENA_Pos) /*!< DWT CTRL: CYCEVTENA Mask */ + +#define DWT_CTRL_FOLDEVTENA_Pos 21U /*!< DWT CTRL: FOLDEVTENA Position */ +#define DWT_CTRL_FOLDEVTENA_Msk (0x1UL << 
DWT_CTRL_FOLDEVTENA_Pos) /*!< DWT CTRL: FOLDEVTENA Mask */ + +#define DWT_CTRL_LSUEVTENA_Pos 20U /*!< DWT CTRL: LSUEVTENA Position */ +#define DWT_CTRL_LSUEVTENA_Msk (0x1UL << DWT_CTRL_LSUEVTENA_Pos) /*!< DWT CTRL: LSUEVTENA Mask */ + +#define DWT_CTRL_SLEEPEVTENA_Pos 19U /*!< DWT CTRL: SLEEPEVTENA Position */ +#define DWT_CTRL_SLEEPEVTENA_Msk (0x1UL << DWT_CTRL_SLEEPEVTENA_Pos) /*!< DWT CTRL: SLEEPEVTENA Mask */ + +#define DWT_CTRL_EXCEVTENA_Pos 18U /*!< DWT CTRL: EXCEVTENA Position */ +#define DWT_CTRL_EXCEVTENA_Msk (0x1UL << DWT_CTRL_EXCEVTENA_Pos) /*!< DWT CTRL: EXCEVTENA Mask */ + +#define DWT_CTRL_CPIEVTENA_Pos 17U /*!< DWT CTRL: CPIEVTENA Position */ +#define DWT_CTRL_CPIEVTENA_Msk (0x1UL << DWT_CTRL_CPIEVTENA_Pos) /*!< DWT CTRL: CPIEVTENA Mask */ + +#define DWT_CTRL_EXCTRCENA_Pos 16U /*!< DWT CTRL: EXCTRCENA Position */ +#define DWT_CTRL_EXCTRCENA_Msk (0x1UL << DWT_CTRL_EXCTRCENA_Pos) /*!< DWT CTRL: EXCTRCENA Mask */ + +#define DWT_CTRL_PCSAMPLENA_Pos 12U /*!< DWT CTRL: PCSAMPLENA Position */ +#define DWT_CTRL_PCSAMPLENA_Msk (0x1UL << DWT_CTRL_PCSAMPLENA_Pos) /*!< DWT CTRL: PCSAMPLENA Mask */ + +#define DWT_CTRL_SYNCTAP_Pos 10U /*!< DWT CTRL: SYNCTAP Position */ +#define DWT_CTRL_SYNCTAP_Msk (0x3UL << DWT_CTRL_SYNCTAP_Pos) /*!< DWT CTRL: SYNCTAP Mask */ + +#define DWT_CTRL_CYCTAP_Pos 9U /*!< DWT CTRL: CYCTAP Position */ +#define DWT_CTRL_CYCTAP_Msk (0x1UL << DWT_CTRL_CYCTAP_Pos) /*!< DWT CTRL: CYCTAP Mask */ + +#define DWT_CTRL_POSTINIT_Pos 5U /*!< DWT CTRL: POSTINIT Position */ +#define DWT_CTRL_POSTINIT_Msk (0xFUL << DWT_CTRL_POSTINIT_Pos) /*!< DWT CTRL: POSTINIT Mask */ + +#define DWT_CTRL_POSTPRESET_Pos 1U /*!< DWT CTRL: POSTPRESET Position */ +#define DWT_CTRL_POSTPRESET_Msk (0xFUL << DWT_CTRL_POSTPRESET_Pos) /*!< DWT CTRL: POSTPRESET Mask */ + +#define DWT_CTRL_CYCCNTENA_Pos 0U /*!< DWT CTRL: CYCCNTENA Position */ +#define DWT_CTRL_CYCCNTENA_Msk (0x1UL /*<< DWT_CTRL_CYCCNTENA_Pos*/) /*!< DWT CTRL: CYCCNTENA Mask */ + +/* DWT CPI Count Register Definitions */ +#define DWT_CPICNT_CPICNT_Pos 0U /*!< DWT CPICNT: CPICNT Position */ +#define DWT_CPICNT_CPICNT_Msk (0xFFUL /*<< DWT_CPICNT_CPICNT_Pos*/) /*!< DWT CPICNT: CPICNT Mask */ + +/* DWT Exception Overhead Count Register Definitions */ +#define DWT_EXCCNT_EXCCNT_Pos 0U /*!< DWT EXCCNT: EXCCNT Position */ +#define DWT_EXCCNT_EXCCNT_Msk (0xFFUL /*<< DWT_EXCCNT_EXCCNT_Pos*/) /*!< DWT EXCCNT: EXCCNT Mask */ + +/* DWT Sleep Count Register Definitions */ +#define DWT_SLEEPCNT_SLEEPCNT_Pos 0U /*!< DWT SLEEPCNT: SLEEPCNT Position */ +#define DWT_SLEEPCNT_SLEEPCNT_Msk (0xFFUL /*<< DWT_SLEEPCNT_SLEEPCNT_Pos*/) /*!< DWT SLEEPCNT: SLEEPCNT Mask */ + +/* DWT LSU Count Register Definitions */ +#define DWT_LSUCNT_LSUCNT_Pos 0U /*!< DWT LSUCNT: LSUCNT Position */ +#define DWT_LSUCNT_LSUCNT_Msk (0xFFUL /*<< DWT_LSUCNT_LSUCNT_Pos*/) /*!< DWT LSUCNT: LSUCNT Mask */ + +/* DWT Folded-instruction Count Register Definitions */ +#define DWT_FOLDCNT_FOLDCNT_Pos 0U /*!< DWT FOLDCNT: FOLDCNT Position */ +#define DWT_FOLDCNT_FOLDCNT_Msk (0xFFUL /*<< DWT_FOLDCNT_FOLDCNT_Pos*/) /*!< DWT FOLDCNT: FOLDCNT Mask */ + +/* DWT Comparator Function Register Definitions */ +#define DWT_FUNCTION_ID_Pos 27U /*!< DWT FUNCTION: ID Position */ +#define DWT_FUNCTION_ID_Msk (0x1FUL << DWT_FUNCTION_ID_Pos) /*!< DWT FUNCTION: ID Mask */ + +#define DWT_FUNCTION_MATCHED_Pos 24U /*!< DWT FUNCTION: MATCHED Position */ +#define DWT_FUNCTION_MATCHED_Msk (0x1UL << DWT_FUNCTION_MATCHED_Pos) /*!< DWT FUNCTION: MATCHED Mask */ + +#define DWT_FUNCTION_DATAVSIZE_Pos 10U /*!< DWT 
FUNCTION: DATAVSIZE Position */ +#define DWT_FUNCTION_DATAVSIZE_Msk (0x3UL << DWT_FUNCTION_DATAVSIZE_Pos) /*!< DWT FUNCTION: DATAVSIZE Mask */ + +#define DWT_FUNCTION_ACTION_Pos 4U /*!< DWT FUNCTION: ACTION Position */ +#define DWT_FUNCTION_ACTION_Msk (0x1UL << DWT_FUNCTION_ACTION_Pos) /*!< DWT FUNCTION: ACTION Mask */ + +#define DWT_FUNCTION_MATCH_Pos 0U /*!< DWT FUNCTION: MATCH Position */ +#define DWT_FUNCTION_MATCH_Msk (0xFUL /*<< DWT_FUNCTION_MATCH_Pos*/) /*!< DWT FUNCTION: MATCH Mask */ + +/*@}*/ /* end of group CMSIS_DWT */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_TPI Trace Port Interface (TPI) + \brief Type definitions for the Trace Port Interface (TPI) + @{ + */ + +/** + \brief Structure type to access the Trace Port Interface Register (TPI). + */ +typedef struct +{ + __IM uint32_t SSPSR; /*!< Offset: 0x000 (R/ ) Supported Parallel Port Size Register */ + __IOM uint32_t CSPSR; /*!< Offset: 0x004 (R/W) Current Parallel Port Size Register */ + uint32_t RESERVED0[2U]; + __IOM uint32_t ACPR; /*!< Offset: 0x010 (R/W) Asynchronous Clock Prescaler Register */ + uint32_t RESERVED1[55U]; + __IOM uint32_t SPPR; /*!< Offset: 0x0F0 (R/W) Selected Pin Protocol Register */ + uint32_t RESERVED2[131U]; + __IM uint32_t FFSR; /*!< Offset: 0x300 (R/ ) Formatter and Flush Status Register */ + __IOM uint32_t FFCR; /*!< Offset: 0x304 (R/W) Formatter and Flush Control Register */ + __IOM uint32_t PSCR; /*!< Offset: 0x308 (R/W) Periodic Synchronization Control Register */ + uint32_t RESERVED3[759U]; + __IM uint32_t TRIGGER; /*!< Offset: 0xEE8 (R/ ) TRIGGER Register */ + __IM uint32_t ITFTTD0; /*!< Offset: 0xEEC (R/ ) Integration Test FIFO Test Data 0 Register */ + __IOM uint32_t ITATBCTR2; /*!< Offset: 0xEF0 (R/W) Integration Test ATB Control Register 2 */ + uint32_t RESERVED4[1U]; + __IM uint32_t ITATBCTR0; /*!< Offset: 0xEF8 (R/ ) Integration Test ATB Control Register 0 */ + __IM uint32_t ITFTTD1; /*!< Offset: 0xEFC (R/ ) Integration Test FIFO Test Data 1 Register */ + __IOM uint32_t ITCTRL; /*!< Offset: 0xF00 (R/W) Integration Mode Control */ + uint32_t RESERVED5[39U]; + __IOM uint32_t CLAIMSET; /*!< Offset: 0xFA0 (R/W) Claim tag set */ + __IOM uint32_t CLAIMCLR; /*!< Offset: 0xFA4 (R/W) Claim tag clear */ + uint32_t RESERVED7[8U]; + __IM uint32_t DEVID; /*!< Offset: 0xFC8 (R/ ) Device Configuration Register */ + __IM uint32_t DEVTYPE; /*!< Offset: 0xFCC (R/ ) Device Type Identifier Register */ +} TPI_Type; + +/* TPI Asynchronous Clock Prescaler Register Definitions */ +#define TPI_ACPR_PRESCALER_Pos 0U /*!< TPI ACPR: PRESCALER Position */ +#define TPI_ACPR_PRESCALER_Msk (0x1FFFUL /*<< TPI_ACPR_PRESCALER_Pos*/) /*!< TPI ACPR: PRESCALER Mask */ + +/* TPI Selected Pin Protocol Register Definitions */ +#define TPI_SPPR_TXMODE_Pos 0U /*!< TPI SPPR: TXMODE Position */ +#define TPI_SPPR_TXMODE_Msk (0x3UL /*<< TPI_SPPR_TXMODE_Pos*/) /*!< TPI SPPR: TXMODE Mask */ + +/* TPI Formatter and Flush Status Register Definitions */ +#define TPI_FFSR_FtNonStop_Pos 3U /*!< TPI FFSR: FtNonStop Position */ +#define TPI_FFSR_FtNonStop_Msk (0x1UL << TPI_FFSR_FtNonStop_Pos) /*!< TPI FFSR: FtNonStop Mask */ + +#define TPI_FFSR_TCPresent_Pos 2U /*!< TPI FFSR: TCPresent Position */ +#define TPI_FFSR_TCPresent_Msk (0x1UL << TPI_FFSR_TCPresent_Pos) /*!< TPI FFSR: TCPresent Mask */ + +#define TPI_FFSR_FtStopped_Pos 1U /*!< TPI FFSR: FtStopped Position */ +#define TPI_FFSR_FtStopped_Msk (0x1UL << TPI_FFSR_FtStopped_Pos) /*!< TPI FFSR: FtStopped Mask */ + +#define TPI_FFSR_FlInProg_Pos 0U /*!< TPI FFSR: FlInProg 
Position */ +#define TPI_FFSR_FlInProg_Msk (0x1UL /*<< TPI_FFSR_FlInProg_Pos*/) /*!< TPI FFSR: FlInProg Mask */ + +/* TPI Formatter and Flush Control Register Definitions */ +#define TPI_FFCR_TrigIn_Pos 8U /*!< TPI FFCR: TrigIn Position */ +#define TPI_FFCR_TrigIn_Msk (0x1UL << TPI_FFCR_TrigIn_Pos) /*!< TPI FFCR: TrigIn Mask */ + +#define TPI_FFCR_FOnMan_Pos 6U /*!< TPI FFCR: FOnMan Position */ +#define TPI_FFCR_FOnMan_Msk (0x1UL << TPI_FFCR_FOnMan_Pos) /*!< TPI FFCR: FOnMan Mask */ + +#define TPI_FFCR_EnFCont_Pos 1U /*!< TPI FFCR: EnFCont Position */ +#define TPI_FFCR_EnFCont_Msk (0x1UL << TPI_FFCR_EnFCont_Pos) /*!< TPI FFCR: EnFCont Mask */ + +/* TPI TRIGGER Register Definitions */ +#define TPI_TRIGGER_TRIGGER_Pos 0U /*!< TPI TRIGGER: TRIGGER Position */ +#define TPI_TRIGGER_TRIGGER_Msk (0x1UL /*<< TPI_TRIGGER_TRIGGER_Pos*/) /*!< TPI TRIGGER: TRIGGER Mask */ + +/* TPI Integration Test FIFO Test Data 0 Register Definitions */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD0: ATB Interface 2 ATVALIDPosition */ +#define TPI_ITFTTD0_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD0: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD0_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD0_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD0: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD0_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD0: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD0_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD0_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD0: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data2_Pos 16U /*!< TPI ITFTTD0: ATB Interface 1 data2 Position */ +#define TPI_ITFTTD0_ATB_IF1_data2_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data2 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data1_Pos 8U /*!< TPI ITFTTD0: ATB Interface 1 data1 Position */ +#define TPI_ITFTTD0_ATB_IF1_data1_Msk (0xFFUL << TPI_ITFTTD0_ATB_IF1_data1_Pos) /*!< TPI ITFTTD0: ATB Interface 1 data1 Mask */ + +#define TPI_ITFTTD0_ATB_IF1_data0_Pos 0U /*!< TPI ITFTTD0: ATB Interface 1 data0 Position */ +#define TPI_ITFTTD0_ATB_IF1_data0_Msk (0xFFUL /*<< TPI_ITFTTD0_ATB_IF1_data0_Pos*/) /*!< TPI ITFTTD0: ATB Interface 1 data0 Mask */ + +/* TPI Integration Test ATB Control Register 2 Register Definitions */ +#define TPI_ITATBCTR2_AFVALID2S_Pos 1U /*!< TPI ITATBCTR2: AFVALID2S Position */ +#define TPI_ITATBCTR2_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID2S_Pos) /*!< TPI ITATBCTR2: AFVALID2SS Mask */ + +#define TPI_ITATBCTR2_AFVALID1S_Pos 1U /*!< TPI ITATBCTR2: AFVALID1S Position */ +#define TPI_ITATBCTR2_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR2_AFVALID1S_Pos) /*!< TPI ITATBCTR2: AFVALID1SS Mask */ + +#define TPI_ITATBCTR2_ATREADY2S_Pos 0U /*!< TPI ITATBCTR2: ATREADY2S Position */ +#define TPI_ITATBCTR2_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY2S_Pos*/) /*!< TPI ITATBCTR2: ATREADY2S Mask */ + +#define TPI_ITATBCTR2_ATREADY1S_Pos 0U /*!< TPI ITATBCTR2: ATREADY1S Position */ +#define TPI_ITATBCTR2_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR2_ATREADY1S_Pos*/) /*!< TPI ITATBCTR2: ATREADY1S Mask */ + +/* TPI Integration Test FIFO Test Data 1 Register Definitions */ +#define 
TPI_ITFTTD1_ATB_IF2_ATVALID_Pos 29U /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF2_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 2 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF2_bytecount_Pos 27U /*!< TPI ITFTTD1: ATB Interface 2 byte count Position */ +#define TPI_ITFTTD1_ATB_IF2_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF2_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 2 byte count Mask */ + +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Pos 26U /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Position */ +#define TPI_ITFTTD1_ATB_IF1_ATVALID_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_ATVALID_Pos) /*!< TPI ITFTTD1: ATB Interface 1 ATVALID Mask */ + +#define TPI_ITFTTD1_ATB_IF1_bytecount_Pos 24U /*!< TPI ITFTTD1: ATB Interface 1 byte count Position */ +#define TPI_ITFTTD1_ATB_IF1_bytecount_Msk (0x3UL << TPI_ITFTTD1_ATB_IF1_bytecount_Pos) /*!< TPI ITFTTD1: ATB Interface 1 byte countt Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data2_Pos 16U /*!< TPI ITFTTD1: ATB Interface 2 data2 Position */ +#define TPI_ITFTTD1_ATB_IF2_data2_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data2 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data1_Pos 8U /*!< TPI ITFTTD1: ATB Interface 2 data1 Position */ +#define TPI_ITFTTD1_ATB_IF2_data1_Msk (0xFFUL << TPI_ITFTTD1_ATB_IF2_data1_Pos) /*!< TPI ITFTTD1: ATB Interface 2 data1 Mask */ + +#define TPI_ITFTTD1_ATB_IF2_data0_Pos 0U /*!< TPI ITFTTD1: ATB Interface 2 data0 Position */ +#define TPI_ITFTTD1_ATB_IF2_data0_Msk (0xFFUL /*<< TPI_ITFTTD1_ATB_IF2_data0_Pos*/) /*!< TPI ITFTTD1: ATB Interface 2 data0 Mask */ + +/* TPI Integration Test ATB Control Register 0 Definitions */ +#define TPI_ITATBCTR0_AFVALID2S_Pos 1U /*!< TPI ITATBCTR0: AFVALID2S Position */ +#define TPI_ITATBCTR0_AFVALID2S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID2S_Pos) /*!< TPI ITATBCTR0: AFVALID2SS Mask */ + +#define TPI_ITATBCTR0_AFVALID1S_Pos 1U /*!< TPI ITATBCTR0: AFVALID1S Position */ +#define TPI_ITATBCTR0_AFVALID1S_Msk (0x1UL << TPI_ITATBCTR0_AFVALID1S_Pos) /*!< TPI ITATBCTR0: AFVALID1SS Mask */ + +#define TPI_ITATBCTR0_ATREADY2S_Pos 0U /*!< TPI ITATBCTR0: ATREADY2S Position */ +#define TPI_ITATBCTR0_ATREADY2S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY2S_Pos*/) /*!< TPI ITATBCTR0: ATREADY2S Mask */ + +#define TPI_ITATBCTR0_ATREADY1S_Pos 0U /*!< TPI ITATBCTR0: ATREADY1S Position */ +#define TPI_ITATBCTR0_ATREADY1S_Msk (0x1UL /*<< TPI_ITATBCTR0_ATREADY1S_Pos*/) /*!< TPI ITATBCTR0: ATREADY1S Mask */ + +/* TPI Integration Mode Control Register Definitions */ +#define TPI_ITCTRL_Mode_Pos 0U /*!< TPI ITCTRL: Mode Position */ +#define TPI_ITCTRL_Mode_Msk (0x3UL /*<< TPI_ITCTRL_Mode_Pos*/) /*!< TPI ITCTRL: Mode Mask */ + +/* TPI DEVID Register Definitions */ +#define TPI_DEVID_NRZVALID_Pos 11U /*!< TPI DEVID: NRZVALID Position */ +#define TPI_DEVID_NRZVALID_Msk (0x1UL << TPI_DEVID_NRZVALID_Pos) /*!< TPI DEVID: NRZVALID Mask */ + +#define TPI_DEVID_MANCVALID_Pos 10U /*!< TPI DEVID: MANCVALID Position */ +#define TPI_DEVID_MANCVALID_Msk (0x1UL << TPI_DEVID_MANCVALID_Pos) /*!< TPI DEVID: MANCVALID Mask */ + +#define TPI_DEVID_PTINVALID_Pos 9U /*!< TPI DEVID: PTINVALID Position */ +#define TPI_DEVID_PTINVALID_Msk (0x1UL << TPI_DEVID_PTINVALID_Pos) /*!< TPI DEVID: PTINVALID Mask */ + +#define TPI_DEVID_FIFOSZ_Pos 6U /*!< TPI DEVID: FIFOSZ Position */ +#define TPI_DEVID_FIFOSZ_Msk (0x7UL << TPI_DEVID_FIFOSZ_Pos) /*!< TPI DEVID: FIFOSZ Mask */ + +#define TPI_DEVID_NrTraceInput_Pos 0U /*!< TPI DEVID: NrTraceInput Position */ +#define 
TPI_DEVID_NrTraceInput_Msk (0x3FUL /*<< TPI_DEVID_NrTraceInput_Pos*/) /*!< TPI DEVID: NrTraceInput Mask */ + +/* TPI DEVTYPE Register Definitions */ +#define TPI_DEVTYPE_SubType_Pos 4U /*!< TPI DEVTYPE: SubType Position */ +#define TPI_DEVTYPE_SubType_Msk (0xFUL /*<< TPI_DEVTYPE_SubType_Pos*/) /*!< TPI DEVTYPE: SubType Mask */ + +#define TPI_DEVTYPE_MajorType_Pos 0U /*!< TPI DEVTYPE: MajorType Position */ +#define TPI_DEVTYPE_MajorType_Msk (0xFUL << TPI_DEVTYPE_MajorType_Pos) /*!< TPI DEVTYPE: MajorType Mask */ + +/*@}*/ /* end of group CMSIS_TPI */ + + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_MPU Memory Protection Unit (MPU) + \brief Type definitions for the Memory Protection Unit (MPU) + @{ + */ + +/** + \brief Structure type to access the Memory Protection Unit (MPU). + */ +typedef struct +{ + __IM uint32_t TYPE; /*!< Offset: 0x000 (R/ ) MPU Type Register */ + __IOM uint32_t CTRL; /*!< Offset: 0x004 (R/W) MPU Control Register */ + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) MPU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) MPU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) MPU Region Limit Address Register */ + __IOM uint32_t RBAR_A1; /*!< Offset: 0x014 (R/W) MPU Region Base Address Register Alias 1 */ + __IOM uint32_t RLAR_A1; /*!< Offset: 0x018 (R/W) MPU Region Limit Address Register Alias 1 */ + __IOM uint32_t RBAR_A2; /*!< Offset: 0x01C (R/W) MPU Region Base Address Register Alias 2 */ + __IOM uint32_t RLAR_A2; /*!< Offset: 0x020 (R/W) MPU Region Limit Address Register Alias 2 */ + __IOM uint32_t RBAR_A3; /*!< Offset: 0x024 (R/W) MPU Region Base Address Register Alias 3 */ + __IOM uint32_t RLAR_A3; /*!< Offset: 0x028 (R/W) MPU Region Limit Address Register Alias 3 */ + uint32_t RESERVED0[1]; + union { + __IOM uint32_t MAIR[2]; + struct { + __IOM uint32_t MAIR0; /*!< Offset: 0x030 (R/W) MPU Memory Attribute Indirection Register 0 */ + __IOM uint32_t MAIR1; /*!< Offset: 0x034 (R/W) MPU Memory Attribute Indirection Register 1 */ + }; + }; +} MPU_Type; + +#define MPU_TYPE_RALIASES 4U + +/* MPU Type Register Definitions */ +#define MPU_TYPE_IREGION_Pos 16U /*!< MPU TYPE: IREGION Position */ +#define MPU_TYPE_IREGION_Msk (0xFFUL << MPU_TYPE_IREGION_Pos) /*!< MPU TYPE: IREGION Mask */ + +#define MPU_TYPE_DREGION_Pos 8U /*!< MPU TYPE: DREGION Position */ +#define MPU_TYPE_DREGION_Msk (0xFFUL << MPU_TYPE_DREGION_Pos) /*!< MPU TYPE: DREGION Mask */ + +#define MPU_TYPE_SEPARATE_Pos 0U /*!< MPU TYPE: SEPARATE Position */ +#define MPU_TYPE_SEPARATE_Msk (1UL /*<< MPU_TYPE_SEPARATE_Pos*/) /*!< MPU TYPE: SEPARATE Mask */ + +/* MPU Control Register Definitions */ +#define MPU_CTRL_PRIVDEFENA_Pos 2U /*!< MPU CTRL: PRIVDEFENA Position */ +#define MPU_CTRL_PRIVDEFENA_Msk (1UL << MPU_CTRL_PRIVDEFENA_Pos) /*!< MPU CTRL: PRIVDEFENA Mask */ + +#define MPU_CTRL_HFNMIENA_Pos 1U /*!< MPU CTRL: HFNMIENA Position */ +#define MPU_CTRL_HFNMIENA_Msk (1UL << MPU_CTRL_HFNMIENA_Pos) /*!< MPU CTRL: HFNMIENA Mask */ + +#define MPU_CTRL_ENABLE_Pos 0U /*!< MPU CTRL: ENABLE Position */ +#define MPU_CTRL_ENABLE_Msk (1UL /*<< MPU_CTRL_ENABLE_Pos*/) /*!< MPU CTRL: ENABLE Mask */ + +/* MPU Region Number Register Definitions */ +#define MPU_RNR_REGION_Pos 0U /*!< MPU RNR: REGION Position */ +#define MPU_RNR_REGION_Msk (0xFFUL /*<< MPU_RNR_REGION_Pos*/) /*!< MPU RNR: REGION Mask */ + +/* MPU Region Base Address Register Definitions */ +#define MPU_RBAR_BASE_Pos 5U /*!< MPU RBAR: BASE Position */ 
+#define MPU_RBAR_BASE_Msk (0x7FFFFFFUL << MPU_RBAR_BASE_Pos) /*!< MPU RBAR: BASE Mask */ + +#define MPU_RBAR_SH_Pos 3U /*!< MPU RBAR: SH Position */ +#define MPU_RBAR_SH_Msk (0x3UL << MPU_RBAR_SH_Pos) /*!< MPU RBAR: SH Mask */ + +#define MPU_RBAR_AP_Pos 1U /*!< MPU RBAR: AP Position */ +#define MPU_RBAR_AP_Msk (0x3UL << MPU_RBAR_AP_Pos) /*!< MPU RBAR: AP Mask */ + +#define MPU_RBAR_XN_Pos 0U /*!< MPU RBAR: XN Position */ +#define MPU_RBAR_XN_Msk (01UL /*<< MPU_RBAR_XN_Pos*/) /*!< MPU RBAR: XN Mask */ + +/* MPU Region Limit Address Register Definitions */ +#define MPU_RLAR_LIMIT_Pos 5U /*!< MPU RLAR: LIMIT Position */ +#define MPU_RLAR_LIMIT_Msk (0x7FFFFFFUL << MPU_RLAR_LIMIT_Pos) /*!< MPU RLAR: LIMIT Mask */ + +#define MPU_RLAR_AttrIndx_Pos 1U /*!< MPU RLAR: AttrIndx Position */ +#define MPU_RLAR_AttrIndx_Msk (0x7UL << MPU_RLAR_AttrIndx_Pos) /*!< MPU RLAR: AttrIndx Mask */ + +#define MPU_RLAR_EN_Pos 0U /*!< MPU RLAR: Region enable bit Position */ +#define MPU_RLAR_EN_Msk (1UL /*<< MPU_RLAR_EN_Pos*/) /*!< MPU RLAR: Region enable bit Disable Mask */ + +/* MPU Memory Attribute Indirection Register 0 Definitions */ +#define MPU_MAIR0_Attr3_Pos 24U /*!< MPU MAIR0: Attr3 Position */ +#define MPU_MAIR0_Attr3_Msk (0xFFUL << MPU_MAIR0_Attr3_Pos) /*!< MPU MAIR0: Attr3 Mask */ + +#define MPU_MAIR0_Attr2_Pos 16U /*!< MPU MAIR0: Attr2 Position */ +#define MPU_MAIR0_Attr2_Msk (0xFFUL << MPU_MAIR0_Attr2_Pos) /*!< MPU MAIR0: Attr2 Mask */ + +#define MPU_MAIR0_Attr1_Pos 8U /*!< MPU MAIR0: Attr1 Position */ +#define MPU_MAIR0_Attr1_Msk (0xFFUL << MPU_MAIR0_Attr1_Pos) /*!< MPU MAIR0: Attr1 Mask */ + +#define MPU_MAIR0_Attr0_Pos 0U /*!< MPU MAIR0: Attr0 Position */ +#define MPU_MAIR0_Attr0_Msk (0xFFUL /*<< MPU_MAIR0_Attr0_Pos*/) /*!< MPU MAIR0: Attr0 Mask */ + +/* MPU Memory Attribute Indirection Register 1 Definitions */ +#define MPU_MAIR1_Attr7_Pos 24U /*!< MPU MAIR1: Attr7 Position */ +#define MPU_MAIR1_Attr7_Msk (0xFFUL << MPU_MAIR1_Attr7_Pos) /*!< MPU MAIR1: Attr7 Mask */ + +#define MPU_MAIR1_Attr6_Pos 16U /*!< MPU MAIR1: Attr6 Position */ +#define MPU_MAIR1_Attr6_Msk (0xFFUL << MPU_MAIR1_Attr6_Pos) /*!< MPU MAIR1: Attr6 Mask */ + +#define MPU_MAIR1_Attr5_Pos 8U /*!< MPU MAIR1: Attr5 Position */ +#define MPU_MAIR1_Attr5_Msk (0xFFUL << MPU_MAIR1_Attr5_Pos) /*!< MPU MAIR1: Attr5 Mask */ + +#define MPU_MAIR1_Attr4_Pos 0U /*!< MPU MAIR1: Attr4 Position */ +#define MPU_MAIR1_Attr4_Msk (0xFFUL /*<< MPU_MAIR1_Attr4_Pos*/) /*!< MPU MAIR1: Attr4 Mask */ + +/*@} end of group CMSIS_MPU */ +#endif + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_SAU Security Attribution Unit (SAU) + \brief Type definitions for the Security Attribution Unit (SAU) + @{ + */ + +/** + \brief Structure type to access the Security Attribution Unit (SAU). 
+ */ +typedef struct +{ + __IOM uint32_t CTRL; /*!< Offset: 0x000 (R/W) SAU Control Register */ + __IM uint32_t TYPE; /*!< Offset: 0x004 (R/ ) SAU Type Register */ +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) + __IOM uint32_t RNR; /*!< Offset: 0x008 (R/W) SAU Region Number Register */ + __IOM uint32_t RBAR; /*!< Offset: 0x00C (R/W) SAU Region Base Address Register */ + __IOM uint32_t RLAR; /*!< Offset: 0x010 (R/W) SAU Region Limit Address Register */ +#else + uint32_t RESERVED0[3]; +#endif + __IOM uint32_t SFSR; /*!< Offset: 0x014 (R/W) Secure Fault Status Register */ + __IOM uint32_t SFAR; /*!< Offset: 0x018 (R/W) Secure Fault Address Register */ +} SAU_Type; + +/* SAU Control Register Definitions */ +#define SAU_CTRL_ALLNS_Pos 1U /*!< SAU CTRL: ALLNS Position */ +#define SAU_CTRL_ALLNS_Msk (1UL << SAU_CTRL_ALLNS_Pos) /*!< SAU CTRL: ALLNS Mask */ + +#define SAU_CTRL_ENABLE_Pos 0U /*!< SAU CTRL: ENABLE Position */ +#define SAU_CTRL_ENABLE_Msk (1UL /*<< SAU_CTRL_ENABLE_Pos*/) /*!< SAU CTRL: ENABLE Mask */ + +/* SAU Type Register Definitions */ +#define SAU_TYPE_SREGION_Pos 0U /*!< SAU TYPE: SREGION Position */ +#define SAU_TYPE_SREGION_Msk (0xFFUL /*<< SAU_TYPE_SREGION_Pos*/) /*!< SAU TYPE: SREGION Mask */ + +#if defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) +/* SAU Region Number Register Definitions */ +#define SAU_RNR_REGION_Pos 0U /*!< SAU RNR: REGION Position */ +#define SAU_RNR_REGION_Msk (0xFFUL /*<< SAU_RNR_REGION_Pos*/) /*!< SAU RNR: REGION Mask */ + +/* SAU Region Base Address Register Definitions */ +#define SAU_RBAR_BADDR_Pos 5U /*!< SAU RBAR: BADDR Position */ +#define SAU_RBAR_BADDR_Msk (0x7FFFFFFUL << SAU_RBAR_BADDR_Pos) /*!< SAU RBAR: BADDR Mask */ + +/* SAU Region Limit Address Register Definitions */ +#define SAU_RLAR_LADDR_Pos 5U /*!< SAU RLAR: LADDR Position */ +#define SAU_RLAR_LADDR_Msk (0x7FFFFFFUL << SAU_RLAR_LADDR_Pos) /*!< SAU RLAR: LADDR Mask */ + +#define SAU_RLAR_NSC_Pos 1U /*!< SAU RLAR: NSC Position */ +#define SAU_RLAR_NSC_Msk (1UL << SAU_RLAR_NSC_Pos) /*!< SAU RLAR: NSC Mask */ + +#define SAU_RLAR_ENABLE_Pos 0U /*!< SAU RLAR: ENABLE Position */ +#define SAU_RLAR_ENABLE_Msk (1UL /*<< SAU_RLAR_ENABLE_Pos*/) /*!< SAU RLAR: ENABLE Mask */ + +#endif /* defined (__SAUREGION_PRESENT) && (__SAUREGION_PRESENT == 1U) */ + +/* Secure Fault Status Register Definitions */ +#define SAU_SFSR_LSERR_Pos 7U /*!< SAU SFSR: LSERR Position */ +#define SAU_SFSR_LSERR_Msk (1UL << SAU_SFSR_LSERR_Pos) /*!< SAU SFSR: LSERR Mask */ + +#define SAU_SFSR_SFARVALID_Pos 6U /*!< SAU SFSR: SFARVALID Position */ +#define SAU_SFSR_SFARVALID_Msk (1UL << SAU_SFSR_SFARVALID_Pos) /*!< SAU SFSR: SFARVALID Mask */ + +#define SAU_SFSR_LSPERR_Pos 5U /*!< SAU SFSR: LSPERR Position */ +#define SAU_SFSR_LSPERR_Msk (1UL << SAU_SFSR_LSPERR_Pos) /*!< SAU SFSR: LSPERR Mask */ + +#define SAU_SFSR_INVTRAN_Pos 4U /*!< SAU SFSR: INVTRAN Position */ +#define SAU_SFSR_INVTRAN_Msk (1UL << SAU_SFSR_INVTRAN_Pos) /*!< SAU SFSR: INVTRAN Mask */ + +#define SAU_SFSR_AUVIOL_Pos 3U /*!< SAU SFSR: AUVIOL Position */ +#define SAU_SFSR_AUVIOL_Msk (1UL << SAU_SFSR_AUVIOL_Pos) /*!< SAU SFSR: AUVIOL Mask */ + +#define SAU_SFSR_INVER_Pos 2U /*!< SAU SFSR: INVER Position */ +#define SAU_SFSR_INVER_Msk (1UL << SAU_SFSR_INVER_Pos) /*!< SAU SFSR: INVER Mask */ + +#define SAU_SFSR_INVIS_Pos 1U /*!< SAU SFSR: INVIS Position */ +#define SAU_SFSR_INVIS_Msk (1UL << SAU_SFSR_INVIS_Pos) /*!< SAU SFSR: INVIS Mask */ + +#define SAU_SFSR_INVEP_Pos 0U /*!< SAU SFSR: INVEP Position */ +#define 
SAU_SFSR_INVEP_Msk (1UL /*<< SAU_SFSR_INVEP_Pos*/) /*!< SAU SFSR: INVEP Mask */ + +/*@} end of group CMSIS_SAU */ +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_FPU Floating Point Unit (FPU) + \brief Type definitions for the Floating Point Unit (FPU) + @{ + */ + +/** + \brief Structure type to access the Floating Point Unit (FPU). + */ +typedef struct +{ + uint32_t RESERVED0[1U]; + __IOM uint32_t FPCCR; /*!< Offset: 0x004 (R/W) Floating-Point Context Control Register */ + __IOM uint32_t FPCAR; /*!< Offset: 0x008 (R/W) Floating-Point Context Address Register */ + __IOM uint32_t FPDSCR; /*!< Offset: 0x00C (R/W) Floating-Point Default Status Control Register */ + __IM uint32_t MVFR0; /*!< Offset: 0x010 (R/ ) Media and VFP Feature Register 0 */ + __IM uint32_t MVFR1; /*!< Offset: 0x014 (R/ ) Media and VFP Feature Register 1 */ + __IM uint32_t MVFR2; /*!< Offset: 0x018 (R/ ) Media and VFP Feature Register 2 */ +} FPU_Type; + +/* Floating-Point Context Control Register Definitions */ +#define FPU_FPCCR_ASPEN_Pos 31U /*!< FPCCR: ASPEN bit Position */ +#define FPU_FPCCR_ASPEN_Msk (1UL << FPU_FPCCR_ASPEN_Pos) /*!< FPCCR: ASPEN bit Mask */ + +#define FPU_FPCCR_LSPEN_Pos 30U /*!< FPCCR: LSPEN Position */ +#define FPU_FPCCR_LSPEN_Msk (1UL << FPU_FPCCR_LSPEN_Pos) /*!< FPCCR: LSPEN bit Mask */ + +#define FPU_FPCCR_LSPENS_Pos 29U /*!< FPCCR: LSPENS Position */ +#define FPU_FPCCR_LSPENS_Msk (1UL << FPU_FPCCR_LSPENS_Pos) /*!< FPCCR: LSPENS bit Mask */ + +#define FPU_FPCCR_CLRONRET_Pos 28U /*!< FPCCR: CLRONRET Position */ +#define FPU_FPCCR_CLRONRET_Msk (1UL << FPU_FPCCR_CLRONRET_Pos) /*!< FPCCR: CLRONRET bit Mask */ + +#define FPU_FPCCR_CLRONRETS_Pos 27U /*!< FPCCR: CLRONRETS Position */ +#define FPU_FPCCR_CLRONRETS_Msk (1UL << FPU_FPCCR_CLRONRETS_Pos) /*!< FPCCR: CLRONRETS bit Mask */ + +#define FPU_FPCCR_TS_Pos 26U /*!< FPCCR: TS Position */ +#define FPU_FPCCR_TS_Msk (1UL << FPU_FPCCR_TS_Pos) /*!< FPCCR: TS bit Mask */ + +#define FPU_FPCCR_UFRDY_Pos 10U /*!< FPCCR: UFRDY Position */ +#define FPU_FPCCR_UFRDY_Msk (1UL << FPU_FPCCR_UFRDY_Pos) /*!< FPCCR: UFRDY bit Mask */ + +#define FPU_FPCCR_SPLIMVIOL_Pos 9U /*!< FPCCR: SPLIMVIOL Position */ +#define FPU_FPCCR_SPLIMVIOL_Msk (1UL << FPU_FPCCR_SPLIMVIOL_Pos) /*!< FPCCR: SPLIMVIOL bit Mask */ + +#define FPU_FPCCR_MONRDY_Pos 8U /*!< FPCCR: MONRDY Position */ +#define FPU_FPCCR_MONRDY_Msk (1UL << FPU_FPCCR_MONRDY_Pos) /*!< FPCCR: MONRDY bit Mask */ + +#define FPU_FPCCR_SFRDY_Pos 7U /*!< FPCCR: SFRDY Position */ +#define FPU_FPCCR_SFRDY_Msk (1UL << FPU_FPCCR_SFRDY_Pos) /*!< FPCCR: SFRDY bit Mask */ + +#define FPU_FPCCR_BFRDY_Pos 6U /*!< FPCCR: BFRDY Position */ +#define FPU_FPCCR_BFRDY_Msk (1UL << FPU_FPCCR_BFRDY_Pos) /*!< FPCCR: BFRDY bit Mask */ + +#define FPU_FPCCR_MMRDY_Pos 5U /*!< FPCCR: MMRDY Position */ +#define FPU_FPCCR_MMRDY_Msk (1UL << FPU_FPCCR_MMRDY_Pos) /*!< FPCCR: MMRDY bit Mask */ + +#define FPU_FPCCR_HFRDY_Pos 4U /*!< FPCCR: HFRDY Position */ +#define FPU_FPCCR_HFRDY_Msk (1UL << FPU_FPCCR_HFRDY_Pos) /*!< FPCCR: HFRDY bit Mask */ + +#define FPU_FPCCR_THREAD_Pos 3U /*!< FPCCR: processor mode bit Position */ +#define FPU_FPCCR_THREAD_Msk (1UL << FPU_FPCCR_THREAD_Pos) /*!< FPCCR: processor mode active bit Mask */ + +#define FPU_FPCCR_S_Pos 2U /*!< FPCCR: Security status of the FP context bit Position */ +#define FPU_FPCCR_S_Msk (1UL << FPU_FPCCR_S_Pos) /*!< FPCCR: Security status of the FP context bit Mask */ + +#define FPU_FPCCR_USER_Pos 1U /*!< FPCCR: privilege 
level bit Position */ +#define FPU_FPCCR_USER_Msk (1UL << FPU_FPCCR_USER_Pos) /*!< FPCCR: privilege level bit Mask */ + +#define FPU_FPCCR_LSPACT_Pos 0U /*!< FPCCR: Lazy state preservation active bit Position */ +#define FPU_FPCCR_LSPACT_Msk (1UL /*<< FPU_FPCCR_LSPACT_Pos*/) /*!< FPCCR: Lazy state preservation active bit Mask */ + +/* Floating-Point Context Address Register Definitions */ +#define FPU_FPCAR_ADDRESS_Pos 3U /*!< FPCAR: ADDRESS bit Position */ +#define FPU_FPCAR_ADDRESS_Msk (0x1FFFFFFFUL << FPU_FPCAR_ADDRESS_Pos) /*!< FPCAR: ADDRESS bit Mask */ + +/* Floating-Point Default Status Control Register Definitions */ +#define FPU_FPDSCR_AHP_Pos 26U /*!< FPDSCR: AHP bit Position */ +#define FPU_FPDSCR_AHP_Msk (1UL << FPU_FPDSCR_AHP_Pos) /*!< FPDSCR: AHP bit Mask */ + +#define FPU_FPDSCR_DN_Pos 25U /*!< FPDSCR: DN bit Position */ +#define FPU_FPDSCR_DN_Msk (1UL << FPU_FPDSCR_DN_Pos) /*!< FPDSCR: DN bit Mask */ + +#define FPU_FPDSCR_FZ_Pos 24U /*!< FPDSCR: FZ bit Position */ +#define FPU_FPDSCR_FZ_Msk (1UL << FPU_FPDSCR_FZ_Pos) /*!< FPDSCR: FZ bit Mask */ + +#define FPU_FPDSCR_RMode_Pos 22U /*!< FPDSCR: RMode bit Position */ +#define FPU_FPDSCR_RMode_Msk (3UL << FPU_FPDSCR_RMode_Pos) /*!< FPDSCR: RMode bit Mask */ + +/* Media and VFP Feature Register 0 Definitions */ +#define FPU_MVFR0_FP_rounding_modes_Pos 28U /*!< MVFR0: FP rounding modes bits Position */ +#define FPU_MVFR0_FP_rounding_modes_Msk (0xFUL << FPU_MVFR0_FP_rounding_modes_Pos) /*!< MVFR0: FP rounding modes bits Mask */ + +#define FPU_MVFR0_Short_vectors_Pos 24U /*!< MVFR0: Short vectors bits Position */ +#define FPU_MVFR0_Short_vectors_Msk (0xFUL << FPU_MVFR0_Short_vectors_Pos) /*!< MVFR0: Short vectors bits Mask */ + +#define FPU_MVFR0_Square_root_Pos 20U /*!< MVFR0: Square root bits Position */ +#define FPU_MVFR0_Square_root_Msk (0xFUL << FPU_MVFR0_Square_root_Pos) /*!< MVFR0: Square root bits Mask */ + +#define FPU_MVFR0_Divide_Pos 16U /*!< MVFR0: Divide bits Position */ +#define FPU_MVFR0_Divide_Msk (0xFUL << FPU_MVFR0_Divide_Pos) /*!< MVFR0: Divide bits Mask */ + +#define FPU_MVFR0_FP_excep_trapping_Pos 12U /*!< MVFR0: FP exception trapping bits Position */ +#define FPU_MVFR0_FP_excep_trapping_Msk (0xFUL << FPU_MVFR0_FP_excep_trapping_Pos) /*!< MVFR0: FP exception trapping bits Mask */ + +#define FPU_MVFR0_Double_precision_Pos 8U /*!< MVFR0: Double-precision bits Position */ +#define FPU_MVFR0_Double_precision_Msk (0xFUL << FPU_MVFR0_Double_precision_Pos) /*!< MVFR0: Double-precision bits Mask */ + +#define FPU_MVFR0_Single_precision_Pos 4U /*!< MVFR0: Single-precision bits Position */ +#define FPU_MVFR0_Single_precision_Msk (0xFUL << FPU_MVFR0_Single_precision_Pos) /*!< MVFR0: Single-precision bits Mask */ + +#define FPU_MVFR0_A_SIMD_registers_Pos 0U /*!< MVFR0: A_SIMD registers bits Position */ +#define FPU_MVFR0_A_SIMD_registers_Msk (0xFUL /*<< FPU_MVFR0_A_SIMD_registers_Pos*/) /*!< MVFR0: A_SIMD registers bits Mask */ + +/* Media and VFP Feature Register 1 Definitions */ +#define FPU_MVFR1_FP_fused_MAC_Pos 28U /*!< MVFR1: FP fused MAC bits Position */ +#define FPU_MVFR1_FP_fused_MAC_Msk (0xFUL << FPU_MVFR1_FP_fused_MAC_Pos) /*!< MVFR1: FP fused MAC bits Mask */ + +#define FPU_MVFR1_FP_HPFP_Pos 24U /*!< MVFR1: FP HPFP bits Position */ +#define FPU_MVFR1_FP_HPFP_Msk (0xFUL << FPU_MVFR1_FP_HPFP_Pos) /*!< MVFR1: FP HPFP bits Mask */ + +#define FPU_MVFR1_D_NaN_mode_Pos 4U /*!< MVFR1: D_NaN mode bits Position */ +#define FPU_MVFR1_D_NaN_mode_Msk (0xFUL << FPU_MVFR1_D_NaN_mode_Pos) /*!< MVFR1: D_NaN mode bits Mask */ + 
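/* A minimal usage sketch of the MVFR0 feature fields defined above (illustrative only,
   assuming the usual CMSIS instance pointer FPU, i.e. ((FPU_Type *) FPU_BASE), provided
   elsewhere in this header). A non-zero Double_precision field indicates that the FPU
   implements double-precision arithmetic. */
__STATIC_INLINE uint32_t example_FPU_HasDoublePrecision(void)
{
  uint32_t dp = (FPU->MVFR0 & FPU_MVFR0_Double_precision_Msk) >> FPU_MVFR0_Double_precision_Pos;
  return ((uint32_t)((dp != 0UL) ? 1UL : 0UL));
}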
+#define FPU_MVFR1_FtZ_mode_Pos 0U /*!< MVFR1: FtZ mode bits Position */ +#define FPU_MVFR1_FtZ_mode_Msk (0xFUL /*<< FPU_MVFR1_FtZ_mode_Pos*/) /*!< MVFR1: FtZ mode bits Mask */ + +/* Media and VFP Feature Register 2 Definitions */ +#define FPU_MVFR2_FPMisc_Pos 4U /*!< MVFR2: FPMisc bits Position */ +#define FPU_MVFR2_FPMisc_Msk (0xFUL << FPU_MVFR2_FPMisc_Pos) /*!< MVFR2: FPMisc bits Mask */ + +/*@} end of group CMSIS_FPU */ + + + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DCB Debug Control Block + \brief Type definitions for the Debug Control Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Control Block Registers (DCB). + */ +typedef struct +{ + __IOM uint32_t DHCSR; /*!< Offset: 0x000 (R/W) Debug Halting Control and Status Register */ + __OM uint32_t DCRSR; /*!< Offset: 0x004 ( /W) Debug Core Register Selector Register */ + __IOM uint32_t DCRDR; /*!< Offset: 0x008 (R/W) Debug Core Register Data Register */ + __IOM uint32_t DEMCR; /*!< Offset: 0x00C (R/W) Debug Exception and Monitor Control Register */ + uint32_t RESERVED0[1U]; + __IOM uint32_t DAUTHCTRL; /*!< Offset: 0x014 (R/W) Debug Authentication Control Register */ + __IOM uint32_t DSCSR; /*!< Offset: 0x018 (R/W) Debug Security Control and Status Register */ +} DCB_Type; + +/* DHCSR, Debug Halting Control and Status Register Definitions */ +#define DCB_DHCSR_DBGKEY_Pos 16U /*!< DCB DHCSR: Debug key Position */ +#define DCB_DHCSR_DBGKEY_Msk (0xFFFFUL << DCB_DHCSR_DBGKEY_Pos) /*!< DCB DHCSR: Debug key Mask */ + +#define DCB_DHCSR_S_RESTART_ST_Pos 26U /*!< DCB DHCSR: Restart sticky status Position */ +#define DCB_DHCSR_S_RESTART_ST_Msk (0x1UL << DCB_DHCSR_S_RESTART_ST_Pos) /*!< DCB DHCSR: Restart sticky status Mask */ + +#define DCB_DHCSR_S_RESET_ST_Pos 25U /*!< DCB DHCSR: Reset sticky status Position */ +#define DCB_DHCSR_S_RESET_ST_Msk (0x1UL << DCB_DHCSR_S_RESET_ST_Pos) /*!< DCB DHCSR: Reset sticky status Mask */ + +#define DCB_DHCSR_S_RETIRE_ST_Pos 24U /*!< DCB DHCSR: Retire sticky status Position */ +#define DCB_DHCSR_S_RETIRE_ST_Msk (0x1UL << DCB_DHCSR_S_RETIRE_ST_Pos) /*!< DCB DHCSR: Retire sticky status Mask */ + +#define DCB_DHCSR_S_SDE_Pos 20U /*!< DCB DHCSR: Secure debug enabled Position */ +#define DCB_DHCSR_S_SDE_Msk (0x1UL << DCB_DHCSR_S_SDE_Pos) /*!< DCB DHCSR: Secure debug enabled Mask */ + +#define DCB_DHCSR_S_LOCKUP_Pos 19U /*!< DCB DHCSR: Lockup status Position */ +#define DCB_DHCSR_S_LOCKUP_Msk (0x1UL << DCB_DHCSR_S_LOCKUP_Pos) /*!< DCB DHCSR: Lockup status Mask */ + +#define DCB_DHCSR_S_SLEEP_Pos 18U /*!< DCB DHCSR: Sleeping status Position */ +#define DCB_DHCSR_S_SLEEP_Msk (0x1UL << DCB_DHCSR_S_SLEEP_Pos) /*!< DCB DHCSR: Sleeping status Mask */ + +#define DCB_DHCSR_S_HALT_Pos 17U /*!< DCB DHCSR: Halted status Position */ +#define DCB_DHCSR_S_HALT_Msk (0x1UL << DCB_DHCSR_S_HALT_Pos) /*!< DCB DHCSR: Halted status Mask */ + +#define DCB_DHCSR_S_REGRDY_Pos 16U /*!< DCB DHCSR: Register ready status Position */ +#define DCB_DHCSR_S_REGRDY_Msk (0x1UL << DCB_DHCSR_S_REGRDY_Pos) /*!< DCB DHCSR: Register ready status Mask */ + +#define DCB_DHCSR_C_SNAPSTALL_Pos 5U /*!< DCB DHCSR: Snap stall control Position */ +#define DCB_DHCSR_C_SNAPSTALL_Msk (0x1UL << DCB_DHCSR_C_SNAPSTALL_Pos) /*!< DCB DHCSR: Snap stall control Mask */ + +#define DCB_DHCSR_C_MASKINTS_Pos 3U /*!< DCB DHCSR: Mask interrupts control Position */ +#define DCB_DHCSR_C_MASKINTS_Msk (0x1UL << DCB_DHCSR_C_MASKINTS_Pos) /*!< DCB DHCSR: Mask interrupts control Mask */ + +#define DCB_DHCSR_C_STEP_Pos 2U /*!< DCB DHCSR: Step 
control Position */ +#define DCB_DHCSR_C_STEP_Msk (0x1UL << DCB_DHCSR_C_STEP_Pos) /*!< DCB DHCSR: Step control Mask */ + +#define DCB_DHCSR_C_HALT_Pos 1U /*!< DCB DHCSR: Halt control Position */ +#define DCB_DHCSR_C_HALT_Msk (0x1UL << DCB_DHCSR_C_HALT_Pos) /*!< DCB DHCSR: Halt control Mask */ + +#define DCB_DHCSR_C_DEBUGEN_Pos 0U /*!< DCB DHCSR: Debug enable control Position */ +#define DCB_DHCSR_C_DEBUGEN_Msk (0x1UL /*<< DCB_DHCSR_C_DEBUGEN_Pos*/) /*!< DCB DHCSR: Debug enable control Mask */ + +/* DCRSR, Debug Core Register Select Register Definitions */ +#define DCB_DCRSR_REGWnR_Pos 16U /*!< DCB DCRSR: Register write/not-read Position */ +#define DCB_DCRSR_REGWnR_Msk (0x1UL << DCB_DCRSR_REGWnR_Pos) /*!< DCB DCRSR: Register write/not-read Mask */ + +#define DCB_DCRSR_REGSEL_Pos 0U /*!< DCB DCRSR: Register selector Position */ +#define DCB_DCRSR_REGSEL_Msk (0x7FUL /*<< DCB_DCRSR_REGSEL_Pos*/) /*!< DCB DCRSR: Register selector Mask */ + +/* DCRDR, Debug Core Register Data Register Definitions */ +#define DCB_DCRDR_DBGTMP_Pos 0U /*!< DCB DCRDR: Data temporary buffer Position */ +#define DCB_DCRDR_DBGTMP_Msk (0xFFFFFFFFUL /*<< DCB_DCRDR_DBGTMP_Pos*/) /*!< DCB DCRDR: Data temporary buffer Mask */ + +/* DEMCR, Debug Exception and Monitor Control Register Definitions */ +#define DCB_DEMCR_TRCENA_Pos 24U /*!< DCB DEMCR: Trace enable Position */ +#define DCB_DEMCR_TRCENA_Msk (0x1UL << DCB_DEMCR_TRCENA_Pos) /*!< DCB DEMCR: Trace enable Mask */ + +#define DCB_DEMCR_MONPRKEY_Pos 23U /*!< DCB DEMCR: Monitor pend req key Position */ +#define DCB_DEMCR_MONPRKEY_Msk (0x1UL << DCB_DEMCR_MONPRKEY_Pos) /*!< DCB DEMCR: Monitor pend req key Mask */ + +#define DCB_DEMCR_UMON_EN_Pos 21U /*!< DCB DEMCR: Unprivileged monitor enable Position */ +#define DCB_DEMCR_UMON_EN_Msk (0x1UL << DCB_DEMCR_UMON_EN_Pos) /*!< DCB DEMCR: Unprivileged monitor enable Mask */ + +#define DCB_DEMCR_SDME_Pos 20U /*!< DCB DEMCR: Secure DebugMonitor enable Position */ +#define DCB_DEMCR_SDME_Msk (0x1UL << DCB_DEMCR_SDME_Pos) /*!< DCB DEMCR: Secure DebugMonitor enable Mask */ + +#define DCB_DEMCR_MON_REQ_Pos 19U /*!< DCB DEMCR: Monitor request Position */ +#define DCB_DEMCR_MON_REQ_Msk (0x1UL << DCB_DEMCR_MON_REQ_Pos) /*!< DCB DEMCR: Monitor request Mask */ + +#define DCB_DEMCR_MON_STEP_Pos 18U /*!< DCB DEMCR: Monitor step Position */ +#define DCB_DEMCR_MON_STEP_Msk (0x1UL << DCB_DEMCR_MON_STEP_Pos) /*!< DCB DEMCR: Monitor step Mask */ + +#define DCB_DEMCR_MON_PEND_Pos 17U /*!< DCB DEMCR: Monitor pend Position */ +#define DCB_DEMCR_MON_PEND_Msk (0x1UL << DCB_DEMCR_MON_PEND_Pos) /*!< DCB DEMCR: Monitor pend Mask */ + +#define DCB_DEMCR_MON_EN_Pos 16U /*!< DCB DEMCR: Monitor enable Position */ +#define DCB_DEMCR_MON_EN_Msk (0x1UL << DCB_DEMCR_MON_EN_Pos) /*!< DCB DEMCR: Monitor enable Mask */ + +#define DCB_DEMCR_VC_SFERR_Pos 11U /*!< DCB DEMCR: Vector Catch SecureFault Position */ +#define DCB_DEMCR_VC_SFERR_Msk (0x1UL << DCB_DEMCR_VC_SFERR_Pos) /*!< DCB DEMCR: Vector Catch SecureFault Mask */ + +#define DCB_DEMCR_VC_HARDERR_Pos 10U /*!< DCB DEMCR: Vector Catch HardFault errors Position */ +#define DCB_DEMCR_VC_HARDERR_Msk (0x1UL << DCB_DEMCR_VC_HARDERR_Pos) /*!< DCB DEMCR: Vector Catch HardFault errors Mask */ + +#define DCB_DEMCR_VC_INTERR_Pos 9U /*!< DCB DEMCR: Vector Catch interrupt errors Position */ +#define DCB_DEMCR_VC_INTERR_Msk (0x1UL << DCB_DEMCR_VC_INTERR_Pos) /*!< DCB DEMCR: Vector Catch interrupt errors Mask */ + +#define DCB_DEMCR_VC_BUSERR_Pos 8U /*!< DCB DEMCR: Vector Catch BusFault errors Position */ +#define 
DCB_DEMCR_VC_BUSERR_Msk (0x1UL << DCB_DEMCR_VC_BUSERR_Pos) /*!< DCB DEMCR: Vector Catch BusFault errors Mask */ + +#define DCB_DEMCR_VC_STATERR_Pos 7U /*!< DCB DEMCR: Vector Catch state errors Position */ +#define DCB_DEMCR_VC_STATERR_Msk (0x1UL << DCB_DEMCR_VC_STATERR_Pos) /*!< DCB DEMCR: Vector Catch state errors Mask */ + +#define DCB_DEMCR_VC_CHKERR_Pos 6U /*!< DCB DEMCR: Vector Catch check errors Position */ +#define DCB_DEMCR_VC_CHKERR_Msk (0x1UL << DCB_DEMCR_VC_CHKERR_Pos) /*!< DCB DEMCR: Vector Catch check errors Mask */ + +#define DCB_DEMCR_VC_NOCPERR_Pos 5U /*!< DCB DEMCR: Vector Catch NOCP errors Position */ +#define DCB_DEMCR_VC_NOCPERR_Msk (0x1UL << DCB_DEMCR_VC_NOCPERR_Pos) /*!< DCB DEMCR: Vector Catch NOCP errors Mask */ + +#define DCB_DEMCR_VC_MMERR_Pos 4U /*!< DCB DEMCR: Vector Catch MemManage errors Position */ +#define DCB_DEMCR_VC_MMERR_Msk (0x1UL << DCB_DEMCR_VC_MMERR_Pos) /*!< DCB DEMCR: Vector Catch MemManage errors Mask */ + +#define DCB_DEMCR_VC_CORERESET_Pos 0U /*!< DCB DEMCR: Vector Catch Core reset Position */ +#define DCB_DEMCR_VC_CORERESET_Msk (0x1UL /*<< DCB_DEMCR_VC_CORERESET_Pos*/) /*!< DCB DEMCR: Vector Catch Core reset Mask */ + +/* DAUTHCTRL, Debug Authentication Control Register Definitions */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Pos 3U /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPNIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPNIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure non-invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPNIDENSEL_Pos 2U /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPNIDENSEL_Msk (0x1UL << DCB_DAUTHCTRL_SPNIDENSEL_Pos) /*!< DCB DAUTHCTRL: Secure non-invasive debug enable select Mask */ + +#define DCB_DAUTHCTRL_INTSPIDEN_Pos 1U /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Position */ +#define DCB_DAUTHCTRL_INTSPIDEN_Msk (0x1UL << DCB_DAUTHCTRL_INTSPIDEN_Pos) /*!< DCB DAUTHCTRL: Internal Secure invasive debug enable Mask */ + +#define DCB_DAUTHCTRL_SPIDENSEL_Pos 0U /*!< DCB DAUTHCTRL: Secure invasive debug enable select Position */ +#define DCB_DAUTHCTRL_SPIDENSEL_Msk (0x1UL /*<< DCB_DAUTHCTRL_SPIDENSEL_Pos*/) /*!< DCB DAUTHCTRL: Secure invasive debug enable select Mask */ + +/* DSCSR, Debug Security Control and Status Register Definitions */ +#define DCB_DSCSR_CDSKEY_Pos 17U /*!< DCB DSCSR: CDS write-enable key Position */ +#define DCB_DSCSR_CDSKEY_Msk (0x1UL << DCB_DSCSR_CDSKEY_Pos) /*!< DCB DSCSR: CDS write-enable key Mask */ + +#define DCB_DSCSR_CDS_Pos 16U /*!< DCB DSCSR: Current domain Secure Position */ +#define DCB_DSCSR_CDS_Msk (0x1UL << DCB_DSCSR_CDS_Pos) /*!< DCB DSCSR: Current domain Secure Mask */ + +#define DCB_DSCSR_SBRSEL_Pos 1U /*!< DCB DSCSR: Secure banked register select Position */ +#define DCB_DSCSR_SBRSEL_Msk (0x1UL << DCB_DSCSR_SBRSEL_Pos) /*!< DCB DSCSR: Secure banked register select Mask */ + +#define DCB_DSCSR_SBRSELEN_Pos 0U /*!< DCB DSCSR: Secure banked register select enable Position */ +#define DCB_DSCSR_SBRSELEN_Msk (0x1UL /*<< DCB_DSCSR_SBRSELEN_Pos*/) /*!< DCB DSCSR: Secure banked register select enable Mask */ + +/*@} end of group CMSIS_DCB */ + + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_DIB Debug Identification Block + \brief Type definitions for the Debug Identification Block Registers + @{ + */ + +/** + \brief Structure type to access the Debug Identification Block Registers (DIB). 
+ */ +typedef struct +{ + __OM uint32_t DLAR; /*!< Offset: 0x000 ( /W) SCS Software Lock Access Register */ + __IM uint32_t DLSR; /*!< Offset: 0x004 (R/ ) SCS Software Lock Status Register */ + __IM uint32_t DAUTHSTATUS; /*!< Offset: 0x008 (R/ ) Debug Authentication Status Register */ + __IM uint32_t DDEVARCH; /*!< Offset: 0x00C (R/ ) SCS Device Architecture Register */ + __IM uint32_t DDEVTYPE; /*!< Offset: 0x010 (R/ ) SCS Device Type Register */ +} DIB_Type; + +/* DLAR, SCS Software Lock Access Register Definitions */ +#define DIB_DLAR_KEY_Pos 0U /*!< DIB DLAR: KEY Position */ +#define DIB_DLAR_KEY_Msk (0xFFFFFFFFUL /*<< DIB_DLAR_KEY_Pos */) /*!< DIB DLAR: KEY Mask */ + +/* DLSR, SCS Software Lock Status Register Definitions */ +#define DIB_DLSR_nTT_Pos 2U /*!< DIB DLSR: Not thirty-two bit Position */ +#define DIB_DLSR_nTT_Msk (0x1UL << DIB_DLSR_nTT_Pos ) /*!< DIB DLSR: Not thirty-two bit Mask */ + +#define DIB_DLSR_SLK_Pos 1U /*!< DIB DLSR: Software Lock status Position */ +#define DIB_DLSR_SLK_Msk (0x1UL << DIB_DLSR_SLK_Pos ) /*!< DIB DLSR: Software Lock status Mask */ + +#define DIB_DLSR_SLI_Pos 0U /*!< DIB DLSR: Software Lock implemented Position */ +#define DIB_DLSR_SLI_Msk (0x1UL /*<< DIB_DLSR_SLI_Pos*/) /*!< DIB DLSR: Software Lock implemented Mask */ + +/* DAUTHSTATUS, Debug Authentication Status Register Definitions */ +#define DIB_DAUTHSTATUS_SNID_Pos 6U /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_SNID_Msk (0x3UL << DIB_DAUTHSTATUS_SNID_Pos ) /*!< DIB DAUTHSTATUS: Secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_SID_Pos 4U /*!< DIB DAUTHSTATUS: Secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_SID_Msk (0x3UL << DIB_DAUTHSTATUS_SID_Pos ) /*!< DIB DAUTHSTATUS: Secure Invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSNID_Pos 2U /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSNID_Msk (0x3UL << DIB_DAUTHSTATUS_NSNID_Pos ) /*!< DIB DAUTHSTATUS: Non-secure Non-invasive Debug Mask */ + +#define DIB_DAUTHSTATUS_NSID_Pos 0U /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Position */ +#define DIB_DAUTHSTATUS_NSID_Msk (0x3UL /*<< DIB_DAUTHSTATUS_NSID_Pos*/) /*!< DIB DAUTHSTATUS: Non-secure Invasive Debug Mask */ + +/* DDEVARCH, SCS Device Architecture Register Definitions */ +#define DIB_DDEVARCH_ARCHITECT_Pos 21U /*!< DIB DDEVARCH: Architect Position */ +#define DIB_DDEVARCH_ARCHITECT_Msk (0x7FFUL << DIB_DDEVARCH_ARCHITECT_Pos ) /*!< DIB DDEVARCH: Architect Mask */ + +#define DIB_DDEVARCH_PRESENT_Pos 20U /*!< DIB DDEVARCH: DEVARCH Present Position */ +#define DIB_DDEVARCH_PRESENT_Msk (0x1FUL << DIB_DDEVARCH_PRESENT_Pos ) /*!< DIB DDEVARCH: DEVARCH Present Mask */ + +#define DIB_DDEVARCH_REVISION_Pos 16U /*!< DIB DDEVARCH: Revision Position */ +#define DIB_DDEVARCH_REVISION_Msk (0xFUL << DIB_DDEVARCH_REVISION_Pos ) /*!< DIB DDEVARCH: Revision Mask */ + +#define DIB_DDEVARCH_ARCHVER_Pos 12U /*!< DIB DDEVARCH: Architecture Version Position */ +#define DIB_DDEVARCH_ARCHVER_Msk (0xFUL << DIB_DDEVARCH_ARCHVER_Pos ) /*!< DIB DDEVARCH: Architecture Version Mask */ + +#define DIB_DDEVARCH_ARCHPART_Pos 0U /*!< DIB DDEVARCH: Architecture Part Position */ +#define DIB_DDEVARCH_ARCHPART_Msk (0xFFFUL /*<< DIB_DDEVARCH_ARCHPART_Pos*/) /*!< DIB DDEVARCH: Architecture Part Mask */ + +/* DDEVTYPE, SCS Device Type Register Definitions */ +#define DIB_DDEVTYPE_SUB_Pos 4U /*!< DIB DDEVTYPE: Sub-type Position */ +#define DIB_DDEVTYPE_SUB_Msk (0xFUL << DIB_DDEVTYPE_SUB_Pos ) /*!< DIB DDEVTYPE: Sub-type 
Mask */ + +#define DIB_DDEVTYPE_MAJOR_Pos 0U /*!< DIB DDEVTYPE: Major type Position */ +#define DIB_DDEVTYPE_MAJOR_Msk (0xFUL /*<< DIB_DDEVTYPE_MAJOR_Pos*/) /*!< DIB DDEVTYPE: Major type Mask */ + + +/*@} end of group CMSIS_DIB */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_bitfield Core register bit field macros + \brief Macros for use with bit field definitions (xxx_Pos, xxx_Msk). + @{ + */ + +/** + \brief Mask and shift a bit field value for use in a register bit range. + \param[in] field Name of the register bit field. + \param[in] value Value of the bit field. This parameter is interpreted as an uint32_t type. + \return Masked and shifted value. +*/ +#define _VAL2FLD(field, value) (((uint32_t)(value) << field ## _Pos) & field ## _Msk) + +/** + \brief Mask and shift a register value to extract a bit filed value. + \param[in] field Name of the register bit field. + \param[in] value Value of register. This parameter is interpreted as an uint32_t type. + \return Masked and shifted bit field value. +*/ +#define _FLD2VAL(field, value) (((uint32_t)(value) & field ## _Msk) >> field ## _Pos) + +/*@} end of group CMSIS_core_bitfield */ + + +/** + \ingroup CMSIS_core_register + \defgroup CMSIS_core_base Core Definitions + \brief Definitions for base addresses, unions, and structures. + @{ + */ + +/* Memory mapping of Core Hardware */ + #define SCS_BASE (0xE000E000UL) /*!< System Control Space Base Address */ + #define ITM_BASE (0xE0000000UL) /*!< ITM Base Address */ + #define DWT_BASE (0xE0001000UL) /*!< DWT Base Address */ + #define TPI_BASE (0xE0040000UL) /*!< TPI Base Address */ + #define DCB_BASE (0xE000EDF0UL) /*!< DCB Base Address */ + #define DIB_BASE (0xE000EFB0UL) /*!< DIB Base Address */ + #define EMSS_BASE (0xE001E000UL) /*!AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void __NVIC_SetPriorityGrouping(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping + \details Reads the priority grouping field from the NVIC Interrupt Controller. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t __NVIC_GetPriorityGrouping(void) +{ + return ((uint32_t)((SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt + \details Enables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_EnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + __COMPILER_BARRIER(); + NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __COMPILER_BARRIER(); + } +} + + +/** + \brief Get Interrupt Enable status + \details Returns a device specific interrupt enable status from the NVIC interrupt controller. 
+ \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetEnableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt + \details Disables a device specific interrupt in the NVIC interrupt controller. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_DisableIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + __DSB(); + __ISB(); + } +} + + +/** + \brief Get Pending Interrupt + \details Reads the NVIC pending register and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt + \details Sets the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_SetPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt + \details Clears the pending bit of a device specific interrupt in the NVIC pending register. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void __NVIC_ClearPendingIRQ(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt + \details Reads the active register in the NVIC and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t __NVIC_GetActive(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Interrupt Target State + \details Reads the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + \return 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_GetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 
1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Target State + \details Sets the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_SetTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] |= ((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Clear Interrupt Target State + \details Clears the interrupt target field in the NVIC and returns the interrupt target bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 if interrupt is assigned to Secure + 1 if interrupt is assigned to Non Secure + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t NVIC_ClearTargetState(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] &= ~((uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL))); + return((uint32_t)(((NVIC->ITNS[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + + +/** + \brief Set Interrupt Priority + \details Sets the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every processor exception. + */ +__STATIC_INLINE void __NVIC_SetPriority(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority + \details Reads the priority of a device specific interrupt or a processor exception. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. + Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t __NVIC_GetPriority(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} + + +/** + \brief Encode Priority + \details Encodes the priority for an interrupt with the given priority group, + preemptive priority value, and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Used priority group. + \param [in] PreemptPriority Preemptive priority value (starting from 0). + \param [in] SubPriority Subpriority value (starting from 0). + \return Encoded priority. 
Value can be used in the function \ref NVIC_SetPriority(). + */ +__STATIC_INLINE uint32_t NVIC_EncodePriority (uint32_t PriorityGroup, uint32_t PreemptPriority, uint32_t SubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + return ( + ((PreemptPriority & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL)) << SubPriorityBits) | + ((SubPriority & (uint32_t)((1UL << (SubPriorityBits )) - 1UL))) + ); +} + + +/** + \brief Decode Priority + \details Decodes an interrupt priority value with a given priority group to + preemptive priority value and subpriority value. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS) the smallest possible priority group is set. + \param [in] Priority Priority value, which can be retrieved with the function \ref NVIC_GetPriority(). + \param [in] PriorityGroup Used priority group. + \param [out] pPreemptPriority Preemptive priority value (starting from 0). + \param [out] pSubPriority Subpriority value (starting from 0). + */ +__STATIC_INLINE void NVIC_DecodePriority (uint32_t Priority, uint32_t PriorityGroup, uint32_t* const pPreemptPriority, uint32_t* const pSubPriority) +{ + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + uint32_t PreemptPriorityBits; + uint32_t SubPriorityBits; + + PreemptPriorityBits = ((7UL - PriorityGroupTmp) > (uint32_t)(__NVIC_PRIO_BITS)) ? (uint32_t)(__NVIC_PRIO_BITS) : (uint32_t)(7UL - PriorityGroupTmp); + SubPriorityBits = ((PriorityGroupTmp + (uint32_t)(__NVIC_PRIO_BITS)) < (uint32_t)7UL) ? (uint32_t)0UL : (uint32_t)((PriorityGroupTmp - 7UL) + (uint32_t)(__NVIC_PRIO_BITS)); + + *pPreemptPriority = (Priority >> SubPriorityBits) & (uint32_t)((1UL << (PreemptPriorityBits)) - 1UL); + *pSubPriority = (Priority ) & (uint32_t)((1UL << (SubPriorityBits )) - 1UL); +} + + +/** + \brief Set Interrupt Vector + \details Sets an interrupt vector in SRAM based interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + VTOR must been relocated to SRAM before. + \param [in] IRQn Interrupt number + \param [in] vector Address of interrupt handler function + */ +__STATIC_INLINE void __NVIC_SetVector(IRQn_Type IRQn, uint32_t vector) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET] = vector; + __DSB(); +} + + +/** + \brief Get Interrupt Vector + \details Reads an interrupt vector from interrupt vector table. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Address of interrupt handler function + */ +__STATIC_INLINE uint32_t __NVIC_GetVector(IRQn_Type IRQn) +{ + uint32_t *vectors = (uint32_t *)SCB->VTOR; + return vectors[(int32_t)IRQn + NVIC_USER_IRQ_OFFSET]; +} + + +/** + \brief System Reset + \details Initiates a system reset request to reset the MCU. 
+ */ +__NO_RETURN __STATIC_INLINE void __NVIC_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | + SCB_AIRCR_SYSRESETREQ_Msk ); /* Keep priority group unchanged */ + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + +/** + \brief Software Reset + \details Initiates a system reset request to reset the CPU. + */ +__NO_RETURN __STATIC_INLINE void __SW_SystemReset(void) +{ + __DSB(); /* Ensure all outstanding memory accesses including + buffered write are completed before reset */ + SCB->AIRCR = (uint32_t)((0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (SCB->AIRCR & SCB_AIRCR_BFHFNMINS_Msk) | /* Keep BFHFNMINS unchanged. Use this Reset function in case your case need to keep it */ + (SCB->AIRCR & SCB_AIRCR_PRIGROUP_Msk) | /* Keep priority group unchanged */ + SCB_AIRCR_SYSRESETREQ_Msk ); + __DSB(); /* Ensure completion of memory access */ + + for(;;) /* wait until reset */ + { + __NOP(); + } +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Priority Grouping (non-secure) + \details Sets the non-secure priority grouping field when in secure state using the required unlock sequence. + The parameter PriorityGroup is assigned to the field SCB->AIRCR [10:8] PRIGROUP field. + Only values from 0..7 are used. + In case of a conflict between priority grouping and available + priority bits (__NVIC_PRIO_BITS), the smallest possible priority group is set. + \param [in] PriorityGroup Priority grouping field. + */ +__STATIC_INLINE void TZ_NVIC_SetPriorityGrouping_NS(uint32_t PriorityGroup) +{ + uint32_t reg_value; + uint32_t PriorityGroupTmp = (PriorityGroup & (uint32_t)0x07UL); /* only values 0..7 are used */ + + reg_value = SCB_NS->AIRCR; /* read old register configuration */ + reg_value &= ~((uint32_t)(SCB_AIRCR_VECTKEY_Msk | SCB_AIRCR_PRIGROUP_Msk)); /* clear bits to change */ + reg_value = (reg_value | + ((uint32_t)0x5FAUL << SCB_AIRCR_VECTKEY_Pos) | + (PriorityGroupTmp << SCB_AIRCR_PRIGROUP_Pos) ); /* Insert write key and priority group */ + SCB_NS->AIRCR = reg_value; +} + + +/** + \brief Get Priority Grouping (non-secure) + \details Reads the priority grouping field from the non-secure NVIC when in secure state. + \return Priority grouping field (SCB->AIRCR [10:8] PRIGROUP field). + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriorityGrouping_NS(void) +{ + return ((uint32_t)((SCB_NS->AIRCR & SCB_AIRCR_PRIGROUP_Msk) >> SCB_AIRCR_PRIGROUP_Pos)); +} + + +/** + \brief Enable Interrupt (non-secure) + \details Enables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_EnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Interrupt Enable status (non-secure) + \details Returns a device specific interrupt enable status from the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt is not enabled. + \return 1 Interrupt is enabled. + \note IRQn must not be negative. 
+ */ +__STATIC_INLINE uint32_t TZ_NVIC_GetEnableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISER[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Disable Interrupt (non-secure) + \details Disables a device specific interrupt in the non-secure NVIC interrupt controller when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_DisableIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICER[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Pending Interrupt (non-secure) + \details Reads the NVIC pending register in the non-secure NVIC when in secure state and returns the pending bit for the specified device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not pending. + \return 1 Interrupt status is pending. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Pending Interrupt (non-secure) + \details Sets the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_SetPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ISPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Clear Pending Interrupt (non-secure) + \details Clears the pending bit of a device specific interrupt in the non-secure NVIC pending register when in secure state. + \param [in] IRQn Device specific interrupt number. + \note IRQn must not be negative. + */ +__STATIC_INLINE void TZ_NVIC_ClearPendingIRQ_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->ICPR[(((uint32_t)IRQn) >> 5UL)] = (uint32_t)(1UL << (((uint32_t)IRQn) & 0x1FUL)); + } +} + + +/** + \brief Get Active Interrupt (non-secure) + \details Reads the active register in non-secure NVIC when in secure state and returns the active bit for the device specific interrupt. + \param [in] IRQn Device specific interrupt number. + \return 0 Interrupt status is not active. + \return 1 Interrupt status is active. + \note IRQn must not be negative. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetActive_NS(IRQn_Type IRQn) +{ + if ((int32_t)(IRQn) >= 0) + { + return((uint32_t)(((NVIC_NS->IABR[(((uint32_t)IRQn) >> 5UL)] & (1UL << (((uint32_t)IRQn) & 0x1FUL))) != 0UL) ? 1UL : 0UL)); + } + else + { + return(0U); + } +} + + +/** + \brief Set Interrupt Priority (non-secure) + \details Sets the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \param [in] priority Priority to set. + \note The priority cannot be set for every non-secure processor exception. 
+ */ +__STATIC_INLINE void TZ_NVIC_SetPriority_NS(IRQn_Type IRQn, uint32_t priority) +{ + if ((int32_t)(IRQn) >= 0) + { + NVIC_NS->IPR[((uint32_t)IRQn)] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } + else + { + SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] = (uint8_t)((priority << (8U - __NVIC_PRIO_BITS)) & (uint32_t)0xFFUL); + } +} + + +/** + \brief Get Interrupt Priority (non-secure) + \details Reads the priority of a non-secure device specific interrupt or a non-secure processor exception when in secure state. + The interrupt number can be positive to specify a device specific interrupt, + or negative to specify a processor exception. + \param [in] IRQn Interrupt number. + \return Interrupt Priority. Value is aligned automatically to the implemented priority bits of the microcontroller. + */ +__STATIC_INLINE uint32_t TZ_NVIC_GetPriority_NS(IRQn_Type IRQn) +{ + + if ((int32_t)(IRQn) >= 0) + { + return(((uint32_t)NVIC_NS->IPR[((uint32_t)IRQn)] >> (8U - __NVIC_PRIO_BITS))); + } + else + { + return(((uint32_t)SCB_NS->SHPR[(((uint32_t)IRQn) & 0xFUL)-4UL] >> (8U - __NVIC_PRIO_BITS))); + } +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_NVICFunctions */ + +/* ########################## MPU functions #################################### */ + +#if defined (__MPU_PRESENT) && (__MPU_PRESENT == 1U) + +#include "mpu_armv8.h" + +#endif + +/* ########################## FPU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_FpuFunctions FPU Functions + \brief Function that provides FPU type. + @{ + */ + +/** + \brief get FPU type + \details returns the FPU type + \returns + - \b 0: No FPU + - \b 1: Single precision FPU + - \b 2: Double + Single precision FPU + */ +__STATIC_INLINE uint32_t SCB_GetFPUType(void) +{ + uint32_t mvfr0; + + mvfr0 = FPU->MVFR0; + if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x220U) + { + return 2U; /* Double + Single precision FPU */ + } + else if ((mvfr0 & (FPU_MVFR0_Single_precision_Msk | FPU_MVFR0_Double_precision_Msk)) == 0x020U) + { + return 1U; /* Single precision FPU */ + } + else + { + return 0U; /* No FPU */ + } +} + + +/*@} end of CMSIS_Core_FpuFunctions */ + + + +/* ########################## SAU functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SAUFunctions SAU Functions + \brief Functions that configure the SAU. + @{ + */ + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) + +/** + \brief Enable SAU + \details Enables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Enable(void) +{ + SAU->CTRL |= (SAU_CTRL_ENABLE_Msk); +} + + + +/** + \brief Disable SAU + \details Disables the Security Attribution Unit (SAU). + */ +__STATIC_INLINE void TZ_SAU_Disable(void) +{ + SAU->CTRL &= ~(SAU_CTRL_ENABLE_Msk); +} + +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_SAUFunctions */ + + + +/* ################################## Debug Control function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DCBFunctions Debug Control Functions + \brief Functions that access the Debug Control Block. + @{ + */ + + +/** + \brief Set Debug Authentication Control Register + \details writes to Debug Authentication Control register. + \param [in] value value to be written.
+ */ +__STATIC_INLINE void DCB_SetAuthCtrl(uint32_t value) +{ + __DSB(); + __ISB(); + DCB->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register + \details Reads Debug Authentication Control register. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t DCB_GetAuthCtrl(void) +{ + return (DCB->DAUTHCTRL); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Set Debug Authentication Control Register (non-secure) + \details writes to non-secure Debug Authentication Control register when in secure state. + \param [in] value value to be written + */ +__STATIC_INLINE void TZ_DCB_SetAuthCtrl_NS(uint32_t value) +{ + __DSB(); + __ISB(); + DCB_NS->DAUTHCTRL = value; + __DSB(); + __ISB(); +} + + +/** + \brief Get Debug Authentication Control Register (non-secure) + \details Reads non-secure Debug Authentication Control register when in secure state. + \return Debug Authentication Control Register. + */ +__STATIC_INLINE uint32_t TZ_DCB_GetAuthCtrl_NS(void) +{ + return (DCB_NS->DAUTHCTRL); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DCBFunctions */ + + + + +/* ################################## Debug Identification function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_DIBFunctions Debug Identification Functions + \brief Functions that access the Debug Identification Block. + @{ + */ + + +/** + \brief Get Debug Authentication Status Register + \details Reads Debug Authentication Status register. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t DIB_GetAuthStatus(void) +{ + return (DIB->DAUTHSTATUS); +} + + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief Get Debug Authentication Status Register (non-secure) + \details Reads non-secure Debug Authentication Status register when in secure state. + \return Debug Authentication Status Register. + */ +__STATIC_INLINE uint32_t TZ_DIB_GetAuthStatus_NS(void) +{ + return (DIB_NS->DAUTHSTATUS); +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +/*@} end of CMSIS_Core_DIBFunctions */ + + +#if ((defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U)) || \ + (defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U))) + +/* ########################## Cache functions #################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_CacheFunctions Cache Functions + \brief Functions that configure Instruction and Data cache. + @{ + */ + +/* Cache Size ID Register Macros */ +#define CCSIDR_WAYS(x) (((x) & SCB_CCSIDR_ASSOCIATIVITY_Msk) >> SCB_CCSIDR_ASSOCIATIVITY_Pos) +#define CCSIDR_SETS(x) (((x) & SCB_CCSIDR_NUMSETS_Msk ) >> SCB_CCSIDR_NUMSETS_Pos ) + +#define __SCB_DCACHE_LINE_SIZE 32U /*!< STAR-MC1 cache line size is fixed to 32 bytes (8 words). See also register SCB_CCSIDR */ +#define __SCB_ICACHE_LINE_SIZE 32U /*!< STAR-MC1 cache line size is fixed to 32 bytes (8 words).
See also register SCB_CCSIDR */ + +/** + \brief Enable I-Cache + \details Turns on I-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if (SCB->CCR & SCB_CCR_IC_Msk) return; /* return if ICache is already enabled */ + + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + SCB->CCR |= (uint32_t)SCB_CCR_IC_Msk; /* enable I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable I-Cache + \details Turns off I-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->CCR &= ~(uint32_t)SCB_CCR_IC_Msk; /* disable I-Cache */ + SCB->ICIALLU = 0UL; /* invalidate I-Cache */ + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate I-Cache + \details Invalidates I-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateICache (void) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + __DSB(); + __ISB(); + SCB->ICIALLU = 0UL; + __DSB(); + __ISB(); + #endif +} + + +/** + \brief I-Cache Invalidate by address + \details Invalidates I-Cache for the given address. + I-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + I-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] isize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateICache_by_Addr (void *addr, int32_t isize) +{ + #if defined (__ICACHE_PRESENT) && (__ICACHE_PRESENT == 1U) + if ( isize > 0 ) { + int32_t op_size = isize + (((uint32_t)addr) & (__SCB_ICACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_ICACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->ICIMVAU = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_ICACHE_LINE_SIZE; + op_size -= __SCB_ICACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief Enable D-Cache + \details Turns on D-Cache + */ +__STATIC_FORCEINLINE void SCB_EnableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + if (SCB->CCR & SCB_CCR_DC_Msk) return; /* return if DCache is already enabled */ + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + __DSB(); + + SCB->CCR |= (uint32_t)SCB_CCR_DC_Msk; /* enable D-Cache */ + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Disable D-Cache + \details Turns off D-Cache + */ +__STATIC_FORCEINLINE void SCB_DisableDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + SCB->CCR &= ~(uint32_t)SCB_CCR_DC_Msk; /* disable D-Cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + 
((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Invalidate D-Cache + \details Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_InvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCISW = (((sets << SCB_DCISW_SET_Pos) & SCB_DCISW_SET_Msk) | + ((ways << SCB_DCISW_WAY_Pos) & SCB_DCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean D-Cache + \details Cleans D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCSW = (((sets << SCB_DCCSW_SET_Pos) & SCB_DCCSW_SET_Msk) | + ((ways << SCB_DCCSW_WAY_Pos) & SCB_DCCSW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief Clean & Invalidate D-Cache + \details Cleans and Invalidates D-Cache + */ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache (void) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + uint32_t ccsidr; + uint32_t sets; + uint32_t ways; + + SCB->CSSELR = 0U; /* select Level 1 data cache */ + __DSB(); + + ccsidr = SCB->CCSIDR; + + /* clean & invalidate D-Cache */ + sets = (uint32_t)(CCSIDR_SETS(ccsidr)); + do { + ways = (uint32_t)(CCSIDR_WAYS(ccsidr)); + do { + SCB->DCCISW = (((sets << SCB_DCCISW_SET_Pos) & SCB_DCCISW_SET_Msk) | + ((ways << SCB_DCCISW_WAY_Pos) & SCB_DCCISW_WAY_Msk) ); + #if defined ( __CC_ARM ) + __schedule_barrier(); + #endif + } while (ways-- != 0U); + } while(sets-- != 0U); + + __DSB(); + __ISB(); + #endif +} + + +/** + \brief D-Cache Invalidate by address + \details Invalidates D-Cache for the given address. + D-Cache is invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are invalidated. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_InvalidateDCache_by_Addr (void *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean by address + \details Cleans D-Cache for the given address + D-Cache is cleaned starting from a 32 byte aligned address in 32 byte granularity. 
+ D-Cache memory blocks which are part of given address + given size are cleaned. + \param[in] addr address + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + + +/** + \brief D-Cache Clean and Invalidate by address + \details Cleans and invalidates D_Cache for the given address + D-Cache is cleaned and invalidated starting from a 32 byte aligned address in 32 byte granularity. + D-Cache memory blocks which are part of given address + given size are cleaned and invalidated. + \param[in] addr address (aligned to 32-byte boundary) + \param[in] dsize size of memory block (in number of bytes) +*/ +__STATIC_FORCEINLINE void SCB_CleanInvalidateDCache_by_Addr (uint32_t *addr, int32_t dsize) +{ + #if defined (__DCACHE_PRESENT) && (__DCACHE_PRESENT == 1U) + if ( dsize > 0 ) { + int32_t op_size = dsize + (((uint32_t)addr) & (__SCB_DCACHE_LINE_SIZE - 1U)); + uint32_t op_addr = (uint32_t)addr /* & ~(__SCB_DCACHE_LINE_SIZE - 1U) */; + + __DSB(); + + do { + SCB->DCCIMVAC = op_addr; /* register accepts only 32byte aligned values, only bits 31..5 are valid */ + op_addr += __SCB_DCACHE_LINE_SIZE; + op_size -= __SCB_DCACHE_LINE_SIZE; + } while ( op_size > 0 ); + + __DSB(); + __ISB(); + } + #endif +} + +/*@} end of CMSIS_Core_CacheFunctions */ +#endif + + +/* ################################## SysTick function ############################################ */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_SysTickFunctions SysTick Functions + \brief Functions that configure the System. + @{ + */ + +#if defined (__Vendor_SysTickConfig) && (__Vendor_SysTickConfig == 0U) + +/** + \brief System Tick Configuration + \details Initializes the System Timer and its interrupt, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function SysTick_Config is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. 
+ */ +__STATIC_INLINE uint32_t SysTick_Config(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + NVIC_SetPriority (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} + +#if defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) +/** + \brief System Tick Configuration (non-secure) + \details Initializes the non-secure System Timer and its interrupt when in secure state, and starts the System Tick Timer. + Counter is in free running mode to generate periodic interrupts. + \param [in] ticks Number of ticks between two interrupts. + \return 0 Function succeeded. + \return 1 Function failed. + \note When the variable __Vendor_SysTickConfig is set to 1, then the + function TZ_SysTick_Config_NS is not included. In this case, the file device.h + must contain a vendor-specific implementation of this function. + + */ +__STATIC_INLINE uint32_t TZ_SysTick_Config_NS(uint32_t ticks) +{ + if ((ticks - 1UL) > SysTick_LOAD_RELOAD_Msk) + { + return (1UL); /* Reload value impossible */ + } + + SysTick_NS->LOAD = (uint32_t)(ticks - 1UL); /* set reload register */ + TZ_NVIC_SetPriority_NS (SysTick_IRQn, (1UL << __NVIC_PRIO_BITS) - 1UL); /* set Priority for Systick Interrupt */ + SysTick_NS->VAL = 0UL; /* Load the SysTick Counter Value */ + SysTick_NS->CTRL = SysTick_CTRL_CLKSOURCE_Msk | + SysTick_CTRL_TICKINT_Msk | + SysTick_CTRL_ENABLE_Msk; /* Enable SysTick IRQ and SysTick Timer */ + return (0UL); /* Function successful */ +} +#endif /* defined (__ARM_FEATURE_CMSE) && (__ARM_FEATURE_CMSE == 3U) */ + +#endif + +/*@} end of CMSIS_Core_SysTickFunctions */ + + + +/* ##################################### Debug In/Output function ########################################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_core_DebugFunctions ITM Functions + \brief Functions that access the ITM debug interface. + @{ + */ + +extern volatile int32_t ITM_RxBuffer; /*!< External variable to receive characters. */ +#define ITM_RXBUFFER_EMPTY ((int32_t)0x5AA55AA5U) /*!< Value identifying \ref ITM_RxBuffer is ready for next character. */ + + +/** + \brief ITM Send Character + \details Transmits a character via the ITM channel 0, and + \li Just returns when no debugger is connected that has booked the output. + \li Is blocking when a debugger is connected, but the previous character sent has not been transmitted. + \param [in] ch Character to transmit. + \returns Character to transmit. + */ +__STATIC_INLINE uint32_t ITM_SendChar (uint32_t ch) +{ + if (((ITM->TCR & ITM_TCR_ITMENA_Msk) != 0UL) && /* ITM enabled */ + ((ITM->TER & 1UL ) != 0UL) ) /* ITM Port #0 enabled */ + { + while (ITM->PORT[0U].u32 == 0UL) + { + __NOP(); + } + ITM->PORT[0U].u8 = (uint8_t)ch; + } + return (ch); +} + + +/** + \brief ITM Receive Character + \details Inputs a character via the external variable \ref ITM_RxBuffer. + \return Received character. + \return -1 No character pending. 
+ */ +__STATIC_INLINE int32_t ITM_ReceiveChar (void) +{ + int32_t ch = -1; /* no character available */ + + if (ITM_RxBuffer != ITM_RXBUFFER_EMPTY) + { + ch = ITM_RxBuffer; + ITM_RxBuffer = ITM_RXBUFFER_EMPTY; /* ready for next character */ + } + + return (ch); +} + + +/** + \brief ITM Check Character + \details Checks whether a character is pending for reading in the variable \ref ITM_RxBuffer. + \return 0 No character available. + \return 1 Character available. + */ +__STATIC_INLINE int32_t ITM_CheckChar (void) +{ + + if (ITM_RxBuffer == ITM_RXBUFFER_EMPTY) + { + return (0); /* no character available */ + } + else + { + return (1); /* character available */ + } +} + +/*@} end of CMSIS_core_DebugFunctions */ + + + + +#ifdef __cplusplus +} +#endif + +#endif /* __CORE_STAR_H_DEPENDANT */ + +#endif /* __CMSIS_GENERIC */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/mpu_armv8.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/mpu_armv8.h index d4c6f7b..cb04a57 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/mpu_armv8.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/mpu_armv8.h @@ -1,11 +1,11 @@ /****************************************************************************** * @file mpu_armv8.h * @brief CMSIS MPU API for Armv8-M and Armv8.1-M MPU - * @version V5.1.2 - * @date 10. February 2020 + * @version V5.1.4 + * @date 30. May 2022 ******************************************************************************/ /* - * Copyright (c) 2017-2020 Arm Limited. All rights reserved. + * Copyright (c) 2017-2022 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -84,7 +84,7 @@ * \param SH Defines the Shareability domain for this memory region. * \param RO Read-Only: Set to 1 for a read-only memory region. * \param NP Non-Privileged: Set to 1 for a non-privileged memory region. -* \oaram XN eXecute Never: Set to 1 for a non-executable memory region. +* \param XN eXecute Never: Set to 1 for a non-executable memory region. */ #define ARM_MPU_RBAR(BASE, SH, RO, NP, XN) \ (((BASE) & MPU_RBAR_BASE_Msk) | \ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/pac_armv81.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/pac_armv81.h new file mode 100644 index 0000000..854b60a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/Core/Include/pac_armv81.h @@ -0,0 +1,206 @@ +/****************************************************************************** + * @file pac_armv81.h + * @brief CMSIS PAC key functions for Armv8.1-M PAC extension + * @version V1.0.0 + * @date 23. March 2022 + ******************************************************************************/ +/* + * Copyright (c) 2022 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined ( __ICCARM__ ) + #pragma system_include /* treat file as system include file for MISRA check */ +#elif defined (__clang__) + #pragma clang system_header /* treat file as system include file */ +#endif + +#ifndef PAC_ARMV81_H +#define PAC_ARMV81_H + + +/* ################### PAC Key functions ########################### */ +/** + \ingroup CMSIS_Core_FunctionInterface + \defgroup CMSIS_Core_PacKeyFunctions PAC Key functions + \brief Functions that access the PAC keys. + @{ + */ + +#if (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) + +/** + \brief read the PAC key used for privileged mode + \details Reads the PAC key stored in the PAC_KEY_P registers. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __get_PAC_KEY_P (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_p_0\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_p_1\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_p_2\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_p_3\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for privileged mode + \details writes the given PAC key to the PAC_KEY_P registers. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __set_PAC_KEY_P (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_p_0, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_p_1, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_p_2, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_p_3, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief read the PAC key used for unprivileged mode + \details Reads the PAC key stored in the PAC_KEY_U registers. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __get_PAC_KEY_U (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_u_0\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_u_1\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_u_2\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_u_3\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for unprivileged mode + \details writes the given PAC key to the PAC_KEY_U registers. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __set_PAC_KEY_U (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_u_0, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_u_1, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_u_2, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_u_3, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +#if (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) + +/** + \brief read the PAC key used for privileged mode (non-secure) + \details Reads the PAC key stored in the non-secure PAC_KEY_P registers when in secure mode. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_get_PAC_KEY_P_NS (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_p_0_ns\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_p_1_ns\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_p_2_ns\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_p_3_ns\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for privileged mode (non-secure) + \details writes the given PAC key to the non-secure PAC_KEY_P registers when in secure mode. 
+ \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_set_PAC_KEY_P_NS (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_p_0_ns, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_p_1_ns, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_p_2_ns, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_p_3_ns, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief read the PAC key used for unprivileged mode (non-secure) + \details Reads the PAC key stored in the non-secure PAC_KEY_U registers when in secure mode. + \param [out] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_get_PAC_KEY_U_NS (uint32_t* pPacKey) { + __ASM volatile ( + "mrs r1, pac_key_u_0_ns\n" + "str r1,[%0,#0]\n" + "mrs r1, pac_key_u_1_ns\n" + "str r1,[%0,#4]\n" + "mrs r1, pac_key_u_2_ns\n" + "str r1,[%0,#8]\n" + "mrs r1, pac_key_u_3_ns\n" + "str r1,[%0,#12]\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +/** + \brief write the PAC key used for unprivileged mode (non-secure) + \details writes the given PAC key to the non-secure PAC_KEY_U registers when in secure mode. + \param [in] pPacKey 128bit PAC key + */ +__STATIC_FORCEINLINE void __TZ_set_PAC_KEY_U_NS (uint32_t* pPacKey) { + __ASM volatile ( + "ldr r1,[%0,#0]\n" + "msr pac_key_u_0_ns, r1\n" + "ldr r1,[%0,#4]\n" + "msr pac_key_u_1_ns, r1\n" + "ldr r1,[%0,#8]\n" + "msr pac_key_u_2_ns, r1\n" + "ldr r1,[%0,#12]\n" + "msr pac_key_u_3_ns, r1\n" + : : "r" (pPacKey) : "memory", "r1" + ); +} + +#endif /* (defined (__ARM_FEATURE_CMSE ) && (__ARM_FEATURE_CMSE == 3)) */ + +#endif /* (defined (__ARM_FEATURE_PAUTH) && (__ARM_FEATURE_PAUTH == 1)) */ + +/*@} end of CMSIS_Core_PacKeyFunctions */ + + +#endif /* PAC_ARMV81_H */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h index 4f7a5c7..55b789e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h @@ -3,13 +3,13 @@ * Title: arm_common_tables.h * Description: Extern declaration for common tables * - * $Date: 27. January 2017 - * $Revision: V.1.5.1 + * @version V1.10.0 + * @date 08 July 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -498,10 +498,20 @@ extern "C" extern const q15_t sinTable_q15[FAST_MATH_TABLE_SIZE + 1]; #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + /* Fast vector sqrt */ #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_FAST_SQRT_Q31_MVE) extern const q31_t sqrtTable_Q31[256]; #endif /* !defined(ARM_DSP_CONFIG_TABLES) defined(ARM_ALL_FAST_TABLES) */ + #endif + + /* Accurate scalar sqrt */ + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SQRT_Q31) + extern const q31_t sqrt_initial_lut_q31[32]; + #endif + + #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SQRT_Q15) + extern const q15_t sqrt_initial_lut_q15[16]; #endif #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables_f16.h index a5b9454..9c48086 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables_f16.h @@ -3,13 +3,13 @@ * Title: arm_common_tables_f16.h * Description: Extern declaration for common tables * - * $Date: 27. January 2017 - * $Revision: V.1.5.1 + * @version V1.10.0 + * @date 08 July 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs.h index 2a0659f..2efc0a1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs.h @@ -4,13 +4,13 @@ * Description: Constant structs that are initialized for user convenience. * For example, some can be given as arguments to the arm_cfft_f32() function. * - * $Date: 27. January 2017 - * $Revision: V.1.5.1 + * @version V1.10.0 + * @date 08 July 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs_f16.h index 13f7b59..843f50e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs_f16.h @@ -4,13 +4,13 @@ * Description: Constant structs that are initialized for user convenience. * For example, some can be given as arguments to the arm_cfft_f16() function. * - * $Date: 20. April 2020 - * $Revision: V.1.5.1 + * @version V1.10.0 + * @date 08 July 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -74,4 +74,4 @@ extern "C" } #endif -#endif \ No newline at end of file +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h index 1479611..8706197 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h @@ -3,13 +3,13 @@ * Title: arm_helium_utils.h * Description: Utility functions for Helium development * - * $Date: 09. September 2019 - * $Revision: V.1.5.1 + * @version V1.10.0 + * @date 08 July 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -335,7 +335,7 @@ __STATIC_INLINE arm_status arm_mat_cmplx_trans_32bit( /* * Set status as ARM_MATH_SIZE_MISMATCH */ - return = ARM_MATH_SIZE_MISMATCH; + return ARM_MATH_SIZE_MISMATCH; } #else (void)dstRows; @@ -535,7 +535,7 @@ __STATIC_INLINE arm_status arm_mat_cmplx_trans_16bit( /* * Set status as ARM_MATH_SIZE_MISMATCH */ - return = ARM_MATH_SIZE_MISMATCH; + return ARM_MATH_SIZE_MISMATCH; } #else (void)dstRows; @@ -620,7 +620,7 @@ __STATIC_INLINE q31x4_t FAST_VSQRT_Q31(q31x4_t vecIn) vecSignBits = vclsq(vecIn); - vecSignBits = vbicq(vecSignBits, 1); + vecSignBits = vbicq_n_s32(vecSignBits, 1); /* * in = in << no_of_sign_bits; */ @@ -687,7 +687,7 @@ __STATIC_INLINE q15x8_t FAST_VSQRT_Q15(q15x8_t vecIn) vecDst = vuninitializedq_s16(); vecSignBits = vclsq(vecIn); - vecSignBits = vbicq(vecSignBits, 1); + vecSignBits = vbicq_n_s16(vecSignBits, 1); /* * in = in << no_of_sign_bits; */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h index d1e68e5..989ba29 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h @@ -1,11 +1,12 @@ /****************************************************************************** * @file arm_math.h * @brief Public header file for CMSIS DSP Library - * @version V1.7.0 - * @date 18. March 2019 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* - * Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * Copyright (c) 2010-2021 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -32,20 +33,20 @@ * based devices. * * The library is divided into a number of functions each covering a specific category: - * - Basic math functions - * - Fast math functions - * - Complex math functions - * - Filtering functions - * - Matrix functions - * - Transform functions - * - Motor control functions - * - Statistical functions - * - Support functions - * - Interpolation functions - * - Support Vector Machine functions (SVM) - * - Bayes classifier functions - * - Distance functions - * - Quaternion functions + * - \ref groupMath "Basic math functions" + * - \ref groupFastMath "Fast math functions" + * - \ref groupCmplxMath "Complex math functions" + * - \ref groupFilters "Filtering functions" + * - \ref groupMatrix "Matrix functions" + * - \ref groupTransforms "Transform functions" + * - \ref groupController "Motor control functions" + * - \ref groupStats "Statistical functions" + * - \ref groupSupport "Support functions" + * - \ref groupInterpolation "Interpolation functions" + * - \ref groupSVM "Support Vector Machine functions (SVM)" + * - \ref groupBayes "Bayes classifier functions" + * - \ref groupDistance "Distance functions" + * - \ref groupQuaternionMath "Quaternion functions" * * The library has generally separate functions for operating on 8-bit integers, 16-bit integers, * 32-bit integer and 32-bit floating-point values. @@ -60,129 +61,95 @@ * * \section using Using the Library * - * The library installer contains prebuilt versions of the libraries in the Lib folder. 
- * - * Here is the list of pre-built libraries : - * - arm_cortexM7lfdp_math.lib (Cortex-M7, Little endian, Double Precision Floating Point Unit) - * - arm_cortexM7bfdp_math.lib (Cortex-M7, Big endian, Double Precision Floating Point Unit) - * - arm_cortexM7lfsp_math.lib (Cortex-M7, Little endian, Single Precision Floating Point Unit) - * - arm_cortexM7bfsp_math.lib (Cortex-M7, Big endian and Single Precision Floating Point Unit on) - * - arm_cortexM7l_math.lib (Cortex-M7, Little endian) - * - arm_cortexM7b_math.lib (Cortex-M7, Big endian) - * - arm_cortexM4lf_math.lib (Cortex-M4, Little endian, Floating Point Unit) - * - arm_cortexM4bf_math.lib (Cortex-M4, Big endian, Floating Point Unit) - * - arm_cortexM4l_math.lib (Cortex-M4, Little endian) - * - arm_cortexM4b_math.lib (Cortex-M4, Big endian) - * - arm_cortexM3l_math.lib (Cortex-M3, Little endian) - * - arm_cortexM3b_math.lib (Cortex-M3, Big endian) - * - arm_cortexM0l_math.lib (Cortex-M0 / Cortex-M0+, Little endian) - * - arm_cortexM0b_math.lib (Cortex-M0 / Cortex-M0+, Big endian) - * - arm_ARMv8MBLl_math.lib (Armv8-M Baseline, Little endian) - * - arm_ARMv8MMLl_math.lib (Armv8-M Mainline, Little endian) - * - arm_ARMv8MMLlfsp_math.lib (Armv8-M Mainline, Little endian, Single Precision Floating Point Unit) - * - arm_ARMv8MMLld_math.lib (Armv8-M Mainline, Little endian, DSP instructions) - * - arm_ARMv8MMLldfsp_math.lib (Armv8-M Mainline, Little endian, DSP instructions, Single Precision Floating Point Unit) - * - * The library functions are declared in the public file arm_math.h which is placed in the Include folder. - * Simply include this file and link the appropriate library in the application and begin calling the library functions. The Library supports single - * public header file arm_math.h for Cortex-M cores with little endian and big endian. Same header file will be used for floating point unit(FPU) variants. + * The library is released in source form. It is strongly advised to compile the library using -Ofast to + * have the best performances. * + * The library functions are declared in the public file `arm_math.h` which is placed in the `Include` folder. + * Simply include this file. If you don't want to include everything, you can also rely + * on headers in `Include/dsp` folder and use only what you need. * * \section example Examples * - * The library ships with a number of examples which demonstrate how to use the library functions. + * The library ships with a number of examples which demonstrate how to use the library functions. Please refer to \ref groupExamples. * * \section toolchain Toolchain Support * * The library is now tested on Fast Models building with cmake. - * Core M0, M7, A5 are tested. - * - * - * - * \section building Building the Library - * - * The library installer contains a project file to rebuild libraries on MDK toolchain in the CMSIS\\DSP\\Projects\\ARM folder. - * - arm_cortexM_math.uvprojx - * - * - * The libraries can be built by opening the arm_cortexM_math.uvprojx project in MDK-ARM, selecting a specific target, and defining the optional preprocessor macros detailed above. + * Core M0, M4, M7, M33, M55, A32 are tested. * - * There is also a work in progress cmake build. The README file is giving more details. * * \section preprocessor Preprocessor Macros * - * Each library project have different preprocessor macros. - * - * - ARM_MATH_BIG_ENDIAN: - * - * Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. By default library builds for little endian targets. 
- * - * - ARM_MATH_MATRIX_CHECK: - * - * Define macro ARM_MATH_MATRIX_CHECK for checking on the input and output sizes of matrices - * - * - ARM_MATH_ROUNDING: - * - * Define macro ARM_MATH_ROUNDING for rounding on support functions - * - * - ARM_MATH_LOOPUNROLL: - * - * Define macro ARM_MATH_LOOPUNROLL to enable manual loop unrolling in DSP functions - * - * - ARM_MATH_NEON: - * - * Define macro ARM_MATH_NEON to enable Neon versions of the DSP functions. + * Each library project has different preprocessor macros. + * + * - `ARM_MATH_BIG_ENDIAN`: + * - Define macro ARM_MATH_BIG_ENDIAN to build the library for big endian targets. By default library builds for little endian targets. + * . + * - `ARM_MATH_MATRIX_CHECK`: + * - Define macro ARM_MATH_MATRIX_CHECK for checking on the input and output sizes of matrices + * . + * - `ARM_MATH_ROUNDING`: + * - Define macro ARM_MATH_ROUNDING for rounding on support functions + * . + * - `ARM_MATH_LOOPUNROLL`: + * - Define macro ARM_MATH_LOOPUNROLL to enable manual loop unrolling in DSP functions + * . + * - `ARM_MATH_NEON`: + * - Define macro ARM_MATH_NEON to enable Neon versions of the DSP functions. * It is not enabled by default when Neon is available because performances are * dependent on the compiler and target architecture. - * - * - ARM_MATH_NEON_EXPERIMENTAL: - * - * Define macro ARM_MATH_NEON_EXPERIMENTAL to enable experimental Neon versions of + * . + * - `ARM_MATH_NEON_EXPERIMENTAL`: + * - Define macro ARM_MATH_NEON_EXPERIMENTAL to enable experimental Neon versions of * of some DSP functions. Experimental Neon versions currently do not have better * performances than the scalar versions. - * - * - ARM_MATH_HELIUM: - * - * It implies the flags ARM_MATH_MVEF and ARM_MATH_MVEI and ARM_MATH_FLOAT16. - * - * - ARM_MATH_MVEF: - * - * Select Helium versions of the f32 algorithms. + * . + * - `ARM_MATH_HELIUM`: + * - It implies the flags ARM_MATH_MVEF and ARM_MATH_MVEI and ARM_MATH_MVE_FLOAT16. + * . + * - `ARM_MATH_HELIUM_EXPERIMENTAL`: + * - Only taken into account when ARM_MATH_MVEF, ARM_MATH_MVEI or ARM_MATH_MVE_FLOAT16 are defined. + * Enable some vector versions which may have worse performance than scalar + * depending on the core / compiler configuration. + * . + * - `ARM_MATH_MVEF`: + * - Select Helium versions of the f32 algorithms. * It implies ARM_MATH_FLOAT16 and ARM_MATH_MVEI. - * - * - ARM_MATH_MVEI: - * - * Select Helium versions of the int and fixed point algorithms. - * - * - ARM_MATH_MVE_FLOAT16: - * - * MVE Float16 implementations of some algorithms (Requires MVE extension). - * - * - DISABLEFLOAT16: - * - * Disable float16 algorithms when __fp16 is not supported for a + * . + * - `ARM_MATH_MVEI`: + * - Select Helium versions of the int and fixed point algorithms. + * . + * - `ARM_MATH_MVE_FLOAT16`: + * - MVE Float16 implementations of some algorithms (Requires MVE extension). + * . + * - `DISABLEFLOAT16`: + * - Disable float16 algorithms when __fp16 is not supported for a * specific compiler / core configuration. * This is only valid for scalar. When vector architecture is * supporting f16 then it can't be disabled. + * . + * - `ARM_MATH_AUTOVECTORIZE`: + * - With Helium or Neon, disable the use of vectorized code with C intrinsics + * and use pure C instead. The vectorization is then done by the compiler. * - *
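As a rough, hedged sketch (not part of this patch; the scale_example wrapper and the scale factor are invented for illustration, while the arm_mat_* names come from the library headers above), a translation unit built with matrix checking and loop unrolling enabled could look like this:

/* Illustrative sketch only: in a real build these macros are normally passed
 * as compiler flags (e.g. -DARM_MATH_MATRIX_CHECK -DARM_MATH_LOOPUNROLL) so
 * that the CMSIS-DSP sources themselves are compiled with the same settings. */
#define ARM_MATH_MATRIX_CHECK   /* validate matrix dimensions at run time  */
#define ARM_MATH_LOOPUNROLL     /* enable manual loop unrolling in kernels */
#include "arm_math.h"

arm_status scale_example(const arm_matrix_instance_f32 *src,
                         arm_matrix_instance_f32 *dst)
{
    /* With ARM_MATH_MATRIX_CHECK in effect, mismatched matrix sizes are
     * reported as ARM_MATH_SIZE_MISMATCH instead of being silently ignored. */
    return arm_mat_scale_f32(src, 2.0f, dst);
}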
* \section pack CMSIS-DSP in ARM::CMSIS Pack * * The following files relevant to CMSIS-DSP are present in the ARM::CMSIS Pack directories: * |File/Folder |Content | * |---------------------------------|------------------------------------------------------------------------| * |\b CMSIS\\Documentation\\DSP | This documentation | - * |\b CMSIS\\DSP\\DSP_Lib_TestSuite | DSP_Lib deprecated test suite | * |\b CMSIS\\DSP\\Examples | Example projects demonstrating the usage of the library functions | - * |\b CMSIS\\DSP\\Include | DSP_Lib include files for using and building the lib - * |\b CMSIS\\DSP\\PrivateInclude | DSP_Lib private include files for building the lib | - * |\b CMSIS\\DSP\\Lib | DSP_Lib binaries | - * |\b CMSIS\\DSP\\Projects | Projects to rebuild DSP_Lib binaries | - * |\b CMSIS\\DSP\\Source | DSP_Lib source files | + * |\b CMSIS\\DSP\\ComputeLibrary | Small Neon kernels when building on Cortex-A + * |\b CMSIS\\DSP\\Include | include files for using and building the lib + * |\b CMSIS\\DSP\\PrivateInclude | private include files for building the lib | + * |\b CMSIS\\DSP\\Source | source files | * - *
* \section rev Revision History of CMSIS-DSP * Please refer to \ref ChangeLog_pg. + * + * \section license License + * + * The CMSIS-DSP is provided free of charge under the Apache 2.0 License. */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_f16.h index 85b20df..166d7d6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_f16.h @@ -1,11 +1,12 @@ /****************************************************************************** * @file arm_math_f16.h * @brief Public header file for f16 function of the CMSIS DSP Library - * @version V1.8.1 - * @date 20. April 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (c) 2010-2021 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h index e750a8f..850d51e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h @@ -1,11 +1,12 @@ /****************************************************************************** * @file arm_math_memory.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (c) 2010-2021 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -73,7 +74,7 @@ extern "C" @return Q31 value */ __STATIC_FORCEINLINE q31_t read_q15x2 ( - q15_t * pQ15) + q15_t const * pQ15) { q31_t val; @@ -91,40 +92,14 @@ __STATIC_FORCEINLINE q31_t read_q15x2 ( @param[in] pQ15 points to input value @return Q31 value */ -__STATIC_FORCEINLINE q31_t read_q15x2_ia ( - q15_t ** pQ15) -{ - q31_t val; - -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, *pQ15, 4); -#else - val = ((*pQ15)[1] << 16) | ((*pQ15)[0] & 0x0FFFF); -#endif - - *pQ15 += 2; - return (val); -} +#define read_q15x2_ia(pQ15) read_q15x2((*(pQ15) += 2) - 2) /** @brief Read 2 Q15 from Q15 pointer and decrement pointer afterwards. @param[in] pQ15 points to input value @return Q31 value */ -__STATIC_FORCEINLINE q31_t read_q15x2_da ( - q15_t ** pQ15) -{ - q31_t val; - -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, *pQ15, 4); -#else - val = ((*pQ15)[1] << 16) | ((*pQ15)[0] & 0x0FFFF); -#endif - - *pQ15 -= 2; - return (val); -} +#define read_q15x2_da(pQ15) read_q15x2((*(pQ15) -= 2) + 2) /** @brief Write 2 Q15 to Q15 pointer and increment pointer afterwards. 
@@ -140,8 +115,8 @@ __STATIC_FORCEINLINE void write_q15x2_ia ( #ifdef __ARM_FEATURE_UNALIGNED memcpy (*pQ15, &val, 4); #else - (*pQ15)[0] = (val & 0x0FFFF); - (*pQ15)[1] = (val >> 16) & 0x0FFFF; + (*pQ15)[0] = (q15_t)(val & 0x0FFFF); + (*pQ15)[1] = (q15_t)((val >> 16) & 0x0FFFF); #endif *pQ15 += 2; @@ -162,52 +137,43 @@ __STATIC_FORCEINLINE void write_q15x2 ( #ifdef __ARM_FEATURE_UNALIGNED memcpy (pQ15, &val, 4); #else - pQ15[0] = val & 0x0FFFF; - pQ15[1] = val >> 16; + pQ15[0] = (q15_t)(val & 0x0FFFF); + pQ15[1] = (q15_t)(val >> 16); #endif } /** - @brief Read 4 Q7 from Q7 pointer and increment pointer afterwards. + @brief Read 4 Q7 from Q7 pointer @param[in] pQ7 points to input value @return Q31 value */ -__STATIC_FORCEINLINE q31_t read_q7x4_ia ( - q7_t ** pQ7) +__STATIC_FORCEINLINE q31_t read_q7x4 ( + q7_t const * pQ7) { q31_t val; - #ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, *pQ7, 4); + memcpy (&val, pQ7, 4); #else - val =(((*pQ7)[3] & 0x0FF) << 24) | (((*pQ7)[2] & 0x0FF) << 16) | (((*pQ7)[1] & 0x0FF) << 8) | ((*pQ7)[0] & 0x0FF); + val =((pQ7[3] & 0x0FF) << 24) | ((pQ7[2] & 0x0FF) << 16) | ((pQ7[1] & 0x0FF) << 8) | (pQ7[0] & 0x0FF); #endif - - *pQ7 += 4; - return (val); } /** - @brief Read 4 Q7 from Q7 pointer and decrement pointer afterwards. + @brief Read 4 Q7 from Q7 pointer and increment pointer afterwards. @param[in] pQ7 points to input value @return Q31 value */ -__STATIC_FORCEINLINE q31_t read_q7x4_da ( - q7_t ** pQ7) -{ - q31_t val; -#ifdef __ARM_FEATURE_UNALIGNED - memcpy (&val, *pQ7, 4); -#else - val = ((((*pQ7)[3]) & 0x0FF) << 24) | ((((*pQ7)[2]) & 0x0FF) << 16) | ((((*pQ7)[1]) & 0x0FF) << 8) | ((*pQ7)[0] & 0x0FF); -#endif - *pQ7 -= 4; +#define read_q7x4_ia(pQ7) read_q7x4((*(pQ7) += 4) - 4) - return (val); -} +/** + @brief Read 4 Q7 from Q7 pointer and decrement pointer afterwards. + @param[in] pQ7 points to input value + @return Q31 value + */ +#define read_q7x4_da(pQ7) read_q7x4((*(pQ7) -= 4) + 4) /** @brief Write 4 Q7 to Q7 pointer and increment pointer afterwards. @@ -223,10 +189,10 @@ __STATIC_FORCEINLINE void write_q7x4_ia ( #ifdef __ARM_FEATURE_UNALIGNED memcpy (*pQ7, &val, 4); #else - (*pQ7)[0] = val & 0x0FF; - (*pQ7)[1] = (val >> 8) & 0x0FF; - (*pQ7)[2] = (val >> 16) & 0x0FF; - (*pQ7)[3] = (val >> 24) & 0x0FF; + (*pQ7)[0] = (q7_t)(val & 0x0FF); + (*pQ7)[1] = (q7_t)((val >> 8) & 0x0FF); + (*pQ7)[2] = (q7_t)((val >> 16) & 0x0FF); + (*pQ7)[3] = (q7_t)((val >> 24) & 0x0FF); #endif *pQ7 += 4; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h index a48b659..b3db6f7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h @@ -1,11 +1,12 @@ /****************************************************************************** * @file arm_math_types.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (c) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
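To make the packed Q15/Q7 access helpers above concrete, here is a minimal hedged sketch (assuming a little-endian target and that arm_math.h pulls in these memory helpers; sum_q15_pairs itself is an invented example, not part of the patch):

#include "arm_math.h"

/* Sums pairCount pairs of packed Q15 samples; read_q15x2_ia() returns two
 * consecutive q15_t values packed into one q31_t and advances the pointer. */
q31_t sum_q15_pairs(q15_t *buf, uint32_t pairCount)
{
    q15_t *p  = buf;
    q31_t acc = 0;

    while (pairCount--)
    {
        q31_t packed = read_q15x2_ia(&p);   /* p advances by two samples      */
        acc += (q15_t)packed;               /* low half:  buf[0], buf[2], ... */
        acc += (q15_t)(packed >> 16);       /* high half: buf[1], buf[3], ... */
    }
    return acc;
}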
* * SPDX-License-Identifier: Apache-2.0 * @@ -36,6 +37,9 @@ extern "C" #elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) +#elif defined ( __APPLE_CC__ ) + #pragma GCC diagnostic ignored "-Wold-style-cast" + #elif defined ( __GNUC__ ) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wsign-conversion" @@ -63,7 +67,11 @@ extern "C" #define __STATIC_FORCEINLINE static __forceinline #define __STATIC_INLINE static __inline #define __ALIGNED(x) __declspec(align(x)) - +#elif defined ( __APPLE_CC__ ) +#include +#define __ALIGNED(x) __attribute__((aligned(x))) +#define __STATIC_FORCEINLINE static inline __attribute__((always_inline)) +#define __STATIC_INLINE static inline #elif defined (__GNUC_PYTHON__) #include #define __ALIGNED(x) __attribute__((aligned(x))) @@ -87,16 +95,22 @@ extern "C" #endif #if defined(ARM_MATH_NEON) -#include -#if __ARM_FEATURE_FP16_VECTOR_ARITHMETIC - #if !defined(ARM_MATH_NEON_FLOAT16) - #define ARM_MATH_NEON_FLOAT16 + #if defined(_MSC_VER) && defined(_M_ARM64EC) + #include + #else + #include + #endif + #if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && __ARM_FEATURE_FP16_VECTOR_ARITHMETIC + #if !defined(ARM_MATH_NEON_FLOAT16) + #define ARM_MATH_NEON_FLOAT16 + #endif #endif -#endif #endif #if !defined(ARM_MATH_AUTOVECTORIZE) + +#if defined(__ARM_FEATURE_MVE) #if __ARM_FEATURE_MVE #if !defined(ARM_MATH_MVEI) #define ARM_MATH_MVEI @@ -112,6 +126,7 @@ extern "C" #endif #endif +#endif /*defined(__ARM_FEATURE_MVE)*/ #endif /*!defined(ARM_MATH_AUTOVECTORIZE)*/ @@ -160,6 +175,12 @@ extern "C" #define LOW_OPTIMIZATION_EXIT #define IAR_ONLY_LOW_OPTIMIZATION_ENTER #define IAR_ONLY_LOW_OPTIMIZATION_EXIT + +#elif defined ( __APPLE_CC__ ) + #define LOW_OPTIMIZATION_ENTER + #define LOW_OPTIMIZATION_EXIT + #define IAR_ONLY_LOW_OPTIMIZATION_ENTER + #define IAR_ONLY_LOW_OPTIMIZATION_EXIT #elif defined ( __GNUC__ ) #define LOW_OPTIMIZATION_ENTER \ @@ -223,6 +244,8 @@ extern "C" #elif defined ( __ARMCC_VERSION ) && ( __ARMCC_VERSION >= 6010050 ) +#elif defined ( __APPLE_CC__ ) + #elif defined ( __GNUC__ ) #pragma GCC diagnostic pop @@ -244,7 +267,7 @@ extern "C" } #endif -#if __ARM_FEATURE_MVE +#if defined(__ARM_FEATURE_MVE) && __ARM_FEATURE_MVE #include #endif @@ -276,7 +299,9 @@ extern "C" /** * @brief 32-bit floating-point type definition. */ +#if !defined(__ICCARM__) || !(__ARM_FEATURE_MVE & 2) typedef float float32_t; +#endif /** * @brief 64-bit floating-point type definition. @@ -298,12 +323,12 @@ extern "C" typedef int32x4_t q31x4_t; /** - * @brief 16-bit fractional 128-bit vector data type with 16-bit alignement in 1.15 format. + * @brief 16-bit fractional 128-bit vector data type with 16-bit alignment in 1.15 format. */ typedef __ALIGNED(2) int16x8_t q15x8_t; /** - * @brief 8-bit fractional 128-bit vector data type with 8-bit alignement in 1.7 format. + * @brief 8-bit fractional 128-bit vector data type with 8-bit alignment in 1.7 format. 
*/ typedef __ALIGNED(1) int8x16_t q7x16_t; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h index c83f761..771af5c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h @@ -1,11 +1,12 @@ /****************************************************************************** * @file arm_math_types_f16.h * @brief Public header file for f16 function of the CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (c) 2010-2021 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -61,7 +62,7 @@ won't be built. #endif #if defined(ARM_MATH_NEON) || (defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE)) /* floating point vector*/ - + #if defined(ARM_MATH_MVE_FLOAT16) || defined(ARM_MATH_NEON_FLOAT16) /** @@ -92,7 +93,7 @@ won't be built. #endif #if defined(ARM_MATH_NEON) - + #if defined(ARM_MATH_NEON_FLOAT16) /** @@ -128,21 +129,30 @@ won't be built. float16x4_t f; int16x4_t i; } any16x4_t; -#endif +#endif #endif #if defined(ARM_FLOAT16_SUPPORTED) + +#if defined(__ICCARM__) + +#define F16INFINITY ((float16_t) INFINITY) + +#else + +#define F16INFINITY ((float16_t)__builtin_inf()) + +#endif + #define F16_MAX ((float16_t)__FLT16_MAX__) -#define F16_MIN (-(float16_t)__FLT16_MAX__) +#define F16_MIN (-(_Float16)__FLT16_MAX__) #define F16_ABSMAX ((float16_t)__FLT16_MAX__) #define F16_ABSMIN ((float16_t)0.0f16) -#define F16INFINITY ((float16_t)__builtin_inf()) - #endif /* ARM_FLOAT16_SUPPORTED*/ #endif /* !defined( __CC_ARM ) */ @@ -151,5 +161,3 @@ won't be built. #endif #endif /* _ARM_MATH_F16_H */ - - diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables.h index 74f51b2..43456f0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables.h @@ -4,12 +4,13 @@ * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc * used for MVE implementation only * - * $Date: 14. April 2020 + * @version V1.10.0 + * @date 04 October 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables_f16.h index 171a391..62b8d9b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_mve_tables_f16.h @@ -4,12 +4,13 @@ * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc * used for MVE implementation only * - * $Date: 14. April 2020 + * @version V1.10.0 + * @date 04 October 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -39,7 +40,7 @@ extern "C" -#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) @@ -96,7 +97,7 @@ extern float16_t rearranged_twiddle_stride3_4096_f16[2728]; #endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) */ -#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ +#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_fft.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_fft.h index 4d15381..4994892 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_fft.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_fft.h @@ -40,13 +40,280 @@ extern "C" #define MVE_CMPLX_MULT_FLT_AxB(A,B) vcmlaq_rot90(vcmulq(A, B), A, B) #define MVE_CMPLX_MULT_FLT_Conj_AxB(A,B) vcmlaq_rot270(vcmulq(A, B), A, B) -#define MVE_CMPLX_MULT_FX_AxB(A,B) vqdmladhxq(vqdmlsdhq((__typeof(A))vuninitializedq_s32(), A, B), A, B) -#define MVE_CMPLX_MULT_FX_AxConjB(A,B) vqdmladhq(vqdmlsdhxq((__typeof(A))vuninitializedq_s32(), A, B), A, B) +#define MVE_CMPLX_MULT_FX_AxB(A,B,TyA) vqdmladhxq(vqdmlsdhq((TyA)vuninitializedq_s32(), A, B), A, B) +#define MVE_CMPLX_MULT_FX_AxConjB(A,B,TyA) vqdmladhq(vqdmlsdhxq((TyA)vuninitializedq_s32(), A, B), A, B) #define MVE_CMPLX_ADD_FX_A_ixB(A, B) vhcaddq_rot90(A,B) #define MVE_CMPLX_SUB_FX_A_ixB(A,B) vhcaddq_rot270(A,B) +/** + @brief In-place 32 bit reversal function for helium + @param[in,out] pSrc points to in-place buffer of unknown 32-bit data type + @param[in] bitRevLen bit reversal table length + @param[in] pBitRevTab points to bit reversal table + @return none +*/ + +__STATIC_INLINE void arm_bitreversal_32_inpl_mve( + uint32_t *pSrc, + const uint16_t bitRevLen, + const uint16_t *pBitRevTab) + +{ + uint64_t *src = (uint64_t *) pSrc; + int32_t blkCnt; /* loop counters */ + uint32x4_t bitRevTabOff; + uint32x4_t one = vdupq_n_u32(1); + uint64x2_t inLow, inHigh; + uint64x2_t bitRevOff1Low, bitRevOff0Low; + uint64x2_t bitRevOff1High, bitRevOff0High; + + /* load scheduling to increase gather load idx update / gather load distance */ + bitRevTabOff = vldrhq_u32(pBitRevTab); + pBitRevTab += 4; + + 
bitRevOff0Low = vmullbq_int_u32(bitRevTabOff, one); + bitRevOff0High = vmulltq_int_u32(bitRevTabOff, one); + + + blkCnt = bitRevLen / 8; + while (blkCnt > 0) { + bitRevTabOff = vldrhq_u32(pBitRevTab); + pBitRevTab += 4; + + /* 64-bit index expansion */ + bitRevOff1Low = vmullbq_int_u32(bitRevTabOff, one); + bitRevOff1High = vmulltq_int_u32(bitRevTabOff, one); + + inLow = vldrdq_gather_offset_u64(src, bitRevOff0Low); + inHigh = vldrdq_gather_offset_u64(src, bitRevOff0High); + + vstrdq_scatter_offset_u64(src, bitRevOff0Low, inHigh); + vstrdq_scatter_offset_u64(src, bitRevOff0High, inLow); + + + /* unrolled */ + bitRevTabOff = vldrhq_u32(pBitRevTab); + pBitRevTab += 4; + + bitRevOff0Low = vmullbq_int_u32(bitRevTabOff, one); + bitRevOff0High = vmulltq_int_u32(bitRevTabOff, one); + + inLow = vldrdq_gather_offset_u64(src, bitRevOff1Low); + inHigh = vldrdq_gather_offset_u64(src, bitRevOff1High); + + vstrdq_scatter_offset_u64(src, bitRevOff1Low, inHigh); + vstrdq_scatter_offset_u64(src, bitRevOff1High, inLow); + + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + + if (bitRevLen & 7) { + /* FFT size = 16 */ + inLow = vldrdq_gather_offset_u64(src, bitRevOff0Low); + inHigh = vldrdq_gather_offset_u64(src, bitRevOff0High); + + vstrdq_scatter_offset_u64(src, bitRevOff0Low, inHigh); + vstrdq_scatter_offset_u64(src, bitRevOff0High, inLow); + } +} + + + +/** + @brief In-place 16 bit reversal function for helium + @param[in,out] pSrc points to in-place buffer of unknown 16-bit data type + @param[in] bitRevLen bit reversal table length + @param[in] pBitRevTab points to bit reversal table + @return none +*/ + +__STATIC_INLINE void arm_bitreversal_16_inpl_mve( + uint16_t *pSrc, + const uint16_t bitRevLen, + const uint16_t *pBitRevTab) + +{ + uint32_t *src = (uint32_t *) pSrc; + int32_t blkCnt; /* loop counters */ + uint32x4_t bitRevTabOff; + uint16x8_t one = vdupq_n_u16(1); + uint32x4_t bitRevOff1Low, bitRevOff0Low; + uint32x4_t bitRevOff1High, bitRevOff0High; + uint32x4_t inLow, inHigh; + + /* load scheduling to increase gather load idx update / gather load distance */ + bitRevTabOff = vldrhq_u16(pBitRevTab); + pBitRevTab += 8; + + bitRevOff0Low = vmullbq_int_u16((uint16x8_t)bitRevTabOff, one); + bitRevOff0High = vmulltq_int_u16((uint16x8_t)bitRevTabOff, one); + bitRevOff0Low = vshrq_n_u16((uint16x8_t)bitRevOff0Low, 3); + bitRevOff0High = vshrq_n_u16((uint16x8_t)bitRevOff0High, 3); + + blkCnt = (bitRevLen / 16); + while (blkCnt > 0) { + bitRevTabOff = vldrhq_u16(pBitRevTab); + pBitRevTab += 8; + + bitRevOff1Low = vmullbq_int_u16((uint16x8_t)bitRevTabOff, one); + bitRevOff1High = vmulltq_int_u16((uint16x8_t)bitRevTabOff, one); + bitRevOff1Low = vshrq_n_u16((uint16x8_t)bitRevOff1Low, 3); + bitRevOff1High = vshrq_n_u16((uint16x8_t)bitRevOff1High, 3); + + inLow = vldrwq_gather_shifted_offset_u32(src, bitRevOff0Low); + inHigh = vldrwq_gather_shifted_offset_u32(src, bitRevOff0High); + + vstrwq_scatter_shifted_offset_u32(src, bitRevOff0Low, inHigh); + vstrwq_scatter_shifted_offset_u32(src, bitRevOff0High, inLow); + + /* loop unrolling */ + bitRevTabOff = vldrhq_u16(pBitRevTab); + pBitRevTab += 8; + + bitRevOff0Low = vmullbq_int_u16((uint16x8_t)bitRevTabOff, one); + bitRevOff0High = vmulltq_int_u16((uint16x8_t)bitRevTabOff, one); + bitRevOff0Low = vshrq_n_u16((uint16x8_t)bitRevOff0Low, 3); + bitRevOff0High = vshrq_n_u16((uint16x8_t)bitRevOff0High, 3); + + inLow = vldrwq_gather_shifted_offset_u32(src, bitRevOff1Low); + inHigh = vldrwq_gather_shifted_offset_u32(src, bitRevOff1High); + + 
vstrwq_scatter_shifted_offset_u32(src, bitRevOff1Low, inHigh); + vstrwq_scatter_shifted_offset_u32(src, bitRevOff1High, inLow); + + blkCnt--; + } + + /* tail handling */ + blkCnt = bitRevLen & 0xf; + if (blkCnt == 8) { + inLow = vldrwq_gather_shifted_offset_u32(src, bitRevOff0Low); + inHigh = vldrwq_gather_shifted_offset_u32(src, bitRevOff0High); + + vstrwq_scatter_shifted_offset_u32(src, bitRevOff0Low, inHigh); + vstrwq_scatter_shifted_offset_u32(src, bitRevOff0High, inLow); + } else if (blkCnt == 12) { + /* FFT 16 special case */ + mve_pred16_t p = vctp16q(4); + + bitRevTabOff = vldrhq_z_u16(pBitRevTab, p); + + inLow = vldrwq_gather_shifted_offset_u32(src, bitRevOff0Low); + inHigh = vldrwq_gather_shifted_offset_u32(src, bitRevOff0High); + + vstrwq_scatter_shifted_offset_u32(src, bitRevOff0Low, inHigh); + vstrwq_scatter_shifted_offset_u32(src, bitRevOff0High, inLow); + + bitRevOff0Low = vmullbq_int_u16((uint16x8_t)bitRevTabOff, one); + bitRevOff0High = vmulltq_int_u16((uint16x8_t)bitRevTabOff, one); + bitRevOff0Low = vshrq_n_u16((uint16x8_t)bitRevOff0Low, 3); + bitRevOff0High = vshrq_n_u16((uint16x8_t)bitRevOff0High, 3); + + inLow = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff0Low, p); + inHigh = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff0High, p); + + vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff0Low, inHigh, p); + vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff0High, inLow, p); + } +} + +/** + @brief Out-of-place 32 bit reversal function for helium + @param[out] pDst points to destination buffer of unknown 32-bit data type + @param[in] pSrc points to input buffer of unknown 32-bit data type + @param[in] fftLen FFT length + @return none +*/ +__STATIC_INLINE void arm_bitreversal_32_outpl_mve(void *pDst, void *pSrc, uint32_t fftLen) +{ + uint32x4_t idxOffs0, idxOffs1, bitRevOffs0, bitRevOffs1; + uint32_t bitRevPos, blkCnt; + uint32_t *pDst32 = (uint32_t *) pDst; + + /* fwd indexes */ + idxOffs0 = vdupq_n_u32(0); + idxOffs1 = vdupq_n_u32(0); + idxOffs0[0] = 0; idxOffs0[2] = 4; + idxOffs1[0] = 8; idxOffs1[2] = 12; + + bitRevPos = (31 - __CLZ(fftLen)) + 5; + blkCnt = fftLen >> 2; + + /* issued earlier to increase gather load idx update / gather load distance */ + /* bit-reverse fwd indexes */ + bitRevOffs0 = vbrsrq(idxOffs0, bitRevPos); + bitRevOffs1 = vbrsrq(idxOffs1, bitRevPos); + while (blkCnt > 0) { + uint64x2_t vecIn; + + vecIn = vldrdq_gather_offset_u64(pSrc, (uint64x2_t) bitRevOffs0); + idxOffs0 = idxOffs0 + 16; + vst1q(pDst32, (uint32x4_t) vecIn); + pDst32 += 4; + bitRevOffs0 = vbrsrq(idxOffs0, bitRevPos); + + vecIn = vldrdq_gather_offset_u64(pSrc, (uint64x2_t) bitRevOffs1); + idxOffs1 = idxOffs1 + 16; + vst1q(pDst32, (uint32x4_t) vecIn); + pDst32 += 4; + bitRevOffs1 = vbrsrq(idxOffs1, bitRevPos); + + blkCnt--; + } +} + + +/** + @brief Out-of-place 16 bit reversal function for helium + @param[out] pDst points to destination buffer of unknown 16-bit data type + @param[in] pSrc points to input buffer of unknown 16-bit data type + @param[in] fftLen FFT length + @return none +*/ + +__STATIC_INLINE void arm_bitreversal_16_outpl_mve(void *pDst, void *pSrc, uint32_t fftLen) +{ + uint32x4_t idxOffs0, idxOffs1, bitRevOffs0, bitRevOffs1; + uint32_t bitRevPos, blkCnt; + uint16_t *pDst16 = (uint16_t *) pDst; + uint32_t incrIdx = 0; + + /* fwd indexes */ + idxOffs0 = vidupq_wb_u32(&incrIdx, 4); // {0, 4, 8, 12} + idxOffs1 = vidupq_wb_u32(&incrIdx, 4); // {16, 20, 24, 28} + + bitRevPos = (31 - __CLZ(fftLen)) + 4; + blkCnt = fftLen >> 3; + + /* issued earlier to increase gather 
load idx update / gather load distance */ + /* bit-reverse fwd indexes */ + bitRevOffs0 = vbrsrq(idxOffs0, bitRevPos); + bitRevOffs1 = vbrsrq(idxOffs1, bitRevPos); + while (blkCnt > 0) { + uint32x4_t vecIn; + + vecIn = vldrwq_gather_offset_s32(pSrc, bitRevOffs0); + idxOffs0 = idxOffs0 + 32; + vst1q(pDst16, (uint16x8_t) vecIn); + pDst16 += 8; + bitRevOffs0 = vbrsrq(idxOffs0, bitRevPos); + + vecIn = vldrwq_gather_offset_s32(pSrc, bitRevOffs1); + idxOffs1 = idxOffs1 + 32; + vst1q(pDst16, (uint16x8_t) vecIn); + pDst16 += 8; + bitRevOffs1 = vbrsrq(idxOffs1, bitRevPos); + + blkCnt--; + } +} + + #endif /* (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE)*/ @@ -55,4 +322,4 @@ extern "C" #endif -#endif /* _ARM_VEC_FFT_H_ */ \ No newline at end of file +#endif /* _ARM_VEC_FFT_H_ */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math.h index 43d8f46..dc32ca6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math.h @@ -1,11 +1,12 @@ /****************************************************************************** * @file arm_vec_math.h * @brief Public header file for CMSIS DSP Library - * @version V1.7.0 - * @date 15. October 2019 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* - * Copyright (c) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * Copyright (c) 2010-2021 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h index 71ff75d..bca9ef8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h @@ -1,9 +1,12 @@ /****************************************************************************** * @file arm_vec_math_f16.h * @brief Public header file for CMSIS DSP Library + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* - * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (c) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -65,11 +68,11 @@ __STATIC_INLINE f16x8_t vrecip_medprec_f16( b = 2.0f16 - xinv.f * ax; xinv.f = xinv.f * b; - xinv.f = vdupq_m(xinv.f, F16INFINITY, vcmpeqq(x, 0.0f)); + xinv.f = vdupq_m_n_f16(xinv.f, F16INFINITY, vcmpeqq_n_f16(x, 0.0f)); /* * restore sign */ - xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq(x, 0.0f)); + xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq_n_f16(x, 0.0f)); return xinv.f; } @@ -102,11 +105,11 @@ __STATIC_INLINE f16x8_t vrecip_hiprec_f16( b = 2.0f16 - xinv.f * ax; xinv.f = xinv.f * b; - xinv.f = vdupq_m(xinv.f, F16INFINITY, vcmpeqq(x, 0.0f)); + xinv.f = vdupq_m_n_f16(xinv.f, F16INFINITY, vcmpeqq_n_f16(x, 0.0f)); /* * restore sign */ - xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq(x, 0.0f)); + xinv.f = vnegq_m(xinv.f, xinv.f, vcmpltq_n_f16(x, 0.0f)); return xinv.f; } @@ -140,22 +143,17 @@ __STATIC_INLINE float16x8_t vtaylor_polyq_f16( return res; } -__STATIC_INLINE float16x8_t vmant_exp_f16( - float16x8_t x, - int16x8_t * e) -{ - any16x8_t r; - int16x8_t n; - - r.f = x; - n = r.i >> 10; - n = n - 15; - r.i = r.i - (n << 10); - - *e = n; - return r.f; -} - +#define VMANT_EXP_F16(x) \ + any16x8_t r; \ + int16x8_t n; \ + \ + r.f = x; \ + n = r.i >> 10; \ + n = n - 15; \ + r.i = r.i - (n << 10);\ + \ + vecExpUnBiased = n; \ + vecTmpFlt1 = r.f; __STATIC_INLINE float16x8_t vlogq_f16(float16x8_t vecIn) { @@ -167,7 +165,7 @@ __STATIC_INLINE float16x8_t vlogq_f16(float16x8_t vecIn) /* * extract exponent */ - vecTmpFlt1 = vmant_exp_f16(vecIn, &vecExpUnBiased); + VMANT_EXP_F16(vecIn); vecTmpFlt0 = vecTmpFlt1 * vecTmpFlt1; /* @@ -213,7 +211,7 @@ __STATIC_INLINE float16x8_t vlogq_f16(float16x8_t vecIn) */ vecAcc0 = vfmaq(vecAcc0, vecExpUnBiasedFlt, __logf_rng_f16); // set log0 down to -inf - vecAcc0 = vdupq_m(vecAcc0, -F16INFINITY, vcmpeqq(vecIn, 0.0f)); + vecAcc0 = vdupq_m_n_f16(vecAcc0, -(_Float16)F16INFINITY, vcmpeqq_n_f16(vecIn, 0.0f)); return vecAcc0; } @@ -230,7 +228,7 @@ __STATIC_INLINE float16x8_t vexpq_f16( // Reconstruct poly = (float16x8_t) (vqaddq_s16((int16x8_t) (poly), vqshlq_n_s16(m, 10))); - poly = vdupq_m(poly, 0.0f, vcmpltq_n_s16(m, -14)); + poly = vdupq_m_n_f16(poly, 0.0f16, vcmpltq_n_s16(m, -14)); return poly; } @@ -267,20 +265,20 @@ __STATIC_INLINE f16x8_t vrecip_f16(f16x8_t vecIn) vecW = vmulq(vecSx, v.f); // v.f = v.f * (8 + w * (-28 + w * (56 + w * (-70 + w *(56 + w * (-28 + w * (8 - w))))))); - vecTmp = vsubq(vdupq_n_f16(8.0f), vecW); - vecTmp = vfmasq(vecW, vecTmp, -28.0f); - vecTmp = vfmasq(vecW, vecTmp, 56.0f); - vecTmp = vfmasq(vecW, vecTmp, -70.0f); - vecTmp = vfmasq(vecW, vecTmp, 56.0f); - vecTmp = vfmasq(vecW, vecTmp, -28.0f); - vecTmp = vfmasq(vecW, vecTmp, 8.0f); + vecTmp = vsubq(vdupq_n_f16(8.0f16), vecW); + vecTmp = vfmasq_n_f16(vecW, vecTmp, -28.0f16); + vecTmp = vfmasq_n_f16(vecW, vecTmp, 56.0f16); + vecTmp = vfmasq_n_f16(vecW, vecTmp, -70.0f16); + vecTmp = vfmasq_n_f16(vecW, vecTmp, 56.0f16); + vecTmp = vfmasq_n_f16(vecW, vecTmp, -28.0f16); + vecTmp = vfmasq_n_f16(vecW, vecTmp, 8.0f16); v.f = vmulq(v.f, vecTmp); - v.f = vdupq_m(v.f, F16INFINITY, vcmpeqq(vecIn, 0.0f)); + v.f = vdupq_m_n_f16(v.f, F16INFINITY, vcmpeqq_n_f16(vecIn, 0.0f)); /* * restore sign */ - v.f = vnegq_m(v.f, v.f, vcmpltq(vecIn, 0.0f)); + v.f = vnegq_m(v.f, v.f, vcmpltq_n_f16(vecIn, 0.0f)); return v.f; } @@ -288,10 +286,10 @@ __STATIC_INLINE f16x8_t vtanhq_f16( f16x8_t val) { f16x8_t x = - vminnmq_f16(vmaxnmq_f16(val, vdupq_n_f16(-10.f)), vdupq_n_f16(10.0f)); - f16x8_t exp2x = vexpq_f16(vmulq_n_f16(x, 2.f)); - f16x8_t num = vsubq_n_f16(exp2x, 
1.f); - f16x8_t den = vaddq_n_f16(exp2x, 1.f); + vminnmq_f16(vmaxnmq_f16(val, vdupq_n_f16(-10.f16)), vdupq_n_f16(10.0f16)); + f16x8_t exp2x = vexpq_f16(vmulq_n_f16(x, 2.f16)); + f16x8_t num = vsubq_n_f16(exp2x, 1.f16); + f16x8_t den = vaddq_n_f16(exp2x, 1.f16); f16x8_t tanh = vmulq_f16(num, vrecip_f16(den)); return tanh; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h index fe20c48..30ad98d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file basic_math_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -99,6 +100,21 @@ extern "C" +/** + * @brief Floating-point vector multiplication. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ +void arm_mult_f64( +const float64_t * pSrcA, +const float64_t * pSrcB, + float64_t * pDst, + uint32_t blockSize); + + + /** * @brief Floating-point vector addition. * @param[in] pSrcA points to the first input vector @@ -114,6 +130,21 @@ extern "C" +/** + * @brief Floating-point vector addition. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_add_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + float64_t * pDst, + uint32_t blockSize); + + + /** * @brief Q7 vector addition. * @param[in] pSrcA points to the first input vector @@ -171,6 +202,21 @@ extern "C" + /** + * @brief Floating-point vector subtraction. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in each vector + */ + void arm_sub_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + float64_t * pDst, + uint32_t blockSize); + + + /** * @brief Q7 vector subtraction. * @param[in] pSrcA points to the first input vector @@ -228,6 +274,21 @@ extern "C" + /** + * @brief Multiplies a floating-point vector by a scalar. + * @param[in] pSrc points to the input vector + * @param[in] scale scale factor to be applied + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ + void arm_scale_f64( + const float64_t * pSrc, + float64_t scale, + float64_t * pDst, + uint32_t blockSize); + + + /** * @brief Multiplies a Q7 vector by a scalar. * @param[in] pSrc points to the input vector @@ -301,6 +362,18 @@ extern "C" +/** + * @brief Floating-point vector absolute value. 
+ * @param[in] pSrc points to the input buffer + * @param[out] pDst points to the output buffer + * @param[in] blockSize number of samples in each vector + */ +void arm_abs_f64( +const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize); + + /** * @brief Q15 vector absolute value. @@ -341,6 +414,21 @@ extern "C" +/** + * @brief Dot product of floating-point vectors. + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[in] blockSize number of samples in each vector + * @param[out] result output result returned here + */ +void arm_dot_prod_f64( +const float64_t * pSrcA, +const float64_t * pSrcB, + uint32_t blockSize, + float64_t * result); + + + /** * @brief Dot product of Q7 vectors. * @param[in] pSrcA points to the first input vector @@ -425,6 +513,21 @@ extern "C" uint32_t blockSize); +/** + * @brief Adds a constant offset to a floating-point vector. + * @param[in] pSrc points to the input vector + * @param[in] offset is the offset to be added + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ +void arm_offset_f64( +const float64_t * pSrc, + float64_t offset, + float64_t * pDst, + uint32_t blockSize); + + + /** * @brief Adds a constant offset to a floating-point vector. * @param[in] pSrc points to the input vector @@ -494,6 +597,20 @@ extern "C" uint32_t blockSize); + +/** + * @brief Negates the elements of a floating-point vector. + * @param[in] pSrc points to the input vector + * @param[out] pDst points to the output vector + * @param[in] blockSize number of samples in the vector + */ +void arm_negate_f64( +const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize); + + + /** * @brief Negates the elements of a Q7 vector. * @param[in] pSrc points to the input vector diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions_f16.h index f1d4aae..92f11da 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file basic_math_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions.h index c527018..0d6d58b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file bayes_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. 
July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -66,9 +67,10 @@ typedef struct /** * @brief Naive Gaussian Bayesian Estimator * - * @param[in] S points to a naive bayes instance structure - * @param[in] in points to the elements of the input vector. - * @param[in] pBuffer points to a buffer of length numberOfClasses + * @param[in] S points to a naive bayes instance structure + * @param[in] in points to the elements of the input vector. + * @param[out] *pOutputProbabilities points to a buffer of length numberOfClasses containing estimated probabilities + * @param[out] *pBufferB points to a temporary buffer of length numberOfClasses * @return The predicted class * */ @@ -76,7 +78,8 @@ typedef struct uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_instance_f32 *S, const float32_t * in, - float32_t *pBuffer); + float32_t *pOutputProbabilities, + float32_t *pBufferB); #ifdef __cplusplus diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions_f16.h index 46dabab..a16c49b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file bayes_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -57,9 +58,10 @@ typedef struct /** * @brief Naive Gaussian Bayesian Estimator * - * @param[in] S points to a naive bayes instance structure - * @param[in] in points to the elements of the input vector. - * @param[in] pBuffer points to a buffer of length numberOfClasses + * @param[in] S points to a naive bayes instance structure + * @param[in] in points to the elements of the input vector. 
+ * @param[out] *pOutputProbabilities points to a buffer of length numberOfClasses containing estimated probabilities + * @param[out] *pBufferB points to a temporary buffer of length numberOfClasses * @return The predicted class * */ @@ -67,7 +69,8 @@ typedef struct uint32_t arm_gaussian_naive_bayes_predict_f16(const arm_gaussian_naive_bayes_instance_f16 *S, const float16_t * in, - float16_t *pBuffer); + float16_t *pOutputProbabilities, + float16_t *pBufferB); #endif /*defined(ARM_FLOAT16_SUPPORTED)*/ #ifdef __cplusplus diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h index 5589a06..b4394de 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file complex_math_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -95,6 +96,18 @@ extern "C" uint32_t numSamples); + /** + * @brief Floating-point complex magnitude squared + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_squared_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t numSamples); + + /** * @brief Q31 complex magnitude squared * @param[in] pSrc points to the complex input vector @@ -131,6 +144,18 @@ extern "C" uint32_t numSamples); +/** + * @brief Floating-point complex magnitude + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t numSamples); + + /** * @brief Q31 complex magnitude * @param[in] pSrc points to the complex input vector @@ -154,6 +179,17 @@ extern "C" q15_t * pDst, uint32_t numSamples); + /** + * @brief Q15 complex magnitude + * @param[in] pSrc points to the complex input vector + * @param[out] pDst points to the real output vector + * @param[in] numSamples number of complex samples in the input vector + */ + void arm_cmplx_mag_fast_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t numSamples); + /** * @brief Q15 complex dot product @@ -287,6 +323,21 @@ extern "C" +/** + * @brief Floating-point complex-by-complex multiplication + * @param[in] pSrcA points to the first input vector + * @param[in] pSrcB points to the second input vector + * @param[out] pDst points to the output vector + * @param[in] numSamples number of complex samples in each vector + */ +void arm_cmplx_mult_cmplx_f64( +const float64_t * pSrcA, +const float64_t * pSrcB, + float64_t * pDst, + uint32_t numSamples); + + + #ifdef __cplusplus } #endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions_f16.h 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions_f16.h index 39d9fa9..e0baa6f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file complex_math_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions.h index 39218ba..886a23c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file controller_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -51,15 +52,35 @@ extern "C" */ - /** - * @ingroup groupController - */ - - /** - * @addtogroup SinCos - * @{ - */ +/** + @ingroup groupController + */ +/** + @defgroup SinCos Sine Cosine + + Computes the trigonometric sine and cosine values using a combination of table lookup + and linear interpolation. + There are separate functions for Q31 and floating-point data types. + The input to the floating-point version is in degrees while the + fixed-point Q31 have a scaled input with the range + [-1 0.9999] mapping to [-180 +180] degrees. + + The floating point function also allows values that are out of the usual range. When this happens, the function will + take extra time to adjust the input value to the range of [-180 180]. + + The result is accurate to 5 digits after the decimal point. + + The implementation is based on table lookup using 360 values together with linear interpolation. + The steps used are: + -# Calculation of the nearest integer table index. + -# Compute the fractional portion (fract) of the input. + -# Fetch the value corresponding to \c index from sine table to \c y0 and also value from \c index+1 to \c y1. + -# Sine value is computed as *psinVal = y0 + (fract * (y1 - y0)). + -# Fetch the value corresponding to \c index from cosine table to \c y0 and also value from \c index+1 to \c y1. + -# Cosine value is computed as *pcosVal = y0 + (fract * (y1 - y0)). + */ + /** * @brief Floating-point sin_cos function. 
* @param[in] theta input value in degrees @@ -83,14 +104,11 @@ extern "C" q31_t * pSinVal, q31_t * pCosVal); - /** - * @} end of SinCos group - */ - - /** - * @ingroup groupController - */ +/** + @ingroup groupController + */ + /** * @defgroup PID PID Motor Control * @@ -151,6 +169,7 @@ extern "C" /** + * @ingroup PID * @brief Instance structure for the Q15 PID Control. */ typedef struct @@ -169,6 +188,7 @@ extern "C" } arm_pid_instance_q15; /** + * @ingroup PID * @brief Instance structure for the Q31 PID Control. */ typedef struct @@ -183,6 +203,7 @@ extern "C" } arm_pid_instance_q31; /** + * @ingroup PID * @brief Instance structure for the floating-point PID Control. */ typedef struct @@ -254,12 +275,10 @@ extern "C" - /** - * @addtogroup PID - * @{ - */ + /** + * @ingroup PID * @brief Process function for the floating-point PID Control. * @param[in,out] S is an instance of the floating-point PID Control structure * @param[in] in input sample to process @@ -286,6 +305,7 @@ extern "C" } /** + @ingroup PID @brief Process function for the Q31 PID Control. @param[in,out] S points to an instance of the Q31 PID Control structure @param[in] in input sample to process @@ -331,6 +351,7 @@ __STATIC_FORCEINLINE q31_t arm_pid_q31( /** + @ingroup PID @brief Process function for the Q15 PID Control. @param[in,out] S points to an instance of the Q15 PID Control structure @param[in] in input sample to process @@ -383,9 +404,7 @@ __STATIC_FORCEINLINE q15_t arm_pid_q15( return (out); } - /** - * @} end of PID group - */ + /** * @ingroup groupController @@ -415,12 +434,10 @@ __STATIC_FORCEINLINE q15_t arm_pid_q15( * Refer to the function specific documentation below for usage guidelines. */ - /** - * @addtogroup park - * @{ - */ + /** + * @ingroup park * @brief Floating-point Park transform * @param[in] Ialpha input two-phase vector coordinate alpha * @param[in] Ibeta input two-phase vector coordinate beta @@ -450,6 +467,7 @@ __STATIC_FORCEINLINE q15_t arm_pid_q15( /** + @ingroup park @brief Park transform for Q31 version @param[in] Ialpha input two-phase vector coordinate alpha @param[in] Ibeta input two-phase vector coordinate beta @@ -495,9 +513,6 @@ __STATIC_FORCEINLINE void arm_park_q31( *pIq = __QSUB(product4, product3); } - /** - * @} end of park group - */ /** @@ -521,12 +536,10 @@ __STATIC_FORCEINLINE void arm_park_q31( * Refer to the function specific documentation below for usage guidelines. */ - /** - * @addtogroup inv_park - * @{ - */ + /** + * @ingroup inv_park * @brief Floating-point Inverse Park transform * @param[in] Id input coordinate of rotor reference frame d * @param[in] Iq input coordinate of rotor reference frame q @@ -553,6 +566,7 @@ __STATIC_FORCEINLINE void arm_park_q31( /** + @ingroup inv_park @brief Inverse Park transform for Q31 version @param[in] Id input coordinate of rotor reference frame d @param[in] Iq input coordinate of rotor reference frame q @@ -598,9 +612,6 @@ __STATIC_FORCEINLINE void arm_inv_park_q31( *pIbeta = __QADD(product4, product3); } - /** - * @} end of Inverse park group - */ /** * @ingroup groupController @@ -628,13 +639,10 @@ __STATIC_FORCEINLINE void arm_inv_park_q31( * Refer to the function specific documentation below for usage guidelines. 
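As a hedged usage sketch for the floating-point PID described above (the gain values and the run_pid_step wrapper are invented for illustration; only the arm_pid_* types and functions come from the library):

#include "arm_math.h"

float32_t run_pid_step(float32_t error)
{
    static arm_pid_instance_f32 pid;
    static int initialized = 0;

    if (!initialized)
    {
        pid.Kp = 0.8f;               /* proportional gain (example value)   */
        pid.Ki = 0.2f;               /* integral gain (example value)       */
        pid.Kd = 0.05f;              /* derivative gain (example value)     */
        arm_pid_init_f32(&pid, 1);   /* resetStateFlag = 1 clears the state */
        initialized = 1;
    }
    /* arm_pid_f32() updates the internal state and returns the new output. */
    return arm_pid_f32(&pid, error);
}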
*/ - /** - * @addtogroup clarke - * @{ - */ /** * + * @ingroup clarke * @brief Floating-point Clarke transform * @param[in] Ia input three-phase coordinate a * @param[in] Ib input three-phase coordinate b @@ -657,6 +665,7 @@ __STATIC_FORCEINLINE void arm_inv_park_q31( /** + @ingroup clarke @brief Clarke transform for Q31 version @param[in] Ia input three-phase coordinate a @param[in] Ib input three-phase coordinate b @@ -690,9 +699,6 @@ __STATIC_FORCEINLINE void arm_clarke_q31( *pIbeta = __QADD(product1, product2); } - /** - * @} end of clarke group - */ /** @@ -715,12 +721,10 @@ __STATIC_FORCEINLINE void arm_clarke_q31( * Refer to the function specific documentation below for usage guidelines. */ - /** - * @addtogroup inv_clarke - * @{ - */ + /** + * @ingroup inv_clarke * @brief Floating-point Inverse Clarke transform * @param[in] Ialpha input two-phase orthogonal vector axis alpha * @param[in] Ibeta input two-phase orthogonal vector axis beta @@ -743,6 +747,7 @@ __STATIC_FORCEINLINE void arm_clarke_q31( /** + @ingroup inv_clarke @brief Inverse Clarke transform for Q31 version @param[in] Ialpha input two-phase orthogonal vector axis alpha @param[in] Ibeta input two-phase orthogonal vector axis beta @@ -776,9 +781,7 @@ __STATIC_FORCEINLINE void arm_inv_clarke_q31( *pIb = __QSUB(product2, product1); } - /** - * @} end of inv_clarke group - */ + diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions_f16.h index a76e1f6..8fae483 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file controller_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/debug.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/debug.h new file mode 100644 index 0000000..6fb7183 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/debug.h @@ -0,0 +1,146 @@ +/****************************************************************************** + * @file basic_math_functions.h + * @brief Public header file for CMSIS DSP Library + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores + ******************************************************************************/ +/* + * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef _DEBUG_FUNCTIONS_H_ +#define _DEBUG_FUNCTIONS_H_ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h" + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h" + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h" + +#include + +#ifdef __cplusplus +extern "C" +{ +#endif + +#if defined(ARM_FLOAT16_SUPPORTED) +#define PROW_f16(S,NB) \ +{ \ + printf("{%f",(double)(S)[0]); \ + for(unsigned int i=1;i<(NB) ;i++) \ + { \ + printf(",%f",(double)(S)[i]);\ + } \ + printf("}"); \ +}; + +#define PV_f16(S,V,NB)\ +{ \ + printf("%s=",(S)); \ + PROW_f16((V),(NB)); \ + printf(";\n"); \ +}; + +#define PM_f16(S,M) \ +{ \ + printf("%s={",(S)); \ + for(unsigned int row=0;row<(M)->numRows;row++) \ + { \ + if (row != 0) \ + { \ + printf("\n,"); \ + } \ + PROW_f16((M)->pData + row * (M)->numCols, (M)->numCols);\ + } \ + printf("};\n"); \ +} + +#endif + +#define PROW_f32(S,NB) \ +{ \ + printf("{%f",(double)(S)[0]); \ + for(unsigned int i=1;i<(NB) ;i++) \ + { \ + printf(",%f",(double)(S)[i]);\ + } \ + printf("}"); \ +}; + +#define PV_f32(S,V,NB)\ +{ \ + printf("%s=",(S)); \ + PROW_f32((V),(NB)); \ + printf(";\n"); \ +}; + +#define PM_f32(S,M) \ +{ \ + printf("%s={",(S)); \ + for(unsigned int row=0;row<(M)->numRows;row++) \ + { \ + if (row != 0) \ + { \ + printf("\n,"); \ + } \ + PROW_f32((M)->pData + row * (M)->numCols, (M)->numCols);\ + } \ + printf("};\n"); \ +} + +#define PROW_f64(S,NB) \ +{ \ + printf("{%.20g",(double)(S)[0]); \ + for(unsigned int i=1;i<(NB) ;i++) \ + { \ + printf(",%.20g",(double)(S)[i]);\ + } \ + printf("}"); \ +}; + +#define PV_f64(S,V,NB) \ +{ \ + printf("%s=",(S)); \ + PROW_f64((V),(NB));\ + printf(";\n"); \ +}; + +#define PM_f64(S,M) \ +{ \ + printf("%s={",(S)); \ + for(unsigned int row=0;row<(M)->numRows;row++) \ + { \ + if (row != 0) \ + { \ + printf("\n,"); \ + } \ + PROW_f64((M)->pData + row * (M)->numCols, (M)->numCols);\ + } \ + printf("};\n"); \ +} + +#ifdef __cplusplus +} +#endif + +#endif /* ifndef _DEBUG_FUNCTIONS_H_ */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h index c1580cb..a8cc19d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file distance_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. 
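/* Illustrative sketch, not part of the patch: dumping a vector and a matrix with the
 * PV_f32/PM_f32 helpers added in debug.h above. Data values are made-up;
 * arm_mat_init_f32 is the standard CMSIS-DSP matrix initializer. */
static void dump_example(void)
{
    float32_t v[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    PV_f32("v", v, 4);                     /* prints v={1.000000,...};     */

    float32_t mData[2 * 3] = {1.0f, 2.0f, 3.0f,
                              4.0f, 5.0f, 6.0f};
    arm_matrix_instance_f32 M;
    arm_mat_init_f32(&M, 2, 3, mData);
    PM_f32("M", &M);                       /* prints the matrix row by row */
}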
July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -68,6 +69,17 @@ __attribute__((weak)) float __powisf2(float a, int b); float32_t arm_euclidean_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); +/** + * @brief Euclidean distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ + +float64_t arm_euclidean_distance_f64(const float64_t *pA,const float64_t *pB, uint32_t blockSize); + /** * @brief Bray-Curtis distance between two vectors * @param[in] pA First vector @@ -105,6 +117,17 @@ float32_t arm_canberra_distance_f32(const float32_t *pA,const float32_t *pB, uin float32_t arm_chebyshev_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); +/** + * @brief Chebyshev distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float64_t arm_chebyshev_distance_f64(const float64_t *pA,const float64_t *pB, uint32_t blockSize); + + /** * @brief Cityblock (Manhattan) distance between two vectors * @param[in] pA First vector @@ -115,6 +138,16 @@ float32_t arm_chebyshev_distance_f32(const float32_t *pA,const float32_t *pB, ui */ float32_t arm_cityblock_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); +/** + * @brief Cityblock (Manhattan) distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float64_t arm_cityblock_distance_f64(const float64_t *pA,const float64_t *pB, uint32_t blockSize); + /** * @brief Correlation distance between two vectors * @@ -140,6 +173,18 @@ float32_t arm_correlation_distance_f32(float32_t *pA,float32_t *pB, uint32_t blo float32_t arm_cosine_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize); +/** + * @brief Cosine distance between two vectors + * + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ + +float64_t arm_cosine_distance_f64(const float64_t *pA,const float64_t *pB, uint32_t blockSize); + /** * @brief Jensen-Shannon distance between two vectors * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions_f16.h index 0d71b6b..46ad233 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file distance_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
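/* Illustrative sketch, not part of the patch: the new float64_t distance kernels
 * declared above share the f32 call shape. Vector contents are made-up. */
static void distance_f64_example(void)
{
    const float64_t a[3] = {1.0, 2.0, 3.0};
    const float64_t b[3] = {2.0, 4.0, 6.0};

    float64_t dEuclid = arm_euclidean_distance_f64(a, b, 3); /* sqrt(1+4+9)     */
    float64_t dCity   = arm_cityblock_distance_f64(a, b, 3); /* 1 + 2 + 3 = 6   */
    float64_t dCheb   = arm_chebyshev_distance_f64(a, b, 3); /* max |diff| = 3  */
    (void)dEuclid; (void)dCity; (void)dCheb;
}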
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h index 1828f3f..758b0fb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file fast_math_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -32,6 +33,9 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + + #ifdef __cplusplus extern "C" { @@ -59,17 +63,8 @@ extern "C" * */ - /** - * @ingroup groupFastMath - */ - -/** - @addtogroup sin - @{ - */ - -/** + /** * @brief Fast approximation to the trigonometric sine function for floating-point data. * @param[in] x input value in radians. * @return sin(x). @@ -86,7 +81,6 @@ extern "C" q31_t arm_sin_q31( q31_t x); - /** * @brief Fast approximation to the trigonometric sine function for Q15 data. * @param[in] x Scaled input value in radians. @@ -95,14 +89,6 @@ extern "C" q15_t arm_sin_q15( q15_t x); -/** - @} end of sin group - */ - -/** - @addtogroup cos - @{ - */ /** * @brief Fast approximation to the trigonometric cosine function for floating-point data. @@ -130,10 +116,6 @@ extern "C" q15_t arm_cos_q15( q15_t x); -/** - @} end of cos group - */ - /** @brief Floating-point vector of log values. @@ -147,6 +129,46 @@ extern "C" float32_t * pDst, uint32_t blockSize); + + +/** + @brief Floating-point vector of log values. + @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + void arm_vlog_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize); + + + + /** + * @brief q31 vector of log values. + * @param[in] pSrc points to the input vector in q31 + * @param[out] pDst points to the output vector in q5.26 + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_vlog_q31(const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize); + + /** + * @brief q15 vector of log values. + * @param[in] pSrc points to the input vector in q15 + * @param[out] pDst points to the output vector in q4.11 + * @param[in] blockSize number of samples in each vector + * @return none + */ + void arm_vlog_q15(const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize); + + + /** @brief Floating-point vector of exp values. @param[in] pSrc points to the input vector @@ -159,6 +181,22 @@ extern "C" float32_t * pDst, uint32_t blockSize); + + +/** + @brief Floating-point vector of exp values. 
+ @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + void arm_vexp_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize); + + + /** * @defgroup SQRT Square Root * @@ -194,7 +232,7 @@ extern "C" - \ref ARM_MATH_ARGUMENT_ERROR : input value is negative; *pOut is set to 0 */ __STATIC_FORCEINLINE arm_status arm_sqrt_f32( - float32_t in, + const float32_t in, float32_t * pOut) { if (in >= 0.0f) @@ -252,33 +290,75 @@ arm_status arm_sqrt_q15( q15_t in, q15_t * pOut); + + /** - * @brief Vector Floating-point square root function. - * @param[in] pIn input vector. - * @param[out] pOut vector of square roots of input elements. - * @param[in] len length of input vector. - * @return The function returns ARM_MATH_SUCCESS if input value is positive value or ARM_MATH_ARGUMENT_ERROR if - * in is negative value and returns zero output for negative values. + * @} end of SQRT group */ - void arm_vsqrt_f32( - float32_t * pIn, - float32_t * pOut, - uint16_t len); - void arm_vsqrt_q31( - q31_t * pIn, - q31_t * pOut, - uint16_t len); + /** + @brief Fixed point division + @param[in] numerator Numerator + @param[in] denominator Denominator + @param[out] quotient Quotient value normalized between -1.0 and 1.0 + @param[out] shift Shift left value to get the unnormalized quotient + @return error status + + When dividing by 0, an error ARM_MATH_NANINF is returned. And the quotient is forced + to the saturated negative or positive value. + */ - void arm_vsqrt_q15( - q15_t * pIn, - q15_t * pOut, - uint16_t len); +arm_status arm_divide_q15(q15_t numerator, + q15_t denominator, + q15_t *quotient, + int16_t *shift); /** - * @} end of SQRT group + @brief Fixed point division + @param[in] numerator Numerator + @param[in] denominator Denominator + @param[out] quotient Quotient value normalized between -1.0 and 1.0 + @param[out] shift Shift left value to get the unnormalized quotient + @return error status + + When dividing by 0, an error ARM_MATH_NANINF is returned. And the quotient is forced + to the saturated negative or positive value. + */ + +arm_status arm_divide_q31(q31_t numerator, + q31_t denominator, + q31_t *quotient, + int16_t *shift); + + + + /** + @brief Arc tangent in radian of y/x using sign of x and y to determine right quadrant. + @param[in] y y coordinate + @param[in] x x coordinate + @param[out] result Result + @return error status. + */ + arm_status arm_atan2_f32(float32_t y,float32_t x,float32_t *result); + + + /** + @brief Arc tangent in radian of y/x using sign of x and y to determine right quadrant. + @param[in] y y coordinate + @param[in] x x coordinate + @param[out] result Result in Q2.29 + @return error status. */ + arm_status arm_atan2_q31(q31_t y,q31_t x,q31_t *result); + /** + @brief Arc tangent in radian of y/x using sign of x and y to determine right quadrant. + @param[in] y y coordinate + @param[in] x x coordinate + @param[out] result Result in Q2.13 + @return error status. 
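/* Illustrative sketch, not part of the patch: the new fixed-point divide and the
 * quadrant-aware atan2 declared above. Input values are made-up. */
static void fast_math_example(void)
{
    q15_t   quotient;
    int16_t shift;

    /* 0.25 / 0.75 in Q15; the quotient comes back normalized to [-1, 1) */
    arm_status st = arm_divide_q15((q15_t)0x2000, (q15_t)0x6000, &quotient, &shift);
    /* true ratio = (quotient / 32768.0) * 2^shift; ARM_MATH_NANINF flags divide-by-zero */

    float32_t angle;
    arm_atan2_f32(1.0f, 1.0f, &angle);     /* approximately pi/4 */
    (void)st; (void)angle;
}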
+ */ + arm_status arm_atan2_q15(q15_t y,q15_t x,q15_t *result); #ifdef __cplusplus } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h index 3be576e..c97ec64 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file fast_math_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -107,6 +108,15 @@ __STATIC_FORCEINLINE arm_status arm_sqrt_f16( float16_t * pDst, uint32_t blockSize); + /** + @brief Arc tangent in radian of y/x using sign of x and y to determine right quadrant. + @param[in] y y coordinate + @param[in] x x coordinate + @param[out] result Result + @return error status. + */ + arm_status arm_atan2_f16(float16_t y,float16_t x,float16_t *result); + #endif /*defined(ARM_FLOAT16_SUPPORTED)*/ #ifdef __cplusplus } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h index 4d41606..38a40ba 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file filtering_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -33,6 +34,7 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" #ifdef __cplusplus extern "C" @@ -88,6 +90,16 @@ extern "C" const float32_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ } arm_fir_instance_f32; + /** + * @brief Instance structure for the floating-point FIR filter. + */ + typedef struct + { + uint16_t numTaps; /**< number of filter coefficients in the filter. */ + float64_t *pState; /**< points to the state variable array. The array is of length numTaps+blockSize-1. */ + const float64_t *pCoeffs; /**< points to the coefficient array. The array is of length numTaps. */ + } arm_fir_instance_f64; + /** * @brief Processing function for the Q7 FIR filter. * @param[in] S points to an instance of the Q7 FIR filter structure. @@ -224,6 +236,19 @@ extern "C" float32_t * pDst, uint32_t blockSize); + /** + * @brief Processing function for the floating-point FIR filter. 
+ * @param[in] S points to an instance of the floating-point FIR structure. + * @param[in] pSrc points to the block of input data. + * @param[out] pDst points to the block of output data. + * @param[in] blockSize number of samples to process. + */ + void arm_fir_f64( + const arm_fir_instance_f64 * S, + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize); + /** * @brief Initialization function for the floating-point FIR filter. * @param[in,out] S points to an instance of the floating-point FIR filter structure. @@ -239,6 +264,21 @@ extern "C" float32_t * pState, uint32_t blockSize); + /** + * @brief Initialization function for the floating-point FIR filter. + * @param[in,out] S points to an instance of the floating-point FIR filter structure. + * @param[in] numTaps Number of filter coefficients in the filter. + * @param[in] pCoeffs points to the filter coefficients. + * @param[in] pState points to the state buffer. + * @param[in] blockSize number of samples that are processed at a time. + */ + void arm_fir_init_f64( + arm_fir_instance_f64 * S, + uint16_t numTaps, + const float64_t * pCoeffs, + float64_t * pState, + uint32_t blockSize); + /** * @brief Instance structure for the Q15 Biquad cascade filter. */ @@ -1171,10 +1211,17 @@ arm_status arm_fir_decimate_init_f32( #if defined(ARM_MATH_NEON) +/** + @brief Compute new coefficient arrays for use in vectorized filter (Neon only). + @param[in] numStages number of 2nd order stages in the filter. + @param[in] pCoeffs points to the original filter coefficients. + @param[in] pComputedCoeffs points to the new computed coefficients for the vectorized version. + @return none +*/ void arm_biquad_cascade_df2T_compute_coefs_f32( - arm_biquad_cascade_df2T_instance_f32 * S, uint8_t numStages, - float32_t * pCoeffs); + const float32_t * pCoeffs, + float32_t * pComputedCoeffs); #endif /** * @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. @@ -1787,6 +1834,22 @@ void arm_biquad_cascade_df2T_compute_coefs_f32( float32_t * pDst); + /** + * @brief Correlation of floating-point sequences. + * @param[in] pSrcA points to the first input sequence. + * @param[in] srcALen length of the first input sequence. + * @param[in] pSrcB points to the second input sequence. + * @param[in] srcBLen length of the second input sequence. + * @param[out] pDst points to the block of output data Length 2 * max(srcALen, srcBLen) - 1. 
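/* Illustrative sketch, not part of the patch: wiring up the new float64_t FIR.
 * Tap count, coefficients and block size are made-up; the state buffer length
 * follows the numTaps + blockSize - 1 rule from the instance documentation. */
#define FIR_TAPS   4
#define FIR_BLOCK  8

static const float64_t firCoeffs[FIR_TAPS] = {0.25, 0.25, 0.25, 0.25};
static float64_t firState[FIR_TAPS + FIR_BLOCK - 1];
static arm_fir_instance_f64 firF64;

static void fir_f64_example(const float64_t *src, float64_t *dst)
{
    arm_fir_init_f64(&firF64, FIR_TAPS, firCoeffs, firState, FIR_BLOCK);
    arm_fir_f64(&firF64, src, dst, FIR_BLOCK);   /* filter one block */
}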
+ */ + void arm_correlate_f64( + const float64_t * pSrcA, + uint32_t srcALen, + const float64_t * pSrcB, + uint32_t srcBLen, + float64_t * pDst); + + /** @brief Correlation of Q15 sequences @param[in] pSrcA points to the first input sequence @@ -2432,8 +2495,33 @@ void arm_correlate_fast_q31( } +/** + @brief Levinson Durbin + @param[in] phi autocovariance vector starting with lag 0 (length is nbCoefs + 1) + @param[out] a autoregressive coefficients + @param[out] err prediction error (variance) + @param[in] nbCoefs number of autoregressive coefficients + @return none + */ +void arm_levinson_durbin_f32(const float32_t *phi, + float32_t *a, + float32_t *err, + int nbCoefs); + + +/** + @brief Levinson Durbin + @param[in] phi autocovariance vector starting with lag 0 (length is nbCoefs + 1) + @param[out] a autoregressive coefficients + @param[out] err prediction error (variance) + @param[in] nbCoefs number of autoregressive coefficients + @return none + */ +void arm_levinson_durbin_q31(const q31_t *phi, + q31_t *a, + q31_t *err, + int nbCoefs); - #ifdef __cplusplus } #endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions_f16.h index 9abb53a..21f33f4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file filtering_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -214,6 +215,20 @@ extern "C" uint32_t srcBLen, float16_t * pDst); + +/** + @brief Levinson Durbin + @param[in] phi autocovariance vector starting with lag 0 (length is nbCoefs + 1) + @param[out] a autoregressive coefficients + @param[out] err prediction error (variance) + @param[in] nbCoefs number of autoregressive coefficients + @return none + */ +void arm_levinson_durbin_f16(const float16_t *phi, + float16_t *a, + float16_t *err, + int nbCoefs); + #endif /*defined(ARM_FLOAT16_SUPPORTED)*/ #ifdef __cplusplus } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions.h index e7cf537..a650fe8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file interpolation_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
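/* Illustrative sketch, not part of the patch: fitting an AR(3) model with the new
 * Levinson-Durbin kernel declared above. The autocovariance values are made-up;
 * phi must hold nbCoefs + 1 lags starting at lag 0, as documented. */
#define AR_ORDER 3

static void levinson_example(void)
{
    const float32_t phi[AR_ORDER + 1] = {1.0f, 0.5f, 0.25f, 0.125f};
    float32_t a[AR_ORDER];     /* autoregressive coefficients  */
    float32_t err;             /* prediction error (variance)  */

    arm_levinson_durbin_f32(phi, a, &err, AR_ORDER);
}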
@@ -120,18 +121,6 @@ extern "C" } arm_spline_instance_f32; - - - /** - * @ingroup groupInterpolation - */ - - /** - * @addtogroup SplineInterpolate - * @{ - */ - - /** * @brief Processing function for the floating-point cubic spline interpolation. * @param[in] S points to an instance of the floating-point spline structure. @@ -165,18 +154,7 @@ extern "C" float32_t * tempBuffer); - /** - * @} end of SplineInterpolate group - */ - - - - /** - * @addtogroup LinearInterpolate - * @{ - */ - - /** + /** * @brief Process function for the floating-point Linear Interpolation Function. * @param[in,out] S is an instance of the floating-point Linear Interpolation structure * @param[in] x input sample to process @@ -201,7 +179,7 @@ extern "C" * */ q31_t arm_linear_interp_q31( - q31_t * pYData, + const q31_t * pYData, q31_t x, uint32_t nValues); @@ -219,7 +197,7 @@ extern "C" * */ q15_t arm_linear_interp_q15( - q15_t * pYData, + const q15_t * pYData, q31_t x, uint32_t nValues); @@ -236,27 +214,10 @@ extern "C" * This function can support maximum of table size 2^12. */ q7_t arm_linear_interp_q7( - q7_t * pYData, + const q7_t * pYData, q31_t x, uint32_t nValues); - /** - * @} end of LinearInterpolate group - */ - - - - - /** - * @ingroup groupInterpolation - */ - - - /** - * @addtogroup BilinearInterpolate - * @{ - */ - /** * @brief Floating-point bilinear interpolation. * @param[in,out] S points to an instance of the interpolation structure. @@ -305,10 +266,6 @@ q7_t arm_linear_interp_q7( arm_bilinear_interp_instance_q7 * S, q31_t X, q31_t Y); - /** - * @} end of BilinearInterpolate group - */ - #ifdef __cplusplus diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions_f16.h index 46abd32..227ecb0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/interpolation_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file interpolation_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h index e5dce74..9bab8e6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file matrix_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. 
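/* Illustrative sketch, not part of the patch: the floating-point linear interpolation
 * path whose documentation is reflowed above. The instance fields (nValues, x1,
 * xSpacing, pYData) are taken from the library's f32 API rather than from the hunks
 * shown here, so treat them as an assumption; table values are made-up. */
static float32_t yTable[5] = {0.0f, 1.0f, 4.0f, 9.0f, 16.0f};

static void interp_example(void)
{
    arm_linear_interp_instance_f32 S = {
        .nValues  = 5,        /* number of table entries */
        .x1       = 0.0f,     /* x of the first sample   */
        .xSpacing = 1.0f,     /* spacing between samples */
        .pYData   = yTable
    };
    float32_t y = arm_linear_interp_f32(&S, 2.5f);   /* ~6.5, between y[2] and y[3] */
    (void)y;
}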
All rights reserved. @@ -108,6 +109,9 @@ extern "C" * return ARM_MATH_SUCCESS. */ + #define DEFAULT_HOUSEHOLDER_THRESHOLD_F64 (1.0e-16) + #define DEFAULT_HOUSEHOLDER_THRESHOLD_F32 (1.0e-12f) + /** * @brief Instance structure for the floating-point matrix structure. */ @@ -443,6 +447,21 @@ arm_status arm_mat_mult_q31( const arm_matrix_instance_q31 * pSrcB, arm_matrix_instance_q31 * pDst); + /** + * @brief Q31 matrix multiplication + * @param[in] pSrcA points to the first input matrix structure + * @param[in] pSrcB points to the second input matrix structure + * @param[out] pDst points to output matrix structure + * @param[in] pState points to the array for storing intermediate results + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ +arm_status arm_mat_mult_opt_q31( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst, + q31_t *pState); + /** * @brief Q31 matrix and vector multiplication * @param[in] pSrcMat points to the input matrix structure @@ -734,6 +753,88 @@ void arm_mat_init_f32( arm_matrix_instance_f64 * d, uint16_t * pp); +/** + @brief QR decomposition of a m x n floating point matrix with m >= n. + @param[in] pSrc points to input matrix structure. The source matrix is modified by the function. + @param[in] threshold norm2 threshold. + @param[out] pOutR points to output R matrix structure of dimension m x n + @param[out] pOutQ points to output Q matrix structure of dimension m x m + @param[out] pOutTau points to Householder scaling factors of dimension n + @param[inout] pTmpA points to a temporary vector of dimension m. + @param[inout] pTmpB points to a temporary vector of dimension n. + @return execution status + - \ref ARM_MATH_SUCCESS : Operation successful + - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed + - \ref ARM_MATH_SINGULAR : Input matrix is found to be singular (non-invertible) + */ + +arm_status arm_mat_qr_f32( + const arm_matrix_instance_f32 * pSrc, + const float32_t threshold, + arm_matrix_instance_f32 * pOutR, + arm_matrix_instance_f32 * pOutQ, + float32_t * pOutTau, + float32_t *pTmpA, + float32_t *pTmpB + ); + +/** + @brief QR decomposition of a m x n floating point matrix with m >= n. + @param[in] pSrc points to input matrix structure. The source matrix is modified by the function. + @param[in] threshold norm2 threshold. + @param[out] pOutR points to output R matrix structure of dimension m x n + @param[out] pOutQ points to output Q matrix structure of dimension m x m + @param[out] pOutTau points to Householder scaling factors of dimension n + @param[inout] pTmpA points to a temporary vector of dimension m. + @param[inout] pTmpB points to a temporary vector of dimension n. + @return execution status + - \ref ARM_MATH_SUCCESS : Operation successful + - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed + - \ref ARM_MATH_SINGULAR : Input matrix is found to be singular (non-invertible) + */ + +arm_status arm_mat_qr_f64( + const arm_matrix_instance_f64 * pSrc, + const float64_t threshold, + arm_matrix_instance_f64 * pOutR, + arm_matrix_instance_f64 * pOutQ, + float64_t * pOutTau, + float64_t *pTmpA, + float64_t *pTmpB + ); + +/** + @brief Householder transform of a floating point vector. + @param[in] pSrc points to the input vector. + @param[in] threshold norm2 threshold. + @param[in] blockSize dimension of the vector space. + @param[outQ] pOut points to the output vector. 
+ @return beta return the scaling factor beta + */ + +float32_t arm_householder_f32( + const float32_t * pSrc, + const float32_t threshold, + uint32_t blockSize, + float32_t * pOut + ); + +/** + @brief Householder transform of a double floating point vector. + @param[in] pSrc points to the input vector. + @param[in] threshold norm2 threshold. + @param[in] blockSize dimension of the vector space. + @param[outQ] pOut points to the output vector. + @return beta return the scaling factor beta + */ + +float64_t arm_householder_f64( + const float64_t * pSrc, + const float64_t threshold, + uint32_t blockSize, + float64_t * pOut + ); + #ifdef __cplusplus } #endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h index 0bc32b9..3f54651 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file matrix_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -40,6 +41,8 @@ extern "C" #if defined(ARM_FLOAT16_SUPPORTED) + #define DEFAULT_HOUSEHOLDER_THRESHOLD_F16 (1.0e-3f) + /** * @brief Instance structure for the floating-point matrix structure. */ @@ -211,6 +214,46 @@ void arm_mat_init_f16( arm_matrix_instance_f16 * dst); +/** + @brief QR decomposition of a m x n floating point matrix with m >= n. + @param[in] pSrc points to input matrix structure. The source matrix is modified by the function. + @param[in] threshold norm2 threshold. + @param[out] pOutR points to output R matrix structure of dimension m x n + @param[out] pOutQ points to output Q matrix structure of dimension m x m + @param[out] pOutTau points to Householder scaling factors of dimension n + @param[inout] pTmpA points to a temporary vector of dimension m. + @param[inout] pTmpB points to a temporary vector of dimension n. + @return execution status + - \ref ARM_MATH_SUCCESS : Operation successful + - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed + - \ref ARM_MATH_SINGULAR : Input matrix is found to be singular (non-invertible) + */ + +arm_status arm_mat_qr_f16( + const arm_matrix_instance_f16 * pSrc, + const float16_t threshold, + arm_matrix_instance_f16 * pOutR, + arm_matrix_instance_f16 * pOutQ, + float16_t * pOutTau, + float16_t *pTmpA, + float16_t *pTmpB + ); + +/** + @brief Householder transform of a half floating point vector. + @param[in] pSrc points to the input vector. + @param[in] threshold norm2 threshold. + @param[in] blockSize dimension of the vector space. + @param[outQ] pOut points to the output vector. 
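/* Illustrative sketch, not part of the patch: calling the new QR decomposition with
 * the workspace sizes listed in its documentation above (R: m x n, Q: m x m,
 * tau: n, tmpA: m, tmpB: n). Matrix contents are left to the caller; note that the
 * source matrix is overwritten by the function. */
#define QR_M 4
#define QR_N 3

static void qr_example(float32_t aData[QR_M * QR_N])
{
    static float32_t rData[QR_M * QR_N];
    static float32_t qData[QR_M * QR_M];
    static float32_t tau[QR_N];
    static float32_t tmpA[QR_M], tmpB[QR_N];

    arm_matrix_instance_f32 A = {QR_M, QR_N, aData};
    arm_matrix_instance_f32 R = {QR_M, QR_N, rData};
    arm_matrix_instance_f32 Q = {QR_M, QR_M, qData};

    arm_status st = arm_mat_qr_f32(&A, DEFAULT_HOUSEHOLDER_THRESHOLD_F32,
                                   &R, &Q, tau, tmpA, tmpB);
    (void)st;   /* ARM_MATH_SUCCESS, ARM_MATH_SIZE_MISMATCH or ARM_MATH_SINGULAR */
}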
+ @return beta return the scaling factor beta + */ + +float16_t arm_householder_f16( + const float16_t * pSrc, + const float16_t threshold, + uint32_t blockSize, + float16_t * pOut + ); #endif /*defined(ARM_FLOAT16_SUPPORTED)*/ #ifdef __cplusplus diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h new file mode 100644 index 0000000..5b0f55d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h @@ -0,0 +1,640 @@ +/****************************************************************************** + * @file matrix_utils.h + * @brief Public header file for CMSIS DSP Library + * @version V1.11.0 + * @date 30 May 2022 + * Target Processor: Cortex-M and Cortex-A cores + ******************************************************************************/ +/* + * Copyright (c) 2010-2022 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#ifndef _MATRIX_UTILS_H_ +#define _MATRIX_UTILS_H_ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_memory.h" + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h" + +#ifdef __cplusplus +extern "C" +{ +#endif + +#define ELEM(A,ROW,COL) &((A)->pData[(A)->numCols* (ROW) + (COL)]) + +#define SCALE_COL_T(T,CAST,A,ROW,v,i) \ +{ \ + int32_t w; \ + T *data = (A)->pData; \ + const int32_t numCols = (A)->numCols; \ + const int32_t nb = (A)->numRows - ROW;\ + \ + data += i + numCols * (ROW); \ + \ + for(w=0;w < nb; w++) \ + { \ + *data *= CAST v; \ + data += numCols; \ + } \ +} + +#define COPY_COL_T(T,A,ROW,COL,DST) \ +{ \ + uint32_t row; \ + T *pb=DST; \ + T *pa = (A)->pData + ROW * (A)->numCols + COL;\ + for(row = ROW; row < (A)->numRows; row ++) \ + { \ + *pb++ = *pa; \ + pa += (A)->numCols; \ + } \ +} + +#if defined(ARM_FLOAT16_SUPPORTED) +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) + +#define SWAP_ROWS_F16(A,COL,i,j) \ + { \ + int cnt = ((A)->numCols)-(COL); \ + int32_t w; \ + float16_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols; \ + \ + for(w=(COL);w < numCols; w+=8) \ + { \ + f16x8_t tmpa,tmpb; \ + mve_pred16_t p0 = vctp16q(cnt); \ + \ + tmpa=vldrhq_z_f16(&data[i*numCols + w],p0);\ + tmpb=vldrhq_z_f16(&data[j*numCols + w],p0);\ + \ + vstrhq_p(&data[i*numCols + w], tmpb, p0); \ + vstrhq_p(&data[j*numCols + w], tmpa, p0); \ + \ + cnt -= 8; \ + } \ + } + +#define SCALE_ROW_F16(A,COL,v,i) \ +{ \ + int cnt = ((A)->numCols)-(COL); \ + int32_t w; \ + float16_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols; \ + \ + for(w=(COL);w < numCols; w+=8) \ + { \ + f16x8_t tmpa; \ + mve_pred16_t p0 = vctp16q(cnt); \ + tmpa = vldrhq_z_f16(&data[i*numCols + w],p0);\ + tmpa = 
vmulq_n_f16(tmpa,(_Float16)v); \ + vstrhq_p(&data[i*numCols + w], tmpa, p0); \ + cnt -= 8; \ + } \ + \ +} + +#define MAC_ROW_F16(COL,A,i,v,B,j) \ +{ \ + int cnt = ((A)->numCols)-(COL); \ + int32_t w; \ + float16_t *dataA = (A)->pData; \ + float16_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols; \ + \ + for(w=(COL);w < numCols; w+=8) \ + { \ + f16x8_t tmpa,tmpb; \ + mve_pred16_t p0 = vctp16q(cnt); \ + tmpa = vldrhq_z_f16(&dataA[i*numCols + w],p0);\ + tmpb = vldrhq_z_f16(&dataB[j*numCols + w],p0);\ + tmpa = vfmaq_n_f16(tmpa,tmpb,v); \ + vstrhq_p(&dataA[i*numCols + w], tmpa, p0); \ + cnt -= 8; \ + } \ + \ +} + +#define MAS_ROW_F16(COL,A,i,v,B,j) \ +{ \ + int cnt = ((A)->numCols)-(COL); \ + int32_t w; \ + float16_t *dataA = (A)->pData; \ + float16_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols; \ + f16x8_t vec=vdupq_n_f16(v); \ + \ + for(w=(COL);w < numCols; w+=8) \ + { \ + f16x8_t tmpa,tmpb; \ + mve_pred16_t p0 = vctp16q(cnt); \ + tmpa = vldrhq_z_f16(&dataA[i*numCols + w],p0);\ + tmpb = vldrhq_z_f16(&dataB[j*numCols + w],p0);\ + tmpa = vfmsq_f16(tmpa,tmpb,vec); \ + vstrhq_p(&dataA[i*numCols + w], tmpa, p0); \ + cnt -= 8; \ + } \ + \ +} + +#else + + +#define SWAP_ROWS_F16(A,COL,i,j) \ +{ \ + int32_t w; \ + float16_t *dataI = (A)->pData; \ + float16_t *dataJ = (A)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols-(COL); \ + \ + dataI += i*numCols + (COL); \ + dataJ += j*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + float16_t tmp; \ + tmp = *dataI; \ + *dataI++ = *dataJ; \ + *dataJ++ = tmp; \ + } \ +} + +#define SCALE_ROW_F16(A,COL,v,i) \ +{ \ + int32_t w; \ + float16_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols-(COL); \ + \ + data += i*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *data++ *= (_Float16)v; \ + } \ +} + + +#define MAC_ROW_F16(COL,A,i,v,B,j) \ +{ \ + int32_t w; \ + float16_t *dataA = (A)->pData; \ + float16_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols; \ + const int32_t nb = numCols-(COL); \ + \ + dataA += i*numCols + (COL); \ + dataB += j*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *dataA++ += (_Float16)v * (_Float16)*dataB++;\ + } \ +} + +#define MAS_ROW_F16(COL,A,i,v,B,j) \ +{ \ + int32_t w; \ + float16_t *dataA = (A)->pData; \ + float16_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols; \ + const int32_t nb = numCols-(COL); \ + \ + dataA += i*numCols + (COL); \ + dataB += j*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *dataA++ -= (_Float16)v * (_Float16)*dataB++;\ + } \ +} + +#endif /*defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE)*/ + +/* Functions with only a scalar version */ +#define COPY_COL_F16(A,ROW,COL,DST) \ + COPY_COL_T(float16_t,A,ROW,COL,DST) + +#define SCALE_COL_F16(A,ROW,v,i) \ + SCALE_COL_T(float16_t,(_Float16),A,ROW,v,i) + +#endif /* defined(ARM_FLOAT16_SUPPORTED)*/ + +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + +#define SWAP_ROWS_F32(A,COL,i,j) \ + { \ + int cnt = ((A)->numCols)-(COL); \ + float32_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols; \ + int32_t w; \ + \ + for(w=(COL);w < numCols; w+=4) \ + { \ + f32x4_t tmpa,tmpb; \ + mve_pred16_t p0 = vctp32q(cnt); \ + \ + tmpa=vldrwq_z_f32(&data[i*numCols + w],p0);\ + tmpb=vldrwq_z_f32(&data[j*numCols + w],p0);\ + \ + vstrwq_p(&data[i*numCols + w], tmpb, p0); \ + vstrwq_p(&data[j*numCols + w], tmpa, p0); \ + \ + cnt -= 4; \ + } \ + } + +#define MAC_ROW_F32(COL,A,i,v,B,j) \ +{ 
\ + int cnt = ((A)->numCols)-(COL); \ + float32_t *dataA = (A)->pData; \ + float32_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols; \ + int32_t w; \ + \ + for(w=(COL);w < numCols; w+=4) \ + { \ + f32x4_t tmpa,tmpb; \ + mve_pred16_t p0 = vctp32q(cnt); \ + tmpa = vldrwq_z_f32(&dataA[i*numCols + w],p0);\ + tmpb = vldrwq_z_f32(&dataB[j*numCols + w],p0);\ + tmpa = vfmaq_n_f32(tmpa,tmpb,v); \ + vstrwq_p(&dataA[i*numCols + w], tmpa, p0); \ + cnt -= 4; \ + } \ + \ +} + +#define MAS_ROW_F32(COL,A,i,v,B,j) \ +{ \ + int cnt = ((A)->numCols)-(COL); \ + float32_t *dataA = (A)->pData; \ + float32_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols; \ + int32_t w; \ + f32x4_t vec=vdupq_n_f32(v); \ + \ + for(w=(COL);w < numCols; w+=4) \ + { \ + f32x4_t tmpa,tmpb; \ + mve_pred16_t p0 = vctp32q(cnt); \ + tmpa = vldrwq_z_f32(&dataA[i*numCols + w],p0);\ + tmpb = vldrwq_z_f32(&dataB[j*numCols + w],p0);\ + tmpa = vfmsq_f32(tmpa,tmpb,vec); \ + vstrwq_p(&dataA[i*numCols + w], tmpa, p0); \ + cnt -= 4; \ + } \ + \ +} + +#define SCALE_ROW_F32(A,COL,v,i) \ +{ \ + int cnt = ((A)->numCols)-(COL); \ + float32_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols; \ + int32_t w; \ + \ + for(w=(COL);w < numCols; w+=4) \ + { \ + f32x4_t tmpa; \ + mve_pred16_t p0 = vctp32q(cnt); \ + tmpa = vldrwq_z_f32(&data[i*numCols + w],p0);\ + tmpa = vmulq_n_f32(tmpa,v); \ + vstrwq_p(&data[i*numCols + w], tmpa, p0); \ + cnt -= 4; \ + } \ + \ +} + +#elif defined(ARM_MATH_NEON) && !defined(ARM_MATH_AUTOVECTORIZE) + +#define SWAP_ROWS_F32(A,COL,i,j) \ +{ \ + int32_t w; \ + float32_t *dataI = (A)->pData; \ + float32_t *dataJ = (A)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols - COL; \ + \ + dataI += i*numCols + (COL); \ + dataJ += j*numCols + (COL); \ + \ + float32_t tmp; \ + \ + for(w=0;w < nb; w++) \ + { \ + tmp = *dataI; \ + *dataI++ = *dataJ; \ + *dataJ++ = tmp; \ + } \ +} + +#define MAC_ROW_F32(COL,A,i,v,B,j) \ +{ \ + float32_t *dataA = (A)->pData; \ + float32_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols - (COL); \ + int32_t nbElems; \ + f32x4_t vec = vdupq_n_f32(v); \ + \ + nbElems = nb >> 2; \ + \ + dataA += i*numCols + (COL); \ + dataB += j*numCols + (COL); \ + \ + while(nbElems>0) \ + { \ + f32x4_t tmpa,tmpb; \ + tmpa = vld1q_f32(dataA,p0); \ + tmpb = vld1q_f32(dataB,p0); \ + tmpa = vmlaq_f32(tmpa,tmpb,vec);\ + vst1q_f32(dataA, tmpa, p0); \ + nbElems--; \ + dataA += 4; \ + dataB += 4; \ + } \ + \ + nbElems = nb & 3; \ + while(nbElems > 0) \ + { \ + *dataA++ += v* *dataB++; \ + nbElems--; \ + } \ +} + +#define MAS_ROW_F32(COL,A,i,v,B,j) \ +{ \ + float32_t *dataA = (A)->pData; \ + float32_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols - (COL); \ + int32_t nbElems; \ + f32x4_t vec = vdupq_n_f32(v); \ + \ + nbElems = nb >> 2; \ + \ + dataA += i*numCols + (COL); \ + dataB += j*numCols + (COL); \ + \ + while(nbElems>0) \ + { \ + f32x4_t tmpa,tmpb; \ + tmpa = vld1q_f32(dataA); \ + tmpb = vld1q_f32(dataB); \ + tmpa = vmlsq_f32(tmpa,tmpb,vec);\ + vst1q_f32(dataA, tmpa); \ + nbElems--; \ + dataA += 4; \ + dataB += 4; \ + } \ + \ + nbElems = nb & 3; \ + while(nbElems > 0) \ + { \ + *dataA++ -= v* *dataB++; \ + nbElems--; \ + } \ +} + +#define SCALE_ROW_F32(A,COL,v,i) \ +{ \ + float32_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols; \ + const int32_t nb = numCols - (COL); \ + int32_t nbElems; \ + f32x4_t vec = vdupq_n_f32(v); \ + \ + nbElems = nb >> 2; \ + \ + 
data += i*numCols + (COL); \ + while(nbElems>0) \ + { \ + f32x4_t tmpa; \ + tmpa = vld1q_f32(data); \ + tmpa = vmulq_f32(tmpa,vec); \ + vst1q_f32(data, tmpa); \ + data += 4; \ + nbElems --; \ + } \ + \ + nbElems = nb & 3; \ + while(nbElems > 0) \ + { \ + *data++ *= v; \ + nbElems--; \ + } \ + \ +} + +#else + +#define SWAP_ROWS_F32(A,COL,i,j) \ +{ \ + int32_t w; \ + float32_t tmp; \ + float32_t *dataI = (A)->pData; \ + float32_t *dataJ = (A)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols - COL; \ + \ + dataI += i*numCols + (COL); \ + dataJ += j*numCols + (COL); \ + \ + \ + for(w=0;w < nb; w++) \ + { \ + tmp = *dataI; \ + *dataI++ = *dataJ; \ + *dataJ++ = tmp; \ + } \ +} + +#define SCALE_ROW_F32(A,COL,v,i) \ +{ \ + int32_t w; \ + float32_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols - COL; \ + \ + data += i*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *data++ *= v; \ + } \ +} + + +#define MAC_ROW_F32(COL,A,i,v,B,j) \ +{ \ + int32_t w; \ + float32_t *dataA = (A)->pData; \ + float32_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols-(COL); \ + \ + dataA = dataA + i*numCols + (COL); \ + dataB = dataB + j*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *dataA++ += v* *dataB++; \ + } \ +} + +#define MAS_ROW_F32(COL,A,i,v,B,j) \ +{ \ + int32_t w; \ + float32_t *dataA = (A)->pData; \ + float32_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols-(COL); \ + \ + dataA = dataA + i*numCols + (COL); \ + dataB = dataB + j*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *dataA++ -= v* *dataB++; \ + } \ +} + +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ + + +/* Functions with only a scalar version */ + +#define COPY_COL_F32(A,ROW,COL,DST) \ + COPY_COL_T(float32_t,A,ROW,COL,DST) + +#define COPY_COL_F64(A,ROW,COL,DST) \ + COPY_COL_T(float64_t,A,ROW,COL,DST) + +#define SWAP_COLS_F32(A,COL,i,j) \ +{ \ + int32_t w; \ + float32_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols; \ + for(w=(COL);w < numCols; w++) \ + { \ + float32_t tmp; \ + tmp = data[w*numCols + i]; \ + data[w*numCols + i] = data[w*numCols + j];\ + data[w*numCols + j] = tmp; \ + } \ +} + +#define SCALE_COL_F32(A,ROW,v,i) \ + SCALE_COL_T(float32_t,,A,ROW,v,i) + +#define SWAP_ROWS_F64(A,COL,i,j) \ +{ \ + int32_t w; \ + float64_t *dataI = (A)->pData; \ + float64_t *dataJ = (A)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols-(COL); \ + \ + dataI += i*numCols + (COL); \ + dataJ += j*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + float64_t tmp; \ + tmp = *dataI; \ + *dataI++ = *dataJ; \ + *dataJ++ = tmp; \ + } \ +} + +#define SWAP_COLS_F64(A,COL,i,j) \ +{ \ + int32_t w; \ + float64_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols; \ + for(w=(COL);w < numCols; w++) \ + { \ + float64_t tmp; \ + tmp = data[w*numCols + i]; \ + data[w*numCols + i] = data[w*numCols + j];\ + data[w*numCols + j] = tmp; \ + } \ +} + +#define SCALE_ROW_F64(A,COL,v,i) \ +{ \ + int32_t w; \ + float64_t *data = (A)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols-(COL); \ + \ + data += i*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *data++ *= v; \ + } \ +} + +#define SCALE_COL_F64(A,ROW,v,i) \ + SCALE_COL_T(float64_t,,A,ROW,v,i) + +#define MAC_ROW_F64(COL,A,i,v,B,j) \ +{ \ + int32_t w; \ + float64_t *dataA = (A)->pData; \ + float64_t *dataB = (B)->pData; \ 
+ const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols-(COL); \ + \ + dataA += i*numCols + (COL); \ + dataB += j*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *dataA++ += v* *dataB++; \ + } \ +} + +#define MAS_ROW_F64(COL,A,i,v,B,j) \ +{ \ + int32_t w; \ + float64_t *dataA = (A)->pData; \ + float64_t *dataB = (B)->pData; \ + const int32_t numCols = (A)->numCols;\ + const int32_t nb = numCols-(COL); \ + \ + dataA += i*numCols + (COL); \ + dataB += j*numCols + (COL); \ + \ + for(w=0;w < nb; w++) \ + { \ + *dataA++ -= v* *dataB++; \ + } \ +} + +#ifdef __cplusplus +} +#endif + +#endif /* ifndef _MATRIX_UTILS_H_ */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h index bb2a9b6..1e36a51 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h @@ -33,7 +33,7 @@ But those are not always available or use a restricted set of intrinsics. */ - + #ifndef _NONE_H_ #define _NONE_H_ @@ -44,7 +44,7 @@ extern "C" { #endif - + /* @@ -59,7 +59,7 @@ MSVC is not going to be used to cross-compile to ARM. So, having a MSVC compiler file in Core or Core_A would not make sense. */ -#if defined ( _MSC_VER ) || defined(__GNUC_PYTHON__) +#if defined ( _MSC_VER ) || defined(__GNUC_PYTHON__) || defined(__APPLE_CC__) __STATIC_FORCEINLINE uint8_t __CLZ(uint32_t data) { if (data == 0U) { return 32U; } @@ -215,6 +215,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) #define __PKHTB(ARG1, ARG2, ARG3) ( (((int32_t)(ARG1) << 0) & (int32_t)0xFFFF0000) | \ (((int32_t)(ARG2) >> ARG3) & (int32_t)0x0000FFFF) ) + #define __SXTAB16_RORn(ARG1, ARG2, ARG3) __SXTAB16(ARG1, __ROR(ARG2, ARG3)) /* * @brief C custom defined SADD16 (by Edge Impulse) @@ -249,7 +250,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) #endif - + // Patched by Edge Impulse, remove `!defined (ARM_MATH_DSP)` check /* @@ -584,10 +585,11 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) { return (sum + (int32_t) (((int64_t) x * y) >> 32)); } - +#if defined ( _MSC_VER ) || defined(__GNUC_PYTHON__) || defined(__APPLE_CC__) // Rotate right, dual extract 8-bits and sign extend each to 16-bits. 
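/* Illustrative sketch, not part of the patch: the row-operation helpers from the new
 * matrix_utils.h above are internal macros shared by the matrix kernels; this shows
 * what the scalar variants do on a small matrix. Data values are made-up. */
static void matrix_utils_example(void)
{
    float32_t mData[3 * 3] = {
        1.0f, 2.0f, 3.0f,
        4.0f, 5.0f, 6.0f,
        7.0f, 8.0f, 9.0f
    };
    arm_matrix_instance_f32 M = {3, 3, mData};

    float32_t *p = ELEM(&M, 1, 2);       /* pointer to the element at row 1, col 2 (6.0) */
    SWAP_ROWS_F32(&M, 0, 0, 2);          /* swap rows 0 and 2, starting at column 0      */
    SCALE_ROW_F32(&M, 0, 2.0f, 1);       /* row 1 *= 2.0, starting at column 0           */
    MAC_ROW_F32(0, &M, 0, 0.5f, &M, 1);  /* row 0 += 0.5 * row 1, from column 0          */
    (void)p;
}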
// rotate value must be 8,16 or 24 // Patched by Edge Impulse to polyfill x86 support + // Patched by Edge Impulse for IAR Workbench __STATIC_FORCEINLINE uint32_t __SXTB16_RORn(uint32_t val1, uint32_t rotate) { uint32_t ret; @@ -601,7 +603,7 @@ __STATIC_FORCEINLINE uint32_t __ROR(uint32_t op1, uint32_t op2) ret |= ((uint32_t)a16 & 0xffff); return ret; } - +#endif // Dual sign-extended 8 to 16-bit addition // Patched by Edge Impulse to polyfill x86 support __STATIC_FORCEINLINE uint32_t __SXTAB16(uint32_t val1, uint32_t val2) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/quaternion_math_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/quaternion_math_functions.h index e7d08e9..8192cd8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/quaternion_math_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/quaternion_math_functions.h @@ -1,6 +1,10 @@ /****************************************************************************** * @file quaternion_math_functions.h * @brief Public header file for CMSIS DSP Library + * @version V1.10.0 + * @date 08 July 2021 + * + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2021 Arm Limited or its affiliates. All rights reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h index 337057a..866e467 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file statistics_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.1 + * @date 14 July 2022 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -169,6 +170,18 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, float32_t * pResult); + /** + * @brief Sum of the squares of the elements of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_power_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult); + + /** * @brief Sum of the squares of the elements of a Q15 vector. * @param[in] pSrc is input pointer @@ -241,6 +254,18 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, float32_t * pResult); + /** + * @brief Mean value of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_mean_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult); + + /** * @brief Variance of the elements of a floating-point vector. 
* @param[in] pSrc is input pointer @@ -253,6 +278,18 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, float32_t * pResult); + /** + * @brief Variance of the elements of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_var_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult); + + /** * @brief Variance of the elements of a Q31 vector. * @param[in] pSrc is input pointer @@ -325,6 +362,18 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, float32_t * pResult); + /** + * @brief Standard deviation of the elements of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + void arm_std_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult); + + /** * @brief Standard deviation of the elements of a Q31 vector. * @param[in] pSrc is input pointer @@ -363,6 +412,30 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, q7_t * result, uint32_t * index); + /** + * @brief Minimum value of absolute values of a Q7 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] result is output pointer + * @param[in] index is the array index of the minimum value in the input buffer. + */ + void arm_absmin_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * result, + uint32_t * index); + + /** + * @brief Minimum value of absolute values of a Q7 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] result is output pointer + */ + void arm_absmin_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * result); + /** * @brief Minimum value of a Q15 vector. @@ -377,6 +450,30 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, q15_t * pResult, uint32_t * pIndex); +/** + * @brief Minimum value of absolute values of a Q15 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[in] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_absmin_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex); + + /** + * @brief Minimum value of absolute values of a Q15 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + */ + void arm_absmin_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult); + /** * @brief Minimum value of a Q31 vector. @@ -391,6 +488,30 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, q31_t * pResult, uint32_t * pIndex); + /** + * @brief Minimum value of absolute values of a Q31 vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[out] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_absmin_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex); + + /** + * @brief Minimum value of absolute values of a Q31 vector. 
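/* Illustrative sketch, not part of the patch: the float64_t statistics kernels declared
 * above. With this made-up input the mean is 3.0 and the sum of squares is 55.0; the
 * variance is expected to follow the unbiased (blockSize - 1) normalisation of the
 * existing arm_var_f32 kernel. */
static void stats_f64_example(void)
{
    const float64_t x[5] = {1.0, 2.0, 3.0, 4.0, 5.0};
    float64_t mean, variance, stdDev, power;

    arm_mean_f64(x, 5, &mean);
    arm_var_f64(x, 5, &variance);
    arm_std_f64(x, 5, &stdDev);      /* sqrt(variance)  */
    arm_power_f64(x, 5, &power);     /* sum of squares  */
}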
+ * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + */ + void arm_absmin_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult); + /** * @brief Minimum value of a floating-point vector. @@ -405,6 +526,68 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, float32_t * pResult, uint32_t * pIndex); + /** + * @brief Minimum value of absolute values of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[out] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_absmin_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex); + + /** + * @brief Minimum value of absolute values of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + */ + void arm_absmin_no_idx_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult); + + + /** + * @brief Minimum value of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[out] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_min_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult, + uint32_t * pIndex); + + /** + * @brief Minimum value of absolute values of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[out] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_absmin_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult, + uint32_t * pIndex); + + /** + * @brief Minimum value of absolute values of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + */ + void arm_absmin_no_idx_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult); + /** * @brief Maximum value of a Q7 vector. @@ -419,6 +602,30 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, q7_t * pResult, uint32_t * pIndex); +/** + * @brief Maximum value of absolute values of a Q7 vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_absmax_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult, + uint32_t * pIndex); + +/** + * @brief Maximum value of absolute values of a Q7 vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + */ + void arm_absmax_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult); + /** * @brief Maximum value of a Q15 vector. @@ -433,6 +640,29 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, q15_t * pResult, uint32_t * pIndex); +/** + * @brief Maximum value of absolute values of a Q15 vector. 
+ * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_absmax_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex); + + /** + * @brief Maximum value of absolute values of a Q15 vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + */ + void arm_absmax_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult); /** * @brief Maximum value of a Q31 vector. @@ -447,6 +677,29 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, q31_t * pResult, uint32_t * pIndex); +/** + * @brief Maximum value of absolute values of a Q31 vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_absmax_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex); + + /** + * @brief Maximum value of absolute values of a Q31 vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + */ + void arm_absmax_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult); /** * @brief Maximum value of a floating-point vector. @@ -461,6 +714,67 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, float32_t * pResult, uint32_t * pIndex); +/** + * @brief Maximum value of absolute values of a floating-point vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_absmax_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex); + + /** + * @brief Maximum value of absolute values of a floating-point vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + */ + void arm_absmax_no_idx_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult); + +/** + * @brief Maximum value of a floating-point vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_max_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult, + uint32_t * pIndex); + +/** + * @brief Maximum value of absolute values of a floating-point vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_absmax_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult, + uint32_t * pIndex); + +/** + * @brief Maximum value of absolute values of a floating-point vector. 
+ * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + */ + void arm_absmax_no_idx_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult); + /** @brief Maximum value of a floating-point vector. @param[in] pSrc points to the input vector @@ -473,7 +787,213 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, uint32_t blockSize, float32_t *pResult); + /** + @brief Minimum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + void arm_min_no_idx_f32( + const float32_t *pSrc, + uint32_t blockSize, + float32_t *pResult); + + /** + @brief Maximum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + void arm_max_no_idx_f64( + const float64_t *pSrc, + uint32_t blockSize, + float64_t *pResult); + + /** + @brief Maximum value of a q31 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + void arm_max_no_idx_q31( + const q31_t *pSrc, + uint32_t blockSize, + q31_t *pResult); + + /** + @brief Maximum value of a q15 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + void arm_max_no_idx_q15( + const q15_t *pSrc, + uint32_t blockSize, + q15_t *pResult); + + /** + @brief Maximum value of a q7 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + void arm_max_no_idx_q7( + const q7_t *pSrc, + uint32_t blockSize, + q7_t *pResult); + + /** + @brief Minimum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + void arm_min_no_idx_f64( + const float64_t *pSrc, + uint32_t blockSize, + float64_t *pResult); + +/** + @brief Minimum value of a q31 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + void arm_min_no_idx_q31( + const q31_t *pSrc, + uint32_t blockSize, + q31_t *pResult); + + /** + @brief Minimum value of a q15 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + void arm_min_no_idx_q15( + const q15_t *pSrc, + uint32_t blockSize, + q15_t *pResult); + + /** + @brief Minimum value of a q7 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + void arm_min_no_idx_q7( + const q7_t *pSrc, + uint32_t blockSize, + q7_t *pResult); + +/** + @brief Mean square error between two Q7 vectors. 
+ @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none +*/ + +void arm_mse_q7( + const q7_t * pSrcA, + const q7_t * pSrcB, + uint32_t blockSize, + q7_t * pResult); + +/** + @brief Mean square error between two Q15 vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none +*/ + +void arm_mse_q15( + const q15_t * pSrcA, + const q15_t * pSrcB, + uint32_t blockSize, + q15_t * pResult); + +/** + @brief Mean square error between two Q31 vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none +*/ + +void arm_mse_q31( + const q31_t * pSrcA, + const q31_t * pSrcB, + uint32_t blockSize, + q31_t * pResult); + +/** + @brief Mean square error between two single precision float vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none +*/ + +void arm_mse_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + uint32_t blockSize, + float32_t * pResult); + +/** + @brief Mean square error between two double precision float vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none +*/ + +void arm_mse_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + uint32_t blockSize, + float64_t * pResult); + + +/** + * @brief Accumulation value of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ + +void arm_accumulate_f32( +const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult); + +/** + * @brief Accumulation value of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. + */ +void arm_accumulate_f64( +const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult); #ifdef __cplusplus diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h index 055040f..a3db3ee 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file statistics_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.1 + * @date 14 July 2022 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. 
All rights reserved. @@ -110,6 +111,19 @@ extern "C" float16_t * pResult, uint32_t * pIndex); + /** + * @brief Minimum value of absolute values of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + * @param[out] pIndex is the array index of the minimum value in the input buffer. + */ + void arm_absmin_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult, + uint32_t * pIndex); + /** * @brief Maximum value of a floating-point vector. * @param[in] pSrc points to the input buffer @@ -123,6 +137,42 @@ extern "C" float16_t * pResult, uint32_t * pIndex); +/** + * @brief Maximum value of absolute values of a floating-point vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + * @param[out] pIndex index of maximum value returned here + */ + void arm_absmax_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult, + uint32_t * pIndex); + + /** + * @brief Minimum value of absolute values of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output pointer + */ + void arm_absmin_no_idx_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult); + +/** + * @brief Maximum value of a floating-point vector. + * @param[in] pSrc points to the input buffer + * @param[in] blockSize length of the input vector + * @param[out] pResult maximum value returned here + */ + void arm_absmax_no_idx_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult); + + /** * @brief Entropy * @@ -181,6 +231,44 @@ float16_t arm_kullback_leibler_f16(const float16_t * pSrcA uint32_t blockSize, float16_t *pResult); +/** + @brief Minimum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + void arm_min_no_idx_f16( + const float16_t *pSrc, + uint32_t blockSize, + float16_t *pResult); + +/** + @brief Mean square error between two half precision float vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none +*/ + +void arm_mse_f16( + const float16_t * pSrcA, + const float16_t * pSrcB, + uint32_t blockSize, + float16_t * pResult); + + +/** + * @brief Sum value of a floating-point vector. + * @param[in] pSrc is input pointer + * @param[in] blockSize is the number of samples to process + * @param[out] pResult is output value. 
+ */ + void arm_accumulate_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult); #endif /*defined(ARM_FLOAT16_SUPPORTED)*/ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h index 3a2e333..7b586e3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file support_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -294,6 +295,20 @@ extern "C" float32_t * pDst, uint32_t blockSize); + + + /** + * @brief Copies the elements of a floating-point vector. + * @param[in] pSrc input pointer + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_copy_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize); + + /** * @brief Copies the elements of a Q7 vector. @@ -343,6 +358,18 @@ extern "C" uint32_t blockSize); + /** + * @brief Fills a constant value into a floating-point vector. + * @param[in] value input value to be filled + * @param[out] pDst output pointer + * @param[in] blockSize number of samples to process + */ + void arm_fill_f64( + float64_t value, + float64_t * pDst, + uint32_t blockSize); + + /** * @brief Fills a constant value into a Q7 vector. * @param[in] value input value to be filled diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions_f16.h index 6858f82..f36d06f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file support_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -120,6 +121,64 @@ void arm_barycenter_f16(const float16_t *in , uint32_t nbVectors , uint32_t vecDim); + +/** + @ingroup groupSupport + */ + +/** + * @defgroup typecast Typecasting + */ + +/** + @addtogroup typecast + @{ + */ + +/** + * @brief Interpret a f16 as an s16 value + * @param[in] x input value. + * @return return value. + * + * @par Description + * It is a typecast. No conversion of the float to int is done. + * The memcpy will be optimized out by the compiler. + * memcpy is used to prevent type punning issues. + * With gcc, -fno-builtins MUST not be used or the + * memcpy will not be optimized out. 
+ */ +__STATIC_INLINE int16_t arm_typecast_s16_f16(float16_t x) +{ + int16_t res; + res=*(int16_t*)memcpy((char*)&res,(char*)&x,sizeof(float16_t)); + return(res); +} + +/** + * @brief Interpret an s16 as an f16 value + * @param[in] x input value. + * @return return value. + * + * @par Description + * It is a typecast. No conversion of the int to float is done. + * The memcpy will be optimized out by the compiler. + * memcpy is used to prevent type punning issues. + * With gcc, -fno-builtins MUST not be used or the + * memcpy will not be optimized out. + */ +__STATIC_INLINE float16_t arm_typecast_f16_s16(int16_t x) +{ + float16_t res; + res=*(float16_t*)memcpy((char*)&res,(char*)&x,sizeof(int16_t)); + return(res); +} + + +/** + @} end of typecast group + */ + + #endif /*defined(ARM_FLOAT16_SUPPORTED)*/ #ifdef __cplusplus } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_defines.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_defines.h index 71ad2f7..f93e953 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_defines.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_defines.h @@ -1,6 +1,10 @@ /****************************************************************************** * @file svm_defines.h * @brief Public header file for CMSIS DSP Library + * @version V1.10.0 + * @date 08 July 2021 + * + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions.h index 3e1038c..6576c93 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file svm_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
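
A minimal usage sketch for a few of the statistics prototypes added earlier in
statistics_functions.h (arm_absmax_f32, arm_mse_f32, arm_accumulate_f32). It is
illustrative only: the function name statistics_example and the sample values
are placeholders, the include path follows the ones used elsewhere in this
patch, and the expected results assume the documented semantics (absmax returns
the largest magnitude, accumulate returns the plain sum).

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h"

void statistics_example(void)
{
    const float32_t x[4] = { 0.5f, -2.0f, 1.25f, -0.75f };
    const float32_t y[4] = { 0.0f,  0.0f, 0.0f,   0.0f  };

    float32_t peak;
    uint32_t  peakIdx;
    arm_absmax_f32(x, 4, &peak, &peakIdx);  /* peak == 2.0f (largest magnitude), peakIdx == 1 */

    float32_t mse;
    arm_mse_f32(x, y, 4, &mse);             /* sum((x - y)^2) / 4 == 1.59375f */

    float32_t sum;
    arm_accumulate_f32(x, 4, &sum);         /* plain sum of the elements == -1.0f */
}
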
@@ -132,7 +133,7 @@ typedef struct const float32_t *dualCoefficients; /**< Dual coefficients */ const float32_t *supportVectors; /**< Support vectors */ const int32_t *classes; /**< The two SVM classes */ - float32_t coef0; /**< Independant constant */ + float32_t coef0; /**< Independent constant */ float32_t gamma; /**< Gamma factor */ } arm_svm_sigmoid_instance_f32; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions_f16.h index 9d28c74..67c97aa 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file svm_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -33,6 +34,7 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/svm_defines.h" + #ifdef __cplusplus extern "C" { @@ -56,24 +58,6 @@ extern "C" * */ -/** - * @brief Integer exponentiation - * @param[in] x value - * @param[in] nb integer exponent >= 1 - * @return x^nb - * - */ -__STATIC_INLINE float16_t arm_exponent_f16(float16_t x, int32_t nb) -{ - float16_t r = x; - nb --; - while(nb > 0) - { - r = r * x; - nb--; - } - return(r); -} /** @@ -131,7 +115,7 @@ typedef struct const float16_t *dualCoefficients; /**< Dual coefficients */ const float16_t *supportVectors; /**< Support vectors */ const int32_t *classes; /**< The two SVM classes */ - float16_t coef0; /**< Independant constant */ + float16_t coef0; /**< Independent constant */ float16_t gamma; /**< Gamma factor */ } arm_svm_sigmoid_instance_f16; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h index 718d12e..2722620 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file transform_functions.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
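
For context on the coef0 / gamma fields whose description is corrected in the
SVM sigmoid instance structures above: in a sigmoid SVM the kernel is
K(x, y) = tanh(gamma * <x, y> + coef0), so coef0 is the additive term that does
not scale with the data, which is what "Independent constant" refers to. A
rough standalone illustration (sigmoid_kernel is not library code, just a
sketch of how the two fields enter the kernel):

#include <math.h>

static float sigmoid_kernel(const float *x, const float *y, unsigned dim,
                            float gamma, float coef0)
{
    float dot = 0.0f;                      /* <x, y> */
    for (unsigned i = 0; i < dim; i++) {
        dot += x[i] * y[i];
    }
    return tanhf(gamma * dot + coef0);     /* coef0: the independent, additive term */
}
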
@@ -435,10 +436,26 @@ typedef struct const float32_t * pTwiddleRFFT; /**< Twiddle factors real stage */ } arm_rfft_fast_instance_f32 ; -arm_status arm_rfft_fast_init_f32 ( - arm_rfft_fast_instance_f32 * S, - uint16_t fftLen); - + // Patched by Edge Impulse, expose the specific init functions to the outside world + arm_status arm_rfft_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S, + uint16_t fftLen); + arm_status arm_rfft_32_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S); + arm_status arm_rfft_64_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S); + arm_status arm_rfft_128_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S); + arm_status arm_rfft_256_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S); + arm_status arm_rfft_512_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S); + arm_status arm_rfft_1024_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S); + arm_status arm_rfft_2048_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S); + arm_status arm_rfft_4096_fast_init_f32 ( + arm_rfft_fast_instance_f32 * S); void arm_rfft_fast_f32( const arm_rfft_fast_instance_f32 * S, @@ -582,6 +599,149 @@ arm_status arm_rfft_fast_init_f32 ( q15_t * pState, q15_t * pInlineBuffer); + /** + * @brief Instance structure for the Floating-point MFCC function. + */ +typedef struct + { + const float32_t *dctCoefs; /**< Internal DCT coefficients */ + const float32_t *filterCoefs; /**< Internal Mel filter coefficients */ + const float32_t *windowCoefs; /**< Windowing coefficients */ + const uint32_t *filterPos; /**< Internal Mel filter positions in spectrum */ + const uint32_t *filterLengths; /**< Internal Mel filter lengths */ + uint32_t fftLen; /**< FFT length */ + uint32_t nbMelFilters; /**< Number of Mel filters */ + uint32_t nbDctOutputs; /**< Number of DCT outputs */ +#if defined(ARM_MFCC_CFFT_BASED) + /* Implementation of the MFCC is using a CFFT */ + arm_cfft_instance_f32 cfft; /**< Internal CFFT instance */ +#else + /* Implementation of the MFCC is using a RFFT (default) */ + arm_rfft_fast_instance_f32 rfft; +#endif + } arm_mfcc_instance_f32 ; + +arm_status arm_mfcc_init_f32( + arm_mfcc_instance_f32 * S, + uint32_t fftLen, + uint32_t nbMelFilters, + uint32_t nbDctOutputs, + const float32_t *dctCoefs, + const uint32_t *filterPos, + const uint32_t *filterLengths, + const float32_t *filterCoefs, + const float32_t *windowCoefs + ); + + +/** + @brief MFCC F32 + @param[in] S points to the mfcc instance structure + @param[in] pSrc points to the input samples + @param[out] pDst points to the output MFCC values + @param[inout] pTmp points to a temporary buffer of complex + @return none + */ + void arm_mfcc_f32( + const arm_mfcc_instance_f32 * S, + float32_t *pSrc, + float32_t *pDst, + float32_t *pTmp + ); + +typedef struct + { + const q31_t *dctCoefs; /**< Internal DCT coefficients */ + const q31_t *filterCoefs; /**< Internal Mel filter coefficients */ + const q31_t *windowCoefs; /**< Windowing coefficients */ + const uint32_t *filterPos; /**< Internal Mel filter positions in spectrum */ + const uint32_t *filterLengths; /**< Internal Mel filter lengths */ + uint32_t fftLen; /**< FFT length */ + uint32_t nbMelFilters; /**< Number of Mel filters */ + uint32_t nbDctOutputs; /**< Number of DCT outputs */ +#if defined(ARM_MFCC_CFFT_BASED) + /* Implementation of the MFCC is using a CFFT */ + arm_cfft_instance_q31 cfft; /**< Internal CFFT instance */ +#else + /* Implementation of the MFCC is using a RFFT (default) */ + arm_rfft_instance_q31 rfft; +#endif + } arm_mfcc_instance_q31 ; + +arm_status arm_mfcc_init_q31( + 
arm_mfcc_instance_q31 * S, + uint32_t fftLen, + uint32_t nbMelFilters, + uint32_t nbDctOutputs, + const q31_t *dctCoefs, + const uint32_t *filterPos, + const uint32_t *filterLengths, + const q31_t *filterCoefs, + const q31_t *windowCoefs + ); + + +/** + @brief MFCC Q31 + @param[in] S points to the mfcc instance structure + @param[in] pSrc points to the input samples + @param[out] pDst points to the output MFCC values + @param[inout] pTmp points to a temporary buffer of complex + @return none + */ + arm_status arm_mfcc_q31( + const arm_mfcc_instance_q31 * S, + q31_t *pSrc, + q31_t *pDst, + q31_t *pTmp + ); + +typedef struct + { + const q15_t *dctCoefs; /**< Internal DCT coefficients */ + const q15_t *filterCoefs; /**< Internal Mel filter coefficients */ + const q15_t *windowCoefs; /**< Windowing coefficients */ + const uint32_t *filterPos; /**< Internal Mel filter positions in spectrum */ + const uint32_t *filterLengths; /**< Internal Mel filter lengths */ + uint32_t fftLen; /**< FFT length */ + uint32_t nbMelFilters; /**< Number of Mel filters */ + uint32_t nbDctOutputs; /**< Number of DCT outputs */ +#if defined(ARM_MFCC_CFFT_BASED) + /* Implementation of the MFCC is using a CFFT */ + arm_cfft_instance_q15 cfft; /**< Internal CFFT instance */ +#else + /* Implementation of the MFCC is using a RFFT (default) */ + arm_rfft_instance_q15 rfft; +#endif + } arm_mfcc_instance_q15 ; + +arm_status arm_mfcc_init_q15( + arm_mfcc_instance_q15 * S, + uint32_t fftLen, + uint32_t nbMelFilters, + uint32_t nbDctOutputs, + const q15_t *dctCoefs, + const uint32_t *filterPos, + const uint32_t *filterLengths, + const q15_t *filterCoefs, + const q15_t *windowCoefs + ); + + +/** + @brief MFCC Q15 + @param[in] S points to the mfcc instance structure + @param[in] pSrc points to the input samples + @param[out] pDst points to the output MFCC values in q8.7 format + @param[inout] pTmp points to a temporary buffer of complex + @return error status + */ + arm_status arm_mfcc_q15( + const arm_mfcc_instance_q15 * S, + q15_t *pSrc, + q15_t *pDst, + q31_t *pTmp + ); #ifdef __cplusplus diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h index cb2419a..b38a587 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h @@ -1,8 +1,9 @@ /****************************************************************************** * @file transform_functions_f16.h * @brief Public header file for CMSIS DSP Library - * @version V1.9.0 - * @date 20. July 2020 + * @version V1.10.0 + * @date 08 July 2021 + * Target Processor: Cortex-M and Cortex-A cores ******************************************************************************/ /* * Copyright (c) 2010-2020 Arm Limited or its affiliates. All rights reserved. @@ -146,6 +147,57 @@ arm_status arm_rfft_fast_init_f16 ( void arm_cfft_radix2_f16( const arm_cfft_radix2_instance_f16 * S, float16_t * pSrc); + + /** + * @brief Instance structure for the Floating-point MFCC function. 
+ */ +typedef struct + { + const float16_t *dctCoefs; /**< Internal DCT coefficients */ + const float16_t *filterCoefs; /**< Internal Mel filter coefficients */ + const float16_t *windowCoefs; /**< Windowing coefficients */ + const uint32_t *filterPos; /**< Internal Mel filter positions in spectrum */ + const uint32_t *filterLengths; /**< Internal Mel filter lengths */ + uint32_t fftLen; /**< FFT length */ + uint32_t nbMelFilters; /**< Number of Mel filters */ + uint32_t nbDctOutputs; /**< Number of DCT outputs */ +#if defined(ARM_MFCC_CFFT_BASED) + /* Implementation of the MFCC is using a CFFT */ + arm_cfft_instance_f16 cfft; /**< Internal CFFT instance */ +#else + /* Implementation of the MFCC is using a RFFT (default) */ + arm_rfft_fast_instance_f16 rfft; +#endif + } arm_mfcc_instance_f16 ; + +arm_status arm_mfcc_init_f16( + arm_mfcc_instance_f16 * S, + uint32_t fftLen, + uint32_t nbMelFilters, + uint32_t nbDctOutputs, + const float16_t *dctCoefs, + const uint32_t *filterPos, + const uint32_t *filterLengths, + const float16_t *filterCoefs, + const float16_t *windowCoefs + ); + + +/** + @brief MFCC F16 + @param[in] S points to the mfcc instance structure + @param[in] pSrc points to the input samples + @param[out] pDst points to the output MFCC values + @param[inout] pTmp points to a temporary buffer of complex + @return none + */ + void arm_mfcc_f16( + const arm_mfcc_instance_f16 * S, + float16_t *pSrc, + float16_t *pDst, + float16_t *pTmp + ); + #endif /* defined(ARM_FLOAT16_SUPPORTED)*/ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h index e83da70..e6e24df 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h @@ -27,6 +27,7 @@ #define _ARM_MATH_UTILS_H_ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h" +#include #ifdef __cplusplus extern "C" @@ -47,6 +48,7 @@ extern "C" /** * @brief Function to Calculates 1/in (reciprocal) value of Q31 Data type. + It should not be used with negative values. */ __STATIC_FORCEINLINE uint32_t arm_recip_q31( q31_t in, @@ -60,11 +62,11 @@ extern "C" if (in > 0) { - signBits = ((uint32_t) (__CLZ( in) - 1)); + signBits = ((uint32_t) (__CLZ( (uint32_t)in) - 1)); } else { - signBits = ((uint32_t) (__CLZ(-in) - 1)); + signBits = ((uint32_t) (__CLZ((uint32_t)(-in)) - 1)); } /* Convert input sample to 1.31 format */ @@ -98,6 +100,7 @@ extern "C" /** * @brief Function to Calculates 1/in (reciprocal) value of Q15 Data type. + It should not be used with negative values. 
*/ __STATIC_FORCEINLINE uint32_t arm_recip_q15( q15_t in, @@ -105,21 +108,21 @@ extern "C" const q15_t * pRecipTable) { q15_t out = 0; - uint32_t tempVal = 0; + int32_t tempVal = 0; uint32_t index = 0, i = 0; uint32_t signBits = 0; if (in > 0) { - signBits = ((uint32_t)(__CLZ( in) - 17)); + signBits = ((uint32_t)(__CLZ( (uint32_t)in) - 17)); } else { - signBits = ((uint32_t)(__CLZ(-in) - 17)); + signBits = ((uint32_t)(__CLZ((uint32_t)(-in)) - 17)); } /* Convert input sample to 1.15 format */ - in = (in << signBits); + in = (q15_t)(in << signBits); /* calculation of index for initial approximated Val */ index = (uint32_t)(in >> 8); @@ -132,8 +135,8 @@ extern "C" /* running approximation for two iterations */ for (i = 0U; i < 2U; i++) { - tempVal = (uint32_t) (((q31_t) in * out) >> 15); - tempVal = 0x7FFFu - tempVal; + tempVal = (((q31_t) in * out) >> 15); + tempVal = 0x7FFF - tempVal; /* 1.15 with exp 1 */ out = (q15_t) (((q31_t) out * tempVal) >> 14); /* out = clip_q31_to_q15(((q31_t) out * tempVal) >> 14); */ @@ -159,13 +162,13 @@ __STATIC_INLINE void arm_norm_64_to_32u(uint64_t in, int32_t * normalized, int3 int32_t hi = (int32_t) (in >> 32); int32_t lo = (int32_t) ((in << 32) >> 32); - n1 = __CLZ(hi) - 32; + n1 = __CLZ((uint32_t)hi) - 32; if (!n1) { /* * input fits in 32-bit */ - n1 = __CLZ(lo); + n1 = __CLZ((uint32_t)lo); if (!n1) { /* @@ -201,13 +204,13 @@ __STATIC_INLINE void arm_norm_64_to_32u(uint64_t in, int32_t * normalized, int3 /* * 64 bit normalization */ - *normalized = (((uint32_t) lo) >> n1) | (hi << (32 - n1)); + *normalized = (int32_t)(((uint32_t)lo) >> n1) | (hi << (32 - n1)); } } -__STATIC_INLINE q31_t arm_div_q63_to_q31(q63_t num, q31_t den) +__STATIC_INLINE int32_t arm_div_int64_to_int32(int64_t num, int32_t den) { - q31_t result; + int32_t result; uint64_t absNum; int32_t normalized; int32_t norm; @@ -216,18 +219,25 @@ __STATIC_INLINE q31_t arm_div_q63_to_q31(q63_t num, q31_t den) * if sum fits in 32bits * avoid costly 64-bit division */ - absNum = num > 0 ? num : -num; + if (num == (int64_t)LONG_MIN) + { + absNum = LONG_MAX; + } + else + { + absNum = (uint64_t) (num > 0 ? num : -num); + } arm_norm_64_to_32u(absNum, &normalized, &norm); if (norm > 0) /* * 32-bit division */ - result = (q31_t) num / den; + result = (int32_t) num / den; else /* * 64-bit division */ - result = (q31_t) (num / den); + result = (int32_t) (num / den); return result; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f16.c index 7df97b9..e974c82 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f16.c @@ -5,11 +5,13 @@ * Title: arm_abs_f16.c * Description: Floating-point vector absolute value * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -33,19 +35,6 @@ @ingroup groupMath */ -/** - @defgroup BasicAbs Vector Absolute Value - - Computes the absolute value of a vector on an element-by-element basis. - -
-  <pre>
-      pDst[n] = abs(pSrc[n]),   0 <= n < blockSize.
-  </pre>
- - The functions support in-place computation allowing the source and - destination pointers to reference the same memory buffer. - There are separate functions for floating-point, Q7, Q15, and Q31 data types. - */ /** @addtogroup BasicAbs @@ -156,13 +145,13 @@ void arm_abs_f16( /* C = |A| */ /* Calculate absolute and store result in destination buffer. */ - *pDst++ = fabsf(*pSrc++); + *pDst++ = (_Float16)fabsf((float32_t)*pSrc++); - *pDst++ = fabsf(*pSrc++); + *pDst++ = (_Float16)fabsf((float32_t)*pSrc++); - *pDst++ = fabsf(*pSrc++); + *pDst++ = (_Float16)fabsf((float32_t)*pSrc++); - *pDst++ = fabsf(*pSrc++); + *pDst++ = (_Float16)fabsf((float32_t)*pSrc++); /* Decrement loop counter */ blkCnt--; @@ -184,7 +173,7 @@ void arm_abs_f16( /* C = |A| */ /* Calculate absolute and store result in destination buffer. */ - *pDst++ = fabsf(*pSrc++); + *pDst++ = (_Float16)fabsf((float32_t)*pSrc++); /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f32.c index fde9ea5..3d27210 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f32.c @@ -5,13 +5,13 @@ * Title: arm_abs_f32.c * Description: Floating-point vector absolute value * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f64.c new file mode 100644 index 0000000..a0bd5f0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_f64.c @@ -0,0 +1,78 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_abs_f64.c + * Description: Floating-point vector absolute value + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" +#include + +/** + @ingroup groupMath + */ + +/** + @addtogroup BasicAbs + @{ + */ + +/** + @brief Floating-point vector absolute value. + @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + +void arm_abs_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = |A| */ + + /* Calculate absolute and store result in destination buffer. */ + *pDst++ = fabs(*pSrc++); + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of BasicAbs group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q15.c index cce4f60..7c8ec53 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q15.c @@ -5,13 +5,13 @@ * Title: arm_abs_q15.c * Description: Q15 vector absolute value * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q31.c index 368e23e..fab95f2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q31.c @@ -5,13 +5,13 @@ * Title: arm_abs_q31.c * Description: Q31 vector absolute value * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q7.c index 8915683..f62d67a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_abs_q7.c @@ -5,13 +5,13 @@ * Title: arm_abs_q7.c * Description: Q7 vector absolute value * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f16.c index 8f825c7..d9d6226 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f16.c @@ -5,11 +5,13 @@ * Title: arm_add_f16.c * Description: Floating-point vector addition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -32,17 +34,6 @@ @ingroup groupMath */ -/** - @defgroup BasicAdd Vector Addition - - Element-by-element addition of two vectors. - -
-  <pre>
-      pDst[n] = pSrcA[n] + pSrcB[n],   0 <= n < blockSize.
-  </pre>
- - There are separate functions for floating-point, Q7, Q15, and Q31 data types. - */ /** @addtogroup BasicAdd @@ -130,10 +121,10 @@ void arm_add_f16( /* C = A + B */ /* Add and store result in destination buffer. */ - *pDst++ = (*pSrcA++) + (*pSrcB++); - *pDst++ = (*pSrcA++) + (*pSrcB++); - *pDst++ = (*pSrcA++) + (*pSrcB++); - *pDst++ = (*pSrcA++) + (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) + (_Float16)(*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) + (_Float16)(*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) + (_Float16)(*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) + (_Float16)(*pSrcB++); /* Decrement loop counter */ blkCnt--; @@ -154,7 +145,7 @@ void arm_add_f16( /* C = A + B */ /* Add and store result in destination buffer. */ - *pDst++ = (*pSrcA++) + (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) + (_Float16)(*pSrcB++); /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f32.c index 2a56f0a..4e854f5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f32.c @@ -5,13 +5,13 @@ * Title: arm_add_f32.c * Description: Floating-point vector addition * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f64.c new file mode 100644 index 0000000..a1f01a7 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_f64.c @@ -0,0 +1,79 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_add_f64.c + * Description: Floating-point vector addition + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + +/** + @ingroup groupMath + */ + +/** + @addtogroup BasicAdd + @{ + */ + +/** + @brief Floating-point vector addition. + @param[in] pSrcA points to first input vector + @param[in] pSrcB points to second input vector + @param[out] pDst points to output vector + @param[in] blockSize number of samples in each vector + @return none + */ + +void arm_add_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = A + B */ + + /* Add and store result in destination buffer. */ + *pDst++ = (*pSrcA++) + (*pSrcB++); + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of BasicAdd group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q15.c index a1b6d84..6265058 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q15.c @@ -5,13 +5,13 @@ * Title: arm_add_q15.c * Description: Q15 vector addition * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -126,11 +126,11 @@ void arm_add_q15( #if defined (ARM_MATH_DSP) /* read 2 times 2 samples at a time from sourceA */ - inA1 = read_q15x2_ia ((q15_t **) &pSrcA); - inA2 = read_q15x2_ia ((q15_t **) &pSrcA); + inA1 = read_q15x2_ia (&pSrcA); + inA2 = read_q15x2_ia (&pSrcA); /* read 2 times 2 samples at a time from sourceB */ - inB1 = read_q15x2_ia ((q15_t **) &pSrcB); - inB2 = read_q15x2_ia ((q15_t **) &pSrcB); + inB1 = read_q15x2_ia (&pSrcB); + inB2 = read_q15x2_ia (&pSrcB); /* Add and store 2 times 2 samples at a time */ write_q15x2_ia (&pDst, __QADD16(inA1, inB1)); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q31.c index fe85869..2d6e791 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q31.c @@ -5,13 +5,13 @@ * Title: arm_add_q31.c * Description: Q31 vector addition * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q7.c index 488b45d..46446c7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_add_q7.c @@ -1,7 +1,7 @@ #include "edge-impulse-sdk/dsp/config.hpp" #if EIDSP_LOAD_CMSIS_DSP_SOURCES /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -23,10 +23,10 @@ * Title: arm_add_q7.c * Description: Q7 vector addition * - * $Date: May 29, 2020 - * $Revision: V1.6.1 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" @@ -121,7 +121,7 @@ void arm_add_q7( #if defined (ARM_MATH_DSP) /* Add and store result in destination buffer (4 samples at a time). */ - write_q7x4_ia (&pDst, __QADD8 (read_q7x4_ia ((q7_t **) &pSrcA), read_q7x4_ia ((q7_t **) &pSrcB))); + write_q7x4_ia (&pDst, __QADD8 (read_q7x4_ia (&pSrcA), read_q7x4_ia (&pSrcB))); #else *pDst++ = (q7_t) __SSAT ((q15_t) *pSrcA++ + *pSrcB++, 8); *pDst++ = (q7_t) __SSAT ((q15_t) *pSrcA++ + *pSrcB++, 8); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u16.c index fb90af6..82aabc8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u16.c @@ -5,13 +5,13 @@ * Title: arm_and_u16.c * Description: uint16_t bitwise AND * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u32.c index 73b8087..0c4b090 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u32.c @@ -5,13 +5,13 @@ * Title: arm_and_u32.c * Description: uint32_t bitwise AND * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u8.c index f68e992..52ac33e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_and_u8.c @@ -5,13 +5,13 @@ * Title: arm_and_u8.c * Description: uint8_t bitwise AND * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_f16.c index 38bae53..bc4e732 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_f16.c @@ -5,8 +5,10 @@ * Title: arm_clip_f16.c * Description: Floating-point vector addition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
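
As a usage note for the Q7 addition touched a few hunks above: both the __QADD8
fast path and the __SSAT(..., 8) fallback saturate each sum to the Q7 range
[-128, 127]. A small sketch (q7_add_example and the sample values are
placeholders; the include path follows the ones used elsewhere in this patch):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h"

void q7_add_example(void)
{
    const q7_t a[4] = { 100, -100, 64, 0 };
    const q7_t b[4] = { 100, -100, 32, 5 };
    q7_t       c[4];

    arm_add_q7(a, b, c, 4);
    /* c == { 127, -128, 96, 5 }: the first two sums overflow Q7 and saturate. */
}
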
@@ -123,9 +125,9 @@ void arm_clip_f16(const float16_t * pSrc, { for (uint32_t i = 0; i < numSamples; i++) { - if (pSrc[i] > high) + if ((_Float16)pSrc[i] > (_Float16)high) pDst[i] = high; - else if (pSrc[i] < low) + else if ((_Float16)pSrc[i] < (_Float16)low) pDst[i] = low; else pDst[i] = pSrc[i]; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_f32.c index b25896a..b2b1374 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_f32.c @@ -5,8 +5,10 @@ * Title: arm_clip_f32.c * Description: Floating-point vector addition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. @@ -126,7 +128,8 @@ void arm_clip_f32(const float32_t * pSrc, float32_t high, uint32_t numSamples) { - for (uint32_t i = 0; i < numSamples; i++) + uint32_t i; + for (i = 0; i < numSamples; i++) { if (pSrc[i] > high) pDst[i] = high; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q15.c index 1ba2cfc..287109a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q15.c @@ -5,8 +5,10 @@ * Title: arm_clip_q15.c * Description: Floating-point vector addition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. @@ -116,7 +118,8 @@ void arm_clip_q15(const q15_t * pSrc, q15_t high, uint32_t numSamples) { - for (uint32_t i = 0; i < numSamples; i++) + uint32_t i; + for (i = 0; i < numSamples; i++) { if (pSrc[i] > high) pDst[i] = high; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q31.c index 70d6d59..a82d2df 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q31.c @@ -5,8 +5,10 @@ * Title: arm_clip_q31.c * Description: Floating-point vector addition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
@@ -116,7 +118,8 @@ void arm_clip_q31(const q31_t * pSrc, q31_t high, uint32_t numSamples) { - for (uint32_t i = 0; i < numSamples; i++) + uint32_t i; + for (i = 0; i < numSamples; i++) { if (pSrc[i] > high) pDst[i] = high; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q7.c index 006a7dc..f28678c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_clip_q7.c @@ -5,8 +5,10 @@ * Title: arm_clip_q7.c * Description: Floating-point vector addition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. @@ -116,7 +118,8 @@ void arm_clip_q7(const q7_t * pSrc, q7_t high, uint32_t numSamples) { - for (uint32_t i = 0; i < numSamples; i++) + uint32_t i; + for (i = 0; i < numSamples; i++) { if (pSrc[i] > high) pDst[i] = high; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f16.c index 11cbf9e..71ea70d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f16.c @@ -5,13 +5,13 @@ * Title: arm_dot_prod_f16.c * Description: Floating-point dot product * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,18 +34,6 @@ @ingroup groupMath */ -/** - @defgroup BasicDotProd Vector Dot Product - - Computes the dot product of two vectors. - The vectors are multiplied element-by-element and then summed. - -
-  <pre>
-      sum = pSrcA[0]*pSrcB[0] + pSrcA[1]*pSrcB[1] + ... + pSrcA[blockSize-1]*pSrcB[blockSize-1]
-  </pre>
- - There are separate functions for floating-point, Q7, Q15, and Q31 data types. - */ /** @addtogroup BasicDotProd diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f32.c index cd3b4f0..6f5e421 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f32.c @@ -5,13 +5,13 @@ * Title: arm_dot_prod_f32.c * Description: Floating-point dot product * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 05 October 2021 + * $Revision: V1.9.1 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -134,7 +134,9 @@ void arm_dot_prod_f32( f32x4_t vec1; f32x4_t vec2; f32x4_t accum = vdupq_n_f32(0); - f32x2_t tmp = vdup_n_f32(0); +#if !defined(__aarch64__) + f32x2_t tmp = vdup_n_f32(0); +#endif /* Compute 4 outputs at a time */ blkCnt = blockSize >> 2U; @@ -160,7 +162,7 @@ void arm_dot_prod_f32( blkCnt--; } -#if __aarch64__ +#if defined(__aarch64__) sum = vpadds_f32(vpadd_f32(vget_low_f32(accum), vget_high_f32(accum))); #else tmp = vpadd_f32(vget_low_f32(accum), vget_high_f32(accum)); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f64.c new file mode 100644 index 0000000..821931f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_f64.c @@ -0,0 +1,82 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_dot_prod_f64.c + * Description: Floating-point dot product + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + +/** + @ingroup groupMath + */ + +/** + @addtogroup BasicDotProd + @{ + */ + +/** + @brief Dot product of floating-point vectors. + @param[in] pSrcA points to the first input vector. 
+ @param[in] pSrcB points to the second input vector. + @param[in] blockSize number of samples in each vector. + @param[out] result output result returned here. + @return none + */ + +void arm_dot_prod_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + uint32_t blockSize, + float64_t * result) +{ + uint32_t blkCnt; /* Loop counter */ + float64_t sum = 0.; /* Temporary return variable */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] */ + + /* Calculate dot product and store result in a temporary buffer. */ + sum += (*pSrcA++) * (*pSrcB++); + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result in destination buffer */ + *result = sum; +} + +/** + @} end of BasicDotProd group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q15.c index be944f3..a8faebc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q15.c @@ -5,13 +5,13 @@ * Title: arm_dot_prod_q15.c * Description: Q15 dot product * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -126,8 +126,8 @@ void arm_dot_prod_q15( #if defined (ARM_MATH_DSP) /* Calculate dot product and store result in a temporary buffer. */ - sum = __SMLALD(read_q15x2_ia ((q15_t **) &pSrcA), read_q15x2_ia ((q15_t **) &pSrcB), sum); - sum = __SMLALD(read_q15x2_ia ((q15_t **) &pSrcA), read_q15x2_ia ((q15_t **) &pSrcB), sum); + sum = __SMLALD(read_q15x2_ia (&pSrcA), read_q15x2_ia (&pSrcB), sum); + sum = __SMLALD(read_q15x2_ia (&pSrcA), read_q15x2_ia (&pSrcB), sum); #else sum += (q63_t)((q31_t) *pSrcA++ * *pSrcB++); sum += (q63_t)((q31_t) *pSrcA++ * *pSrcB++); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q31.c index ee2d26d..bced7e8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q31.c @@ -5,13 +5,13 @@ * Title: arm_dot_prod_q31.c * Description: Q31 dot product * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q7.c index d17f129..594bd01 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_dot_prod_q7.c @@ -5,13 +5,13 @@ * Title: arm_dot_prod_q7.c * Description: Q7 dot product * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -131,9 +131,9 @@ void arm_dot_prod_q7( #if defined (ARM_MATH_DSP) /* read 4 samples at a time from sourceA */ - input1 = read_q7x4_ia ((q7_t **) &pSrcA); + input1 = read_q7x4_ia (&pSrcA); /* read 4 samples at a time from sourceB */ - input2 = read_q7x4_ia ((q7_t **) &pSrcB); + input2 = read_q7x4_ia (&pSrcB); /* extract two q7_t samples to q15_t samples */ inA1 = __SXTB16(__ROR(input1, 8)); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f16.c index 9fc66e4..0b5994b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f16.c @@ -5,11 +5,13 @@ * Title: arm_mult_f16.c * Description: Floating-point vector multiplication * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -32,17 +34,6 @@ @ingroup groupMath */ -/** - @defgroup BasicMult Vector Multiplication - - Element-by-element multiplication of two vectors. - -
-  <pre>
-      pDst[n] = pSrcA[n] * pSrcB[n],   0 <= n < blockSize.
-  </pre>
- - There are separate functions for floating-point, Q7, Q15, and Q31 data types. - */ /** @addtogroup BasicMult @@ -129,13 +120,13 @@ void arm_mult_f16( /* C = A * B */ /* Multiply inputs and store result in destination buffer. */ - *pDst++ = (*pSrcA++) * (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) * (_Float16)(*pSrcB++); - *pDst++ = (*pSrcA++) * (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) * (_Float16)(*pSrcB++); - *pDst++ = (*pSrcA++) * (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) * (_Float16)(*pSrcB++); - *pDst++ = (*pSrcA++) * (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) * (_Float16)(*pSrcB++); /* Decrement loop counter */ blkCnt--; @@ -156,7 +147,7 @@ void arm_mult_f16( /* C = A * B */ /* Multiply input and store result in destination buffer. */ - *pDst++ = (*pSrcA++) * (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) * (_Float16)(*pSrcB++); /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f32.c index 6441b3b..0744ac5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f32.c @@ -5,13 +5,13 @@ * Title: arm_mult_f32.c * Description: Floating-point vector multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f64.c new file mode 100644 index 0000000..9b914aa --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_f64.c @@ -0,0 +1,79 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mult_f64.c + * Description: Floating-point vector multiplication + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + +/** + @ingroup groupMath + */ + +/** + @addtogroup BasicMult + @{ + */ + +/** + @brief Floating-point vector multiplication. + @param[in] pSrcA points to the first input vector. + @param[in] pSrcB points to the second input vector. + @param[out] pDst points to the output vector. + @param[in] blockSize number of samples in each vector. + @return none + */ + +void arm_mult_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = A * B */ + + /* Multiply input and store result in destination buffer. */ + *pDst++ = (*pSrcA++) * (*pSrcB++); + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of BasicMult group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q15.c index 079059b..d6ec9ec 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q15.c @@ -5,13 +5,13 @@ * Title: arm_mult_q15.c * Description: Q15 vector multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -125,13 +125,13 @@ void arm_mult_q15( #if defined (ARM_MATH_DSP) /* read 2 samples at a time from sourceA */ - inA1 = read_q15x2_ia ((q15_t **) &pSrcA); + inA1 = read_q15x2_ia (&pSrcA); /* read 2 samples at a time from sourceB */ - inB1 = read_q15x2_ia ((q15_t **) &pSrcB); + inB1 = read_q15x2_ia (&pSrcB); /* read 2 samples at a time from sourceA */ - inA2 = read_q15x2_ia ((q15_t **) &pSrcA); + inA2 = read_q15x2_ia (&pSrcA); /* read 2 samples at a time from sourceB */ - inB2 = read_q15x2_ia ((q15_t **) &pSrcB); + inB2 = read_q15x2_ia (&pSrcB); /* multiply mul = sourceA * sourceB */ mul1 = (q31_t) ((q15_t) (inA1 >> 16) * (q15_t) (inB1 >> 16)); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q31.c index 9598133..60c103c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q31.c @@ -5,13 +5,13 @@ * Title: arm_mult_q31.c * Description: Q31 vector multiplication * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q7.c index ce3f4a7..fd0bc3b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_mult_q7.c @@ -5,13 +5,13 @@ * Title: arm_mult_q7.c * Description: Q7 vector multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f16.c index 36e2a88..c4d6ca0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f16.c @@ -5,11 +5,13 @@ * Title: arm_negate_f16.c * Description: Negates floating-point vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -32,19 +34,6 @@ @ingroup groupMath */ -/** - @defgroup BasicNegate Vector Negate - - Negates the elements of a vector. - -
-  <pre>
-      pDst[n] = -pSrc[n],   0 <= n < blockSize.
-  </pre>
- - The functions support in-place computation allowing the source and - destination pointers to reference the same memory buffer. - There are separate functions for floating-point, Q7, Q15, and Q31 data types. - */ /** @addtogroup BasicNegate @@ -124,13 +113,13 @@ void arm_negate_f16( /* C = -A */ /* Negate and store result in destination buffer. */ - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; /* Decrement loop counter */ blkCnt--; @@ -151,7 +140,7 @@ void arm_negate_f16( /* C = -A */ /* Negate and store result in destination buffer. */ - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f32.c index 3eec34e..e4df7ad 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f32.c @@ -5,13 +5,13 @@ * Title: arm_negate_f32.c * Description: Negates floating-point vectors * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f64.c new file mode 100644 index 0000000..870a767 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_f64.c @@ -0,0 +1,77 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_negate_f64.c + * Description: Negates floating-point vectors + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + +/** + @ingroup groupMath + */ + +/** + @addtogroup BasicNegate + @{ + */ + +/** + @brief Negates the elements of a floating-point vector. + @param[in] pSrc points to input vector. + @param[out] pDst points to output vector. + @param[in] blockSize number of samples in each vector. + @return none + */ + +void arm_negate_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = -A */ + + /* Negate and store result in destination buffer. */ + *pDst++ = -*pSrc++; + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of BasicNegate group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q15.c index 72d964b..c642c24 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q15.c @@ -5,13 +5,13 @@ * Title: arm_negate_q15.c * Description: Negates Q15 vectors * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -120,10 +120,10 @@ void arm_negate_q15( #if defined (ARM_MATH_DSP) /* Negate and store result in destination buffer (2 samples at a time). */ - in1 = read_q15x2_ia ((q15_t **) &pSrc); + in1 = read_q15x2_ia (&pSrc); write_q15x2_ia (&pDst, __QSUB16(0, in1)); - in1 = read_q15x2_ia ((q15_t **) &pSrc); + in1 = read_q15x2_ia (&pSrc); write_q15x2_ia (&pDst, __QSUB16(0, in1)); #else in = *pSrc++; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q31.c index 539f890..e0048e7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q31.c @@ -5,13 +5,13 @@ * Title: arm_negate_q31.c * Description: Negates Q31 vectors * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q7.c index 181896f..3d3cae1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_negate_q7.c @@ -5,13 +5,13 @@ * Title: arm_negate_q7.c * Description: Negates Q7 vectors * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -118,7 +118,7 @@ void arm_negate_q7( #if defined (ARM_MATH_DSP) /* Negate and store result in destination buffer (4 samples at a time). */ - in1 = read_q7x4_ia ((q7_t **) &pSrc); + in1 = read_q7x4_ia (&pSrc); write_q7x4_ia (&pDst, __QSUB8(0, in1)); #else in = *pSrc++; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u16.c index e583e49..5e58873 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u16.c @@ -5,13 +5,13 @@ * Title: arm_not_u16.c * Description: uint16_t bitwise NOT * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u32.c index ce702dd..634800a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u32.c @@ -5,13 +5,13 @@ * Title: arm_not_u32.c * Description: uint32_t bitwise NOT * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u8.c index 87a417d..b83fb0f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_not_u8.c @@ -5,13 +5,13 @@ * Title: arm_not_u8.c * Description: uint8_t bitwise NOT * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f16.c index a8a9bd9..4bb665c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f16.c @@ -5,11 +5,13 @@ * Title: arm_offset_f16.c * Description: Floating-point vector offset * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -32,20 +34,6 @@ @ingroup groupMath */ -/** - @defgroup BasicOffset Vector Offset - - Adds a constant offset to each element of a vector. - -
-  <pre>
-      pDst[n] = pSrc[n] + offset,   0 <= n < blockSize.
-  </pre>
- - The functions support in-place computation allowing the source and - destination pointers to reference the same memory buffer. - There are separate functions for floating-point, Q7, Q15, and Q31 data types. - */ - /** @addtogroup BasicOffset @{ @@ -128,13 +116,13 @@ void arm_offset_f16( /* C = A + offset */ /* Add offset and store result in destination buffer. */ - *pDst++ = (*pSrc++) + offset; + *pDst++ = (_Float16)(*pSrc++) + (_Float16)offset; - *pDst++ = (*pSrc++) + offset; + *pDst++ = (_Float16)(*pSrc++) + (_Float16)offset; - *pDst++ = (*pSrc++) + offset; + *pDst++ = (_Float16)(*pSrc++) + (_Float16)offset; - *pDst++ = (*pSrc++) + offset; + *pDst++ = (_Float16)(*pSrc++) + (_Float16)offset; /* Decrement loop counter */ blkCnt--; @@ -155,7 +143,7 @@ void arm_offset_f16( /* C = A + offset */ /* Add offset and store result in destination buffer. */ - *pDst++ = (*pSrc++) + offset; + *pDst++ = (_Float16)(*pSrc++) + (_Float16)offset; /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f32.c index c32e7e4..3033def 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f32.c @@ -5,13 +5,13 @@ * Title: arm_offset_f32.c * Description: Floating-point vector offset * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f64.c new file mode 100644 index 0000000..36b9007 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_f64.c @@ -0,0 +1,79 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_offset_f64.c + * Description: Floating-point vector offset + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + +/** + @ingroup groupMath + */ + +/** + @addtogroup BasicOffset + @{ + */ + +/** + @brief Adds a constant offset to a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] offset is the offset to be added + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + +void arm_offset_f64( + const float64_t * pSrc, + float64_t offset, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = A + offset */ + + /* Add offset and store result in destination buffer. */ + *pDst++ = (*pSrc++) + offset; + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of BasicOffset group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q15.c index ecf0829..9423730 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q15.c @@ -5,13 +5,13 @@ * Title: arm_offset_q15.c * Description: Q15 vector offset * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -124,8 +124,8 @@ void arm_offset_q15( #if defined (ARM_MATH_DSP) /* Add offset and store result in destination buffer (2 samples at a time). */ - write_q15x2_ia (&pDst, __QADD16(read_q15x2_ia ((q15_t **) &pSrc), offset_packed)); - write_q15x2_ia (&pDst, __QADD16(read_q15x2_ia ((q15_t **) &pSrc), offset_packed)); + write_q15x2_ia (&pDst, __QADD16(read_q15x2_ia (&pSrc), offset_packed)); + write_q15x2_ia (&pDst, __QADD16(read_q15x2_ia (&pSrc), offset_packed)); #else *pDst++ = (q15_t) __SSAT(((q31_t) *pSrc++ + offset), 16); *pDst++ = (q15_t) __SSAT(((q31_t) *pSrc++ + offset), 16); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q31.c index b6ecb9c..b0a1c99 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q31.c @@ -5,13 +5,13 @@ * Title: arm_offset_q31.c * Description: Q31 vector offset * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q7.c index 452cdb2..dacfe48 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_offset_q7.c @@ -5,13 +5,13 @@ * Title: arm_offset_q7.c * Description: Q7 vector offset * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -123,7 +123,7 @@ void arm_offset_q7( #if defined (ARM_MATH_DSP) /* Add offset and store result in destination buffer (4 samples at a time). */ - write_q7x4_ia (&pDst, __QADD8(read_q7x4_ia ((q7_t **) &pSrc), offset_packed)); + write_q7x4_ia (&pDst, __QADD8(read_q7x4_ia (&pSrc), offset_packed)); #else *pDst++ = (q7_t) __SSAT((q15_t) *pSrc++ + offset, 8); *pDst++ = (q7_t) __SSAT((q15_t) *pSrc++ + offset, 8); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u16.c index c7d12ba..2de542a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u16.c @@ -5,13 +5,13 @@ * Title: arm_or_u16.c * Description: uint16_t bitwise inclusive OR * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u32.c index 655b925..6e285dc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u32.c @@ -5,13 +5,13 @@ * Title: arm_or_u32.c * Description: uint32_t bitwise inclusive OR * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u8.c index 3eb9058..b9014a3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_or_u8.c @@ -5,13 +5,13 @@ * Title: arm_or_u8.c * Description: uint8_t bitwise inclusive OR * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f16.c index 240881a..ecd4180 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f16.c @@ -5,13 +5,13 @@ * Title: arm_scale_f16.c * Description: Multiplies a floating-point vector by a scalar * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,32 +34,7 @@ @ingroup groupMath */ -/** - @defgroup BasicScale Vector Scale - - Multiply a vector by a scalar value. For floating-point data, the algorithm used is: - -
-  <pre>
-      pDst[n] = pSrc[n] * scale,   0 <= n < blockSize.
-  </pre>
- - In the fixed-point Q7, Q15, and Q31 functions, scale is represented by - a fractional multiplication scaleFract and an arithmetic shift shift. - The shift allows the gain of the scaling operation to exceed 1.0. - The algorithm used with fixed-point data is: -
-  <pre>
-      pDst[n] = (pSrc[n] * scaleFract) << shift,   0 <= n < blockSize.
-  </pre>
- - The overall scale factor applied to the fixed-point data is -
-  <pre>
-      scale = scaleFract * 2^shift.
-  </pre>
- - The functions support in-place computation allowing the source and destination - pointers to reference the same memory buffer. - */ /** @addtogroup BasicScale @@ -143,13 +118,13 @@ void arm_scale_f16( /* C = A * scale */ /* Scale input and store result in destination buffer. */ - *pDst++ = (*pSrc++) * scale; + *pDst++ = (_Float16)(*pSrc++) * (_Float16)scale; - *pDst++ = (*pSrc++) * scale; + *pDst++ = (_Float16)(*pSrc++) * (_Float16)scale; - *pDst++ = (*pSrc++) * scale; + *pDst++ = (_Float16)(*pSrc++) * (_Float16)scale; - *pDst++ = (*pSrc++) * scale; + *pDst++ = (_Float16)(*pSrc++) * (_Float16)scale; /* Decrement loop counter */ blkCnt--; @@ -170,7 +145,7 @@ void arm_scale_f16( /* C = A * scale */ /* Scale input and store result in destination buffer. */ - *pDst++ = (*pSrc++) * scale; + *pDst++ = (_Float16)(*pSrc++) * (_Float16)scale; /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f32.c index 342b656..c5c5479 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f32.c @@ -5,13 +5,13 @@ * Title: arm_scale_f32.c * Description: Multiplies a floating-point vector by a scalar * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f64.c new file mode 100644 index 0000000..747f06b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_f64.c @@ -0,0 +1,79 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_scale_f64.c + * Description: Multiplies a floating-point vector by a scalar + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + +/** + @ingroup groupMath + */ + +/** + @addtogroup BasicScale + @{ + */ + +/** + @brief Multiplies a floating-point vector by a scalar. + @param[in] pSrc points to the input vector + @param[in] scale scale factor to be applied + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + +void arm_scale_f64( + const float64_t *pSrc, + float64_t scale, + float64_t *pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = A * scale */ + + /* Scale input and store result in destination buffer. */ + *pDst++ = (*pSrc++) * scale; + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of BasicScale group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q15.c index 5a53708..3443de5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q15.c @@ -5,13 +5,13 @@ * Title: arm_scale_q15.c * Description: Multiplies a Q15 vector by a scalar * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -138,8 +138,8 @@ void arm_scale_q15( #if defined (ARM_MATH_DSP) /* read 2 times 2 samples at a time from source */ - inA1 = read_q15x2_ia ((q15_t **) &pSrc); - inA2 = read_q15x2_ia ((q15_t **) &pSrc); + inA1 = read_q15x2_ia (&pSrc); + inA2 = read_q15x2_ia (&pSrc); /* Scale inputs and store result in temporary variables * in single cycle by packing the outputs */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q31.c index 6d0f7c7..271278b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q31.c @@ -5,13 +5,13 @@ * Title: arm_scale_q31.c * Description: Multiplies a Q31 vector by a scalar * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q7.c index 5847500..f4383ee 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_scale_q7.c @@ -5,13 +5,13 @@ * Title: arm_scale_q7.c * Description: Multiplies a Q7 vector by a scalar * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q15.c index ce677b3..3579dad 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q15.c @@ -5,13 +5,13 @@ * Title: arm_shift_q15.c * Description: Shifts the elements of a Q15 vector by a specified number of bits * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q31.c index 53f01cd..c2fc8fb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q31.c @@ -5,13 +5,13 @@ * Title: arm_shift_q31.c * Description: Shifts the elements of a Q31 vector by a specified number of bits * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
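The (q15_t **) casts dropped around read_q15x2_ia() in the Q15 hunks above go along with the helper now taking a pointer-to-const source, since the pSrc parameters are const q15_t *. As a reminder of what the packed load does, here is a stand-in sketch, assuming the little-endian, memcpy-style packing used by the SDK's inline helpers; it is illustrative only and not the SDK implementation.

#include <stdint.h>
#include <string.h>

typedef int16_t q15_t;
typedef int32_t q31_t;

/* Read two q15 samples as one packed q31 word and advance the source
   pointer by two samples, mirroring a call like read_q15x2_ia(&pSrc). */
static q31_t read_q15_pair_ia(const q15_t **pp)
{
    q31_t packed;
    memcpy(&packed, *pp, sizeof(packed)); /* low halfword = first sample (little-endian) */
    *pp += 2;
    return packed;
}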
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q7.c index eedb7eb..87ef339 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_shift_q7.c @@ -5,13 +5,13 @@ * Title: arm_shift_q7.c * Description: Processing function for the Q7 Shifting * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f16.c index 0cba4bf..571fe5f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f16.c @@ -5,13 +5,13 @@ * Title: arm_sub_f16.c * Description: Floating-point vector subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,17 +34,6 @@ @ingroup groupMath */ -/** - @defgroup BasicSub Vector Subtraction - - Element-by-element subtraction of two vectors. - -
-      pDst[n] = pSrcA[n] - pSrcB[n],   0 <= n < blockSize.
-  
- - There are separate functions for floating-point, Q7, Q15, and Q31 data types. - */ /** @addtogroup BasicSub @@ -131,13 +120,13 @@ void arm_sub_f16( /* C = A - B */ /* Subtract and store result in destination buffer. */ - *pDst++ = (*pSrcA++) - (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) - (_Float16)(*pSrcB++); - *pDst++ = (*pSrcA++) - (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) - (_Float16)(*pSrcB++); - *pDst++ = (*pSrcA++) - (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) - (_Float16)(*pSrcB++); - *pDst++ = (*pSrcA++) - (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) - (_Float16)(*pSrcB++); /* Decrement loop counter */ blkCnt--; @@ -158,7 +147,7 @@ void arm_sub_f16( /* C = A - B */ /* Subtract and store result in destination buffer. */ - *pDst++ = (*pSrcA++) - (*pSrcB++); + *pDst++ = (_Float16)(*pSrcA++) - (_Float16)(*pSrcB++); /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f32.c index 5dbd231..476d7ee 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f32.c @@ -5,13 +5,13 @@ * Title: arm_sub_f32.c * Description: Floating-point vector subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f64.c new file mode 100644 index 0000000..a956a14 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_f64.c @@ -0,0 +1,79 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_sub_f64.c + * Description: Floating-point vector subtraction + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
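The explicit _Float16 casts added to the f16 scale and subtract loops above keep the arithmetic in half precision: __fp16-typed operands are otherwise promoted to float before the operation. Below is a scalar sketch of the pattern, assuming a toolchain with _Float16 support; the float16_t typedef mirrors the SDK's FP16-enabled builds and is included only to make the snippet self-contained.

/* Illustrative only. */
typedef __fp16 float16_t;

void sub_f16_scalar(const float16_t *pSrcA, const float16_t *pSrcB,
                    float16_t *pDst, unsigned int blockSize)
{
    while (blockSize > 0U)
    {
        /* Cast both operands so the subtraction itself is performed in
           half precision rather than in promoted float. */
        *pDst++ = (_Float16)(*pSrcA++) - (_Float16)(*pSrcB++);
        blockSize--;
    }
}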
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + +/** + @ingroup groupMath + */ + +/** + @addtogroup BasicSub + @{ + */ + +/** + @brief Floating-point vector subtraction. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + +void arm_sub_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = A - B */ + + /* Subtract and store result in destination buffer. */ + *pDst++ = (*pSrcA++) - (*pSrcB++); + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of BasicSub group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q15.c index f5a2d2c..0892988 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q15.c @@ -5,13 +5,13 @@ * Title: arm_sub_q15.c * Description: Q15 vector subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -127,11 +127,11 @@ void arm_sub_q15( #if defined (ARM_MATH_DSP) /* read 2 times 2 samples at a time from sourceA */ - inA1 = read_q15x2_ia ((q15_t **) &pSrcA); - inA2 = read_q15x2_ia ((q15_t **) &pSrcA); + inA1 = read_q15x2_ia (&pSrcA); + inA2 = read_q15x2_ia (&pSrcA); /* read 2 times 2 samples at a time from sourceB */ - inB1 = read_q15x2_ia ((q15_t **) &pSrcB); - inB2 = read_q15x2_ia ((q15_t **) &pSrcB); + inB1 = read_q15x2_ia (&pSrcB); + inB2 = read_q15x2_ia (&pSrcB); /* Subtract and store 2 times 2 samples at a time */ write_q15x2_ia (&pDst, __QSUB16(inA1, inB1)); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q31.c index 79c291e..8aaae08 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q31.c @@ -5,13 +5,13 @@ * Title: arm_sub_q31.c * Description: Q31 vector subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. 
All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q7.c index e2c1ecb..c2aea32 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_sub_q7.c @@ -5,13 +5,13 @@ * Title: arm_sub_q7.c * Description: Q7 vector subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -119,7 +119,7 @@ void arm_sub_q7( #if defined (ARM_MATH_DSP) /* Subtract and store result in destination buffer (4 samples at a time). */ - write_q7x4_ia (&pDst, __QSUB8(read_q7x4_ia ((q7_t **) &pSrcA), read_q7x4_ia ((q7_t **) &pSrcB))); + write_q7x4_ia (&pDst, __QSUB8(read_q7x4_ia (&pSrcA), read_q7x4_ia (&pSrcB))); #else *pDst++ = (q7_t) __SSAT((q15_t) *pSrcA++ - *pSrcB++, 8); *pDst++ = (q7_t) __SSAT((q15_t) *pSrcA++ - *pSrcB++, 8); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u16.c index 002edf5..def6516 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u16.c @@ -5,13 +5,13 @@ * Title: arm_xor_u16.c * Description: uint16_t bitwise exclusive OR * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u32.c index f835b1f..74c3d1c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u32.c @@ -5,13 +5,13 @@ * Title: arm_xor_u32.c * Description: uint32_t bitwise exclusive OR * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. 
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u8.c index 6d57e3e..c1c8615 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BasicMathFunctions/arm_xor_u8.c @@ -5,13 +5,13 @@ * Title: arm_xor_u8.c * Description: uint8_t bitwise exclusive OR * - * $Date: 14 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/arm_gaussian_naive_bayes_predict_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/arm_gaussian_naive_bayes_predict_f16.c index 1db5cfe..bfe2e28 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/arm_gaussian_naive_bayes_predict_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/arm_gaussian_naive_bayes_predict_f16.c @@ -5,11 +5,13 @@ * Title: arm_naive_gaussian_bayes_predict_f16 * Description: Naive Gaussian Bayesian Estimator * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -33,7 +35,6 @@ #include #include -#define PI_F 3.1415926535897932384626433832795f16 /** * @addtogroup groupBayes @@ -43,13 +44,12 @@ /** * @brief Naive Gaussian Bayesian Estimator * - * @param[in] *S points to a naive bayes instance structure - * @param[in] *in points to the elements of the input vector. - * @param[in] *pBuffer points to a buffer of length numberOfClasses + * @param[in] *S points to a naive bayes instance structure + * @param[in] *in points to the elements of the input vector. + * @param[out] *pOutputProbabilities points to a buffer of length numberOfClasses containing estimated probabilities + * @param[out] *pBufferB points to a temporary buffer of length numberOfClasses * @return The predicted class * - * @par If the number of classes is big, MVE version will consume lot of - * stack since the log prior are computed on the stack. 
* */ @@ -60,19 +60,21 @@ uint32_t arm_gaussian_naive_bayes_predict_f16(const arm_gaussian_naive_bayes_instance_f16 *S, const float16_t * in, - float16_t *pBuffer) + float16_t *pOutputProbabilities, + float16_t *pBufferB + ) { uint32_t nbClass; const float16_t *pTheta = S->theta; const float16_t *pSigma = S->sigma; - float16_t *buffer = pBuffer; + float16_t *buffer = pOutputProbabilities; const float16_t *pIn = in; float16_t result; f16x8_t vsigma; _Float16 tmp; f16x8_t vacc1, vacc2; uint32_t index; - float16_t logclassPriors[S->numberOfClasses]; + float16_t *logclassPriors=pBufferB; float16_t *pLogPrior = logclassPriors; arm_vlog_f16((float16_t *) S->classPriors, logclassPriors, S->numberOfClasses); @@ -131,42 +133,35 @@ uint32_t arm_gaussian_naive_bayes_predict_f16(const arm_gaussian_naive_bayes_ins tmp = -0.5f16 * (_Float16)vecAddAcrossF16Mve(vacc1); tmp -= 0.5f16 * (_Float16)vecAddAcrossF16Mve(vacc2); - *buffer = tmp + *pLogPrior++; + *buffer = (_Float16)tmp + (_Float16)*pLogPrior++; buffer++; } - arm_max_f16(pBuffer, S->numberOfClasses, &result, &index); + arm_max_f16(pOutputProbabilities, S->numberOfClasses, &result, &index); return (index); } #else -/** - * @brief Naive Gaussian Bayesian Estimator - * - * @param[in] *S points to a naive bayes instance structure - * @param[in] *in points to the elements of the input vector. - * @param[in] *pBuffer points to a buffer of length numberOfClasses - * @return The predicted class - * - */ uint32_t arm_gaussian_naive_bayes_predict_f16(const arm_gaussian_naive_bayes_instance_f16 *S, const float16_t * in, - float16_t *pBuffer) + float16_t *pOutputProbabilities, + float16_t *pBufferB) { uint32_t nbClass; uint32_t nbDim; const float16_t *pPrior = S->classPriors; const float16_t *pTheta = S->theta; const float16_t *pSigma = S->sigma; - float16_t *buffer = pBuffer; + float16_t *buffer = pOutputProbabilities; const float16_t *pIn=in; float16_t result; _Float16 sigma; _Float16 tmp; _Float16 acc1,acc2; uint32_t index; + (void)pBufferB; pTheta=S->theta; pSigma=S->sigma; @@ -182,24 +177,24 @@ uint32_t arm_gaussian_naive_bayes_predict_f16(const arm_gaussian_naive_bayes_ins acc2 = 0.0f16; for(nbDim = 0; nbDim < S->vectorDimension; nbDim++) { - sigma = *pSigma + S->epsilon; - acc1 += logf(2.0f16 * (_Float16)PI_F * sigma); - acc2 += (*pIn - *pTheta) * (*pIn - *pTheta) / sigma; + sigma = (_Float16)*pSigma + (_Float16)S->epsilon; + acc1 += (_Float16)logf(2.0f * PI * (float32_t)sigma); + acc2 += ((_Float16)*pIn - (_Float16)*pTheta) * ((_Float16)*pIn - (_Float16)*pTheta) / (_Float16)sigma; pIn++; pTheta++; pSigma++; } - tmp = -0.5f16 * acc1; - tmp -= 0.5f16 * acc2; + tmp = -0.5f16 * (_Float16)acc1; + tmp -= 0.5f16 * (_Float16)acc2; - *buffer = tmp + logf(*pPrior++); + *buffer = (_Float16)tmp + (_Float16)logf((float32_t)*pPrior++); buffer++; } - arm_max_f16(pBuffer,S->numberOfClasses,&result,&index); + arm_max_f16(pOutputProbabilities,S->numberOfClasses,&result,&index); return(index); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/arm_gaussian_naive_bayes_predict_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/arm_gaussian_naive_bayes_predict_f32.c index 24b89e7..30d1ab9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/arm_gaussian_naive_bayes_predict_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/arm_gaussian_naive_bayes_predict_f32.c 
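The hunk above changes arm_gaussian_naive_bayes_predict_f16() to take two caller-supplied buffers: pOutputProbabilities receives the per-class values and pBufferB is scratch space, replacing the variable-length array of log priors the MVE path previously placed on the stack. A hedged calling sketch; instance setup is omitted, the header path and MAX_CLASSES bound are assumptions, and both buffers must hold at least numberOfClasses elements.

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/bayes_functions_f16.h"

#define MAX_CLASSES 8   /* illustrative upper bound, >= S->numberOfClasses */

uint32_t classify_f16(const arm_gaussian_naive_bayes_instance_f16 *S,
                      const float16_t *sample)
{
    float16_t probabilities[MAX_CLASSES]; /* estimated per-class values */
    float16_t scratch[MAX_CLASSES];       /* temporary buffer (pBufferB) */

    /* Returns the index of the most probable class. */
    return arm_gaussian_naive_bayes_predict_f16(S, sample,
                                                probabilities, scratch);
}

The arm_gaussian_naive_bayes_predict_f32() changes that follow apply the same two-buffer convention to the single-precision variant.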
@@ -5,11 +5,13 @@ * Title: arm_naive_gaussian_bayes_predict_f32 * Description: Naive Gaussian Bayesian Estimator * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -41,13 +43,12 @@ /** * @brief Naive Gaussian Bayesian Estimator * - * @param[in] *S points to a naive bayes instance structure - * @param[in] *in points to the elements of the input vector. - * @param[in] *pBuffer points to a buffer of length numberOfClasses + * @param[in] *S points to a naive bayes instance structure + * @param[in] *in points to the elements of the input vector. + * @param[out] *pOutputProbabilities points to a buffer of length numberOfClasses containing estimated probabilities + * @param[out] *pBufferB points to a temporary buffer of length numberOfClasses * @return The predicted class * - * @par If the number of classes is big, MVE version will consume lot of - * stack since the log prior are computed on the stack. * */ @@ -58,19 +59,21 @@ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_instance_f32 *S, const float32_t * in, - float32_t *pBuffer) + float32_t *pOutputProbabilities, + float32_t *pBufferB + ) { uint32_t nbClass; const float32_t *pTheta = S->theta; const float32_t *pSigma = S->sigma; - float32_t *buffer = pBuffer; + float32_t *buffer = pOutputProbabilities; const float32_t *pIn = in; float32_t result; f32x4_t vsigma; float32_t tmp; f32x4_t vacc1, vacc2; uint32_t index; - float32_t logclassPriors[S->numberOfClasses]; + float32_t *logclassPriors=pBufferB; float32_t *pLogPrior = logclassPriors; arm_vlog_f32((float32_t *) S->classPriors, logclassPriors, S->numberOfClasses); @@ -133,7 +136,7 @@ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_ins buffer++; } - arm_max_f32(pBuffer, S->numberOfClasses, &result, &index); + arm_max_f32(pOutputProbabilities, S->numberOfClasses, &result, &index); return (index); } @@ -148,7 +151,8 @@ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_ins uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_instance_f32 *S, const float32_t * in, - float32_t *pBuffer) + float32_t *pOutputProbabilities, + float32_t *pBufferB) { const float32_t *pPrior = S->classPriors; @@ -159,7 +163,7 @@ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_ins const float32_t *pTheta1 = S->theta + S->vectorDimension; const float32_t *pSigma1 = S->sigma + S->vectorDimension; - float32_t *buffer = pBuffer; + float32_t *buffer = pOutputProbabilities; const float32_t *pIn=in; float32_t result; @@ -174,6 +178,7 @@ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_ins float32x2_t tmpV2; float32x4_t thetaV,thetaV1; float32x4_t inV; + (void)pBufferB; epsilonV = vdupq_n_f32(S->epsilon); @@ -322,32 +327,24 @@ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_ins classBlkCnt--; } - arm_max_f32(pBuffer,S->numberOfClasses,&result,&index); + arm_max_f32(pOutputProbabilities,S->numberOfClasses,&result,&index); return(index); } #else -/** - * @brief Naive Gaussian Bayesian Estimator - * - * @param[in] *S points to a naive bayes instance structure - * @param[in] *in points to the elements of the input 
vector. - * @param[in] *pBuffer points to a buffer of length numberOfClasses - * @return The predicted class - * - */ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_instance_f32 *S, const float32_t * in, - float32_t *pBuffer) + float32_t *pOutputProbabilities, + float32_t *pBufferB) { uint32_t nbClass; uint32_t nbDim; const float32_t *pPrior = S->classPriors; const float32_t *pTheta = S->theta; const float32_t *pSigma = S->sigma; - float32_t *buffer = pBuffer; + float32_t *buffer = pOutputProbabilities; const float32_t *pIn=in; float32_t result; float32_t sigma; @@ -355,6 +352,8 @@ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_ins float32_t acc1,acc2; uint32_t index; + (void)pBufferB; + pTheta=S->theta; pSigma=S->sigma; @@ -386,7 +385,7 @@ uint32_t arm_gaussian_naive_bayes_predict_f32(const arm_gaussian_naive_bayes_ins buffer++; } - arm_max_f32(pBuffer,S->numberOfClasses,&result,&index); + arm_max_f32(pOutputProbabilities,S->numberOfClasses,&result,&index); return(index); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_common_tables.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_common_tables.c index c059075..28a2085 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_common_tables.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_common_tables.c @@ -5,13 +5,13 @@ * Title: arm_common_tables.c * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
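The arm_common_tables.c hunk that follows adds sqrt_initial_lut_q31[] and sqrt_initial_lut_q15[], what appear to be initial-estimate tables for the fixed-point square-root functions, together with the Mathematica recipe used to generate them. As a sanity check, the Q15 table (q = 15, nb = 4, quarter offset 0x2000) can be reproduced with a small host-side generator; the sketch below is illustrative and not part of the SDK.

#include <math.h>
#include <stdio.h>

int main(void)
{
    const int q = 15;                     /* Q15 format */
    const int nb = 4;                     /* 2^nb = 16 table entries */
    const long step = 1L << (q - nb);     /* 2^11 */
    const long start = 1L << (q - 2);     /* 0x2000 */
    const long end = (1L << q) + 0x2000;  /* exclusive upper bound */

    for (long i = start; i < end; i += step)
    {
        /* tofix[q][ 1/Sqrt[i/2^q] / 8 ], saturated to 2^q - 1 */
        double v = 1.0 / sqrt((double)i / (double)(1L << q)) / 8.0;
        long fx = lround(v * (double)(1L << q));
        if (fx > (1L << q) - 1) fx = (1L << q) - 1;
        printf("%ld%s", fx, (i + step < end) ? ", " : "\n");
    }
    return 0;
}

Built with -lm, this should print the same 16 values as the table in the hunk (8192, 7327, ..., 3759).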
* * SPDX-License-Identifier: Apache-2.0 * @@ -70509,8 +70509,36 @@ const q15_t sqrtTable_Q15[256] = { #endif #endif /* defined(ARM_MATH_MVEI) */ +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SQRT_Q31) +/* +ClearAll[tofix]; +tofix[q_][a_] := With[{r = Round[a*2^q]}, + If[r > (2^q - 1), 2^q - 1, r] + ]; + +(* For q = format, 2^nb is length of the table *) +With[{q = 15, nb = 4, q12quarter = 16^^2000}, + With[{shift = Echo[q - nb]}, + Table[tofix[q][1.0/Sqrt[1.0*i/2^q]/8.0], {i, 2^(q - 2), + 2^q + q12quarter - 1, 2^shift}]] + ] // CopyToClipboard + +*/ +const q31_t sqrt_initial_lut_q31[32]={536870912, 506166750, 480191942, 457845052, 438353264, 421156193, \ +405836263, 392075079, 379625062, 368290407, 357913941, 348367849, \ +339546978, 331363921, 323745341, 316629190, 309962566, 303700050, \ +297802400, 292235509, 286969573, 281978417, 277238947, 272730696, \ +268435456, 264336964, 260420644, 256673389, 253083375, 249639903, \ +246333269, 243154642}; +#endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SQRT_Q31) */ + +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SQRT_Q15) +const q15_t sqrt_initial_lut_q15[16]={8192, 7327, 6689, 6193, 5793, 5461, 5181, 4940, 4730, 4544, 4379, \ +4230, 4096, 3974, 3862, 3759}; +#endif /* !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FAST_TABLES) || defined(ARM_TABLE_SQRT_Q15) */ + -#endif /* if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FAST_TABLES) */ +#endif /* #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FAST_ALLOW_TABLES) */ #if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM)) && !defined(ARM_MATH_AUTOVECTORIZE) const float32_t exp_tab[8] = { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_common_tables_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_common_tables_f16.c index 9541c33..d71efb8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_common_tables_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_common_tables_f16.c @@ -5,13 +5,13 @@ * Title: arm_common_tables_f16.c * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_const_structs.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_const_structs.c index 9dd7af5..30b810a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_const_structs.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_const_structs.c @@ -6,13 +6,13 @@ * Description: Constant structs that are initialized for user convenience. 
* For example, some can be given as arguments to the arm_cfft_f32() or arm_rfft_f32() functions. * - * $Date: 27. January 2017 - * $Revision: V.1.5.1 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_const_structs_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_const_structs_f16.c index 5e326cb..603e423 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_const_structs_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_const_structs_f16.c @@ -6,13 +6,13 @@ * Description: Constant structs that are initialized for user convenience. * For example, some can be given as arguments to the arm_cfft_f32() or arm_rfft_f32() functions. * - * $Date: 27. January 2017 - * $Revision: V.1.5.1 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_mve_tables.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_mve_tables.c index b6c845c..ba5aa16 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_mve_tables.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_mve_tables.c @@ -6,12 +6,13 @@ * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc * used for MVE implementation only * - * $Date: 14. April 2020 + * @version V1.10.0 + * @date 04 October 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -28,8 +29,10 @@ * limitations under the License. 
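In the rearranged_twiddle_* tables rewritten in the hunks below, the values are the same FFT twiddle factors (cosine/sine pairs), now emitted at binary32 precision rather than as float literals carrying double-precision digits; for example, 0.92387950420379638672f is cos(pi/8) rounded to the nearest float. A quick host-side check, illustrative only:

#include <math.h>
#include <stdio.h>

int main(void)
{
    const double pi = 3.14159265358979323846;

    /* Rounding cos(pi/8) and sin(pi/8) to float should reproduce the new
       literals 0.92387950420379638672f and 0.38268342614173889160f. */
    printf("%.20f\n", (double)(float)cos(pi / 8.0));
    printf("%.20f\n", (double)(float)sin(pi / 8.0));
    return 0;
}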
*/ + #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h" + -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h" + #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) @@ -47,19 +50,19 @@ uint32_t rearranged_twiddle_tab_stride3_arr_16_f32[2]={ 0,0,}; float32_t rearranged_twiddle_stride1_16_f32[8]={ -1.00000000000000000000f,0.00000000000000000000f,0.92387953251128673848f, -0.38268343236508978178f,0.70710678118654757274f,0.70710678118654757274f, -0.38268343236508983729f,0.92387953251128673848f,}; +1.00000000000000000000f,0.00000000000000000000f,0.92387950420379638672f, +0.38268342614173889160f,0.70710676908493041992f,0.70710676908493041992f, +0.38268342614173889160f,0.92387950420379638672f,}; float32_t rearranged_twiddle_stride2_16_f32[8]={ -1.00000000000000000000f,0.00000000000000000000f,0.70710678118654757274f, -0.70710678118654757274f,0.00000000000000006123f,1.00000000000000000000f, --0.70710678118654746172f,0.70710678118654757274f,}; +1.00000000000000000000f,0.00000000000000000000f,0.70710676908493041992f, +0.70710676908493041992f,0.00000000000000006123f,1.00000000000000000000f, +-0.70710676908493041992f,0.70710676908493041992f,}; float32_t rearranged_twiddle_stride3_16_f32[8]={ -1.00000000000000000000f,0.00000000000000000000f,0.38268343236508983729f, -0.92387953251128673848f,-0.70710678118654746172f,0.70710678118654757274f, --0.92387953251128684951f,-0.38268343236508967076f,}; +1.00000000000000000000f,0.00000000000000000000f,0.38268342614173889160f, +0.92387950420379638672f,-0.70710676908493041992f,0.70710676908493041992f, +-0.92387950420379638672f,-0.38268342614173889160f,}; #endif @@ -75,52 +78,52 @@ uint32_t rearranged_twiddle_tab_stride3_arr_64_f32[3]={ 0,32,0,}; float32_t rearranged_twiddle_stride1_64_f32[40]={ -1.00000000000000000000f,0.00000000000000000000f,0.99518472667219692873f, -0.09801714032956060363f,0.98078528040323043058f,0.19509032201612824808f, -0.95694033573220882438f,0.29028467725446233105f,0.92387953251128673848f, -0.38268343236508978178f,0.88192126434835504956f,0.47139673682599764204f, -0.83146961230254523567f,0.55557023301960217765f,0.77301045336273699338f, -0.63439328416364548779f,0.70710678118654757274f,0.70710678118654757274f, -0.63439328416364548779f,0.77301045336273688235f,0.55557023301960228867f, -0.83146961230254523567f,0.47139673682599780857f,0.88192126434835493853f, -0.38268343236508983729f,0.92387953251128673848f,0.29028467725446233105f, -0.95694033573220893540f,0.19509032201612833135f,0.98078528040323043058f, -0.09801714032956077016f,0.99518472667219681771f,1.00000000000000000000f, -0.00000000000000000000f,0.92387953251128673848f,0.38268343236508978178f, -0.70710678118654757274f,0.70710678118654757274f,0.38268343236508983729f, -0.92387953251128673848f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99518471956253051758f, +0.09801714122295379639f,0.98078525066375732422f,0.19509032368659973145f, +0.95694035291671752930f,0.29028466343879699707f,0.92387950420379638672f, +0.38268342614173889160f,0.88192129135131835938f,0.47139674425125122070f, +0.83146959543228149414f,0.55557024478912353516f,0.77301043272018432617f, +0.63439327478408813477f,0.70710676908493041992f,0.70710676908493041992f, +0.63439327478408813477f,0.77301043272018432617f,0.55557024478912353516f, +0.83146959543228149414f,0.47139674425125122070f,0.88192129135131835938f, +0.38268342614173889160f,0.92387950420379638672f,0.29028466343879699707f, +0.95694035291671752930f,0.19509032368659973145f,0.98078525066375732422f, 
+0.09801714122295379639f,0.99518471956253051758f,1.00000000000000000000f, +0.00000000000000000000f,0.92387950420379638672f,0.38268342614173889160f, +0.70710676908493041992f,0.70710676908493041992f,0.38268342614173889160f, +0.92387950420379638672f,}; float32_t rearranged_twiddle_stride2_64_f32[40]={ -1.00000000000000000000f,0.00000000000000000000f,0.98078528040323043058f, -0.19509032201612824808f,0.92387953251128673848f,0.38268343236508978178f, -0.83146961230254523567f,0.55557023301960217765f,0.70710678118654757274f, -0.70710678118654757274f,0.55557023301960228867f,0.83146961230254523567f, -0.38268343236508983729f,0.92387953251128673848f,0.19509032201612833135f, -0.98078528040323043058f,0.00000000000000006123f,1.00000000000000000000f, --0.19509032201612819257f,0.98078528040323043058f,-0.38268343236508972627f, -0.92387953251128673848f,-0.55557023301960195560f,0.83146961230254534669f, --0.70710678118654746172f,0.70710678118654757274f,-0.83146961230254534669f, -0.55557023301960217765f,-0.92387953251128673848f,0.38268343236508989280f, --0.98078528040323043058f,0.19509032201612860891f,1.00000000000000000000f, -0.00000000000000000000f,0.70710678118654757274f,0.70710678118654757274f, -0.00000000000000006123f,1.00000000000000000000f,-0.70710678118654746172f, -0.70710678118654757274f,}; +1.00000000000000000000f,0.00000000000000000000f,0.98078525066375732422f, +0.19509032368659973145f,0.92387950420379638672f,0.38268342614173889160f, +0.83146959543228149414f,0.55557024478912353516f,0.70710676908493041992f, +0.70710676908493041992f,0.55557024478912353516f,0.83146959543228149414f, +0.38268342614173889160f,0.92387950420379638672f,0.19509032368659973145f, +0.98078525066375732422f,0.00000000000000006123f,1.00000000000000000000f, +-0.19509032368659973145f,0.98078525066375732422f,-0.38268342614173889160f, +0.92387950420379638672f,-0.55557024478912353516f,0.83146959543228149414f, +-0.70710676908493041992f,0.70710676908493041992f,-0.83146959543228149414f, +0.55557024478912353516f,-0.92387950420379638672f,0.38268342614173889160f, +-0.98078525066375732422f,0.19509032368659973145f,1.00000000000000000000f, +0.00000000000000000000f,0.70710676908493041992f,0.70710676908493041992f, +0.00000000000000006123f,1.00000000000000000000f,-0.70710676908493041992f, +0.70710676908493041992f,}; float32_t rearranged_twiddle_stride3_64_f32[40]={ -1.00000000000000000000f,0.00000000000000000000f,0.95694033573220882438f, -0.29028467725446233105f,0.83146961230254523567f,0.55557023301960217765f, -0.63439328416364548779f,0.77301045336273688235f,0.38268343236508983729f, -0.92387953251128673848f,0.09801714032956077016f,0.99518472667219681771f, --0.19509032201612819257f,0.98078528040323043058f,-0.47139673682599769755f, -0.88192126434835504956f,-0.70710678118654746172f,0.70710678118654757274f, --0.88192126434835493853f,0.47139673682599780857f,-0.98078528040323043058f, -0.19509032201612860891f,-0.99518472667219692873f,-0.09801714032956058975f, --0.92387953251128684951f,-0.38268343236508967076f,-0.77301045336273710440f, --0.63439328416364526575f,-0.55557023301960217765f,-0.83146961230254523567f, --0.29028467725446244208f,-0.95694033573220882438f,1.00000000000000000000f, -0.00000000000000000000f,0.38268343236508983729f,0.92387953251128673848f, --0.70710678118654746172f,0.70710678118654757274f,-0.92387953251128684951f, --0.38268343236508967076f,}; +1.00000000000000000000f,0.00000000000000000000f,0.95694035291671752930f, +0.29028466343879699707f,0.83146959543228149414f,0.55557024478912353516f, 
+0.63439327478408813477f,0.77301043272018432617f,0.38268342614173889160f, +0.92387950420379638672f,0.09801714122295379639f,0.99518471956253051758f, +-0.19509032368659973145f,0.98078525066375732422f,-0.47139674425125122070f, +0.88192129135131835938f,-0.70710676908493041992f,0.70710676908493041992f, +-0.88192129135131835938f,0.47139674425125122070f,-0.98078525066375732422f, +0.19509032368659973145f,-0.99518471956253051758f,-0.09801714122295379639f, +-0.92387950420379638672f,-0.38268342614173889160f,-0.77301043272018432617f, +-0.63439327478408813477f,-0.55557024478912353516f,-0.83146959543228149414f, +-0.29028466343879699707f,-0.95694035291671752930f,1.00000000000000000000f, +0.00000000000000000000f,0.38268342614173889160f,0.92387950420379638672f, +-0.70710676908493041992f,0.70710676908493041992f,-0.92387950420379638672f, +-0.38268342614173889160f,}; #endif @@ -136,178 +139,178 @@ uint32_t rearranged_twiddle_tab_stride3_arr_256_f32[4]={ 0,128,160,0,}; float32_t rearranged_twiddle_stride1_256_f32[168]={ -1.00000000000000000000f,0.00000000000000000000f,0.99969881869620424997f, -0.02454122852291228812f,0.99879545620517240501f,0.04906767432741801493f, -0.99729045667869020697f,0.07356456359966742631f,0.99518472667219692873f, -0.09801714032956060363f,0.99247953459870996706f,0.12241067519921619566f, -0.98917650996478101444f,0.14673047445536174793f,0.98527764238894122162f, -0.17096188876030121717f,0.98078528040323043058f,0.19509032201612824808f, -0.97570213003852857003f,0.21910124015686979759f,0.97003125319454397424f, -0.24298017990326387094f,0.96377606579543984022f,0.26671275747489836538f, -0.95694033573220882438f,0.29028467725446233105f,0.94952818059303667475f, -0.31368174039889151761f,0.94154406518302080631f,0.33688985339222005111f, -0.93299279883473895669f,0.35989503653498811087f,0.92387953251128673848f, -0.38268343236508978178f,0.91420975570353069095f,0.40524131400498986100f, -0.90398929312344333820f,0.42755509343028208491f,0.89322430119551532446f, -0.44961132965460653965f,0.88192126434835504956f,0.47139673682599764204f, -0.87008699110871146054f,0.49289819222978403790f,0.85772861000027211809f, -0.51410274419322166128f,0.84485356524970711689f,0.53499761988709715332f, -0.83146961230254523567f,0.55557023301960217765f,0.81758481315158371139f, -0.57580819141784533866f,0.80320753148064494287f,0.59569930449243335691f, -0.78834642762660622761f,0.61523159058062681925f,0.77301045336273699338f, -0.63439328416364548779f,0.75720884650648456748f,0.65317284295377675551f, -0.74095112535495921691f,0.67155895484701833009f,0.72424708295146700276f, -0.68954054473706682948f,0.70710678118654757274f,0.70710678118654757274f, -0.68954054473706694051f,0.72424708295146689174f,0.67155895484701833009f, -0.74095112535495910588f,0.65317284295377686654f,0.75720884650648456748f, -0.63439328416364548779f,0.77301045336273688235f,0.61523159058062681925f, -0.78834642762660622761f,0.59569930449243346793f,0.80320753148064483184f, -0.57580819141784533866f,0.81758481315158371139f,0.55557023301960228867f, -0.83146961230254523567f,0.53499761988709726435f,0.84485356524970700587f, -0.51410274419322166128f,0.85772861000027211809f,0.49289819222978409341f, -0.87008699110871134952f,0.47139673682599780857f,0.88192126434835493853f, -0.44961132965460659516f,0.89322430119551532446f,0.42755509343028219593f, -0.90398929312344333820f,0.40524131400498986100f,0.91420975570353069095f, -0.38268343236508983729f,0.92387953251128673848f,0.35989503653498827740f, -0.93299279883473884567f,0.33688985339222005111f,0.94154406518302080631f, 
-0.31368174039889157312f,0.94952818059303667475f,0.29028467725446233105f, -0.95694033573220893540f,0.26671275747489842090f,0.96377606579543984022f, -0.24298017990326398197f,0.97003125319454397424f,0.21910124015686976984f, -0.97570213003852857003f,0.19509032201612833135f,0.98078528040323043058f, -0.17096188876030135595f,0.98527764238894122162f,0.14673047445536174793f, -0.98917650996478101444f,0.12241067519921627893f,0.99247953459870996706f, -0.09801714032956077016f,0.99518472667219681771f,0.07356456359966745406f, -0.99729045667869020697f,0.04906767432741812596f,0.99879545620517240501f, -0.02454122852291226384f,0.99969881869620424997f,1.00000000000000000000f, -0.00000000000000000000f,0.99518472667219692873f,0.09801714032956060363f, -0.98078528040323043058f,0.19509032201612824808f,0.95694033573220882438f, -0.29028467725446233105f,0.92387953251128673848f,0.38268343236508978178f, -0.88192126434835504956f,0.47139673682599764204f,0.83146961230254523567f, -0.55557023301960217765f,0.77301045336273699338f,0.63439328416364548779f, -0.70710678118654757274f,0.70710678118654757274f,0.63439328416364548779f, -0.77301045336273688235f,0.55557023301960228867f,0.83146961230254523567f, -0.47139673682599780857f,0.88192126434835493853f,0.38268343236508983729f, -0.92387953251128673848f,0.29028467725446233105f,0.95694033573220893540f, -0.19509032201612833135f,0.98078528040323043058f,0.09801714032956077016f, -0.99518472667219681771f,1.00000000000000000000f,0.00000000000000000000f, -0.92387953251128673848f,0.38268343236508978178f,0.70710678118654757274f, -0.70710678118654757274f,0.38268343236508983729f,0.92387953251128673848f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99969881772994995117f, +0.02454122900962829590f,0.99879544973373413086f,0.04906767606735229492f, +0.99729043245315551758f,0.07356456667184829712f,0.99518471956253051758f, +0.09801714122295379639f,0.99247956275939941406f,0.12241067737340927124f, +0.98917651176452636719f,0.14673046767711639404f,0.98527765274047851562f, +0.17096188664436340332f,0.98078525066375732422f,0.19509032368659973145f, +0.97570210695266723633f,0.21910123527050018311f,0.97003126144409179688f, +0.24298018217086791992f,0.96377605199813842773f,0.26671275496482849121f, +0.95694035291671752930f,0.29028466343879699707f,0.94952815771102905273f, +0.31368175148963928223f,0.94154405593872070312f,0.33688986301422119141f, +0.93299281597137451172f,0.35989505052566528320f,0.92387950420379638672f, +0.38268342614173889160f,0.91420978307723999023f,0.40524131059646606445f, +0.90398931503295898438f,0.42755508422851562500f,0.89322429895401000977f, +0.44961133599281311035f,0.88192129135131835938f,0.47139674425125122070f, +0.87008696794509887695f,0.49289819598197937012f,0.85772860050201416016f, +0.51410275697708129883f,0.84485357999801635742f,0.53499764204025268555f, +0.83146959543228149414f,0.55557024478912353516f,0.81758481264114379883f, +0.57580816745758056641f,0.80320751667022705078f,0.59569931030273437500f, +0.78834640979766845703f,0.61523157358169555664f,0.77301043272018432617f, +0.63439327478408813477f,0.75720882415771484375f,0.65317285060882568359f, +0.74095112085342407227f,0.67155897617340087891f,0.72424709796905517578f, +0.68954056501388549805f,0.70710676908493041992f,0.70710676908493041992f, +0.68954056501388549805f,0.72424709796905517578f,0.67155897617340087891f, +0.74095112085342407227f,0.65317285060882568359f,0.75720882415771484375f, +0.63439327478408813477f,0.77301043272018432617f,0.61523157358169555664f, +0.78834640979766845703f,0.59569931030273437500f,0.80320751667022705078f, 
+0.57580816745758056641f,0.81758481264114379883f,0.55557024478912353516f, +0.83146959543228149414f,0.53499764204025268555f,0.84485357999801635742f, +0.51410275697708129883f,0.85772860050201416016f,0.49289819598197937012f, +0.87008696794509887695f,0.47139674425125122070f,0.88192129135131835938f, +0.44961133599281311035f,0.89322429895401000977f,0.42755508422851562500f, +0.90398931503295898438f,0.40524131059646606445f,0.91420978307723999023f, +0.38268342614173889160f,0.92387950420379638672f,0.35989505052566528320f, +0.93299281597137451172f,0.33688986301422119141f,0.94154405593872070312f, +0.31368175148963928223f,0.94952815771102905273f,0.29028466343879699707f, +0.95694035291671752930f,0.26671275496482849121f,0.96377605199813842773f, +0.24298018217086791992f,0.97003126144409179688f,0.21910123527050018311f, +0.97570210695266723633f,0.19509032368659973145f,0.98078525066375732422f, +0.17096188664436340332f,0.98527765274047851562f,0.14673046767711639404f, +0.98917651176452636719f,0.12241067737340927124f,0.99247956275939941406f, +0.09801714122295379639f,0.99518471956253051758f,0.07356456667184829712f, +0.99729043245315551758f,0.04906767606735229492f,0.99879544973373413086f, +0.02454122900962829590f,0.99969881772994995117f,1.00000000000000000000f, +0.00000000000000000000f,0.99518471956253051758f,0.09801714122295379639f, +0.98078525066375732422f,0.19509032368659973145f,0.95694035291671752930f, +0.29028466343879699707f,0.92387950420379638672f,0.38268342614173889160f, +0.88192129135131835938f,0.47139674425125122070f,0.83146959543228149414f, +0.55557024478912353516f,0.77301043272018432617f,0.63439327478408813477f, +0.70710676908493041992f,0.70710676908493041992f,0.63439327478408813477f, +0.77301043272018432617f,0.55557024478912353516f,0.83146959543228149414f, +0.47139674425125122070f,0.88192129135131835938f,0.38268342614173889160f, +0.92387950420379638672f,0.29028466343879699707f,0.95694035291671752930f, +0.19509032368659973145f,0.98078525066375732422f,0.09801714122295379639f, +0.99518471956253051758f,1.00000000000000000000f,0.00000000000000000000f, +0.92387950420379638672f,0.38268342614173889160f,0.70710676908493041992f, +0.70710676908493041992f,0.38268342614173889160f,0.92387950420379638672f,}; float32_t rearranged_twiddle_stride2_256_f32[168]={ -1.00000000000000000000f,0.00000000000000000000f,0.99879545620517240501f, -0.04906767432741801493f,0.99518472667219692873f,0.09801714032956060363f, -0.98917650996478101444f,0.14673047445536174793f,0.98078528040323043058f, -0.19509032201612824808f,0.97003125319454397424f,0.24298017990326387094f, -0.95694033573220882438f,0.29028467725446233105f,0.94154406518302080631f, -0.33688985339222005111f,0.92387953251128673848f,0.38268343236508978178f, -0.90398929312344333820f,0.42755509343028208491f,0.88192126434835504956f, -0.47139673682599764204f,0.85772861000027211809f,0.51410274419322166128f, -0.83146961230254523567f,0.55557023301960217765f,0.80320753148064494287f, -0.59569930449243335691f,0.77301045336273699338f,0.63439328416364548779f, -0.74095112535495921691f,0.67155895484701833009f,0.70710678118654757274f, -0.70710678118654757274f,0.67155895484701833009f,0.74095112535495910588f, -0.63439328416364548779f,0.77301045336273688235f,0.59569930449243346793f, -0.80320753148064483184f,0.55557023301960228867f,0.83146961230254523567f, -0.51410274419322166128f,0.85772861000027211809f,0.47139673682599780857f, -0.88192126434835493853f,0.42755509343028219593f,0.90398929312344333820f, -0.38268343236508983729f,0.92387953251128673848f,0.33688985339222005111f, 
-0.94154406518302080631f,0.29028467725446233105f,0.95694033573220893540f, -0.24298017990326398197f,0.97003125319454397424f,0.19509032201612833135f, -0.98078528040323043058f,0.14673047445536174793f,0.98917650996478101444f, -0.09801714032956077016f,0.99518472667219681771f,0.04906767432741812596f, -0.99879545620517240501f,0.00000000000000006123f,1.00000000000000000000f, --0.04906767432741800800f,0.99879545620517240501f,-0.09801714032956064526f, -0.99518472667219692873f,-0.14673047445536163691f,0.98917650996478101444f, --0.19509032201612819257f,0.98078528040323043058f,-0.24298017990326387094f, -0.97003125319454397424f,-0.29028467725446216452f,0.95694033573220893540f, --0.33688985339221994009f,0.94154406518302080631f,-0.38268343236508972627f, -0.92387953251128673848f,-0.42755509343028186287f,0.90398929312344344922f, --0.47139673682599769755f,0.88192126434835504956f,-0.51410274419322155026f, -0.85772861000027211809f,-0.55557023301960195560f,0.83146961230254534669f, --0.59569930449243335691f,0.80320753148064494287f,-0.63439328416364537677f, -0.77301045336273710440f,-0.67155895484701844111f,0.74095112535495899486f, --0.70710678118654746172f,0.70710678118654757274f,-0.74095112535495888384f, -0.67155895484701855214f,-0.77301045336273699338f,0.63439328416364548779f, --0.80320753148064483184f,0.59569930449243346793f,-0.83146961230254534669f, -0.55557023301960217765f,-0.85772861000027200706f,0.51410274419322177231f, --0.88192126434835493853f,0.47139673682599780857f,-0.90398929312344333820f, -0.42755509343028202940f,-0.92387953251128673848f,0.38268343236508989280f, --0.94154406518302069529f,0.33688985339222032867f,-0.95694033573220882438f, -0.29028467725446238656f,-0.97003125319454397424f,0.24298017990326406523f, --0.98078528040323043058f,0.19509032201612860891f,-0.98917650996478101444f, -0.14673047445536180344f,-0.99518472667219681771f,0.09801714032956082567f, --0.99879545620517240501f,0.04906767432741796636f,1.00000000000000000000f, -0.00000000000000000000f,0.98078528040323043058f,0.19509032201612824808f, -0.92387953251128673848f,0.38268343236508978178f,0.83146961230254523567f, -0.55557023301960217765f,0.70710678118654757274f,0.70710678118654757274f, -0.55557023301960228867f,0.83146961230254523567f,0.38268343236508983729f, -0.92387953251128673848f,0.19509032201612833135f,0.98078528040323043058f, -0.00000000000000006123f,1.00000000000000000000f,-0.19509032201612819257f, -0.98078528040323043058f,-0.38268343236508972627f,0.92387953251128673848f, --0.55557023301960195560f,0.83146961230254534669f,-0.70710678118654746172f, -0.70710678118654757274f,-0.83146961230254534669f,0.55557023301960217765f, --0.92387953251128673848f,0.38268343236508989280f,-0.98078528040323043058f, -0.19509032201612860891f,1.00000000000000000000f,0.00000000000000000000f, -0.70710678118654757274f,0.70710678118654757274f,0.00000000000000006123f, -1.00000000000000000000f,-0.70710678118654746172f,0.70710678118654757274f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99879544973373413086f, +0.04906767606735229492f,0.99518471956253051758f,0.09801714122295379639f, +0.98917651176452636719f,0.14673046767711639404f,0.98078525066375732422f, +0.19509032368659973145f,0.97003126144409179688f,0.24298018217086791992f, +0.95694035291671752930f,0.29028466343879699707f,0.94154405593872070312f, +0.33688986301422119141f,0.92387950420379638672f,0.38268342614173889160f, +0.90398931503295898438f,0.42755508422851562500f,0.88192129135131835938f, +0.47139674425125122070f,0.85772860050201416016f,0.51410275697708129883f, 
+0.83146959543228149414f,0.55557024478912353516f,0.80320751667022705078f, +0.59569931030273437500f,0.77301043272018432617f,0.63439327478408813477f, +0.74095112085342407227f,0.67155897617340087891f,0.70710676908493041992f, +0.70710676908493041992f,0.67155897617340087891f,0.74095112085342407227f, +0.63439327478408813477f,0.77301043272018432617f,0.59569931030273437500f, +0.80320751667022705078f,0.55557024478912353516f,0.83146959543228149414f, +0.51410275697708129883f,0.85772860050201416016f,0.47139674425125122070f, +0.88192129135131835938f,0.42755508422851562500f,0.90398931503295898438f, +0.38268342614173889160f,0.92387950420379638672f,0.33688986301422119141f, +0.94154405593872070312f,0.29028466343879699707f,0.95694035291671752930f, +0.24298018217086791992f,0.97003126144409179688f,0.19509032368659973145f, +0.98078525066375732422f,0.14673046767711639404f,0.98917651176452636719f, +0.09801714122295379639f,0.99518471956253051758f,0.04906767606735229492f, +0.99879544973373413086f,0.00000000000000006123f,1.00000000000000000000f, +-0.04906767606735229492f,0.99879544973373413086f,-0.09801714122295379639f, +0.99518471956253051758f,-0.14673046767711639404f,0.98917651176452636719f, +-0.19509032368659973145f,0.98078525066375732422f,-0.24298018217086791992f, +0.97003126144409179688f,-0.29028466343879699707f,0.95694035291671752930f, +-0.33688986301422119141f,0.94154405593872070312f,-0.38268342614173889160f, +0.92387950420379638672f,-0.42755508422851562500f,0.90398931503295898438f, +-0.47139674425125122070f,0.88192129135131835938f,-0.51410275697708129883f, +0.85772860050201416016f,-0.55557024478912353516f,0.83146959543228149414f, +-0.59569931030273437500f,0.80320751667022705078f,-0.63439327478408813477f, +0.77301043272018432617f,-0.67155897617340087891f,0.74095112085342407227f, +-0.70710676908493041992f,0.70710676908493041992f,-0.74095112085342407227f, +0.67155897617340087891f,-0.77301043272018432617f,0.63439327478408813477f, +-0.80320751667022705078f,0.59569931030273437500f,-0.83146959543228149414f, +0.55557024478912353516f,-0.85772860050201416016f,0.51410275697708129883f, +-0.88192129135131835938f,0.47139674425125122070f,-0.90398931503295898438f, +0.42755508422851562500f,-0.92387950420379638672f,0.38268342614173889160f, +-0.94154405593872070312f,0.33688986301422119141f,-0.95694035291671752930f, +0.29028466343879699707f,-0.97003126144409179688f,0.24298018217086791992f, +-0.98078525066375732422f,0.19509032368659973145f,-0.98917651176452636719f, +0.14673046767711639404f,-0.99518471956253051758f,0.09801714122295379639f, +-0.99879544973373413086f,0.04906767606735229492f,1.00000000000000000000f, +0.00000000000000000000f,0.98078525066375732422f,0.19509032368659973145f, +0.92387950420379638672f,0.38268342614173889160f,0.83146959543228149414f, +0.55557024478912353516f,0.70710676908493041992f,0.70710676908493041992f, +0.55557024478912353516f,0.83146959543228149414f,0.38268342614173889160f, +0.92387950420379638672f,0.19509032368659973145f,0.98078525066375732422f, +0.00000000000000006123f,1.00000000000000000000f,-0.19509032368659973145f, +0.98078525066375732422f,-0.38268342614173889160f,0.92387950420379638672f, +-0.55557024478912353516f,0.83146959543228149414f,-0.70710676908493041992f, +0.70710676908493041992f,-0.83146959543228149414f,0.55557024478912353516f, +-0.92387950420379638672f,0.38268342614173889160f,-0.98078525066375732422f, +0.19509032368659973145f,1.00000000000000000000f,0.00000000000000000000f, +0.70710676908493041992f,0.70710676908493041992f,0.00000000000000006123f, 
+1.00000000000000000000f,-0.70710676908493041992f,0.70710676908493041992f,}; float32_t rearranged_twiddle_stride3_256_f32[168]={ -1.00000000000000000000f,0.00000000000000000000f,0.99729045667869020697f, -0.07356456359966742631f,0.98917650996478101444f,0.14673047445536174793f, -0.97570213003852857003f,0.21910124015686979759f,0.95694033573220882438f, -0.29028467725446233105f,0.93299279883473895669f,0.35989503653498811087f, -0.90398929312344333820f,0.42755509343028208491f,0.87008699110871146054f, -0.49289819222978403790f,0.83146961230254523567f,0.55557023301960217765f, -0.78834642762660622761f,0.61523159058062681925f,0.74095112535495921691f, -0.67155895484701833009f,0.68954054473706694051f,0.72424708295146689174f, -0.63439328416364548779f,0.77301045336273688235f,0.57580819141784533866f, -0.81758481315158371139f,0.51410274419322166128f,0.85772861000027211809f, -0.44961132965460659516f,0.89322430119551532446f,0.38268343236508983729f, -0.92387953251128673848f,0.31368174039889157312f,0.94952818059303667475f, -0.24298017990326398197f,0.97003125319454397424f,0.17096188876030135595f, -0.98527764238894122162f,0.09801714032956077016f,0.99518472667219681771f, -0.02454122852291226384f,0.99969881869620424997f,-0.04906767432741800800f, -0.99879545620517240501f,-0.12241067519921615403f,0.99247953459870996706f, --0.19509032201612819257f,0.98078528040323043058f,-0.26671275747489830987f, -0.96377606579543984022f,-0.33688985339221994009f,0.94154406518302080631f, --0.40524131400498974998f,0.91420975570353069095f,-0.47139673682599769755f, -0.88192126434835504956f,-0.53499761988709704230f,0.84485356524970722791f, --0.59569930449243335691f,0.80320753148064494287f,-0.65317284295377653347f, -0.75720884650648467851f,-0.70710678118654746172f,0.70710678118654757274f, --0.75720884650648467851f,0.65317284295377664449f,-0.80320753148064483184f, -0.59569930449243346793f,-0.84485356524970711689f,0.53499761988709715332f, --0.88192126434835493853f,0.47139673682599780857f,-0.91420975570353069095f, -0.40524131400498991651f,-0.94154406518302069529f,0.33688985339222032867f, --0.96377606579543984022f,0.26671275747489847641f,-0.98078528040323043058f, -0.19509032201612860891f,-0.99247953459870996706f,0.12241067519921634832f, --0.99879545620517240501f,0.04906767432741796636f,-0.99969881869620424997f, --0.02454122852291207996f,-0.99518472667219692873f,-0.09801714032956058975f, --0.98527764238894133264f,-0.17096188876030096737f,-0.97003125319454397424f, --0.24298017990326381543f,-0.94952818059303678577f,-0.31368174039889118454f, --0.92387953251128684951f,-0.38268343236508967076f,-0.89322430119551532446f, --0.44961132965460665067f,-0.85772861000027211809f,-0.51410274419322155026f, --0.81758481315158371139f,-0.57580819141784533866f,-0.77301045336273710440f, --0.63439328416364526575f,-0.72424708295146700276f,-0.68954054473706682948f, --0.67155895484701866316f,-0.74095112535495888384f,-0.61523159058062726334f, --0.78834642762660589455f,-0.55557023301960217765f,-0.83146961230254523567f, --0.49289819222978420443f,-0.87008699110871134952f,-0.42755509343028247349f, --0.90398929312344311615f,-0.35989503653498794433f,-0.93299279883473895669f, --0.29028467725446244208f,-0.95694033573220882438f,-0.21910124015687010290f, --0.97570213003852845901f,-0.14673047445536230304f,-0.98917650996478090342f, --0.07356456359966735692f,-0.99729045667869020697f,1.00000000000000000000f, -0.00000000000000000000f,0.95694033573220882438f,0.29028467725446233105f, -0.83146961230254523567f,0.55557023301960217765f,0.63439328416364548779f, 
-0.77301045336273688235f,0.38268343236508983729f,0.92387953251128673848f, -0.09801714032956077016f,0.99518472667219681771f,-0.19509032201612819257f, -0.98078528040323043058f,-0.47139673682599769755f,0.88192126434835504956f, --0.70710678118654746172f,0.70710678118654757274f,-0.88192126434835493853f, -0.47139673682599780857f,-0.98078528040323043058f,0.19509032201612860891f, --0.99518472667219692873f,-0.09801714032956058975f,-0.92387953251128684951f, --0.38268343236508967076f,-0.77301045336273710440f,-0.63439328416364526575f, --0.55557023301960217765f,-0.83146961230254523567f,-0.29028467725446244208f, --0.95694033573220882438f,1.00000000000000000000f,0.00000000000000000000f, -0.38268343236508983729f,0.92387953251128673848f,-0.70710678118654746172f, -0.70710678118654757274f,-0.92387953251128684951f,-0.38268343236508967076f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99729043245315551758f, +0.07356456667184829712f,0.98917651176452636719f,0.14673046767711639404f, +0.97570210695266723633f,0.21910123527050018311f,0.95694035291671752930f, +0.29028466343879699707f,0.93299281597137451172f,0.35989505052566528320f, +0.90398931503295898438f,0.42755508422851562500f,0.87008696794509887695f, +0.49289819598197937012f,0.83146959543228149414f,0.55557024478912353516f, +0.78834640979766845703f,0.61523157358169555664f,0.74095112085342407227f, +0.67155897617340087891f,0.68954056501388549805f,0.72424709796905517578f, +0.63439327478408813477f,0.77301043272018432617f,0.57580816745758056641f, +0.81758481264114379883f,0.51410275697708129883f,0.85772860050201416016f, +0.44961133599281311035f,0.89322429895401000977f,0.38268342614173889160f, +0.92387950420379638672f,0.31368175148963928223f,0.94952815771102905273f, +0.24298018217086791992f,0.97003126144409179688f,0.17096188664436340332f, +0.98527765274047851562f,0.09801714122295379639f,0.99518471956253051758f, +0.02454122900962829590f,0.99969881772994995117f,-0.04906767606735229492f, +0.99879544973373413086f,-0.12241067737340927124f,0.99247956275939941406f, +-0.19509032368659973145f,0.98078525066375732422f,-0.26671275496482849121f, +0.96377605199813842773f,-0.33688986301422119141f,0.94154405593872070312f, +-0.40524131059646606445f,0.91420978307723999023f,-0.47139674425125122070f, +0.88192129135131835938f,-0.53499764204025268555f,0.84485357999801635742f, +-0.59569931030273437500f,0.80320751667022705078f,-0.65317285060882568359f, +0.75720882415771484375f,-0.70710676908493041992f,0.70710676908493041992f, +-0.75720882415771484375f,0.65317285060882568359f,-0.80320751667022705078f, +0.59569931030273437500f,-0.84485357999801635742f,0.53499764204025268555f, +-0.88192129135131835938f,0.47139674425125122070f,-0.91420978307723999023f, +0.40524131059646606445f,-0.94154405593872070312f,0.33688986301422119141f, +-0.96377605199813842773f,0.26671275496482849121f,-0.98078525066375732422f, +0.19509032368659973145f,-0.99247956275939941406f,0.12241067737340927124f, +-0.99879544973373413086f,0.04906767606735229492f,-0.99969881772994995117f, +-0.02454122900962829590f,-0.99518471956253051758f,-0.09801714122295379639f, +-0.98527765274047851562f,-0.17096188664436340332f,-0.97003126144409179688f, +-0.24298018217086791992f,-0.94952815771102905273f,-0.31368175148963928223f, +-0.92387950420379638672f,-0.38268342614173889160f,-0.89322429895401000977f, +-0.44961133599281311035f,-0.85772860050201416016f,-0.51410275697708129883f, +-0.81758481264114379883f,-0.57580816745758056641f,-0.77301043272018432617f, +-0.63439327478408813477f,-0.72424709796905517578f,-0.68954056501388549805f, 
+-0.67155897617340087891f,-0.74095112085342407227f,-0.61523157358169555664f, +-0.78834640979766845703f,-0.55557024478912353516f,-0.83146959543228149414f, +-0.49289819598197937012f,-0.87008696794509887695f,-0.42755508422851562500f, +-0.90398931503295898438f,-0.35989505052566528320f,-0.93299281597137451172f, +-0.29028466343879699707f,-0.95694035291671752930f,-0.21910123527050018311f, +-0.97570210695266723633f,-0.14673046767711639404f,-0.98917651176452636719f, +-0.07356456667184829712f,-0.99729043245315551758f,1.00000000000000000000f, +0.00000000000000000000f,0.95694035291671752930f,0.29028466343879699707f, +0.83146959543228149414f,0.55557024478912353516f,0.63439327478408813477f, +0.77301043272018432617f,0.38268342614173889160f,0.92387950420379638672f, +0.09801714122295379639f,0.99518471956253051758f,-0.19509032368659973145f, +0.98078525066375732422f,-0.47139674425125122070f,0.88192129135131835938f, +-0.70710676908493041992f,0.70710676908493041992f,-0.88192129135131835938f, +0.47139674425125122070f,-0.98078525066375732422f,0.19509032368659973145f, +-0.99518471956253051758f,-0.09801714122295379639f,-0.92387950420379638672f, +-0.38268342614173889160f,-0.77301043272018432617f,-0.63439327478408813477f, +-0.55557024478912353516f,-0.83146959543228149414f,-0.29028466343879699707f, +-0.95694035291671752930f,1.00000000000000000000f,0.00000000000000000000f, +0.38268342614173889160f,0.92387950420379638672f,-0.70710676908493041992f, +0.70710676908493041992f,-0.92387950420379638672f,-0.38268342614173889160f,}; #endif @@ -323,691 +326,691 @@ uint32_t rearranged_twiddle_tab_stride3_arr_1024_f32[5]={ 0,512,640,672,0,}; float32_t rearranged_twiddle_stride1_1024_f32[680]={ -1.00000000000000000000f,0.00000000000000000000f,0.99998117528260110909f, -0.00613588464915447527f,0.99992470183914450299f,0.01227153828571992539f, -0.99983058179582340319f,0.01840672990580482019f,0.99969881869620424997f, -0.02454122852291228812f,0.99952941750109314256f,0.03067480317663662595f, -0.99932238458834954375f,0.03680722294135883171f,0.99907772775264536147f, -0.04293825693494082024f,0.99879545620517240501f,0.04906767432741801493f, -0.99847558057329477421f,0.05519524434968993420f,0.99811811290014917919f, -0.06132073630220857829f,0.99772306664419163624f,0.06744391956366405094f, -0.99729045667869020697f,0.07356456359966742631f,0.99682029929116566791f, -0.07968243797143012563f,0.99631261218277800129f,0.08579731234443989385f, -0.99576741446765981713f,0.09190895649713272386f,0.99518472667219692873f, -0.09801714032956060363f,0.99456457073425541537f,0.10412163387205458642f, -0.99390697000235606051f,0.11022220729388305938f,0.99321194923479450001f, -0.11631863091190475235f,0.99247953459870996706f,0.12241067519921619566f, -0.99170975366909952520f,0.12849811079379316880f,0.99090263542778000971f, -0.13458070850712616773f,0.99005821026229712256f,0.14065823933284921088f, -0.98917650996478101444f,0.14673047445536174793f,0.98825756773074946437f, -0.15279718525844343535f,0.98730141815785843473f,0.15885814333386144570f, -0.98630809724459866938f,0.16491312048996989437f,0.98527764238894122162f, -0.17096188876030121717f,0.98421009238692902521f,0.17700422041214874946f, -0.98310548743121628501f,0.18303988795514095078f,0.98196386910955524296f, -0.18906866414980619262f,0.98078528040323043058f,0.19509032201612824808f, -0.97956976568544051887f,0.20110463484209190055f,0.97831737071962765473f, -0.20711137619221856032f,0.97702814265775439484f,0.21311031991609136194f, -0.97570213003852857003f,0.21910124015686979759f,0.97433938278557585821f, 
-0.22508391135979283204f,0.97293995220556017678f,0.23105810828067110951f, -0.97150389098625178352f,0.23702360599436719801f,0.97003125319454397424f, -0.24298017990326387094f,0.96852209427441737777f,0.24892760574572014853f, -0.96697647104485207059f,0.25486565960451457169f,0.96539444169768939830f, -0.26079411791527551401f,0.96377606579543984022f,0.26671275747489836538f, -0.96212140426904158019f,0.27262135544994897662f,0.96043051941556578655f, -0.27851968938505305973f,0.95870347489587159906f,0.28440753721127187692f, -0.95694033573220882438f,0.29028467725446233105f,0.95514116830577078243f, -0.29615088824362378883f,0.95330604035419386211f,0.30200594931922808417f, -0.95143502096900833820f,0.30784964004153486661f,0.94952818059303667475f, -0.31368174039889151761f,0.94758559101774109124f,0.31950203081601569188f, -0.94560732538052127971f,0.32531029216226292622f,0.94359345816196038559f, -0.33110630575987642921f,0.94154406518302080631f,0.33688985339222005111f, -0.93945922360218991898f,0.34266071731199437833f,0.93733901191257495977f, -0.34841868024943456472f,0.93518350993894761025f,0.35416352542049034380f, -0.93299279883473895669f,0.35989503653498811087f,0.93076696107898371224f, -0.36561299780477385379f,0.92850608047321558924f,0.37131719395183754306f, -0.92621024213831137928f,0.37700741021641825945f,0.92387953251128673848f, -0.38268343236508978178f,0.92151403934204190183f,0.38834504669882624617f, -0.91911385169005777040f,0.39399204006104809883f,0.91667905992104270485f, -0.39962419984564678810f,0.91420975570353069095f,0.40524131400498986100f, -0.91170603200542987832f,0.41084317105790391089f,0.90916798309052238025f, -0.41642956009763715253f,0.90659570451491533483f,0.42200027079979968159f, -0.90398929312344333820f,0.42755509343028208491f,0.90134884704602202810f, -0.43309381885315195726f,0.89867446569395381673f,0.43861623853852765853f, -0.89596624975618521791f,0.44412214457042920035f,0.89322430119551532446f, -0.44961132965460653965f,0.89044872324475787817f,0.45508358712634383592f, -0.88763962040285393496f,0.46053871095824000514f,0.88479709843093778954f, -0.46597649576796618121f,0.88192126434835504956f,0.47139673682599764204f, -0.87901222642863352519f,0.47679923006332208812f,0.87607009419540660122f, -0.48218377207912271887f,0.87309497841829009079f,0.48755016014843599592f, -0.87008699110871146054f,0.49289819222978403790f,0.86704624551569264845f, -0.49822766697278181303f,0.86397285612158669643f,0.50353838372571757542f, -0.86086693863776730939f,0.50883014254310698909f,0.85772861000027211809f, -0.51410274419322166128f,0.85455798836540053376f,0.51935599016558964269f, -0.85135519310526519554f,0.52458968267846894928f,0.84812034480329723252f, -0.52980362468629460526f,0.84485356524970711689f,0.53499761988709715332f, -0.84155497743689844370f,0.54017147272989285423f,0.83822470555483807875f, -0.54532498842204646383f,0.83486287498638001026f,0.55045797293660481131f, -0.83146961230254523567f,0.55557023301960217765f,0.82804504525775579626f, -0.56066157619733603124f,0.82458930278502529099f,0.56573181078361312046f, -0.82110251499110464835f,0.57078074588696725566f,0.81758481315158371139f, -0.57580819141784533866f,0.81403632970594841378f,0.58081395809576452649f, -0.81045719825259476821f,0.58579785745643886408f,0.80684755354379933401f, -0.59075970185887416442f,0.80320753148064494287f,0.59569930449243335691f, -0.79953726910790501314f,0.60061647938386897305f,0.79583690460888356633f, -0.60551104140432554512f,0.79210657730021238887f,0.61038280627630947528f, -0.78834642762660622761f,0.61523159058062681925f,0.78455659715557524159f, 
-0.62005721176328909561f,0.78073722857209448822f,0.62485948814238634341f, -0.77688846567323244230f,0.62963823891492698426f,0.77301045336273699338f, -0.63439328416364548779f,0.76910333764557969882f,0.63912444486377573138f, -0.76516726562245895860f,0.64383154288979138613f,0.76120238548426177871f, -0.64851440102211244110f,0.75720884650648456748f,0.65317284295377675551f, -0.75318679904361252042f,0.65780669329707863735f,0.74913639452345937020f, -0.66241577759017178373f,0.74505778544146594733f,0.66699992230363747137f, -0.74095112535495921691f,0.67155895484701833009f,0.73681656887736979300f, -0.67609270357531592310f,0.73265427167241281570f,0.68060099779545302212f, -0.72846439044822519637f,0.68508366777270035541f,0.72424708295146700276f, -0.68954054473706682948f,0.72000250796138165477f,0.69397146088965389055f, -0.71573082528381870571f,0.69837624940897280457f,0.71143219574521643356f, -0.70275474445722529993f,0.70710678118654757274f,0.70710678118654757274f, -0.70275474445722529993f,0.71143219574521643356f,0.69837624940897291559f, -0.71573082528381859468f,0.69397146088965400157f,0.72000250796138165477f, -0.68954054473706694051f,0.72424708295146689174f,0.68508366777270035541f, -0.72846439044822519637f,0.68060099779545302212f,0.73265427167241281570f, -0.67609270357531603413f,0.73681656887736979300f,0.67155895484701833009f, -0.74095112535495910588f,0.66699992230363747137f,0.74505778544146594733f, -0.66241577759017178373f,0.74913639452345925918f,0.65780669329707874837f, -0.75318679904361252042f,0.65317284295377686654f,0.75720884650648456748f, -0.64851440102211255212f,0.76120238548426177871f,0.64383154288979149715f, -0.76516726562245895860f,0.63912444486377573138f,0.76910333764557958780f, -0.63439328416364548779f,0.77301045336273688235f,0.62963823891492709528f, -0.77688846567323244230f,0.62485948814238645443f,0.78073722857209448822f, -0.62005721176328920663f,0.78455659715557524159f,0.61523159058062681925f, -0.78834642762660622761f,0.61038280627630947528f,0.79210657730021227785f, -0.60551104140432554512f,0.79583690460888345530f,0.60061647938386897305f, -0.79953726910790501314f,0.59569930449243346793f,0.80320753148064483184f, -0.59075970185887427544f,0.80684755354379922299f,0.58579785745643886408f, -0.81045719825259476821f,0.58081395809576452649f,0.81403632970594830276f, -0.57580819141784533866f,0.81758481315158371139f,0.57078074588696736669f, -0.82110251499110464835f,0.56573181078361323149f,0.82458930278502529099f, -0.56066157619733603124f,0.82804504525775579626f,0.55557023301960228867f, -0.83146961230254523567f,0.55045797293660481131f,0.83486287498638001026f, -0.54532498842204646383f,0.83822470555483796772f,0.54017147272989296525f, -0.84155497743689833268f,0.53499761988709726435f,0.84485356524970700587f, -0.52980362468629482731f,0.84812034480329712149f,0.52458968267846883826f, -0.85135519310526519554f,0.51935599016558953167f,0.85455798836540053376f, -0.51410274419322166128f,0.85772861000027211809f,0.50883014254310698909f, -0.86086693863776730939f,0.50353838372571757542f,0.86397285612158669643f, -0.49822766697278186854f,0.86704624551569264845f,0.49289819222978409341f, -0.87008699110871134952f,0.48755016014843605143f,0.87309497841829009079f, -0.48218377207912282989f,0.87607009419540660122f,0.47679923006332225466f, -0.87901222642863341417f,0.47139673682599780857f,0.88192126434835493853f, -0.46597649576796612569f,0.88479709843093778954f,0.46053871095824000514f, -0.88763962040285393496f,0.45508358712634383592f,0.89044872324475787817f, -0.44961132965460659516f,0.89322430119551532446f,0.44412214457042925586f, 
-0.89596624975618510689f,0.43861623853852771404f,0.89867446569395381673f, -0.43309381885315201277f,0.90134884704602202810f,0.42755509343028219593f, -0.90398929312344333820f,0.42200027079979979261f,0.90659570451491533483f, -0.41642956009763731906f,0.90916798309052226923f,0.41084317105790391089f, -0.91170603200542987832f,0.40524131400498986100f,0.91420975570353069095f, -0.39962419984564678810f,0.91667905992104270485f,0.39399204006104809883f, -0.91911385169005777040f,0.38834504669882630168f,0.92151403934204190183f, -0.38268343236508983729f,0.92387953251128673848f,0.37700741021641831496f, -0.92621024213831126826f,0.37131719395183759858f,0.92850608047321558924f, -0.36561299780477396482f,0.93076696107898371224f,0.35989503653498827740f, -0.93299279883473884567f,0.35416352542049051033f,0.93518350993894749923f, -0.34841868024943450921f,0.93733901191257495977f,0.34266071731199437833f, -0.93945922360218991898f,0.33688985339222005111f,0.94154406518302080631f, -0.33110630575987642921f,0.94359345816196038559f,0.32531029216226298173f, -0.94560732538052127971f,0.31950203081601574739f,0.94758559101774109124f, -0.31368174039889157312f,0.94952818059303667475f,0.30784964004153497763f, -0.95143502096900833820f,0.30200594931922819519f,0.95330604035419375109f, -0.29615088824362395536f,0.95514116830577067141f,0.29028467725446233105f, -0.95694033573220893540f,0.28440753721127182141f,0.95870347489587159906f, -0.27851968938505305973f,0.96043051941556578655f,0.27262135544994897662f, -0.96212140426904158019f,0.26671275747489842090f,0.96377606579543984022f, -0.26079411791527556952f,0.96539444169768939830f,0.25486565960451462720f, -0.96697647104485207059f,0.24892760574572025956f,0.96852209427441726675f, -0.24298017990326398197f,0.97003125319454397424f,0.23702360599436733679f, -0.97150389098625178352f,0.23105810828067127605f,0.97293995220556006576f, -0.22508391135979277653f,0.97433938278557585821f,0.21910124015686976984f, -0.97570213003852857003f,0.21311031991609136194f,0.97702814265775439484f, -0.20711137619221856032f,0.97831737071962765473f,0.20110463484209195606f, -0.97956976568544051887f,0.19509032201612833135f,0.98078528040323043058f, -0.18906866414980627589f,0.98196386910955524296f,0.18303988795514106180f, -0.98310548743121628501f,0.17700422041214886049f,0.98421009238692902521f, -0.17096188876030135595f,0.98527764238894122162f,0.16491312048997008866f, -0.98630809724459866938f,0.15885814333386139019f,0.98730141815785843473f, -0.15279718525844340760f,0.98825756773074946437f,0.14673047445536174793f, -0.98917650996478101444f,0.14065823933284923863f,0.99005821026229712256f, -0.13458070850712622324f,0.99090263542778000971f,0.12849811079379322432f, -0.99170975366909952520f,0.12241067519921627893f,0.99247953459870996706f, -0.11631863091190487725f,0.99321194923479450001f,0.11022220729388318428f, -0.99390697000235606051f,0.10412163387205472520f,0.99456457073425541537f, -0.09801714032956077016f,0.99518472667219681771f,0.09190895649713269611f, -0.99576741446765981713f,0.08579731234443987997f,0.99631261218277800129f, -0.07968243797143012563f,0.99682029929116566791f,0.07356456359966745406f, -0.99729045667869020697f,0.06744391956366410645f,0.99772306664419163624f, -0.06132073630220864768f,0.99811811290014917919f,0.05519524434969003135f, -0.99847558057329477421f,0.04906767432741812596f,0.99879545620517240501f, -0.04293825693494095902f,0.99907772775264536147f,0.03680722294135899131f, -0.99932238458834954375f,0.03067480317663658085f,0.99952941750109314256f, -0.02454122852291226384f,0.99969881869620424997f,0.01840672990580482019f, 
-0.99983058179582340319f,0.01227153828571994447f,0.99992470183914450299f, -0.00613588464915451517f,0.99998117528260110909f,1.00000000000000000000f, -0.00000000000000000000f,0.99969881869620424997f,0.02454122852291228812f, -0.99879545620517240501f,0.04906767432741801493f,0.99729045667869020697f, -0.07356456359966742631f,0.99518472667219692873f,0.09801714032956060363f, -0.99247953459870996706f,0.12241067519921619566f,0.98917650996478101444f, -0.14673047445536174793f,0.98527764238894122162f,0.17096188876030121717f, -0.98078528040323043058f,0.19509032201612824808f,0.97570213003852857003f, -0.21910124015686979759f,0.97003125319454397424f,0.24298017990326387094f, -0.96377606579543984022f,0.26671275747489836538f,0.95694033573220882438f, -0.29028467725446233105f,0.94952818059303667475f,0.31368174039889151761f, -0.94154406518302080631f,0.33688985339222005111f,0.93299279883473895669f, -0.35989503653498811087f,0.92387953251128673848f,0.38268343236508978178f, -0.91420975570353069095f,0.40524131400498986100f,0.90398929312344333820f, -0.42755509343028208491f,0.89322430119551532446f,0.44961132965460653965f, -0.88192126434835504956f,0.47139673682599764204f,0.87008699110871146054f, -0.49289819222978403790f,0.85772861000027211809f,0.51410274419322166128f, -0.84485356524970711689f,0.53499761988709715332f,0.83146961230254523567f, -0.55557023301960217765f,0.81758481315158371139f,0.57580819141784533866f, -0.80320753148064494287f,0.59569930449243335691f,0.78834642762660622761f, -0.61523159058062681925f,0.77301045336273699338f,0.63439328416364548779f, -0.75720884650648456748f,0.65317284295377675551f,0.74095112535495921691f, -0.67155895484701833009f,0.72424708295146700276f,0.68954054473706682948f, -0.70710678118654757274f,0.70710678118654757274f,0.68954054473706694051f, -0.72424708295146689174f,0.67155895484701833009f,0.74095112535495910588f, -0.65317284295377686654f,0.75720884650648456748f,0.63439328416364548779f, -0.77301045336273688235f,0.61523159058062681925f,0.78834642762660622761f, -0.59569930449243346793f,0.80320753148064483184f,0.57580819141784533866f, -0.81758481315158371139f,0.55557023301960228867f,0.83146961230254523567f, -0.53499761988709726435f,0.84485356524970700587f,0.51410274419322166128f, -0.85772861000027211809f,0.49289819222978409341f,0.87008699110871134952f, -0.47139673682599780857f,0.88192126434835493853f,0.44961132965460659516f, -0.89322430119551532446f,0.42755509343028219593f,0.90398929312344333820f, -0.40524131400498986100f,0.91420975570353069095f,0.38268343236508983729f, -0.92387953251128673848f,0.35989503653498827740f,0.93299279883473884567f, -0.33688985339222005111f,0.94154406518302080631f,0.31368174039889157312f, -0.94952818059303667475f,0.29028467725446233105f,0.95694033573220893540f, -0.26671275747489842090f,0.96377606579543984022f,0.24298017990326398197f, -0.97003125319454397424f,0.21910124015686976984f,0.97570213003852857003f, -0.19509032201612833135f,0.98078528040323043058f,0.17096188876030135595f, -0.98527764238894122162f,0.14673047445536174793f,0.98917650996478101444f, -0.12241067519921627893f,0.99247953459870996706f,0.09801714032956077016f, -0.99518472667219681771f,0.07356456359966745406f,0.99729045667869020697f, -0.04906767432741812596f,0.99879545620517240501f,0.02454122852291226384f, -0.99969881869620424997f,1.00000000000000000000f,0.00000000000000000000f, -0.99518472667219692873f,0.09801714032956060363f,0.98078528040323043058f, -0.19509032201612824808f,0.95694033573220882438f,0.29028467725446233105f, -0.92387953251128673848f,0.38268343236508978178f,0.88192126434835504956f, 
-0.47139673682599764204f,0.83146961230254523567f,0.55557023301960217765f, -0.77301045336273699338f,0.63439328416364548779f,0.70710678118654757274f, -0.70710678118654757274f,0.63439328416364548779f,0.77301045336273688235f, -0.55557023301960228867f,0.83146961230254523567f,0.47139673682599780857f, -0.88192126434835493853f,0.38268343236508983729f,0.92387953251128673848f, -0.29028467725446233105f,0.95694033573220893540f,0.19509032201612833135f, -0.98078528040323043058f,0.09801714032956077016f,0.99518472667219681771f, -1.00000000000000000000f,0.00000000000000000000f,0.92387953251128673848f, -0.38268343236508978178f,0.70710678118654757274f,0.70710678118654757274f, -0.38268343236508983729f,0.92387953251128673848f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99998116493225097656f, +0.00613588467240333557f,0.99992471933364868164f,0.01227153837680816650f, +0.99983060359954833984f,0.01840673014521598816f,0.99969881772994995117f, +0.02454122900962829590f,0.99952942132949829102f,0.03067480400204658508f, +0.99932235479354858398f,0.03680722415447235107f,0.99907773733139038086f, +0.04293825849890708923f,0.99879544973373413086f,0.04906767606735229492f, +0.99847555160522460938f,0.05519524589180946350f,0.99811810255050659180f, +0.06132073700428009033f,0.99772304296493530273f,0.06744392216205596924f, +0.99729043245315551758f,0.07356456667184829712f,0.99682027101516723633f, +0.07968243956565856934f,0.99631261825561523438f,0.08579730987548828125f, +0.99576741456985473633f,0.09190895408391952515f,0.99518471956253051758f, +0.09801714122295379639f,0.99456459283828735352f,0.10412163287401199341f, +0.99390697479248046875f,0.11022220551967620850f,0.99321192502975463867f, +0.11631862819194793701f,0.99247956275939941406f,0.12241067737340927124f, +0.99170976877212524414f,0.12849810719490051270f,0.99090266227722167969f, +0.13458070158958435059f,0.99005818367004394531f,0.14065824449062347412f, +0.98917651176452636719f,0.14673046767711639404f,0.98825758695602416992f, +0.15279719233512878418f,0.98730140924453735352f,0.15885815024375915527f, +0.98630809783935546875f,0.16491311788558959961f,0.98527765274047851562f, +0.17096188664436340332f,0.98421007394790649414f,0.17700421810150146484f, +0.98310548067092895508f,0.18303988873958587646f,0.98196387290954589844f, +0.18906866014003753662f,0.98078525066375732422f,0.19509032368659973145f, +0.97956979274749755859f,0.20110464096069335938f,0.97831737995147705078f, +0.20711137354373931885f,0.97702813148498535156f,0.21311031281948089600f, +0.97570210695266723633f,0.21910123527050018311f,0.97433936595916748047f, +0.22508391737937927246f,0.97293996810913085938f,0.23105810582637786865f, +0.97150391340255737305f,0.23702360689640045166f,0.97003126144409179688f, +0.24298018217086791992f,0.96852207183837890625f,0.24892760813236236572f, +0.96697646379470825195f,0.25486564636230468750f,0.96539443731307983398f, +0.26079410314559936523f,0.96377605199813842773f,0.26671275496482849121f, +0.96212142705917358398f,0.27262136340141296387f,0.96043050289154052734f, +0.27851969003677368164f,0.95870345830917358398f,0.28440752625465393066f, +0.95694035291671752930f,0.29028466343879699707f,0.95514118671417236328f, +0.29615089297294616699f,0.95330601930618286133f,0.30200594663619995117f, +0.95143502950668334961f,0.30784964561462402344f,0.94952815771102905273f, +0.31368175148963928223f,0.94758558273315429688f,0.31950202584266662598f, +0.94560730457305908203f,0.32531028985977172852f,0.94359344244003295898f, +0.33110630512237548828f,0.94154405593872070312f,0.33688986301422119141f, 
+0.93945920467376708984f,0.34266072511672973633f,0.93733900785446166992f, +0.34841868281364440918f,0.93518352508544921875f,0.35416352748870849609f, +0.93299281597137451172f,0.35989505052566528320f,0.93076694011688232422f, +0.36561298370361328125f,0.92850607633590698242f,0.37131720781326293945f, +0.92621022462844848633f,0.37700742483139038086f,0.92387950420379638672f, +0.38268342614173889160f,0.92151403427124023438f,0.38834503293037414551f, +0.91911387443542480469f,0.39399203658103942871f,0.91667908430099487305f, +0.39962419867515563965f,0.91420978307723999023f,0.40524131059646606445f, +0.91170603036880493164f,0.41084316372871398926f,0.90916800498962402344f, +0.41642954945564270020f,0.90659570693969726562f,0.42200025916099548340f, +0.90398931503295898438f,0.42755508422851562500f,0.90134882926940917969f, +0.43309381604194641113f,0.89867448806762695312f,0.43861624598503112793f, +0.89596623182296752930f,0.44412213563919067383f,0.89322429895401000977f, +0.44961133599281311035f,0.89044874906539916992f,0.45508357882499694824f, +0.88763964176177978516f,0.46053871512413024902f,0.88479709625244140625f, +0.46597650647163391113f,0.88192129135131835938f,0.47139674425125122070f, +0.87901222705841064453f,0.47679921984672546387f,0.87607008218765258789f, +0.48218378424644470215f,0.87309497594833374023f,0.48755016922950744629f, +0.87008696794509887695f,0.49289819598197937012f,0.86704623699188232422f, +0.49822765588760375977f,0.86397284269332885742f,0.50353837013244628906f, +0.86086696386337280273f,0.50883013010025024414f,0.85772860050201416016f, +0.51410275697708129883f,0.85455799102783203125f,0.51935601234436035156f, +0.85135519504547119141f,0.52458965778350830078f,0.84812033176422119141f, +0.52980363368988037109f,0.84485357999801635742f,0.53499764204025268555f, +0.84155499935150146484f,0.54017144441604614258f,0.83822470903396606445f, +0.54532498121261596680f,0.83486288785934448242f,0.55045795440673828125f, +0.83146959543228149414f,0.55557024478912353516f,0.82804507017135620117f, +0.56066155433654785156f,0.82458931207656860352f,0.56573182344436645508f, +0.82110249996185302734f,0.57078075408935546875f,0.81758481264114379883f, +0.57580816745758056641f,0.81403630971908569336f,0.58081394433975219727f, +0.81045717000961303711f,0.58579784631729125977f,0.80684757232666015625f, +0.59075969457626342773f,0.80320751667022705078f,0.59569931030273437500f, +0.79953724145889282227f,0.60061645507812500000f,0.79583692550659179688f, +0.60551106929779052734f,0.79210656881332397461f,0.61038279533386230469f, +0.78834640979766845703f,0.61523157358169555664f,0.78455656766891479492f, +0.62005722522735595703f,0.78073722124099731445f,0.62485951185226440430f, +0.77688848972320556641f,0.62963825464248657227f,0.77301043272018432617f, +0.63439327478408813477f,0.76910334825515747070f,0.63912445306777954102f, +0.76516723632812500000f,0.64383155107498168945f,0.76120239496231079102f, +0.64851438999176025391f,0.75720882415771484375f,0.65317285060882568359f, +0.75318682193756103516f,0.65780669450759887695f,0.74913638830184936523f, +0.66241580247879028320f,0.74505776166915893555f,0.66699993610382080078f, +0.74095112085342407227f,0.67155897617340087891f,0.73681658506393432617f, +0.67609268426895141602f,0.73265427350997924805f,0.68060100078582763672f, +0.72846436500549316406f,0.68508368730545043945f,0.72424709796905517578f, +0.68954056501388549805f,0.72000253200531005859f,0.69397145509719848633f, +0.71573084592819213867f,0.69837623834609985352f,0.71143221855163574219f, +0.70275473594665527344f,0.70710676908493041992f,0.70710676908493041992f, 
+0.70275473594665527344f,0.71143221855163574219f,0.69837623834609985352f, +0.71573084592819213867f,0.69397145509719848633f,0.72000253200531005859f, +0.68954056501388549805f,0.72424709796905517578f,0.68508368730545043945f, +0.72846436500549316406f,0.68060100078582763672f,0.73265427350997924805f, +0.67609268426895141602f,0.73681658506393432617f,0.67155897617340087891f, +0.74095112085342407227f,0.66699993610382080078f,0.74505776166915893555f, +0.66241580247879028320f,0.74913638830184936523f,0.65780669450759887695f, +0.75318682193756103516f,0.65317285060882568359f,0.75720882415771484375f, +0.64851438999176025391f,0.76120239496231079102f,0.64383155107498168945f, +0.76516723632812500000f,0.63912445306777954102f,0.76910334825515747070f, +0.63439327478408813477f,0.77301043272018432617f,0.62963825464248657227f, +0.77688848972320556641f,0.62485951185226440430f,0.78073722124099731445f, +0.62005722522735595703f,0.78455656766891479492f,0.61523157358169555664f, +0.78834640979766845703f,0.61038279533386230469f,0.79210656881332397461f, +0.60551106929779052734f,0.79583692550659179688f,0.60061645507812500000f, +0.79953724145889282227f,0.59569931030273437500f,0.80320751667022705078f, +0.59075969457626342773f,0.80684757232666015625f,0.58579784631729125977f, +0.81045717000961303711f,0.58081394433975219727f,0.81403630971908569336f, +0.57580816745758056641f,0.81758481264114379883f,0.57078075408935546875f, +0.82110249996185302734f,0.56573182344436645508f,0.82458931207656860352f, +0.56066155433654785156f,0.82804507017135620117f,0.55557024478912353516f, +0.83146959543228149414f,0.55045795440673828125f,0.83486288785934448242f, +0.54532498121261596680f,0.83822470903396606445f,0.54017144441604614258f, +0.84155499935150146484f,0.53499764204025268555f,0.84485357999801635742f, +0.52980363368988037109f,0.84812033176422119141f,0.52458965778350830078f, +0.85135519504547119141f,0.51935601234436035156f,0.85455799102783203125f, +0.51410275697708129883f,0.85772860050201416016f,0.50883013010025024414f, +0.86086696386337280273f,0.50353837013244628906f,0.86397284269332885742f, +0.49822765588760375977f,0.86704623699188232422f,0.49289819598197937012f, +0.87008696794509887695f,0.48755016922950744629f,0.87309497594833374023f, +0.48218378424644470215f,0.87607008218765258789f,0.47679921984672546387f, +0.87901222705841064453f,0.47139674425125122070f,0.88192129135131835938f, +0.46597650647163391113f,0.88479709625244140625f,0.46053871512413024902f, +0.88763964176177978516f,0.45508357882499694824f,0.89044874906539916992f, +0.44961133599281311035f,0.89322429895401000977f,0.44412213563919067383f, +0.89596623182296752930f,0.43861624598503112793f,0.89867448806762695312f, +0.43309381604194641113f,0.90134882926940917969f,0.42755508422851562500f, +0.90398931503295898438f,0.42200025916099548340f,0.90659570693969726562f, +0.41642954945564270020f,0.90916800498962402344f,0.41084316372871398926f, +0.91170603036880493164f,0.40524131059646606445f,0.91420978307723999023f, +0.39962419867515563965f,0.91667908430099487305f,0.39399203658103942871f, +0.91911387443542480469f,0.38834503293037414551f,0.92151403427124023438f, +0.38268342614173889160f,0.92387950420379638672f,0.37700742483139038086f, +0.92621022462844848633f,0.37131720781326293945f,0.92850607633590698242f, +0.36561298370361328125f,0.93076694011688232422f,0.35989505052566528320f, +0.93299281597137451172f,0.35416352748870849609f,0.93518352508544921875f, +0.34841868281364440918f,0.93733900785446166992f,0.34266072511672973633f, +0.93945920467376708984f,0.33688986301422119141f,0.94154405593872070312f, 
+0.33110630512237548828f,0.94359344244003295898f,0.32531028985977172852f, +0.94560730457305908203f,0.31950202584266662598f,0.94758558273315429688f, +0.31368175148963928223f,0.94952815771102905273f,0.30784964561462402344f, +0.95143502950668334961f,0.30200594663619995117f,0.95330601930618286133f, +0.29615089297294616699f,0.95514118671417236328f,0.29028466343879699707f, +0.95694035291671752930f,0.28440752625465393066f,0.95870345830917358398f, +0.27851969003677368164f,0.96043050289154052734f,0.27262136340141296387f, +0.96212142705917358398f,0.26671275496482849121f,0.96377605199813842773f, +0.26079410314559936523f,0.96539443731307983398f,0.25486564636230468750f, +0.96697646379470825195f,0.24892760813236236572f,0.96852207183837890625f, +0.24298018217086791992f,0.97003126144409179688f,0.23702360689640045166f, +0.97150391340255737305f,0.23105810582637786865f,0.97293996810913085938f, +0.22508391737937927246f,0.97433936595916748047f,0.21910123527050018311f, +0.97570210695266723633f,0.21311031281948089600f,0.97702813148498535156f, +0.20711137354373931885f,0.97831737995147705078f,0.20110464096069335938f, +0.97956979274749755859f,0.19509032368659973145f,0.98078525066375732422f, +0.18906866014003753662f,0.98196387290954589844f,0.18303988873958587646f, +0.98310548067092895508f,0.17700421810150146484f,0.98421007394790649414f, +0.17096188664436340332f,0.98527765274047851562f,0.16491311788558959961f, +0.98630809783935546875f,0.15885815024375915527f,0.98730140924453735352f, +0.15279719233512878418f,0.98825758695602416992f,0.14673046767711639404f, +0.98917651176452636719f,0.14065824449062347412f,0.99005818367004394531f, +0.13458070158958435059f,0.99090266227722167969f,0.12849810719490051270f, +0.99170976877212524414f,0.12241067737340927124f,0.99247956275939941406f, +0.11631862819194793701f,0.99321192502975463867f,0.11022220551967620850f, +0.99390697479248046875f,0.10412163287401199341f,0.99456459283828735352f, +0.09801714122295379639f,0.99518471956253051758f,0.09190895408391952515f, +0.99576741456985473633f,0.08579730987548828125f,0.99631261825561523438f, +0.07968243956565856934f,0.99682027101516723633f,0.07356456667184829712f, +0.99729043245315551758f,0.06744392216205596924f,0.99772304296493530273f, +0.06132073700428009033f,0.99811810255050659180f,0.05519524589180946350f, +0.99847555160522460938f,0.04906767606735229492f,0.99879544973373413086f, +0.04293825849890708923f,0.99907773733139038086f,0.03680722415447235107f, +0.99932235479354858398f,0.03067480400204658508f,0.99952942132949829102f, +0.02454122900962829590f,0.99969881772994995117f,0.01840673014521598816f, +0.99983060359954833984f,0.01227153837680816650f,0.99992471933364868164f, +0.00613588467240333557f,0.99998116493225097656f,1.00000000000000000000f, +0.00000000000000000000f,0.99969881772994995117f,0.02454122900962829590f, +0.99879544973373413086f,0.04906767606735229492f,0.99729043245315551758f, +0.07356456667184829712f,0.99518471956253051758f,0.09801714122295379639f, +0.99247956275939941406f,0.12241067737340927124f,0.98917651176452636719f, +0.14673046767711639404f,0.98527765274047851562f,0.17096188664436340332f, +0.98078525066375732422f,0.19509032368659973145f,0.97570210695266723633f, +0.21910123527050018311f,0.97003126144409179688f,0.24298018217086791992f, +0.96377605199813842773f,0.26671275496482849121f,0.95694035291671752930f, +0.29028466343879699707f,0.94952815771102905273f,0.31368175148963928223f, +0.94154405593872070312f,0.33688986301422119141f,0.93299281597137451172f, +0.35989505052566528320f,0.92387950420379638672f,0.38268342614173889160f, 
+0.91420978307723999023f,0.40524131059646606445f,0.90398931503295898438f, +0.42755508422851562500f,0.89322429895401000977f,0.44961133599281311035f, +0.88192129135131835938f,0.47139674425125122070f,0.87008696794509887695f, +0.49289819598197937012f,0.85772860050201416016f,0.51410275697708129883f, +0.84485357999801635742f,0.53499764204025268555f,0.83146959543228149414f, +0.55557024478912353516f,0.81758481264114379883f,0.57580816745758056641f, +0.80320751667022705078f,0.59569931030273437500f,0.78834640979766845703f, +0.61523157358169555664f,0.77301043272018432617f,0.63439327478408813477f, +0.75720882415771484375f,0.65317285060882568359f,0.74095112085342407227f, +0.67155897617340087891f,0.72424709796905517578f,0.68954056501388549805f, +0.70710676908493041992f,0.70710676908493041992f,0.68954056501388549805f, +0.72424709796905517578f,0.67155897617340087891f,0.74095112085342407227f, +0.65317285060882568359f,0.75720882415771484375f,0.63439327478408813477f, +0.77301043272018432617f,0.61523157358169555664f,0.78834640979766845703f, +0.59569931030273437500f,0.80320751667022705078f,0.57580816745758056641f, +0.81758481264114379883f,0.55557024478912353516f,0.83146959543228149414f, +0.53499764204025268555f,0.84485357999801635742f,0.51410275697708129883f, +0.85772860050201416016f,0.49289819598197937012f,0.87008696794509887695f, +0.47139674425125122070f,0.88192129135131835938f,0.44961133599281311035f, +0.89322429895401000977f,0.42755508422851562500f,0.90398931503295898438f, +0.40524131059646606445f,0.91420978307723999023f,0.38268342614173889160f, +0.92387950420379638672f,0.35989505052566528320f,0.93299281597137451172f, +0.33688986301422119141f,0.94154405593872070312f,0.31368175148963928223f, +0.94952815771102905273f,0.29028466343879699707f,0.95694035291671752930f, +0.26671275496482849121f,0.96377605199813842773f,0.24298018217086791992f, +0.97003126144409179688f,0.21910123527050018311f,0.97570210695266723633f, +0.19509032368659973145f,0.98078525066375732422f,0.17096188664436340332f, +0.98527765274047851562f,0.14673046767711639404f,0.98917651176452636719f, +0.12241067737340927124f,0.99247956275939941406f,0.09801714122295379639f, +0.99518471956253051758f,0.07356456667184829712f,0.99729043245315551758f, +0.04906767606735229492f,0.99879544973373413086f,0.02454122900962829590f, +0.99969881772994995117f,1.00000000000000000000f,0.00000000000000000000f, +0.99518471956253051758f,0.09801714122295379639f,0.98078525066375732422f, +0.19509032368659973145f,0.95694035291671752930f,0.29028466343879699707f, +0.92387950420379638672f,0.38268342614173889160f,0.88192129135131835938f, +0.47139674425125122070f,0.83146959543228149414f,0.55557024478912353516f, +0.77301043272018432617f,0.63439327478408813477f,0.70710676908493041992f, +0.70710676908493041992f,0.63439327478408813477f,0.77301043272018432617f, +0.55557024478912353516f,0.83146959543228149414f,0.47139674425125122070f, +0.88192129135131835938f,0.38268342614173889160f,0.92387950420379638672f, +0.29028466343879699707f,0.95694035291671752930f,0.19509032368659973145f, +0.98078525066375732422f,0.09801714122295379639f,0.99518471956253051758f, +1.00000000000000000000f,0.00000000000000000000f,0.92387950420379638672f, +0.38268342614173889160f,0.70710676908493041992f,0.70710676908493041992f, +0.38268342614173889160f,0.92387950420379638672f,}; float32_t rearranged_twiddle_stride2_1024_f32[680]={ -1.00000000000000000000f,0.00000000000000000000f,0.99992470183914450299f, -0.01227153828571992539f,0.99969881869620424997f,0.02454122852291228812f, 
-0.99932238458834954375f,0.03680722294135883171f,0.99879545620517240501f, -0.04906767432741801493f,0.99811811290014917919f,0.06132073630220857829f, -0.99729045667869020697f,0.07356456359966742631f,0.99631261218277800129f, -0.08579731234443989385f,0.99518472667219692873f,0.09801714032956060363f, -0.99390697000235606051f,0.11022220729388305938f,0.99247953459870996706f, -0.12241067519921619566f,0.99090263542778000971f,0.13458070850712616773f, -0.98917650996478101444f,0.14673047445536174793f,0.98730141815785843473f, -0.15885814333386144570f,0.98527764238894122162f,0.17096188876030121717f, -0.98310548743121628501f,0.18303988795514095078f,0.98078528040323043058f, -0.19509032201612824808f,0.97831737071962765473f,0.20711137619221856032f, -0.97570213003852857003f,0.21910124015686979759f,0.97293995220556017678f, -0.23105810828067110951f,0.97003125319454397424f,0.24298017990326387094f, -0.96697647104485207059f,0.25486565960451457169f,0.96377606579543984022f, -0.26671275747489836538f,0.96043051941556578655f,0.27851968938505305973f, -0.95694033573220882438f,0.29028467725446233105f,0.95330604035419386211f, -0.30200594931922808417f,0.94952818059303667475f,0.31368174039889151761f, -0.94560732538052127971f,0.32531029216226292622f,0.94154406518302080631f, -0.33688985339222005111f,0.93733901191257495977f,0.34841868024943456472f, -0.93299279883473895669f,0.35989503653498811087f,0.92850608047321558924f, -0.37131719395183754306f,0.92387953251128673848f,0.38268343236508978178f, -0.91911385169005777040f,0.39399204006104809883f,0.91420975570353069095f, -0.40524131400498986100f,0.90916798309052238025f,0.41642956009763715253f, -0.90398929312344333820f,0.42755509343028208491f,0.89867446569395381673f, -0.43861623853852765853f,0.89322430119551532446f,0.44961132965460653965f, -0.88763962040285393496f,0.46053871095824000514f,0.88192126434835504956f, -0.47139673682599764204f,0.87607009419540660122f,0.48218377207912271887f, -0.87008699110871146054f,0.49289819222978403790f,0.86397285612158669643f, -0.50353838372571757542f,0.85772861000027211809f,0.51410274419322166128f, -0.85135519310526519554f,0.52458968267846894928f,0.84485356524970711689f, -0.53499761988709715332f,0.83822470555483807875f,0.54532498842204646383f, -0.83146961230254523567f,0.55557023301960217765f,0.82458930278502529099f, -0.56573181078361312046f,0.81758481315158371139f,0.57580819141784533866f, -0.81045719825259476821f,0.58579785745643886408f,0.80320753148064494287f, -0.59569930449243335691f,0.79583690460888356633f,0.60551104140432554512f, -0.78834642762660622761f,0.61523159058062681925f,0.78073722857209448822f, -0.62485948814238634341f,0.77301045336273699338f,0.63439328416364548779f, -0.76516726562245895860f,0.64383154288979138613f,0.75720884650648456748f, -0.65317284295377675551f,0.74913639452345937020f,0.66241577759017178373f, -0.74095112535495921691f,0.67155895484701833009f,0.73265427167241281570f, -0.68060099779545302212f,0.72424708295146700276f,0.68954054473706682948f, -0.71573082528381870571f,0.69837624940897280457f,0.70710678118654757274f, -0.70710678118654757274f,0.69837624940897291559f,0.71573082528381859468f, -0.68954054473706694051f,0.72424708295146689174f,0.68060099779545302212f, -0.73265427167241281570f,0.67155895484701833009f,0.74095112535495910588f, -0.66241577759017178373f,0.74913639452345925918f,0.65317284295377686654f, -0.75720884650648456748f,0.64383154288979149715f,0.76516726562245895860f, -0.63439328416364548779f,0.77301045336273688235f,0.62485948814238645443f, -0.78073722857209448822f,0.61523159058062681925f,0.78834642762660622761f, 
-0.60551104140432554512f,0.79583690460888345530f,0.59569930449243346793f, -0.80320753148064483184f,0.58579785745643886408f,0.81045719825259476821f, -0.57580819141784533866f,0.81758481315158371139f,0.56573181078361323149f, -0.82458930278502529099f,0.55557023301960228867f,0.83146961230254523567f, -0.54532498842204646383f,0.83822470555483796772f,0.53499761988709726435f, -0.84485356524970700587f,0.52458968267846883826f,0.85135519310526519554f, -0.51410274419322166128f,0.85772861000027211809f,0.50353838372571757542f, -0.86397285612158669643f,0.49289819222978409341f,0.87008699110871134952f, -0.48218377207912282989f,0.87607009419540660122f,0.47139673682599780857f, -0.88192126434835493853f,0.46053871095824000514f,0.88763962040285393496f, -0.44961132965460659516f,0.89322430119551532446f,0.43861623853852771404f, -0.89867446569395381673f,0.42755509343028219593f,0.90398929312344333820f, -0.41642956009763731906f,0.90916798309052226923f,0.40524131400498986100f, -0.91420975570353069095f,0.39399204006104809883f,0.91911385169005777040f, -0.38268343236508983729f,0.92387953251128673848f,0.37131719395183759858f, -0.92850608047321558924f,0.35989503653498827740f,0.93299279883473884567f, -0.34841868024943450921f,0.93733901191257495977f,0.33688985339222005111f, -0.94154406518302080631f,0.32531029216226298173f,0.94560732538052127971f, -0.31368174039889157312f,0.94952818059303667475f,0.30200594931922819519f, -0.95330604035419375109f,0.29028467725446233105f,0.95694033573220893540f, -0.27851968938505305973f,0.96043051941556578655f,0.26671275747489842090f, -0.96377606579543984022f,0.25486565960451462720f,0.96697647104485207059f, -0.24298017990326398197f,0.97003125319454397424f,0.23105810828067127605f, -0.97293995220556006576f,0.21910124015686976984f,0.97570213003852857003f, -0.20711137619221856032f,0.97831737071962765473f,0.19509032201612833135f, -0.98078528040323043058f,0.18303988795514106180f,0.98310548743121628501f, -0.17096188876030135595f,0.98527764238894122162f,0.15885814333386139019f, -0.98730141815785843473f,0.14673047445536174793f,0.98917650996478101444f, -0.13458070850712622324f,0.99090263542778000971f,0.12241067519921627893f, -0.99247953459870996706f,0.11022220729388318428f,0.99390697000235606051f, -0.09801714032956077016f,0.99518472667219681771f,0.08579731234443987997f, -0.99631261218277800129f,0.07356456359966745406f,0.99729045667869020697f, -0.06132073630220864768f,0.99811811290014917919f,0.04906767432741812596f, -0.99879545620517240501f,0.03680722294135899131f,0.99932238458834954375f, -0.02454122852291226384f,0.99969881869620424997f,0.01227153828571994447f, -0.99992470183914450299f,0.00000000000000006123f,1.00000000000000000000f, --0.01227153828571982304f,0.99992470183914450299f,-0.02454122852291214241f, -0.99969881869620424997f,-0.03680722294135886641f,0.99932238458834954375f, --0.04906767432741800800f,0.99879545620517240501f,-0.06132073630220852972f, -0.99811811290014917919f,-0.07356456359966732916f,0.99729045667869020697f, --0.08579731234443975507f,0.99631261218277800129f,-0.09801714032956064526f, -0.99518472667219692873f,-0.11022220729388305938f,0.99390697000235606051f, --0.12241067519921615403f,0.99247953459870996706f,-0.13458070850712611222f, -0.99090263542778000971f,-0.14673047445536163691f,0.98917650996478101444f, --0.15885814333386127917f,0.98730141815785843473f,-0.17096188876030124493f, -0.98527764238894122162f,-0.18303988795514092303f,0.98310548743121628501f, --0.19509032201612819257f,0.98078528040323043058f,-0.20711137619221844930f, 
-0.97831737071962765473f,-0.21910124015686965881f,0.97570213003852857003f, --0.23105810828067113727f,0.97293995220556017678f,-0.24298017990326387094f, -0.97003125319454397424f,-0.25486565960451451618f,0.96697647104485207059f, --0.26671275747489830987f,0.96377606579543984022f,-0.27851968938505294870f, -0.96043051941556589757f,-0.29028467725446216452f,0.95694033573220893540f, --0.30200594931922808417f,0.95330604035419386211f,-0.31368174039889140658f, -0.94952818059303667475f,-0.32531029216226287071f,0.94560732538052139073f, --0.33688985339221994009f,0.94154406518302080631f,-0.34841868024943439819f, -0.93733901191257495977f,-0.35989503653498816638f,0.93299279883473884567f, --0.37131719395183748755f,0.92850608047321558924f,-0.38268343236508972627f, -0.92387953251128673848f,-0.39399204006104798781f,0.91911385169005777040f, --0.40524131400498974998f,0.91420975570353069095f,-0.41642956009763698599f, -0.90916798309052249127f,-0.42755509343028186287f,0.90398929312344344922f, --0.43861623853852738097f,0.89867446569395392775f,-0.44961132965460670619f, -0.89322430119551521344f,-0.46053871095824006066f,0.88763962040285393496f, --0.47139673682599769755f,0.88192126434835504956f,-0.48218377207912271887f, -0.87607009419540660122f,-0.49289819222978398239f,0.87008699110871146054f, --0.50353838372571746440f,0.86397285612158680745f,-0.51410274419322155026f, -0.85772861000027211809f,-0.52458968267846872724f,0.85135519310526519554f, --0.53499761988709704230f,0.84485356524970722791f,-0.54532498842204624179f, -0.83822470555483818977f,-0.55557023301960195560f,0.83146961230254534669f, --0.56573181078361323149f,0.82458930278502517996f,-0.57580819141784533866f, -0.81758481315158371139f,-0.58579785745643886408f,0.81045719825259476821f, --0.59569930449243335691f,0.80320753148064494287f,-0.60551104140432543410f, -0.79583690460888356633f,-0.61523159058062670823f,0.78834642762660633863f, --0.62485948814238623239f,0.78073722857209459924f,-0.63439328416364537677f, -0.77301045336273710440f,-0.64383154288979127511f,0.76516726562245906962f, --0.65317284295377653347f,0.75720884650648467851f,-0.66241577759017189475f, -0.74913639452345925918f,-0.67155895484701844111f,0.74095112535495899486f, --0.68060099779545302212f,0.73265427167241281570f,-0.68954054473706694051f, -0.72424708295146689174f,-0.69837624940897280457f,0.71573082528381870571f, --0.70710678118654746172f,0.70710678118654757274f,-0.71573082528381859468f, -0.69837624940897291559f,-0.72424708295146678072f,0.68954054473706705153f, --0.73265427167241270467f,0.68060099779545324417f,-0.74095112535495888384f, -0.67155895484701855214f,-0.74913639452345914815f,0.66241577759017200577f, --0.75720884650648467851f,0.65317284295377664449f,-0.76516726562245895860f, -0.64383154288979138613f,-0.77301045336273699338f,0.63439328416364548779f, --0.78073722857209448822f,0.62485948814238634341f,-0.78834642762660622761f, -0.61523159058062693028f,-0.79583690460888345530f,0.60551104140432565615f, --0.80320753148064483184f,0.59569930449243346793f,-0.81045719825259465718f, -0.58579785745643897510f,-0.81758481315158360037f,0.57580819141784544968f, --0.82458930278502506894f,0.56573181078361345353f,-0.83146961230254534669f, -0.55557023301960217765f,-0.83822470555483807875f,0.54532498842204635281f, --0.84485356524970711689f,0.53499761988709715332f,-0.85135519310526519554f, -0.52458968267846894928f,-0.85772861000027200706f,0.51410274419322177231f, --0.86397285612158669643f,0.50353838372571757542f,-0.87008699110871134952f, -0.49289819222978414892f,-0.87607009419540649020f,0.48218377207912288540f, 
--0.88192126434835493853f,0.47139673682599780857f,-0.88763962040285382393f, -0.46053871095824022719f,-0.89322430119551521344f,0.44961132965460687272f, --0.89867446569395392775f,0.43861623853852754751f,-0.90398929312344333820f, -0.42755509343028202940f,-0.90916798309052238025f,0.41642956009763715253f, --0.91420975570353069095f,0.40524131400498991651f,-0.91911385169005777040f, -0.39399204006104815434f,-0.92387953251128673848f,0.38268343236508989280f, --0.92850608047321547822f,0.37131719395183770960f,-0.93299279883473884567f, -0.35989503653498833291f,-0.93733901191257484875f,0.34841868024943478677f, --0.94154406518302069529f,0.33688985339222032867f,-0.94560732538052116869f, -0.32531029216226325929f,-0.94952818059303667475f,0.31368174039889140658f, --0.95330604035419386211f,0.30200594931922802866f,-0.95694033573220882438f, -0.29028467725446238656f,-0.96043051941556578655f,0.27851968938505317075f, --0.96377606579543984022f,0.26671275747489847641f,-0.96697647104485207059f, -0.25486565960451468271f,-0.97003125319454397424f,0.24298017990326406523f, --0.97293995220556006576f,0.23105810828067133156f,-0.97570213003852845901f, -0.21910124015687004739f,-0.97831737071962754371f,0.20711137619221883788f, --0.98078528040323043058f,0.19509032201612860891f,-0.98310548743121628501f, -0.18303988795514089527f,-0.98527764238894122162f,0.17096188876030121717f, --0.98730141815785843473f,0.15885814333386147346f,-0.98917650996478101444f, -0.14673047445536180344f,-0.99090263542778000971f,0.13458070850712627875f, --0.99247953459870996706f,0.12241067519921634832f,-0.99390697000235606051f, -0.11022220729388323979f,-0.99518472667219681771f,0.09801714032956082567f, --0.99631261218277800129f,0.08579731234444015753f,-0.99729045667869020697f, -0.07356456359966773162f,-0.99811811290014917919f,0.06132073630220848809f, --0.99879545620517240501f,0.04906767432741796636f,-0.99932238458834954375f, -0.03680722294135883171f,-0.99969881869620424997f,0.02454122852291232629f, --0.99992470183914450299f,0.01227153828572000692f,1.00000000000000000000f, -0.00000000000000000000f,0.99879545620517240501f,0.04906767432741801493f, -0.99518472667219692873f,0.09801714032956060363f,0.98917650996478101444f, -0.14673047445536174793f,0.98078528040323043058f,0.19509032201612824808f, -0.97003125319454397424f,0.24298017990326387094f,0.95694033573220882438f, -0.29028467725446233105f,0.94154406518302080631f,0.33688985339222005111f, -0.92387953251128673848f,0.38268343236508978178f,0.90398929312344333820f, -0.42755509343028208491f,0.88192126434835504956f,0.47139673682599764204f, -0.85772861000027211809f,0.51410274419322166128f,0.83146961230254523567f, -0.55557023301960217765f,0.80320753148064494287f,0.59569930449243335691f, -0.77301045336273699338f,0.63439328416364548779f,0.74095112535495921691f, -0.67155895484701833009f,0.70710678118654757274f,0.70710678118654757274f, -0.67155895484701833009f,0.74095112535495910588f,0.63439328416364548779f, -0.77301045336273688235f,0.59569930449243346793f,0.80320753148064483184f, -0.55557023301960228867f,0.83146961230254523567f,0.51410274419322166128f, -0.85772861000027211809f,0.47139673682599780857f,0.88192126434835493853f, -0.42755509343028219593f,0.90398929312344333820f,0.38268343236508983729f, -0.92387953251128673848f,0.33688985339222005111f,0.94154406518302080631f, -0.29028467725446233105f,0.95694033573220893540f,0.24298017990326398197f, -0.97003125319454397424f,0.19509032201612833135f,0.98078528040323043058f, -0.14673047445536174793f,0.98917650996478101444f,0.09801714032956077016f, 
-0.99518472667219681771f,0.04906767432741812596f,0.99879545620517240501f, -0.00000000000000006123f,1.00000000000000000000f,-0.04906767432741800800f, -0.99879545620517240501f,-0.09801714032956064526f,0.99518472667219692873f, --0.14673047445536163691f,0.98917650996478101444f,-0.19509032201612819257f, -0.98078528040323043058f,-0.24298017990326387094f,0.97003125319454397424f, --0.29028467725446216452f,0.95694033573220893540f,-0.33688985339221994009f, -0.94154406518302080631f,-0.38268343236508972627f,0.92387953251128673848f, --0.42755509343028186287f,0.90398929312344344922f,-0.47139673682599769755f, -0.88192126434835504956f,-0.51410274419322155026f,0.85772861000027211809f, --0.55557023301960195560f,0.83146961230254534669f,-0.59569930449243335691f, -0.80320753148064494287f,-0.63439328416364537677f,0.77301045336273710440f, --0.67155895484701844111f,0.74095112535495899486f,-0.70710678118654746172f, -0.70710678118654757274f,-0.74095112535495888384f,0.67155895484701855214f, --0.77301045336273699338f,0.63439328416364548779f,-0.80320753148064483184f, -0.59569930449243346793f,-0.83146961230254534669f,0.55557023301960217765f, --0.85772861000027200706f,0.51410274419322177231f,-0.88192126434835493853f, -0.47139673682599780857f,-0.90398929312344333820f,0.42755509343028202940f, --0.92387953251128673848f,0.38268343236508989280f,-0.94154406518302069529f, -0.33688985339222032867f,-0.95694033573220882438f,0.29028467725446238656f, --0.97003125319454397424f,0.24298017990326406523f,-0.98078528040323043058f, -0.19509032201612860891f,-0.98917650996478101444f,0.14673047445536180344f, --0.99518472667219681771f,0.09801714032956082567f,-0.99879545620517240501f, -0.04906767432741796636f,1.00000000000000000000f,0.00000000000000000000f, -0.98078528040323043058f,0.19509032201612824808f,0.92387953251128673848f, -0.38268343236508978178f,0.83146961230254523567f,0.55557023301960217765f, -0.70710678118654757274f,0.70710678118654757274f,0.55557023301960228867f, -0.83146961230254523567f,0.38268343236508983729f,0.92387953251128673848f, -0.19509032201612833135f,0.98078528040323043058f,0.00000000000000006123f, -1.00000000000000000000f,-0.19509032201612819257f,0.98078528040323043058f, --0.38268343236508972627f,0.92387953251128673848f,-0.55557023301960195560f, -0.83146961230254534669f,-0.70710678118654746172f,0.70710678118654757274f, --0.83146961230254534669f,0.55557023301960217765f,-0.92387953251128673848f, -0.38268343236508989280f,-0.98078528040323043058f,0.19509032201612860891f, -1.00000000000000000000f,0.00000000000000000000f,0.70710678118654757274f, -0.70710678118654757274f,0.00000000000000006123f,1.00000000000000000000f, --0.70710678118654746172f,0.70710678118654757274f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99992471933364868164f, +0.01227153837680816650f,0.99969881772994995117f,0.02454122900962829590f, +0.99932235479354858398f,0.03680722415447235107f,0.99879544973373413086f, +0.04906767606735229492f,0.99811810255050659180f,0.06132073700428009033f, +0.99729043245315551758f,0.07356456667184829712f,0.99631261825561523438f, +0.08579730987548828125f,0.99518471956253051758f,0.09801714122295379639f, +0.99390697479248046875f,0.11022220551967620850f,0.99247956275939941406f, +0.12241067737340927124f,0.99090266227722167969f,0.13458070158958435059f, +0.98917651176452636719f,0.14673046767711639404f,0.98730140924453735352f, +0.15885815024375915527f,0.98527765274047851562f,0.17096188664436340332f, +0.98310548067092895508f,0.18303988873958587646f,0.98078525066375732422f, 
+0.19509032368659973145f,0.97831737995147705078f,0.20711137354373931885f, +0.97570210695266723633f,0.21910123527050018311f,0.97293996810913085938f, +0.23105810582637786865f,0.97003126144409179688f,0.24298018217086791992f, +0.96697646379470825195f,0.25486564636230468750f,0.96377605199813842773f, +0.26671275496482849121f,0.96043050289154052734f,0.27851969003677368164f, +0.95694035291671752930f,0.29028466343879699707f,0.95330601930618286133f, +0.30200594663619995117f,0.94952815771102905273f,0.31368175148963928223f, +0.94560730457305908203f,0.32531028985977172852f,0.94154405593872070312f, +0.33688986301422119141f,0.93733900785446166992f,0.34841868281364440918f, +0.93299281597137451172f,0.35989505052566528320f,0.92850607633590698242f, +0.37131720781326293945f,0.92387950420379638672f,0.38268342614173889160f, +0.91911387443542480469f,0.39399203658103942871f,0.91420978307723999023f, +0.40524131059646606445f,0.90916800498962402344f,0.41642954945564270020f, +0.90398931503295898438f,0.42755508422851562500f,0.89867448806762695312f, +0.43861624598503112793f,0.89322429895401000977f,0.44961133599281311035f, +0.88763964176177978516f,0.46053871512413024902f,0.88192129135131835938f, +0.47139674425125122070f,0.87607008218765258789f,0.48218378424644470215f, +0.87008696794509887695f,0.49289819598197937012f,0.86397284269332885742f, +0.50353837013244628906f,0.85772860050201416016f,0.51410275697708129883f, +0.85135519504547119141f,0.52458965778350830078f,0.84485357999801635742f, +0.53499764204025268555f,0.83822470903396606445f,0.54532498121261596680f, +0.83146959543228149414f,0.55557024478912353516f,0.82458931207656860352f, +0.56573182344436645508f,0.81758481264114379883f,0.57580816745758056641f, +0.81045717000961303711f,0.58579784631729125977f,0.80320751667022705078f, +0.59569931030273437500f,0.79583692550659179688f,0.60551106929779052734f, +0.78834640979766845703f,0.61523157358169555664f,0.78073722124099731445f, +0.62485951185226440430f,0.77301043272018432617f,0.63439327478408813477f, +0.76516723632812500000f,0.64383155107498168945f,0.75720882415771484375f, +0.65317285060882568359f,0.74913638830184936523f,0.66241580247879028320f, +0.74095112085342407227f,0.67155897617340087891f,0.73265427350997924805f, +0.68060100078582763672f,0.72424709796905517578f,0.68954056501388549805f, +0.71573084592819213867f,0.69837623834609985352f,0.70710676908493041992f, +0.70710676908493041992f,0.69837623834609985352f,0.71573084592819213867f, +0.68954056501388549805f,0.72424709796905517578f,0.68060100078582763672f, +0.73265427350997924805f,0.67155897617340087891f,0.74095112085342407227f, +0.66241580247879028320f,0.74913638830184936523f,0.65317285060882568359f, +0.75720882415771484375f,0.64383155107498168945f,0.76516723632812500000f, +0.63439327478408813477f,0.77301043272018432617f,0.62485951185226440430f, +0.78073722124099731445f,0.61523157358169555664f,0.78834640979766845703f, +0.60551106929779052734f,0.79583692550659179688f,0.59569931030273437500f, +0.80320751667022705078f,0.58579784631729125977f,0.81045717000961303711f, +0.57580816745758056641f,0.81758481264114379883f,0.56573182344436645508f, +0.82458931207656860352f,0.55557024478912353516f,0.83146959543228149414f, +0.54532498121261596680f,0.83822470903396606445f,0.53499764204025268555f, +0.84485357999801635742f,0.52458965778350830078f,0.85135519504547119141f, +0.51410275697708129883f,0.85772860050201416016f,0.50353837013244628906f, +0.86397284269332885742f,0.49289819598197937012f,0.87008696794509887695f, +0.48218378424644470215f,0.87607008218765258789f,0.47139674425125122070f, 
+0.88192129135131835938f,0.46053871512413024902f,0.88763964176177978516f, +0.44961133599281311035f,0.89322429895401000977f,0.43861624598503112793f, +0.89867448806762695312f,0.42755508422851562500f,0.90398931503295898438f, +0.41642954945564270020f,0.90916800498962402344f,0.40524131059646606445f, +0.91420978307723999023f,0.39399203658103942871f,0.91911387443542480469f, +0.38268342614173889160f,0.92387950420379638672f,0.37131720781326293945f, +0.92850607633590698242f,0.35989505052566528320f,0.93299281597137451172f, +0.34841868281364440918f,0.93733900785446166992f,0.33688986301422119141f, +0.94154405593872070312f,0.32531028985977172852f,0.94560730457305908203f, +0.31368175148963928223f,0.94952815771102905273f,0.30200594663619995117f, +0.95330601930618286133f,0.29028466343879699707f,0.95694035291671752930f, +0.27851969003677368164f,0.96043050289154052734f,0.26671275496482849121f, +0.96377605199813842773f,0.25486564636230468750f,0.96697646379470825195f, +0.24298018217086791992f,0.97003126144409179688f,0.23105810582637786865f, +0.97293996810913085938f,0.21910123527050018311f,0.97570210695266723633f, +0.20711137354373931885f,0.97831737995147705078f,0.19509032368659973145f, +0.98078525066375732422f,0.18303988873958587646f,0.98310548067092895508f, +0.17096188664436340332f,0.98527765274047851562f,0.15885815024375915527f, +0.98730140924453735352f,0.14673046767711639404f,0.98917651176452636719f, +0.13458070158958435059f,0.99090266227722167969f,0.12241067737340927124f, +0.99247956275939941406f,0.11022220551967620850f,0.99390697479248046875f, +0.09801714122295379639f,0.99518471956253051758f,0.08579730987548828125f, +0.99631261825561523438f,0.07356456667184829712f,0.99729043245315551758f, +0.06132073700428009033f,0.99811810255050659180f,0.04906767606735229492f, +0.99879544973373413086f,0.03680722415447235107f,0.99932235479354858398f, +0.02454122900962829590f,0.99969881772994995117f,0.01227153837680816650f, +0.99992471933364868164f,0.00000000000000006123f,1.00000000000000000000f, +-0.01227153837680816650f,0.99992471933364868164f,-0.02454122900962829590f, +0.99969881772994995117f,-0.03680722415447235107f,0.99932235479354858398f, +-0.04906767606735229492f,0.99879544973373413086f,-0.06132073700428009033f, +0.99811810255050659180f,-0.07356456667184829712f,0.99729043245315551758f, +-0.08579730987548828125f,0.99631261825561523438f,-0.09801714122295379639f, +0.99518471956253051758f,-0.11022220551967620850f,0.99390697479248046875f, +-0.12241067737340927124f,0.99247956275939941406f,-0.13458070158958435059f, +0.99090266227722167969f,-0.14673046767711639404f,0.98917651176452636719f, +-0.15885815024375915527f,0.98730140924453735352f,-0.17096188664436340332f, +0.98527765274047851562f,-0.18303988873958587646f,0.98310548067092895508f, +-0.19509032368659973145f,0.98078525066375732422f,-0.20711137354373931885f, +0.97831737995147705078f,-0.21910123527050018311f,0.97570210695266723633f, +-0.23105810582637786865f,0.97293996810913085938f,-0.24298018217086791992f, +0.97003126144409179688f,-0.25486564636230468750f,0.96697646379470825195f, +-0.26671275496482849121f,0.96377605199813842773f,-0.27851969003677368164f, +0.96043050289154052734f,-0.29028466343879699707f,0.95694035291671752930f, +-0.30200594663619995117f,0.95330601930618286133f,-0.31368175148963928223f, +0.94952815771102905273f,-0.32531028985977172852f,0.94560730457305908203f, +-0.33688986301422119141f,0.94154405593872070312f,-0.34841868281364440918f, +0.93733900785446166992f,-0.35989505052566528320f,0.93299281597137451172f, 
+-0.37131720781326293945f,0.92850607633590698242f,-0.38268342614173889160f, +0.92387950420379638672f,-0.39399203658103942871f,0.91911387443542480469f, +-0.40524131059646606445f,0.91420978307723999023f,-0.41642954945564270020f, +0.90916800498962402344f,-0.42755508422851562500f,0.90398931503295898438f, +-0.43861624598503112793f,0.89867448806762695312f,-0.44961133599281311035f, +0.89322429895401000977f,-0.46053871512413024902f,0.88763964176177978516f, +-0.47139674425125122070f,0.88192129135131835938f,-0.48218378424644470215f, +0.87607008218765258789f,-0.49289819598197937012f,0.87008696794509887695f, +-0.50353837013244628906f,0.86397284269332885742f,-0.51410275697708129883f, +0.85772860050201416016f,-0.52458965778350830078f,0.85135519504547119141f, +-0.53499764204025268555f,0.84485357999801635742f,-0.54532498121261596680f, +0.83822470903396606445f,-0.55557024478912353516f,0.83146959543228149414f, +-0.56573182344436645508f,0.82458931207656860352f,-0.57580816745758056641f, +0.81758481264114379883f,-0.58579784631729125977f,0.81045717000961303711f, +-0.59569931030273437500f,0.80320751667022705078f,-0.60551106929779052734f, +0.79583692550659179688f,-0.61523157358169555664f,0.78834640979766845703f, +-0.62485951185226440430f,0.78073722124099731445f,-0.63439327478408813477f, +0.77301043272018432617f,-0.64383155107498168945f,0.76516723632812500000f, +-0.65317285060882568359f,0.75720882415771484375f,-0.66241580247879028320f, +0.74913638830184936523f,-0.67155897617340087891f,0.74095112085342407227f, +-0.68060100078582763672f,0.73265427350997924805f,-0.68954056501388549805f, +0.72424709796905517578f,-0.69837623834609985352f,0.71573084592819213867f, +-0.70710676908493041992f,0.70710676908493041992f,-0.71573084592819213867f, +0.69837623834609985352f,-0.72424709796905517578f,0.68954056501388549805f, +-0.73265427350997924805f,0.68060100078582763672f,-0.74095112085342407227f, +0.67155897617340087891f,-0.74913638830184936523f,0.66241580247879028320f, +-0.75720882415771484375f,0.65317285060882568359f,-0.76516723632812500000f, +0.64383155107498168945f,-0.77301043272018432617f,0.63439327478408813477f, +-0.78073722124099731445f,0.62485951185226440430f,-0.78834640979766845703f, +0.61523157358169555664f,-0.79583692550659179688f,0.60551106929779052734f, +-0.80320751667022705078f,0.59569931030273437500f,-0.81045717000961303711f, +0.58579784631729125977f,-0.81758481264114379883f,0.57580816745758056641f, +-0.82458931207656860352f,0.56573182344436645508f,-0.83146959543228149414f, +0.55557024478912353516f,-0.83822470903396606445f,0.54532498121261596680f, +-0.84485357999801635742f,0.53499764204025268555f,-0.85135519504547119141f, +0.52458965778350830078f,-0.85772860050201416016f,0.51410275697708129883f, +-0.86397284269332885742f,0.50353837013244628906f,-0.87008696794509887695f, +0.49289819598197937012f,-0.87607008218765258789f,0.48218378424644470215f, +-0.88192129135131835938f,0.47139674425125122070f,-0.88763964176177978516f, +0.46053871512413024902f,-0.89322429895401000977f,0.44961133599281311035f, +-0.89867448806762695312f,0.43861624598503112793f,-0.90398931503295898438f, +0.42755508422851562500f,-0.90916800498962402344f,0.41642954945564270020f, +-0.91420978307723999023f,0.40524131059646606445f,-0.91911387443542480469f, +0.39399203658103942871f,-0.92387950420379638672f,0.38268342614173889160f, +-0.92850607633590698242f,0.37131720781326293945f,-0.93299281597137451172f, +0.35989505052566528320f,-0.93733900785446166992f,0.34841868281364440918f, +-0.94154405593872070312f,0.33688986301422119141f,-0.94560730457305908203f, 
+0.32531028985977172852f,-0.94952815771102905273f,0.31368175148963928223f, +-0.95330601930618286133f,0.30200594663619995117f,-0.95694035291671752930f, +0.29028466343879699707f,-0.96043050289154052734f,0.27851969003677368164f, +-0.96377605199813842773f,0.26671275496482849121f,-0.96697646379470825195f, +0.25486564636230468750f,-0.97003126144409179688f,0.24298018217086791992f, +-0.97293996810913085938f,0.23105810582637786865f,-0.97570210695266723633f, +0.21910123527050018311f,-0.97831737995147705078f,0.20711137354373931885f, +-0.98078525066375732422f,0.19509032368659973145f,-0.98310548067092895508f, +0.18303988873958587646f,-0.98527765274047851562f,0.17096188664436340332f, +-0.98730140924453735352f,0.15885815024375915527f,-0.98917651176452636719f, +0.14673046767711639404f,-0.99090266227722167969f,0.13458070158958435059f, +-0.99247956275939941406f,0.12241067737340927124f,-0.99390697479248046875f, +0.11022220551967620850f,-0.99518471956253051758f,0.09801714122295379639f, +-0.99631261825561523438f,0.08579730987548828125f,-0.99729043245315551758f, +0.07356456667184829712f,-0.99811810255050659180f,0.06132073700428009033f, +-0.99879544973373413086f,0.04906767606735229492f,-0.99932235479354858398f, +0.03680722415447235107f,-0.99969881772994995117f,0.02454122900962829590f, +-0.99992471933364868164f,0.01227153837680816650f,1.00000000000000000000f, +0.00000000000000000000f,0.99879544973373413086f,0.04906767606735229492f, +0.99518471956253051758f,0.09801714122295379639f,0.98917651176452636719f, +0.14673046767711639404f,0.98078525066375732422f,0.19509032368659973145f, +0.97003126144409179688f,0.24298018217086791992f,0.95694035291671752930f, +0.29028466343879699707f,0.94154405593872070312f,0.33688986301422119141f, +0.92387950420379638672f,0.38268342614173889160f,0.90398931503295898438f, +0.42755508422851562500f,0.88192129135131835938f,0.47139674425125122070f, +0.85772860050201416016f,0.51410275697708129883f,0.83146959543228149414f, +0.55557024478912353516f,0.80320751667022705078f,0.59569931030273437500f, +0.77301043272018432617f,0.63439327478408813477f,0.74095112085342407227f, +0.67155897617340087891f,0.70710676908493041992f,0.70710676908493041992f, +0.67155897617340087891f,0.74095112085342407227f,0.63439327478408813477f, +0.77301043272018432617f,0.59569931030273437500f,0.80320751667022705078f, +0.55557024478912353516f,0.83146959543228149414f,0.51410275697708129883f, +0.85772860050201416016f,0.47139674425125122070f,0.88192129135131835938f, +0.42755508422851562500f,0.90398931503295898438f,0.38268342614173889160f, +0.92387950420379638672f,0.33688986301422119141f,0.94154405593872070312f, +0.29028466343879699707f,0.95694035291671752930f,0.24298018217086791992f, +0.97003126144409179688f,0.19509032368659973145f,0.98078525066375732422f, +0.14673046767711639404f,0.98917651176452636719f,0.09801714122295379639f, +0.99518471956253051758f,0.04906767606735229492f,0.99879544973373413086f, +0.00000000000000006123f,1.00000000000000000000f,-0.04906767606735229492f, +0.99879544973373413086f,-0.09801714122295379639f,0.99518471956253051758f, +-0.14673046767711639404f,0.98917651176452636719f,-0.19509032368659973145f, +0.98078525066375732422f,-0.24298018217086791992f,0.97003126144409179688f, +-0.29028466343879699707f,0.95694035291671752930f,-0.33688986301422119141f, +0.94154405593872070312f,-0.38268342614173889160f,0.92387950420379638672f, +-0.42755508422851562500f,0.90398931503295898438f,-0.47139674425125122070f, +0.88192129135131835938f,-0.51410275697708129883f,0.85772860050201416016f, 
+-0.55557024478912353516f,0.83146959543228149414f,-0.59569931030273437500f, +0.80320751667022705078f,-0.63439327478408813477f,0.77301043272018432617f, +-0.67155897617340087891f,0.74095112085342407227f,-0.70710676908493041992f, +0.70710676908493041992f,-0.74095112085342407227f,0.67155897617340087891f, +-0.77301043272018432617f,0.63439327478408813477f,-0.80320751667022705078f, +0.59569931030273437500f,-0.83146959543228149414f,0.55557024478912353516f, +-0.85772860050201416016f,0.51410275697708129883f,-0.88192129135131835938f, +0.47139674425125122070f,-0.90398931503295898438f,0.42755508422851562500f, +-0.92387950420379638672f,0.38268342614173889160f,-0.94154405593872070312f, +0.33688986301422119141f,-0.95694035291671752930f,0.29028466343879699707f, +-0.97003126144409179688f,0.24298018217086791992f,-0.98078525066375732422f, +0.19509032368659973145f,-0.98917651176452636719f,0.14673046767711639404f, +-0.99518471956253051758f,0.09801714122295379639f,-0.99879544973373413086f, +0.04906767606735229492f,1.00000000000000000000f,0.00000000000000000000f, +0.98078525066375732422f,0.19509032368659973145f,0.92387950420379638672f, +0.38268342614173889160f,0.83146959543228149414f,0.55557024478912353516f, +0.70710676908493041992f,0.70710676908493041992f,0.55557024478912353516f, +0.83146959543228149414f,0.38268342614173889160f,0.92387950420379638672f, +0.19509032368659973145f,0.98078525066375732422f,0.00000000000000006123f, +1.00000000000000000000f,-0.19509032368659973145f,0.98078525066375732422f, +-0.38268342614173889160f,0.92387950420379638672f,-0.55557024478912353516f, +0.83146959543228149414f,-0.70710676908493041992f,0.70710676908493041992f, +-0.83146959543228149414f,0.55557024478912353516f,-0.92387950420379638672f, +0.38268342614173889160f,-0.98078525066375732422f,0.19509032368659973145f, +1.00000000000000000000f,0.00000000000000000000f,0.70710676908493041992f, +0.70710676908493041992f,0.00000000000000006123f,1.00000000000000000000f, +-0.70710676908493041992f,0.70710676908493041992f,}; float32_t rearranged_twiddle_stride3_1024_f32[680]={ -1.00000000000000000000f,0.00000000000000000000f,0.99983058179582340319f, -0.01840672990580482019f,0.99932238458834954375f,0.03680722294135883171f, -0.99847558057329477421f,0.05519524434968993420f,0.99729045667869020697f, -0.07356456359966742631f,0.99576741446765981713f,0.09190895649713272386f, -0.99390697000235606051f,0.11022220729388305938f,0.99170975366909952520f, -0.12849811079379316880f,0.98917650996478101444f,0.14673047445536174793f, -0.98630809724459866938f,0.16491312048996989437f,0.98310548743121628501f, -0.18303988795514095078f,0.97956976568544051887f,0.20110463484209190055f, -0.97570213003852857003f,0.21910124015686979759f,0.97150389098625178352f, -0.23702360599436719801f,0.96697647104485207059f,0.25486565960451457169f, -0.96212140426904158019f,0.27262135544994897662f,0.95694033573220882438f, -0.29028467725446233105f,0.95143502096900833820f,0.30784964004153486661f, -0.94560732538052127971f,0.32531029216226292622f,0.93945922360218991898f, -0.34266071731199437833f,0.93299279883473895669f,0.35989503653498811087f, -0.92621024213831137928f,0.37700741021641825945f,0.91911385169005777040f, -0.39399204006104809883f,0.91170603200542987832f,0.41084317105790391089f, -0.90398929312344333820f,0.42755509343028208491f,0.89596624975618521791f, -0.44412214457042920035f,0.88763962040285393496f,0.46053871095824000514f, -0.87901222642863352519f,0.47679923006332208812f,0.87008699110871146054f, -0.49289819222978403790f,0.86086693863776730939f,0.50883014254310698909f, 
-0.85135519310526519554f,0.52458968267846894928f,0.84155497743689844370f, -0.54017147272989285423f,0.83146961230254523567f,0.55557023301960217765f, -0.82110251499110464835f,0.57078074588696725566f,0.81045719825259476821f, -0.58579785745643886408f,0.79953726910790501314f,0.60061647938386897305f, -0.78834642762660622761f,0.61523159058062681925f,0.77688846567323244230f, -0.62963823891492698426f,0.76516726562245895860f,0.64383154288979138613f, -0.75318679904361252042f,0.65780669329707863735f,0.74095112535495921691f, -0.67155895484701833009f,0.72846439044822519637f,0.68508366777270035541f, -0.71573082528381870571f,0.69837624940897280457f,0.70275474445722529993f, -0.71143219574521643356f,0.68954054473706694051f,0.72424708295146689174f, -0.67609270357531603413f,0.73681656887736979300f,0.66241577759017178373f, -0.74913639452345925918f,0.64851440102211255212f,0.76120238548426177871f, -0.63439328416364548779f,0.77301045336273688235f,0.62005721176328920663f, -0.78455659715557524159f,0.60551104140432554512f,0.79583690460888345530f, -0.59075970185887427544f,0.80684755354379922299f,0.57580819141784533866f, -0.81758481315158371139f,0.56066157619733603124f,0.82804504525775579626f, -0.54532498842204646383f,0.83822470555483796772f,0.52980362468629482731f, -0.84812034480329712149f,0.51410274419322166128f,0.85772861000027211809f, -0.49822766697278186854f,0.86704624551569264845f,0.48218377207912282989f, -0.87607009419540660122f,0.46597649576796612569f,0.88479709843093778954f, -0.44961132965460659516f,0.89322430119551532446f,0.43309381885315201277f, -0.90134884704602202810f,0.41642956009763731906f,0.90916798309052226923f, -0.39962419984564678810f,0.91667905992104270485f,0.38268343236508983729f, -0.92387953251128673848f,0.36561299780477396482f,0.93076696107898371224f, -0.34841868024943450921f,0.93733901191257495977f,0.33110630575987642921f, -0.94359345816196038559f,0.31368174039889157312f,0.94952818059303667475f, -0.29615088824362395536f,0.95514116830577067141f,0.27851968938505305973f, -0.96043051941556578655f,0.26079411791527556952f,0.96539444169768939830f, -0.24298017990326398197f,0.97003125319454397424f,0.22508391135979277653f, -0.97433938278557585821f,0.20711137619221856032f,0.97831737071962765473f, -0.18906866414980627589f,0.98196386910955524296f,0.17096188876030135595f, -0.98527764238894122162f,0.15279718525844340760f,0.98825756773074946437f, -0.13458070850712622324f,0.99090263542778000971f,0.11631863091190487725f, -0.99321194923479450001f,0.09801714032956077016f,0.99518472667219681771f, -0.07968243797143012563f,0.99682029929116566791f,0.06132073630220864768f, -0.99811811290014917919f,0.04293825693494095902f,0.99907772775264536147f, -0.02454122852291226384f,0.99969881869620424997f,0.00613588464915451517f, -0.99998117528260110909f,-0.01227153828571982304f,0.99992470183914450299f, --0.03067480317663645942f,0.99952941750109314256f,-0.04906767432741800800f, -0.99879545620517240501f,-0.06744391956366398155f,0.99772306664419163624f, --0.08579731234443975507f,0.99631261218277800129f,-0.10412163387205460030f, -0.99456457073425541537f,-0.12241067519921615403f,0.99247953459870996706f, --0.14065823933284912761f,0.99005821026229712256f,-0.15885814333386127917f, -0.98730141815785843473f,-0.17700422041214874946f,0.98421009238692902521f, --0.19509032201612819257f,0.98078528040323043058f,-0.21311031991609125091f, -0.97702814265775439484f,-0.23105810828067113727f,0.97293995220556017678f, --0.24892760574572012078f,0.96852209427441737777f,-0.26671275747489830987f, 
-0.96377606579543984022f,-0.28440753721127171039f,0.95870347489587159906f, --0.30200594931922808417f,0.95330604035419386211f,-0.31950203081601563637f, -0.94758559101774120226f,-0.33688985339221994009f,0.94154406518302080631f, --0.35416352542049039931f,0.93518350993894761025f,-0.37131719395183748755f, -0.92850608047321558924f,-0.38834504669882619066f,0.92151403934204201285f, --0.40524131400498974998f,0.91420975570353069095f,-0.42200027079979968159f, -0.90659570451491533483f,-0.43861623853852738097f,0.89867446569395392775f, --0.45508358712634372489f,0.89044872324475798919f,-0.47139673682599769755f, -0.88192126434835504956f,-0.48755016014843571837f,0.87309497841829020182f, --0.50353838372571746440f,0.86397285612158680745f,-0.51935599016558964269f, -0.85455798836540053376f,-0.53499761988709704230f,0.84485356524970722791f, --0.55045797293660470029f,0.83486287498638012128f,-0.56573181078361323149f, -0.82458930278502517996f,-0.58081395809576441547f,0.81403632970594852480f, --0.59569930449243335691f,0.80320753148064494287f,-0.61038280627630958630f, -0.79210657730021227785f,-0.62485948814238623239f,0.78073722857209459924f, --0.63912444486377573138f,0.76910333764557958780f,-0.65317284295377653347f, -0.75720884650648467851f,-0.66699992230363736034f,0.74505778544146605835f, --0.68060099779545302212f,0.73265427167241281570f,-0.69397146088965377952f, -0.72000250796138176579f,-0.70710678118654746172f,0.70710678118654757274f, --0.72000250796138165477f,0.69397146088965389055f,-0.73265427167241270467f, -0.68060099779545324417f,-0.74505778544146594733f,0.66699992230363758239f, --0.75720884650648467851f,0.65317284295377664449f,-0.76910333764557947678f, -0.63912444486377584241f,-0.78073722857209448822f,0.62485948814238634341f, --0.79210657730021216683f,0.61038280627630969732f,-0.80320753148064483184f, -0.59569930449243346793f,-0.81403632970594841378f,0.58081395809576452649f, --0.82458930278502506894f,0.56573181078361345353f,-0.83486287498638001026f, -0.55045797293660492233f,-0.84485356524970711689f,0.53499761988709715332f, --0.85455798836540042274f,0.51935599016558975372f,-0.86397285612158669643f, -0.50353838372571757542f,-0.87309497841829009079f,0.48755016014843588490f, --0.88192126434835493853f,0.47139673682599780857f,-0.89044872324475787817f, -0.45508358712634389143f,-0.89867446569395392775f,0.43861623853852754751f, --0.90659570451491533483f,0.42200027079979984812f,-0.91420975570353069095f, -0.40524131400498991651f,-0.92151403934204179080f,0.38834504669882657923f, --0.92850608047321547822f,0.37131719395183770960f,-0.93518350993894761025f, -0.35416352542049039931f,-0.94154406518302069529f,0.33688985339222032867f, --0.94758559101774109124f,0.31950203081601580291f,-0.95330604035419386211f, -0.30200594931922802866f,-0.95870347489587148804f,0.28440753721127209896f, --0.96377606579543984022f,0.26671275747489847641f,-0.96852209427441737777f, -0.24892760574572009302f,-0.97293995220556006576f,0.23105810828067133156f, --0.97702814265775439484f,0.21311031991609141745f,-0.98078528040323043058f, -0.19509032201612860891f,-0.98421009238692902521f,0.17700422041214894375f, --0.98730141815785843473f,0.15885814333386147346f,-0.99005821026229701154f, -0.14065823933284954395f,-0.99247953459870996706f,0.12241067519921634832f, --0.99456457073425541537f,0.10412163387205457254f,-0.99631261218277800129f, -0.08579731234444015753f,-0.99772306664419163624f,0.06744391956366417584f, --0.99879545620517240501f,0.04906767432741796636f,-0.99952941750109314256f, -0.03067480317663686534f,-0.99992470183914450299f,0.01227153828572000692f, 
--0.99998117528260110909f,-0.00613588464915455420f,-0.99969881869620424997f, --0.02454122852291207996f,-0.99907772775264536147f,-0.04293825693494077861f, --0.99811811290014917919f,-0.06132073630220824523f,-0.99682029929116577893f, --0.07968243797142994522f,-0.99518472667219692873f,-0.09801714032956058975f, --0.99321194923479461103f,-0.11631863091190447479f,-0.99090263542778000971f, --0.13458070850712605671f,-0.98825756773074946437f,-0.15279718525844343535f, --0.98527764238894133264f,-0.17096188876030096737f,-0.98196386910955524296f, --0.18906866414980610935f,-0.97831737071962765473f,-0.20711137619221858808f, --0.97433938278557585821f,-0.22508391135979261000f,-0.97003125319454397424f, --0.24298017990326381543f,-0.96539444169768939830f,-0.26079411791527562503f, --0.96043051941556589757f,-0.27851968938505289319f,-0.95514116830577078243f, --0.29615088824362378883f,-0.94952818059303678577f,-0.31368174039889118454f, --0.94359345816196038559f,-0.33110630575987626267f,-0.93733901191257495977f, --0.34841868024943456472f,-0.93076696107898382326f,-0.36561299780477357624f, --0.92387953251128684951f,-0.38268343236508967076f,-0.91667905992104270485f, --0.39962419984564684361f,-0.90916798309052249127f,-0.41642956009763693048f, --0.90134884704602202810f,-0.43309381885315184624f,-0.89322430119551532446f, --0.44961132965460665067f,-0.88479709843093790056f,-0.46597649576796595916f, --0.87607009419540660122f,-0.48218377207912266336f,-0.86704624551569287050f, --0.49822766697278153547f,-0.85772861000027211809f,-0.51410274419322155026f, --0.84812034480329723252f,-0.52980362468629460526f,-0.83822470555483818977f, --0.54532498842204613076f,-0.82804504525775590729f,-0.56066157619733592021f, --0.81758481315158371139f,-0.57580819141784533866f,-0.80684755354379944503f, --0.59075970185887394237f,-0.79583690460888356633f,-0.60551104140432543410f, --0.78455659715557524159f,-0.62005721176328920663f,-0.77301045336273710440f, --0.63439328416364526575f,-0.76120238548426188974f,-0.64851440102211233008f, --0.74913639452345925918f,-0.66241577759017178373f,-0.73681656887737001504f, --0.67609270357531581208f,-0.72424708295146700276f,-0.68954054473706682948f, --0.71143219574521665560f,-0.70275474445722507788f,-0.69837624940897302661f, --0.71573082528381848366f,-0.68508366777270035541f,-0.72846439044822519637f, --0.67155895484701866316f,-0.74095112535495888384f,-0.65780669329707874837f, --0.75318679904361240940f,-0.64383154288979149715f,-0.76516726562245895860f, --0.62963823891492687324f,-0.77688846567323255332f,-0.61523159058062726334f, --0.78834642762660589455f,-0.60061647938386930612f,-0.79953726910790479110f, --0.58579785745643908612f,-0.81045719825259465718f,-0.57078074588696736669f, --0.82110251499110464835f,-0.55557023301960217765f,-0.83146961230254523567f, --0.54017147272989274320f,-0.84155497743689855472f,-0.52458968267846928235f, --0.85135519310526486247f,-0.50883014254310732216f,-0.86086693863776708735f, --0.49289819222978420443f,-0.87008699110871134952f,-0.47679923006332214364f, --0.87901222642863341417f,-0.46053871095823989412f,-0.88763962040285404598f, --0.44412214457042975546f,-0.89596624975618488484f,-0.42755509343028247349f, --0.90398929312344311615f,-0.41084317105790418845f,-0.91170603200542976730f, --0.39399204006104820985f,-0.91911385169005765938f,-0.37700741021641820394f, --0.92621024213831137928f,-0.35989503653498794433f,-0.93299279883473895669f, --0.34266071731199487793f,-0.93945922360218969693f,-0.32531029216226331480f, --0.94560732538052116869f,-0.30784964004153508865f,-0.95143502096900833820f, 
--0.29028467725446244208f,-0.95694033573220882438f,-0.27262135544994886560f, --0.96212140426904158019f,-0.25486565960451434965f,-0.96697647104485218161f, --0.23702360599436766986f,-0.97150389098625167250f,-0.21910124015687010290f, --0.97570213003852845901f,-0.20110463484209206708f,-0.97956976568544051887f, --0.18303988795514095078f,-0.98310548743121628501f,-0.16491312048996975559f, --0.98630809724459866938f,-0.14673047445536230304f,-0.98917650996478090342f, --0.12849811079379358514f,-0.99170975366909952520f,-0.11022220729388330918f, --0.99390697000235606051f,-0.09190895649713282101f,-0.99576741446765981713f, --0.07356456359966735692f,-0.99729045667869020697f,-0.05519524434968971216f, --0.99847558057329477421f,-0.03680722294135933131f,-0.99932238458834943273f, --0.01840672990580516366f,-0.99983058179582340319f,1.00000000000000000000f, -0.00000000000000000000f,0.99729045667869020697f,0.07356456359966742631f, -0.98917650996478101444f,0.14673047445536174793f,0.97570213003852857003f, -0.21910124015686979759f,0.95694033573220882438f,0.29028467725446233105f, -0.93299279883473895669f,0.35989503653498811087f,0.90398929312344333820f, -0.42755509343028208491f,0.87008699110871146054f,0.49289819222978403790f, -0.83146961230254523567f,0.55557023301960217765f,0.78834642762660622761f, -0.61523159058062681925f,0.74095112535495921691f,0.67155895484701833009f, -0.68954054473706694051f,0.72424708295146689174f,0.63439328416364548779f, -0.77301045336273688235f,0.57580819141784533866f,0.81758481315158371139f, -0.51410274419322166128f,0.85772861000027211809f,0.44961132965460659516f, -0.89322430119551532446f,0.38268343236508983729f,0.92387953251128673848f, -0.31368174039889157312f,0.94952818059303667475f,0.24298017990326398197f, -0.97003125319454397424f,0.17096188876030135595f,0.98527764238894122162f, -0.09801714032956077016f,0.99518472667219681771f,0.02454122852291226384f, -0.99969881869620424997f,-0.04906767432741800800f,0.99879545620517240501f, --0.12241067519921615403f,0.99247953459870996706f,-0.19509032201612819257f, -0.98078528040323043058f,-0.26671275747489830987f,0.96377606579543984022f, --0.33688985339221994009f,0.94154406518302080631f,-0.40524131400498974998f, -0.91420975570353069095f,-0.47139673682599769755f,0.88192126434835504956f, --0.53499761988709704230f,0.84485356524970722791f,-0.59569930449243335691f, -0.80320753148064494287f,-0.65317284295377653347f,0.75720884650648467851f, --0.70710678118654746172f,0.70710678118654757274f,-0.75720884650648467851f, -0.65317284295377664449f,-0.80320753148064483184f,0.59569930449243346793f, --0.84485356524970711689f,0.53499761988709715332f,-0.88192126434835493853f, -0.47139673682599780857f,-0.91420975570353069095f,0.40524131400498991651f, --0.94154406518302069529f,0.33688985339222032867f,-0.96377606579543984022f, -0.26671275747489847641f,-0.98078528040323043058f,0.19509032201612860891f, --0.99247953459870996706f,0.12241067519921634832f,-0.99879545620517240501f, -0.04906767432741796636f,-0.99969881869620424997f,-0.02454122852291207996f, --0.99518472667219692873f,-0.09801714032956058975f,-0.98527764238894133264f, --0.17096188876030096737f,-0.97003125319454397424f,-0.24298017990326381543f, --0.94952818059303678577f,-0.31368174039889118454f,-0.92387953251128684951f, --0.38268343236508967076f,-0.89322430119551532446f,-0.44961132965460665067f, --0.85772861000027211809f,-0.51410274419322155026f,-0.81758481315158371139f, --0.57580819141784533866f,-0.77301045336273710440f,-0.63439328416364526575f, --0.72424708295146700276f,-0.68954054473706682948f,-0.67155895484701866316f, 
--0.74095112535495888384f,-0.61523159058062726334f,-0.78834642762660589455f, --0.55557023301960217765f,-0.83146961230254523567f,-0.49289819222978420443f, --0.87008699110871134952f,-0.42755509343028247349f,-0.90398929312344311615f, --0.35989503653498794433f,-0.93299279883473895669f,-0.29028467725446244208f, --0.95694033573220882438f,-0.21910124015687010290f,-0.97570213003852845901f, --0.14673047445536230304f,-0.98917650996478090342f,-0.07356456359966735692f, --0.99729045667869020697f,1.00000000000000000000f,0.00000000000000000000f, -0.95694033573220882438f,0.29028467725446233105f,0.83146961230254523567f, -0.55557023301960217765f,0.63439328416364548779f,0.77301045336273688235f, -0.38268343236508983729f,0.92387953251128673848f,0.09801714032956077016f, -0.99518472667219681771f,-0.19509032201612819257f,0.98078528040323043058f, --0.47139673682599769755f,0.88192126434835504956f,-0.70710678118654746172f, -0.70710678118654757274f,-0.88192126434835493853f,0.47139673682599780857f, --0.98078528040323043058f,0.19509032201612860891f,-0.99518472667219692873f, --0.09801714032956058975f,-0.92387953251128684951f,-0.38268343236508967076f, --0.77301045336273710440f,-0.63439328416364526575f,-0.55557023301960217765f, --0.83146961230254523567f,-0.29028467725446244208f,-0.95694033573220882438f, -1.00000000000000000000f,0.00000000000000000000f,0.38268343236508983729f, -0.92387953251128673848f,-0.70710678118654746172f,0.70710678118654757274f, --0.92387953251128684951f,-0.38268343236508967076f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99983060359954833984f, +0.01840673014521598816f,0.99932235479354858398f,0.03680722415447235107f, +0.99847555160522460938f,0.05519524589180946350f,0.99729043245315551758f, +0.07356456667184829712f,0.99576741456985473633f,0.09190895408391952515f, +0.99390697479248046875f,0.11022220551967620850f,0.99170976877212524414f, +0.12849810719490051270f,0.98917651176452636719f,0.14673046767711639404f, +0.98630809783935546875f,0.16491311788558959961f,0.98310548067092895508f, +0.18303988873958587646f,0.97956979274749755859f,0.20110464096069335938f, +0.97570210695266723633f,0.21910123527050018311f,0.97150391340255737305f, +0.23702360689640045166f,0.96697646379470825195f,0.25486564636230468750f, +0.96212142705917358398f,0.27262136340141296387f,0.95694035291671752930f, +0.29028466343879699707f,0.95143502950668334961f,0.30784964561462402344f, +0.94560730457305908203f,0.32531028985977172852f,0.93945920467376708984f, +0.34266072511672973633f,0.93299281597137451172f,0.35989505052566528320f, +0.92621022462844848633f,0.37700742483139038086f,0.91911387443542480469f, +0.39399203658103942871f,0.91170603036880493164f,0.41084316372871398926f, +0.90398931503295898438f,0.42755508422851562500f,0.89596623182296752930f, +0.44412213563919067383f,0.88763964176177978516f,0.46053871512413024902f, +0.87901222705841064453f,0.47679921984672546387f,0.87008696794509887695f, +0.49289819598197937012f,0.86086696386337280273f,0.50883013010025024414f, +0.85135519504547119141f,0.52458965778350830078f,0.84155499935150146484f, +0.54017144441604614258f,0.83146959543228149414f,0.55557024478912353516f, +0.82110249996185302734f,0.57078075408935546875f,0.81045717000961303711f, +0.58579784631729125977f,0.79953724145889282227f,0.60061645507812500000f, +0.78834640979766845703f,0.61523157358169555664f,0.77688848972320556641f, +0.62963825464248657227f,0.76516723632812500000f,0.64383155107498168945f, +0.75318682193756103516f,0.65780669450759887695f,0.74095112085342407227f, 
+0.67155897617340087891f,0.72846436500549316406f,0.68508368730545043945f, +0.71573084592819213867f,0.69837623834609985352f,0.70275473594665527344f, +0.71143221855163574219f,0.68954056501388549805f,0.72424709796905517578f, +0.67609268426895141602f,0.73681658506393432617f,0.66241580247879028320f, +0.74913638830184936523f,0.64851438999176025391f,0.76120239496231079102f, +0.63439327478408813477f,0.77301043272018432617f,0.62005722522735595703f, +0.78455656766891479492f,0.60551106929779052734f,0.79583692550659179688f, +0.59075969457626342773f,0.80684757232666015625f,0.57580816745758056641f, +0.81758481264114379883f,0.56066155433654785156f,0.82804507017135620117f, +0.54532498121261596680f,0.83822470903396606445f,0.52980363368988037109f, +0.84812033176422119141f,0.51410275697708129883f,0.85772860050201416016f, +0.49822765588760375977f,0.86704623699188232422f,0.48218378424644470215f, +0.87607008218765258789f,0.46597650647163391113f,0.88479709625244140625f, +0.44961133599281311035f,0.89322429895401000977f,0.43309381604194641113f, +0.90134882926940917969f,0.41642954945564270020f,0.90916800498962402344f, +0.39962419867515563965f,0.91667908430099487305f,0.38268342614173889160f, +0.92387950420379638672f,0.36561298370361328125f,0.93076694011688232422f, +0.34841868281364440918f,0.93733900785446166992f,0.33110630512237548828f, +0.94359344244003295898f,0.31368175148963928223f,0.94952815771102905273f, +0.29615089297294616699f,0.95514118671417236328f,0.27851969003677368164f, +0.96043050289154052734f,0.26079410314559936523f,0.96539443731307983398f, +0.24298018217086791992f,0.97003126144409179688f,0.22508391737937927246f, +0.97433936595916748047f,0.20711137354373931885f,0.97831737995147705078f, +0.18906866014003753662f,0.98196387290954589844f,0.17096188664436340332f, +0.98527765274047851562f,0.15279719233512878418f,0.98825758695602416992f, +0.13458070158958435059f,0.99090266227722167969f,0.11631862819194793701f, +0.99321192502975463867f,0.09801714122295379639f,0.99518471956253051758f, +0.07968243956565856934f,0.99682027101516723633f,0.06132073700428009033f, +0.99811810255050659180f,0.04293825849890708923f,0.99907773733139038086f, +0.02454122900962829590f,0.99969881772994995117f,0.00613588467240333557f, +0.99998116493225097656f,-0.01227153837680816650f,0.99992471933364868164f, +-0.03067480400204658508f,0.99952942132949829102f,-0.04906767606735229492f, +0.99879544973373413086f,-0.06744392216205596924f,0.99772304296493530273f, +-0.08579730987548828125f,0.99631261825561523438f,-0.10412163287401199341f, +0.99456459283828735352f,-0.12241067737340927124f,0.99247956275939941406f, +-0.14065824449062347412f,0.99005818367004394531f,-0.15885815024375915527f, +0.98730140924453735352f,-0.17700421810150146484f,0.98421007394790649414f, +-0.19509032368659973145f,0.98078525066375732422f,-0.21311031281948089600f, +0.97702813148498535156f,-0.23105810582637786865f,0.97293996810913085938f, +-0.24892760813236236572f,0.96852207183837890625f,-0.26671275496482849121f, +0.96377605199813842773f,-0.28440752625465393066f,0.95870345830917358398f, +-0.30200594663619995117f,0.95330601930618286133f,-0.31950202584266662598f, +0.94758558273315429688f,-0.33688986301422119141f,0.94154405593872070312f, +-0.35416352748870849609f,0.93518352508544921875f,-0.37131720781326293945f, +0.92850607633590698242f,-0.38834503293037414551f,0.92151403427124023438f, +-0.40524131059646606445f,0.91420978307723999023f,-0.42200025916099548340f, +0.90659570693969726562f,-0.43861624598503112793f,0.89867448806762695312f, 
+-0.45508357882499694824f,0.89044874906539916992f,-0.47139674425125122070f, +0.88192129135131835938f,-0.48755016922950744629f,0.87309497594833374023f, +-0.50353837013244628906f,0.86397284269332885742f,-0.51935601234436035156f, +0.85455799102783203125f,-0.53499764204025268555f,0.84485357999801635742f, +-0.55045795440673828125f,0.83486288785934448242f,-0.56573182344436645508f, +0.82458931207656860352f,-0.58081394433975219727f,0.81403630971908569336f, +-0.59569931030273437500f,0.80320751667022705078f,-0.61038279533386230469f, +0.79210656881332397461f,-0.62485951185226440430f,0.78073722124099731445f, +-0.63912445306777954102f,0.76910334825515747070f,-0.65317285060882568359f, +0.75720882415771484375f,-0.66699993610382080078f,0.74505776166915893555f, +-0.68060100078582763672f,0.73265427350997924805f,-0.69397145509719848633f, +0.72000253200531005859f,-0.70710676908493041992f,0.70710676908493041992f, +-0.72000253200531005859f,0.69397145509719848633f,-0.73265427350997924805f, +0.68060100078582763672f,-0.74505776166915893555f,0.66699993610382080078f, +-0.75720882415771484375f,0.65317285060882568359f,-0.76910334825515747070f, +0.63912445306777954102f,-0.78073722124099731445f,0.62485951185226440430f, +-0.79210656881332397461f,0.61038279533386230469f,-0.80320751667022705078f, +0.59569931030273437500f,-0.81403630971908569336f,0.58081394433975219727f, +-0.82458931207656860352f,0.56573182344436645508f,-0.83486288785934448242f, +0.55045795440673828125f,-0.84485357999801635742f,0.53499764204025268555f, +-0.85455799102783203125f,0.51935601234436035156f,-0.86397284269332885742f, +0.50353837013244628906f,-0.87309497594833374023f,0.48755016922950744629f, +-0.88192129135131835938f,0.47139674425125122070f,-0.89044874906539916992f, +0.45508357882499694824f,-0.89867448806762695312f,0.43861624598503112793f, +-0.90659570693969726562f,0.42200025916099548340f,-0.91420978307723999023f, +0.40524131059646606445f,-0.92151403427124023438f,0.38834503293037414551f, +-0.92850607633590698242f,0.37131720781326293945f,-0.93518352508544921875f, +0.35416352748870849609f,-0.94154405593872070312f,0.33688986301422119141f, +-0.94758558273315429688f,0.31950202584266662598f,-0.95330601930618286133f, +0.30200594663619995117f,-0.95870345830917358398f,0.28440752625465393066f, +-0.96377605199813842773f,0.26671275496482849121f,-0.96852207183837890625f, +0.24892760813236236572f,-0.97293996810913085938f,0.23105810582637786865f, +-0.97702813148498535156f,0.21311031281948089600f,-0.98078525066375732422f, +0.19509032368659973145f,-0.98421007394790649414f,0.17700421810150146484f, +-0.98730140924453735352f,0.15885815024375915527f,-0.99005818367004394531f, +0.14065824449062347412f,-0.99247956275939941406f,0.12241067737340927124f, +-0.99456459283828735352f,0.10412163287401199341f,-0.99631261825561523438f, +0.08579730987548828125f,-0.99772304296493530273f,0.06744392216205596924f, +-0.99879544973373413086f,0.04906767606735229492f,-0.99952942132949829102f, +0.03067480400204658508f,-0.99992471933364868164f,0.01227153837680816650f, +-0.99998116493225097656f,-0.00613588467240333557f,-0.99969881772994995117f, +-0.02454122900962829590f,-0.99907773733139038086f,-0.04293825849890708923f, +-0.99811810255050659180f,-0.06132073700428009033f,-0.99682027101516723633f, +-0.07968243956565856934f,-0.99518471956253051758f,-0.09801714122295379639f, +-0.99321192502975463867f,-0.11631862819194793701f,-0.99090266227722167969f, +-0.13458070158958435059f,-0.98825758695602416992f,-0.15279719233512878418f, 
+-0.98527765274047851562f,-0.17096188664436340332f,-0.98196387290954589844f, +-0.18906866014003753662f,-0.97831737995147705078f,-0.20711137354373931885f, +-0.97433936595916748047f,-0.22508391737937927246f,-0.97003126144409179688f, +-0.24298018217086791992f,-0.96539443731307983398f,-0.26079410314559936523f, +-0.96043050289154052734f,-0.27851969003677368164f,-0.95514118671417236328f, +-0.29615089297294616699f,-0.94952815771102905273f,-0.31368175148963928223f, +-0.94359344244003295898f,-0.33110630512237548828f,-0.93733900785446166992f, +-0.34841868281364440918f,-0.93076694011688232422f,-0.36561298370361328125f, +-0.92387950420379638672f,-0.38268342614173889160f,-0.91667908430099487305f, +-0.39962419867515563965f,-0.90916800498962402344f,-0.41642954945564270020f, +-0.90134882926940917969f,-0.43309381604194641113f,-0.89322429895401000977f, +-0.44961133599281311035f,-0.88479709625244140625f,-0.46597650647163391113f, +-0.87607008218765258789f,-0.48218378424644470215f,-0.86704623699188232422f, +-0.49822765588760375977f,-0.85772860050201416016f,-0.51410275697708129883f, +-0.84812033176422119141f,-0.52980363368988037109f,-0.83822470903396606445f, +-0.54532498121261596680f,-0.82804507017135620117f,-0.56066155433654785156f, +-0.81758481264114379883f,-0.57580816745758056641f,-0.80684757232666015625f, +-0.59075969457626342773f,-0.79583692550659179688f,-0.60551106929779052734f, +-0.78455656766891479492f,-0.62005722522735595703f,-0.77301043272018432617f, +-0.63439327478408813477f,-0.76120239496231079102f,-0.64851438999176025391f, +-0.74913638830184936523f,-0.66241580247879028320f,-0.73681658506393432617f, +-0.67609268426895141602f,-0.72424709796905517578f,-0.68954056501388549805f, +-0.71143221855163574219f,-0.70275473594665527344f,-0.69837623834609985352f, +-0.71573084592819213867f,-0.68508368730545043945f,-0.72846436500549316406f, +-0.67155897617340087891f,-0.74095112085342407227f,-0.65780669450759887695f, +-0.75318682193756103516f,-0.64383155107498168945f,-0.76516723632812500000f, +-0.62963825464248657227f,-0.77688848972320556641f,-0.61523157358169555664f, +-0.78834640979766845703f,-0.60061645507812500000f,-0.79953724145889282227f, +-0.58579784631729125977f,-0.81045717000961303711f,-0.57078075408935546875f, +-0.82110249996185302734f,-0.55557024478912353516f,-0.83146959543228149414f, +-0.54017144441604614258f,-0.84155499935150146484f,-0.52458965778350830078f, +-0.85135519504547119141f,-0.50883013010025024414f,-0.86086696386337280273f, +-0.49289819598197937012f,-0.87008696794509887695f,-0.47679921984672546387f, +-0.87901222705841064453f,-0.46053871512413024902f,-0.88763964176177978516f, +-0.44412213563919067383f,-0.89596623182296752930f,-0.42755508422851562500f, +-0.90398931503295898438f,-0.41084316372871398926f,-0.91170603036880493164f, +-0.39399203658103942871f,-0.91911387443542480469f,-0.37700742483139038086f, +-0.92621022462844848633f,-0.35989505052566528320f,-0.93299281597137451172f, +-0.34266072511672973633f,-0.93945920467376708984f,-0.32531028985977172852f, +-0.94560730457305908203f,-0.30784964561462402344f,-0.95143502950668334961f, +-0.29028466343879699707f,-0.95694035291671752930f,-0.27262136340141296387f, +-0.96212142705917358398f,-0.25486564636230468750f,-0.96697646379470825195f, +-0.23702360689640045166f,-0.97150391340255737305f,-0.21910123527050018311f, +-0.97570210695266723633f,-0.20110464096069335938f,-0.97956979274749755859f, +-0.18303988873958587646f,-0.98310548067092895508f,-0.16491311788558959961f, +-0.98630809783935546875f,-0.14673046767711639404f,-0.98917651176452636719f, 
+-0.12849810719490051270f,-0.99170976877212524414f,-0.11022220551967620850f, +-0.99390697479248046875f,-0.09190895408391952515f,-0.99576741456985473633f, +-0.07356456667184829712f,-0.99729043245315551758f,-0.05519524589180946350f, +-0.99847555160522460938f,-0.03680722415447235107f,-0.99932235479354858398f, +-0.01840673014521598816f,-0.99983060359954833984f,1.00000000000000000000f, +0.00000000000000000000f,0.99729043245315551758f,0.07356456667184829712f, +0.98917651176452636719f,0.14673046767711639404f,0.97570210695266723633f, +0.21910123527050018311f,0.95694035291671752930f,0.29028466343879699707f, +0.93299281597137451172f,0.35989505052566528320f,0.90398931503295898438f, +0.42755508422851562500f,0.87008696794509887695f,0.49289819598197937012f, +0.83146959543228149414f,0.55557024478912353516f,0.78834640979766845703f, +0.61523157358169555664f,0.74095112085342407227f,0.67155897617340087891f, +0.68954056501388549805f,0.72424709796905517578f,0.63439327478408813477f, +0.77301043272018432617f,0.57580816745758056641f,0.81758481264114379883f, +0.51410275697708129883f,0.85772860050201416016f,0.44961133599281311035f, +0.89322429895401000977f,0.38268342614173889160f,0.92387950420379638672f, +0.31368175148963928223f,0.94952815771102905273f,0.24298018217086791992f, +0.97003126144409179688f,0.17096188664436340332f,0.98527765274047851562f, +0.09801714122295379639f,0.99518471956253051758f,0.02454122900962829590f, +0.99969881772994995117f,-0.04906767606735229492f,0.99879544973373413086f, +-0.12241067737340927124f,0.99247956275939941406f,-0.19509032368659973145f, +0.98078525066375732422f,-0.26671275496482849121f,0.96377605199813842773f, +-0.33688986301422119141f,0.94154405593872070312f,-0.40524131059646606445f, +0.91420978307723999023f,-0.47139674425125122070f,0.88192129135131835938f, +-0.53499764204025268555f,0.84485357999801635742f,-0.59569931030273437500f, +0.80320751667022705078f,-0.65317285060882568359f,0.75720882415771484375f, +-0.70710676908493041992f,0.70710676908493041992f,-0.75720882415771484375f, +0.65317285060882568359f,-0.80320751667022705078f,0.59569931030273437500f, +-0.84485357999801635742f,0.53499764204025268555f,-0.88192129135131835938f, +0.47139674425125122070f,-0.91420978307723999023f,0.40524131059646606445f, +-0.94154405593872070312f,0.33688986301422119141f,-0.96377605199813842773f, +0.26671275496482849121f,-0.98078525066375732422f,0.19509032368659973145f, +-0.99247956275939941406f,0.12241067737340927124f,-0.99879544973373413086f, +0.04906767606735229492f,-0.99969881772994995117f,-0.02454122900962829590f, +-0.99518471956253051758f,-0.09801714122295379639f,-0.98527765274047851562f, +-0.17096188664436340332f,-0.97003126144409179688f,-0.24298018217086791992f, +-0.94952815771102905273f,-0.31368175148963928223f,-0.92387950420379638672f, +-0.38268342614173889160f,-0.89322429895401000977f,-0.44961133599281311035f, +-0.85772860050201416016f,-0.51410275697708129883f,-0.81758481264114379883f, +-0.57580816745758056641f,-0.77301043272018432617f,-0.63439327478408813477f, +-0.72424709796905517578f,-0.68954056501388549805f,-0.67155897617340087891f, +-0.74095112085342407227f,-0.61523157358169555664f,-0.78834640979766845703f, +-0.55557024478912353516f,-0.83146959543228149414f,-0.49289819598197937012f, +-0.87008696794509887695f,-0.42755508422851562500f,-0.90398931503295898438f, +-0.35989505052566528320f,-0.93299281597137451172f,-0.29028466343879699707f, +-0.95694035291671752930f,-0.21910123527050018311f,-0.97570210695266723633f, +-0.14673046767711639404f,-0.98917651176452636719f,-0.07356456667184829712f, 
+-0.99729043245315551758f,1.00000000000000000000f,0.00000000000000000000f, +0.95694035291671752930f,0.29028466343879699707f,0.83146959543228149414f, +0.55557024478912353516f,0.63439327478408813477f,0.77301043272018432617f, +0.38268342614173889160f,0.92387950420379638672f,0.09801714122295379639f, +0.99518471956253051758f,-0.19509032368659973145f,0.98078525066375732422f, +-0.47139674425125122070f,0.88192129135131835938f,-0.70710676908493041992f, +0.70710676908493041992f,-0.88192129135131835938f,0.47139674425125122070f, +-0.98078525066375732422f,0.19509032368659973145f,-0.99518471956253051758f, +-0.09801714122295379639f,-0.92387950420379638672f,-0.38268342614173889160f, +-0.77301043272018432617f,-0.63439327478408813477f,-0.55557024478912353516f, +-0.83146959543228149414f,-0.29028466343879699707f,-0.95694035291671752930f, +1.00000000000000000000f,0.00000000000000000000f,0.38268342614173889160f, +0.92387950420379638672f,-0.70710676908493041992f,0.70710676908493041992f, +-0.92387950420379638672f,-0.38268342614173889160f,}; #endif @@ -1023,2740 +1026,2740 @@ uint32_t rearranged_twiddle_tab_stride3_arr_4096_f32[6]={ 0,2048,2560,2688,2720,0,}; float32_t rearranged_twiddle_stride1_4096_f32[2728]={ -1.00000000000000000000f,0.00000000000000000000f,0.99999882345170187925f, -0.00153398018628476550f,0.99999529380957619118f,0.00306795676296597614f, -0.99998941108192840321f,0.00460192612044857050f,0.99998117528260110909f, -0.00613588464915447527f,0.99997058643097413988f,0.00766982873953109701f, -0.99995764455196389786f,0.00920375478205981944f,0.99994234967602391162f, -0.01073765916726449055f,0.99992470183914450299f,0.01227153828571992539f, -0.99990470108285289808f,0.01380538852806039059f,0.99988234745421256111f, -0.01533920628498810015f,0.99985764100582386060f,0.01687298794728171042f, -0.99983058179582340319f,0.01840672990580482019f,0.99980116988788425569f, -0.01994042855151444138f,0.99976940535121527898f,0.02147408027546950787f, -0.99973528826056168306f,0.02300768146883936868f,0.99969881869620424997f, -0.02454122852291228812f,0.99965999674395922270f,0.02607471782910390085f, -0.99961882249517863830f,0.02760814577896573974f,0.99957529604674921764f, -0.02914150876419372219f,0.99952941750109314256f,0.03067480317663662595f, -0.99948118696616694567f,0.03220802540830458582f,0.99943060455546173237f, -0.03374117185137757990f,0.99937767038800284780f,0.03527423889821394709f, -0.99932238458834954375f,0.03680722294135883171f,0.99926474728659442359f, -0.03834012037355269409f,0.99920475861836388631f,0.03987292758773981066f, -0.99914241872481690532f,0.04140564097707673946f,0.99907772775264536147f, -0.04293825693494082024f,0.99901068585407337697f,0.04447077185493866769f, -0.99894129318685687124f,0.04600318213091462299f,0.99886954991428356099f, -0.04753548415695930257f,0.99879545620517240501f,0.04906767432741801493f, -0.99871901223387293811f,0.05059974903689928166f,0.99864021818026527111f, -0.05213170468028332366f,0.99855907422975931365f,0.05366353765273051968f, -0.99847558057329477421f,0.05519524434968993420f,0.99838973740734016094f, -0.05672682116690774823f,0.99830154493389289261f,0.05825826450043575244f, -0.99821100336047818846f,0.05978957074663986820f,0.99811811290014917919f, -0.06132073630220857829f,0.99802287377148624081f,0.06285175756416140624f, -0.99792528619859599548f,0.06438263092985746505f,0.99782535041111164453f, -0.06591335279700380467f,0.99772306664419163624f,0.06744391956366405094f, -0.99761843513851955478f,0.06897432762826674613f,0.99751145614030345410f, 
[... remaining rows of the removed sine/cosine lookup table (interleaved cosine/sine constants, apparently FFT twiddle factors) elided; the deleted array ends as follows ...]
-0.70710678118654757274f,0.70710678118654757274f,0.38268343236508983729f,
-0.92387953251128673848f,};
+1.00000000000000000000f,0.00000000000000000000f,0.99999880790710449219f,
+0.00153398013208061457f,0.99999529123306274414f,0.00306795677170157433f,
[... the replacement array continues with the same constants, apparently re-emitted as single-precision (float32-rounded) literals ...]
+0.29175224900245666504f,0.95604526996612548828f,0.29321914911270141602f, +0.95559436082839965820f,0.29468536376953125000f,0.95514118671417236328f, +0.29615089297294616699f,0.95468574762344360352f,0.29761570692062377930f, +0.95422810316085815430f,0.29907983541488647461f,0.95376819372177124023f, +0.30054324865341186523f,0.95330601930618286133f,0.30200594663619995117f, +0.95284163951873779297f,0.30346795916557312012f,0.95237499475479125977f, +0.30492922663688659668f,0.95190614461898803711f,0.30638980865478515625f, +0.95143502950668334961f,0.30784964561462402344f,0.95096164941787719727f, +0.30930876731872558594f,0.95048606395721435547f,0.31076714396476745605f, +0.95000827312469482422f,0.31222480535507202148f,0.94952815771102905273f, +0.31368175148963928223f,0.94904589653015136719f,0.31513792276382446289f, +0.94856137037277221680f,0.31659337878227233887f,0.94807457923889160156f, +0.31804808974266052246f,0.94758558273315429688f,0.31950202584266662598f, +0.94709438085556030273f,0.32095524668693542480f,0.94660091400146484375f, +0.32240769267082214355f,0.94610524177551269531f,0.32385936379432678223f, +0.94560730457305908203f,0.32531028985977172852f,0.94510722160339355469f, +0.32676044106483459473f,0.94460481405258178711f,0.32820984721183776855f, +0.94410026073455810547f,0.32965844869613647461f,0.94359344244003295898f, +0.33110630512237548828f,0.94308441877365112305f,0.33255335688591003418f, +0.94257318973541259766f,0.33399966359138488770f,0.94205975532531738281f, +0.33544513583183288574f,0.94154405593872070312f,0.33688986301422119141f, +0.94102615118026733398f,0.33833375573158264160f,0.94050604104995727539f, +0.33977687358856201172f,0.93998372554779052734f,0.34121921658515930176f, +0.93945920467376708984f,0.34266072511672973633f,0.93893247842788696289f, +0.34410142898559570312f,0.93840354681015014648f,0.34554132819175720215f, +0.93787235021591186523f,0.34698042273521423340f,0.93733900785446166992f, +0.34841868281364440918f,0.93680346012115478516f,0.34985613822937011719f, +0.93626564741134643555f,0.35129275918006896973f,0.93572568893432617188f, +0.35272854566574096680f,0.93518352508544921875f,0.35416352748870849609f, +0.93463915586471557617f,0.35559767484664916992f,0.93409252166748046875f, +0.35703095793724060059f,0.93354380130767822266f,0.35846340656280517578f, +0.93299281597137451172f,0.35989505052566528320f,0.93243962526321411133f, +0.36132580041885375977f,0.93188428878784179688f,0.36275571584701538086f, +0.93132668733596801758f,0.36418479681015014648f,0.93076694011688232422f, +0.36561298370361328125f,0.93020504713058471680f,0.36704033613204956055f, +0.92964088916778564453f,0.36846682429313659668f,0.92907458543777465820f, +0.36989244818687438965f,0.92850607633590698242f,0.37131720781326293945f, +0.92793542146682739258f,0.37274107336997985840f,0.92736250162124633789f, +0.37416407465934753418f,0.92678749561309814453f,0.37558618187904357910f, +0.92621022462844848633f,0.37700742483139038086f,0.92563080787658691406f, +0.37842774391174316406f,0.92504924535751342773f,0.37984719872474670410f, +0.92446547746658325195f,0.38126575946807861328f,0.92387950420379638672f, +0.38268342614173889160f,0.92329144477844238281f,0.38410019874572753906f, +0.92270112037658691406f,0.38551604747772216797f,0.92210865020751953125f, +0.38693100214004516602f,0.92151403427124023438f,0.38834503293037414551f, +0.92091721296310424805f,0.38975816965103149414f,0.92031830549240112305f, +0.39117038249969482422f,0.91971713304519653320f,0.39258167147636413574f, +0.91911387443542480469f,0.39399203658103942871f,0.91850841045379638672f, 
+0.39540147781372070312f,0.91790080070495605469f,0.39680999517440795898f, +0.91729098558425903320f,0.39821755886077880859f,0.91667908430099487305f, +0.39962419867515563965f,0.91606497764587402344f,0.40102988481521606445f, +0.91544872522354125977f,0.40243464708328247070f,0.91483032703399658203f, +0.40383845567703247070f,0.91420978307723999023f,0.40524131059646606445f, +0.91358703374862670898f,0.40664321184158325195f,0.91296219825744628906f, +0.40804415941238403320f,0.91233515739440917969f,0.40944415330886840820f, +0.91170603036880493164f,0.41084316372871398926f,0.91107475757598876953f, +0.41224122047424316406f,0.91044127941131591797f,0.41363832354545593262f, +0.90980571508407592773f,0.41503441333770751953f,0.90916800498962402344f, +0.41642954945564270020f,0.90852808952331542969f,0.41782370209693908691f, +0.90788608789443969727f,0.41921690106391906738f,0.90724200010299682617f, +0.42060908675193786621f,0.90659570693969726562f,0.42200025916099548340f, +0.90594726800918579102f,0.42339047789573669434f,0.90529674291610717773f, +0.42477968335151672363f,0.90464407205581665039f,0.42616787552833557129f, +0.90398931503295898438f,0.42755508422851562500f,0.90333235263824462891f, +0.42894127964973449707f,0.90267330408096313477f,0.43032649159431457520f, +0.90201216936111450195f,0.43171066045761108398f,0.90134882926940917969f, +0.43309381604194641113f,0.90068340301513671875f,0.43447595834732055664f, +0.90001589059829711914f,0.43585708737373352051f,0.89934623241424560547f, +0.43723717331886291504f,0.89867448806762695312f,0.43861624598503112793f, +0.89800059795379638672f,0.43999427556991577148f,0.89732456207275390625f, +0.44137126207351684570f,0.89664649963378906250f,0.44274723529815673828f, +0.89596623182296752930f,0.44412213563919067383f,0.89528393745422363281f, +0.44549602270126342773f,0.89459949731826782227f,0.44686883687973022461f, +0.89391297101974487305f,0.44824060797691345215f,0.89322429895401000977f, +0.44961133599281311035f,0.89253354072570800781f,0.45098099112510681152f, +0.89184069633483886719f,0.45234957337379455566f,0.89114576578140258789f, +0.45371711254119873047f,0.89044874906539916992f,0.45508357882499694824f, +0.88974958658218383789f,0.45644897222518920898f,0.88904833793640136719f, +0.45781329274177551270f,0.88834506273269653320f,0.45917654037475585938f, +0.88763964176177978516f,0.46053871512413024902f,0.88693213462829589844f, +0.46189978718757629395f,0.88622254133224487305f,0.46325978636741638184f, +0.88551086187362670898f,0.46461868286132812500f,0.88479709625244140625f, +0.46597650647163391113f,0.88408124446868896484f,0.46733319759368896484f, +0.88336336612701416016f,0.46868881583213806152f,0.88264334201812744141f, +0.47004333138465881348f,0.88192129135131835938f,0.47139674425125122070f, +0.88119709491729736328f,0.47274902462959289551f,0.88047087192535400391f, +0.47410020232200622559f,0.87974262237548828125f,0.47545027732849121094f, +0.87901222705841064453f,0.47679921984672546387f,0.87827980518341064453f, +0.47814705967903137207f,0.87754529714584350586f,0.47949376702308654785f, +0.87680870294570922852f,0.48083934187889099121f,0.87607008218765258789f, +0.48218378424644470215f,0.87532937526702880859f,0.48352706432342529297f, +0.87458664178848266602f,0.48486924171447753906f,0.87384182214736938477f, +0.48621028661727905273f,0.87309497594833374023f,0.48755016922950744629f, +0.87234604358673095703f,0.48888888955116271973f,0.87159508466720581055f, +0.49022647738456726074f,0.87084203958511352539f,0.49156290292739868164f, +0.87008696794509887695f,0.49289819598197937012f,0.86932986974716186523f, 
+0.49423229694366455078f,0.86857068538665771484f,0.49556526541709899902f, +0.86780947446823120117f,0.49689704179763793945f,0.86704623699188232422f, +0.49822765588760375977f,0.86628097295761108398f,0.49955710768699645996f, +0.86551362276077270508f,0.50088536739349365234f,0.86474424600601196289f, +0.50221246480941772461f,0.86397284269332885742f,0.50353837013244628906f, +0.86319941282272338867f,0.50486308336257934570f,0.86242395639419555664f, +0.50618666410446166992f,0.86164647340774536133f,0.50750899314880371094f, +0.86086696386337280273f,0.50883013010025024414f,0.86008536815643310547f, +0.51015007495880126953f,0.85930180549621582031f,0.51146882772445678711f, +0.85851621627807617188f,0.51278638839721679688f,0.85772860050201416016f, +0.51410275697708129883f,0.85693895816802978516f,0.51541787385940551758f, +0.85614734888076782227f,0.51673179864883422852f,0.85535365343093872070f, +0.51804453134536743164f,0.85455799102783203125f,0.51935601234436035156f, +0.85376030206680297852f,0.52066624164581298828f,0.85296058654785156250f, +0.52197527885437011719f,0.85215890407562255859f,0.52328312397003173828f, +0.85135519504547119141f,0.52458965778350830078f,0.85054945945739746094f, +0.52589499950408935547f,0.84974175691604614258f,0.52719914913177490234f, +0.84893202781677246094f,0.52850198745727539062f,0.84812033176422119141f, +0.52980363368988037109f,0.84730660915374755859f,0.53110402822494506836f, +0.84649091958999633789f,0.53240311145782470703f,0.84567326307296752930f, +0.53370100259780883789f,0.84485357999801635742f,0.53499764204025268555f, +0.84403187036514282227f,0.53629297018051147461f,0.84320825338363647461f, +0.53758704662322998047f,0.84238260984420776367f,0.53887993097305297852f, +0.84155499935150146484f,0.54017144441604614258f,0.84072536230087280273f, +0.54146176576614379883f,0.83989381790161132812f,0.54275077581405639648f, +0.83906024694442749023f,0.54403853416442871094f,0.83822470903396606445f, +0.54532498121261596680f,0.83738720417022705078f,0.54661017656326293945f, +0.83654773235321044922f,0.54789406061172485352f,0.83570629358291625977f, +0.54917663335800170898f,0.83486288785934448242f,0.55045795440673828125f, +0.83401751518249511719f,0.55173796415328979492f,0.83317017555236816406f, +0.55301672220230102539f,0.83232086896896362305f,0.55429410934448242188f, +0.83146959543228149414f,0.55557024478912353516f,0.83061641454696655273f, +0.55684500932693481445f,0.82976120710372924805f,0.55811852216720581055f, +0.82890409231185913086f,0.55939072370529174805f,0.82804507017135620117f, +0.56066155433654785156f,0.82718402147293090820f,0.56193113327026367188f, +0.82632106542587280273f,0.56319934129714965820f,0.82545614242553710938f, +0.56446623802185058594f,0.82458931207656860352f,0.56573182344436645508f, +0.82372051477432250977f,0.56699603796005249023f,0.82284981012344360352f, +0.56825894117355346680f,0.82197713851928710938f,0.56952053308486938477f, +0.82110249996185302734f,0.57078075408935546875f,0.82022595405578613281f, +0.57203960418701171875f,0.81934750080108642578f,0.57329714298248291016f, +0.81846714019775390625f,0.57455337047576904297f,0.81758481264114379883f, +0.57580816745758056641f,0.81670057773590087891f,0.57706165313720703125f, +0.81581443548202514648f,0.57831376791000366211f,0.81492632627487182617f, +0.57956457138061523438f,0.81403630971908569336f,0.58081394433975219727f, +0.81314438581466674805f,0.58206200599670410156f,0.81225061416625976562f, +0.58330863714218139648f,0.81135487556457519531f,0.58455395698547363281f, +0.81045717000961303711f,0.58579784631729125977f,0.80955761671066284180f, 
+0.58704036474227905273f,0.80865615606307983398f,0.58828157186508178711f, +0.80775284767150878906f,0.58952128887176513672f,0.80684757232666015625f, +0.59075969457626342773f,0.80594038963317871094f,0.59199666976928710938f, +0.80503135919570922852f,0.59323227405548095703f,0.80412036180496215820f, +0.59446650743484497070f,0.80320751667022705078f,0.59569931030273437500f, +0.80229282379150390625f,0.59693068265914916992f,0.80137616395950317383f, +0.59816068410873413086f,0.80045765638351440430f,0.59938931465148925781f, +0.79953724145889282227f,0.60061645507812500000f,0.79861497879028320312f, +0.60184222459793090820f,0.79769086837768554688f,0.60306662321090698242f, +0.79676479101181030273f,0.60428953170776367188f,0.79583692550659179688f, +0.60551106929779052734f,0.79490715265274047852f,0.60673111677169799805f, +0.79397547245025634766f,0.60794979333877563477f,0.79304194450378417969f, +0.60916703939437866211f,0.79210656881332397461f,0.61038279533386230469f, +0.79116934537887573242f,0.61159718036651611328f,0.79023021459579467773f, +0.61281007528305053711f,0.78928923606872558594f,0.61402153968811035156f, +0.78834640979766845703f,0.61523157358169555664f,0.78740173578262329102f, +0.61644017696380615234f,0.78645521402359008789f,0.61764729022979736328f, +0.78550684452056884766f,0.61885297298431396484f,0.78455656766891479492f, +0.62005722522735595703f,0.78360450267791748047f,0.62125998735427856445f, +0.78265058994293212891f,0.62246125936508178711f,0.78169482946395874023f, +0.62366110086441040039f,0.78073722124099731445f,0.62485951185226440430f, +0.77977776527404785156f,0.62605637311935424805f,0.77881652116775512695f, +0.62725180387496948242f,0.77785342931747436523f,0.62844574451446533203f, +0.77688848972320556641f,0.62963825464248657227f,0.77592170238494873047f, +0.63082921504974365234f,0.77495312690734863281f,0.63201874494552612305f, +0.77398270368576049805f,0.63320678472518920898f,0.77301043272018432617f, +0.63439327478408813477f,0.77203637361526489258f,0.63557833433151245117f, +0.77106052637100219727f,0.63676184415817260742f,0.77008283138275146484f, +0.63794392347335815430f,0.76910334825515747070f,0.63912445306777954102f, +0.76812201738357543945f,0.64030349254608154297f,0.76713889837265014648f, +0.64148104190826416016f,0.76615399122238159180f,0.64265704154968261719f, +0.76516723632812500000f,0.64383155107498168945f,0.76417875289916992188f, +0.64500451087951660156f,0.76318842172622680664f,0.64617604017257690430f, +0.76219630241394042969f,0.64734596014022827148f,0.76120239496231079102f, +0.64851438999176025391f,0.76020669937133789062f,0.64968132972717285156f, +0.75920921564102172852f,0.65084666013717651367f,0.75820988416671752930f, +0.65201056003570556641f,0.75720882415771484375f,0.65317285060882568359f, +0.75620597600936889648f,0.65433359146118164062f,0.75520139932632446289f, +0.65549284219741821289f,0.75419497489929199219f,0.65665054321289062500f, +0.75318682193756103516f,0.65780669450759887695f,0.75217682123184204102f, +0.65896129608154296875f,0.75116515159606933594f,0.66011434793472290039f, +0.75015163421630859375f,0.66126585006713867188f,0.74913638830184936523f, +0.66241580247879028320f,0.74811935424804687500f,0.66356414556503295898f, +0.74710059165954589844f,0.66471099853515625000f,0.74608010053634643555f, +0.66585624217987060547f,0.74505776166915893555f,0.66699993610382080078f, +0.74403375387191772461f,0.66814202070236206055f,0.74300795793533325195f, +0.66928261518478393555f,0.74198043346405029297f,0.67042154073715209961f, +0.74095112085342407227f,0.67155897617340087891f,0.73992007970809936523f, 
+0.67269474267959594727f,0.73888731002807617188f,0.67382901906967163086f, +0.73785281181335449219f,0.67496162652969360352f,0.73681658506393432617f, +0.67609268426895141602f,0.73577857017517089844f,0.67722219228744506836f, +0.73473888635635375977f,0.67835003137588500977f,0.73369741439819335938f, +0.67947632074356079102f,0.73265427350997924805f,0.68060100078582763672f, +0.73160940408706665039f,0.68172407150268554688f,0.73056274652481079102f, +0.68284553289413452148f,0.72951442003250122070f,0.68396538496017456055f, +0.72846436500549316406f,0.68508368730545043945f,0.72741264104843139648f, +0.68620032072067260742f,0.72635912895202636719f,0.68731534481048583984f, +0.72530394792556762695f,0.68842875957489013672f,0.72424709796905517578f, +0.68954056501388549805f,0.72318845987319946289f,0.69065070152282714844f, +0.72212821245193481445f,0.69175922870635986328f,0.72106617689132690430f, +0.69286614656448364258f,0.72000253200531005859f,0.69397145509719848633f, +0.71893709897994995117f,0.69507509469985961914f,0.71787005662918090820f, +0.69617712497711181641f,0.71680128574371337891f,0.69727748632431030273f, +0.71573084592819213867f,0.69837623834609985352f,0.71465867757797241211f, +0.69947332143783569336f,0.71358484029769897461f,0.70056879520416259766f, +0.71250939369201660156f,0.70166260004043579102f,0.71143221855163574219f, +0.70275473594665527344f,0.71035337448120117188f,0.70384526252746582031f, +0.70927280187606811523f,0.70493406057357788086f,0.70819061994552612305f, +0.70602124929428100586f,0.70710676908493041992f,0.70710676908493041992f, +0.70602124929428100586f,0.70819061994552612305f,0.70493406057357788086f, +0.70927280187606811523f,0.70384526252746582031f,0.71035337448120117188f, +0.70275473594665527344f,0.71143221855163574219f,0.70166260004043579102f, +0.71250939369201660156f,0.70056879520416259766f,0.71358484029769897461f, +0.69947332143783569336f,0.71465867757797241211f,0.69837623834609985352f, +0.71573084592819213867f,0.69727748632431030273f,0.71680128574371337891f, +0.69617712497711181641f,0.71787005662918090820f,0.69507509469985961914f, +0.71893709897994995117f,0.69397145509719848633f,0.72000253200531005859f, +0.69286614656448364258f,0.72106617689132690430f,0.69175922870635986328f, +0.72212821245193481445f,0.69065070152282714844f,0.72318845987319946289f, +0.68954056501388549805f,0.72424709796905517578f,0.68842875957489013672f, +0.72530394792556762695f,0.68731534481048583984f,0.72635912895202636719f, +0.68620032072067260742f,0.72741264104843139648f,0.68508368730545043945f, +0.72846436500549316406f,0.68396538496017456055f,0.72951442003250122070f, +0.68284553289413452148f,0.73056274652481079102f,0.68172407150268554688f, +0.73160940408706665039f,0.68060100078582763672f,0.73265427350997924805f, +0.67947632074356079102f,0.73369741439819335938f,0.67835003137588500977f, +0.73473888635635375977f,0.67722219228744506836f,0.73577857017517089844f, +0.67609268426895141602f,0.73681658506393432617f,0.67496162652969360352f, +0.73785281181335449219f,0.67382901906967163086f,0.73888731002807617188f, +0.67269474267959594727f,0.73992007970809936523f,0.67155897617340087891f, +0.74095112085342407227f,0.67042154073715209961f,0.74198043346405029297f, +0.66928261518478393555f,0.74300795793533325195f,0.66814202070236206055f, +0.74403375387191772461f,0.66699993610382080078f,0.74505776166915893555f, +0.66585624217987060547f,0.74608010053634643555f,0.66471099853515625000f, +0.74710059165954589844f,0.66356414556503295898f,0.74811935424804687500f, +0.66241580247879028320f,0.74913638830184936523f,0.66126585006713867188f, 
+0.75015163421630859375f,0.66011434793472290039f,0.75116515159606933594f, +0.65896129608154296875f,0.75217682123184204102f,0.65780669450759887695f, +0.75318682193756103516f,0.65665054321289062500f,0.75419497489929199219f, +0.65549284219741821289f,0.75520139932632446289f,0.65433359146118164062f, +0.75620597600936889648f,0.65317285060882568359f,0.75720882415771484375f, +0.65201056003570556641f,0.75820988416671752930f,0.65084666013717651367f, +0.75920921564102172852f,0.64968132972717285156f,0.76020669937133789062f, +0.64851438999176025391f,0.76120239496231079102f,0.64734596014022827148f, +0.76219630241394042969f,0.64617604017257690430f,0.76318842172622680664f, +0.64500451087951660156f,0.76417875289916992188f,0.64383155107498168945f, +0.76516723632812500000f,0.64265704154968261719f,0.76615399122238159180f, +0.64148104190826416016f,0.76713889837265014648f,0.64030349254608154297f, +0.76812201738357543945f,0.63912445306777954102f,0.76910334825515747070f, +0.63794392347335815430f,0.77008283138275146484f,0.63676184415817260742f, +0.77106052637100219727f,0.63557833433151245117f,0.77203637361526489258f, +0.63439327478408813477f,0.77301043272018432617f,0.63320678472518920898f, +0.77398270368576049805f,0.63201874494552612305f,0.77495312690734863281f, +0.63082921504974365234f,0.77592170238494873047f,0.62963825464248657227f, +0.77688848972320556641f,0.62844574451446533203f,0.77785342931747436523f, +0.62725180387496948242f,0.77881652116775512695f,0.62605637311935424805f, +0.77977776527404785156f,0.62485951185226440430f,0.78073722124099731445f, +0.62366110086441040039f,0.78169482946395874023f,0.62246125936508178711f, +0.78265058994293212891f,0.62125998735427856445f,0.78360450267791748047f, +0.62005722522735595703f,0.78455656766891479492f,0.61885297298431396484f, +0.78550684452056884766f,0.61764729022979736328f,0.78645521402359008789f, +0.61644017696380615234f,0.78740173578262329102f,0.61523157358169555664f, +0.78834640979766845703f,0.61402153968811035156f,0.78928923606872558594f, +0.61281007528305053711f,0.79023021459579467773f,0.61159718036651611328f, +0.79116934537887573242f,0.61038279533386230469f,0.79210656881332397461f, +0.60916703939437866211f,0.79304194450378417969f,0.60794979333877563477f, +0.79397547245025634766f,0.60673111677169799805f,0.79490715265274047852f, +0.60551106929779052734f,0.79583692550659179688f,0.60428953170776367188f, +0.79676479101181030273f,0.60306662321090698242f,0.79769086837768554688f, +0.60184222459793090820f,0.79861497879028320312f,0.60061645507812500000f, +0.79953724145889282227f,0.59938931465148925781f,0.80045765638351440430f, +0.59816068410873413086f,0.80137616395950317383f,0.59693068265914916992f, +0.80229282379150390625f,0.59569931030273437500f,0.80320751667022705078f, +0.59446650743484497070f,0.80412036180496215820f,0.59323227405548095703f, +0.80503135919570922852f,0.59199666976928710938f,0.80594038963317871094f, +0.59075969457626342773f,0.80684757232666015625f,0.58952128887176513672f, +0.80775284767150878906f,0.58828157186508178711f,0.80865615606307983398f, +0.58704036474227905273f,0.80955761671066284180f,0.58579784631729125977f, +0.81045717000961303711f,0.58455395698547363281f,0.81135487556457519531f, +0.58330863714218139648f,0.81225061416625976562f,0.58206200599670410156f, +0.81314438581466674805f,0.58081394433975219727f,0.81403630971908569336f, +0.57956457138061523438f,0.81492632627487182617f,0.57831376791000366211f, +0.81581443548202514648f,0.57706165313720703125f,0.81670057773590087891f, +0.57580816745758056641f,0.81758481264114379883f,0.57455337047576904297f, 
+0.81846714019775390625f,0.57329714298248291016f,0.81934750080108642578f, +0.57203960418701171875f,0.82022595405578613281f,0.57078075408935546875f, +0.82110249996185302734f,0.56952053308486938477f,0.82197713851928710938f, +0.56825894117355346680f,0.82284981012344360352f,0.56699603796005249023f, +0.82372051477432250977f,0.56573182344436645508f,0.82458931207656860352f, +0.56446623802185058594f,0.82545614242553710938f,0.56319934129714965820f, +0.82632106542587280273f,0.56193113327026367188f,0.82718402147293090820f, +0.56066155433654785156f,0.82804507017135620117f,0.55939072370529174805f, +0.82890409231185913086f,0.55811852216720581055f,0.82976120710372924805f, +0.55684500932693481445f,0.83061641454696655273f,0.55557024478912353516f, +0.83146959543228149414f,0.55429410934448242188f,0.83232086896896362305f, +0.55301672220230102539f,0.83317017555236816406f,0.55173796415328979492f, +0.83401751518249511719f,0.55045795440673828125f,0.83486288785934448242f, +0.54917663335800170898f,0.83570629358291625977f,0.54789406061172485352f, +0.83654773235321044922f,0.54661017656326293945f,0.83738720417022705078f, +0.54532498121261596680f,0.83822470903396606445f,0.54403853416442871094f, +0.83906024694442749023f,0.54275077581405639648f,0.83989381790161132812f, +0.54146176576614379883f,0.84072536230087280273f,0.54017144441604614258f, +0.84155499935150146484f,0.53887993097305297852f,0.84238260984420776367f, +0.53758704662322998047f,0.84320825338363647461f,0.53629297018051147461f, +0.84403187036514282227f,0.53499764204025268555f,0.84485357999801635742f, +0.53370100259780883789f,0.84567326307296752930f,0.53240311145782470703f, +0.84649091958999633789f,0.53110402822494506836f,0.84730660915374755859f, +0.52980363368988037109f,0.84812033176422119141f,0.52850198745727539062f, +0.84893202781677246094f,0.52719914913177490234f,0.84974175691604614258f, +0.52589499950408935547f,0.85054945945739746094f,0.52458965778350830078f, +0.85135519504547119141f,0.52328312397003173828f,0.85215890407562255859f, +0.52197527885437011719f,0.85296058654785156250f,0.52066624164581298828f, +0.85376030206680297852f,0.51935601234436035156f,0.85455799102783203125f, +0.51804453134536743164f,0.85535365343093872070f,0.51673179864883422852f, +0.85614734888076782227f,0.51541787385940551758f,0.85693895816802978516f, +0.51410275697708129883f,0.85772860050201416016f,0.51278638839721679688f, +0.85851621627807617188f,0.51146882772445678711f,0.85930180549621582031f, +0.51015007495880126953f,0.86008536815643310547f,0.50883013010025024414f, +0.86086696386337280273f,0.50750899314880371094f,0.86164647340774536133f, +0.50618666410446166992f,0.86242395639419555664f,0.50486308336257934570f, +0.86319941282272338867f,0.50353837013244628906f,0.86397284269332885742f, +0.50221246480941772461f,0.86474424600601196289f,0.50088536739349365234f, +0.86551362276077270508f,0.49955710768699645996f,0.86628097295761108398f, +0.49822765588760375977f,0.86704623699188232422f,0.49689704179763793945f, +0.86780947446823120117f,0.49556526541709899902f,0.86857068538665771484f, +0.49423229694366455078f,0.86932986974716186523f,0.49289819598197937012f, +0.87008696794509887695f,0.49156290292739868164f,0.87084203958511352539f, +0.49022647738456726074f,0.87159508466720581055f,0.48888888955116271973f, +0.87234604358673095703f,0.48755016922950744629f,0.87309497594833374023f, +0.48621028661727905273f,0.87384182214736938477f,0.48486924171447753906f, +0.87458664178848266602f,0.48352706432342529297f,0.87532937526702880859f, +0.48218378424644470215f,0.87607008218765258789f,0.48083934187889099121f, 
+0.87680870294570922852f,0.47949376702308654785f,0.87754529714584350586f, +0.47814705967903137207f,0.87827980518341064453f,0.47679921984672546387f, +0.87901222705841064453f,0.47545027732849121094f,0.87974262237548828125f, +0.47410020232200622559f,0.88047087192535400391f,0.47274902462959289551f, +0.88119709491729736328f,0.47139674425125122070f,0.88192129135131835938f, +0.47004333138465881348f,0.88264334201812744141f,0.46868881583213806152f, +0.88336336612701416016f,0.46733319759368896484f,0.88408124446868896484f, +0.46597650647163391113f,0.88479709625244140625f,0.46461868286132812500f, +0.88551086187362670898f,0.46325978636741638184f,0.88622254133224487305f, +0.46189978718757629395f,0.88693213462829589844f,0.46053871512413024902f, +0.88763964176177978516f,0.45917654037475585938f,0.88834506273269653320f, +0.45781329274177551270f,0.88904833793640136719f,0.45644897222518920898f, +0.88974958658218383789f,0.45508357882499694824f,0.89044874906539916992f, +0.45371711254119873047f,0.89114576578140258789f,0.45234957337379455566f, +0.89184069633483886719f,0.45098099112510681152f,0.89253354072570800781f, +0.44961133599281311035f,0.89322429895401000977f,0.44824060797691345215f, +0.89391297101974487305f,0.44686883687973022461f,0.89459949731826782227f, +0.44549602270126342773f,0.89528393745422363281f,0.44412213563919067383f, +0.89596623182296752930f,0.44274723529815673828f,0.89664649963378906250f, +0.44137126207351684570f,0.89732456207275390625f,0.43999427556991577148f, +0.89800059795379638672f,0.43861624598503112793f,0.89867448806762695312f, +0.43723717331886291504f,0.89934623241424560547f,0.43585708737373352051f, +0.90001589059829711914f,0.43447595834732055664f,0.90068340301513671875f, +0.43309381604194641113f,0.90134882926940917969f,0.43171066045761108398f, +0.90201216936111450195f,0.43032649159431457520f,0.90267330408096313477f, +0.42894127964973449707f,0.90333235263824462891f,0.42755508422851562500f, +0.90398931503295898438f,0.42616787552833557129f,0.90464407205581665039f, +0.42477968335151672363f,0.90529674291610717773f,0.42339047789573669434f, +0.90594726800918579102f,0.42200025916099548340f,0.90659570693969726562f, +0.42060908675193786621f,0.90724200010299682617f,0.41921690106391906738f, +0.90788608789443969727f,0.41782370209693908691f,0.90852808952331542969f, +0.41642954945564270020f,0.90916800498962402344f,0.41503441333770751953f, +0.90980571508407592773f,0.41363832354545593262f,0.91044127941131591797f, +0.41224122047424316406f,0.91107475757598876953f,0.41084316372871398926f, +0.91170603036880493164f,0.40944415330886840820f,0.91233515739440917969f, +0.40804415941238403320f,0.91296219825744628906f,0.40664321184158325195f, +0.91358703374862670898f,0.40524131059646606445f,0.91420978307723999023f, +0.40383845567703247070f,0.91483032703399658203f,0.40243464708328247070f, +0.91544872522354125977f,0.40102988481521606445f,0.91606497764587402344f, +0.39962419867515563965f,0.91667908430099487305f,0.39821755886077880859f, +0.91729098558425903320f,0.39680999517440795898f,0.91790080070495605469f, +0.39540147781372070312f,0.91850841045379638672f,0.39399203658103942871f, +0.91911387443542480469f,0.39258167147636413574f,0.91971713304519653320f, +0.39117038249969482422f,0.92031830549240112305f,0.38975816965103149414f, +0.92091721296310424805f,0.38834503293037414551f,0.92151403427124023438f, +0.38693100214004516602f,0.92210865020751953125f,0.38551604747772216797f, +0.92270112037658691406f,0.38410019874572753906f,0.92329144477844238281f, +0.38268342614173889160f,0.92387950420379638672f,0.38126575946807861328f, 
+0.92446547746658325195f,0.37984719872474670410f,0.92504924535751342773f, +0.37842774391174316406f,0.92563080787658691406f,0.37700742483139038086f, +0.92621022462844848633f,0.37558618187904357910f,0.92678749561309814453f, +0.37416407465934753418f,0.92736250162124633789f,0.37274107336997985840f, +0.92793542146682739258f,0.37131720781326293945f,0.92850607633590698242f, +0.36989244818687438965f,0.92907458543777465820f,0.36846682429313659668f, +0.92964088916778564453f,0.36704033613204956055f,0.93020504713058471680f, +0.36561298370361328125f,0.93076694011688232422f,0.36418479681015014648f, +0.93132668733596801758f,0.36275571584701538086f,0.93188428878784179688f, +0.36132580041885375977f,0.93243962526321411133f,0.35989505052566528320f, +0.93299281597137451172f,0.35846340656280517578f,0.93354380130767822266f, +0.35703095793724060059f,0.93409252166748046875f,0.35559767484664916992f, +0.93463915586471557617f,0.35416352748870849609f,0.93518352508544921875f, +0.35272854566574096680f,0.93572568893432617188f,0.35129275918006896973f, +0.93626564741134643555f,0.34985613822937011719f,0.93680346012115478516f, +0.34841868281364440918f,0.93733900785446166992f,0.34698042273521423340f, +0.93787235021591186523f,0.34554132819175720215f,0.93840354681015014648f, +0.34410142898559570312f,0.93893247842788696289f,0.34266072511672973633f, +0.93945920467376708984f,0.34121921658515930176f,0.93998372554779052734f, +0.33977687358856201172f,0.94050604104995727539f,0.33833375573158264160f, +0.94102615118026733398f,0.33688986301422119141f,0.94154405593872070312f, +0.33544513583183288574f,0.94205975532531738281f,0.33399966359138488770f, +0.94257318973541259766f,0.33255335688591003418f,0.94308441877365112305f, +0.33110630512237548828f,0.94359344244003295898f,0.32965844869613647461f, +0.94410026073455810547f,0.32820984721183776855f,0.94460481405258178711f, +0.32676044106483459473f,0.94510722160339355469f,0.32531028985977172852f, +0.94560730457305908203f,0.32385936379432678223f,0.94610524177551269531f, +0.32240769267082214355f,0.94660091400146484375f,0.32095524668693542480f, +0.94709438085556030273f,0.31950202584266662598f,0.94758558273315429688f, +0.31804808974266052246f,0.94807457923889160156f,0.31659337878227233887f, +0.94856137037277221680f,0.31513792276382446289f,0.94904589653015136719f, +0.31368175148963928223f,0.94952815771102905273f,0.31222480535507202148f, +0.95000827312469482422f,0.31076714396476745605f,0.95048606395721435547f, +0.30930876731872558594f,0.95096164941787719727f,0.30784964561462402344f, +0.95143502950668334961f,0.30638980865478515625f,0.95190614461898803711f, +0.30492922663688659668f,0.95237499475479125977f,0.30346795916557312012f, +0.95284163951873779297f,0.30200594663619995117f,0.95330601930618286133f, +0.30054324865341186523f,0.95376819372177124023f,0.29907983541488647461f, +0.95422810316085815430f,0.29761570692062377930f,0.95468574762344360352f, +0.29615089297294616699f,0.95514118671417236328f,0.29468536376953125000f, +0.95559436082839965820f,0.29321914911270141602f,0.95604526996612548828f, +0.29175224900245666504f,0.95649391412734985352f,0.29028466343879699707f, +0.95694035291671752930f,0.28881642222404479980f,0.95738452672958374023f, +0.28734746575355529785f,0.95782643556594848633f,0.28587782382965087891f, +0.95826607942581176758f,0.28440752625465393066f,0.95870345830917358398f, +0.28293657302856445312f,0.95913863182067871094f,0.28146493434906005859f, +0.95957154035568237305f,0.27999264001846313477f,0.96000212430953979492f, +0.27851969003677368164f,0.96043050289154052734f,0.27704608440399169922f, 
+0.96085661649703979492f,0.27557182312011718750f,0.96128046512603759766f, +0.27409690618515014648f,0.96170204877853393555f,0.27262136340141296387f, +0.96212142705917358398f,0.27114516496658325195f,0.96253848075866699219f, +0.26966831088066101074f,0.96295326948165893555f,0.26819086074829101562f, +0.96336579322814941406f,0.26671275496482849121f,0.96377605199813842773f, +0.26523402333259582520f,0.96418404579162597656f,0.26375466585159301758f, +0.96458977460861206055f,0.26227471232414245605f,0.96499323844909667969f, +0.26079410314559936523f,0.96539443731307983398f,0.25931292772293090820f, +0.96579337120056152344f,0.25783109664916992188f,0.96618998050689697266f, +0.25634866952896118164f,0.96658438444137573242f,0.25486564636230468750f, +0.96697646379470825195f,0.25338202714920043945f,0.96736627817153930664f, +0.25189781188964843750f,0.96775382757186889648f,0.25041300058364868164f, +0.96813911199569702148f,0.24892760813236236572f,0.96852207183837890625f, +0.24744161963462829590f,0.96890282630920410156f,0.24595504999160766602f, +0.96928125619888305664f,0.24446789920330047607f,0.96965736150741577148f, +0.24298018217086791992f,0.97003126144409179688f,0.24149188399314880371f, +0.97040283679962158203f,0.24000301957130432129f,0.97077214717864990234f, +0.23851358890533447266f,0.97113913297653198242f,0.23702360689640045166f, +0.97150391340255737305f,0.23553305864334106445f,0.97186630964279174805f, +0.23404195904731750488f,0.97222650051116943359f,0.23255030810832977295f, +0.97258436679840087891f,0.23105810582637786865f,0.97293996810913085938f, +0.22956536710262298584f,0.97329324483871459961f,0.22807207703590393066f, +0.97364425659179687500f,0.22657826542854309082f,0.97399294376373291016f, +0.22508391737937927246f,0.97433936595916748047f,0.22358903288841247559f, +0.97468352317810058594f,0.22209362685680389404f,0.97502535581588745117f, +0.22059768438339233398f,0.97536486387252807617f,0.21910123527050018311f, +0.97570210695266723633f,0.21760427951812744141f,0.97603708505630493164f, +0.21610680222511291504f,0.97636973857879638672f,0.21460881829261779785f, +0.97670006752014160156f,0.21311031281948089600f,0.97702813148498535156f, +0.21161133050918579102f,0.97735387086868286133f,0.21011184155941009521f, +0.97767734527587890625f,0.20861184597015380859f,0.97799849510192871094f, +0.20711137354373931885f,0.97831737995147705078f,0.20561040937900543213f, +0.97863394021987915039f,0.20410896837711334229f,0.97894817590713500977f, +0.20260703563690185547f,0.97926014661788940430f,0.20110464096069335938f, +0.97956979274749755859f,0.19960175454616546631f,0.97987711429595947266f, +0.19809840619564056396f,0.98018211126327514648f,0.19659459590911865234f, +0.98048484325408935547f,0.19509032368659973145f,0.98078525066375732422f, +0.19358558952808380127f,0.98108339309692382812f,0.19208039343357086182f, +0.98137921094894409180f,0.19057475030422210693f,0.98167270421981811523f, +0.18906866014003753662f,0.98196387290954589844f,0.18756212294101715088f, +0.98225271701812744141f,0.18605515360832214355f,0.98253929615020751953f, +0.18454773724079132080f,0.98282355070114135742f,0.18303988873958587646f, +0.98310548067092895508f,0.18153160810470581055f,0.98338508605957031250f, +0.18002289533615112305f,0.98366242647171020508f,0.17851376533508300781f, +0.98393744230270385742f,0.17700421810150146484f,0.98421007394790649414f, +0.17549425363540649414f,0.98448044061660766602f,0.17398387193679809570f, +0.98474848270416259766f,0.17247308790683746338f,0.98501425981521606445f, +0.17096188664436340332f,0.98527765274047851562f,0.16945029795169830322f, 
+0.98553872108459472656f,0.16793829202651977539f,0.98579752445220947266f, +0.16642589867115020752f,0.98605394363403320312f,0.16491311788558959961f, +0.98630809783935546875f,0.16339994966983795166f,0.98655992746353149414f, +0.16188639402389526367f,0.98680937290191650391f,0.16037245094776153564f, +0.98705655336380004883f,0.15885815024375915527f,0.98730140924453735352f, +0.15734346210956573486f,0.98754394054412841797f,0.15582840144634246826f, +0.98778414726257324219f,0.15431296825408935547f,0.98802202939987182617f, +0.15279719233512878418f,0.98825758695602416992f,0.15128104388713836670f, +0.98849081993103027344f,0.14976453781127929688f,0.98872166872024536133f, +0.14824767410755157471f,0.98895025253295898438f,0.14673046767711639404f, +0.98917651176452636719f,0.14521291851997375488f,0.98940044641494750977f, +0.14369502663612365723f,0.98962199687957763672f,0.14217680692672729492f, +0.98984128236770629883f,0.14065824449062347412f,0.99005818367004394531f, +0.13913933932781219482f,0.99027281999588012695f,0.13762012124061584473f, +0.99048507213592529297f,0.13610057532787322998f,0.99069499969482421875f, +0.13458070158958435059f,0.99090266227722167969f,0.13306052982807159424f, +0.99110794067382812500f,0.13154003024101257324f,0.99131083488464355469f, +0.13001921772956848145f,0.99151146411895751953f,0.12849810719490051270f, +0.99170976877212524414f,0.12697669863700866699f,0.99190568923950195312f, +0.12545497715473175049f,0.99209928512573242188f,0.12393297255039215088f, +0.99229061603546142578f,0.12241067737340927124f,0.99247956275939941406f, +0.12088808417320251465f,0.99266612529754638672f,0.11936521530151367188f, +0.99285042285919189453f,0.11784206330776214600f,0.99303233623504638672f, +0.11631862819194793701f,0.99321192502975463867f,0.11479492485523223877f, +0.99338918924331665039f,0.11327095329761505127f,0.99356412887573242188f, +0.11174671351909637451f,0.99373674392700195312f,0.11022220551967620850f, +0.99390697479248046875f,0.10869744420051574707f,0.99407488107681274414f, +0.10717242211103439331f,0.99424046277999877930f,0.10564715415239334106f, +0.99440366029739379883f,0.10412163287401199341f,0.99456459283828735352f, +0.10259586572647094727f,0.99472314119338989258f,0.10106986016035079956f, +0.99487930536270141602f,0.09954361617565155029f,0.99503320455551147461f, +0.09801714122295379639f,0.99518471956253051758f,0.09649042785167694092f, +0.99533390998840332031f,0.09496349841356277466f,0.99548077583312988281f, +0.09343633800745010376f,0.99562525749206542969f,0.09190895408391952515f, +0.99576741456985473633f,0.09038136154413223267f,0.99590724706649780273f, +0.08885355293750762939f,0.99604469537734985352f,0.08732553571462631226f, +0.99617981910705566406f,0.08579730987548828125f,0.99631261825561523438f, +0.08426889032125473022f,0.99644303321838378906f,0.08274026215076446533f, +0.99657112360000610352f,0.08121144771575927734f,0.99669688940048217773f, +0.07968243956565856934f,0.99682027101516723633f,0.07815324515104293823f, +0.99694132804870605469f,0.07662386447191238403f,0.99706006050109863281f, +0.07509429752826690674f,0.99717640876770019531f,0.07356456667184829712f, +0.99729043245315551758f,0.07203464955091476440f,0.99740213155746459961f, +0.07050457596778869629f,0.99751144647598266602f,0.06897433102130889893f, +0.99761843681335449219f,0.06744392216205596924f,0.99772304296493530273f, +0.06591334939002990723f,0.99782532453536987305f,0.06438262760639190674f, +0.99792528152465820312f,0.06285175681114196777f,0.99802285432815551758f, +0.06132073700428009033f,0.99811810255050659180f,0.05978957191109657288f, 
+0.99821102619171142578f,0.05825826525688171387f,0.99830156564712524414f, +0.05672682076692581177f,0.99838972091674804688f,0.05519524589180946350f, +0.99847555160522460938f,0.05366353690624237061f,0.99855905771255493164f, +0.05213170498609542847f,0.99864023923873901367f,0.05059975013136863708f, +0.99871903657913208008f,0.04906767606735229492f,0.99879544973373413086f, +0.04753548279404640198f,0.99886953830718994141f,0.04600318148732185364f, +0.99894130229949951172f,0.04447077214717864990f,0.99901068210601806641f, +0.04293825849890708923f,0.99907773733139038086f,0.04140564054250717163f, +0.99914240837097167969f,0.03987292572855949402f,0.99920475482940673828f, +0.03834012150764465332f,0.99926477670669555664f,0.03680722415447235107f, +0.99932235479354858398f,0.03527423739433288574f,0.99937766790390014648f, +0.03374117240309715271f,0.99943059682846069336f,0.03220802545547485352f, +0.99948120117187500000f,0.03067480400204658508f,0.99952942132949829102f, +0.02914150804281234741f,0.99957531690597534180f,0.02760814502835273743f, +0.99961882829666137695f,0.02607471868395805359f,0.99966001510620117188f, +0.02454122900962829590f,0.99969881772994995117f,0.02300768159329891205f, +0.99973529577255249023f,0.02147408016026020050f,0.99976938962936401367f, +0.01994042843580245972f,0.99980115890502929688f,0.01840673014521598816f, +0.99983060359954833984f,0.01687298715114593506f,0.99985766410827636719f, +0.01533920597285032272f,0.99988234043121337891f,0.01380538847297430038f, +0.99990469217300415039f,0.01227153837680816650f,0.99992471933364868164f, +0.01073765940964221954f,0.99994236230850219727f,0.00920375436544418335f, +0.99995762109756469727f,0.00766982883214950562f,0.99997061491012573242f, +0.00613588467240333557f,0.99998116493225097656f,0.00460192607715725899f, +0.99998939037322998047f,0.00306795677170157433f,0.99999529123306274414f, +0.00153398013208061457f,0.99999880790710449219f,1.00000000000000000000f, +0.00000000000000000000f,0.99998116493225097656f,0.00613588467240333557f, +0.99992471933364868164f,0.01227153837680816650f,0.99983060359954833984f, +0.01840673014521598816f,0.99969881772994995117f,0.02454122900962829590f, +0.99952942132949829102f,0.03067480400204658508f,0.99932235479354858398f, +0.03680722415447235107f,0.99907773733139038086f,0.04293825849890708923f, +0.99879544973373413086f,0.04906767606735229492f,0.99847555160522460938f, +0.05519524589180946350f,0.99811810255050659180f,0.06132073700428009033f, +0.99772304296493530273f,0.06744392216205596924f,0.99729043245315551758f, +0.07356456667184829712f,0.99682027101516723633f,0.07968243956565856934f, +0.99631261825561523438f,0.08579730987548828125f,0.99576741456985473633f, +0.09190895408391952515f,0.99518471956253051758f,0.09801714122295379639f, +0.99456459283828735352f,0.10412163287401199341f,0.99390697479248046875f, +0.11022220551967620850f,0.99321192502975463867f,0.11631862819194793701f, +0.99247956275939941406f,0.12241067737340927124f,0.99170976877212524414f, +0.12849810719490051270f,0.99090266227722167969f,0.13458070158958435059f, +0.99005818367004394531f,0.14065824449062347412f,0.98917651176452636719f, +0.14673046767711639404f,0.98825758695602416992f,0.15279719233512878418f, +0.98730140924453735352f,0.15885815024375915527f,0.98630809783935546875f, +0.16491311788558959961f,0.98527765274047851562f,0.17096188664436340332f, +0.98421007394790649414f,0.17700421810150146484f,0.98310548067092895508f, +0.18303988873958587646f,0.98196387290954589844f,0.18906866014003753662f, +0.98078525066375732422f,0.19509032368659973145f,0.97956979274749755859f, 
+0.20110464096069335938f,0.97831737995147705078f,0.20711137354373931885f, +0.97702813148498535156f,0.21311031281948089600f,0.97570210695266723633f, +0.21910123527050018311f,0.97433936595916748047f,0.22508391737937927246f, +0.97293996810913085938f,0.23105810582637786865f,0.97150391340255737305f, +0.23702360689640045166f,0.97003126144409179688f,0.24298018217086791992f, +0.96852207183837890625f,0.24892760813236236572f,0.96697646379470825195f, +0.25486564636230468750f,0.96539443731307983398f,0.26079410314559936523f, +0.96377605199813842773f,0.26671275496482849121f,0.96212142705917358398f, +0.27262136340141296387f,0.96043050289154052734f,0.27851969003677368164f, +0.95870345830917358398f,0.28440752625465393066f,0.95694035291671752930f, +0.29028466343879699707f,0.95514118671417236328f,0.29615089297294616699f, +0.95330601930618286133f,0.30200594663619995117f,0.95143502950668334961f, +0.30784964561462402344f,0.94952815771102905273f,0.31368175148963928223f, +0.94758558273315429688f,0.31950202584266662598f,0.94560730457305908203f, +0.32531028985977172852f,0.94359344244003295898f,0.33110630512237548828f, +0.94154405593872070312f,0.33688986301422119141f,0.93945920467376708984f, +0.34266072511672973633f,0.93733900785446166992f,0.34841868281364440918f, +0.93518352508544921875f,0.35416352748870849609f,0.93299281597137451172f, +0.35989505052566528320f,0.93076694011688232422f,0.36561298370361328125f, +0.92850607633590698242f,0.37131720781326293945f,0.92621022462844848633f, +0.37700742483139038086f,0.92387950420379638672f,0.38268342614173889160f, +0.92151403427124023438f,0.38834503293037414551f,0.91911387443542480469f, +0.39399203658103942871f,0.91667908430099487305f,0.39962419867515563965f, +0.91420978307723999023f,0.40524131059646606445f,0.91170603036880493164f, +0.41084316372871398926f,0.90916800498962402344f,0.41642954945564270020f, +0.90659570693969726562f,0.42200025916099548340f,0.90398931503295898438f, +0.42755508422851562500f,0.90134882926940917969f,0.43309381604194641113f, +0.89867448806762695312f,0.43861624598503112793f,0.89596623182296752930f, +0.44412213563919067383f,0.89322429895401000977f,0.44961133599281311035f, +0.89044874906539916992f,0.45508357882499694824f,0.88763964176177978516f, +0.46053871512413024902f,0.88479709625244140625f,0.46597650647163391113f, +0.88192129135131835938f,0.47139674425125122070f,0.87901222705841064453f, +0.47679921984672546387f,0.87607008218765258789f,0.48218378424644470215f, +0.87309497594833374023f,0.48755016922950744629f,0.87008696794509887695f, +0.49289819598197937012f,0.86704623699188232422f,0.49822765588760375977f, +0.86397284269332885742f,0.50353837013244628906f,0.86086696386337280273f, +0.50883013010025024414f,0.85772860050201416016f,0.51410275697708129883f, +0.85455799102783203125f,0.51935601234436035156f,0.85135519504547119141f, +0.52458965778350830078f,0.84812033176422119141f,0.52980363368988037109f, +0.84485357999801635742f,0.53499764204025268555f,0.84155499935150146484f, +0.54017144441604614258f,0.83822470903396606445f,0.54532498121261596680f, +0.83486288785934448242f,0.55045795440673828125f,0.83146959543228149414f, +0.55557024478912353516f,0.82804507017135620117f,0.56066155433654785156f, +0.82458931207656860352f,0.56573182344436645508f,0.82110249996185302734f, +0.57078075408935546875f,0.81758481264114379883f,0.57580816745758056641f, +0.81403630971908569336f,0.58081394433975219727f,0.81045717000961303711f, +0.58579784631729125977f,0.80684757232666015625f,0.59075969457626342773f, +0.80320751667022705078f,0.59569931030273437500f,0.79953724145889282227f, 
+0.60061645507812500000f,0.79583692550659179688f,0.60551106929779052734f, +0.79210656881332397461f,0.61038279533386230469f,0.78834640979766845703f, +0.61523157358169555664f,0.78455656766891479492f,0.62005722522735595703f, +0.78073722124099731445f,0.62485951185226440430f,0.77688848972320556641f, +0.62963825464248657227f,0.77301043272018432617f,0.63439327478408813477f, +0.76910334825515747070f,0.63912445306777954102f,0.76516723632812500000f, +0.64383155107498168945f,0.76120239496231079102f,0.64851438999176025391f, +0.75720882415771484375f,0.65317285060882568359f,0.75318682193756103516f, +0.65780669450759887695f,0.74913638830184936523f,0.66241580247879028320f, +0.74505776166915893555f,0.66699993610382080078f,0.74095112085342407227f, +0.67155897617340087891f,0.73681658506393432617f,0.67609268426895141602f, +0.73265427350997924805f,0.68060100078582763672f,0.72846436500549316406f, +0.68508368730545043945f,0.72424709796905517578f,0.68954056501388549805f, +0.72000253200531005859f,0.69397145509719848633f,0.71573084592819213867f, +0.69837623834609985352f,0.71143221855163574219f,0.70275473594665527344f, +0.70710676908493041992f,0.70710676908493041992f,0.70275473594665527344f, +0.71143221855163574219f,0.69837623834609985352f,0.71573084592819213867f, +0.69397145509719848633f,0.72000253200531005859f,0.68954056501388549805f, +0.72424709796905517578f,0.68508368730545043945f,0.72846436500549316406f, +0.68060100078582763672f,0.73265427350997924805f,0.67609268426895141602f, +0.73681658506393432617f,0.67155897617340087891f,0.74095112085342407227f, +0.66699993610382080078f,0.74505776166915893555f,0.66241580247879028320f, +0.74913638830184936523f,0.65780669450759887695f,0.75318682193756103516f, +0.65317285060882568359f,0.75720882415771484375f,0.64851438999176025391f, +0.76120239496231079102f,0.64383155107498168945f,0.76516723632812500000f, +0.63912445306777954102f,0.76910334825515747070f,0.63439327478408813477f, +0.77301043272018432617f,0.62963825464248657227f,0.77688848972320556641f, +0.62485951185226440430f,0.78073722124099731445f,0.62005722522735595703f, +0.78455656766891479492f,0.61523157358169555664f,0.78834640979766845703f, +0.61038279533386230469f,0.79210656881332397461f,0.60551106929779052734f, +0.79583692550659179688f,0.60061645507812500000f,0.79953724145889282227f, +0.59569931030273437500f,0.80320751667022705078f,0.59075969457626342773f, +0.80684757232666015625f,0.58579784631729125977f,0.81045717000961303711f, +0.58081394433975219727f,0.81403630971908569336f,0.57580816745758056641f, +0.81758481264114379883f,0.57078075408935546875f,0.82110249996185302734f, +0.56573182344436645508f,0.82458931207656860352f,0.56066155433654785156f, +0.82804507017135620117f,0.55557024478912353516f,0.83146959543228149414f, +0.55045795440673828125f,0.83486288785934448242f,0.54532498121261596680f, +0.83822470903396606445f,0.54017144441604614258f,0.84155499935150146484f, +0.53499764204025268555f,0.84485357999801635742f,0.52980363368988037109f, +0.84812033176422119141f,0.52458965778350830078f,0.85135519504547119141f, +0.51935601234436035156f,0.85455799102783203125f,0.51410275697708129883f, +0.85772860050201416016f,0.50883013010025024414f,0.86086696386337280273f, +0.50353837013244628906f,0.86397284269332885742f,0.49822765588760375977f, +0.86704623699188232422f,0.49289819598197937012f,0.87008696794509887695f, +0.48755016922950744629f,0.87309497594833374023f,0.48218378424644470215f, +0.87607008218765258789f,0.47679921984672546387f,0.87901222705841064453f, +0.47139674425125122070f,0.88192129135131835938f,0.46597650647163391113f, 
+0.88479709625244140625f,0.46053871512413024902f,0.88763964176177978516f, +0.45508357882499694824f,0.89044874906539916992f,0.44961133599281311035f, +0.89322429895401000977f,0.44412213563919067383f,0.89596623182296752930f, +0.43861624598503112793f,0.89867448806762695312f,0.43309381604194641113f, +0.90134882926940917969f,0.42755508422851562500f,0.90398931503295898438f, +0.42200025916099548340f,0.90659570693969726562f,0.41642954945564270020f, +0.90916800498962402344f,0.41084316372871398926f,0.91170603036880493164f, +0.40524131059646606445f,0.91420978307723999023f,0.39962419867515563965f, +0.91667908430099487305f,0.39399203658103942871f,0.91911387443542480469f, +0.38834503293037414551f,0.92151403427124023438f,0.38268342614173889160f, +0.92387950420379638672f,0.37700742483139038086f,0.92621022462844848633f, +0.37131720781326293945f,0.92850607633590698242f,0.36561298370361328125f, +0.93076694011688232422f,0.35989505052566528320f,0.93299281597137451172f, +0.35416352748870849609f,0.93518352508544921875f,0.34841868281364440918f, +0.93733900785446166992f,0.34266072511672973633f,0.93945920467376708984f, +0.33688986301422119141f,0.94154405593872070312f,0.33110630512237548828f, +0.94359344244003295898f,0.32531028985977172852f,0.94560730457305908203f, +0.31950202584266662598f,0.94758558273315429688f,0.31368175148963928223f, +0.94952815771102905273f,0.30784964561462402344f,0.95143502950668334961f, +0.30200594663619995117f,0.95330601930618286133f,0.29615089297294616699f, +0.95514118671417236328f,0.29028466343879699707f,0.95694035291671752930f, +0.28440752625465393066f,0.95870345830917358398f,0.27851969003677368164f, +0.96043050289154052734f,0.27262136340141296387f,0.96212142705917358398f, +0.26671275496482849121f,0.96377605199813842773f,0.26079410314559936523f, +0.96539443731307983398f,0.25486564636230468750f,0.96697646379470825195f, +0.24892760813236236572f,0.96852207183837890625f,0.24298018217086791992f, +0.97003126144409179688f,0.23702360689640045166f,0.97150391340255737305f, +0.23105810582637786865f,0.97293996810913085938f,0.22508391737937927246f, +0.97433936595916748047f,0.21910123527050018311f,0.97570210695266723633f, +0.21311031281948089600f,0.97702813148498535156f,0.20711137354373931885f, +0.97831737995147705078f,0.20110464096069335938f,0.97956979274749755859f, +0.19509032368659973145f,0.98078525066375732422f,0.18906866014003753662f, +0.98196387290954589844f,0.18303988873958587646f,0.98310548067092895508f, +0.17700421810150146484f,0.98421007394790649414f,0.17096188664436340332f, +0.98527765274047851562f,0.16491311788558959961f,0.98630809783935546875f, +0.15885815024375915527f,0.98730140924453735352f,0.15279719233512878418f, +0.98825758695602416992f,0.14673046767711639404f,0.98917651176452636719f, +0.14065824449062347412f,0.99005818367004394531f,0.13458070158958435059f, +0.99090266227722167969f,0.12849810719490051270f,0.99170976877212524414f, +0.12241067737340927124f,0.99247956275939941406f,0.11631862819194793701f, +0.99321192502975463867f,0.11022220551967620850f,0.99390697479248046875f, +0.10412163287401199341f,0.99456459283828735352f,0.09801714122295379639f, +0.99518471956253051758f,0.09190895408391952515f,0.99576741456985473633f, +0.08579730987548828125f,0.99631261825561523438f,0.07968243956565856934f, +0.99682027101516723633f,0.07356456667184829712f,0.99729043245315551758f, +0.06744392216205596924f,0.99772304296493530273f,0.06132073700428009033f, +0.99811810255050659180f,0.05519524589180946350f,0.99847555160522460938f, +0.04906767606735229492f,0.99879544973373413086f,0.04293825849890708923f, 
+0.99907773733139038086f,0.03680722415447235107f,0.99932235479354858398f, +0.03067480400204658508f,0.99952942132949829102f,0.02454122900962829590f, +0.99969881772994995117f,0.01840673014521598816f,0.99983060359954833984f, +0.01227153837680816650f,0.99992471933364868164f,0.00613588467240333557f, +0.99998116493225097656f,1.00000000000000000000f,0.00000000000000000000f, +0.99969881772994995117f,0.02454122900962829590f,0.99879544973373413086f, +0.04906767606735229492f,0.99729043245315551758f,0.07356456667184829712f, +0.99518471956253051758f,0.09801714122295379639f,0.99247956275939941406f, +0.12241067737340927124f,0.98917651176452636719f,0.14673046767711639404f, +0.98527765274047851562f,0.17096188664436340332f,0.98078525066375732422f, +0.19509032368659973145f,0.97570210695266723633f,0.21910123527050018311f, +0.97003126144409179688f,0.24298018217086791992f,0.96377605199813842773f, +0.26671275496482849121f,0.95694035291671752930f,0.29028466343879699707f, +0.94952815771102905273f,0.31368175148963928223f,0.94154405593872070312f, +0.33688986301422119141f,0.93299281597137451172f,0.35989505052566528320f, +0.92387950420379638672f,0.38268342614173889160f,0.91420978307723999023f, +0.40524131059646606445f,0.90398931503295898438f,0.42755508422851562500f, +0.89322429895401000977f,0.44961133599281311035f,0.88192129135131835938f, +0.47139674425125122070f,0.87008696794509887695f,0.49289819598197937012f, +0.85772860050201416016f,0.51410275697708129883f,0.84485357999801635742f, +0.53499764204025268555f,0.83146959543228149414f,0.55557024478912353516f, +0.81758481264114379883f,0.57580816745758056641f,0.80320751667022705078f, +0.59569931030273437500f,0.78834640979766845703f,0.61523157358169555664f, +0.77301043272018432617f,0.63439327478408813477f,0.75720882415771484375f, +0.65317285060882568359f,0.74095112085342407227f,0.67155897617340087891f, +0.72424709796905517578f,0.68954056501388549805f,0.70710676908493041992f, +0.70710676908493041992f,0.68954056501388549805f,0.72424709796905517578f, +0.67155897617340087891f,0.74095112085342407227f,0.65317285060882568359f, +0.75720882415771484375f,0.63439327478408813477f,0.77301043272018432617f, +0.61523157358169555664f,0.78834640979766845703f,0.59569931030273437500f, +0.80320751667022705078f,0.57580816745758056641f,0.81758481264114379883f, +0.55557024478912353516f,0.83146959543228149414f,0.53499764204025268555f, +0.84485357999801635742f,0.51410275697708129883f,0.85772860050201416016f, +0.49289819598197937012f,0.87008696794509887695f,0.47139674425125122070f, +0.88192129135131835938f,0.44961133599281311035f,0.89322429895401000977f, +0.42755508422851562500f,0.90398931503295898438f,0.40524131059646606445f, +0.91420978307723999023f,0.38268342614173889160f,0.92387950420379638672f, +0.35989505052566528320f,0.93299281597137451172f,0.33688986301422119141f, +0.94154405593872070312f,0.31368175148963928223f,0.94952815771102905273f, +0.29028466343879699707f,0.95694035291671752930f,0.26671275496482849121f, +0.96377605199813842773f,0.24298018217086791992f,0.97003126144409179688f, +0.21910123527050018311f,0.97570210695266723633f,0.19509032368659973145f, +0.98078525066375732422f,0.17096188664436340332f,0.98527765274047851562f, +0.14673046767711639404f,0.98917651176452636719f,0.12241067737340927124f, +0.99247956275939941406f,0.09801714122295379639f,0.99518471956253051758f, +0.07356456667184829712f,0.99729043245315551758f,0.04906767606735229492f, +0.99879544973373413086f,0.02454122900962829590f,0.99969881772994995117f, +1.00000000000000000000f,0.00000000000000000000f,0.99518471956253051758f, 
+0.09801714122295379639f,0.98078525066375732422f,0.19509032368659973145f, +0.95694035291671752930f,0.29028466343879699707f,0.92387950420379638672f, +0.38268342614173889160f,0.88192129135131835938f,0.47139674425125122070f, +0.83146959543228149414f,0.55557024478912353516f,0.77301043272018432617f, +0.63439327478408813477f,0.70710676908493041992f,0.70710676908493041992f, +0.63439327478408813477f,0.77301043272018432617f,0.55557024478912353516f, +0.83146959543228149414f,0.47139674425125122070f,0.88192129135131835938f, +0.38268342614173889160f,0.92387950420379638672f,0.29028466343879699707f, +0.95694035291671752930f,0.19509032368659973145f,0.98078525066375732422f, +0.09801714122295379639f,0.99518471956253051758f,1.00000000000000000000f, +0.00000000000000000000f,0.92387950420379638672f,0.38268342614173889160f, +0.70710676908493041992f,0.70710676908493041992f,0.38268342614173889160f, +0.92387950420379638672f,}; float32_t rearranged_twiddle_stride2_4096_f32[2728]={ -1.00000000000000000000f,0.00000000000000000000f,0.99999529380957619118f, -0.00306795676296597614f,0.99998117528260110909f,0.00613588464915447527f, -0.99995764455196389786f,0.00920375478205981944f,0.99992470183914450299f, -0.01227153828571992539f,0.99988234745421256111f,0.01533920628498810015f, -0.99983058179582340319f,0.01840672990580482019f,0.99976940535121527898f, -0.02147408027546950787f,0.99969881869620424997f,0.02454122852291228812f, -0.99961882249517863830f,0.02760814577896573974f,0.99952941750109314256f, -0.03067480317663662595f,0.99943060455546173237f,0.03374117185137757990f, -0.99932238458834954375f,0.03680722294135883171f,0.99920475861836388631f, -0.03987292758773981066f,0.99907772775264536147f,0.04293825693494082024f, -0.99894129318685687124f,0.04600318213091462299f,0.99879545620517240501f, -0.04906767432741801493f,0.99864021818026527111f,0.05213170468028332366f, -0.99847558057329477421f,0.05519524434968993420f,0.99830154493389289261f, -0.05825826450043575244f,0.99811811290014917919f,0.06132073630220857829f, -0.99792528619859599548f,0.06438263092985746505f,0.99772306664419163624f, -0.06744391956366405094f,0.99751145614030345410f,0.07050457338961385600f, -0.99729045667869020697f,0.07356456359966742631f,0.99706007033948296225f, -0.07662386139203149205f,0.99682029929116566791f,0.07968243797143012563f, -0.99657114579055483539f,0.08274026454937569164f,0.99631261218277800129f, -0.08579731234443989385f,0.99604470090125196702f,0.08885355258252460031f, -0.99576741446765981713f,0.09190895649713272386f,0.99548075549192693856f, -0.09496349532963899165f,0.99518472667219692873f,0.09801714032956060363f, -0.99487933079480561638f,0.10106986275482782167f,0.99456457073425541537f, -0.10412163387205458642f,0.99424044945318790223f,0.10717242495680884273f, -0.99390697000235606051f,0.11022220729388305938f,0.99356413552059530403f, -0.11327095217756434631f,0.99321194923479450001f,0.11631863091190475235f, -0.99285041445986510489f,0.11936521481099135467f,0.99247953459870996706f, -0.12241067519921619566f,0.99209931314219179654f,0.12545498341154623367f, -0.99170975366909952520f,0.12849811079379316880f,0.99131085984611544415f, -0.13154002870288311611f,0.99090263542778000971f,0.13458070850712616773f, -0.99048508425645709341f,0.13762012158648603832f,0.99005821026229712256f, -0.14065823933284921088f,0.98962201746320088702f,0.14369503315029447110f, -0.98917650996478101444f,0.14673047445536174793f,0.98872169196032377858f, -0.14976453467732150915f,0.98825756773074946437f,0.15279718525844343535f, 
-0.98778414164457217783f,0.15582839765426523271f,0.98730141815785843473f, -0.15885814333386144570f,0.98680940181418552726f,0.16188639378011182579f, -0.98630809724459866938f,0.16491312048996989437f,0.98579750916756747614f, -0.16793829497473117263f,0.98527764238894122162f,0.17096188876030121717f, -0.98474850180190420801f,0.17398387338746382214f,0.98421009238692902521f, -0.17700422041214874946f,0.98366241921173025453f,0.18002290140569951471f, -0.98310548743121628501f,0.18303988795514095078f,0.98253930228744124076f, -0.18605515166344663291f,0.98196386910955524296f,0.18906866414980619262f, -0.98137919331375456089f,0.19208039704989243734f,0.98078528040323043058f, -0.19509032201612824808f,0.98018213596811742949f,0.19809841071795356027f, -0.97956976568544051887f,0.20110463484209190055f,0.97894817531906219710f, -0.20410896609281686809f,0.97831737071962765473f,0.20711137619221856032f, -0.97767735782450992943f,0.21011183688046961016f,0.97702814265775439484f, -0.21311031991609136194f,0.97636973133002114000f,0.21610679707621952006f, -0.97570213003852857003f,0.21910124015686979759f,0.97502534506699412020f, -0.22209362097320350937f,0.97433938278557585821f,0.22508391135979283204f, -0.97364424965081197705f,0.22807208317088573102f,0.97293995220556017678f, -0.23105810828067110951f,0.97222649707893626925f,0.23404195858354343018f, -0.97150389098625178352f,0.23702360599436719801f,0.97077214072895035013f, -0.24000302244874149871f,0.97003125319454397424f,0.24298017990326387094f, -0.96928123535654853171f,0.24595505033579459497f,0.96852209427441737777f, -0.24892760574572014853f,0.96775383709347551076f,0.25189781815421696809f, -0.96697647104485207059f,0.25486565960451457169f,0.96619000344541250413f, -0.25783110216215898713f,0.96539444169768939830f,0.26079411791527551401f, -0.96458979328981275803f,0.26375467897483134694f,0.96377606579543984022f, -0.26671275747489836538f,0.96295326687368387741f,0.26966832557291509076f, -0.96212140426904158019f,0.27262135544994897662f,0.96128048581132063966f, -0.27557181931095814376f,0.96043051941556578655f,0.27851968938505305973f, -0.95957151308198451733f,0.28146493792575794091f,0.95870347489587159906f, -0.28440753721127187692f,0.95782641302753290802f,0.28734745954472951102f, -0.95694033573220882438f,0.29028467725446233105f,0.95604525134999640557f, -0.29321916269425862822f,0.95514116830577078243f,0.29615088824362378883f, -0.95422809510910566733f,0.29907982630804047508f,0.95330604035419386211f, -0.30200594931922808417f,0.95237501271976587880f,0.30492922973540237397f, -0.95143502096900833820f,0.30784964004153486661f,0.95048607394948170235f, -0.31076715274961147495f,0.94952818059303667475f,0.31368174039889151761f, -0.94856134991573026749f,0.31659337555616584581f,0.94758559101774109124f, -0.31950203081601569188f,0.94660091308328353499f,0.32240767880106985244f, -0.94560732538052127971f,0.32531029216226292622f,0.94460483726148025685f, -0.32820984357909249729f,0.94359345816196038559f,0.33110630575987642921f, -0.94257319760144686605f,0.33399965144200938205f,0.94154406518302080631f, -0.33688985339222005111f,0.94050607059326829518f,0.33977688440682685123f, -0.93945922360218991898f,0.34266071731199437833f,0.93840353406310805795f, -0.34554132496398909380f,0.93733901191257495977f,0.34841868024943456472f, -0.93626566717027825959f,0.35129275608556709276f,0.93518350993894761025f, -0.35416352542049034380f,0.93409255040425887007f,0.35703096123342997759f, -0.93299279883473895669f,0.35989503653498811087f,0.93188426558166814750f, -0.36275572436739722537f,0.93076696107898371224f,0.36561299780477385379f, 
-0.92964089584318121418f,0.36846682995337232125f,0.92850608047321558924f, -0.37131719395183754306f,0.92736252565040111495f,0.37416406297145793358f, -0.92621024213831137928f,0.37700741021641825945f,0.92504924078267758425f, -0.37984720892405116066f,0.92387953251128673848f,0.38268343236508978178f, -0.92270112833387862850f,0.38551605384391884890f,0.92151403934204190183f, -0.38834504669882624617f,0.92031827670911059425f,0.39117038430225387069f, -0.91911385169005777040f,0.39399204006104809883f,0.91790077562139049672f, -0.39680998741671030805f,0.91667905992104270485f,0.39962419984564678810f, -0.91544871608826783316f,0.40243465085941843018f,0.91420975570353069095f, -0.40524131400498986100f,0.91296219042839821256f,0.40804416286497868782f, -0.91170603200542987832f,0.41084317105790391089f,0.91044129225806724737f, -0.41363831223843450235f,0.90916798309052238025f,0.41642956009763715253f, -0.90788611648766626150f,0.41921688836322390515f,0.90659570451491533483f, -0.42200027079979968159f,0.90529675931811881551f,0.42477968120910880589f, -0.90398929312344333820f,0.42755509343028208491f,0.90267331823725882600f, -0.43032648134008261165f,0.90134884704602202810f,0.43309381885315195726f, -0.90001589201616016833f,0.43585707992225547480f,0.89867446569395381673f, -0.43861623853852765853f,0.89732458070541831763f,0.44137126873171667052f, -0.89596624975618521791f,0.44412214457042920035f,0.89459948563138269595f, -0.44686884016237415906f,0.89322430119551532446f,0.44961132965460653965f, -0.89184070939234272313f,0.45234958723377088896f,0.89044872324475787817f, -0.45508358712634383592f,0.88904835585466457371f,0.45781330359887717485f, -0.88763962040285393496f,0.46053871095824000514f,0.88622253014888063838f, -0.46325978355186014923f,0.88479709843093778954f,0.46597649576796618121f, -0.88336333866573157891f,0.46868882203582790114f,0.88192126434835504956f, -0.47139673682599764204f,0.88047088905216075450f,0.47410021465054996703f, -0.87901222642863352519f,0.47679923006332208812f,0.87754529020726135258f, -0.47949375766015295275f,0.87607009419540660122f,0.48218377207912271887f, -0.87458665227817611321f,0.48486924800079106435f,0.87309497841829009079f, -0.48755016014843599592f,0.87159508665595097909f,0.49022648328829115938f, -0.87008699110871146054f,0.49289819222978403790f,0.86857070597134089507f, -0.49556526182577254058f,0.86704624551569264845f,0.49822766697278181303f, -0.86551362409056908920f,0.50088538261124071482f,0.86397285612158669643f, -0.50353838372571757542f,0.86242395611104050168f,0.50618664534515522835f, -0.86086693863776730939f,0.50883014254310698909f,0.85930181835700847337f, -0.51146885043797030157f,0.85772861000027211809f,0.51410274419322166128f, -0.85614732837519447184f,0.51673179901764987321f,0.85455798836540053376f, -0.51935599016558964269f,0.85296060493036363059f,0.52197529293715438925f, -0.85135519310526519554f,0.52458968267846894928f,0.84974176800085254868f, -0.52719913478190127964f,0.84812034480329723252f,0.52980362468629460526f, -0.84649093877405212627f,0.53240312787719790144f,0.84485356524970711689f, -0.53499761988709715332f,0.84320823964184543620f,0.53758707629564539410f, -0.84155497743689844370f,0.54017147272989285423f,0.83989379419599952126f, -0.54275078486451588944f,0.83822470555483807875f,0.54532498842204646383f, -0.83654772722351200542f,0.54789405917310018967f,0.83486287498638001026f, -0.55045797293660481131f,0.83317016470191318511f,0.55301670558002746780f, -0.83146961230254523567f,0.55557023301960217765f,0.82976123379452304540f, -0.55811853122055610221f,0.82804504525775579626f,0.56066157619733603124f, 
-0.82632106284566353427f,0.56319934401383409117f,0.82458930278502529099f, -0.56573181078361312046f,0.82284978137582642788f,0.56825895267013148970f, -0.82110251499110464835f,0.57078074588696725566f,0.81934752007679700903f, -0.57329716669804220430f,0.81758481315158371139f,0.57580819141784533866f, -0.81581441080673378075f,0.57831379641165558958f,0.81403632970594841378f, -0.58081395809576452649f,0.81225058658520399302f,0.58330865293769829094f, -0.81045719825259476821f,0.58579785745643886408f,0.80865618158817498262f, -0.58828154822264522306f,0.80684755354379933401f,0.59075970185887416442f, -0.80503133114296365758f,0.59323229503979979516f,0.80320753148064494287f, -0.59569930449243335691f,0.80137617172314024039f,0.59816070699634238395f, -0.79953726910790501314f,0.60061647938386897305f,0.79769084094339115509f, -0.60306659854034816437f,0.79583690460888356633f,0.60551104140432554512f, -0.79397547755433717231f,0.60794978496777363208f,0.79210657730021238887f, -0.61038280627630947528f,0.79023022143731003197f,0.61281008242940970820f, -0.78834642762660622761f,0.61523159058062681925f,0.78645521359908576731f, -0.61764730793780386886f,0.78455659715557524159f,0.62005721176328909561f, -0.78265059616657572938f,0.62246127937414996723f,0.78073722857209448822f, -0.62485948814238634341f,0.77881651238147597827f,0.62725181549514408275f, -0.77688846567323244230f,0.62963823891492698426f,0.77495310659487393057f, -0.63201873593980906207f,0.77301045336273699338f,0.63439328416364548779f, -0.77106052426181381776f,0.63676186123628419899f,0.76910333764557969882f, -0.63912444486377573138f,0.76713891193582040007f,0.64148101280858305095f, -0.76516726562245895860f,0.64383154288979138613f,0.76318841726338138010f, -0.64617601298331628357f,0.76120238548426177871f,0.64851440102211244110f, -0.75920918897838796102f,0.65084668499638087535f,0.75720884650648456748f, -0.65317284295377675551f,0.75520137689653654700f,0.65549285299961534967f, -0.75318679904361252042f,0.65780669329707863735f,0.75116513190968636771f, -0.66011434206742047870f,0.74913639452345937020f,0.66241577759017178373f, -0.74710060598018013245f,0.66471097820334479334f,0.74505778544146594733f, -0.66699992230363747137f,0.74300795213512171866f,0.66928258834663600929f, -0.74095112535495921691f,0.67155895484701833009f,0.73888732446061511361f, -0.67382900037875603783f,0.73681656887736979300f,0.67609270357531592310f, -0.73473887809596349907f,0.67835004312986146857f,0.73265427167241281570f, -0.68060099779545302212f,0.73056276922782759087f,0.68284554638524808112f, -0.72846439044822519637f,0.68508366777270035541f,0.72635915508434600873f, -0.68731534089175905233f,0.72424708295146700276f,0.68954054473706682948f, -0.72212819392921534511f,0.69175925836415774750f,0.72000250796138165477f, -0.69397146088965389055f,0.71787004505573170920f,0.69617713149146298601f, -0.71573082528381870571f,0.69837624940897280457f,0.71358486878079352422f, -0.70056879394324833576f,0.71143219574521643356f,0.70275474445722529993f, -0.70927282643886568891f,0.70493408037590488124f,0.70710678118654757274f, -0.70710678118654757274f,0.70493408037590499227f,0.70927282643886568891f, -0.70275474445722529993f,0.71143219574521643356f,0.70056879394324844679f, -0.71358486878079352422f,0.69837624940897291559f,0.71573082528381859468f, -0.69617713149146298601f,0.71787004505573170920f,0.69397146088965400157f, -0.72000250796138165477f,0.69175925836415774750f,0.72212819392921534511f, -0.68954054473706694051f,0.72424708295146689174f,0.68731534089175905233f, -0.72635915508434600873f,0.68508366777270035541f,0.72846439044822519637f, 
-0.68284554638524808112f,0.73056276922782759087f,0.68060099779545302212f, -0.73265427167241281570f,0.67835004312986146857f,0.73473887809596349907f, -0.67609270357531603413f,0.73681656887736979300f,0.67382900037875614885f, -0.73888732446061511361f,0.67155895484701833009f,0.74095112535495910588f, -0.66928258834663600929f,0.74300795213512171866f,0.66699992230363747137f, -0.74505778544146594733f,0.66471097820334490436f,0.74710060598018013245f, -0.66241577759017178373f,0.74913639452345925918f,0.66011434206742047870f, -0.75116513190968636771f,0.65780669329707874837f,0.75318679904361252042f, -0.65549285299961546070f,0.75520137689653654700f,0.65317284295377686654f, -0.75720884650648456748f,0.65084668499638098638f,0.75920918897838796102f, -0.64851440102211255212f,0.76120238548426177871f,0.64617601298331639459f, -0.76318841726338126907f,0.64383154288979149715f,0.76516726562245895860f, -0.64148101280858316198f,0.76713891193582040007f,0.63912444486377573138f, -0.76910333764557958780f,0.63676186123628419899f,0.77106052426181381776f, -0.63439328416364548779f,0.77301045336273688235f,0.63201873593980906207f, -0.77495310659487381955f,0.62963823891492709528f,0.77688846567323244230f, -0.62725181549514419377f,0.77881651238147586724f,0.62485948814238645443f, -0.78073722857209448822f,0.62246127937415007825f,0.78265059616657572938f, -0.62005721176328920663f,0.78455659715557524159f,0.61764730793780397988f, -0.78645521359908576731f,0.61523159058062681925f,0.78834642762660622761f, -0.61281008242940970820f,0.79023022143731003197f,0.61038280627630947528f, -0.79210657730021227785f,0.60794978496777374311f,0.79397547755433717231f, -0.60551104140432554512f,0.79583690460888345530f,0.60306659854034827539f, -0.79769084094339104407f,0.60061647938386897305f,0.79953726910790501314f, -0.59816070699634238395f,0.80137617172314012937f,0.59569930449243346793f, -0.80320753148064483184f,0.59323229503979979516f,0.80503133114296365758f, -0.59075970185887427544f,0.80684755354379922299f,0.58828154822264533408f, -0.80865618158817498262f,0.58579785745643886408f,0.81045719825259476821f, -0.58330865293769829094f,0.81225058658520388200f,0.58081395809576452649f, -0.81403632970594830276f,0.57831379641165558958f,0.81581441080673378075f, -0.57580819141784533866f,0.81758481315158371139f,0.57329716669804231532f, -0.81934752007679689800f,0.57078074588696736669f,0.82110251499110464835f, -0.56825895267013148970f,0.82284978137582631685f,0.56573181078361323149f, -0.82458930278502529099f,0.56319934401383409117f,0.82632106284566353427f, -0.56066157619733603124f,0.82804504525775579626f,0.55811853122055610221f, -0.82976123379452304540f,0.55557023301960228867f,0.83146961230254523567f, -0.55301670558002757883f,0.83317016470191318511f,0.55045797293660481131f, -0.83486287498638001026f,0.54789405917310018967f,0.83654772722351189440f, -0.54532498842204646383f,0.83822470555483796772f,0.54275078486451600046f, -0.83989379419599941023f,0.54017147272989296525f,0.84155497743689833268f, -0.53758707629564550512f,0.84320823964184543620f,0.53499761988709726435f, -0.84485356524970700587f,0.53240312787719801246f,0.84649093877405212627f, -0.52980362468629482731f,0.84812034480329712149f,0.52719913478190139067f, -0.84974176800085243766f,0.52458968267846883826f,0.85135519310526519554f, -0.52197529293715438925f,0.85296060493036363059f,0.51935599016558953167f, -0.85455798836540053376f,0.51673179901764998423f,0.85614732837519447184f, -0.51410274419322166128f,0.85772861000027211809f,0.51146885043797052361f, -0.85930181835700836235f,0.50883014254310698909f,0.86086693863776730939f, 
-0.50618664534515533937f,0.86242395611104050168f,0.50353838372571757542f, -0.86397285612158669643f,0.50088538261124093687f,0.86551362409056897818f, -0.49822766697278186854f,0.86704624551569264845f,0.49556526182577248507f, -0.86857070597134089507f,0.49289819222978409341f,0.87008699110871134952f, -0.49022648328829110387f,0.87159508665595109012f,0.48755016014843605143f, -0.87309497841829009079f,0.48486924800079111986f,0.87458665227817611321f, -0.48218377207912282989f,0.87607009419540660122f,0.47949375766015300826f, -0.87754529020726124156f,0.47679923006332225466f,0.87901222642863341417f, -0.47410021465055002254f,0.88047088905216075450f,0.47139673682599780857f, -0.88192126434835493853f,0.46868882203582795665f,0.88336333866573157891f, -0.46597649576796612569f,0.88479709843093778954f,0.46325978355186026025f, -0.88622253014888063838f,0.46053871095824000514f,0.88763962040285393496f, -0.45781330359887728587f,0.88904835585466457371f,0.45508358712634383592f, -0.89044872324475787817f,0.45234958723377099998f,0.89184070939234272313f, -0.44961132965460659516f,0.89322430119551532446f,0.44686884016237432560f, -0.89459948563138258493f,0.44412214457042925586f,0.89596624975618510689f, -0.44137126873171661501f,0.89732458070541831763f,0.43861623853852771404f, -0.89867446569395381673f,0.43585707992225547480f,0.90001589201616027935f, -0.43309381885315201277f,0.90134884704602202810f,0.43032648134008261165f, -0.90267331823725882600f,0.42755509343028219593f,0.90398929312344333820f, -0.42477968120910880589f,0.90529675931811881551f,0.42200027079979979261f, -0.90659570451491533483f,0.41921688836322396066f,0.90788611648766626150f, -0.41642956009763731906f,0.90916798309052226923f,0.41363831223843455787f, -0.91044129225806713634f,0.41084317105790391089f,0.91170603200542987832f, -0.40804416286497874333f,0.91296219042839810154f,0.40524131400498986100f, -0.91420975570353069095f,0.40243465085941854120f,0.91544871608826783316f, -0.39962419984564678810f,0.91667905992104270485f,0.39680998741671041907f, -0.91790077562139038569f,0.39399204006104809883f,0.91911385169005777040f, -0.39117038430225398171f,0.92031827670911048322f,0.38834504669882630168f, -0.92151403934204190183f,0.38551605384391901543f,0.92270112833387851747f, -0.38268343236508983729f,0.92387953251128673848f,0.37984720892405110515f, -0.92504924078267758425f,0.37700741021641831496f,0.92621024213831126826f, -0.37416406297145798909f,0.92736252565040111495f,0.37131719395183759858f, -0.92850608047321558924f,0.36846682995337232125f,0.92964089584318121418f, -0.36561299780477396482f,0.93076696107898371224f,0.36275572436739722537f, -0.93188426558166814750f,0.35989503653498827740f,0.93299279883473884567f, -0.35703096123343003310f,0.93409255040425887007f,0.35416352542049051033f, -0.93518350993894749923f,0.35129275608556714827f,0.93626566717027825959f, -0.34841868024943450921f,0.93733901191257495977f,0.34554132496398914931f, -0.93840353406310805795f,0.34266071731199437833f,0.93945922360218991898f, -0.33977688440682696225f,0.94050607059326829518f,0.33688985339222005111f, -0.94154406518302080631f,0.33399965144200949307f,0.94257319760144686605f, -0.33110630575987642921f,0.94359345816196038559f,0.32820984357909266382f, -0.94460483726148025685f,0.32531029216226298173f,0.94560732538052127971f, -0.32240767880107001897f,0.94660091308328353499f,0.31950203081601574739f, -0.94758559101774109124f,0.31659337555616584581f,0.94856134991573026749f, -0.31368174039889157312f,0.94952818059303667475f,0.31076715274961147495f, -0.95048607394948170235f,0.30784964004153497763f,0.95143502096900833820f, 
-0.30492922973540242948f,0.95237501271976587880f,0.30200594931922819519f, -0.95330604035419375109f,0.29907982630804047508f,0.95422809510910566733f, -0.29615088824362395536f,0.95514116830577067141f,0.29321916269425868373f, -0.95604525134999640557f,0.29028467725446233105f,0.95694033573220893540f, -0.28734745954472956653f,0.95782641302753290802f,0.28440753721127182141f, -0.95870347489587159906f,0.28146493792575805193f,0.95957151308198451733f, -0.27851968938505305973f,0.96043051941556578655f,0.27557181931095825478f, -0.96128048581132063966f,0.27262135544994897662f,0.96212140426904158019f, -0.26966832557291520178f,0.96295326687368387741f,0.26671275747489842090f, -0.96377606579543984022f,0.26375467897483151347f,0.96458979328981264700f, -0.26079411791527556952f,0.96539444169768939830f,0.25783110216215893162f, -0.96619000344541261516f,0.25486565960451462720f,0.96697647104485207059f, -0.25189781815421691258f,0.96775383709347551076f,0.24892760574572025956f, -0.96852209427441726675f,0.24595505033579459497f,0.96928123535654853171f, -0.24298017990326398197f,0.97003125319454397424f,0.24000302244874149871f, -0.97077214072895035013f,0.23702360599436733679f,0.97150389098625178352f, -0.23404195858354345794f,0.97222649707893626925f,0.23105810828067127605f, -0.97293995220556006576f,0.22807208317088578653f,0.97364424965081186603f, -0.22508391135979277653f,0.97433938278557585821f,0.22209362097320359264f, -0.97502534506699412020f,0.21910124015686976984f,0.97570213003852857003f, -0.21610679707621960333f,0.97636973133002114000f,0.21311031991609136194f, -0.97702814265775439484f,0.21011183688046972118f,0.97767735782450992943f, -0.20711137619221856032f,0.97831737071962765473f,0.20410896609281700687f, -0.97894817531906219710f,0.20110463484209195606f,0.97956976568544051887f, -0.19809841071795372680f,0.98018213596811731847f,0.19509032201612833135f, -0.98078528040323043058f,0.19208039704989238183f,0.98137919331375456089f, -0.18906866414980627589f,0.98196386910955524296f,0.18605515166344663291f, -0.98253930228744124076f,0.18303988795514106180f,0.98310548743121628501f, -0.18002290140569951471f,0.98366241921173025453f,0.17700422041214886049f, -0.98421009238692902521f,0.17398387338746384989f,0.98474850180190420801f, -0.17096188876030135595f,0.98527764238894122162f,0.16793829497473122814f, -0.98579750916756736512f,0.16491312048997008866f,0.98630809724459866938f, -0.16188639378011188130f,0.98680940181418541624f,0.15885814333386139019f, -0.98730141815785843473f,0.15582839765426531597f,0.98778414164457217783f, -0.15279718525844340760f,0.98825756773074946437f,0.14976453467732162017f, -0.98872169196032377858f,0.14673047445536174793f,0.98917650996478101444f, -0.14369503315029458212f,0.98962201746320077600f,0.14065823933284923863f, -0.99005821026229712256f,0.13762012158648617710f,0.99048508425645698239f, -0.13458070850712622324f,0.99090263542778000971f,0.13154002870288328264f, -0.99131085984611544415f,0.12849811079379322432f,0.99170975366909952520f, -0.12545498341154620592f,0.99209931314219179654f,0.12241067519921627893f, -0.99247953459870996706f,0.11936521481099135467f,0.99285041445986510489f, -0.11631863091190487725f,0.99321194923479450001f,0.11327095217756436019f, -0.99356413552059530403f,0.11022220729388318428f,0.99390697000235606051f, -0.10717242495680887049f,0.99424044945318790223f,0.10412163387205472520f, -0.99456457073425541537f,0.10106986275482787718f,0.99487933079480561638f, -0.09801714032956077016f,0.99518472667219681771f,0.09496349532963906104f, -0.99548075549192693856f,0.09190895649713269611f,0.99576741446765981713f, 
-0.08885355258252468358f,0.99604470090125196702f,0.08579731234443987997f, -0.99631261218277800129f,0.08274026454937580266f,0.99657114579055483539f, -0.07968243797143012563f,0.99682029929116566791f,0.07662386139203161695f, -0.99706007033948296225f,0.07356456359966745406f,0.99729045667869020697f, -0.07050457338961400866f,0.99751145614030345410f,0.06744391956366410645f, -0.99772306664419163624f,0.06438263092985740954f,0.99792528619859599548f, -0.06132073630220864768f,0.99811811290014917919f,0.05825826450043573163f, -0.99830154493389289261f,0.05519524434969003135f,0.99847558057329477421f, -0.05213170468028331672f,0.99864021818026527111f,0.04906767432741812596f, -0.99879545620517240501f,0.04600318213091464381f,0.99894129318685687124f, -0.04293825693494095902f,0.99907772775264536147f,0.03987292758773984536f, -0.99920475861836388631f,0.03680722294135899131f,0.99932238458834954375f, -0.03374117185137764235f,0.99943060455546173237f,0.03067480317663658085f, -0.99952941750109314256f,0.02760814577896581953f,0.99961882249517863830f, -0.02454122852291226384f,0.99969881869620424997f,0.02147408027546960502f, -0.99976940535121527898f,0.01840672990580482019f,0.99983058179582340319f, -0.01533920628498821985f,0.99988234745421256111f,0.01227153828571994447f, -0.99992470183914450299f,0.00920375478205995995f,0.99995764455196389786f, -0.00613588464915451517f,0.99998117528260110909f,0.00306795676296613791f, -0.99999529380957619118f,0.00000000000000006123f,1.00000000000000000000f, --0.00306795676296601561f,0.99999529380957619118f,-0.00613588464915439287f, -0.99998117528260110909f,-0.00920375478205983678f,0.99995764455196389786f, --0.01227153828571982304f,0.99992470183914450299f,-0.01533920628498809842f, -0.99988234745421256111f,-0.01840672990580469529f,0.99983058179582340319f, --0.02147408027546948359f,0.99976940535121527898f,-0.02454122852291214241f, -0.99969881869620424997f,-0.02760814577896569810f,0.99961882249517863830f, --0.03067480317663645942f,0.99952941750109314256f,-0.03374117185137751745f, -0.99943060455546173237f,-0.03680722294135886641f,0.99932238458834954375f, --0.03987292758773972740f,0.99920475861836388631f,-0.04293825693494083412f, -0.99907772775264536147f,-0.04600318213091451891f,0.99894129318685687124f, --0.04906767432741800800f,0.99879545620517240501f,-0.05213170468028319182f, -0.99864021818026527111f,-0.05519524434968991339f,0.99847558057329477421f, --0.05825826450043560673f,0.99830154493389289261f,-0.06132073630220852972f, -0.99811811290014917919f,-0.06438263092985728464f,0.99792528619859599548f, --0.06744391956366398155f,0.99772306664419163624f,-0.07050457338961389764f, -0.99751145614030345410f,-0.07356456359966732916f,0.99729045667869020697f, --0.07662386139203150592f,0.99706007033948296225f,-0.07968243797143001461f, -0.99682029929116577893f,-0.08274026454937567776f,0.99657114579055483539f, --0.08579731234443975507f,0.99631261218277800129f,-0.08885355258252455868f, -0.99604470090125196702f,-0.09190895649713257121f,0.99576741446765981713f, --0.09496349532963895002f,0.99548075549192693856f,-0.09801714032956064526f, -0.99518472667219692873f,-0.10106986275482775228f,0.99487933079480561638f, --0.10412163387205460030f,0.99456457073425541537f,-0.10717242495680875947f, -0.99424044945318790223f,-0.11022220729388305938f,0.99390697000235606051f, --0.11327095217756423529f,0.99356413552059530403f,-0.11631863091190475235f, -0.99321194923479450001f,-0.11936521481099122977f,0.99285041445986510489f, --0.12241067519921615403f,0.99247953459870996706f,-0.12545498341154606714f, 
-0.99209931314219179654f,-0.12849811079379311329f,0.99170975366909952520f, --0.13154002870288314386f,0.99131085984611544415f,-0.13458070850712611222f, -0.99090263542778000971f,-0.13762012158648606608f,0.99048508425645698239f, --0.14065823933284912761f,0.99005821026229712256f,-0.14369503315029444335f, -0.98962201746320088702f,-0.14673047445536163691f,0.98917650996478101444f, --0.14976453467732150915f,0.98872169196032377858f,-0.15279718525844329657f, -0.98825756773074946437f,-0.15582839765426520495f,0.98778414164457217783f, --0.15885814333386127917f,0.98730141815785843473f,-0.16188639378011177028f, -0.98680940181418552726f,-0.16491312048996994988f,0.98630809724459866938f, --0.16793829497473108936f,0.98579750916756747614f,-0.17096188876030124493f, -0.98527764238894122162f,-0.17398387338746371111f,0.98474850180190420801f, --0.17700422041214874946f,0.98421009238692902521f,-0.18002290140569940369f, -0.98366241921173025453f,-0.18303988795514092303f,0.98310548743121628501f, --0.18605515166344649414f,0.98253930228744124076f,-0.18906866414980616486f, -0.98196386910955524296f,-0.19208039704989227081f,0.98137919331375456089f, --0.19509032201612819257f,0.98078528040323043058f,-0.19809841071795361578f, -0.98018213596811742949f,-0.20110463484209181728f,0.97956976568544051887f, --0.20410896609281689584f,0.97894817531906219710f,-0.20711137619221844930f, -0.97831737071962765473f,-0.21011183688046961016f,0.97767735782450992943f, --0.21311031991609125091f,0.97702814265775439484f,-0.21610679707621949230f, -0.97636973133002114000f,-0.21910124015686965881f,0.97570213003852857003f, --0.22209362097320348162f,0.97502534506699412020f,-0.22508391135979266551f, -0.97433938278557585821f,-0.22807208317088567551f,0.97364424965081197705f, --0.23105810828067113727f,0.97293995220556017678f,-0.23404195858354331916f, -0.97222649707893638027f,-0.23702360599436722577f,0.97150389098625178352f, --0.24000302244874138768f,0.97077214072895035013f,-0.24298017990326387094f, -0.97003125319454397424f,-0.24595505033579448395f,0.96928123535654853171f, --0.24892760574572012078f,0.96852209427441737777f,-0.25189781815421680156f, -0.96775383709347551076f,-0.25486565960451451618f,0.96697647104485207059f, --0.25783110216215882060f,0.96619000344541261516f,-0.26079411791527545850f, -0.96539444169768939830f,-0.26375467897483140245f,0.96458979328981275803f, --0.26671275747489830987f,0.96377606579543984022f,-0.26966832557291509076f, -0.96295326687368387741f,-0.27262135544994886560f,0.96212140426904158019f, --0.27557181931095814376f,0.96128048581132063966f,-0.27851968938505294870f, -0.96043051941556589757f,-0.28146493792575794091f,0.95957151308198451733f, --0.28440753721127171039f,0.95870347489587159906f,-0.28734745954472945551f, -0.95782641302753290802f,-0.29028467725446216452f,0.95694033573220893540f, --0.29321916269425857271f,0.95604525134999651659f,-0.29615088824362384434f, -0.95514116830577067141f,-0.29907982630804036406f,0.95422809510910566733f, --0.30200594931922808417f,0.95330604035419386211f,-0.30492922973540226295f, -0.95237501271976587880f,-0.30784964004153486661f,0.95143502096900833820f, --0.31076715274961136393f,0.95048607394948181337f,-0.31368174039889140658f, -0.94952818059303667475f,-0.31659337555616573479f,0.94856134991573037851f, --0.31950203081601563637f,0.94758559101774120226f,-0.32240767880106985244f, -0.94660091308328353499f,-0.32531029216226287071f,0.94560732538052139073f, --0.32820984357909255280f,0.94460483726148025685f,-0.33110630575987631818f, -0.94359345816196038559f,-0.33399965144200938205f,0.94257319760144686605f, 
--0.33688985339221994009f,0.94154406518302080631f,-0.33977688440682685123f, -0.94050607059326829518f,-0.34266071731199426731f,0.93945922360218991898f, --0.34554132496398903829f,0.93840353406310816897f,-0.34841868024943439819f, -0.93733901191257495977f,-0.35129275608556703725f,0.93626566717027825959f, --0.35416352542049039931f,0.93518350993894761025f,-0.35703096123342992207f, -0.93409255040425898109f,-0.35989503653498816638f,0.93299279883473884567f, --0.36275572436739711435f,0.93188426558166814750f,-0.36561299780477385379f, -0.93076696107898371224f,-0.36846682995337221023f,0.92964089584318132520f, --0.37131719395183748755f,0.92850608047321558924f,-0.37416406297145787807f, -0.92736252565040111495f,-0.37700741021641820394f,0.92621024213831137928f, --0.37984720892405099413f,0.92504924078267769527f,-0.38268343236508972627f, -0.92387953251128673848f,-0.38551605384391890441f,0.92270112833387851747f, --0.38834504669882619066f,0.92151403934204201285f,-0.39117038430225387069f, -0.92031827670911059425f,-0.39399204006104798781f,0.91911385169005777040f, --0.39680998741671030805f,0.91790077562139049672f,-0.39962419984564667708f, -0.91667905992104270485f,-0.40243465085941843018f,0.91544871608826783316f, --0.40524131400498974998f,0.91420975570353069095f,-0.40804416286497863231f, -0.91296219042839821256f,-0.41084317105790379987f,0.91170603200542987832f, --0.41363831223843450235f,0.91044129225806724737f,-0.41642956009763698599f, -0.90916798309052249127f,-0.41921688836322407168f,0.90788611648766615048f, --0.42200027079979968159f,0.90659570451491533483f,-0.42477968120910869487f, -0.90529675931811881551f,-0.42755509343028186287f,0.90398929312344344922f, --0.43032648134008272267f,0.90267331823725871498f,-0.43309381885315190175f, -0.90134884704602202810f,-0.43585707992225536378f,0.90001589201616027935f, --0.43861623853852738097f,0.89867446569395392775f,-0.44137126873171672603f, -0.89732458070541831763f,-0.44412214457042914484f,0.89596624975618521791f, --0.44686884016237399253f,0.89459948563138280697f,-0.44961132965460670619f, -0.89322430119551521344f,-0.45234958723377088896f,0.89184070939234272313f, --0.45508358712634372489f,0.89044872324475798919f,-0.45781330359887700832f, -0.88904835585466468473f,-0.46053871095824006066f,0.88763962040285393496f, --0.46325978355186014923f,0.88622253014888063838f,-0.46597649576796601467f, -0.88479709843093790056f,-0.46868882203582767909f,0.88336333866573168994f, --0.47139673682599769755f,0.88192126434835504956f,-0.47410021465054991152f, -0.88047088905216086552f,-0.47679923006332192159f,0.87901222642863352519f, --0.47949375766015311928f,0.87754529020726124156f,-0.48218377207912271887f, -0.87607009419540660122f,-0.48486924800079100883f,0.87458665227817622423f, --0.48755016014843571837f,0.87309497841829020182f,-0.49022648328829121489f, -0.87159508665595097909f,-0.49289819222978398239f,0.87008699110871146054f, --0.49556526182577237405f,0.86857070597134100609f,-0.49822766697278159098f, -0.86704624551569275948f,-0.50088538261124082585f,0.86551362409056908920f, --0.50353838372571746440f,0.86397285612158680745f,-0.50618664534515511733f, -0.86242395611104061270f,-0.50883014254310710012f,0.86086693863776719837f, --0.51146885043797041259f,0.85930181835700847337f,-0.51410274419322155026f, -0.85772861000027211809f,-0.51673179901764965116f,0.85614732837519458286f, --0.51935599016558964269f,0.85455798836540053376f,-0.52197529293715427823f, -0.85296060493036374162f,-0.52458968267846872724f,0.85135519310526519554f, --0.52719913478190105760f,0.84974176800085265970f,-0.52980362468629471628f, 
-0.84812034480329723252f,-0.53240312787719790144f,0.84649093877405212627f, --0.53499761988709704230f,0.84485356524970722791f,-0.53758707629564561614f, -0.84320823964184532517f,-0.54017147272989285423f,0.84155497743689844370f, --0.54275078486451577842f,0.83989379419599952126f,-0.54532498842204624179f, -0.83822470555483818977f,-0.54789405917310018967f,0.83654772722351200542f, --0.55045797293660470029f,0.83486287498638012128f,-0.55301670558002735678f, -0.83317016470191329613f,-0.55557023301960195560f,0.83146961230254534669f, --0.55811853122055610221f,0.82976123379452304540f,-0.56066157619733592021f, -0.82804504525775579626f,-0.56319934401383386913f,0.82632106284566364529f, --0.56573181078361323149f,0.82458930278502517996f,-0.56825895267013148970f, -0.82284978137582631685f,-0.57078074588696714464f,0.82110251499110475937f, --0.57329716669804198226f,0.81934752007679712005f,-0.57580819141784533866f, -0.81758481315158371139f,-0.57831379641165547856f,0.81581441080673378075f, --0.58081395809576441547f,0.81403632970594852480f,-0.58330865293769840196f, -0.81225058658520388200f,-0.58579785745643886408f,0.81045719825259476821f, --0.58828154822264522306f,0.80865618158817509364f,-0.59075970185887405339f, -0.80684755354379944503f,-0.59323229503979990618f,0.80503133114296354655f, --0.59569930449243335691f,0.80320753148064494287f,-0.59816070699634216190f, -0.80137617172314024039f,-0.60061647938386875101f,0.79953726910790523519f, --0.60306659854034827539f,0.79769084094339104407f,-0.60551104140432543410f, -0.79583690460888356633f,-0.60794978496777352106f,0.79397547755433728334f, --0.61038280627630958630f,0.79210657730021227785f,-0.61281008242940970820f, -0.79023022143731003197f,-0.61523159058062670823f,0.78834642762660633863f, --0.61764730793780375784f,0.78645521359908587833f,-0.62005721176328920663f, -0.78455659715557513056f,-0.62246127937414996723f,0.78265059616657572938f, --0.62485948814238623239f,0.78073722857209459924f,-0.62725181549514386070f, -0.77881651238147608929f,-0.62963823891492709528f,0.77688846567323244230f, --0.63201873593980895105f,0.77495310659487393057f,-0.63439328416364537677f, -0.77301045336273710440f,-0.63676186123628431002f,0.77106052426181370674f, --0.63912444486377573138f,0.76910333764557958780f,-0.64148101280858305095f, -0.76713891193582040007f,-0.64383154288979127511f,0.76516726562245906962f, --0.64617601298331639459f,0.76318841726338115805f,-0.64851440102211244110f, -0.76120238548426188974f,-0.65084668499638076433f,0.75920918897838807204f, --0.65317284295377653347f,0.75720884650648467851f,-0.65549285299961546070f, -0.75520137689653643598f,-0.65780669329707852633f,0.75318679904361252042f, --0.66011434206742036768f,0.75116513190968658975f,-0.66241577759017189475f, -0.74913639452345925918f,-0.66471097820334490436f,0.74710060598018013245f, --0.66699992230363736034f,0.74505778544146605835f,-0.66928258834663589827f, -0.74300795213512182968f,-0.67155895484701844111f,0.74095112535495899486f, --0.67382900037875603783f,0.73888732446061522463f,-0.67609270357531581208f, -0.73681656887737001504f,-0.67835004312986124653f,0.73473887809596372112f, --0.68060099779545302212f,0.73265427167241281570f,-0.68284554638524797010f, -0.73056276922782759087f,-0.68508366777270024439f,0.72846439044822530740f, --0.68731534089175916336f,0.72635915508434589771f,-0.68954054473706694051f, -0.72424708295146689174f,-0.69175925836415763648f,0.72212819392921545614f, --0.69397146088965377952f,0.72000250796138176579f,-0.69617713149146298601f, -0.71787004505573170920f,-0.69837624940897280457f,0.71573082528381870571f, 
--0.70056879394324822474f,0.71358486878079363525f,-0.70275474445722507788f, -0.71143219574521665560f,-0.70493408037590488124f,0.70927282643886557789f, --0.70710678118654746172f,0.70710678118654757274f,-0.70927282643886546687f, -0.70493408037590510329f,-0.71143219574521654458f,0.70275474445722518890f, --0.71358486878079352422f,0.70056879394324833576f,-0.71573082528381859468f, -0.69837624940897291559f,-0.71787004505573159818f,0.69617713149146309703f, --0.72000250796138165477f,0.69397146088965389055f,-0.72212819392921523409f, -0.69175925836415785852f,-0.72424708295146678072f,0.68954054473706705153f, --0.72635915508434578669f,0.68731534089175927438f,-0.72846439044822519637f, -0.68508366777270035541f,-0.73056276922782747985f,0.68284554638524808112f, --0.73265427167241270467f,0.68060099779545324417f,-0.73473887809596349907f, -0.67835004312986135755f,-0.73681656887736979300f,0.67609270357531592310f, --0.73888732446061511361f,0.67382900037875614885f,-0.74095112535495888384f, -0.67155895484701855214f,-0.74300795213512171866f,0.66928258834663600929f, --0.74505778544146594733f,0.66699992230363758239f,-0.74710060598018002143f, -0.66471097820334501538f,-0.74913639452345914815f,0.66241577759017200577f, --0.75116513190968636771f,0.66011434206742047870f,-0.75318679904361240940f, -0.65780669329707874837f,-0.75520137689653643598f,0.65549285299961557172f, --0.75720884650648467851f,0.65317284295377664449f,-0.75920918897838796102f, -0.65084668499638098638f,-0.76120238548426166769f,0.64851440102211255212f, --0.76318841726338115805f,0.64617601298331661663f,-0.76516726562245895860f, -0.64383154288979138613f,-0.76713891193582040007f,0.64148101280858316198f, --0.76910333764557947678f,0.63912444486377584241f,-0.77106052426181359571f, -0.63676186123628442104f,-0.77301045336273699338f,0.63439328416364548779f, --0.77495310659487381955f,0.63201873593980906207f,-0.77688846567323233128f, -0.62963823891492720630f,-0.77881651238147597827f,0.62725181549514408275f, --0.78073722857209448822f,0.62485948814238634341f,-0.78265059616657561836f, -0.62246127937415007825f,-0.78455659715557501954f,0.62005721176328942867f, --0.78645521359908576731f,0.61764730793780386886f,-0.78834642762660622761f, -0.61523159058062693028f,-0.79023022143730992095f,0.61281008242940981923f, --0.79210657730021216683f,0.61038280627630969732f,-0.79397547755433717231f, -0.60794978496777363208f,-0.79583690460888345530f,0.60551104140432565615f, --0.79769084094339093305f,0.60306659854034838641f,-0.79953726910790512417f, -0.60061647938386886203f,-0.80137617172314024039f,0.59816070699634238395f, --0.80320753148064483184f,0.59569930449243346793f,-0.80503133114296343553f, -0.59323229503980001720f,-0.80684755354379933401f,0.59075970185887416442f, --0.80865618158817498262f,0.58828154822264533408f,-0.81045719825259465718f, -0.58579785745643897510f,-0.81225058658520377097f,0.58330865293769851299f, --0.81403632970594841378f,0.58081395809576452649f,-0.81581441080673378075f, -0.57831379641165570060f,-0.81758481315158360037f,0.57580819141784544968f, --0.81934752007679700903f,0.57329716669804209328f,-0.82110251499110464835f, -0.57078074588696725566f,-0.82284978137582620583f,0.56825895267013171175f, --0.82458930278502506894f,0.56573181078361345353f,-0.82632106284566353427f, -0.56319934401383409117f,-0.82804504525775568524f,0.56066157619733614226f, --0.82976123379452293438f,0.55811853122055632426f,-0.83146961230254534669f, -0.55557023301960217765f,-0.83317016470191318511f,0.55301670558002746780f, --0.83486287498638001026f,0.55045797293660492233f,-0.83654772722351189440f, 
-0.54789405917310041172f,-0.83822470555483807875f,0.54532498842204635281f, --0.83989379419599952126f,0.54275078486451588944f,-0.84155497743689833268f, -0.54017147272989296525f,-0.84320823964184532517f,0.53758707629564572716f, --0.84485356524970711689f,0.53499761988709715332f,-0.84649093877405201525f, -0.53240312787719801246f,-0.84812034480329712149f,0.52980362468629482731f, --0.84974176800085254868f,0.52719913478190127964f,-0.85135519310526519554f, -0.52458968267846894928f,-0.85296060493036363059f,0.52197529293715438925f, --0.85455798836540042274f,0.51935599016558975372f,-0.85614732837519447184f, -0.51673179901764976218f,-0.85772861000027200706f,0.51410274419322177231f, --0.85930181835700836235f,0.51146885043797052361f,-0.86086693863776719837f, -0.50883014254310732216f,-0.86242395611104050168f,0.50618664534515522835f, --0.86397285612158669643f,0.50353838372571757542f,-0.86551362409056897818f, -0.50088538261124093687f,-0.86704624551569264845f,0.49822766697278175752f, --0.86857070597134089507f,0.49556526182577254058f,-0.87008699110871134952f, -0.49289819222978414892f,-0.87159508665595086807f,0.49022648328829138142f, --0.87309497841829009079f,0.48755016014843588490f,-0.87458665227817611321f, -0.48486924800079111986f,-0.87607009419540649020f,0.48218377207912288540f, --0.87754529020726113053f,0.47949375766015328582f,-0.87901222642863352519f, -0.47679923006332208812f,-0.88047088905216075450f,0.47410021465055007805f, --0.88192126434835493853f,0.47139673682599780857f,-0.88336333866573168994f, -0.46868882203582784562f,-0.88479709843093778954f,0.46597649576796618121f, --0.88622253014888052736f,0.46325978355186031576f,-0.88763962040285382393f, -0.46053871095824022719f,-0.88904835585466457371f,0.45781330359887717485f, --0.89044872324475787817f,0.45508358712634389143f,-0.89184070939234261211f, -0.45234958723377105549f,-0.89322430119551521344f,0.44961132965460687272f, --0.89459948563138269595f,0.44686884016237415906f,-0.89596624975618510689f, -0.44412214457042931137f,-0.89732458070541820661f,0.44137126873171689256f, --0.89867446569395392775f,0.43861623853852754751f,-0.90001589201616016833f, -0.43585707992225553031f,-0.90134884704602191707f,0.43309381885315206828f, --0.90267331823725871498f,0.43032648134008288920f,-0.90398929312344333820f, -0.42755509343028202940f,-0.90529675931811870448f,0.42477968120910886141f, --0.90659570451491533483f,0.42200027079979984812f,-0.90788611648766603945f, -0.41921688836322423821f,-0.90916798309052238025f,0.41642956009763715253f, --0.91044129225806713634f,0.41363831223843466889f,-0.91170603200542976730f, -0.41084317105790413294f,-0.91296219042839821256f,0.40804416286497857680f, --0.91420975570353069095f,0.40524131400498991651f,-0.91544871608826772214f, -0.40243465085941859671f,-0.91667905992104259383f,0.39962419984564706565f, --0.91790077562139049672f,0.39680998741671025254f,-0.91911385169005777040f, -0.39399204006104815434f,-0.92031827670911048322f,0.39117038430225403722f, --0.92151403934204179080f,0.38834504669882657923f,-0.92270112833387862850f, -0.38551605384391884890f,-0.92387953251128673848f,0.38268343236508989280f, --0.92504924078267747323f,0.37984720892405138271f,-0.92621024213831137928f, -0.37700741021641814843f,-0.92736252565040111495f,0.37416406297145804460f, --0.92850608047321547822f,0.37131719395183770960f,-0.92964089584318121418f, -0.36846682995337259880f,-0.93076696107898371224f,0.36561299780477379828f, --0.93188426558166803648f,0.36275572436739728088f,-0.93299279883473884567f, -0.35989503653498833291f,-0.93409255040425875904f,0.35703096123343031065f, 
--0.93518350993894761025f,0.35416352542049039931f,-0.93626566717027825959f, -0.35129275608556720378f,-0.93733901191257484875f,0.34841868024943478677f, --0.93840353406310816897f,0.34554132496398898278f,-0.93945922360218991898f, -0.34266071731199443384f,-0.94050607059326829518f,0.33977688440682701776f, --0.94154406518302069529f,0.33688985339222032867f,-0.94257319760144686605f, -0.33399965144200938205f,-0.94359345816196038559f,0.33110630575987648472f, --0.94460483726148014583f,0.32820984357909271933f,-0.94560732538052116869f, -0.32531029216226325929f,-0.94660091308328353499f,0.32240767880106985244f, --0.94758559101774109124f,0.31950203081601580291f,-0.94856134991573026749f, -0.31659337555616606785f,-0.94952818059303667475f,0.31368174039889140658f, --0.95048607394948170235f,0.31076715274961153046f,-0.95143502096900833820f, -0.30784964004153503314f,-0.95237501271976576778f,0.30492922973540265152f, --0.95330604035419386211f,0.30200594931922802866f,-0.95422809510910555630f, -0.29907982630804053059f,-0.95514116830577067141f,0.29615088824362401088f, --0.95604525134999629454f,0.29321916269425896129f,-0.95694033573220882438f, -0.29028467725446238656f,-0.95782641302753290802f,0.28734745954472962204f, --0.95870347489587148804f,0.28440753721127209896f,-0.95957151308198451733f, -0.28146493792575788540f,-0.96043051941556578655f,0.27851968938505317075f, --0.96128048581132063966f,0.27557181931095831029f,-0.96212140426904146917f, -0.27262135544994925418f,-0.96295326687368387741f,0.26966832557291509076f, --0.96377606579543984022f,0.26671275747489847641f,-0.96458979328981264700f, -0.26375467897483156898f,-0.96539444169768928727f,0.26079411791527584707f, --0.96619000344541250413f,0.25783110216215898713f,-0.96697647104485207059f, -0.25486565960451468271f,-0.96775383709347539973f,0.25189781815421719013f, --0.96852209427441737777f,0.24892760574572009302f,-0.96928123535654842069f, -0.24595505033579465048f,-0.97003125319454397424f,0.24298017990326406523f, --0.97077214072895023911f,0.24000302244874177626f,-0.97150389098625178352f, -0.23702360599436717026f,-0.97222649707893626925f,0.23404195858354351345f, --0.97293995220556006576f,0.23105810828067133156f,-0.97364424965081186603f, -0.22807208317088606409f,-0.97433938278557585821f,0.22508391135979283204f, --0.97502534506699412020f,0.22209362097320364815f,-0.97570213003852845901f, -0.21910124015687004739f,-0.97636973133002114000f,0.21610679707621943679f, --0.97702814265775439484f,0.21311031991609141745f,-0.97767735782450992943f, -0.21011183688046980444f,-0.97831737071962754371f,0.20711137619221883788f, --0.97894817531906219710f,0.20410896609281684033f,-0.97956976568544051887f, -0.20110463484209201157f,-0.98018213596811731847f,0.19809841071795381007f, --0.98078528040323043058f,0.19509032201612860891f,-0.98137919331375456089f, -0.19208039704989246510f,-0.98196386910955524296f,0.18906866414980635915f, --0.98253930228744124076f,0.18605515166344691047f,-0.98310548743121628501f, -0.18303988795514089527f,-0.98366241921173025453f,0.18002290140569957022f, --0.98421009238692902521f,0.17700422041214894375f,-0.98474850180190420801f, -0.17398387338746412745f,-0.98527764238894122162f,0.17096188876030121717f, --0.98579750916756736512f,0.16793829497473128365f,-0.98630809724459855836f, -0.16491312048997014417f,-0.98680940181418552726f,0.16188639378011174252f, --0.98730141815785843473f,0.15885814333386147346f,-0.98778414164457217783f, -0.15582839765426537149f,-0.98825756773074946437f,0.15279718525844368515f, --0.98872169196032377858f,0.14976453467732145364f,-0.98917650996478101444f, 
-0.14673047445536180344f,-0.98962201746320077600f,0.14369503315029463764f, --0.99005821026229701154f,0.14065823933284954395f,-0.99048508425645709341f, -0.13762012158648603832f,-0.99090263542778000971f,0.13458070850712627875f, --0.99131085984611544415f,0.13154002870288333815f,-0.99170975366909952520f, -0.12849811079379308554f,-0.99209931314219179654f,0.12545498341154626143f, --0.99247953459870996706f,0.12241067519921634832f,-0.99285041445986510489f, -0.11936521481099163222f,-0.99321194923479450001f,0.11631863091190471071f, --0.99356413552059530403f,0.11327095217756441570f,-0.99390697000235606051f, -0.11022220729388323979f,-0.99424044945318790223f,0.10717242495680916192f, --0.99456457073425541537f,0.10412163387205457254f,-0.99487933079480561638f, -0.10106986275482793269f,-0.99518472667219681771f,0.09801714032956082567f, --0.99548075549192693856f,0.09496349532963890838f,-0.99576741446765981713f, -0.09190895649713275162f,-0.99604470090125196702f,0.08885355258252475297f, --0.99631261218277800129f,0.08579731234444015753f,-0.99657114579055483539f, -0.08274026454937563613f,-0.99682029929116566791f,0.07968243797143019502f, --0.99706007033948296225f,0.07662386139203168633f,-0.99729045667869020697f, -0.07356456359966773162f,-0.99751145614030345410f,0.07050457338961385600f, --0.99772306664419163624f,0.06744391956366417584f,-0.99792528619859599548f, -0.06438263092985770097f,-0.99811811290014917919f,0.06132073630220848809f, --0.99830154493389289261f,0.05825826450043579408f,-0.99847558057329477421f, -0.05519524434969009380f,-0.99864021818026516009f,0.05213170468028359428f, --0.99879545620517240501f,0.04906767432741796636f,-0.99894129318685687124f, -0.04600318213091470626f,-0.99907772775264536147f,0.04293825693494102147f, --0.99920475861836388631f,0.03987292758774012985f,-0.99932238458834954375f, -0.03680722294135883171f,-0.99943060455546173237f,0.03374117185137770480f, --0.99952941750109314256f,0.03067480317663686534f,-0.99961882249517863830f, -0.02760814577896565994f,-0.99969881869620424997f,0.02454122852291232629f, --0.99976940535121527898f,0.02147408027546966747f,-0.99983058179582340319f, -0.01840672990580510121f,-0.99988234745421256111f,0.01533920628498806026f, --0.99992470183914450299f,0.01227153828572000692f,-0.99995764455196389786f, -0.00920375478206002066f,-0.99998117528260110909f,0.00613588464915479880f, --0.99999529380957619118f,0.00306795676296597701f,1.00000000000000000000f, -0.00000000000000000000f,0.99992470183914450299f,0.01227153828571992539f, -0.99969881869620424997f,0.02454122852291228812f,0.99932238458834954375f, -0.03680722294135883171f,0.99879545620517240501f,0.04906767432741801493f, -0.99811811290014917919f,0.06132073630220857829f,0.99729045667869020697f, -0.07356456359966742631f,0.99631261218277800129f,0.08579731234443989385f, -0.99518472667219692873f,0.09801714032956060363f,0.99390697000235606051f, -0.11022220729388305938f,0.99247953459870996706f,0.12241067519921619566f, -0.99090263542778000971f,0.13458070850712616773f,0.98917650996478101444f, -0.14673047445536174793f,0.98730141815785843473f,0.15885814333386144570f, -0.98527764238894122162f,0.17096188876030121717f,0.98310548743121628501f, -0.18303988795514095078f,0.98078528040323043058f,0.19509032201612824808f, -0.97831737071962765473f,0.20711137619221856032f,0.97570213003852857003f, -0.21910124015686979759f,0.97293995220556017678f,0.23105810828067110951f, -0.97003125319454397424f,0.24298017990326387094f,0.96697647104485207059f, -0.25486565960451457169f,0.96377606579543984022f,0.26671275747489836538f, 
-0.96043051941556578655f,0.27851968938505305973f,0.95694033573220882438f, -0.29028467725446233105f,0.95330604035419386211f,0.30200594931922808417f, -0.94952818059303667475f,0.31368174039889151761f,0.94560732538052127971f, -0.32531029216226292622f,0.94154406518302080631f,0.33688985339222005111f, -0.93733901191257495977f,0.34841868024943456472f,0.93299279883473895669f, -0.35989503653498811087f,0.92850608047321558924f,0.37131719395183754306f, -0.92387953251128673848f,0.38268343236508978178f,0.91911385169005777040f, -0.39399204006104809883f,0.91420975570353069095f,0.40524131400498986100f, -0.90916798309052238025f,0.41642956009763715253f,0.90398929312344333820f, -0.42755509343028208491f,0.89867446569395381673f,0.43861623853852765853f, -0.89322430119551532446f,0.44961132965460653965f,0.88763962040285393496f, -0.46053871095824000514f,0.88192126434835504956f,0.47139673682599764204f, -0.87607009419540660122f,0.48218377207912271887f,0.87008699110871146054f, -0.49289819222978403790f,0.86397285612158669643f,0.50353838372571757542f, -0.85772861000027211809f,0.51410274419322166128f,0.85135519310526519554f, -0.52458968267846894928f,0.84485356524970711689f,0.53499761988709715332f, -0.83822470555483807875f,0.54532498842204646383f,0.83146961230254523567f, -0.55557023301960217765f,0.82458930278502529099f,0.56573181078361312046f, -0.81758481315158371139f,0.57580819141784533866f,0.81045719825259476821f, -0.58579785745643886408f,0.80320753148064494287f,0.59569930449243335691f, -0.79583690460888356633f,0.60551104140432554512f,0.78834642762660622761f, -0.61523159058062681925f,0.78073722857209448822f,0.62485948814238634341f, -0.77301045336273699338f,0.63439328416364548779f,0.76516726562245895860f, -0.64383154288979138613f,0.75720884650648456748f,0.65317284295377675551f, -0.74913639452345937020f,0.66241577759017178373f,0.74095112535495921691f, -0.67155895484701833009f,0.73265427167241281570f,0.68060099779545302212f, -0.72424708295146700276f,0.68954054473706682948f,0.71573082528381870571f, -0.69837624940897280457f,0.70710678118654757274f,0.70710678118654757274f, -0.69837624940897291559f,0.71573082528381859468f,0.68954054473706694051f, -0.72424708295146689174f,0.68060099779545302212f,0.73265427167241281570f, -0.67155895484701833009f,0.74095112535495910588f,0.66241577759017178373f, -0.74913639452345925918f,0.65317284295377686654f,0.75720884650648456748f, -0.64383154288979149715f,0.76516726562245895860f,0.63439328416364548779f, -0.77301045336273688235f,0.62485948814238645443f,0.78073722857209448822f, -0.61523159058062681925f,0.78834642762660622761f,0.60551104140432554512f, -0.79583690460888345530f,0.59569930449243346793f,0.80320753148064483184f, -0.58579785745643886408f,0.81045719825259476821f,0.57580819141784533866f, -0.81758481315158371139f,0.56573181078361323149f,0.82458930278502529099f, -0.55557023301960228867f,0.83146961230254523567f,0.54532498842204646383f, -0.83822470555483796772f,0.53499761988709726435f,0.84485356524970700587f, -0.52458968267846883826f,0.85135519310526519554f,0.51410274419322166128f, -0.85772861000027211809f,0.50353838372571757542f,0.86397285612158669643f, -0.49289819222978409341f,0.87008699110871134952f,0.48218377207912282989f, -0.87607009419540660122f,0.47139673682599780857f,0.88192126434835493853f, -0.46053871095824000514f,0.88763962040285393496f,0.44961132965460659516f, -0.89322430119551532446f,0.43861623853852771404f,0.89867446569395381673f, -0.42755509343028219593f,0.90398929312344333820f,0.41642956009763731906f, -0.90916798309052226923f,0.40524131400498986100f,0.91420975570353069095f, 
-0.39399204006104809883f,0.91911385169005777040f,0.38268343236508983729f, -0.92387953251128673848f,0.37131719395183759858f,0.92850608047321558924f, -0.35989503653498827740f,0.93299279883473884567f,0.34841868024943450921f, -0.93733901191257495977f,0.33688985339222005111f,0.94154406518302080631f, -0.32531029216226298173f,0.94560732538052127971f,0.31368174039889157312f, -0.94952818059303667475f,0.30200594931922819519f,0.95330604035419375109f, -0.29028467725446233105f,0.95694033573220893540f,0.27851968938505305973f, -0.96043051941556578655f,0.26671275747489842090f,0.96377606579543984022f, -0.25486565960451462720f,0.96697647104485207059f,0.24298017990326398197f, -0.97003125319454397424f,0.23105810828067127605f,0.97293995220556006576f, -0.21910124015686976984f,0.97570213003852857003f,0.20711137619221856032f, -0.97831737071962765473f,0.19509032201612833135f,0.98078528040323043058f, -0.18303988795514106180f,0.98310548743121628501f,0.17096188876030135595f, -0.98527764238894122162f,0.15885814333386139019f,0.98730141815785843473f, -0.14673047445536174793f,0.98917650996478101444f,0.13458070850712622324f, -0.99090263542778000971f,0.12241067519921627893f,0.99247953459870996706f, -0.11022220729388318428f,0.99390697000235606051f,0.09801714032956077016f, -0.99518472667219681771f,0.08579731234443987997f,0.99631261218277800129f, -0.07356456359966745406f,0.99729045667869020697f,0.06132073630220864768f, -0.99811811290014917919f,0.04906767432741812596f,0.99879545620517240501f, -0.03680722294135899131f,0.99932238458834954375f,0.02454122852291226384f, -0.99969881869620424997f,0.01227153828571994447f,0.99992470183914450299f, -0.00000000000000006123f,1.00000000000000000000f,-0.01227153828571982304f, -0.99992470183914450299f,-0.02454122852291214241f,0.99969881869620424997f, --0.03680722294135886641f,0.99932238458834954375f,-0.04906767432741800800f, -0.99879545620517240501f,-0.06132073630220852972f,0.99811811290014917919f, --0.07356456359966732916f,0.99729045667869020697f,-0.08579731234443975507f, -0.99631261218277800129f,-0.09801714032956064526f,0.99518472667219692873f, --0.11022220729388305938f,0.99390697000235606051f,-0.12241067519921615403f, -0.99247953459870996706f,-0.13458070850712611222f,0.99090263542778000971f, --0.14673047445536163691f,0.98917650996478101444f,-0.15885814333386127917f, -0.98730141815785843473f,-0.17096188876030124493f,0.98527764238894122162f, --0.18303988795514092303f,0.98310548743121628501f,-0.19509032201612819257f, -0.98078528040323043058f,-0.20711137619221844930f,0.97831737071962765473f, --0.21910124015686965881f,0.97570213003852857003f,-0.23105810828067113727f, -0.97293995220556017678f,-0.24298017990326387094f,0.97003125319454397424f, --0.25486565960451451618f,0.96697647104485207059f,-0.26671275747489830987f, -0.96377606579543984022f,-0.27851968938505294870f,0.96043051941556589757f, --0.29028467725446216452f,0.95694033573220893540f,-0.30200594931922808417f, -0.95330604035419386211f,-0.31368174039889140658f,0.94952818059303667475f, --0.32531029216226287071f,0.94560732538052139073f,-0.33688985339221994009f, -0.94154406518302080631f,-0.34841868024943439819f,0.93733901191257495977f, --0.35989503653498816638f,0.93299279883473884567f,-0.37131719395183748755f, -0.92850608047321558924f,-0.38268343236508972627f,0.92387953251128673848f, --0.39399204006104798781f,0.91911385169005777040f,-0.40524131400498974998f, -0.91420975570353069095f,-0.41642956009763698599f,0.90916798309052249127f, --0.42755509343028186287f,0.90398929312344344922f,-0.43861623853852738097f, 
-0.89867446569395392775f,-0.44961132965460670619f,0.89322430119551521344f, --0.46053871095824006066f,0.88763962040285393496f,-0.47139673682599769755f, -0.88192126434835504956f,-0.48218377207912271887f,0.87607009419540660122f, --0.49289819222978398239f,0.87008699110871146054f,-0.50353838372571746440f, -0.86397285612158680745f,-0.51410274419322155026f,0.85772861000027211809f, --0.52458968267846872724f,0.85135519310526519554f,-0.53499761988709704230f, -0.84485356524970722791f,-0.54532498842204624179f,0.83822470555483818977f, --0.55557023301960195560f,0.83146961230254534669f,-0.56573181078361323149f, -0.82458930278502517996f,-0.57580819141784533866f,0.81758481315158371139f, --0.58579785745643886408f,0.81045719825259476821f,-0.59569930449243335691f, -0.80320753148064494287f,-0.60551104140432543410f,0.79583690460888356633f, --0.61523159058062670823f,0.78834642762660633863f,-0.62485948814238623239f, -0.78073722857209459924f,-0.63439328416364537677f,0.77301045336273710440f, --0.64383154288979127511f,0.76516726562245906962f,-0.65317284295377653347f, -0.75720884650648467851f,-0.66241577759017189475f,0.74913639452345925918f, --0.67155895484701844111f,0.74095112535495899486f,-0.68060099779545302212f, -0.73265427167241281570f,-0.68954054473706694051f,0.72424708295146689174f, --0.69837624940897280457f,0.71573082528381870571f,-0.70710678118654746172f, -0.70710678118654757274f,-0.71573082528381859468f,0.69837624940897291559f, --0.72424708295146678072f,0.68954054473706705153f,-0.73265427167241270467f, -0.68060099779545324417f,-0.74095112535495888384f,0.67155895484701855214f, --0.74913639452345914815f,0.66241577759017200577f,-0.75720884650648467851f, -0.65317284295377664449f,-0.76516726562245895860f,0.64383154288979138613f, --0.77301045336273699338f,0.63439328416364548779f,-0.78073722857209448822f, -0.62485948814238634341f,-0.78834642762660622761f,0.61523159058062693028f, --0.79583690460888345530f,0.60551104140432565615f,-0.80320753148064483184f, -0.59569930449243346793f,-0.81045719825259465718f,0.58579785745643897510f, --0.81758481315158360037f,0.57580819141784544968f,-0.82458930278502506894f, -0.56573181078361345353f,-0.83146961230254534669f,0.55557023301960217765f, --0.83822470555483807875f,0.54532498842204635281f,-0.84485356524970711689f, -0.53499761988709715332f,-0.85135519310526519554f,0.52458968267846894928f, --0.85772861000027200706f,0.51410274419322177231f,-0.86397285612158669643f, -0.50353838372571757542f,-0.87008699110871134952f,0.49289819222978414892f, --0.87607009419540649020f,0.48218377207912288540f,-0.88192126434835493853f, -0.47139673682599780857f,-0.88763962040285382393f,0.46053871095824022719f, --0.89322430119551521344f,0.44961132965460687272f,-0.89867446569395392775f, -0.43861623853852754751f,-0.90398929312344333820f,0.42755509343028202940f, --0.90916798309052238025f,0.41642956009763715253f,-0.91420975570353069095f, -0.40524131400498991651f,-0.91911385169005777040f,0.39399204006104815434f, --0.92387953251128673848f,0.38268343236508989280f,-0.92850608047321547822f, -0.37131719395183770960f,-0.93299279883473884567f,0.35989503653498833291f, --0.93733901191257484875f,0.34841868024943478677f,-0.94154406518302069529f, -0.33688985339222032867f,-0.94560732538052116869f,0.32531029216226325929f, --0.94952818059303667475f,0.31368174039889140658f,-0.95330604035419386211f, -0.30200594931922802866f,-0.95694033573220882438f,0.29028467725446238656f, --0.96043051941556578655f,0.27851968938505317075f,-0.96377606579543984022f, -0.26671275747489847641f,-0.96697647104485207059f,0.25486565960451468271f, 
--0.97003125319454397424f,0.24298017990326406523f,-0.97293995220556006576f, -0.23105810828067133156f,-0.97570213003852845901f,0.21910124015687004739f, --0.97831737071962754371f,0.20711137619221883788f,-0.98078528040323043058f, -0.19509032201612860891f,-0.98310548743121628501f,0.18303988795514089527f, --0.98527764238894122162f,0.17096188876030121717f,-0.98730141815785843473f, -0.15885814333386147346f,-0.98917650996478101444f,0.14673047445536180344f, --0.99090263542778000971f,0.13458070850712627875f,-0.99247953459870996706f, -0.12241067519921634832f,-0.99390697000235606051f,0.11022220729388323979f, --0.99518472667219681771f,0.09801714032956082567f,-0.99631261218277800129f, -0.08579731234444015753f,-0.99729045667869020697f,0.07356456359966773162f, --0.99811811290014917919f,0.06132073630220848809f,-0.99879545620517240501f, -0.04906767432741796636f,-0.99932238458834954375f,0.03680722294135883171f, --0.99969881869620424997f,0.02454122852291232629f,-0.99992470183914450299f, -0.01227153828572000692f,1.00000000000000000000f,0.00000000000000000000f, -0.99879545620517240501f,0.04906767432741801493f,0.99518472667219692873f, -0.09801714032956060363f,0.98917650996478101444f,0.14673047445536174793f, -0.98078528040323043058f,0.19509032201612824808f,0.97003125319454397424f, -0.24298017990326387094f,0.95694033573220882438f,0.29028467725446233105f, -0.94154406518302080631f,0.33688985339222005111f,0.92387953251128673848f, -0.38268343236508978178f,0.90398929312344333820f,0.42755509343028208491f, -0.88192126434835504956f,0.47139673682599764204f,0.85772861000027211809f, -0.51410274419322166128f,0.83146961230254523567f,0.55557023301960217765f, -0.80320753148064494287f,0.59569930449243335691f,0.77301045336273699338f, -0.63439328416364548779f,0.74095112535495921691f,0.67155895484701833009f, -0.70710678118654757274f,0.70710678118654757274f,0.67155895484701833009f, -0.74095112535495910588f,0.63439328416364548779f,0.77301045336273688235f, -0.59569930449243346793f,0.80320753148064483184f,0.55557023301960228867f, -0.83146961230254523567f,0.51410274419322166128f,0.85772861000027211809f, -0.47139673682599780857f,0.88192126434835493853f,0.42755509343028219593f, -0.90398929312344333820f,0.38268343236508983729f,0.92387953251128673848f, -0.33688985339222005111f,0.94154406518302080631f,0.29028467725446233105f, -0.95694033573220893540f,0.24298017990326398197f,0.97003125319454397424f, -0.19509032201612833135f,0.98078528040323043058f,0.14673047445536174793f, -0.98917650996478101444f,0.09801714032956077016f,0.99518472667219681771f, -0.04906767432741812596f,0.99879545620517240501f,0.00000000000000006123f, -1.00000000000000000000f,-0.04906767432741800800f,0.99879545620517240501f, --0.09801714032956064526f,0.99518472667219692873f,-0.14673047445536163691f, -0.98917650996478101444f,-0.19509032201612819257f,0.98078528040323043058f, --0.24298017990326387094f,0.97003125319454397424f,-0.29028467725446216452f, -0.95694033573220893540f,-0.33688985339221994009f,0.94154406518302080631f, --0.38268343236508972627f,0.92387953251128673848f,-0.42755509343028186287f, -0.90398929312344344922f,-0.47139673682599769755f,0.88192126434835504956f, --0.51410274419322155026f,0.85772861000027211809f,-0.55557023301960195560f, -0.83146961230254534669f,-0.59569930449243335691f,0.80320753148064494287f, --0.63439328416364537677f,0.77301045336273710440f,-0.67155895484701844111f, -0.74095112535495899486f,-0.70710678118654746172f,0.70710678118654757274f, --0.74095112535495888384f,0.67155895484701855214f,-0.77301045336273699338f, 
-0.63439328416364548779f,-0.80320753148064483184f,0.59569930449243346793f, --0.83146961230254534669f,0.55557023301960217765f,-0.85772861000027200706f, -0.51410274419322177231f,-0.88192126434835493853f,0.47139673682599780857f, --0.90398929312344333820f,0.42755509343028202940f,-0.92387953251128673848f, -0.38268343236508989280f,-0.94154406518302069529f,0.33688985339222032867f, --0.95694033573220882438f,0.29028467725446238656f,-0.97003125319454397424f, -0.24298017990326406523f,-0.98078528040323043058f,0.19509032201612860891f, --0.98917650996478101444f,0.14673047445536180344f,-0.99518472667219681771f, -0.09801714032956082567f,-0.99879545620517240501f,0.04906767432741796636f, -1.00000000000000000000f,0.00000000000000000000f,0.98078528040323043058f, -0.19509032201612824808f,0.92387953251128673848f,0.38268343236508978178f, -0.83146961230254523567f,0.55557023301960217765f,0.70710678118654757274f, -0.70710678118654757274f,0.55557023301960228867f,0.83146961230254523567f, -0.38268343236508983729f,0.92387953251128673848f,0.19509032201612833135f, -0.98078528040323043058f,0.00000000000000006123f,1.00000000000000000000f, --0.19509032201612819257f,0.98078528040323043058f,-0.38268343236508972627f, -0.92387953251128673848f,-0.55557023301960195560f,0.83146961230254534669f, --0.70710678118654746172f,0.70710678118654757274f,-0.83146961230254534669f, -0.55557023301960217765f,-0.92387953251128673848f,0.38268343236508989280f, --0.98078528040323043058f,0.19509032201612860891f,1.00000000000000000000f, -0.00000000000000000000f,0.70710678118654757274f,0.70710678118654757274f, -0.00000000000000006123f,1.00000000000000000000f,-0.70710678118654746172f, -0.70710678118654757274f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99999529123306274414f, +0.00306795677170157433f,0.99998116493225097656f,0.00613588467240333557f, +0.99995762109756469727f,0.00920375436544418335f,0.99992471933364868164f, +0.01227153837680816650f,0.99988234043121337891f,0.01533920597285032272f, +0.99983060359954833984f,0.01840673014521598816f,0.99976938962936401367f, +0.02147408016026020050f,0.99969881772994995117f,0.02454122900962829590f, +0.99961882829666137695f,0.02760814502835273743f,0.99952942132949829102f, +0.03067480400204658508f,0.99943059682846069336f,0.03374117240309715271f, +0.99932235479354858398f,0.03680722415447235107f,0.99920475482940673828f, +0.03987292572855949402f,0.99907773733139038086f,0.04293825849890708923f, +0.99894130229949951172f,0.04600318148732185364f,0.99879544973373413086f, +0.04906767606735229492f,0.99864023923873901367f,0.05213170498609542847f, +0.99847555160522460938f,0.05519524589180946350f,0.99830156564712524414f, +0.05825826525688171387f,0.99811810255050659180f,0.06132073700428009033f, +0.99792528152465820312f,0.06438262760639190674f,0.99772304296493530273f, +0.06744392216205596924f,0.99751144647598266602f,0.07050457596778869629f, +0.99729043245315551758f,0.07356456667184829712f,0.99706006050109863281f, +0.07662386447191238403f,0.99682027101516723633f,0.07968243956565856934f, +0.99657112360000610352f,0.08274026215076446533f,0.99631261825561523438f, +0.08579730987548828125f,0.99604469537734985352f,0.08885355293750762939f, +0.99576741456985473633f,0.09190895408391952515f,0.99548077583312988281f, +0.09496349841356277466f,0.99518471956253051758f,0.09801714122295379639f, +0.99487930536270141602f,0.10106986016035079956f,0.99456459283828735352f, +0.10412163287401199341f,0.99424046277999877930f,0.10717242211103439331f, +0.99390697479248046875f,0.11022220551967620850f,0.99356412887573242188f, 
+0.11327095329761505127f,0.99321192502975463867f,0.11631862819194793701f, +0.99285042285919189453f,0.11936521530151367188f,0.99247956275939941406f, +0.12241067737340927124f,0.99209928512573242188f,0.12545497715473175049f, +0.99170976877212524414f,0.12849810719490051270f,0.99131083488464355469f, +0.13154003024101257324f,0.99090266227722167969f,0.13458070158958435059f, +0.99048507213592529297f,0.13762012124061584473f,0.99005818367004394531f, +0.14065824449062347412f,0.98962199687957763672f,0.14369502663612365723f, +0.98917651176452636719f,0.14673046767711639404f,0.98872166872024536133f, +0.14976453781127929688f,0.98825758695602416992f,0.15279719233512878418f, +0.98778414726257324219f,0.15582840144634246826f,0.98730140924453735352f, +0.15885815024375915527f,0.98680937290191650391f,0.16188639402389526367f, +0.98630809783935546875f,0.16491311788558959961f,0.98579752445220947266f, +0.16793829202651977539f,0.98527765274047851562f,0.17096188664436340332f, +0.98474848270416259766f,0.17398387193679809570f,0.98421007394790649414f, +0.17700421810150146484f,0.98366242647171020508f,0.18002289533615112305f, +0.98310548067092895508f,0.18303988873958587646f,0.98253929615020751953f, +0.18605515360832214355f,0.98196387290954589844f,0.18906866014003753662f, +0.98137921094894409180f,0.19208039343357086182f,0.98078525066375732422f, +0.19509032368659973145f,0.98018211126327514648f,0.19809840619564056396f, +0.97956979274749755859f,0.20110464096069335938f,0.97894817590713500977f, +0.20410896837711334229f,0.97831737995147705078f,0.20711137354373931885f, +0.97767734527587890625f,0.21011184155941009521f,0.97702813148498535156f, +0.21311031281948089600f,0.97636973857879638672f,0.21610680222511291504f, +0.97570210695266723633f,0.21910123527050018311f,0.97502535581588745117f, +0.22209362685680389404f,0.97433936595916748047f,0.22508391737937927246f, +0.97364425659179687500f,0.22807207703590393066f,0.97293996810913085938f, +0.23105810582637786865f,0.97222650051116943359f,0.23404195904731750488f, +0.97150391340255737305f,0.23702360689640045166f,0.97077214717864990234f, +0.24000301957130432129f,0.97003126144409179688f,0.24298018217086791992f, +0.96928125619888305664f,0.24595504999160766602f,0.96852207183837890625f, +0.24892760813236236572f,0.96775382757186889648f,0.25189781188964843750f, +0.96697646379470825195f,0.25486564636230468750f,0.96618998050689697266f, +0.25783109664916992188f,0.96539443731307983398f,0.26079410314559936523f, +0.96458977460861206055f,0.26375466585159301758f,0.96377605199813842773f, +0.26671275496482849121f,0.96295326948165893555f,0.26966831088066101074f, +0.96212142705917358398f,0.27262136340141296387f,0.96128046512603759766f, +0.27557182312011718750f,0.96043050289154052734f,0.27851969003677368164f, +0.95957154035568237305f,0.28146493434906005859f,0.95870345830917358398f, +0.28440752625465393066f,0.95782643556594848633f,0.28734746575355529785f, +0.95694035291671752930f,0.29028466343879699707f,0.95604526996612548828f, +0.29321914911270141602f,0.95514118671417236328f,0.29615089297294616699f, +0.95422810316085815430f,0.29907983541488647461f,0.95330601930618286133f, +0.30200594663619995117f,0.95237499475479125977f,0.30492922663688659668f, +0.95143502950668334961f,0.30784964561462402344f,0.95048606395721435547f, +0.31076714396476745605f,0.94952815771102905273f,0.31368175148963928223f, +0.94856137037277221680f,0.31659337878227233887f,0.94758558273315429688f, +0.31950202584266662598f,0.94660091400146484375f,0.32240769267082214355f, +0.94560730457305908203f,0.32531028985977172852f,0.94460481405258178711f, 
+0.32820984721183776855f,0.94359344244003295898f,0.33110630512237548828f, +0.94257318973541259766f,0.33399966359138488770f,0.94154405593872070312f, +0.33688986301422119141f,0.94050604104995727539f,0.33977687358856201172f, +0.93945920467376708984f,0.34266072511672973633f,0.93840354681015014648f, +0.34554132819175720215f,0.93733900785446166992f,0.34841868281364440918f, +0.93626564741134643555f,0.35129275918006896973f,0.93518352508544921875f, +0.35416352748870849609f,0.93409252166748046875f,0.35703095793724060059f, +0.93299281597137451172f,0.35989505052566528320f,0.93188428878784179688f, +0.36275571584701538086f,0.93076694011688232422f,0.36561298370361328125f, +0.92964088916778564453f,0.36846682429313659668f,0.92850607633590698242f, +0.37131720781326293945f,0.92736250162124633789f,0.37416407465934753418f, +0.92621022462844848633f,0.37700742483139038086f,0.92504924535751342773f, +0.37984719872474670410f,0.92387950420379638672f,0.38268342614173889160f, +0.92270112037658691406f,0.38551604747772216797f,0.92151403427124023438f, +0.38834503293037414551f,0.92031830549240112305f,0.39117038249969482422f, +0.91911387443542480469f,0.39399203658103942871f,0.91790080070495605469f, +0.39680999517440795898f,0.91667908430099487305f,0.39962419867515563965f, +0.91544872522354125977f,0.40243464708328247070f,0.91420978307723999023f, +0.40524131059646606445f,0.91296219825744628906f,0.40804415941238403320f, +0.91170603036880493164f,0.41084316372871398926f,0.91044127941131591797f, +0.41363832354545593262f,0.90916800498962402344f,0.41642954945564270020f, +0.90788608789443969727f,0.41921690106391906738f,0.90659570693969726562f, +0.42200025916099548340f,0.90529674291610717773f,0.42477968335151672363f, +0.90398931503295898438f,0.42755508422851562500f,0.90267330408096313477f, +0.43032649159431457520f,0.90134882926940917969f,0.43309381604194641113f, +0.90001589059829711914f,0.43585708737373352051f,0.89867448806762695312f, +0.43861624598503112793f,0.89732456207275390625f,0.44137126207351684570f, +0.89596623182296752930f,0.44412213563919067383f,0.89459949731826782227f, +0.44686883687973022461f,0.89322429895401000977f,0.44961133599281311035f, +0.89184069633483886719f,0.45234957337379455566f,0.89044874906539916992f, +0.45508357882499694824f,0.88904833793640136719f,0.45781329274177551270f, +0.88763964176177978516f,0.46053871512413024902f,0.88622254133224487305f, +0.46325978636741638184f,0.88479709625244140625f,0.46597650647163391113f, +0.88336336612701416016f,0.46868881583213806152f,0.88192129135131835938f, +0.47139674425125122070f,0.88047087192535400391f,0.47410020232200622559f, +0.87901222705841064453f,0.47679921984672546387f,0.87754529714584350586f, +0.47949376702308654785f,0.87607008218765258789f,0.48218378424644470215f, +0.87458664178848266602f,0.48486924171447753906f,0.87309497594833374023f, +0.48755016922950744629f,0.87159508466720581055f,0.49022647738456726074f, +0.87008696794509887695f,0.49289819598197937012f,0.86857068538665771484f, +0.49556526541709899902f,0.86704623699188232422f,0.49822765588760375977f, +0.86551362276077270508f,0.50088536739349365234f,0.86397284269332885742f, +0.50353837013244628906f,0.86242395639419555664f,0.50618666410446166992f, +0.86086696386337280273f,0.50883013010025024414f,0.85930180549621582031f, +0.51146882772445678711f,0.85772860050201416016f,0.51410275697708129883f, +0.85614734888076782227f,0.51673179864883422852f,0.85455799102783203125f, +0.51935601234436035156f,0.85296058654785156250f,0.52197527885437011719f, +0.85135519504547119141f,0.52458965778350830078f,0.84974175691604614258f, 
+0.52719914913177490234f,0.84812033176422119141f,0.52980363368988037109f, +0.84649091958999633789f,0.53240311145782470703f,0.84485357999801635742f, +0.53499764204025268555f,0.84320825338363647461f,0.53758704662322998047f, +0.84155499935150146484f,0.54017144441604614258f,0.83989381790161132812f, +0.54275077581405639648f,0.83822470903396606445f,0.54532498121261596680f, +0.83654773235321044922f,0.54789406061172485352f,0.83486288785934448242f, +0.55045795440673828125f,0.83317017555236816406f,0.55301672220230102539f, +0.83146959543228149414f,0.55557024478912353516f,0.82976120710372924805f, +0.55811852216720581055f,0.82804507017135620117f,0.56066155433654785156f, +0.82632106542587280273f,0.56319934129714965820f,0.82458931207656860352f, +0.56573182344436645508f,0.82284981012344360352f,0.56825894117355346680f, +0.82110249996185302734f,0.57078075408935546875f,0.81934750080108642578f, +0.57329714298248291016f,0.81758481264114379883f,0.57580816745758056641f, +0.81581443548202514648f,0.57831376791000366211f,0.81403630971908569336f, +0.58081394433975219727f,0.81225061416625976562f,0.58330863714218139648f, +0.81045717000961303711f,0.58579784631729125977f,0.80865615606307983398f, +0.58828157186508178711f,0.80684757232666015625f,0.59075969457626342773f, +0.80503135919570922852f,0.59323227405548095703f,0.80320751667022705078f, +0.59569931030273437500f,0.80137616395950317383f,0.59816068410873413086f, +0.79953724145889282227f,0.60061645507812500000f,0.79769086837768554688f, +0.60306662321090698242f,0.79583692550659179688f,0.60551106929779052734f, +0.79397547245025634766f,0.60794979333877563477f,0.79210656881332397461f, +0.61038279533386230469f,0.79023021459579467773f,0.61281007528305053711f, +0.78834640979766845703f,0.61523157358169555664f,0.78645521402359008789f, +0.61764729022979736328f,0.78455656766891479492f,0.62005722522735595703f, +0.78265058994293212891f,0.62246125936508178711f,0.78073722124099731445f, +0.62485951185226440430f,0.77881652116775512695f,0.62725180387496948242f, +0.77688848972320556641f,0.62963825464248657227f,0.77495312690734863281f, +0.63201874494552612305f,0.77301043272018432617f,0.63439327478408813477f, +0.77106052637100219727f,0.63676184415817260742f,0.76910334825515747070f, +0.63912445306777954102f,0.76713889837265014648f,0.64148104190826416016f, +0.76516723632812500000f,0.64383155107498168945f,0.76318842172622680664f, +0.64617604017257690430f,0.76120239496231079102f,0.64851438999176025391f, +0.75920921564102172852f,0.65084666013717651367f,0.75720882415771484375f, +0.65317285060882568359f,0.75520139932632446289f,0.65549284219741821289f, +0.75318682193756103516f,0.65780669450759887695f,0.75116515159606933594f, +0.66011434793472290039f,0.74913638830184936523f,0.66241580247879028320f, +0.74710059165954589844f,0.66471099853515625000f,0.74505776166915893555f, +0.66699993610382080078f,0.74300795793533325195f,0.66928261518478393555f, +0.74095112085342407227f,0.67155897617340087891f,0.73888731002807617188f, +0.67382901906967163086f,0.73681658506393432617f,0.67609268426895141602f, +0.73473888635635375977f,0.67835003137588500977f,0.73265427350997924805f, +0.68060100078582763672f,0.73056274652481079102f,0.68284553289413452148f, +0.72846436500549316406f,0.68508368730545043945f,0.72635912895202636719f, +0.68731534481048583984f,0.72424709796905517578f,0.68954056501388549805f, +0.72212821245193481445f,0.69175922870635986328f,0.72000253200531005859f, +0.69397145509719848633f,0.71787005662918090820f,0.69617712497711181641f, +0.71573084592819213867f,0.69837623834609985352f,0.71358484029769897461f, 
+0.70056879520416259766f,0.71143221855163574219f,0.70275473594665527344f, +0.70927280187606811523f,0.70493406057357788086f,0.70710676908493041992f, +0.70710676908493041992f,0.70493406057357788086f,0.70927280187606811523f, +0.70275473594665527344f,0.71143221855163574219f,0.70056879520416259766f, +0.71358484029769897461f,0.69837623834609985352f,0.71573084592819213867f, +0.69617712497711181641f,0.71787005662918090820f,0.69397145509719848633f, +0.72000253200531005859f,0.69175922870635986328f,0.72212821245193481445f, +0.68954056501388549805f,0.72424709796905517578f,0.68731534481048583984f, +0.72635912895202636719f,0.68508368730545043945f,0.72846436500549316406f, +0.68284553289413452148f,0.73056274652481079102f,0.68060100078582763672f, +0.73265427350997924805f,0.67835003137588500977f,0.73473888635635375977f, +0.67609268426895141602f,0.73681658506393432617f,0.67382901906967163086f, +0.73888731002807617188f,0.67155897617340087891f,0.74095112085342407227f, +0.66928261518478393555f,0.74300795793533325195f,0.66699993610382080078f, +0.74505776166915893555f,0.66471099853515625000f,0.74710059165954589844f, +0.66241580247879028320f,0.74913638830184936523f,0.66011434793472290039f, +0.75116515159606933594f,0.65780669450759887695f,0.75318682193756103516f, +0.65549284219741821289f,0.75520139932632446289f,0.65317285060882568359f, +0.75720882415771484375f,0.65084666013717651367f,0.75920921564102172852f, +0.64851438999176025391f,0.76120239496231079102f,0.64617604017257690430f, +0.76318842172622680664f,0.64383155107498168945f,0.76516723632812500000f, +0.64148104190826416016f,0.76713889837265014648f,0.63912445306777954102f, +0.76910334825515747070f,0.63676184415817260742f,0.77106052637100219727f, +0.63439327478408813477f,0.77301043272018432617f,0.63201874494552612305f, +0.77495312690734863281f,0.62963825464248657227f,0.77688848972320556641f, +0.62725180387496948242f,0.77881652116775512695f,0.62485951185226440430f, +0.78073722124099731445f,0.62246125936508178711f,0.78265058994293212891f, +0.62005722522735595703f,0.78455656766891479492f,0.61764729022979736328f, +0.78645521402359008789f,0.61523157358169555664f,0.78834640979766845703f, +0.61281007528305053711f,0.79023021459579467773f,0.61038279533386230469f, +0.79210656881332397461f,0.60794979333877563477f,0.79397547245025634766f, +0.60551106929779052734f,0.79583692550659179688f,0.60306662321090698242f, +0.79769086837768554688f,0.60061645507812500000f,0.79953724145889282227f, +0.59816068410873413086f,0.80137616395950317383f,0.59569931030273437500f, +0.80320751667022705078f,0.59323227405548095703f,0.80503135919570922852f, +0.59075969457626342773f,0.80684757232666015625f,0.58828157186508178711f, +0.80865615606307983398f,0.58579784631729125977f,0.81045717000961303711f, +0.58330863714218139648f,0.81225061416625976562f,0.58081394433975219727f, +0.81403630971908569336f,0.57831376791000366211f,0.81581443548202514648f, +0.57580816745758056641f,0.81758481264114379883f,0.57329714298248291016f, +0.81934750080108642578f,0.57078075408935546875f,0.82110249996185302734f, +0.56825894117355346680f,0.82284981012344360352f,0.56573182344436645508f, +0.82458931207656860352f,0.56319934129714965820f,0.82632106542587280273f, +0.56066155433654785156f,0.82804507017135620117f,0.55811852216720581055f, +0.82976120710372924805f,0.55557024478912353516f,0.83146959543228149414f, +0.55301672220230102539f,0.83317017555236816406f,0.55045795440673828125f, +0.83486288785934448242f,0.54789406061172485352f,0.83654773235321044922f, +0.54532498121261596680f,0.83822470903396606445f,0.54275077581405639648f, 
+0.83989381790161132812f,0.54017144441604614258f,0.84155499935150146484f, +0.53758704662322998047f,0.84320825338363647461f,0.53499764204025268555f, +0.84485357999801635742f,0.53240311145782470703f,0.84649091958999633789f, +0.52980363368988037109f,0.84812033176422119141f,0.52719914913177490234f, +0.84974175691604614258f,0.52458965778350830078f,0.85135519504547119141f, +0.52197527885437011719f,0.85296058654785156250f,0.51935601234436035156f, +0.85455799102783203125f,0.51673179864883422852f,0.85614734888076782227f, +0.51410275697708129883f,0.85772860050201416016f,0.51146882772445678711f, +0.85930180549621582031f,0.50883013010025024414f,0.86086696386337280273f, +0.50618666410446166992f,0.86242395639419555664f,0.50353837013244628906f, +0.86397284269332885742f,0.50088536739349365234f,0.86551362276077270508f, +0.49822765588760375977f,0.86704623699188232422f,0.49556526541709899902f, +0.86857068538665771484f,0.49289819598197937012f,0.87008696794509887695f, +0.49022647738456726074f,0.87159508466720581055f,0.48755016922950744629f, +0.87309497594833374023f,0.48486924171447753906f,0.87458664178848266602f, +0.48218378424644470215f,0.87607008218765258789f,0.47949376702308654785f, +0.87754529714584350586f,0.47679921984672546387f,0.87901222705841064453f, +0.47410020232200622559f,0.88047087192535400391f,0.47139674425125122070f, +0.88192129135131835938f,0.46868881583213806152f,0.88336336612701416016f, +0.46597650647163391113f,0.88479709625244140625f,0.46325978636741638184f, +0.88622254133224487305f,0.46053871512413024902f,0.88763964176177978516f, +0.45781329274177551270f,0.88904833793640136719f,0.45508357882499694824f, +0.89044874906539916992f,0.45234957337379455566f,0.89184069633483886719f, +0.44961133599281311035f,0.89322429895401000977f,0.44686883687973022461f, +0.89459949731826782227f,0.44412213563919067383f,0.89596623182296752930f, +0.44137126207351684570f,0.89732456207275390625f,0.43861624598503112793f, +0.89867448806762695312f,0.43585708737373352051f,0.90001589059829711914f, +0.43309381604194641113f,0.90134882926940917969f,0.43032649159431457520f, +0.90267330408096313477f,0.42755508422851562500f,0.90398931503295898438f, +0.42477968335151672363f,0.90529674291610717773f,0.42200025916099548340f, +0.90659570693969726562f,0.41921690106391906738f,0.90788608789443969727f, +0.41642954945564270020f,0.90916800498962402344f,0.41363832354545593262f, +0.91044127941131591797f,0.41084316372871398926f,0.91170603036880493164f, +0.40804415941238403320f,0.91296219825744628906f,0.40524131059646606445f, +0.91420978307723999023f,0.40243464708328247070f,0.91544872522354125977f, +0.39962419867515563965f,0.91667908430099487305f,0.39680999517440795898f, +0.91790080070495605469f,0.39399203658103942871f,0.91911387443542480469f, +0.39117038249969482422f,0.92031830549240112305f,0.38834503293037414551f, +0.92151403427124023438f,0.38551604747772216797f,0.92270112037658691406f, +0.38268342614173889160f,0.92387950420379638672f,0.37984719872474670410f, +0.92504924535751342773f,0.37700742483139038086f,0.92621022462844848633f, +0.37416407465934753418f,0.92736250162124633789f,0.37131720781326293945f, +0.92850607633590698242f,0.36846682429313659668f,0.92964088916778564453f, +0.36561298370361328125f,0.93076694011688232422f,0.36275571584701538086f, +0.93188428878784179688f,0.35989505052566528320f,0.93299281597137451172f, +0.35703095793724060059f,0.93409252166748046875f,0.35416352748870849609f, +0.93518352508544921875f,0.35129275918006896973f,0.93626564741134643555f, +0.34841868281364440918f,0.93733900785446166992f,0.34554132819175720215f, 
+0.93840354681015014648f,0.34266072511672973633f,0.93945920467376708984f, +0.33977687358856201172f,0.94050604104995727539f,0.33688986301422119141f, +0.94154405593872070312f,0.33399966359138488770f,0.94257318973541259766f, +0.33110630512237548828f,0.94359344244003295898f,0.32820984721183776855f, +0.94460481405258178711f,0.32531028985977172852f,0.94560730457305908203f, +0.32240769267082214355f,0.94660091400146484375f,0.31950202584266662598f, +0.94758558273315429688f,0.31659337878227233887f,0.94856137037277221680f, +0.31368175148963928223f,0.94952815771102905273f,0.31076714396476745605f, +0.95048606395721435547f,0.30784964561462402344f,0.95143502950668334961f, +0.30492922663688659668f,0.95237499475479125977f,0.30200594663619995117f, +0.95330601930618286133f,0.29907983541488647461f,0.95422810316085815430f, +0.29615089297294616699f,0.95514118671417236328f,0.29321914911270141602f, +0.95604526996612548828f,0.29028466343879699707f,0.95694035291671752930f, +0.28734746575355529785f,0.95782643556594848633f,0.28440752625465393066f, +0.95870345830917358398f,0.28146493434906005859f,0.95957154035568237305f, +0.27851969003677368164f,0.96043050289154052734f,0.27557182312011718750f, +0.96128046512603759766f,0.27262136340141296387f,0.96212142705917358398f, +0.26966831088066101074f,0.96295326948165893555f,0.26671275496482849121f, +0.96377605199813842773f,0.26375466585159301758f,0.96458977460861206055f, +0.26079410314559936523f,0.96539443731307983398f,0.25783109664916992188f, +0.96618998050689697266f,0.25486564636230468750f,0.96697646379470825195f, +0.25189781188964843750f,0.96775382757186889648f,0.24892760813236236572f, +0.96852207183837890625f,0.24595504999160766602f,0.96928125619888305664f, +0.24298018217086791992f,0.97003126144409179688f,0.24000301957130432129f, +0.97077214717864990234f,0.23702360689640045166f,0.97150391340255737305f, +0.23404195904731750488f,0.97222650051116943359f,0.23105810582637786865f, +0.97293996810913085938f,0.22807207703590393066f,0.97364425659179687500f, +0.22508391737937927246f,0.97433936595916748047f,0.22209362685680389404f, +0.97502535581588745117f,0.21910123527050018311f,0.97570210695266723633f, +0.21610680222511291504f,0.97636973857879638672f,0.21311031281948089600f, +0.97702813148498535156f,0.21011184155941009521f,0.97767734527587890625f, +0.20711137354373931885f,0.97831737995147705078f,0.20410896837711334229f, +0.97894817590713500977f,0.20110464096069335938f,0.97956979274749755859f, +0.19809840619564056396f,0.98018211126327514648f,0.19509032368659973145f, +0.98078525066375732422f,0.19208039343357086182f,0.98137921094894409180f, +0.18906866014003753662f,0.98196387290954589844f,0.18605515360832214355f, +0.98253929615020751953f,0.18303988873958587646f,0.98310548067092895508f, +0.18002289533615112305f,0.98366242647171020508f,0.17700421810150146484f, +0.98421007394790649414f,0.17398387193679809570f,0.98474848270416259766f, +0.17096188664436340332f,0.98527765274047851562f,0.16793829202651977539f, +0.98579752445220947266f,0.16491311788558959961f,0.98630809783935546875f, +0.16188639402389526367f,0.98680937290191650391f,0.15885815024375915527f, +0.98730140924453735352f,0.15582840144634246826f,0.98778414726257324219f, +0.15279719233512878418f,0.98825758695602416992f,0.14976453781127929688f, +0.98872166872024536133f,0.14673046767711639404f,0.98917651176452636719f, +0.14369502663612365723f,0.98962199687957763672f,0.14065824449062347412f, +0.99005818367004394531f,0.13762012124061584473f,0.99048507213592529297f, +0.13458070158958435059f,0.99090266227722167969f,0.13154003024101257324f, 
+0.99131083488464355469f,0.12849810719490051270f,0.99170976877212524414f, +0.12545497715473175049f,0.99209928512573242188f,0.12241067737340927124f, +0.99247956275939941406f,0.11936521530151367188f,0.99285042285919189453f, +0.11631862819194793701f,0.99321192502975463867f,0.11327095329761505127f, +0.99356412887573242188f,0.11022220551967620850f,0.99390697479248046875f, +0.10717242211103439331f,0.99424046277999877930f,0.10412163287401199341f, +0.99456459283828735352f,0.10106986016035079956f,0.99487930536270141602f, +0.09801714122295379639f,0.99518471956253051758f,0.09496349841356277466f, +0.99548077583312988281f,0.09190895408391952515f,0.99576741456985473633f, +0.08885355293750762939f,0.99604469537734985352f,0.08579730987548828125f, +0.99631261825561523438f,0.08274026215076446533f,0.99657112360000610352f, +0.07968243956565856934f,0.99682027101516723633f,0.07662386447191238403f, +0.99706006050109863281f,0.07356456667184829712f,0.99729043245315551758f, +0.07050457596778869629f,0.99751144647598266602f,0.06744392216205596924f, +0.99772304296493530273f,0.06438262760639190674f,0.99792528152465820312f, +0.06132073700428009033f,0.99811810255050659180f,0.05825826525688171387f, +0.99830156564712524414f,0.05519524589180946350f,0.99847555160522460938f, +0.05213170498609542847f,0.99864023923873901367f,0.04906767606735229492f, +0.99879544973373413086f,0.04600318148732185364f,0.99894130229949951172f, +0.04293825849890708923f,0.99907773733139038086f,0.03987292572855949402f, +0.99920475482940673828f,0.03680722415447235107f,0.99932235479354858398f, +0.03374117240309715271f,0.99943059682846069336f,0.03067480400204658508f, +0.99952942132949829102f,0.02760814502835273743f,0.99961882829666137695f, +0.02454122900962829590f,0.99969881772994995117f,0.02147408016026020050f, +0.99976938962936401367f,0.01840673014521598816f,0.99983060359954833984f, +0.01533920597285032272f,0.99988234043121337891f,0.01227153837680816650f, +0.99992471933364868164f,0.00920375436544418335f,0.99995762109756469727f, +0.00613588467240333557f,0.99998116493225097656f,0.00306795677170157433f, +0.99999529123306274414f,0.00000000000000006123f,1.00000000000000000000f, +-0.00306795677170157433f,0.99999529123306274414f,-0.00613588467240333557f, +0.99998116493225097656f,-0.00920375436544418335f,0.99995762109756469727f, +-0.01227153837680816650f,0.99992471933364868164f,-0.01533920597285032272f, +0.99988234043121337891f,-0.01840673014521598816f,0.99983060359954833984f, +-0.02147408016026020050f,0.99976938962936401367f,-0.02454122900962829590f, +0.99969881772994995117f,-0.02760814502835273743f,0.99961882829666137695f, +-0.03067480400204658508f,0.99952942132949829102f,-0.03374117240309715271f, +0.99943059682846069336f,-0.03680722415447235107f,0.99932235479354858398f, +-0.03987292572855949402f,0.99920475482940673828f,-0.04293825849890708923f, +0.99907773733139038086f,-0.04600318148732185364f,0.99894130229949951172f, +-0.04906767606735229492f,0.99879544973373413086f,-0.05213170498609542847f, +0.99864023923873901367f,-0.05519524589180946350f,0.99847555160522460938f, +-0.05825826525688171387f,0.99830156564712524414f,-0.06132073700428009033f, +0.99811810255050659180f,-0.06438262760639190674f,0.99792528152465820312f, +-0.06744392216205596924f,0.99772304296493530273f,-0.07050457596778869629f, +0.99751144647598266602f,-0.07356456667184829712f,0.99729043245315551758f, +-0.07662386447191238403f,0.99706006050109863281f,-0.07968243956565856934f, +0.99682027101516723633f,-0.08274026215076446533f,0.99657112360000610352f, 
+-0.08579730987548828125f,0.99631261825561523438f,-0.08885355293750762939f, +0.99604469537734985352f,-0.09190895408391952515f,0.99576741456985473633f, +-0.09496349841356277466f,0.99548077583312988281f,-0.09801714122295379639f, +0.99518471956253051758f,-0.10106986016035079956f,0.99487930536270141602f, +-0.10412163287401199341f,0.99456459283828735352f,-0.10717242211103439331f, +0.99424046277999877930f,-0.11022220551967620850f,0.99390697479248046875f, +-0.11327095329761505127f,0.99356412887573242188f,-0.11631862819194793701f, +0.99321192502975463867f,-0.11936521530151367188f,0.99285042285919189453f, +-0.12241067737340927124f,0.99247956275939941406f,-0.12545497715473175049f, +0.99209928512573242188f,-0.12849810719490051270f,0.99170976877212524414f, +-0.13154003024101257324f,0.99131083488464355469f,-0.13458070158958435059f, +0.99090266227722167969f,-0.13762012124061584473f,0.99048507213592529297f, +-0.14065824449062347412f,0.99005818367004394531f,-0.14369502663612365723f, +0.98962199687957763672f,-0.14673046767711639404f,0.98917651176452636719f, +-0.14976453781127929688f,0.98872166872024536133f,-0.15279719233512878418f, +0.98825758695602416992f,-0.15582840144634246826f,0.98778414726257324219f, +-0.15885815024375915527f,0.98730140924453735352f,-0.16188639402389526367f, +0.98680937290191650391f,-0.16491311788558959961f,0.98630809783935546875f, +-0.16793829202651977539f,0.98579752445220947266f,-0.17096188664436340332f, +0.98527765274047851562f,-0.17398387193679809570f,0.98474848270416259766f, +-0.17700421810150146484f,0.98421007394790649414f,-0.18002289533615112305f, +0.98366242647171020508f,-0.18303988873958587646f,0.98310548067092895508f, +-0.18605515360832214355f,0.98253929615020751953f,-0.18906866014003753662f, +0.98196387290954589844f,-0.19208039343357086182f,0.98137921094894409180f, +-0.19509032368659973145f,0.98078525066375732422f,-0.19809840619564056396f, +0.98018211126327514648f,-0.20110464096069335938f,0.97956979274749755859f, +-0.20410896837711334229f,0.97894817590713500977f,-0.20711137354373931885f, +0.97831737995147705078f,-0.21011184155941009521f,0.97767734527587890625f, +-0.21311031281948089600f,0.97702813148498535156f,-0.21610680222511291504f, +0.97636973857879638672f,-0.21910123527050018311f,0.97570210695266723633f, +-0.22209362685680389404f,0.97502535581588745117f,-0.22508391737937927246f, +0.97433936595916748047f,-0.22807207703590393066f,0.97364425659179687500f, +-0.23105810582637786865f,0.97293996810913085938f,-0.23404195904731750488f, +0.97222650051116943359f,-0.23702360689640045166f,0.97150391340255737305f, +-0.24000301957130432129f,0.97077214717864990234f,-0.24298018217086791992f, +0.97003126144409179688f,-0.24595504999160766602f,0.96928125619888305664f, +-0.24892760813236236572f,0.96852207183837890625f,-0.25189781188964843750f, +0.96775382757186889648f,-0.25486564636230468750f,0.96697646379470825195f, +-0.25783109664916992188f,0.96618998050689697266f,-0.26079410314559936523f, +0.96539443731307983398f,-0.26375466585159301758f,0.96458977460861206055f, +-0.26671275496482849121f,0.96377605199813842773f,-0.26966831088066101074f, +0.96295326948165893555f,-0.27262136340141296387f,0.96212142705917358398f, +-0.27557182312011718750f,0.96128046512603759766f,-0.27851969003677368164f, +0.96043050289154052734f,-0.28146493434906005859f,0.95957154035568237305f, +-0.28440752625465393066f,0.95870345830917358398f,-0.28734746575355529785f, +0.95782643556594848633f,-0.29028466343879699707f,0.95694035291671752930f, +-0.29321914911270141602f,0.95604526996612548828f,-0.29615089297294616699f, 
+0.95514118671417236328f,-0.29907983541488647461f,0.95422810316085815430f, +-0.30200594663619995117f,0.95330601930618286133f,-0.30492922663688659668f, +0.95237499475479125977f,-0.30784964561462402344f,0.95143502950668334961f, +-0.31076714396476745605f,0.95048606395721435547f,-0.31368175148963928223f, +0.94952815771102905273f,-0.31659337878227233887f,0.94856137037277221680f, +-0.31950202584266662598f,0.94758558273315429688f,-0.32240769267082214355f, +0.94660091400146484375f,-0.32531028985977172852f,0.94560730457305908203f, +-0.32820984721183776855f,0.94460481405258178711f,-0.33110630512237548828f, +0.94359344244003295898f,-0.33399966359138488770f,0.94257318973541259766f, +-0.33688986301422119141f,0.94154405593872070312f,-0.33977687358856201172f, +0.94050604104995727539f,-0.34266072511672973633f,0.93945920467376708984f, +-0.34554132819175720215f,0.93840354681015014648f,-0.34841868281364440918f, +0.93733900785446166992f,-0.35129275918006896973f,0.93626564741134643555f, +-0.35416352748870849609f,0.93518352508544921875f,-0.35703095793724060059f, +0.93409252166748046875f,-0.35989505052566528320f,0.93299281597137451172f, +-0.36275571584701538086f,0.93188428878784179688f,-0.36561298370361328125f, +0.93076694011688232422f,-0.36846682429313659668f,0.92964088916778564453f, +-0.37131720781326293945f,0.92850607633590698242f,-0.37416407465934753418f, +0.92736250162124633789f,-0.37700742483139038086f,0.92621022462844848633f, +-0.37984719872474670410f,0.92504924535751342773f,-0.38268342614173889160f, +0.92387950420379638672f,-0.38551604747772216797f,0.92270112037658691406f, +-0.38834503293037414551f,0.92151403427124023438f,-0.39117038249969482422f, +0.92031830549240112305f,-0.39399203658103942871f,0.91911387443542480469f, +-0.39680999517440795898f,0.91790080070495605469f,-0.39962419867515563965f, +0.91667908430099487305f,-0.40243464708328247070f,0.91544872522354125977f, +-0.40524131059646606445f,0.91420978307723999023f,-0.40804415941238403320f, +0.91296219825744628906f,-0.41084316372871398926f,0.91170603036880493164f, +-0.41363832354545593262f,0.91044127941131591797f,-0.41642954945564270020f, +0.90916800498962402344f,-0.41921690106391906738f,0.90788608789443969727f, +-0.42200025916099548340f,0.90659570693969726562f,-0.42477968335151672363f, +0.90529674291610717773f,-0.42755508422851562500f,0.90398931503295898438f, +-0.43032649159431457520f,0.90267330408096313477f,-0.43309381604194641113f, +0.90134882926940917969f,-0.43585708737373352051f,0.90001589059829711914f, +-0.43861624598503112793f,0.89867448806762695312f,-0.44137126207351684570f, +0.89732456207275390625f,-0.44412213563919067383f,0.89596623182296752930f, +-0.44686883687973022461f,0.89459949731826782227f,-0.44961133599281311035f, +0.89322429895401000977f,-0.45234957337379455566f,0.89184069633483886719f, +-0.45508357882499694824f,0.89044874906539916992f,-0.45781329274177551270f, +0.88904833793640136719f,-0.46053871512413024902f,0.88763964176177978516f, +-0.46325978636741638184f,0.88622254133224487305f,-0.46597650647163391113f, +0.88479709625244140625f,-0.46868881583213806152f,0.88336336612701416016f, +-0.47139674425125122070f,0.88192129135131835938f,-0.47410020232200622559f, +0.88047087192535400391f,-0.47679921984672546387f,0.87901222705841064453f, +-0.47949376702308654785f,0.87754529714584350586f,-0.48218378424644470215f, +0.87607008218765258789f,-0.48486924171447753906f,0.87458664178848266602f, +-0.48755016922950744629f,0.87309497594833374023f,-0.49022647738456726074f, +0.87159508466720581055f,-0.49289819598197937012f,0.87008696794509887695f, 
+-0.49556526541709899902f,0.86857068538665771484f,-0.49822765588760375977f, +0.86704623699188232422f,-0.50088536739349365234f,0.86551362276077270508f, +-0.50353837013244628906f,0.86397284269332885742f,-0.50618666410446166992f, +0.86242395639419555664f,-0.50883013010025024414f,0.86086696386337280273f, +-0.51146882772445678711f,0.85930180549621582031f,-0.51410275697708129883f, +0.85772860050201416016f,-0.51673179864883422852f,0.85614734888076782227f, +-0.51935601234436035156f,0.85455799102783203125f,-0.52197527885437011719f, +0.85296058654785156250f,-0.52458965778350830078f,0.85135519504547119141f, +-0.52719914913177490234f,0.84974175691604614258f,-0.52980363368988037109f, +0.84812033176422119141f,-0.53240311145782470703f,0.84649091958999633789f, +-0.53499764204025268555f,0.84485357999801635742f,-0.53758704662322998047f, +0.84320825338363647461f,-0.54017144441604614258f,0.84155499935150146484f, +-0.54275077581405639648f,0.83989381790161132812f,-0.54532498121261596680f, +0.83822470903396606445f,-0.54789406061172485352f,0.83654773235321044922f, +-0.55045795440673828125f,0.83486288785934448242f,-0.55301672220230102539f, +0.83317017555236816406f,-0.55557024478912353516f,0.83146959543228149414f, +-0.55811852216720581055f,0.82976120710372924805f,-0.56066155433654785156f, +0.82804507017135620117f,-0.56319934129714965820f,0.82632106542587280273f, +-0.56573182344436645508f,0.82458931207656860352f,-0.56825894117355346680f, +0.82284981012344360352f,-0.57078075408935546875f,0.82110249996185302734f, +-0.57329714298248291016f,0.81934750080108642578f,-0.57580816745758056641f, +0.81758481264114379883f,-0.57831376791000366211f,0.81581443548202514648f, +-0.58081394433975219727f,0.81403630971908569336f,-0.58330863714218139648f, +0.81225061416625976562f,-0.58579784631729125977f,0.81045717000961303711f, +-0.58828157186508178711f,0.80865615606307983398f,-0.59075969457626342773f, +0.80684757232666015625f,-0.59323227405548095703f,0.80503135919570922852f, +-0.59569931030273437500f,0.80320751667022705078f,-0.59816068410873413086f, +0.80137616395950317383f,-0.60061645507812500000f,0.79953724145889282227f, +-0.60306662321090698242f,0.79769086837768554688f,-0.60551106929779052734f, +0.79583692550659179688f,-0.60794979333877563477f,0.79397547245025634766f, +-0.61038279533386230469f,0.79210656881332397461f,-0.61281007528305053711f, +0.79023021459579467773f,-0.61523157358169555664f,0.78834640979766845703f, +-0.61764729022979736328f,0.78645521402359008789f,-0.62005722522735595703f, +0.78455656766891479492f,-0.62246125936508178711f,0.78265058994293212891f, +-0.62485951185226440430f,0.78073722124099731445f,-0.62725180387496948242f, +0.77881652116775512695f,-0.62963825464248657227f,0.77688848972320556641f, +-0.63201874494552612305f,0.77495312690734863281f,-0.63439327478408813477f, +0.77301043272018432617f,-0.63676184415817260742f,0.77106052637100219727f, +-0.63912445306777954102f,0.76910334825515747070f,-0.64148104190826416016f, +0.76713889837265014648f,-0.64383155107498168945f,0.76516723632812500000f, +-0.64617604017257690430f,0.76318842172622680664f,-0.64851438999176025391f, +0.76120239496231079102f,-0.65084666013717651367f,0.75920921564102172852f, +-0.65317285060882568359f,0.75720882415771484375f,-0.65549284219741821289f, +0.75520139932632446289f,-0.65780669450759887695f,0.75318682193756103516f, +-0.66011434793472290039f,0.75116515159606933594f,-0.66241580247879028320f, +0.74913638830184936523f,-0.66471099853515625000f,0.74710059165954589844f, +-0.66699993610382080078f,0.74505776166915893555f,-0.66928261518478393555f, 
+0.74300795793533325195f,-0.67155897617340087891f,0.74095112085342407227f, +-0.67382901906967163086f,0.73888731002807617188f,-0.67609268426895141602f, +0.73681658506393432617f,-0.67835003137588500977f,0.73473888635635375977f, +-0.68060100078582763672f,0.73265427350997924805f,-0.68284553289413452148f, +0.73056274652481079102f,-0.68508368730545043945f,0.72846436500549316406f, +-0.68731534481048583984f,0.72635912895202636719f,-0.68954056501388549805f, +0.72424709796905517578f,-0.69175922870635986328f,0.72212821245193481445f, +-0.69397145509719848633f,0.72000253200531005859f,-0.69617712497711181641f, +0.71787005662918090820f,-0.69837623834609985352f,0.71573084592819213867f, +-0.70056879520416259766f,0.71358484029769897461f,-0.70275473594665527344f, +0.71143221855163574219f,-0.70493406057357788086f,0.70927280187606811523f, +-0.70710676908493041992f,0.70710676908493041992f,-0.70927280187606811523f, +0.70493406057357788086f,-0.71143221855163574219f,0.70275473594665527344f, +-0.71358484029769897461f,0.70056879520416259766f,-0.71573084592819213867f, +0.69837623834609985352f,-0.71787005662918090820f,0.69617712497711181641f, +-0.72000253200531005859f,0.69397145509719848633f,-0.72212821245193481445f, +0.69175922870635986328f,-0.72424709796905517578f,0.68954056501388549805f, +-0.72635912895202636719f,0.68731534481048583984f,-0.72846436500549316406f, +0.68508368730545043945f,-0.73056274652481079102f,0.68284553289413452148f, +-0.73265427350997924805f,0.68060100078582763672f,-0.73473888635635375977f, +0.67835003137588500977f,-0.73681658506393432617f,0.67609268426895141602f, +-0.73888731002807617188f,0.67382901906967163086f,-0.74095112085342407227f, +0.67155897617340087891f,-0.74300795793533325195f,0.66928261518478393555f, +-0.74505776166915893555f,0.66699993610382080078f,-0.74710059165954589844f, +0.66471099853515625000f,-0.74913638830184936523f,0.66241580247879028320f, +-0.75116515159606933594f,0.66011434793472290039f,-0.75318682193756103516f, +0.65780669450759887695f,-0.75520139932632446289f,0.65549284219741821289f, +-0.75720882415771484375f,0.65317285060882568359f,-0.75920921564102172852f, +0.65084666013717651367f,-0.76120239496231079102f,0.64851438999176025391f, +-0.76318842172622680664f,0.64617604017257690430f,-0.76516723632812500000f, +0.64383155107498168945f,-0.76713889837265014648f,0.64148104190826416016f, +-0.76910334825515747070f,0.63912445306777954102f,-0.77106052637100219727f, +0.63676184415817260742f,-0.77301043272018432617f,0.63439327478408813477f, +-0.77495312690734863281f,0.63201874494552612305f,-0.77688848972320556641f, +0.62963825464248657227f,-0.77881652116775512695f,0.62725180387496948242f, +-0.78073722124099731445f,0.62485951185226440430f,-0.78265058994293212891f, +0.62246125936508178711f,-0.78455656766891479492f,0.62005722522735595703f, +-0.78645521402359008789f,0.61764729022979736328f,-0.78834640979766845703f, +0.61523157358169555664f,-0.79023021459579467773f,0.61281007528305053711f, +-0.79210656881332397461f,0.61038279533386230469f,-0.79397547245025634766f, +0.60794979333877563477f,-0.79583692550659179688f,0.60551106929779052734f, +-0.79769086837768554688f,0.60306662321090698242f,-0.79953724145889282227f, +0.60061645507812500000f,-0.80137616395950317383f,0.59816068410873413086f, +-0.80320751667022705078f,0.59569931030273437500f,-0.80503135919570922852f, +0.59323227405548095703f,-0.80684757232666015625f,0.59075969457626342773f, +-0.80865615606307983398f,0.58828157186508178711f,-0.81045717000961303711f, +0.58579784631729125977f,-0.81225061416625976562f,0.58330863714218139648f, 
+-0.81403630971908569336f,0.58081394433975219727f,-0.81581443548202514648f, +0.57831376791000366211f,-0.81758481264114379883f,0.57580816745758056641f, +-0.81934750080108642578f,0.57329714298248291016f,-0.82110249996185302734f, +0.57078075408935546875f,-0.82284981012344360352f,0.56825894117355346680f, +-0.82458931207656860352f,0.56573182344436645508f,-0.82632106542587280273f, +0.56319934129714965820f,-0.82804507017135620117f,0.56066155433654785156f, +-0.82976120710372924805f,0.55811852216720581055f,-0.83146959543228149414f, +0.55557024478912353516f,-0.83317017555236816406f,0.55301672220230102539f, +-0.83486288785934448242f,0.55045795440673828125f,-0.83654773235321044922f, +0.54789406061172485352f,-0.83822470903396606445f,0.54532498121261596680f, +-0.83989381790161132812f,0.54275077581405639648f,-0.84155499935150146484f, +0.54017144441604614258f,-0.84320825338363647461f,0.53758704662322998047f, +-0.84485357999801635742f,0.53499764204025268555f,-0.84649091958999633789f, +0.53240311145782470703f,-0.84812033176422119141f,0.52980363368988037109f, +-0.84974175691604614258f,0.52719914913177490234f,-0.85135519504547119141f, +0.52458965778350830078f,-0.85296058654785156250f,0.52197527885437011719f, +-0.85455799102783203125f,0.51935601234436035156f,-0.85614734888076782227f, +0.51673179864883422852f,-0.85772860050201416016f,0.51410275697708129883f, +-0.85930180549621582031f,0.51146882772445678711f,-0.86086696386337280273f, +0.50883013010025024414f,-0.86242395639419555664f,0.50618666410446166992f, +-0.86397284269332885742f,0.50353837013244628906f,-0.86551362276077270508f, +0.50088536739349365234f,-0.86704623699188232422f,0.49822765588760375977f, +-0.86857068538665771484f,0.49556526541709899902f,-0.87008696794509887695f, +0.49289819598197937012f,-0.87159508466720581055f,0.49022647738456726074f, +-0.87309497594833374023f,0.48755016922950744629f,-0.87458664178848266602f, +0.48486924171447753906f,-0.87607008218765258789f,0.48218378424644470215f, +-0.87754529714584350586f,0.47949376702308654785f,-0.87901222705841064453f, +0.47679921984672546387f,-0.88047087192535400391f,0.47410020232200622559f, +-0.88192129135131835938f,0.47139674425125122070f,-0.88336336612701416016f, +0.46868881583213806152f,-0.88479709625244140625f,0.46597650647163391113f, +-0.88622254133224487305f,0.46325978636741638184f,-0.88763964176177978516f, +0.46053871512413024902f,-0.88904833793640136719f,0.45781329274177551270f, +-0.89044874906539916992f,0.45508357882499694824f,-0.89184069633483886719f, +0.45234957337379455566f,-0.89322429895401000977f,0.44961133599281311035f, +-0.89459949731826782227f,0.44686883687973022461f,-0.89596623182296752930f, +0.44412213563919067383f,-0.89732456207275390625f,0.44137126207351684570f, +-0.89867448806762695312f,0.43861624598503112793f,-0.90001589059829711914f, +0.43585708737373352051f,-0.90134882926940917969f,0.43309381604194641113f, +-0.90267330408096313477f,0.43032649159431457520f,-0.90398931503295898438f, +0.42755508422851562500f,-0.90529674291610717773f,0.42477968335151672363f, +-0.90659570693969726562f,0.42200025916099548340f,-0.90788608789443969727f, +0.41921690106391906738f,-0.90916800498962402344f,0.41642954945564270020f, +-0.91044127941131591797f,0.41363832354545593262f,-0.91170603036880493164f, +0.41084316372871398926f,-0.91296219825744628906f,0.40804415941238403320f, +-0.91420978307723999023f,0.40524131059646606445f,-0.91544872522354125977f, +0.40243464708328247070f,-0.91667908430099487305f,0.39962419867515563965f, +-0.91790080070495605469f,0.39680999517440795898f,-0.91911387443542480469f, 
+0.39399203658103942871f,-0.92031830549240112305f,0.39117038249969482422f, +-0.92151403427124023438f,0.38834503293037414551f,-0.92270112037658691406f, +0.38551604747772216797f,-0.92387950420379638672f,0.38268342614173889160f, +-0.92504924535751342773f,0.37984719872474670410f,-0.92621022462844848633f, +0.37700742483139038086f,-0.92736250162124633789f,0.37416407465934753418f, +-0.92850607633590698242f,0.37131720781326293945f,-0.92964088916778564453f, +0.36846682429313659668f,-0.93076694011688232422f,0.36561298370361328125f, +-0.93188428878784179688f,0.36275571584701538086f,-0.93299281597137451172f, +0.35989505052566528320f,-0.93409252166748046875f,0.35703095793724060059f, +-0.93518352508544921875f,0.35416352748870849609f,-0.93626564741134643555f, +0.35129275918006896973f,-0.93733900785446166992f,0.34841868281364440918f, +-0.93840354681015014648f,0.34554132819175720215f,-0.93945920467376708984f, +0.34266072511672973633f,-0.94050604104995727539f,0.33977687358856201172f, +-0.94154405593872070312f,0.33688986301422119141f,-0.94257318973541259766f, +0.33399966359138488770f,-0.94359344244003295898f,0.33110630512237548828f, +-0.94460481405258178711f,0.32820984721183776855f,-0.94560730457305908203f, +0.32531028985977172852f,-0.94660091400146484375f,0.32240769267082214355f, +-0.94758558273315429688f,0.31950202584266662598f,-0.94856137037277221680f, +0.31659337878227233887f,-0.94952815771102905273f,0.31368175148963928223f, +-0.95048606395721435547f,0.31076714396476745605f,-0.95143502950668334961f, +0.30784964561462402344f,-0.95237499475479125977f,0.30492922663688659668f, +-0.95330601930618286133f,0.30200594663619995117f,-0.95422810316085815430f, +0.29907983541488647461f,-0.95514118671417236328f,0.29615089297294616699f, +-0.95604526996612548828f,0.29321914911270141602f,-0.95694035291671752930f, +0.29028466343879699707f,-0.95782643556594848633f,0.28734746575355529785f, +-0.95870345830917358398f,0.28440752625465393066f,-0.95957154035568237305f, +0.28146493434906005859f,-0.96043050289154052734f,0.27851969003677368164f, +-0.96128046512603759766f,0.27557182312011718750f,-0.96212142705917358398f, +0.27262136340141296387f,-0.96295326948165893555f,0.26966831088066101074f, +-0.96377605199813842773f,0.26671275496482849121f,-0.96458977460861206055f, +0.26375466585159301758f,-0.96539443731307983398f,0.26079410314559936523f, +-0.96618998050689697266f,0.25783109664916992188f,-0.96697646379470825195f, +0.25486564636230468750f,-0.96775382757186889648f,0.25189781188964843750f, +-0.96852207183837890625f,0.24892760813236236572f,-0.96928125619888305664f, +0.24595504999160766602f,-0.97003126144409179688f,0.24298018217086791992f, +-0.97077214717864990234f,0.24000301957130432129f,-0.97150391340255737305f, +0.23702360689640045166f,-0.97222650051116943359f,0.23404195904731750488f, +-0.97293996810913085938f,0.23105810582637786865f,-0.97364425659179687500f, +0.22807207703590393066f,-0.97433936595916748047f,0.22508391737937927246f, +-0.97502535581588745117f,0.22209362685680389404f,-0.97570210695266723633f, +0.21910123527050018311f,-0.97636973857879638672f,0.21610680222511291504f, +-0.97702813148498535156f,0.21311031281948089600f,-0.97767734527587890625f, +0.21011184155941009521f,-0.97831737995147705078f,0.20711137354373931885f, +-0.97894817590713500977f,0.20410896837711334229f,-0.97956979274749755859f, +0.20110464096069335938f,-0.98018211126327514648f,0.19809840619564056396f, +-0.98078525066375732422f,0.19509032368659973145f,-0.98137921094894409180f, +0.19208039343357086182f,-0.98196387290954589844f,0.18906866014003753662f, 
+-0.98253929615020751953f,0.18605515360832214355f,-0.98310548067092895508f, +0.18303988873958587646f,-0.98366242647171020508f,0.18002289533615112305f, +-0.98421007394790649414f,0.17700421810150146484f,-0.98474848270416259766f, +0.17398387193679809570f,-0.98527765274047851562f,0.17096188664436340332f, +-0.98579752445220947266f,0.16793829202651977539f,-0.98630809783935546875f, +0.16491311788558959961f,-0.98680937290191650391f,0.16188639402389526367f, +-0.98730140924453735352f,0.15885815024375915527f,-0.98778414726257324219f, +0.15582840144634246826f,-0.98825758695602416992f,0.15279719233512878418f, +-0.98872166872024536133f,0.14976453781127929688f,-0.98917651176452636719f, +0.14673046767711639404f,-0.98962199687957763672f,0.14369502663612365723f, +-0.99005818367004394531f,0.14065824449062347412f,-0.99048507213592529297f, +0.13762012124061584473f,-0.99090266227722167969f,0.13458070158958435059f, +-0.99131083488464355469f,0.13154003024101257324f,-0.99170976877212524414f, +0.12849810719490051270f,-0.99209928512573242188f,0.12545497715473175049f, +-0.99247956275939941406f,0.12241067737340927124f,-0.99285042285919189453f, +0.11936521530151367188f,-0.99321192502975463867f,0.11631862819194793701f, +-0.99356412887573242188f,0.11327095329761505127f,-0.99390697479248046875f, +0.11022220551967620850f,-0.99424046277999877930f,0.10717242211103439331f, +-0.99456459283828735352f,0.10412163287401199341f,-0.99487930536270141602f, +0.10106986016035079956f,-0.99518471956253051758f,0.09801714122295379639f, +-0.99548077583312988281f,0.09496349841356277466f,-0.99576741456985473633f, +0.09190895408391952515f,-0.99604469537734985352f,0.08885355293750762939f, +-0.99631261825561523438f,0.08579730987548828125f,-0.99657112360000610352f, +0.08274026215076446533f,-0.99682027101516723633f,0.07968243956565856934f, +-0.99706006050109863281f,0.07662386447191238403f,-0.99729043245315551758f, +0.07356456667184829712f,-0.99751144647598266602f,0.07050457596778869629f, +-0.99772304296493530273f,0.06744392216205596924f,-0.99792528152465820312f, +0.06438262760639190674f,-0.99811810255050659180f,0.06132073700428009033f, +-0.99830156564712524414f,0.05825826525688171387f,-0.99847555160522460938f, +0.05519524589180946350f,-0.99864023923873901367f,0.05213170498609542847f, +-0.99879544973373413086f,0.04906767606735229492f,-0.99894130229949951172f, +0.04600318148732185364f,-0.99907773733139038086f,0.04293825849890708923f, +-0.99920475482940673828f,0.03987292572855949402f,-0.99932235479354858398f, +0.03680722415447235107f,-0.99943059682846069336f,0.03374117240309715271f, +-0.99952942132949829102f,0.03067480400204658508f,-0.99961882829666137695f, +0.02760814502835273743f,-0.99969881772994995117f,0.02454122900962829590f, +-0.99976938962936401367f,0.02147408016026020050f,-0.99983060359954833984f, +0.01840673014521598816f,-0.99988234043121337891f,0.01533920597285032272f, +-0.99992471933364868164f,0.01227153837680816650f,-0.99995762109756469727f, +0.00920375436544418335f,-0.99998116493225097656f,0.00613588467240333557f, +-0.99999529123306274414f,0.00306795677170157433f,1.00000000000000000000f, +0.00000000000000000000f,0.99992471933364868164f,0.01227153837680816650f, +0.99969881772994995117f,0.02454122900962829590f,0.99932235479354858398f, +0.03680722415447235107f,0.99879544973373413086f,0.04906767606735229492f, +0.99811810255050659180f,0.06132073700428009033f,0.99729043245315551758f, +0.07356456667184829712f,0.99631261825561523438f,0.08579730987548828125f, +0.99518471956253051758f,0.09801714122295379639f,0.99390697479248046875f, 
+0.11022220551967620850f,0.99247956275939941406f,0.12241067737340927124f, +0.99090266227722167969f,0.13458070158958435059f,0.98917651176452636719f, +0.14673046767711639404f,0.98730140924453735352f,0.15885815024375915527f, +0.98527765274047851562f,0.17096188664436340332f,0.98310548067092895508f, +0.18303988873958587646f,0.98078525066375732422f,0.19509032368659973145f, +0.97831737995147705078f,0.20711137354373931885f,0.97570210695266723633f, +0.21910123527050018311f,0.97293996810913085938f,0.23105810582637786865f, +0.97003126144409179688f,0.24298018217086791992f,0.96697646379470825195f, +0.25486564636230468750f,0.96377605199813842773f,0.26671275496482849121f, +0.96043050289154052734f,0.27851969003677368164f,0.95694035291671752930f, +0.29028466343879699707f,0.95330601930618286133f,0.30200594663619995117f, +0.94952815771102905273f,0.31368175148963928223f,0.94560730457305908203f, +0.32531028985977172852f,0.94154405593872070312f,0.33688986301422119141f, +0.93733900785446166992f,0.34841868281364440918f,0.93299281597137451172f, +0.35989505052566528320f,0.92850607633590698242f,0.37131720781326293945f, +0.92387950420379638672f,0.38268342614173889160f,0.91911387443542480469f, +0.39399203658103942871f,0.91420978307723999023f,0.40524131059646606445f, +0.90916800498962402344f,0.41642954945564270020f,0.90398931503295898438f, +0.42755508422851562500f,0.89867448806762695312f,0.43861624598503112793f, +0.89322429895401000977f,0.44961133599281311035f,0.88763964176177978516f, +0.46053871512413024902f,0.88192129135131835938f,0.47139674425125122070f, +0.87607008218765258789f,0.48218378424644470215f,0.87008696794509887695f, +0.49289819598197937012f,0.86397284269332885742f,0.50353837013244628906f, +0.85772860050201416016f,0.51410275697708129883f,0.85135519504547119141f, +0.52458965778350830078f,0.84485357999801635742f,0.53499764204025268555f, +0.83822470903396606445f,0.54532498121261596680f,0.83146959543228149414f, +0.55557024478912353516f,0.82458931207656860352f,0.56573182344436645508f, +0.81758481264114379883f,0.57580816745758056641f,0.81045717000961303711f, +0.58579784631729125977f,0.80320751667022705078f,0.59569931030273437500f, +0.79583692550659179688f,0.60551106929779052734f,0.78834640979766845703f, +0.61523157358169555664f,0.78073722124099731445f,0.62485951185226440430f, +0.77301043272018432617f,0.63439327478408813477f,0.76516723632812500000f, +0.64383155107498168945f,0.75720882415771484375f,0.65317285060882568359f, +0.74913638830184936523f,0.66241580247879028320f,0.74095112085342407227f, +0.67155897617340087891f,0.73265427350997924805f,0.68060100078582763672f, +0.72424709796905517578f,0.68954056501388549805f,0.71573084592819213867f, +0.69837623834609985352f,0.70710676908493041992f,0.70710676908493041992f, +0.69837623834609985352f,0.71573084592819213867f,0.68954056501388549805f, +0.72424709796905517578f,0.68060100078582763672f,0.73265427350997924805f, +0.67155897617340087891f,0.74095112085342407227f,0.66241580247879028320f, +0.74913638830184936523f,0.65317285060882568359f,0.75720882415771484375f, +0.64383155107498168945f,0.76516723632812500000f,0.63439327478408813477f, +0.77301043272018432617f,0.62485951185226440430f,0.78073722124099731445f, +0.61523157358169555664f,0.78834640979766845703f,0.60551106929779052734f, +0.79583692550659179688f,0.59569931030273437500f,0.80320751667022705078f, +0.58579784631729125977f,0.81045717000961303711f,0.57580816745758056641f, +0.81758481264114379883f,0.56573182344436645508f,0.82458931207656860352f, +0.55557024478912353516f,0.83146959543228149414f,0.54532498121261596680f, 
+0.83822470903396606445f,0.53499764204025268555f,0.84485357999801635742f, +0.52458965778350830078f,0.85135519504547119141f,0.51410275697708129883f, +0.85772860050201416016f,0.50353837013244628906f,0.86397284269332885742f, +0.49289819598197937012f,0.87008696794509887695f,0.48218378424644470215f, +0.87607008218765258789f,0.47139674425125122070f,0.88192129135131835938f, +0.46053871512413024902f,0.88763964176177978516f,0.44961133599281311035f, +0.89322429895401000977f,0.43861624598503112793f,0.89867448806762695312f, +0.42755508422851562500f,0.90398931503295898438f,0.41642954945564270020f, +0.90916800498962402344f,0.40524131059646606445f,0.91420978307723999023f, +0.39399203658103942871f,0.91911387443542480469f,0.38268342614173889160f, +0.92387950420379638672f,0.37131720781326293945f,0.92850607633590698242f, +0.35989505052566528320f,0.93299281597137451172f,0.34841868281364440918f, +0.93733900785446166992f,0.33688986301422119141f,0.94154405593872070312f, +0.32531028985977172852f,0.94560730457305908203f,0.31368175148963928223f, +0.94952815771102905273f,0.30200594663619995117f,0.95330601930618286133f, +0.29028466343879699707f,0.95694035291671752930f,0.27851969003677368164f, +0.96043050289154052734f,0.26671275496482849121f,0.96377605199813842773f, +0.25486564636230468750f,0.96697646379470825195f,0.24298018217086791992f, +0.97003126144409179688f,0.23105810582637786865f,0.97293996810913085938f, +0.21910123527050018311f,0.97570210695266723633f,0.20711137354373931885f, +0.97831737995147705078f,0.19509032368659973145f,0.98078525066375732422f, +0.18303988873958587646f,0.98310548067092895508f,0.17096188664436340332f, +0.98527765274047851562f,0.15885815024375915527f,0.98730140924453735352f, +0.14673046767711639404f,0.98917651176452636719f,0.13458070158958435059f, +0.99090266227722167969f,0.12241067737340927124f,0.99247956275939941406f, +0.11022220551967620850f,0.99390697479248046875f,0.09801714122295379639f, +0.99518471956253051758f,0.08579730987548828125f,0.99631261825561523438f, +0.07356456667184829712f,0.99729043245315551758f,0.06132073700428009033f, +0.99811810255050659180f,0.04906767606735229492f,0.99879544973373413086f, +0.03680722415447235107f,0.99932235479354858398f,0.02454122900962829590f, +0.99969881772994995117f,0.01227153837680816650f,0.99992471933364868164f, +0.00000000000000006123f,1.00000000000000000000f,-0.01227153837680816650f, +0.99992471933364868164f,-0.02454122900962829590f,0.99969881772994995117f, +-0.03680722415447235107f,0.99932235479354858398f,-0.04906767606735229492f, +0.99879544973373413086f,-0.06132073700428009033f,0.99811810255050659180f, +-0.07356456667184829712f,0.99729043245315551758f,-0.08579730987548828125f, +0.99631261825561523438f,-0.09801714122295379639f,0.99518471956253051758f, +-0.11022220551967620850f,0.99390697479248046875f,-0.12241067737340927124f, +0.99247956275939941406f,-0.13458070158958435059f,0.99090266227722167969f, +-0.14673046767711639404f,0.98917651176452636719f,-0.15885815024375915527f, +0.98730140924453735352f,-0.17096188664436340332f,0.98527765274047851562f, +-0.18303988873958587646f,0.98310548067092895508f,-0.19509032368659973145f, +0.98078525066375732422f,-0.20711137354373931885f,0.97831737995147705078f, +-0.21910123527050018311f,0.97570210695266723633f,-0.23105810582637786865f, +0.97293996810913085938f,-0.24298018217086791992f,0.97003126144409179688f, +-0.25486564636230468750f,0.96697646379470825195f,-0.26671275496482849121f, +0.96377605199813842773f,-0.27851969003677368164f,0.96043050289154052734f, 
+-0.29028466343879699707f,0.95694035291671752930f,-0.30200594663619995117f, +0.95330601930618286133f,-0.31368175148963928223f,0.94952815771102905273f, +-0.32531028985977172852f,0.94560730457305908203f,-0.33688986301422119141f, +0.94154405593872070312f,-0.34841868281364440918f,0.93733900785446166992f, +-0.35989505052566528320f,0.93299281597137451172f,-0.37131720781326293945f, +0.92850607633590698242f,-0.38268342614173889160f,0.92387950420379638672f, +-0.39399203658103942871f,0.91911387443542480469f,-0.40524131059646606445f, +0.91420978307723999023f,-0.41642954945564270020f,0.90916800498962402344f, +-0.42755508422851562500f,0.90398931503295898438f,-0.43861624598503112793f, +0.89867448806762695312f,-0.44961133599281311035f,0.89322429895401000977f, +-0.46053871512413024902f,0.88763964176177978516f,-0.47139674425125122070f, +0.88192129135131835938f,-0.48218378424644470215f,0.87607008218765258789f, +-0.49289819598197937012f,0.87008696794509887695f,-0.50353837013244628906f, +0.86397284269332885742f,-0.51410275697708129883f,0.85772860050201416016f, +-0.52458965778350830078f,0.85135519504547119141f,-0.53499764204025268555f, +0.84485357999801635742f,-0.54532498121261596680f,0.83822470903396606445f, +-0.55557024478912353516f,0.83146959543228149414f,-0.56573182344436645508f, +0.82458931207656860352f,-0.57580816745758056641f,0.81758481264114379883f, +-0.58579784631729125977f,0.81045717000961303711f,-0.59569931030273437500f, +0.80320751667022705078f,-0.60551106929779052734f,0.79583692550659179688f, +-0.61523157358169555664f,0.78834640979766845703f,-0.62485951185226440430f, +0.78073722124099731445f,-0.63439327478408813477f,0.77301043272018432617f, +-0.64383155107498168945f,0.76516723632812500000f,-0.65317285060882568359f, +0.75720882415771484375f,-0.66241580247879028320f,0.74913638830184936523f, +-0.67155897617340087891f,0.74095112085342407227f,-0.68060100078582763672f, +0.73265427350997924805f,-0.68954056501388549805f,0.72424709796905517578f, +-0.69837623834609985352f,0.71573084592819213867f,-0.70710676908493041992f, +0.70710676908493041992f,-0.71573084592819213867f,0.69837623834609985352f, +-0.72424709796905517578f,0.68954056501388549805f,-0.73265427350997924805f, +0.68060100078582763672f,-0.74095112085342407227f,0.67155897617340087891f, +-0.74913638830184936523f,0.66241580247879028320f,-0.75720882415771484375f, +0.65317285060882568359f,-0.76516723632812500000f,0.64383155107498168945f, +-0.77301043272018432617f,0.63439327478408813477f,-0.78073722124099731445f, +0.62485951185226440430f,-0.78834640979766845703f,0.61523157358169555664f, +-0.79583692550659179688f,0.60551106929779052734f,-0.80320751667022705078f, +0.59569931030273437500f,-0.81045717000961303711f,0.58579784631729125977f, +-0.81758481264114379883f,0.57580816745758056641f,-0.82458931207656860352f, +0.56573182344436645508f,-0.83146959543228149414f,0.55557024478912353516f, +-0.83822470903396606445f,0.54532498121261596680f,-0.84485357999801635742f, +0.53499764204025268555f,-0.85135519504547119141f,0.52458965778350830078f, +-0.85772860050201416016f,0.51410275697708129883f,-0.86397284269332885742f, +0.50353837013244628906f,-0.87008696794509887695f,0.49289819598197937012f, +-0.87607008218765258789f,0.48218378424644470215f,-0.88192129135131835938f, +0.47139674425125122070f,-0.88763964176177978516f,0.46053871512413024902f, +-0.89322429895401000977f,0.44961133599281311035f,-0.89867448806762695312f, +0.43861624598503112793f,-0.90398931503295898438f,0.42755508422851562500f, +-0.90916800498962402344f,0.41642954945564270020f,-0.91420978307723999023f, 
+0.40524131059646606445f,-0.91911387443542480469f,0.39399203658103942871f, +-0.92387950420379638672f,0.38268342614173889160f,-0.92850607633590698242f, +0.37131720781326293945f,-0.93299281597137451172f,0.35989505052566528320f, +-0.93733900785446166992f,0.34841868281364440918f,-0.94154405593872070312f, +0.33688986301422119141f,-0.94560730457305908203f,0.32531028985977172852f, +-0.94952815771102905273f,0.31368175148963928223f,-0.95330601930618286133f, +0.30200594663619995117f,-0.95694035291671752930f,0.29028466343879699707f, +-0.96043050289154052734f,0.27851969003677368164f,-0.96377605199813842773f, +0.26671275496482849121f,-0.96697646379470825195f,0.25486564636230468750f, +-0.97003126144409179688f,0.24298018217086791992f,-0.97293996810913085938f, +0.23105810582637786865f,-0.97570210695266723633f,0.21910123527050018311f, +-0.97831737995147705078f,0.20711137354373931885f,-0.98078525066375732422f, +0.19509032368659973145f,-0.98310548067092895508f,0.18303988873958587646f, +-0.98527765274047851562f,0.17096188664436340332f,-0.98730140924453735352f, +0.15885815024375915527f,-0.98917651176452636719f,0.14673046767711639404f, +-0.99090266227722167969f,0.13458070158958435059f,-0.99247956275939941406f, +0.12241067737340927124f,-0.99390697479248046875f,0.11022220551967620850f, +-0.99518471956253051758f,0.09801714122295379639f,-0.99631261825561523438f, +0.08579730987548828125f,-0.99729043245315551758f,0.07356456667184829712f, +-0.99811810255050659180f,0.06132073700428009033f,-0.99879544973373413086f, +0.04906767606735229492f,-0.99932235479354858398f,0.03680722415447235107f, +-0.99969881772994995117f,0.02454122900962829590f,-0.99992471933364868164f, +0.01227153837680816650f,1.00000000000000000000f,0.00000000000000000000f, +0.99879544973373413086f,0.04906767606735229492f,0.99518471956253051758f, +0.09801714122295379639f,0.98917651176452636719f,0.14673046767711639404f, +0.98078525066375732422f,0.19509032368659973145f,0.97003126144409179688f, +0.24298018217086791992f,0.95694035291671752930f,0.29028466343879699707f, +0.94154405593872070312f,0.33688986301422119141f,0.92387950420379638672f, +0.38268342614173889160f,0.90398931503295898438f,0.42755508422851562500f, +0.88192129135131835938f,0.47139674425125122070f,0.85772860050201416016f, +0.51410275697708129883f,0.83146959543228149414f,0.55557024478912353516f, +0.80320751667022705078f,0.59569931030273437500f,0.77301043272018432617f, +0.63439327478408813477f,0.74095112085342407227f,0.67155897617340087891f, +0.70710676908493041992f,0.70710676908493041992f,0.67155897617340087891f, +0.74095112085342407227f,0.63439327478408813477f,0.77301043272018432617f, +0.59569931030273437500f,0.80320751667022705078f,0.55557024478912353516f, +0.83146959543228149414f,0.51410275697708129883f,0.85772860050201416016f, +0.47139674425125122070f,0.88192129135131835938f,0.42755508422851562500f, +0.90398931503295898438f,0.38268342614173889160f,0.92387950420379638672f, +0.33688986301422119141f,0.94154405593872070312f,0.29028466343879699707f, +0.95694035291671752930f,0.24298018217086791992f,0.97003126144409179688f, +0.19509032368659973145f,0.98078525066375732422f,0.14673046767711639404f, +0.98917651176452636719f,0.09801714122295379639f,0.99518471956253051758f, +0.04906767606735229492f,0.99879544973373413086f,0.00000000000000006123f, +1.00000000000000000000f,-0.04906767606735229492f,0.99879544973373413086f, +-0.09801714122295379639f,0.99518471956253051758f,-0.14673046767711639404f, +0.98917651176452636719f,-0.19509032368659973145f,0.98078525066375732422f, 
+-0.24298018217086791992f,0.97003126144409179688f,-0.29028466343879699707f, +0.95694035291671752930f,-0.33688986301422119141f,0.94154405593872070312f, +-0.38268342614173889160f,0.92387950420379638672f,-0.42755508422851562500f, +0.90398931503295898438f,-0.47139674425125122070f,0.88192129135131835938f, +-0.51410275697708129883f,0.85772860050201416016f,-0.55557024478912353516f, +0.83146959543228149414f,-0.59569931030273437500f,0.80320751667022705078f, +-0.63439327478408813477f,0.77301043272018432617f,-0.67155897617340087891f, +0.74095112085342407227f,-0.70710676908493041992f,0.70710676908493041992f, +-0.74095112085342407227f,0.67155897617340087891f,-0.77301043272018432617f, +0.63439327478408813477f,-0.80320751667022705078f,0.59569931030273437500f, +-0.83146959543228149414f,0.55557024478912353516f,-0.85772860050201416016f, +0.51410275697708129883f,-0.88192129135131835938f,0.47139674425125122070f, +-0.90398931503295898438f,0.42755508422851562500f,-0.92387950420379638672f, +0.38268342614173889160f,-0.94154405593872070312f,0.33688986301422119141f, +-0.95694035291671752930f,0.29028466343879699707f,-0.97003126144409179688f, +0.24298018217086791992f,-0.98078525066375732422f,0.19509032368659973145f, +-0.98917651176452636719f,0.14673046767711639404f,-0.99518471956253051758f, +0.09801714122295379639f,-0.99879544973373413086f,0.04906767606735229492f, +1.00000000000000000000f,0.00000000000000000000f,0.98078525066375732422f, +0.19509032368659973145f,0.92387950420379638672f,0.38268342614173889160f, +0.83146959543228149414f,0.55557024478912353516f,0.70710676908493041992f, +0.70710676908493041992f,0.55557024478912353516f,0.83146959543228149414f, +0.38268342614173889160f,0.92387950420379638672f,0.19509032368659973145f, +0.98078525066375732422f,0.00000000000000006123f,1.00000000000000000000f, +-0.19509032368659973145f,0.98078525066375732422f,-0.38268342614173889160f, +0.92387950420379638672f,-0.55557024478912353516f,0.83146959543228149414f, +-0.70710676908493041992f,0.70710676908493041992f,-0.83146959543228149414f, +0.55557024478912353516f,-0.92387950420379638672f,0.38268342614173889160f, +-0.98078525066375732422f,0.19509032368659973145f,1.00000000000000000000f, +0.00000000000000000000f,0.70710676908493041992f,0.70710676908493041992f, +0.00000000000000006123f,1.00000000000000000000f,-0.70710676908493041992f, +0.70710676908493041992f,}; float32_t rearranged_twiddle_stride3_4096_f32[2728]={ -1.00000000000000000000f,0.00000000000000000000f,0.99998941108192840321f, -0.00460192612044857050f,0.99995764455196389786f,0.00920375478205981944f, -0.99990470108285289808f,0.01380538852806039059f,0.99983058179582340319f, -0.01840672990580482019f,0.99973528826056168306f,0.02300768146883936868f, -0.99961882249517863830f,0.02760814577896573974f,0.99948118696616694567f, -0.03220802540830458582f,0.99932238458834954375f,0.03680722294135883171f, -0.99914241872481690532f,0.04140564097707673946f,0.99894129318685687124f, -0.04600318213091462299f,0.99871901223387293811f,0.05059974903689928166f, -0.99847558057329477421f,0.05519524434968993420f,0.99821100336047818846f, -0.05978957074663986820f,0.99792528619859599548f,0.06438263092985746505f, -0.99761843513851955478f,0.06897432762826674613f,0.99729045667869020697f, -0.07356456359966742631f,0.99694135776498216117f,0.07815324163279423197f, -0.99657114579055483539f,0.08274026454937569164f,0.99617982859569698117f, -0.08732553520619205922f,0.99576741446765981713f,0.09190895649713272386f, -0.99533391214048227980f,0.09649043135525259274f,0.99487933079480561638f, 
-0.10106986275482782167f,0.99440368005767909576f,0.10564715371341061589f, -0.99390697000235606051f,0.11022220729388305938f,0.99338921114808065305f, -0.11479492660651008373f,0.99285041445986510489f,0.11936521481099135467f, -0.99229059134825736699f,0.12393297511851215920f,0.99170975366909952520f, -0.12849811079379316880f,0.99110791372327688986f,0.13306052515713906459f, -0.99048508425645709341f,0.13762012158648603832f,0.98984127845882052821f, -0.14217680351944803063f,0.98917650996478101444f,0.14673047445536174793f, -0.98849079285269658701f,0.15128103795733022219f,0.98778414164457217783f, -0.15582839765426523271f,0.98705657130575097380f,0.16037245724292828464f, -0.98630809724459866938f,0.16491312048996989437f,0.98553873531217606185f, -0.16945029123396795900f,0.98474850180190420801f,0.17398387338746382214f, -0.98393741344921892278f,0.17851377093899750692f,0.98310548743121628501f, -0.18303988795514095078f,0.98225274136628937249f,0.18756212858252960252f, -0.98137919331375456089f,0.19208039704989243734f,0.98048486177346938497f, -0.19659459767008022335f,0.97956976568544051887f,0.20110463484209190055f, -0.97863392442942320759f,0.20561041305309923910f,0.97767735782450992943f, -0.21011183688046961016f,0.97670008612871184184f,0.21460881099378675829f, -0.97570213003852857003f,0.21910124015686979759f,0.97468351068851066810f, -0.22358902922978998729f,0.97364424965081197705f,0.22807208317088573102f, -0.97258436893473221296f,0.23255030703877524467f,0.97150389098625178352f, -0.23702360599436719801f,0.97040283868755550234f,0.24149188530286933019f, -0.96928123535654853171f,0.24595505033579459497f,0.96813910474636244441f, -0.25041300657296522436f,0.96697647104485207059f,0.25486565960451457169f, -0.96579335887408368500f,0.25931291513288623474f,0.96458979328981275803f, -0.26375467897483134694f,0.96336579978095404631f,0.26819085706340317632f, -0.96212140426904158019f,0.27262135544994897662f,0.96085663310767965850f, -0.27704608030609989555f,0.95957151308198451733f,0.28146493792575794091f, -0.95826607140801767226f,0.28587783472708061527f,0.95694033573220882438f, -0.29028467725446233105f,0.95559433413077110586f,0.29468537218051432669f, -0.95422809510910566733f,0.29907982630804047508f,0.95284164760119871573f, -0.30346794657201131562f,0.95143502096900833820f,0.30784964004153486661f, -0.95000824500184299914f,0.31222481392182488413f,0.94856134991573026749f, -0.31659337555616584581f,0.94709436635277721717f,0.32095523242787521445f, -0.94560732538052127971f,0.32531029216226292622f,0.94410025849127265918f, -0.32965846252858749255f,0.94257319760144686605f,0.33399965144200938205f, -0.94102617505088925753f,0.33833376696554112728f,0.93945922360218991898f, -0.34266071731199437833f,0.93787237643998988545f,0.34698041084592368133f, -0.93626566717027825959f,0.35129275608556709276f,0.93463912981968078064f, -0.35559766170478385172f,0.93299279883473895669f,0.35989503653498811087f, -0.93132670908118042608f,0.36418478956707989180f,0.92964089584318121418f, -0.36846682995337232125f,0.92793539482261788720f,0.37274106700951575855f, -0.92621024213831137928f,0.37700741021641825945f,0.92446547432526260391f, -0.38126576922216237620f,0.92270112833387862850f,0.38551605384391884890f, -0.92091724152918941204f,0.38975817406985641123f,0.91911385169005777040f, -0.39399204006104809883f,0.91729099700837790632f,0.39821756215337356100f, -0.91544871608826783316f,0.40243465085941843018f,0.91358704794525080750f, -0.40664321687036902864f,0.91170603200542987832f,0.41084317105790391089f, -0.90980570810465222209f,0.41503442447608163146f,0.90788611648766626150f, 
-0.41921688836322390515f,0.90594729780726845902f,0.42339047414379604728f, -0.90398929312344333820f,0.42755509343028208491f,0.90201214390249317976f, -0.43171065802505725895f,0.90001589201616016833f,0.43585707992225547480f, -0.89800057974073987932f,0.43999427130963325583f,0.89596624975618521791f, -0.44412214457042920035f,0.89391294514520325265f,0.44824061228521988598f, -0.89184070939234272313f,0.45234958723377088896f,0.88974958638307277692f, -0.45644898239688391772f,0.88763962040285393496f,0.46053871095824000514f, -0.88551085613619995307f,0.46461868630623781584f,0.88336333866573157891f, -0.46868882203582790114f,0.88119711347122209322f,0.47274903195034279069f, -0.87901222642863352519f,0.47679923006332208812f,0.87680872380914565145f, -0.48083933060033395845f,0.87458665227817611321f,0.48486924800079106435f, -0.87234605889439154058f,0.48888889691976317176f,0.87008699110871146054f, -0.49289819222978403790f,0.86780949676330332299f,0.49689704902265446895f, -0.86551362409056908920f,0.50088538261124071482f,0.86319942171212415971f, -0.50486310853126759035f,0.86086693863776730939f,0.50883014254310698909f, -0.85851622426444273994f,0.51278640063356295542f,0.85614732837519447184f, -0.51673179901764987321f,0.85376030113811141042f,0.52066625414036715735f, -0.85135519310526519554f,0.52458968267846894928f,0.84893205521163961347f, -0.52850200154222848337f,0.84649093877405212627f,0.53240312787719790144f, -0.84403189549006640835f,0.53629297906596318235f,0.84155497743689844370f, -0.54017147272989285423f,0.83906023707031274217f,0.54403852673088382019f, -0.83654772722351200542f,0.54789405917310018967f,0.83401750110601813315f, -0.55173798840470733573f,0.83146961230254523567f,0.55557023301960217765f, -0.82890411477186487499f,0.55939071185913613604f,0.82632106284566353427f, -0.56319934401383409117f,0.82372051122739142759f,0.56699604882510867832f, -0.82110251499110464835f,0.57078074588696725566f,0.81846712958029865792f, -0.57455335504771576360f,0.81581441080673378075f,0.57831379641165558958f, -0.81314441484925359394f,0.58206199034077543697f,0.81045719825259476821f, -0.58579785745643886408f,0.80775281792619035848f,0.58952131864106394055f, -0.80503133114296365758f,0.59323229503979979516f,0.80229279553811572168f, -0.59693070806219639124f,0.79953726910790501314f,0.60061647938386897305f, -0.79676481020841882774f,0.60428953094815596181f,0.79397547755433717231f, -0.60794978496777363208f,0.79116933021769020318f,0.61159716392646190641f, -0.78834642762660622761f,0.61523159058062681925f,0.78550682956405393220f, -0.61885298796097631957f,0.78265059616657572938f,0.62246127937414996723f, -0.77977778792301455368f,0.62605638840434352232f,0.77688846567323244230f, -0.62963823891492698426f,0.77398269060682289844f,0.63320675505005719064f, -0.77106052426181381776f,0.63676186123628419899f,0.76812202852336541881f, -0.64030348218415167327f,0.76516726562245895860f,0.64383154288979138613f, -0.76219629813457900891f,0.64734596863651205911f,0.75920918897838796102f, -0.65084668499638087535f,0.75620600141439453523f,0.65433361783180044036f, -0.75318679904361252042f,0.65780669329707863735f,0.75015164580621507273f, -0.66126583783999226540f,0.74710060598018013245f,0.66471097820334479334f, -0.74403374417992929057f,0.66814204142651845153f,0.74095112535495921691f, -0.67155895484701833009f,0.73785281478846598269f,0.67496164610201192513f, -0.73473887809596349907f,0.67835004312986146857f,0.73160938122389262972f, -0.68172407417164970767f,0.72846439044822519637f,0.68508366777270035541f, -0.72530397237306076796f,0.68842875278409043638f,0.72212819392921534511f, 
-0.69175925836415774750f,0.71893712237280449351f,0.69507511398000088043f, -0.71573082528381870571f,0.69837624940897280457f,0.71250937056469243469f, -0.70166259474016845488f,0.70927282643886568891f,0.70493408037590488124f, -0.70602126144933974317f,0.70819063703319540259f,0.70275474445722529993f, -0.71143219574521643356f,0.69947334464028376733f,0.71465868786276909308f, -0.69617713149146298601f,0.71787004505573170920f,0.69286617481742474034f, -0.72106619931450810501f,0.68954054473706694051f,0.72424708295146689174f, -0.68620031168003858824f,0.72741262860237576593f,0.68284554638524808112f, -0.73056276922782759087f,0.67947631989936496666f,0.73369743811466026084f, -0.67609270357531603413f,0.73681656887736979300f,0.67269476907077296879f, -0.73992009545951609173f,0.66928258834663600929f,0.74300795213512171866f, -0.66585623366550972246f,0.74608007351006366825f,0.66241577759017178373f, -0.74913639452345925918f,0.65896129298203731661f,0.75217685044904269986f, -0.65549285299961546070f,0.75520137689653654700f,0.65201053109695950027f, -0.75820990981301528144f,0.64851440102211255212f,0.76120238548426177871f, -0.64500453681554403840f,0.76417874053611667406f,0.64148101280858316198f, -0.76713891193582040007f,0.63794390362184416610f,0.77008283699334789674f, -0.63439328416364548779f,0.77301045336273688235f,0.63082922962842458148f, -0.77592169904340757558f,0.62725181549514419377f,0.77881651238147586724f, -0.62366111752569464155f,0.78169483207105938671f,0.62005721176328920663f, -0.78455659715557524159f,0.61644017453085364622f,0.78740174702903131809f, -0.61281008242940970820f,0.79023022143731003197f,0.60916701233645320634f, -0.79304196047944364167f,0.60551104140432554512f,0.79583690460888345530f, -0.60184224705858002658f,0.79861499463476082195f,0.59816070699634238395f, -0.80137617172314012937f,0.59446649918466454299f,0.80412037739826569549f, -0.59075970185887427544f,0.80684755354379922299f,0.58704039352091808013f, -0.80955764240405125864f,0.58330865293769829094f,0.81225058658520388200f, -0.57956455913940574387f,0.81492632905652662156f,0.57580819141784533866f, -0.81758481315158371139f,0.57203962932475704850f,0.82022598256943468620f, -0.56825895267013148970f,0.82284978137582631685f,0.56446624152051949608f, -0.82545615400437744036f,0.56066157619733603124f,0.82804504525775579626f, -0.55684503727516010407f,0.83061640030884620334f,0.55301670558002757883f, -0.83317016470191318511f,0.54917666218771976627f,0.83570628435375260423f, -0.54532498842204646383f,0.83822470555483796772f,0.54146176585312355556f, -0.84072537497045796151f,0.53758707629564550512f,0.84320823964184543620f, -0.53370100180715296379f,0.84567324698729906540f,0.52980362468629482731f, -0.84812034480329712149f,0.52589502747108474168f,0.85054948126560336874f, -0.52197529293715438925f,0.85296060493036363059f,0.51804450409599933636f, -0.85535366473519602870f,0.51410274419322166128f,0.85772861000027211809f, -0.51015009670676669806f,0.86008539042939025077f,0.50618664534515533937f, -0.86242395611104050168f,0.50221247404571089934f,0.86474425751946237817f, -0.49822766697278186854f,0.86704624551569264845f,0.49423230851595972846f, -0.86932987134860673084f,0.49022648328829110387f,0.87159508665595109012f, -0.48621027612448652899f,0.87384184346536675214f,0.48218377207912282989f, -0.87607009419540660122f,0.47814705642484311987f,0.87827979165654146421f, -0.47410021465055002254f,0.88047088905216075450f,0.47004333245959561971f, -0.88264333997956279099f,0.46597649576796612569f,0.88479709843093778954f, -0.46189979070246284243f,0.88693211879434208367f,0.45781330359887728587f, 
-0.88904835585466457371f,0.45371712100016392544f,0.89114576479458318392f, -0.44961132965460659516f,0.89322430119551532446f,0.44549601651398174074f, -0.89528392103855758410f,0.44137126873171661501f,0.89732458070541831763f, -0.43723717366104419835f,0.89934623697934146236f,0.43309381885315201277f, -0.90134884704602202810f,0.42894129205532954829f,0.90333236849451181705f, -0.42477968120910880589f,0.90529675931811881551f,0.42060907444840250902f, -0.90724197791529592738f,0.41642956009763731906f,0.90916798309052226923f, -0.41224122666988299857f,0.91107473405517624965f,0.40804416286497874333f, -0.91296219042839810154f,0.40383845756765412993f,0.91483031223794608611f, -0.39962419984564678810f,0.91667905992104270485f,0.39540147894781629834f, -0.91850839432521225181f,0.39117038430225398171f,0.92031827670911048322f, -0.38693100551438869283f,0.92210866874334507237f,0.38268343236508983729f, -0.92387953251128673848f,0.37842775480876561511f,0.92563083050987271516f, -0.37416406297145798909f,0.92736252565040111495f,0.36989244714893426691f, -0.92907458125931574600f,0.36561299780477396482f,0.93076696107898371224f, -0.36132580556845433906f,0.93243962926846235550f,0.35703096123343003310f, -0.93409255040425887007f,0.35272855575521072646f,0.93572568948108036935f, -0.34841868024943450921f,0.93733901191257495977f,0.34410142598993898044f, -0.93893248353206448797f,0.33977688440682696225f,0.94050607059326829518f, -0.33544514708453165852f,0.94205973977101731265f,0.33110630575987642921f, -0.94359345816196038559f,0.32676045232013178898f,0.94510719328526060501f, -0.32240767880107001897f,0.94660091308328353499f,0.31804807738501505998f, -0.94807458592227622507f,0.31368174039889157312f,0.94952818059303667475f, -0.30930876031226878231f,0.95096166631157508231f,0.30492922973540242948f, -0.95237501271976587880f,0.30054324141727339903f,0.95376818988599032512f, -0.29615088824362395536f,0.95514116830577067141f,0.29175226323498937298f, -0.95649391890239499059f,0.28734745954472956653f,0.95782641302753290802f, -0.28293657045705539188f,0.95913862246184189431f,0.27851968938505305973f, -0.96043051941556578655f,0.27409690986870632878f,0.96170207652912254037f, -0.26966832557291520178f,0.96295326687368387741f,0.26523403028551190141f, -0.96418406395174571788f,0.26079411791527556952f,0.96539444169768939830f, -0.25634868248994291395f,0.96658437447833311928f,0.25189781815421691258f, -0.96775383709347551076f,0.24744161916777343557f,0.96890280477642887202f, -0.24298017990326398197f,0.97003125319454397424f,0.23851359484431849944f, -0.97113915844972509284f,0.23404195858354345794f,0.97222649707893626925f, -0.22956536582051886852f,0.97329324605469824672f,0.22508391135979277653f, -0.97433938278557585821f,0.22059769010887364526f,0.97536488511665686563f, -0.21610679707621960333f,0.97636973133002114000f,0.21161132736922760866f, -0.97735390014519996082f,0.20711137619221856032f,0.97831737071962765473f, -0.20260703884442110567f,0.97926012264908202098f,0.19809841071795372680f, -0.98018213596811731847f,0.19358558729580374602f,0.98108339115048659451f, -0.18906866414980627589f,0.98196386910955524296f,0.18454773693861964423f, -0.98282355119870523641f,0.18002290140569951471f,0.98366241921173025453f, -0.17549425337727139751f,0.98448045538322093151f,0.17096188876030135595f, -0.98527764238894122162f,0.16642590354046421508f,0.98605396334619543897f, -0.16188639378011188130f,0.98680940181418541624f,0.15734345561623827581f, -0.98754394179435922574f,0.15279718525844340760f,0.98825756773074946437f, -0.14824767898689619749f,0.98895026451030298986f,0.14369503315029458212f, 
-0.98962201746320077600f,0.13913934416382628401f,0.99027281236316910817f, -0.13458070850712622324f,0.99090263542778000971f,0.13001922272223334631f, -0.99151147331874389668f,0.12545498341154620592f,0.99209931314219179654f, -0.12088808723577722237f,0.99266614244894801899f,0.11631863091190487725f, -0.99321194923479450001f,0.11174671121112665639f,0.99373672194072459884f, -0.10717242495680887049f,0.99424044945318790223f,0.10259586902243628126f, -0.99472312110432570265f,0.09801714032956077016f,0.99518472667219681771f, -0.09343633584574791151f,0.99562525638099430569f,0.08885355258252468358f, -0.99604470090125196702f,0.08426888759332412659f,0.99644305135004263008f, -0.07968243797143012563f,0.99682029929116566791f,0.07509430084792129145f, -0.99717643673532618820f,0.07050457338961400866f,0.99751145614030345410f, -0.06591335279700392957f,0.99782535041111164453f,0.06132073630220864768f, -0.99811811290014917919f,0.05672682116690778292f,0.99838973740734016094f, -0.05213170468028331672f,0.99864021818026527111f,0.04753548415695926094f, -0.99886954991428356099f,0.04293825693494095902f,0.99907772775264536147f, -0.03834012037355279123f,0.99926474728659442359f,0.03374117185137764235f, -0.99943060455546173237f,0.02914150876419373953f,0.99957529604674921764f, -0.02454122852291226384f,0.99969881869620424997f,0.01994042855151459750f, -0.99980116988788425569f,0.01533920628498821985f,0.99988234745421256111f, -0.01073765916726457208f,0.99994234967602391162f,0.00613588464915451517f, -0.99998117528260110909f,0.00153398018628476615f,0.99999882345170187925f, --0.00306795676296601561f,0.99999529380957619118f,-0.00766982873953095477f, -0.99997058643097413988f,-0.01227153828571982304f,0.99992470183914450299f, --0.01687298794728165144f,0.99985764100582386060f,-0.02147408027546948359f, -0.99976940535121527898f,-0.02607471782910391472f,0.99965999674395922270f, --0.03067480317663645942f,0.99952941750109314256f,-0.03527423889821382219f, -0.99937767038800284780f,-0.03987292758773972740f,0.99920475861836388631f, --0.04447077185493861912f,0.99901068585407337697f,-0.04906767432741800800f, -0.99879545620517240501f,-0.05366353765273055437f,0.99855907422975931365f, --0.05825826450043560673f,0.99830154493389289261f,-0.06285175756416130910f, -0.99802287377148624081f,-0.06744391956366398155f,0.99772306664419163624f, --0.07203465324688929083f,0.99740212990127530279f,-0.07662386139203150592f, -0.99706007033948296225f,-0.08121144680959226092f,0.99669689520289606044f, --0.08579731234443975507f,0.99631261218277800129f,-0.09038136087786488582f, -0.99590722941741172125f,-0.09496349532963895002f,0.99548075549192693856f, --0.09954361866006931903f,0.99503319943811863180f,-0.10412163387205460030f, -0.99456457073425541537f,-0.10869744401313856386f,0.99407487930487947736f, --0.11327095217756423529f,0.99356413552059530403f,-0.11784206150832489401f, -0.99303235019785141002f,-0.12241067519921615403f,0.99247953459870996706f, --0.12697669649688586579f,0.99190570043060932726f,-0.13154002870288314386f, -0.99131085984611544415f,-0.13610057517570606223f,0.99069502544266463406f, --0.14065823933284912761f,0.99005821026229712256f,-0.14521292465284740825f, -0.98940042779138037687f,-0.14976453467732150915f,0.98872169196032377858f, --0.15431297301302013270f,0.98802201714328352633f,-0.15885814333386127917f, -0.98730141815785843473f,-0.16339994938297311422f,0.98655991026477551920f, --0.16793829497473108936f,0.98579750916756747614f,-0.17247308399679592283f, -0.98501423101223983814f,-0.17700422041214874946f,0.98421009238692902521f, 
--0.18153160826112502146f,0.98338511032155118130f,-0.18605515166344649414f, -0.98253930228744124076f,-0.19057475482025265645f,0.98167268619698311305f, --0.19509032201612819257f,0.98078528040323043058f,-0.19960175762113094300f, -0.97987710369951763756f,-0.20410896609281689584f,0.97894817531906219710f, --0.20861185197826331850f,0.97799851493455713936f,-0.21311031991609125091f, -0.97702814265775439484f,-0.21760427463848355800f,0.97603707903903913490f, --0.22209362097320348162f,0.97502534506699412020f,-0.22657826384560997290f, -0.97399296216795583359f,-0.23105810828067113727f,0.97293995220556017678f, --0.23553305940497534787f,0.97186633748027939639f,-0.24000302244874138768f, -0.97077214072895035013f,-0.24446790274782409513f,0.96965738512429244800f, --0.24892760574572012078f,0.96852209427441737777f,-0.25338203699557015902f, -0.96736629222232850545f,-0.25783110216215882060f,0.96619000344541261516f, --0.26227470702391347812f,0.96499325285492043580f,-0.26671275747489830987f, -0.96377606579543984022f,-0.27114515952680795507f,0.96253846804435916340f, --0.27557181931095814376f,0.96128048581132063966f,-0.27999264308027327353f, -0.96000214573766584625f,-0.28440753721127171039f,0.95870347489587159906f, --0.28881640820604936870f,0.95738450078897596729f,-0.29321916269425857271f, -0.95604525134999651659f,-0.29761570743508619641f,0.95468575494133833814f, --0.30200594931922808417f,0.95330604035419386211f,-0.30638979537086097338f, -0.95190613680793234597f,-0.31076715274961136393f,0.95048607394948181337f, --0.31513792875252233383f,0.94904588185270055689f,-0.31950203081601563637f, -0.94758559101774120226f,-0.32385936651785285356f,0.94610523237040344835f, --0.32820984357909255280f,0.94460483726148025685f,-0.33255336986604405736f, -0.94308443746609349478f,-0.33688985339221994009f,0.94154406518302080631f, --0.34121920232028229991f,0.93998375303401404679f,-0.34554132496398903829f, -0.93840353406310816897f,-0.34985612979013491763f,0.93680344173592156043f, --0.35416352542049039931f,0.93518350993894761025f,-0.35846342063373642928f, -0.93354377297883628373f,-0.36275572436739711435f,0.93188426558166814750f, --0.36704034571976712487f,0.93020502289221906889f,-0.37131719395183748755f, -0.92850608047321558924f,-0.37558617848921721505f,0.92678747430458174872f, --0.37984720892405099413f,0.92504924078267769527f,-0.38410019501693493105f, -0.92329141671952774661f,-0.38834504669882619066f,0.92151403934204201285f, --0.39258167407295141427f,0.91971714629122736095f,-0.39680998741671030805f, -0.91790077562139049672f,-0.40102989718357567872f,0.91606496579933172075f, --0.40524131400498974998f,0.91420975570353069095f,-0.40944414869225753684f, -0.91233518462332285903f,-0.41363831223843450235f,0.91044129225806724737f, --0.41782371582021227141f,0.90852811871630612117f,-0.42200027079979968159f, -0.90659570451491533483f,-0.42616788872679967071f,0.90464409057824612947f, --0.43032648134008272267f,0.90267331823725871498f,-0.43447596056965581690f, -0.90068342922864685907f,-0.43861623853852738097f,0.89867446569395392775f, --0.44274722756456980077f,0.89664647017868026602f,-0.44686884016237399253f, -0.89459948563138280697f,-0.45098098904510369733f,0.89253355540276468894f, --0.45508358712634372489f,0.89044872324475798919f,-0.45917654752194403400f, -0.88834503330959635470f,-0.46325978355186014923f,0.88622253014888063838f, --0.46733320874198841510f,0.88408125871263498752f,-0.47139673682599769755f, -0.88192126434835504956f,-0.47545028174715592284f,0.87974259280004740713f, --0.47949375766015311928f,0.87754529020726124156f,-0.48352707893291846375f, 
-0.87532940310411100349f,-0.48755016014843571837f,0.87309497841829020182f, --0.49156291610654972990f,0.87084206347007897531f,-0.49556526182577237405f, -0.86857070597134100609f,-0.49955711254508178287f,0.86628095402451310569f, --0.50353838372571746440f,0.86397285612158680745f,-0.50750899105297075931f, -0.86164646114308141023f,-0.51146885043797041259f,0.85930181835700847337f, --0.51541787801946303826f,0.85693897741782865118f,-0.51935599016558964269f, -0.85455798836540053376f,-0.52328310347565654137f,0.85215890162391971785f, --0.52719913478190105760f,0.84974176800085265970f,-0.53110400115125477871f, -0.84730663868585853749f,-0.53499761988709704230f,0.84485356524970722791f, --0.53887990853100831146f,0.84238259964318595863f,-0.54275078486451577842f, -0.83989379419599952126f,-0.54661016691083474939f,0.83738720161566193578f, --0.55045797293660470029f,0.83486287498638012128f,-0.55429412145362011444f, -0.83232086776792968408f,-0.55811853122055610221f,0.82976123379452304540f, --0.56193112124468946877f,0.82718402727366902027f,-0.56573181078361323149f, -0.82458930278502517996f,-0.56952051934694725155f,0.82197711527924144370f, --0.57329716669804198226f,0.81934752007679712005f,-0.57706167285567933067f, -0.81670057286682795628f,-0.58081395809576441547f,0.81403632970594852480f, --0.58455394295301521534f,0.81135484701706384048f,-0.58828154822264522306f, -0.80865618158817509364f,-0.59199669496204088137f,0.80594039057117639047f, --0.59569930449243335691f,0.80320753148064494287f,-0.59938929840056454079f, -0.80045766219262282082f,-0.60306659854034827539f,0.79769084094339104407f, --0.60673112703452458661f,0.79490712632823690154f,-0.61038280627630958630f, -0.79210657730021227785f,-0.61402155893103815831f,0.78928925316888587371f, --0.61764730793780375784f,0.78645521359908587833f,-0.62125997651108744169f, -0.78360451860963831194f,-0.62485948814238623239f,0.78073722857209459924f, --0.62844576660183260053f,0.77785340420945314754f,-0.63201873593980895105f, -0.77495310659487393057f,-0.63557832048855611440f,0.77203639715038452351f, --0.63912444486377573138f,0.76910333764557958780f,-0.64265703396622686494f, -0.76615399019631280630f,-0.64617601298331639459f,0.76318841726338115805f, --0.64968130739068330470f,0.76020668165120230952f,-0.65317284295377653347f, -0.75720884650648467851f,-0.65665054572942882505f,0.75419497531688928227f, --0.66011434206742036768f,0.75116513190968658975f,-0.66356415861203965623f, -0.74811938045040371481f,-0.66699992230363736034f,0.74505778544146605835f, --0.67042156038017308717f,0.74198041172083106787f,-0.67382900037875603783f, -0.73888732446061522463f,-0.67722217013718044587f,0.73577858916571359238f, --0.68060099779545302212f,0.73265427167241281570f,-0.68396541179731551452f, -0.72951443814699701296f,-0.68731534089175916336f,0.72635915508434589771f, --0.69065071413453438254f,0.72318848930652757101f,-0.69397146088965377952f, -0.72000250796138176579f,-0.69727751083088640449f,0.71680127852109964959f, --0.70056879394324822474f,0.71358486878079363525f,-0.70384524052448482756f, -0.71035334685706241764f,-0.70710678118654746172f,0.70710678118654757274f, --0.71035334685706230662f,0.70384524052448504960f,-0.71358486878079352422f, -0.70056879394324833576f,-0.71680127852109953857f,0.69727751083088651551f, --0.72000250796138165477f,0.69397146088965389055f,-0.72318848930652745999f, -0.69065071413453460458f,-0.72635915508434578669f,0.68731534089175927438f, --0.72951443814699679091f,0.68396541179731562554f,-0.73265427167241270467f, -0.68060099779545324417f,-0.73577858916571337033f,0.67722217013718055689f, 
--0.73888732446061511361f,0.67382900037875614885f,-0.74198041172083095685f, -0.67042156038017319819f,-0.74505778544146594733f,0.66699992230363758239f, --0.74811938045040360379f,0.66356415861203976725f,-0.75116513190968636771f, -0.66011434206742047870f,-0.75419497531688917125f,0.65665054572942904709f, --0.75720884650648467851f,0.65317284295377664449f,-0.76020668165120219850f, -0.64968130739068341573f,-0.76318841726338115805f,0.64617601298331661663f, --0.76615399019631280630f,0.64265703396622708699f,-0.76910333764557947678f, -0.63912444486377584241f,-0.77203639715038441249f,0.63557832048855622542f, --0.77495310659487381955f,0.63201873593980906207f,-0.77785340420945303652f, -0.62844576660183271155f,-0.78073722857209448822f,0.62485948814238634341f, --0.78360451860963820092f,0.62125997651108755271f,-0.78645521359908576731f, -0.61764730793780386886f,-0.78928925316888576269f,0.61402155893103838036f, --0.79210657730021216683f,0.61038280627630969732f,-0.79490712632823679051f, -0.60673112703452469763f,-0.79769084094339093305f,0.60306659854034838641f, --0.80045766219262259877f,0.59938929840056465181f,-0.80320753148064483184f, -0.59569930449243346793f,-0.80594039057117627944f,0.59199669496204099239f, --0.80865618158817498262f,0.58828154822264533408f,-0.81135484701706372945f, -0.58455394295301532637f,-0.81403632970594841378f,0.58081395809576452649f, --0.81670057286682784525f,0.57706167285567944170f,-0.81934752007679700903f, -0.57329716669804209328f,-0.82197711527924133268f,0.56952051934694747359f, --0.82458930278502506894f,0.56573181078361345353f,-0.82718402727366902027f, -0.56193112124468957980f,-0.82976123379452293438f,0.55811853122055632426f, --0.83232086776792957306f,0.55429412145362022546f,-0.83486287498638001026f, -0.55045797293660492233f,-0.83738720161566182476f,0.54661016691083497143f, --0.83989379419599952126f,0.54275078486451588944f,-0.84238259964318584760f, -0.53887990853100842248f,-0.84485356524970711689f,0.53499761988709715332f, --0.84730663868585842646f,0.53110400115125488973f,-0.84974176800085254868f, -0.52719913478190127964f,-0.85215890162391960683f,0.52328310347565665239f, --0.85455798836540042274f,0.51935599016558975372f,-0.85693897741782865118f, -0.51541787801946314929f,-0.85930181835700836235f,0.51146885043797052361f, --0.86164646114308129921f,0.50750899105297098135f,-0.86397285612158669643f, -0.50353838372571757542f,-0.86628095402451299467f,0.49955711254508189390f, --0.86857070597134089507f,0.49556526182577254058f,-0.87084206347007886428f, -0.49156291610654989643f,-0.87309497841829009079f,0.48755016014843588490f, --0.87532940310411089246f,0.48352707893291863028f,-0.87754529020726113053f, -0.47949375766015328582f,-0.87974259280004729611f,0.47545028174715608937f, --0.88192126434835493853f,0.47139673682599780857f,-0.88408125871263487650f, -0.46733320874198858164f,-0.88622253014888052736f,0.46325978355186031576f, --0.88834503330959624368f,0.45917654752194420054f,-0.89044872324475787817f, -0.45508358712634389143f,-0.89253355540276457791f,0.45098098904510386387f, --0.89459948563138269595f,0.44686884016237415906f,-0.89664647017868026602f, -0.44274722756456996731f,-0.89867446569395392775f,0.43861623853852754751f, --0.90068342922864674804f,0.43447596056965598343f,-0.90267331823725871498f, -0.43032648134008288920f,-0.90464409057824612947f,0.42616788872679983724f, --0.90659570451491533483f,0.42200027079979984812f,-0.90852811871630612117f, -0.41782371582021243794f,-0.91044129225806713634f,0.41363831223843466889f, --0.91233518462332274801f,0.40944414869225770337f,-0.91420975570353069095f, 
-0.40524131400498991651f,-0.91606496579933172075f,0.40102989718357562321f, --0.91790077562139049672f,0.39680998741671025254f,-0.91971714629122736095f, -0.39258167407295141427f,-0.92151403934204179080f,0.38834504669882657923f, --0.92329141671952752457f,0.38410019501693531963f,-0.92504924078267747323f, -0.37984720892405138271f,-0.92678747430458174872f,0.37558617848921738158f, --0.92850608047321547822f,0.37131719395183770960f,-0.93020502289221906889f, -0.36704034571976729140f,-0.93188426558166803648f,0.36275572436739728088f, --0.93354377297883617270f,0.35846342063373659581f,-0.93518350993894761025f, -0.35416352542049039931f,-0.93680344173592167145f,0.34985612979013486212f, --0.93840353406310816897f,0.34554132496398898278f,-0.93998375303401382475f, -0.34121920232028268849f,-0.94154406518302069529f,0.33688985339222032867f, --0.94308443746609338376f,0.33255336986604444593f,-0.94460483726148014583f, -0.32820984357909271933f,-0.94610523237040333733f,0.32385936651785302010f, --0.94758559101774109124f,0.31950203081601580291f,-0.94904588185270055689f, -0.31513792875252250036f,-0.95048607394948170235f,0.31076715274961153046f, --0.95190613680793234597f,0.30638979537086091787f,-0.95330604035419386211f, -0.30200594931922802866f,-0.95468575494133833814f,0.29761570743508614090f, --0.95604525134999629454f,0.29321916269425896129f,-0.95738450078897585627f, -0.28881640820604975728f,-0.95870347489587148804f,0.28440753721127209896f, --0.96000214573766584625f,0.27999264308027344006f,-0.96128048581132063966f, -0.27557181931095831029f,-0.96253846804435916340f,0.27114515952680812161f, --0.96377606579543984022f,0.26671275747489847641f,-0.96499325285492032478f, -0.26227470702391370017f,-0.96619000344541250413f,0.25783110216215898713f, --0.96736629222232850545f,0.25338203699557010351f,-0.96852209427441737777f, -0.24892760574572009302f,-0.96965738512429233698f,0.24446790274782448371f, --0.97077214072895023911f,0.24000302244874177626f,-0.97186633748027928537f, -0.23553305940497573645f,-0.97293995220556006576f,0.23105810828067133156f, --0.97399296216795583359f,0.22657826384561016719f,-0.97502534506699412020f, -0.22209362097320364815f,-0.97603707903903902388f,0.21760427463848372454f, --0.97702814265775439484f,0.21311031991609141745f,-0.97799851493455713936f, -0.20861185197826351279f,-0.97894817531906219710f,0.20410896609281684033f, --0.97987710369951763756f,0.19960175762113091524f,-0.98078528040323043058f, -0.19509032201612860891f,-0.98167268619698311305f,0.19057475482025307278f, --0.98253930228744124076f,0.18605515166344691047f,-0.98338511032155118130f, -0.18153160826112521575f,-0.98421009238692902521f,0.17700422041214894375f, --0.98501423101223983814f,0.17247308399679611712f,-0.98579750916756736512f, -0.16793829497473128365f,-0.98655991026477540817f,0.16339994938297328075f, --0.98730141815785843473f,0.15885814333386147346f,-0.98802201714328352633f, -0.15431297301302007718f,-0.98872169196032377858f,0.14976453467732145364f, --0.98940042779138037687f,0.14521292465284735274f,-0.99005821026229701154f, -0.14065823933284954395f,-0.99069502544266463406f,0.13610057517570647856f, --0.99131085984611544415f,0.13154002870288333815f,-0.99190570043060932726f, -0.12697669649688606008f,-0.99247953459870996706f,0.12241067519921634832f, --0.99303235019785141002f,0.11784206150832508830f,-0.99356413552059530403f, -0.11327095217756441570f,-0.99407487930487936634f,0.10869744401313874427f, --0.99456457073425541537f,0.10412163387205457254f,-0.99503319943811863180f, -0.09954361866006927739f,-0.99548075549192693856f,0.09496349532963890838f, 
--0.99590722941741172125f,0.09038136087786528827f,-0.99631261218277800129f, -0.08579731234444015753f,-0.99669689520289606044f,0.08121144680959266338f, --0.99706007033948296225f,0.07662386139203168633f,-0.99740212990127530279f, -0.07203465324688947125f,-0.99772306664419163624f,0.06744391956366417584f, --0.99802287377148624081f,0.06285175756416148951f,-0.99830154493389289261f, -0.05825826450043579408f,-0.99855907422975931365f,0.05366353765273051968f, --0.99879545620517240501f,0.04906767432741796636f,-0.99901068585407337697f, -0.04447077185493858442f,-0.99920475861836388631f,0.03987292758774012985f, --0.99937767038800284780f,0.03527423889821423159f,-0.99952941750109314256f, -0.03067480317663686534f,-0.99965999674395922270f,0.02607471782910409860f, --0.99976940535121527898f,0.02147408027546966747f,-0.99985764100582386060f, -0.01687298794728183532f,-0.99992470183914450299f,0.01227153828572000692f, --0.99997058643097413988f,0.00766982873953113778f,-0.99999529380957619118f, -0.00306795676296597701f,-0.99999882345170187925f,-0.00153398018628480431f, --0.99998117528260110909f,-0.00613588464915455420f,-0.99994234967602391162f, --0.01073765916726416615f,-0.99988234745421256111f,-0.01533920628498781566f, --0.99980116988788425569f,-0.01994042855151419158f,-0.99969881869620424997f, --0.02454122852291207996f,-0.99957529604674921764f,-0.02914150876419355565f, --0.99943060455546173237f,-0.03374117185137745500f,-0.99926474728659442359f, --0.03834012037355261082f,-0.99907772775264536147f,-0.04293825693494077861f, --0.99886954991428356099f,-0.04753548415695929563f,-0.99864021818026527111f, --0.05213170468028335142f,-0.99838973740734016094f,-0.05672682116690781762f, --0.99811811290014917919f,-0.06132073630220824523f,-0.99782535041111164453f, --0.06591335279700352712f,-0.99751145614030345410f,-0.07050457338961360620f, --0.99717643673532618820f,-0.07509430084792109716f,-0.99682029929116577893f, --0.07968243797142994522f,-0.99644305135004263008f,-0.08426888759332393231f, --0.99604470090125196702f,-0.08885355258252450317f,-0.99562525638099430569f, --0.09343633584574773110f,-0.99518472667219692873f,-0.09801714032956058975f, --0.99472312110432570265f,-0.10259586902243630901f,-0.99424044945318790223f, --0.10717242495680891212f,-0.99373672194072470987f,-0.11174671121112625394f, --0.99321194923479461103f,-0.11631863091190447479f,-0.99266614244894801899f, --0.12088808723577681992f,-0.99209931314219179654f,-0.12545498341154601163f, --0.99151147331874400770f,-0.13001922272223317978f,-0.99090263542778000971f, --0.13458070850712605671f,-0.99027281236316910817f,-0.13913934416382611747f, --0.98962201746320088702f,-0.14369503315029438784f,-0.98895026451030298986f, --0.14824767898689603096f,-0.98825756773074946437f,-0.15279718525844343535f, --0.98754394179435922574f,-0.15734345561623830356f,-0.98680940181418552726f, --0.16188639378011149272f,-0.98605396334619543897f,-0.16642590354046382650f, --0.98527764238894133264f,-0.17096188876030096737f,-0.98448045538322093151f, --0.17549425337727120322f,-0.98366241921173025453f,-0.18002290140569934818f, --0.98282355119870534743f,-0.18454773693861947770f,-0.98196386910955524296f, --0.18906866414980610935f,-0.98108339115048670553f,-0.19358558729580355173f, --0.98018213596811742949f,-0.19809841071795356027f,-0.97926012264908202098f, --0.20260703884442113343f,-0.97831737071962765473f,-0.20711137619221858808f, --0.97735390014519996082f,-0.21161132736922766417f,-0.97636973133002125103f, --0.21610679707621921475f,-0.97536488511665697665f,-0.22059769010887325669f, 
--0.97433938278557585821f,-0.22508391135979261000f,-0.97329324605469824672f, --0.22956536582051870199f,-0.97222649707893638027f,-0.23404195858354326365f, --0.97113915844972520386f,-0.23851359484431830515f,-0.97003125319454397424f, --0.24298017990326381543f,-0.96890280477642887202f,-0.24744161916777326904f, --0.96775383709347551076f,-0.25189781815421696809f,-0.96658437447833311928f, --0.25634868248994291395f,-0.96539444169768939830f,-0.26079411791527562503f, --0.96418406395174582890f,-0.26523403028551151284f,-0.96295326687368398844f, --0.26966832557291481320f,-0.96170207652912265139f,-0.27409690986870616225f, --0.96043051941556589757f,-0.27851968938505289319f,-0.95913862246184200533f, --0.28293657045705516984f,-0.95782641302753290802f,-0.28734745954472939999f, --0.95649391890239510161f,-0.29175226323498920644f,-0.95514116830577078243f, --0.29615088824362378883f,-0.95376818988599032512f,-0.30054324141727345454f, --0.95237501271976587880f,-0.30492922973540242948f,-0.95096166631157508231f, --0.30930876031226878231f,-0.94952818059303678577f,-0.31368174039889118454f, --0.94807458592227633609f,-0.31804807738501467140f,-0.94660091308328364601f, --0.32240767880106963039f,-0.94510719328526060501f,-0.32676045232013156694f, --0.94359345816196038559f,-0.33110630575987626267f,-0.94205973977101742367f, --0.33544514708453149199f,-0.94050607059326840620f,-0.33977688440682679571f, --0.93893248353206459900f,-0.34410142598993881391f,-0.93733901191257495977f, --0.34841868024943456472f,-0.93572568948108036935f,-0.35272855575521072646f, --0.93409255040425887007f,-0.35703096123343008861f,-0.93243962926846246653f, --0.36132580556845395048f,-0.93076696107898382326f,-0.36561299780477357624f, --0.92907458125931585702f,-0.36989244714893387833f,-0.92736252565040111495f, --0.37416406297145782256f,-0.92563083050987282618f,-0.37842775480876539307f, --0.92387953251128684951f,-0.38268343236508967076f,-0.92210866874334518339f, --0.38693100551438852630f,-0.92031827670911059425f,-0.39117038430225381518f, --0.91850839432521225181f,-0.39540147894781629834f,-0.91667905992104270485f, --0.39962419984564684361f,-0.91483031223794608611f,-0.40383845756765418544f, --0.91296219042839832358f,-0.40804416286497835475f,-0.91107473405517647169f, --0.41224122666988260999f,-0.90916798309052249127f,-0.41642956009763693048f, --0.90724197791529592738f,-0.42060907444840234248f,-0.90529675931811881551f, --0.42477968120910863936f,-0.90333236849451192807f,-0.42894129205532938176f, --0.90134884704602202810f,-0.43309381885315184624f,-0.89934623697934157338f, --0.43723717366104403181f,-0.89732458070541831763f,-0.44137126873171667052f, --0.89528392103855747308f,-0.44549601651398174074f,-0.89322430119551532446f, --0.44961132965460665067f,-0.89114576479458340597f,-0.45371712100016353686f, --0.88904835585466468473f,-0.45781330359887695280f,-0.88693211879434230571f, --0.46189979070246250936f,-0.88479709843093790056f,-0.46597649576796595916f, --0.88264333997956290201f,-0.47004333245959545318f,-0.88047088905216086552f, --0.47410021465054985601f,-0.87827979165654157523f,-0.47814705642484295334f, --0.87607009419540660122f,-0.48218377207912266336f,-0.87384184346536686316f, --0.48621027612448636246f,-0.87159508665595109012f,-0.49022648328829115938f, --0.86932987134860673084f,-0.49423230851595978397f,-0.86704624551569287050f, --0.49822766697278153547f,-0.86474425751946248919f,-0.50221247404571056627f, --0.86242395611104072373f,-0.50618664534515500630f,-0.86008539042939025077f, --0.51015009670676658704f,-0.85772861000027211809f,-0.51410274419322155026f, 
--0.85535366473519613972f,-0.51804450409599922533f,-0.85296060493036374162f, --0.52197529293715427823f,-0.85054948126560347976f,-0.52589502747108463065f, --0.84812034480329723252f,-0.52980362468629460526f,-0.84567324698729906540f, --0.53370100180715296379f,-0.84320823964184543620f,-0.53758707629564550512f, --0.84072537497045818355f,-0.54146176585312322249f,-0.83822470555483818977f, --0.54532498842204613076f,-0.83570628435375271525f,-0.54917666218771943321f, --0.83317016470191329613f,-0.55301670558002735678f,-0.83061640030884642538f, --0.55684503727515988203f,-0.82804504525775590729f,-0.56066157619733592021f, --0.82545615400437755138f,-0.56446624152051938506f,-0.82284978137582642788f, --0.56825895267013148970f,-0.82022598256943468620f,-0.57203962932475704850f, --0.81758481315158371139f,-0.57580819141784533866f,-0.81492632905652662156f, --0.57956455913940574387f,-0.81225058658520388200f,-0.58330865293769829094f, --0.80955764240405148069f,-0.58704039352091774706f,-0.80684755354379944503f, --0.59075970185887394237f,-0.80412037739826591753f,-0.59446649918466420992f, --0.80137617172314035141f,-0.59816070699634216190f,-0.79861499463476093297f, --0.60184224705857991555f,-0.79583690460888356633f,-0.60551104140432543410f, --0.79304196047944375270f,-0.60916701233645309532f,-0.79023022143731003197f, --0.61281008242940970820f,-0.78740174702903142911f,-0.61644017453085364622f, --0.78455659715557524159f,-0.62005721176328920663f,-0.78169483207105938671f, --0.62366111752569464155f,-0.77881651238147620031f,-0.62725181549514386070f, --0.77592169904340779762f,-0.63082922962842424841f,-0.77301045336273710440f, --0.63439328416364526575f,-0.77008283699334811878f,-0.63794390362184394405f, --0.76713891193582051109f,-0.64148101280858305095f,-0.76417874053611678509f, --0.64500453681554381635f,-0.76120238548426188974f,-0.64851440102211233008f, --0.75820990981301539247f,-0.65201053109695950027f,-0.75520137689653654700f, --0.65549285299961534967f,-0.75217685044904269986f,-0.65896129298203731661f, --0.74913639452345925918f,-0.66241577759017178373f,-0.74608007351006400132f, --0.66585623366550938940f,-0.74300795213512194071f,-0.66928258834663578725f, --0.73992009545951631377f,-0.67269476907077274674f,-0.73681656887737001504f, --0.67609270357531581208f,-0.73369743811466037187f,-0.67947631989936485564f, --0.73056276922782770189f,-0.68284554638524797010f,-0.72741262860237587695f, --0.68620031168003847721f,-0.72424708295146700276f,-0.68954054473706682948f, --0.72106619931450810501f,-0.69286617481742462932f,-0.71787004505573170920f, --0.69617713149146298601f,-0.71465868786276898206f,-0.69947334464028387835f, --0.71143219574521665560f,-0.70275474445722507788f,-0.70819063703319551362f, --0.70602126144933952112f,-0.70493408037590510329f,-0.70927282643886546687f, --0.70166259474016867692f,-0.71250937056469221265f,-0.69837624940897302661f, --0.71573082528381848366f,-0.69507511398000099145f,-0.71893712237280438249f, --0.69175925836415785852f,-0.72212819392921523409f,-0.68842875278409054740f, --0.72530397237306065694f,-0.68508366777270035541f,-0.72846439044822519637f, --0.68172407417164981869f,-0.73160938122389251870f,-0.67835004312986146857f, --0.73473887809596349907f,-0.67496164610201225820f,-0.73785281478846576064f, --0.67155895484701866316f,-0.74095112535495888384f,-0.66814204142651867357f, --0.74403374417992906853f,-0.66471097820334501538f,-0.74710060598017991040f, --0.66126583783999237642f,-0.75015164580621496171f,-0.65780669329707874837f, --0.75318679904361240940f,-0.65433361783180066240f,-0.75620600141439442421f, 
--0.65084668499638098638f,-0.75920918897838796102f,-0.64734596863651250320f, --0.76219629813457856482f,-0.64383154288979149715f,-0.76516726562245895860f, --0.64030348218415200634f,-0.76812202852336519676f,-0.63676186123628419899f, --0.77106052426181381776f,-0.63320675505005752370f,-0.77398269060682256537f, --0.62963823891492687324f,-0.77688846567323255332f,-0.62605638840434374437f, --0.77977778792301433164f,-0.62246127937414974518f,-0.78265059616657584041f, --0.61885298796097643059f,-0.78550682956405382118f,-0.61523159058062726334f, --0.78834642762660589455f,-0.61159716392646201744f,-0.79116933021769009216f, --0.60794978496777407617f,-0.79397547755433683925f,-0.60428953094815607283f, --0.79676481020841871672f,-0.60061647938386930612f,-0.79953726910790479110f, --0.59693070806219639124f,-0.80229279553811572168f,-0.59323229503980012822f, --0.80503133114296343553f,-0.58952131864106382952f,-0.80775281792619046950f, --0.58579785745643908612f,-0.81045719825259465718f,-0.58206199034077532595f, --0.81314441484925370496f,-0.57831379641165570060f,-0.81581441080673366972f, --0.57455335504771631872f,-0.81846712958029832485f,-0.57078074588696736669f, --0.82110251499110464835f,-0.56699604882510901138f,-0.82372051122739109452f, --0.56319934401383409117f,-0.82632106284566342325f,-0.55939071185913646911f, --0.82890411477186465294f,-0.55557023301960217765f,-0.83146961230254523567f, --0.55173798840470766880f,-0.83401750110601791111f,-0.54789405917310007865f, --0.83654772722351211645f,-0.54403852673088415326f,-0.83906023707031252012f, --0.54017147272989274320f,-0.84155497743689855472f,-0.53629297906596329337f, --0.84403189549006629733f,-0.53240312787719845655f,-0.84649093877405179320f, --0.52850200154222859439f,-0.84893205521163961347f,-0.52458968267846928235f, --0.85135519310526486247f,-0.52066625414036715735f,-0.85376030113811141042f, --0.51673179901765020627f,-0.85614732837519424979f,-0.51278640063356295542f, --0.85851622426444285097f,-0.50883014254310732216f,-0.86086693863776708735f, --0.50486310853126736831f,-0.86319942171212427073f,-0.50088538261124104789f, --0.86551362409056897818f,-0.49689704902265435793f,-0.86780949676330332299f, --0.49289819222978420443f,-0.87008699110871134952f,-0.48888889691976367136f, --0.87234605889439120752f,-0.48486924800079117537f,-0.87458665227817611321f, --0.48083933060033440254f,-0.87680872380914542941f,-0.47679923006332214364f, --0.87901222642863341417f,-0.47274903195034317926f,-0.88119711347122187117f, --0.46868882203582790114f,-0.88336333866573157891f,-0.46461868630623814891f, --0.88551085613619973103f,-0.46053871095823989412f,-0.88763962040285404598f, --0.45644898239688419528f,-0.88974958638307266590f,-0.45234958723377066692f, --0.89184070939234283415f,-0.44824061228522010802f,-0.89391294514520314163f, --0.44412214457042975546f,-0.89596624975618488484f,-0.43999427130963336685f, --0.89800057974073976830f,-0.43585707992225597440f,-0.90001589201615994629f, --0.43171065802505731446f,-0.90201214390249317976f,-0.42755509343028247349f, --0.90398929312344311615f,-0.42339047414379599177f,-0.90594729780726845902f, --0.41921688836322429372f,-0.90788611648766603945f,-0.41503442447608152044f, --0.90980570810465233311f,-0.41084317105790418845f,-0.91170603200542976730f, --0.40664321687036886210f,-0.91358704794525091852f,-0.40243465085941865222f, --0.91544871608826772214f,-0.39821756215337417162f,-0.91729099700837768427f, --0.39399204006104820985f,-0.91911385169005765938f,-0.38975817406985696634f, --0.92091724152918930102f,-0.38551605384391890441f,-0.92270112833387851747f, 
--0.38126576922216276477f,-0.92446547432526249288f,-0.37700741021641820394f, --0.92621024213831137928f,-0.37274106700951614712f,-0.92793539482261766516f, --0.36846682995337221023f,-0.92964089584318132520f,-0.36418478956708016936f, --0.93132670908118031505f,-0.35989503653498794433f,-0.93299279883473895669f, --0.35559766170478407377f,-0.93463912981968066962f,-0.35129275608556687072f, --0.93626566717027837061f,-0.34698041084592379235f,-0.93787237643998977443f, --0.34266071731199487793f,-0.93945922360218969693f,-0.33833376696554123830f, --0.94102617505088925753f,-0.33399965144200982614f,-0.94257319760144675502f, --0.32965846252858749255f,-0.94410025849127265918f,-0.32531029216226331480f, --0.94560732538052116869f,-0.32095523242787515894f,-0.94709436635277721717f, --0.31659337555616617887f,-0.94856134991573015647f,-0.31222481392182477311f, --0.95000824500184311017f,-0.30784964004153508865f,-0.95143502096900833820f, --0.30346794657201103806f,-0.95284164760119871573f,-0.29907982630804058610f, --0.95422809510910555630f,-0.29468537218051488180f,-0.95559433413077088382f, --0.29028467725446244208f,-0.95694033573220882438f,-0.28587783472708105936f, --0.95826607140801756124f,-0.28146493792575794091f,-0.95957151308198451733f, --0.27704608030610028413f,-0.96085663310767954748f,-0.27262135544994886560f, --0.96212140426904158019f,-0.26819085706340350939f,-0.96336579978095393528f, --0.26375467897483123592f,-0.96458979328981275803f,-0.25931291513288645678f, --0.96579335887408357397f,-0.25486565960451434965f,-0.96697647104485218161f, --0.25041300657296539089f,-0.96813910474636233339f,-0.24595505033579515008f, --0.96928123535654830967f,-0.24149188530286941345f,-0.97040283868755550234f, --0.23702360599436766986f,-0.97150389098625167250f,-0.23255030703877521692f, --0.97258436893473221296f,-0.22807208317088611960f,-0.97364424965081186603f, --0.22358902922978990402f,-0.97468351068851066810f,-0.21910124015687010290f, --0.97570213003852845901f,-0.21460881099378659176f,-0.97670008612871184184f, --0.21011183688046985996f,-0.97767735782450992943f,-0.20561041305309901706f, --0.97863392442942320759f,-0.20110463484209206708f,-0.97956976568544051887f, --0.19659459767008077846f,-0.98048486177346927395f,-0.19208039704989252061f, --0.98137919331375456089f,-0.18756212858253007436f,-0.98225274136628937249f, --0.18303988795514095078f,-0.98310548743121628501f,-0.17851377093899792325f, --0.98393741344921881176f,-0.17398387338746373887f,-0.98474850180190420801f, --0.16945029123396829207f,-0.98553873531217606185f,-0.16491312048996975559f, --0.98630809724459866938f,-0.16037245724292850668f,-0.98705657130575097380f, --0.15582839765426498291f,-0.98778414164457217783f,-0.15128103795733036097f, --0.98849079285269658701f,-0.14673047445536230304f,-0.98917650996478090342f, --0.14217680351944814165f,-0.98984127845882052821f,-0.13762012158648653792f, --0.99048508425645698239f,-0.13306052515713906459f,-0.99110791372327688986f, --0.12849811079379358514f,-0.99170975366909952520f,-0.12393297511851208981f, --0.99229059134825736699f,-0.11936521481099168773f,-0.99285041445986510489f, --0.11479492660650993108f,-0.99338921114808065305f,-0.11022220729388330918f, --0.99390697000235606051f,-0.10564715371341037997f,-0.99440368005767909576f, --0.10106986275482798820f,-0.99487933079480561638f,-0.09649043135525316173f, --0.99533391214048216877f,-0.09190895649713282101f,-0.99576741446765981713f, --0.08732553520619255882f,-0.99617982859569687015f,-0.08274026454937570552f, --0.99657114579055483539f,-0.07815324163279464831f,-0.99694135776498205015f, 
--0.07356456359966735692f,-0.99729045667869020697f,-0.06897432762826707919f, --0.99761843513851955478f,-0.06438263092985731240f,-0.99792528619859599548f, --0.05978957074664013188f,-0.99821100336047818846f,-0.05519524434968971216f, --0.99847558057329477421f,-0.05059974903689945513f,-0.99871901223387293811f, --0.04600318213091520586f,-0.99894129318685687124f,-0.04140564097707683661f, --0.99914241872481690532f,-0.03680722294135933131f,-0.99932238458834943273f, --0.03220802540830459970f,-0.99948118696616694567f,-0.02760814577896616301f, --0.99961882249517863830f,-0.02300768146883930970f,-0.99973528826056168306f, --0.01840672990580516366f,-0.99983058179582340319f,-0.01380538852806025008f, --0.99990470108285289808f,-0.00920375478206008311f,-0.99995764455196389786f, --0.00460192612044835019f,-0.99998941108192840321f,1.00000000000000000000f, -0.00000000000000000000f,0.99983058179582340319f,0.01840672990580482019f, -0.99932238458834954375f,0.03680722294135883171f,0.99847558057329477421f, -0.05519524434968993420f,0.99729045667869020697f,0.07356456359966742631f, -0.99576741446765981713f,0.09190895649713272386f,0.99390697000235606051f, -0.11022220729388305938f,0.99170975366909952520f,0.12849811079379316880f, -0.98917650996478101444f,0.14673047445536174793f,0.98630809724459866938f, -0.16491312048996989437f,0.98310548743121628501f,0.18303988795514095078f, -0.97956976568544051887f,0.20110463484209190055f,0.97570213003852857003f, -0.21910124015686979759f,0.97150389098625178352f,0.23702360599436719801f, -0.96697647104485207059f,0.25486565960451457169f,0.96212140426904158019f, -0.27262135544994897662f,0.95694033573220882438f,0.29028467725446233105f, -0.95143502096900833820f,0.30784964004153486661f,0.94560732538052127971f, -0.32531029216226292622f,0.93945922360218991898f,0.34266071731199437833f, -0.93299279883473895669f,0.35989503653498811087f,0.92621024213831137928f, -0.37700741021641825945f,0.91911385169005777040f,0.39399204006104809883f, -0.91170603200542987832f,0.41084317105790391089f,0.90398929312344333820f, -0.42755509343028208491f,0.89596624975618521791f,0.44412214457042920035f, -0.88763962040285393496f,0.46053871095824000514f,0.87901222642863352519f, -0.47679923006332208812f,0.87008699110871146054f,0.49289819222978403790f, -0.86086693863776730939f,0.50883014254310698909f,0.85135519310526519554f, -0.52458968267846894928f,0.84155497743689844370f,0.54017147272989285423f, -0.83146961230254523567f,0.55557023301960217765f,0.82110251499110464835f, -0.57078074588696725566f,0.81045719825259476821f,0.58579785745643886408f, -0.79953726910790501314f,0.60061647938386897305f,0.78834642762660622761f, -0.61523159058062681925f,0.77688846567323244230f,0.62963823891492698426f, -0.76516726562245895860f,0.64383154288979138613f,0.75318679904361252042f, -0.65780669329707863735f,0.74095112535495921691f,0.67155895484701833009f, -0.72846439044822519637f,0.68508366777270035541f,0.71573082528381870571f, -0.69837624940897280457f,0.70275474445722529993f,0.71143219574521643356f, -0.68954054473706694051f,0.72424708295146689174f,0.67609270357531603413f, -0.73681656887736979300f,0.66241577759017178373f,0.74913639452345925918f, -0.64851440102211255212f,0.76120238548426177871f,0.63439328416364548779f, -0.77301045336273688235f,0.62005721176328920663f,0.78455659715557524159f, -0.60551104140432554512f,0.79583690460888345530f,0.59075970185887427544f, -0.80684755354379922299f,0.57580819141784533866f,0.81758481315158371139f, -0.56066157619733603124f,0.82804504525775579626f,0.54532498842204646383f, 
-0.83822470555483796772f,0.52980362468629482731f,0.84812034480329712149f, -0.51410274419322166128f,0.85772861000027211809f,0.49822766697278186854f, -0.86704624551569264845f,0.48218377207912282989f,0.87607009419540660122f, -0.46597649576796612569f,0.88479709843093778954f,0.44961132965460659516f, -0.89322430119551532446f,0.43309381885315201277f,0.90134884704602202810f, -0.41642956009763731906f,0.90916798309052226923f,0.39962419984564678810f, -0.91667905992104270485f,0.38268343236508983729f,0.92387953251128673848f, -0.36561299780477396482f,0.93076696107898371224f,0.34841868024943450921f, -0.93733901191257495977f,0.33110630575987642921f,0.94359345816196038559f, -0.31368174039889157312f,0.94952818059303667475f,0.29615088824362395536f, -0.95514116830577067141f,0.27851968938505305973f,0.96043051941556578655f, -0.26079411791527556952f,0.96539444169768939830f,0.24298017990326398197f, -0.97003125319454397424f,0.22508391135979277653f,0.97433938278557585821f, -0.20711137619221856032f,0.97831737071962765473f,0.18906866414980627589f, -0.98196386910955524296f,0.17096188876030135595f,0.98527764238894122162f, -0.15279718525844340760f,0.98825756773074946437f,0.13458070850712622324f, -0.99090263542778000971f,0.11631863091190487725f,0.99321194923479450001f, -0.09801714032956077016f,0.99518472667219681771f,0.07968243797143012563f, -0.99682029929116566791f,0.06132073630220864768f,0.99811811290014917919f, -0.04293825693494095902f,0.99907772775264536147f,0.02454122852291226384f, -0.99969881869620424997f,0.00613588464915451517f,0.99998117528260110909f, --0.01227153828571982304f,0.99992470183914450299f,-0.03067480317663645942f, -0.99952941750109314256f,-0.04906767432741800800f,0.99879545620517240501f, --0.06744391956366398155f,0.99772306664419163624f,-0.08579731234443975507f, -0.99631261218277800129f,-0.10412163387205460030f,0.99456457073425541537f, --0.12241067519921615403f,0.99247953459870996706f,-0.14065823933284912761f, -0.99005821026229712256f,-0.15885814333386127917f,0.98730141815785843473f, --0.17700422041214874946f,0.98421009238692902521f,-0.19509032201612819257f, -0.98078528040323043058f,-0.21311031991609125091f,0.97702814265775439484f, --0.23105810828067113727f,0.97293995220556017678f,-0.24892760574572012078f, -0.96852209427441737777f,-0.26671275747489830987f,0.96377606579543984022f, --0.28440753721127171039f,0.95870347489587159906f,-0.30200594931922808417f, -0.95330604035419386211f,-0.31950203081601563637f,0.94758559101774120226f, --0.33688985339221994009f,0.94154406518302080631f,-0.35416352542049039931f, -0.93518350993894761025f,-0.37131719395183748755f,0.92850608047321558924f, --0.38834504669882619066f,0.92151403934204201285f,-0.40524131400498974998f, -0.91420975570353069095f,-0.42200027079979968159f,0.90659570451491533483f, --0.43861623853852738097f,0.89867446569395392775f,-0.45508358712634372489f, -0.89044872324475798919f,-0.47139673682599769755f,0.88192126434835504956f, --0.48755016014843571837f,0.87309497841829020182f,-0.50353838372571746440f, -0.86397285612158680745f,-0.51935599016558964269f,0.85455798836540053376f, --0.53499761988709704230f,0.84485356524970722791f,-0.55045797293660470029f, -0.83486287498638012128f,-0.56573181078361323149f,0.82458930278502517996f, --0.58081395809576441547f,0.81403632970594852480f,-0.59569930449243335691f, -0.80320753148064494287f,-0.61038280627630958630f,0.79210657730021227785f, --0.62485948814238623239f,0.78073722857209459924f,-0.63912444486377573138f, -0.76910333764557958780f,-0.65317284295377653347f,0.75720884650648467851f, 
--0.66699992230363736034f,0.74505778544146605835f,-0.68060099779545302212f, -0.73265427167241281570f,-0.69397146088965377952f,0.72000250796138176579f, --0.70710678118654746172f,0.70710678118654757274f,-0.72000250796138165477f, -0.69397146088965389055f,-0.73265427167241270467f,0.68060099779545324417f, --0.74505778544146594733f,0.66699992230363758239f,-0.75720884650648467851f, -0.65317284295377664449f,-0.76910333764557947678f,0.63912444486377584241f, --0.78073722857209448822f,0.62485948814238634341f,-0.79210657730021216683f, -0.61038280627630969732f,-0.80320753148064483184f,0.59569930449243346793f, --0.81403632970594841378f,0.58081395809576452649f,-0.82458930278502506894f, -0.56573181078361345353f,-0.83486287498638001026f,0.55045797293660492233f, --0.84485356524970711689f,0.53499761988709715332f,-0.85455798836540042274f, -0.51935599016558975372f,-0.86397285612158669643f,0.50353838372571757542f, --0.87309497841829009079f,0.48755016014843588490f,-0.88192126434835493853f, -0.47139673682599780857f,-0.89044872324475787817f,0.45508358712634389143f, --0.89867446569395392775f,0.43861623853852754751f,-0.90659570451491533483f, -0.42200027079979984812f,-0.91420975570353069095f,0.40524131400498991651f, --0.92151403934204179080f,0.38834504669882657923f,-0.92850608047321547822f, -0.37131719395183770960f,-0.93518350993894761025f,0.35416352542049039931f, --0.94154406518302069529f,0.33688985339222032867f,-0.94758559101774109124f, -0.31950203081601580291f,-0.95330604035419386211f,0.30200594931922802866f, --0.95870347489587148804f,0.28440753721127209896f,-0.96377606579543984022f, -0.26671275747489847641f,-0.96852209427441737777f,0.24892760574572009302f, --0.97293995220556006576f,0.23105810828067133156f,-0.97702814265775439484f, -0.21311031991609141745f,-0.98078528040323043058f,0.19509032201612860891f, --0.98421009238692902521f,0.17700422041214894375f,-0.98730141815785843473f, -0.15885814333386147346f,-0.99005821026229701154f,0.14065823933284954395f, --0.99247953459870996706f,0.12241067519921634832f,-0.99456457073425541537f, -0.10412163387205457254f,-0.99631261218277800129f,0.08579731234444015753f, --0.99772306664419163624f,0.06744391956366417584f,-0.99879545620517240501f, -0.04906767432741796636f,-0.99952941750109314256f,0.03067480317663686534f, --0.99992470183914450299f,0.01227153828572000692f,-0.99998117528260110909f, --0.00613588464915455420f,-0.99969881869620424997f,-0.02454122852291207996f, --0.99907772775264536147f,-0.04293825693494077861f,-0.99811811290014917919f, --0.06132073630220824523f,-0.99682029929116577893f,-0.07968243797142994522f, --0.99518472667219692873f,-0.09801714032956058975f,-0.99321194923479461103f, --0.11631863091190447479f,-0.99090263542778000971f,-0.13458070850712605671f, --0.98825756773074946437f,-0.15279718525844343535f,-0.98527764238894133264f, --0.17096188876030096737f,-0.98196386910955524296f,-0.18906866414980610935f, --0.97831737071962765473f,-0.20711137619221858808f,-0.97433938278557585821f, --0.22508391135979261000f,-0.97003125319454397424f,-0.24298017990326381543f, --0.96539444169768939830f,-0.26079411791527562503f,-0.96043051941556589757f, --0.27851968938505289319f,-0.95514116830577078243f,-0.29615088824362378883f, --0.94952818059303678577f,-0.31368174039889118454f,-0.94359345816196038559f, --0.33110630575987626267f,-0.93733901191257495977f,-0.34841868024943456472f, --0.93076696107898382326f,-0.36561299780477357624f,-0.92387953251128684951f, --0.38268343236508967076f,-0.91667905992104270485f,-0.39962419984564684361f, 
--0.90916798309052249127f,-0.41642956009763693048f,-0.90134884704602202810f, --0.43309381885315184624f,-0.89322430119551532446f,-0.44961132965460665067f, --0.88479709843093790056f,-0.46597649576796595916f,-0.87607009419540660122f, --0.48218377207912266336f,-0.86704624551569287050f,-0.49822766697278153547f, --0.85772861000027211809f,-0.51410274419322155026f,-0.84812034480329723252f, --0.52980362468629460526f,-0.83822470555483818977f,-0.54532498842204613076f, --0.82804504525775590729f,-0.56066157619733592021f,-0.81758481315158371139f, --0.57580819141784533866f,-0.80684755354379944503f,-0.59075970185887394237f, --0.79583690460888356633f,-0.60551104140432543410f,-0.78455659715557524159f, --0.62005721176328920663f,-0.77301045336273710440f,-0.63439328416364526575f, --0.76120238548426188974f,-0.64851440102211233008f,-0.74913639452345925918f, --0.66241577759017178373f,-0.73681656887737001504f,-0.67609270357531581208f, --0.72424708295146700276f,-0.68954054473706682948f,-0.71143219574521665560f, --0.70275474445722507788f,-0.69837624940897302661f,-0.71573082528381848366f, --0.68508366777270035541f,-0.72846439044822519637f,-0.67155895484701866316f, --0.74095112535495888384f,-0.65780669329707874837f,-0.75318679904361240940f, --0.64383154288979149715f,-0.76516726562245895860f,-0.62963823891492687324f, --0.77688846567323255332f,-0.61523159058062726334f,-0.78834642762660589455f, --0.60061647938386930612f,-0.79953726910790479110f,-0.58579785745643908612f, --0.81045719825259465718f,-0.57078074588696736669f,-0.82110251499110464835f, --0.55557023301960217765f,-0.83146961230254523567f,-0.54017147272989274320f, --0.84155497743689855472f,-0.52458968267846928235f,-0.85135519310526486247f, --0.50883014254310732216f,-0.86086693863776708735f,-0.49289819222978420443f, --0.87008699110871134952f,-0.47679923006332214364f,-0.87901222642863341417f, --0.46053871095823989412f,-0.88763962040285404598f,-0.44412214457042975546f, --0.89596624975618488484f,-0.42755509343028247349f,-0.90398929312344311615f, --0.41084317105790418845f,-0.91170603200542976730f,-0.39399204006104820985f, --0.91911385169005765938f,-0.37700741021641820394f,-0.92621024213831137928f, --0.35989503653498794433f,-0.93299279883473895669f,-0.34266071731199487793f, --0.93945922360218969693f,-0.32531029216226331480f,-0.94560732538052116869f, --0.30784964004153508865f,-0.95143502096900833820f,-0.29028467725446244208f, --0.95694033573220882438f,-0.27262135544994886560f,-0.96212140426904158019f, --0.25486565960451434965f,-0.96697647104485218161f,-0.23702360599436766986f, --0.97150389098625167250f,-0.21910124015687010290f,-0.97570213003852845901f, --0.20110463484209206708f,-0.97956976568544051887f,-0.18303988795514095078f, --0.98310548743121628501f,-0.16491312048996975559f,-0.98630809724459866938f, --0.14673047445536230304f,-0.98917650996478090342f,-0.12849811079379358514f, --0.99170975366909952520f,-0.11022220729388330918f,-0.99390697000235606051f, --0.09190895649713282101f,-0.99576741446765981713f,-0.07356456359966735692f, --0.99729045667869020697f,-0.05519524434968971216f,-0.99847558057329477421f, --0.03680722294135933131f,-0.99932238458834943273f,-0.01840672990580516366f, --0.99983058179582340319f,1.00000000000000000000f,0.00000000000000000000f, -0.99729045667869020697f,0.07356456359966742631f,0.98917650996478101444f, -0.14673047445536174793f,0.97570213003852857003f,0.21910124015686979759f, -0.95694033573220882438f,0.29028467725446233105f,0.93299279883473895669f, -0.35989503653498811087f,0.90398929312344333820f,0.42755509343028208491f, 
-0.87008699110871146054f,0.49289819222978403790f,0.83146961230254523567f, -0.55557023301960217765f,0.78834642762660622761f,0.61523159058062681925f, -0.74095112535495921691f,0.67155895484701833009f,0.68954054473706694051f, -0.72424708295146689174f,0.63439328416364548779f,0.77301045336273688235f, -0.57580819141784533866f,0.81758481315158371139f,0.51410274419322166128f, -0.85772861000027211809f,0.44961132965460659516f,0.89322430119551532446f, -0.38268343236508983729f,0.92387953251128673848f,0.31368174039889157312f, -0.94952818059303667475f,0.24298017990326398197f,0.97003125319454397424f, -0.17096188876030135595f,0.98527764238894122162f,0.09801714032956077016f, -0.99518472667219681771f,0.02454122852291226384f,0.99969881869620424997f, --0.04906767432741800800f,0.99879545620517240501f,-0.12241067519921615403f, -0.99247953459870996706f,-0.19509032201612819257f,0.98078528040323043058f, --0.26671275747489830987f,0.96377606579543984022f,-0.33688985339221994009f, -0.94154406518302080631f,-0.40524131400498974998f,0.91420975570353069095f, --0.47139673682599769755f,0.88192126434835504956f,-0.53499761988709704230f, -0.84485356524970722791f,-0.59569930449243335691f,0.80320753148064494287f, --0.65317284295377653347f,0.75720884650648467851f,-0.70710678118654746172f, -0.70710678118654757274f,-0.75720884650648467851f,0.65317284295377664449f, --0.80320753148064483184f,0.59569930449243346793f,-0.84485356524970711689f, -0.53499761988709715332f,-0.88192126434835493853f,0.47139673682599780857f, --0.91420975570353069095f,0.40524131400498991651f,-0.94154406518302069529f, -0.33688985339222032867f,-0.96377606579543984022f,0.26671275747489847641f, --0.98078528040323043058f,0.19509032201612860891f,-0.99247953459870996706f, -0.12241067519921634832f,-0.99879545620517240501f,0.04906767432741796636f, --0.99969881869620424997f,-0.02454122852291207996f,-0.99518472667219692873f, --0.09801714032956058975f,-0.98527764238894133264f,-0.17096188876030096737f, --0.97003125319454397424f,-0.24298017990326381543f,-0.94952818059303678577f, --0.31368174039889118454f,-0.92387953251128684951f,-0.38268343236508967076f, --0.89322430119551532446f,-0.44961132965460665067f,-0.85772861000027211809f, --0.51410274419322155026f,-0.81758481315158371139f,-0.57580819141784533866f, --0.77301045336273710440f,-0.63439328416364526575f,-0.72424708295146700276f, --0.68954054473706682948f,-0.67155895484701866316f,-0.74095112535495888384f, --0.61523159058062726334f,-0.78834642762660589455f,-0.55557023301960217765f, --0.83146961230254523567f,-0.49289819222978420443f,-0.87008699110871134952f, --0.42755509343028247349f,-0.90398929312344311615f,-0.35989503653498794433f, --0.93299279883473895669f,-0.29028467725446244208f,-0.95694033573220882438f, --0.21910124015687010290f,-0.97570213003852845901f,-0.14673047445536230304f, --0.98917650996478090342f,-0.07356456359966735692f,-0.99729045667869020697f, -1.00000000000000000000f,0.00000000000000000000f,0.95694033573220882438f, -0.29028467725446233105f,0.83146961230254523567f,0.55557023301960217765f, -0.63439328416364548779f,0.77301045336273688235f,0.38268343236508983729f, -0.92387953251128673848f,0.09801714032956077016f,0.99518472667219681771f, --0.19509032201612819257f,0.98078528040323043058f,-0.47139673682599769755f, -0.88192126434835504956f,-0.70710678118654746172f,0.70710678118654757274f, --0.88192126434835493853f,0.47139673682599780857f,-0.98078528040323043058f, -0.19509032201612860891f,-0.99518472667219692873f,-0.09801714032956058975f, --0.92387953251128684951f,-0.38268343236508967076f,-0.77301045336273710440f, 
--0.63439328416364526575f,-0.55557023301960217765f,-0.83146961230254523567f, --0.29028467725446244208f,-0.95694033573220882438f,1.00000000000000000000f, -0.00000000000000000000f,0.38268343236508983729f,0.92387953251128673848f, --0.70710678118654746172f,0.70710678118654757274f,-0.92387953251128684951f, --0.38268343236508967076f,}; +1.00000000000000000000f,0.00000000000000000000f,0.99998939037322998047f, +0.00460192607715725899f,0.99995762109756469727f,0.00920375436544418335f, +0.99990469217300415039f,0.01380538847297430038f,0.99983060359954833984f, +0.01840673014521598816f,0.99973529577255249023f,0.02300768159329891205f, +0.99961882829666137695f,0.02760814502835273743f,0.99948120117187500000f, +0.03220802545547485352f,0.99932235479354858398f,0.03680722415447235107f, +0.99914240837097167969f,0.04140564054250717163f,0.99894130229949951172f, +0.04600318148732185364f,0.99871903657913208008f,0.05059975013136863708f, +0.99847555160522460938f,0.05519524589180946350f,0.99821102619171142578f, +0.05978957191109657288f,0.99792528152465820312f,0.06438262760639190674f, +0.99761843681335449219f,0.06897433102130889893f,0.99729043245315551758f, +0.07356456667184829712f,0.99694132804870605469f,0.07815324515104293823f, +0.99657112360000610352f,0.08274026215076446533f,0.99617981910705566406f, +0.08732553571462631226f,0.99576741456985473633f,0.09190895408391952515f, +0.99533390998840332031f,0.09649042785167694092f,0.99487930536270141602f, +0.10106986016035079956f,0.99440366029739379883f,0.10564715415239334106f, +0.99390697479248046875f,0.11022220551967620850f,0.99338918924331665039f, +0.11479492485523223877f,0.99285042285919189453f,0.11936521530151367188f, +0.99229061603546142578f,0.12393297255039215088f,0.99170976877212524414f, +0.12849810719490051270f,0.99110794067382812500f,0.13306052982807159424f, +0.99048507213592529297f,0.13762012124061584473f,0.98984128236770629883f, +0.14217680692672729492f,0.98917651176452636719f,0.14673046767711639404f, +0.98849081993103027344f,0.15128104388713836670f,0.98778414726257324219f, +0.15582840144634246826f,0.98705655336380004883f,0.16037245094776153564f, +0.98630809783935546875f,0.16491311788558959961f,0.98553872108459472656f, +0.16945029795169830322f,0.98474848270416259766f,0.17398387193679809570f, +0.98393744230270385742f,0.17851376533508300781f,0.98310548067092895508f, +0.18303988873958587646f,0.98225271701812744141f,0.18756212294101715088f, +0.98137921094894409180f,0.19208039343357086182f,0.98048484325408935547f, +0.19659459590911865234f,0.97956979274749755859f,0.20110464096069335938f, +0.97863394021987915039f,0.20561040937900543213f,0.97767734527587890625f, +0.21011184155941009521f,0.97670006752014160156f,0.21460881829261779785f, +0.97570210695266723633f,0.21910123527050018311f,0.97468352317810058594f, +0.22358903288841247559f,0.97364425659179687500f,0.22807207703590393066f, +0.97258436679840087891f,0.23255030810832977295f,0.97150391340255737305f, +0.23702360689640045166f,0.97040283679962158203f,0.24149188399314880371f, +0.96928125619888305664f,0.24595504999160766602f,0.96813911199569702148f, +0.25041300058364868164f,0.96697646379470825195f,0.25486564636230468750f, +0.96579337120056152344f,0.25931292772293090820f,0.96458977460861206055f, +0.26375466585159301758f,0.96336579322814941406f,0.26819086074829101562f, +0.96212142705917358398f,0.27262136340141296387f,0.96085661649703979492f, +0.27704608440399169922f,0.95957154035568237305f,0.28146493434906005859f, +0.95826607942581176758f,0.28587782382965087891f,0.95694035291671752930f, 
+0.29028466343879699707f,0.95559436082839965820f,0.29468536376953125000f, +0.95422810316085815430f,0.29907983541488647461f,0.95284163951873779297f, +0.30346795916557312012f,0.95143502950668334961f,0.30784964561462402344f, +0.95000827312469482422f,0.31222480535507202148f,0.94856137037277221680f, +0.31659337878227233887f,0.94709438085556030273f,0.32095524668693542480f, +0.94560730457305908203f,0.32531028985977172852f,0.94410026073455810547f, +0.32965844869613647461f,0.94257318973541259766f,0.33399966359138488770f, +0.94102615118026733398f,0.33833375573158264160f,0.93945920467376708984f, +0.34266072511672973633f,0.93787235021591186523f,0.34698042273521423340f, +0.93626564741134643555f,0.35129275918006896973f,0.93463915586471557617f, +0.35559767484664916992f,0.93299281597137451172f,0.35989505052566528320f, +0.93132668733596801758f,0.36418479681015014648f,0.92964088916778564453f, +0.36846682429313659668f,0.92793542146682739258f,0.37274107336997985840f, +0.92621022462844848633f,0.37700742483139038086f,0.92446547746658325195f, +0.38126575946807861328f,0.92270112037658691406f,0.38551604747772216797f, +0.92091721296310424805f,0.38975816965103149414f,0.91911387443542480469f, +0.39399203658103942871f,0.91729098558425903320f,0.39821755886077880859f, +0.91544872522354125977f,0.40243464708328247070f,0.91358703374862670898f, +0.40664321184158325195f,0.91170603036880493164f,0.41084316372871398926f, +0.90980571508407592773f,0.41503441333770751953f,0.90788608789443969727f, +0.41921690106391906738f,0.90594726800918579102f,0.42339047789573669434f, +0.90398931503295898438f,0.42755508422851562500f,0.90201216936111450195f, +0.43171066045761108398f,0.90001589059829711914f,0.43585708737373352051f, +0.89800059795379638672f,0.43999427556991577148f,0.89596623182296752930f, +0.44412213563919067383f,0.89391297101974487305f,0.44824060797691345215f, +0.89184069633483886719f,0.45234957337379455566f,0.88974958658218383789f, +0.45644897222518920898f,0.88763964176177978516f,0.46053871512413024902f, +0.88551086187362670898f,0.46461868286132812500f,0.88336336612701416016f, +0.46868881583213806152f,0.88119709491729736328f,0.47274902462959289551f, +0.87901222705841064453f,0.47679921984672546387f,0.87680870294570922852f, +0.48083934187889099121f,0.87458664178848266602f,0.48486924171447753906f, +0.87234604358673095703f,0.48888888955116271973f,0.87008696794509887695f, +0.49289819598197937012f,0.86780947446823120117f,0.49689704179763793945f, +0.86551362276077270508f,0.50088536739349365234f,0.86319941282272338867f, +0.50486308336257934570f,0.86086696386337280273f,0.50883013010025024414f, +0.85851621627807617188f,0.51278638839721679688f,0.85614734888076782227f, +0.51673179864883422852f,0.85376030206680297852f,0.52066624164581298828f, +0.85135519504547119141f,0.52458965778350830078f,0.84893202781677246094f, +0.52850198745727539062f,0.84649091958999633789f,0.53240311145782470703f, +0.84403187036514282227f,0.53629297018051147461f,0.84155499935150146484f, +0.54017144441604614258f,0.83906024694442749023f,0.54403853416442871094f, +0.83654773235321044922f,0.54789406061172485352f,0.83401751518249511719f, +0.55173796415328979492f,0.83146959543228149414f,0.55557024478912353516f, +0.82890409231185913086f,0.55939072370529174805f,0.82632106542587280273f, +0.56319934129714965820f,0.82372051477432250977f,0.56699603796005249023f, +0.82110249996185302734f,0.57078075408935546875f,0.81846714019775390625f, +0.57455337047576904297f,0.81581443548202514648f,0.57831376791000366211f, +0.81314438581466674805f,0.58206200599670410156f,0.81045717000961303711f, 
+0.58579784631729125977f,0.80775284767150878906f,0.58952128887176513672f, +0.80503135919570922852f,0.59323227405548095703f,0.80229282379150390625f, +0.59693068265914916992f,0.79953724145889282227f,0.60061645507812500000f, +0.79676479101181030273f,0.60428953170776367188f,0.79397547245025634766f, +0.60794979333877563477f,0.79116934537887573242f,0.61159718036651611328f, +0.78834640979766845703f,0.61523157358169555664f,0.78550684452056884766f, +0.61885297298431396484f,0.78265058994293212891f,0.62246125936508178711f, +0.77977776527404785156f,0.62605637311935424805f,0.77688848972320556641f, +0.62963825464248657227f,0.77398270368576049805f,0.63320678472518920898f, +0.77106052637100219727f,0.63676184415817260742f,0.76812201738357543945f, +0.64030349254608154297f,0.76516723632812500000f,0.64383155107498168945f, +0.76219630241394042969f,0.64734596014022827148f,0.75920921564102172852f, +0.65084666013717651367f,0.75620597600936889648f,0.65433359146118164062f, +0.75318682193756103516f,0.65780669450759887695f,0.75015163421630859375f, +0.66126585006713867188f,0.74710059165954589844f,0.66471099853515625000f, +0.74403375387191772461f,0.66814202070236206055f,0.74095112085342407227f, +0.67155897617340087891f,0.73785281181335449219f,0.67496162652969360352f, +0.73473888635635375977f,0.67835003137588500977f,0.73160940408706665039f, +0.68172407150268554688f,0.72846436500549316406f,0.68508368730545043945f, +0.72530394792556762695f,0.68842875957489013672f,0.72212821245193481445f, +0.69175922870635986328f,0.71893709897994995117f,0.69507509469985961914f, +0.71573084592819213867f,0.69837623834609985352f,0.71250939369201660156f, +0.70166260004043579102f,0.70927280187606811523f,0.70493406057357788086f, +0.70602124929428100586f,0.70819061994552612305f,0.70275473594665527344f, +0.71143221855163574219f,0.69947332143783569336f,0.71465867757797241211f, +0.69617712497711181641f,0.71787005662918090820f,0.69286614656448364258f, +0.72106617689132690430f,0.68954056501388549805f,0.72424709796905517578f, +0.68620032072067260742f,0.72741264104843139648f,0.68284553289413452148f, +0.73056274652481079102f,0.67947632074356079102f,0.73369741439819335938f, +0.67609268426895141602f,0.73681658506393432617f,0.67269474267959594727f, +0.73992007970809936523f,0.66928261518478393555f,0.74300795793533325195f, +0.66585624217987060547f,0.74608010053634643555f,0.66241580247879028320f, +0.74913638830184936523f,0.65896129608154296875f,0.75217682123184204102f, +0.65549284219741821289f,0.75520139932632446289f,0.65201056003570556641f, +0.75820988416671752930f,0.64851438999176025391f,0.76120239496231079102f, +0.64500451087951660156f,0.76417875289916992188f,0.64148104190826416016f, +0.76713889837265014648f,0.63794392347335815430f,0.77008283138275146484f, +0.63439327478408813477f,0.77301043272018432617f,0.63082921504974365234f, +0.77592170238494873047f,0.62725180387496948242f,0.77881652116775512695f, +0.62366110086441040039f,0.78169482946395874023f,0.62005722522735595703f, +0.78455656766891479492f,0.61644017696380615234f,0.78740173578262329102f, +0.61281007528305053711f,0.79023021459579467773f,0.60916703939437866211f, +0.79304194450378417969f,0.60551106929779052734f,0.79583692550659179688f, +0.60184222459793090820f,0.79861497879028320312f,0.59816068410873413086f, +0.80137616395950317383f,0.59446650743484497070f,0.80412036180496215820f, +0.59075969457626342773f,0.80684757232666015625f,0.58704036474227905273f, +0.80955761671066284180f,0.58330863714218139648f,0.81225061416625976562f, +0.57956457138061523438f,0.81492632627487182617f,0.57580816745758056641f, 
+0.81758481264114379883f,0.57203960418701171875f,0.82022595405578613281f, +0.56825894117355346680f,0.82284981012344360352f,0.56446623802185058594f, +0.82545614242553710938f,0.56066155433654785156f,0.82804507017135620117f, +0.55684500932693481445f,0.83061641454696655273f,0.55301672220230102539f, +0.83317017555236816406f,0.54917663335800170898f,0.83570629358291625977f, +0.54532498121261596680f,0.83822470903396606445f,0.54146176576614379883f, +0.84072536230087280273f,0.53758704662322998047f,0.84320825338363647461f, +0.53370100259780883789f,0.84567326307296752930f,0.52980363368988037109f, +0.84812033176422119141f,0.52589499950408935547f,0.85054945945739746094f, +0.52197527885437011719f,0.85296058654785156250f,0.51804453134536743164f, +0.85535365343093872070f,0.51410275697708129883f,0.85772860050201416016f, +0.51015007495880126953f,0.86008536815643310547f,0.50618666410446166992f, +0.86242395639419555664f,0.50221246480941772461f,0.86474424600601196289f, +0.49822765588760375977f,0.86704623699188232422f,0.49423229694366455078f, +0.86932986974716186523f,0.49022647738456726074f,0.87159508466720581055f, +0.48621028661727905273f,0.87384182214736938477f,0.48218378424644470215f, +0.87607008218765258789f,0.47814705967903137207f,0.87827980518341064453f, +0.47410020232200622559f,0.88047087192535400391f,0.47004333138465881348f, +0.88264334201812744141f,0.46597650647163391113f,0.88479709625244140625f, +0.46189978718757629395f,0.88693213462829589844f,0.45781329274177551270f, +0.88904833793640136719f,0.45371711254119873047f,0.89114576578140258789f, +0.44961133599281311035f,0.89322429895401000977f,0.44549602270126342773f, +0.89528393745422363281f,0.44137126207351684570f,0.89732456207275390625f, +0.43723717331886291504f,0.89934623241424560547f,0.43309381604194641113f, +0.90134882926940917969f,0.42894127964973449707f,0.90333235263824462891f, +0.42477968335151672363f,0.90529674291610717773f,0.42060908675193786621f, +0.90724200010299682617f,0.41642954945564270020f,0.90916800498962402344f, +0.41224122047424316406f,0.91107475757598876953f,0.40804415941238403320f, +0.91296219825744628906f,0.40383845567703247070f,0.91483032703399658203f, +0.39962419867515563965f,0.91667908430099487305f,0.39540147781372070312f, +0.91850841045379638672f,0.39117038249969482422f,0.92031830549240112305f, +0.38693100214004516602f,0.92210865020751953125f,0.38268342614173889160f, +0.92387950420379638672f,0.37842774391174316406f,0.92563080787658691406f, +0.37416407465934753418f,0.92736250162124633789f,0.36989244818687438965f, +0.92907458543777465820f,0.36561298370361328125f,0.93076694011688232422f, +0.36132580041885375977f,0.93243962526321411133f,0.35703095793724060059f, +0.93409252166748046875f,0.35272854566574096680f,0.93572568893432617188f, +0.34841868281364440918f,0.93733900785446166992f,0.34410142898559570312f, +0.93893247842788696289f,0.33977687358856201172f,0.94050604104995727539f, +0.33544513583183288574f,0.94205975532531738281f,0.33110630512237548828f, +0.94359344244003295898f,0.32676044106483459473f,0.94510722160339355469f, +0.32240769267082214355f,0.94660091400146484375f,0.31804808974266052246f, +0.94807457923889160156f,0.31368175148963928223f,0.94952815771102905273f, +0.30930876731872558594f,0.95096164941787719727f,0.30492922663688659668f, +0.95237499475479125977f,0.30054324865341186523f,0.95376819372177124023f, +0.29615089297294616699f,0.95514118671417236328f,0.29175224900245666504f, +0.95649391412734985352f,0.28734746575355529785f,0.95782643556594848633f, +0.28293657302856445312f,0.95913863182067871094f,0.27851969003677368164f, 
+0.96043050289154052734f,0.27409690618515014648f,0.96170204877853393555f, +0.26966831088066101074f,0.96295326948165893555f,0.26523402333259582520f, +0.96418404579162597656f,0.26079410314559936523f,0.96539443731307983398f, +0.25634866952896118164f,0.96658438444137573242f,0.25189781188964843750f, +0.96775382757186889648f,0.24744161963462829590f,0.96890282630920410156f, +0.24298018217086791992f,0.97003126144409179688f,0.23851358890533447266f, +0.97113913297653198242f,0.23404195904731750488f,0.97222650051116943359f, +0.22956536710262298584f,0.97329324483871459961f,0.22508391737937927246f, +0.97433936595916748047f,0.22059768438339233398f,0.97536486387252807617f, +0.21610680222511291504f,0.97636973857879638672f,0.21161133050918579102f, +0.97735387086868286133f,0.20711137354373931885f,0.97831737995147705078f, +0.20260703563690185547f,0.97926014661788940430f,0.19809840619564056396f, +0.98018211126327514648f,0.19358558952808380127f,0.98108339309692382812f, +0.18906866014003753662f,0.98196387290954589844f,0.18454773724079132080f, +0.98282355070114135742f,0.18002289533615112305f,0.98366242647171020508f, +0.17549425363540649414f,0.98448044061660766602f,0.17096188664436340332f, +0.98527765274047851562f,0.16642589867115020752f,0.98605394363403320312f, +0.16188639402389526367f,0.98680937290191650391f,0.15734346210956573486f, +0.98754394054412841797f,0.15279719233512878418f,0.98825758695602416992f, +0.14824767410755157471f,0.98895025253295898438f,0.14369502663612365723f, +0.98962199687957763672f,0.13913933932781219482f,0.99027281999588012695f, +0.13458070158958435059f,0.99090266227722167969f,0.13001921772956848145f, +0.99151146411895751953f,0.12545497715473175049f,0.99209928512573242188f, +0.12088808417320251465f,0.99266612529754638672f,0.11631862819194793701f, +0.99321192502975463867f,0.11174671351909637451f,0.99373674392700195312f, +0.10717242211103439331f,0.99424046277999877930f,0.10259586572647094727f, +0.99472314119338989258f,0.09801714122295379639f,0.99518471956253051758f, +0.09343633800745010376f,0.99562525749206542969f,0.08885355293750762939f, +0.99604469537734985352f,0.08426889032125473022f,0.99644303321838378906f, +0.07968243956565856934f,0.99682027101516723633f,0.07509429752826690674f, +0.99717640876770019531f,0.07050457596778869629f,0.99751144647598266602f, +0.06591334939002990723f,0.99782532453536987305f,0.06132073700428009033f, +0.99811810255050659180f,0.05672682076692581177f,0.99838972091674804688f, +0.05213170498609542847f,0.99864023923873901367f,0.04753548279404640198f, +0.99886953830718994141f,0.04293825849890708923f,0.99907773733139038086f, +0.03834012150764465332f,0.99926477670669555664f,0.03374117240309715271f, +0.99943059682846069336f,0.02914150804281234741f,0.99957531690597534180f, +0.02454122900962829590f,0.99969881772994995117f,0.01994042843580245972f, +0.99980115890502929688f,0.01533920597285032272f,0.99988234043121337891f, +0.01073765940964221954f,0.99994236230850219727f,0.00613588467240333557f, +0.99998116493225097656f,0.00153398013208061457f,0.99999880790710449219f, +-0.00306795677170157433f,0.99999529123306274414f,-0.00766982883214950562f, +0.99997061491012573242f,-0.01227153837680816650f,0.99992471933364868164f, +-0.01687298715114593506f,0.99985766410827636719f,-0.02147408016026020050f, +0.99976938962936401367f,-0.02607471868395805359f,0.99966001510620117188f, +-0.03067480400204658508f,0.99952942132949829102f,-0.03527423739433288574f, +0.99937766790390014648f,-0.03987292572855949402f,0.99920475482940673828f, 
+-0.04447077214717864990f,0.99901068210601806641f,-0.04906767606735229492f, +0.99879544973373413086f,-0.05366353690624237061f,0.99855905771255493164f, +-0.05825826525688171387f,0.99830156564712524414f,-0.06285175681114196777f, +0.99802285432815551758f,-0.06744392216205596924f,0.99772304296493530273f, +-0.07203464955091476440f,0.99740213155746459961f,-0.07662386447191238403f, +0.99706006050109863281f,-0.08121144771575927734f,0.99669688940048217773f, +-0.08579730987548828125f,0.99631261825561523438f,-0.09038136154413223267f, +0.99590724706649780273f,-0.09496349841356277466f,0.99548077583312988281f, +-0.09954361617565155029f,0.99503320455551147461f,-0.10412163287401199341f, +0.99456459283828735352f,-0.10869744420051574707f,0.99407488107681274414f, +-0.11327095329761505127f,0.99356412887573242188f,-0.11784206330776214600f, +0.99303233623504638672f,-0.12241067737340927124f,0.99247956275939941406f, +-0.12697669863700866699f,0.99190568923950195312f,-0.13154003024101257324f, +0.99131083488464355469f,-0.13610057532787322998f,0.99069499969482421875f, +-0.14065824449062347412f,0.99005818367004394531f,-0.14521291851997375488f, +0.98940044641494750977f,-0.14976453781127929688f,0.98872166872024536133f, +-0.15431296825408935547f,0.98802202939987182617f,-0.15885815024375915527f, +0.98730140924453735352f,-0.16339994966983795166f,0.98655992746353149414f, +-0.16793829202651977539f,0.98579752445220947266f,-0.17247308790683746338f, +0.98501425981521606445f,-0.17700421810150146484f,0.98421007394790649414f, +-0.18153160810470581055f,0.98338508605957031250f,-0.18605515360832214355f, +0.98253929615020751953f,-0.19057475030422210693f,0.98167270421981811523f, +-0.19509032368659973145f,0.98078525066375732422f,-0.19960175454616546631f, +0.97987711429595947266f,-0.20410896837711334229f,0.97894817590713500977f, +-0.20861184597015380859f,0.97799849510192871094f,-0.21311031281948089600f, +0.97702813148498535156f,-0.21760427951812744141f,0.97603708505630493164f, +-0.22209362685680389404f,0.97502535581588745117f,-0.22657826542854309082f, +0.97399294376373291016f,-0.23105810582637786865f,0.97293996810913085938f, +-0.23553305864334106445f,0.97186630964279174805f,-0.24000301957130432129f, +0.97077214717864990234f,-0.24446789920330047607f,0.96965736150741577148f, +-0.24892760813236236572f,0.96852207183837890625f,-0.25338202714920043945f, +0.96736627817153930664f,-0.25783109664916992188f,0.96618998050689697266f, +-0.26227471232414245605f,0.96499323844909667969f,-0.26671275496482849121f, +0.96377605199813842773f,-0.27114516496658325195f,0.96253848075866699219f, +-0.27557182312011718750f,0.96128046512603759766f,-0.27999264001846313477f, +0.96000212430953979492f,-0.28440752625465393066f,0.95870345830917358398f, +-0.28881642222404479980f,0.95738452672958374023f,-0.29321914911270141602f, +0.95604526996612548828f,-0.29761570692062377930f,0.95468574762344360352f, +-0.30200594663619995117f,0.95330601930618286133f,-0.30638980865478515625f, +0.95190614461898803711f,-0.31076714396476745605f,0.95048606395721435547f, +-0.31513792276382446289f,0.94904589653015136719f,-0.31950202584266662598f, +0.94758558273315429688f,-0.32385936379432678223f,0.94610524177551269531f, +-0.32820984721183776855f,0.94460481405258178711f,-0.33255335688591003418f, +0.94308441877365112305f,-0.33688986301422119141f,0.94154405593872070312f, +-0.34121921658515930176f,0.93998372554779052734f,-0.34554132819175720215f, +0.93840354681015014648f,-0.34985613822937011719f,0.93680346012115478516f, +-0.35416352748870849609f,0.93518352508544921875f,-0.35846340656280517578f, 
+0.93354380130767822266f,-0.36275571584701538086f,0.93188428878784179688f, +-0.36704033613204956055f,0.93020504713058471680f,-0.37131720781326293945f, +0.92850607633590698242f,-0.37558618187904357910f,0.92678749561309814453f, +-0.37984719872474670410f,0.92504924535751342773f,-0.38410019874572753906f, +0.92329144477844238281f,-0.38834503293037414551f,0.92151403427124023438f, +-0.39258167147636413574f,0.91971713304519653320f,-0.39680999517440795898f, +0.91790080070495605469f,-0.40102988481521606445f,0.91606497764587402344f, +-0.40524131059646606445f,0.91420978307723999023f,-0.40944415330886840820f, +0.91233515739440917969f,-0.41363832354545593262f,0.91044127941131591797f, +-0.41782370209693908691f,0.90852808952331542969f,-0.42200025916099548340f, +0.90659570693969726562f,-0.42616787552833557129f,0.90464407205581665039f, +-0.43032649159431457520f,0.90267330408096313477f,-0.43447595834732055664f, +0.90068340301513671875f,-0.43861624598503112793f,0.89867448806762695312f, +-0.44274723529815673828f,0.89664649963378906250f,-0.44686883687973022461f, +0.89459949731826782227f,-0.45098099112510681152f,0.89253354072570800781f, +-0.45508357882499694824f,0.89044874906539916992f,-0.45917654037475585938f, +0.88834506273269653320f,-0.46325978636741638184f,0.88622254133224487305f, +-0.46733319759368896484f,0.88408124446868896484f,-0.47139674425125122070f, +0.88192129135131835938f,-0.47545027732849121094f,0.87974262237548828125f, +-0.47949376702308654785f,0.87754529714584350586f,-0.48352706432342529297f, +0.87532937526702880859f,-0.48755016922950744629f,0.87309497594833374023f, +-0.49156290292739868164f,0.87084203958511352539f,-0.49556526541709899902f, +0.86857068538665771484f,-0.49955710768699645996f,0.86628097295761108398f, +-0.50353837013244628906f,0.86397284269332885742f,-0.50750899314880371094f, +0.86164647340774536133f,-0.51146882772445678711f,0.85930180549621582031f, +-0.51541787385940551758f,0.85693895816802978516f,-0.51935601234436035156f, +0.85455799102783203125f,-0.52328312397003173828f,0.85215890407562255859f, +-0.52719914913177490234f,0.84974175691604614258f,-0.53110402822494506836f, +0.84730660915374755859f,-0.53499764204025268555f,0.84485357999801635742f, +-0.53887993097305297852f,0.84238260984420776367f,-0.54275077581405639648f, +0.83989381790161132812f,-0.54661017656326293945f,0.83738720417022705078f, +-0.55045795440673828125f,0.83486288785934448242f,-0.55429410934448242188f, +0.83232086896896362305f,-0.55811852216720581055f,0.82976120710372924805f, +-0.56193113327026367188f,0.82718402147293090820f,-0.56573182344436645508f, +0.82458931207656860352f,-0.56952053308486938477f,0.82197713851928710938f, +-0.57329714298248291016f,0.81934750080108642578f,-0.57706165313720703125f, +0.81670057773590087891f,-0.58081394433975219727f,0.81403630971908569336f, +-0.58455395698547363281f,0.81135487556457519531f,-0.58828157186508178711f, +0.80865615606307983398f,-0.59199666976928710938f,0.80594038963317871094f, +-0.59569931030273437500f,0.80320751667022705078f,-0.59938931465148925781f, +0.80045765638351440430f,-0.60306662321090698242f,0.79769086837768554688f, +-0.60673111677169799805f,0.79490715265274047852f,-0.61038279533386230469f, +0.79210656881332397461f,-0.61402153968811035156f,0.78928923606872558594f, +-0.61764729022979736328f,0.78645521402359008789f,-0.62125998735427856445f, +0.78360450267791748047f,-0.62485951185226440430f,0.78073722124099731445f, +-0.62844574451446533203f,0.77785342931747436523f,-0.63201874494552612305f, +0.77495312690734863281f,-0.63557833433151245117f,0.77203637361526489258f, 
+-0.63912445306777954102f,0.76910334825515747070f,-0.64265704154968261719f, +0.76615399122238159180f,-0.64617604017257690430f,0.76318842172622680664f, +-0.64968132972717285156f,0.76020669937133789062f,-0.65317285060882568359f, +0.75720882415771484375f,-0.65665054321289062500f,0.75419497489929199219f, +-0.66011434793472290039f,0.75116515159606933594f,-0.66356414556503295898f, +0.74811935424804687500f,-0.66699993610382080078f,0.74505776166915893555f, +-0.67042154073715209961f,0.74198043346405029297f,-0.67382901906967163086f, +0.73888731002807617188f,-0.67722219228744506836f,0.73577857017517089844f, +-0.68060100078582763672f,0.73265427350997924805f,-0.68396538496017456055f, +0.72951442003250122070f,-0.68731534481048583984f,0.72635912895202636719f, +-0.69065070152282714844f,0.72318845987319946289f,-0.69397145509719848633f, +0.72000253200531005859f,-0.69727748632431030273f,0.71680128574371337891f, +-0.70056879520416259766f,0.71358484029769897461f,-0.70384526252746582031f, +0.71035337448120117188f,-0.70710676908493041992f,0.70710676908493041992f, +-0.71035337448120117188f,0.70384526252746582031f,-0.71358484029769897461f, +0.70056879520416259766f,-0.71680128574371337891f,0.69727748632431030273f, +-0.72000253200531005859f,0.69397145509719848633f,-0.72318845987319946289f, +0.69065070152282714844f,-0.72635912895202636719f,0.68731534481048583984f, +-0.72951442003250122070f,0.68396538496017456055f,-0.73265427350997924805f, +0.68060100078582763672f,-0.73577857017517089844f,0.67722219228744506836f, +-0.73888731002807617188f,0.67382901906967163086f,-0.74198043346405029297f, +0.67042154073715209961f,-0.74505776166915893555f,0.66699993610382080078f, +-0.74811935424804687500f,0.66356414556503295898f,-0.75116515159606933594f, +0.66011434793472290039f,-0.75419497489929199219f,0.65665054321289062500f, +-0.75720882415771484375f,0.65317285060882568359f,-0.76020669937133789062f, +0.64968132972717285156f,-0.76318842172622680664f,0.64617604017257690430f, +-0.76615399122238159180f,0.64265704154968261719f,-0.76910334825515747070f, +0.63912445306777954102f,-0.77203637361526489258f,0.63557833433151245117f, +-0.77495312690734863281f,0.63201874494552612305f,-0.77785342931747436523f, +0.62844574451446533203f,-0.78073722124099731445f,0.62485951185226440430f, +-0.78360450267791748047f,0.62125998735427856445f,-0.78645521402359008789f, +0.61764729022979736328f,-0.78928923606872558594f,0.61402153968811035156f, +-0.79210656881332397461f,0.61038279533386230469f,-0.79490715265274047852f, +0.60673111677169799805f,-0.79769086837768554688f,0.60306662321090698242f, +-0.80045765638351440430f,0.59938931465148925781f,-0.80320751667022705078f, +0.59569931030273437500f,-0.80594038963317871094f,0.59199666976928710938f, +-0.80865615606307983398f,0.58828157186508178711f,-0.81135487556457519531f, +0.58455395698547363281f,-0.81403630971908569336f,0.58081394433975219727f, +-0.81670057773590087891f,0.57706165313720703125f,-0.81934750080108642578f, +0.57329714298248291016f,-0.82197713851928710938f,0.56952053308486938477f, +-0.82458931207656860352f,0.56573182344436645508f,-0.82718402147293090820f, +0.56193113327026367188f,-0.82976120710372924805f,0.55811852216720581055f, +-0.83232086896896362305f,0.55429410934448242188f,-0.83486288785934448242f, +0.55045795440673828125f,-0.83738720417022705078f,0.54661017656326293945f, +-0.83989381790161132812f,0.54275077581405639648f,-0.84238260984420776367f, +0.53887993097305297852f,-0.84485357999801635742f,0.53499764204025268555f, +-0.84730660915374755859f,0.53110402822494506836f,-0.84974175691604614258f, 
+0.52719914913177490234f,-0.85215890407562255859f,0.52328312397003173828f, +-0.85455799102783203125f,0.51935601234436035156f,-0.85693895816802978516f, +0.51541787385940551758f,-0.85930180549621582031f,0.51146882772445678711f, +-0.86164647340774536133f,0.50750899314880371094f,-0.86397284269332885742f, +0.50353837013244628906f,-0.86628097295761108398f,0.49955710768699645996f, +-0.86857068538665771484f,0.49556526541709899902f,-0.87084203958511352539f, +0.49156290292739868164f,-0.87309497594833374023f,0.48755016922950744629f, +-0.87532937526702880859f,0.48352706432342529297f,-0.87754529714584350586f, +0.47949376702308654785f,-0.87974262237548828125f,0.47545027732849121094f, +-0.88192129135131835938f,0.47139674425125122070f,-0.88408124446868896484f, +0.46733319759368896484f,-0.88622254133224487305f,0.46325978636741638184f, +-0.88834506273269653320f,0.45917654037475585938f,-0.89044874906539916992f, +0.45508357882499694824f,-0.89253354072570800781f,0.45098099112510681152f, +-0.89459949731826782227f,0.44686883687973022461f,-0.89664649963378906250f, +0.44274723529815673828f,-0.89867448806762695312f,0.43861624598503112793f, +-0.90068340301513671875f,0.43447595834732055664f,-0.90267330408096313477f, +0.43032649159431457520f,-0.90464407205581665039f,0.42616787552833557129f, +-0.90659570693969726562f,0.42200025916099548340f,-0.90852808952331542969f, +0.41782370209693908691f,-0.91044127941131591797f,0.41363832354545593262f, +-0.91233515739440917969f,0.40944415330886840820f,-0.91420978307723999023f, +0.40524131059646606445f,-0.91606497764587402344f,0.40102988481521606445f, +-0.91790080070495605469f,0.39680999517440795898f,-0.91971713304519653320f, +0.39258167147636413574f,-0.92151403427124023438f,0.38834503293037414551f, +-0.92329144477844238281f,0.38410019874572753906f,-0.92504924535751342773f, +0.37984719872474670410f,-0.92678749561309814453f,0.37558618187904357910f, +-0.92850607633590698242f,0.37131720781326293945f,-0.93020504713058471680f, +0.36704033613204956055f,-0.93188428878784179688f,0.36275571584701538086f, +-0.93354380130767822266f,0.35846340656280517578f,-0.93518352508544921875f, +0.35416352748870849609f,-0.93680346012115478516f,0.34985613822937011719f, +-0.93840354681015014648f,0.34554132819175720215f,-0.93998372554779052734f, +0.34121921658515930176f,-0.94154405593872070312f,0.33688986301422119141f, +-0.94308441877365112305f,0.33255335688591003418f,-0.94460481405258178711f, +0.32820984721183776855f,-0.94610524177551269531f,0.32385936379432678223f, +-0.94758558273315429688f,0.31950202584266662598f,-0.94904589653015136719f, +0.31513792276382446289f,-0.95048606395721435547f,0.31076714396476745605f, +-0.95190614461898803711f,0.30638980865478515625f,-0.95330601930618286133f, +0.30200594663619995117f,-0.95468574762344360352f,0.29761570692062377930f, +-0.95604526996612548828f,0.29321914911270141602f,-0.95738452672958374023f, +0.28881642222404479980f,-0.95870345830917358398f,0.28440752625465393066f, +-0.96000212430953979492f,0.27999264001846313477f,-0.96128046512603759766f, +0.27557182312011718750f,-0.96253848075866699219f,0.27114516496658325195f, +-0.96377605199813842773f,0.26671275496482849121f,-0.96499323844909667969f, +0.26227471232414245605f,-0.96618998050689697266f,0.25783109664916992188f, +-0.96736627817153930664f,0.25338202714920043945f,-0.96852207183837890625f, +0.24892760813236236572f,-0.96965736150741577148f,0.24446789920330047607f, +-0.97077214717864990234f,0.24000301957130432129f,-0.97186630964279174805f, +0.23553305864334106445f,-0.97293996810913085938f,0.23105810582637786865f, 
+-0.97399294376373291016f,0.22657826542854309082f,-0.97502535581588745117f, +0.22209362685680389404f,-0.97603708505630493164f,0.21760427951812744141f, +-0.97702813148498535156f,0.21311031281948089600f,-0.97799849510192871094f, +0.20861184597015380859f,-0.97894817590713500977f,0.20410896837711334229f, +-0.97987711429595947266f,0.19960175454616546631f,-0.98078525066375732422f, +0.19509032368659973145f,-0.98167270421981811523f,0.19057475030422210693f, +-0.98253929615020751953f,0.18605515360832214355f,-0.98338508605957031250f, +0.18153160810470581055f,-0.98421007394790649414f,0.17700421810150146484f, +-0.98501425981521606445f,0.17247308790683746338f,-0.98579752445220947266f, +0.16793829202651977539f,-0.98655992746353149414f,0.16339994966983795166f, +-0.98730140924453735352f,0.15885815024375915527f,-0.98802202939987182617f, +0.15431296825408935547f,-0.98872166872024536133f,0.14976453781127929688f, +-0.98940044641494750977f,0.14521291851997375488f,-0.99005818367004394531f, +0.14065824449062347412f,-0.99069499969482421875f,0.13610057532787322998f, +-0.99131083488464355469f,0.13154003024101257324f,-0.99190568923950195312f, +0.12697669863700866699f,-0.99247956275939941406f,0.12241067737340927124f, +-0.99303233623504638672f,0.11784206330776214600f,-0.99356412887573242188f, +0.11327095329761505127f,-0.99407488107681274414f,0.10869744420051574707f, +-0.99456459283828735352f,0.10412163287401199341f,-0.99503320455551147461f, +0.09954361617565155029f,-0.99548077583312988281f,0.09496349841356277466f, +-0.99590724706649780273f,0.09038136154413223267f,-0.99631261825561523438f, +0.08579730987548828125f,-0.99669688940048217773f,0.08121144771575927734f, +-0.99706006050109863281f,0.07662386447191238403f,-0.99740213155746459961f, +0.07203464955091476440f,-0.99772304296493530273f,0.06744392216205596924f, +-0.99802285432815551758f,0.06285175681114196777f,-0.99830156564712524414f, +0.05825826525688171387f,-0.99855905771255493164f,0.05366353690624237061f, +-0.99879544973373413086f,0.04906767606735229492f,-0.99901068210601806641f, +0.04447077214717864990f,-0.99920475482940673828f,0.03987292572855949402f, +-0.99937766790390014648f,0.03527423739433288574f,-0.99952942132949829102f, +0.03067480400204658508f,-0.99966001510620117188f,0.02607471868395805359f, +-0.99976938962936401367f,0.02147408016026020050f,-0.99985766410827636719f, +0.01687298715114593506f,-0.99992471933364868164f,0.01227153837680816650f, +-0.99997061491012573242f,0.00766982883214950562f,-0.99999529123306274414f, +0.00306795677170157433f,-0.99999880790710449219f,-0.00153398013208061457f, +-0.99998116493225097656f,-0.00613588467240333557f,-0.99994236230850219727f, +-0.01073765940964221954f,-0.99988234043121337891f,-0.01533920597285032272f, +-0.99980115890502929688f,-0.01994042843580245972f,-0.99969881772994995117f, +-0.02454122900962829590f,-0.99957531690597534180f,-0.02914150804281234741f, +-0.99943059682846069336f,-0.03374117240309715271f,-0.99926477670669555664f, +-0.03834012150764465332f,-0.99907773733139038086f,-0.04293825849890708923f, +-0.99886953830718994141f,-0.04753548279404640198f,-0.99864023923873901367f, +-0.05213170498609542847f,-0.99838972091674804688f,-0.05672682076692581177f, +-0.99811810255050659180f,-0.06132073700428009033f,-0.99782532453536987305f, +-0.06591334939002990723f,-0.99751144647598266602f,-0.07050457596778869629f, +-0.99717640876770019531f,-0.07509429752826690674f,-0.99682027101516723633f, +-0.07968243956565856934f,-0.99644303321838378906f,-0.08426889032125473022f, 
+-0.99604469537734985352f,-0.08885355293750762939f,-0.99562525749206542969f, +-0.09343633800745010376f,-0.99518471956253051758f,-0.09801714122295379639f, +-0.99472314119338989258f,-0.10259586572647094727f,-0.99424046277999877930f, +-0.10717242211103439331f,-0.99373674392700195312f,-0.11174671351909637451f, +-0.99321192502975463867f,-0.11631862819194793701f,-0.99266612529754638672f, +-0.12088808417320251465f,-0.99209928512573242188f,-0.12545497715473175049f, +-0.99151146411895751953f,-0.13001921772956848145f,-0.99090266227722167969f, +-0.13458070158958435059f,-0.99027281999588012695f,-0.13913933932781219482f, +-0.98962199687957763672f,-0.14369502663612365723f,-0.98895025253295898438f, +-0.14824767410755157471f,-0.98825758695602416992f,-0.15279719233512878418f, +-0.98754394054412841797f,-0.15734346210956573486f,-0.98680937290191650391f, +-0.16188639402389526367f,-0.98605394363403320312f,-0.16642589867115020752f, +-0.98527765274047851562f,-0.17096188664436340332f,-0.98448044061660766602f, +-0.17549425363540649414f,-0.98366242647171020508f,-0.18002289533615112305f, +-0.98282355070114135742f,-0.18454773724079132080f,-0.98196387290954589844f, +-0.18906866014003753662f,-0.98108339309692382812f,-0.19358558952808380127f, +-0.98018211126327514648f,-0.19809840619564056396f,-0.97926014661788940430f, +-0.20260703563690185547f,-0.97831737995147705078f,-0.20711137354373931885f, +-0.97735387086868286133f,-0.21161133050918579102f,-0.97636973857879638672f, +-0.21610680222511291504f,-0.97536486387252807617f,-0.22059768438339233398f, +-0.97433936595916748047f,-0.22508391737937927246f,-0.97329324483871459961f, +-0.22956536710262298584f,-0.97222650051116943359f,-0.23404195904731750488f, +-0.97113913297653198242f,-0.23851358890533447266f,-0.97003126144409179688f, +-0.24298018217086791992f,-0.96890282630920410156f,-0.24744161963462829590f, +-0.96775382757186889648f,-0.25189781188964843750f,-0.96658438444137573242f, +-0.25634866952896118164f,-0.96539443731307983398f,-0.26079410314559936523f, +-0.96418404579162597656f,-0.26523402333259582520f,-0.96295326948165893555f, +-0.26966831088066101074f,-0.96170204877853393555f,-0.27409690618515014648f, +-0.96043050289154052734f,-0.27851969003677368164f,-0.95913863182067871094f, +-0.28293657302856445312f,-0.95782643556594848633f,-0.28734746575355529785f, +-0.95649391412734985352f,-0.29175224900245666504f,-0.95514118671417236328f, +-0.29615089297294616699f,-0.95376819372177124023f,-0.30054324865341186523f, +-0.95237499475479125977f,-0.30492922663688659668f,-0.95096164941787719727f, +-0.30930876731872558594f,-0.94952815771102905273f,-0.31368175148963928223f, +-0.94807457923889160156f,-0.31804808974266052246f,-0.94660091400146484375f, +-0.32240769267082214355f,-0.94510722160339355469f,-0.32676044106483459473f, +-0.94359344244003295898f,-0.33110630512237548828f,-0.94205975532531738281f, +-0.33544513583183288574f,-0.94050604104995727539f,-0.33977687358856201172f, +-0.93893247842788696289f,-0.34410142898559570312f,-0.93733900785446166992f, +-0.34841868281364440918f,-0.93572568893432617188f,-0.35272854566574096680f, +-0.93409252166748046875f,-0.35703095793724060059f,-0.93243962526321411133f, +-0.36132580041885375977f,-0.93076694011688232422f,-0.36561298370361328125f, +-0.92907458543777465820f,-0.36989244818687438965f,-0.92736250162124633789f, +-0.37416407465934753418f,-0.92563080787658691406f,-0.37842774391174316406f, +-0.92387950420379638672f,-0.38268342614173889160f,-0.92210865020751953125f, +-0.38693100214004516602f,-0.92031830549240112305f,-0.39117038249969482422f, 
+-0.91850841045379638672f,-0.39540147781372070312f,-0.91667908430099487305f, +-0.39962419867515563965f,-0.91483032703399658203f,-0.40383845567703247070f, +-0.91296219825744628906f,-0.40804415941238403320f,-0.91107475757598876953f, +-0.41224122047424316406f,-0.90916800498962402344f,-0.41642954945564270020f, +-0.90724200010299682617f,-0.42060908675193786621f,-0.90529674291610717773f, +-0.42477968335151672363f,-0.90333235263824462891f,-0.42894127964973449707f, +-0.90134882926940917969f,-0.43309381604194641113f,-0.89934623241424560547f, +-0.43723717331886291504f,-0.89732456207275390625f,-0.44137126207351684570f, +-0.89528393745422363281f,-0.44549602270126342773f,-0.89322429895401000977f, +-0.44961133599281311035f,-0.89114576578140258789f,-0.45371711254119873047f, +-0.88904833793640136719f,-0.45781329274177551270f,-0.88693213462829589844f, +-0.46189978718757629395f,-0.88479709625244140625f,-0.46597650647163391113f, +-0.88264334201812744141f,-0.47004333138465881348f,-0.88047087192535400391f, +-0.47410020232200622559f,-0.87827980518341064453f,-0.47814705967903137207f, +-0.87607008218765258789f,-0.48218378424644470215f,-0.87384182214736938477f, +-0.48621028661727905273f,-0.87159508466720581055f,-0.49022647738456726074f, +-0.86932986974716186523f,-0.49423229694366455078f,-0.86704623699188232422f, +-0.49822765588760375977f,-0.86474424600601196289f,-0.50221246480941772461f, +-0.86242395639419555664f,-0.50618666410446166992f,-0.86008536815643310547f, +-0.51015007495880126953f,-0.85772860050201416016f,-0.51410275697708129883f, +-0.85535365343093872070f,-0.51804453134536743164f,-0.85296058654785156250f, +-0.52197527885437011719f,-0.85054945945739746094f,-0.52589499950408935547f, +-0.84812033176422119141f,-0.52980363368988037109f,-0.84567326307296752930f, +-0.53370100259780883789f,-0.84320825338363647461f,-0.53758704662322998047f, +-0.84072536230087280273f,-0.54146176576614379883f,-0.83822470903396606445f, +-0.54532498121261596680f,-0.83570629358291625977f,-0.54917663335800170898f, +-0.83317017555236816406f,-0.55301672220230102539f,-0.83061641454696655273f, +-0.55684500932693481445f,-0.82804507017135620117f,-0.56066155433654785156f, +-0.82545614242553710938f,-0.56446623802185058594f,-0.82284981012344360352f, +-0.56825894117355346680f,-0.82022595405578613281f,-0.57203960418701171875f, +-0.81758481264114379883f,-0.57580816745758056641f,-0.81492632627487182617f, +-0.57956457138061523438f,-0.81225061416625976562f,-0.58330863714218139648f, +-0.80955761671066284180f,-0.58704036474227905273f,-0.80684757232666015625f, +-0.59075969457626342773f,-0.80412036180496215820f,-0.59446650743484497070f, +-0.80137616395950317383f,-0.59816068410873413086f,-0.79861497879028320312f, +-0.60184222459793090820f,-0.79583692550659179688f,-0.60551106929779052734f, +-0.79304194450378417969f,-0.60916703939437866211f,-0.79023021459579467773f, +-0.61281007528305053711f,-0.78740173578262329102f,-0.61644017696380615234f, +-0.78455656766891479492f,-0.62005722522735595703f,-0.78169482946395874023f, +-0.62366110086441040039f,-0.77881652116775512695f,-0.62725180387496948242f, +-0.77592170238494873047f,-0.63082921504974365234f,-0.77301043272018432617f, +-0.63439327478408813477f,-0.77008283138275146484f,-0.63794392347335815430f, +-0.76713889837265014648f,-0.64148104190826416016f,-0.76417875289916992188f, +-0.64500451087951660156f,-0.76120239496231079102f,-0.64851438999176025391f, +-0.75820988416671752930f,-0.65201056003570556641f,-0.75520139932632446289f, +-0.65549284219741821289f,-0.75217682123184204102f,-0.65896129608154296875f, 
+-0.74913638830184936523f,-0.66241580247879028320f,-0.74608010053634643555f, +-0.66585624217987060547f,-0.74300795793533325195f,-0.66928261518478393555f, +-0.73992007970809936523f,-0.67269474267959594727f,-0.73681658506393432617f, +-0.67609268426895141602f,-0.73369741439819335938f,-0.67947632074356079102f, +-0.73056274652481079102f,-0.68284553289413452148f,-0.72741264104843139648f, +-0.68620032072067260742f,-0.72424709796905517578f,-0.68954056501388549805f, +-0.72106617689132690430f,-0.69286614656448364258f,-0.71787005662918090820f, +-0.69617712497711181641f,-0.71465867757797241211f,-0.69947332143783569336f, +-0.71143221855163574219f,-0.70275473594665527344f,-0.70819061994552612305f, +-0.70602124929428100586f,-0.70493406057357788086f,-0.70927280187606811523f, +-0.70166260004043579102f,-0.71250939369201660156f,-0.69837623834609985352f, +-0.71573084592819213867f,-0.69507509469985961914f,-0.71893709897994995117f, +-0.69175922870635986328f,-0.72212821245193481445f,-0.68842875957489013672f, +-0.72530394792556762695f,-0.68508368730545043945f,-0.72846436500549316406f, +-0.68172407150268554688f,-0.73160940408706665039f,-0.67835003137588500977f, +-0.73473888635635375977f,-0.67496162652969360352f,-0.73785281181335449219f, +-0.67155897617340087891f,-0.74095112085342407227f,-0.66814202070236206055f, +-0.74403375387191772461f,-0.66471099853515625000f,-0.74710059165954589844f, +-0.66126585006713867188f,-0.75015163421630859375f,-0.65780669450759887695f, +-0.75318682193756103516f,-0.65433359146118164062f,-0.75620597600936889648f, +-0.65084666013717651367f,-0.75920921564102172852f,-0.64734596014022827148f, +-0.76219630241394042969f,-0.64383155107498168945f,-0.76516723632812500000f, +-0.64030349254608154297f,-0.76812201738357543945f,-0.63676184415817260742f, +-0.77106052637100219727f,-0.63320678472518920898f,-0.77398270368576049805f, +-0.62963825464248657227f,-0.77688848972320556641f,-0.62605637311935424805f, +-0.77977776527404785156f,-0.62246125936508178711f,-0.78265058994293212891f, +-0.61885297298431396484f,-0.78550684452056884766f,-0.61523157358169555664f, +-0.78834640979766845703f,-0.61159718036651611328f,-0.79116934537887573242f, +-0.60794979333877563477f,-0.79397547245025634766f,-0.60428953170776367188f, +-0.79676479101181030273f,-0.60061645507812500000f,-0.79953724145889282227f, +-0.59693068265914916992f,-0.80229282379150390625f,-0.59323227405548095703f, +-0.80503135919570922852f,-0.58952128887176513672f,-0.80775284767150878906f, +-0.58579784631729125977f,-0.81045717000961303711f,-0.58206200599670410156f, +-0.81314438581466674805f,-0.57831376791000366211f,-0.81581443548202514648f, +-0.57455337047576904297f,-0.81846714019775390625f,-0.57078075408935546875f, +-0.82110249996185302734f,-0.56699603796005249023f,-0.82372051477432250977f, +-0.56319934129714965820f,-0.82632106542587280273f,-0.55939072370529174805f, +-0.82890409231185913086f,-0.55557024478912353516f,-0.83146959543228149414f, +-0.55173796415328979492f,-0.83401751518249511719f,-0.54789406061172485352f, +-0.83654773235321044922f,-0.54403853416442871094f,-0.83906024694442749023f, +-0.54017144441604614258f,-0.84155499935150146484f,-0.53629297018051147461f, +-0.84403187036514282227f,-0.53240311145782470703f,-0.84649091958999633789f, +-0.52850198745727539062f,-0.84893202781677246094f,-0.52458965778350830078f, +-0.85135519504547119141f,-0.52066624164581298828f,-0.85376030206680297852f, +-0.51673179864883422852f,-0.85614734888076782227f,-0.51278638839721679688f, +-0.85851621627807617188f,-0.50883013010025024414f,-0.86086696386337280273f, 
+-0.50486308336257934570f,-0.86319941282272338867f,-0.50088536739349365234f, +-0.86551362276077270508f,-0.49689704179763793945f,-0.86780947446823120117f, +-0.49289819598197937012f,-0.87008696794509887695f,-0.48888888955116271973f, +-0.87234604358673095703f,-0.48486924171447753906f,-0.87458664178848266602f, +-0.48083934187889099121f,-0.87680870294570922852f,-0.47679921984672546387f, +-0.87901222705841064453f,-0.47274902462959289551f,-0.88119709491729736328f, +-0.46868881583213806152f,-0.88336336612701416016f,-0.46461868286132812500f, +-0.88551086187362670898f,-0.46053871512413024902f,-0.88763964176177978516f, +-0.45644897222518920898f,-0.88974958658218383789f,-0.45234957337379455566f, +-0.89184069633483886719f,-0.44824060797691345215f,-0.89391297101974487305f, +-0.44412213563919067383f,-0.89596623182296752930f,-0.43999427556991577148f, +-0.89800059795379638672f,-0.43585708737373352051f,-0.90001589059829711914f, +-0.43171066045761108398f,-0.90201216936111450195f,-0.42755508422851562500f, +-0.90398931503295898438f,-0.42339047789573669434f,-0.90594726800918579102f, +-0.41921690106391906738f,-0.90788608789443969727f,-0.41503441333770751953f, +-0.90980571508407592773f,-0.41084316372871398926f,-0.91170603036880493164f, +-0.40664321184158325195f,-0.91358703374862670898f,-0.40243464708328247070f, +-0.91544872522354125977f,-0.39821755886077880859f,-0.91729098558425903320f, +-0.39399203658103942871f,-0.91911387443542480469f,-0.38975816965103149414f, +-0.92091721296310424805f,-0.38551604747772216797f,-0.92270112037658691406f, +-0.38126575946807861328f,-0.92446547746658325195f,-0.37700742483139038086f, +-0.92621022462844848633f,-0.37274107336997985840f,-0.92793542146682739258f, +-0.36846682429313659668f,-0.92964088916778564453f,-0.36418479681015014648f, +-0.93132668733596801758f,-0.35989505052566528320f,-0.93299281597137451172f, +-0.35559767484664916992f,-0.93463915586471557617f,-0.35129275918006896973f, +-0.93626564741134643555f,-0.34698042273521423340f,-0.93787235021591186523f, +-0.34266072511672973633f,-0.93945920467376708984f,-0.33833375573158264160f, +-0.94102615118026733398f,-0.33399966359138488770f,-0.94257318973541259766f, +-0.32965844869613647461f,-0.94410026073455810547f,-0.32531028985977172852f, +-0.94560730457305908203f,-0.32095524668693542480f,-0.94709438085556030273f, +-0.31659337878227233887f,-0.94856137037277221680f,-0.31222480535507202148f, +-0.95000827312469482422f,-0.30784964561462402344f,-0.95143502950668334961f, +-0.30346795916557312012f,-0.95284163951873779297f,-0.29907983541488647461f, +-0.95422810316085815430f,-0.29468536376953125000f,-0.95559436082839965820f, +-0.29028466343879699707f,-0.95694035291671752930f,-0.28587782382965087891f, +-0.95826607942581176758f,-0.28146493434906005859f,-0.95957154035568237305f, +-0.27704608440399169922f,-0.96085661649703979492f,-0.27262136340141296387f, +-0.96212142705917358398f,-0.26819086074829101562f,-0.96336579322814941406f, +-0.26375466585159301758f,-0.96458977460861206055f,-0.25931292772293090820f, +-0.96579337120056152344f,-0.25486564636230468750f,-0.96697646379470825195f, +-0.25041300058364868164f,-0.96813911199569702148f,-0.24595504999160766602f, +-0.96928125619888305664f,-0.24149188399314880371f,-0.97040283679962158203f, +-0.23702360689640045166f,-0.97150391340255737305f,-0.23255030810832977295f, +-0.97258436679840087891f,-0.22807207703590393066f,-0.97364425659179687500f, +-0.22358903288841247559f,-0.97468352317810058594f,-0.21910123527050018311f, +-0.97570210695266723633f,-0.21460881829261779785f,-0.97670006752014160156f, 
+-0.21011184155941009521f,-0.97767734527587890625f,-0.20561040937900543213f, +-0.97863394021987915039f,-0.20110464096069335938f,-0.97956979274749755859f, +-0.19659459590911865234f,-0.98048484325408935547f,-0.19208039343357086182f, +-0.98137921094894409180f,-0.18756212294101715088f,-0.98225271701812744141f, +-0.18303988873958587646f,-0.98310548067092895508f,-0.17851376533508300781f, +-0.98393744230270385742f,-0.17398387193679809570f,-0.98474848270416259766f, +-0.16945029795169830322f,-0.98553872108459472656f,-0.16491311788558959961f, +-0.98630809783935546875f,-0.16037245094776153564f,-0.98705655336380004883f, +-0.15582840144634246826f,-0.98778414726257324219f,-0.15128104388713836670f, +-0.98849081993103027344f,-0.14673046767711639404f,-0.98917651176452636719f, +-0.14217680692672729492f,-0.98984128236770629883f,-0.13762012124061584473f, +-0.99048507213592529297f,-0.13306052982807159424f,-0.99110794067382812500f, +-0.12849810719490051270f,-0.99170976877212524414f,-0.12393297255039215088f, +-0.99229061603546142578f,-0.11936521530151367188f,-0.99285042285919189453f, +-0.11479492485523223877f,-0.99338918924331665039f,-0.11022220551967620850f, +-0.99390697479248046875f,-0.10564715415239334106f,-0.99440366029739379883f, +-0.10106986016035079956f,-0.99487930536270141602f,-0.09649042785167694092f, +-0.99533390998840332031f,-0.09190895408391952515f,-0.99576741456985473633f, +-0.08732553571462631226f,-0.99617981910705566406f,-0.08274026215076446533f, +-0.99657112360000610352f,-0.07815324515104293823f,-0.99694132804870605469f, +-0.07356456667184829712f,-0.99729043245315551758f,-0.06897433102130889893f, +-0.99761843681335449219f,-0.06438262760639190674f,-0.99792528152465820312f, +-0.05978957191109657288f,-0.99821102619171142578f,-0.05519524589180946350f, +-0.99847555160522460938f,-0.05059975013136863708f,-0.99871903657913208008f, +-0.04600318148732185364f,-0.99894130229949951172f,-0.04140564054250717163f, +-0.99914240837097167969f,-0.03680722415447235107f,-0.99932235479354858398f, +-0.03220802545547485352f,-0.99948120117187500000f,-0.02760814502835273743f, +-0.99961882829666137695f,-0.02300768159329891205f,-0.99973529577255249023f, +-0.01840673014521598816f,-0.99983060359954833984f,-0.01380538847297430038f, +-0.99990469217300415039f,-0.00920375436544418335f,-0.99995762109756469727f, +-0.00460192607715725899f,-0.99998939037322998047f,1.00000000000000000000f, +0.00000000000000000000f,0.99983060359954833984f,0.01840673014521598816f, +0.99932235479354858398f,0.03680722415447235107f,0.99847555160522460938f, +0.05519524589180946350f,0.99729043245315551758f,0.07356456667184829712f, +0.99576741456985473633f,0.09190895408391952515f,0.99390697479248046875f, +0.11022220551967620850f,0.99170976877212524414f,0.12849810719490051270f, +0.98917651176452636719f,0.14673046767711639404f,0.98630809783935546875f, +0.16491311788558959961f,0.98310548067092895508f,0.18303988873958587646f, +0.97956979274749755859f,0.20110464096069335938f,0.97570210695266723633f, +0.21910123527050018311f,0.97150391340255737305f,0.23702360689640045166f, +0.96697646379470825195f,0.25486564636230468750f,0.96212142705917358398f, +0.27262136340141296387f,0.95694035291671752930f,0.29028466343879699707f, +0.95143502950668334961f,0.30784964561462402344f,0.94560730457305908203f, +0.32531028985977172852f,0.93945920467376708984f,0.34266072511672973633f, +0.93299281597137451172f,0.35989505052566528320f,0.92621022462844848633f, +0.37700742483139038086f,0.91911387443542480469f,0.39399203658103942871f, 
+0.91170603036880493164f,0.41084316372871398926f,0.90398931503295898438f, +0.42755508422851562500f,0.89596623182296752930f,0.44412213563919067383f, +0.88763964176177978516f,0.46053871512413024902f,0.87901222705841064453f, +0.47679921984672546387f,0.87008696794509887695f,0.49289819598197937012f, +0.86086696386337280273f,0.50883013010025024414f,0.85135519504547119141f, +0.52458965778350830078f,0.84155499935150146484f,0.54017144441604614258f, +0.83146959543228149414f,0.55557024478912353516f,0.82110249996185302734f, +0.57078075408935546875f,0.81045717000961303711f,0.58579784631729125977f, +0.79953724145889282227f,0.60061645507812500000f,0.78834640979766845703f, +0.61523157358169555664f,0.77688848972320556641f,0.62963825464248657227f, +0.76516723632812500000f,0.64383155107498168945f,0.75318682193756103516f, +0.65780669450759887695f,0.74095112085342407227f,0.67155897617340087891f, +0.72846436500549316406f,0.68508368730545043945f,0.71573084592819213867f, +0.69837623834609985352f,0.70275473594665527344f,0.71143221855163574219f, +0.68954056501388549805f,0.72424709796905517578f,0.67609268426895141602f, +0.73681658506393432617f,0.66241580247879028320f,0.74913638830184936523f, +0.64851438999176025391f,0.76120239496231079102f,0.63439327478408813477f, +0.77301043272018432617f,0.62005722522735595703f,0.78455656766891479492f, +0.60551106929779052734f,0.79583692550659179688f,0.59075969457626342773f, +0.80684757232666015625f,0.57580816745758056641f,0.81758481264114379883f, +0.56066155433654785156f,0.82804507017135620117f,0.54532498121261596680f, +0.83822470903396606445f,0.52980363368988037109f,0.84812033176422119141f, +0.51410275697708129883f,0.85772860050201416016f,0.49822765588760375977f, +0.86704623699188232422f,0.48218378424644470215f,0.87607008218765258789f, +0.46597650647163391113f,0.88479709625244140625f,0.44961133599281311035f, +0.89322429895401000977f,0.43309381604194641113f,0.90134882926940917969f, +0.41642954945564270020f,0.90916800498962402344f,0.39962419867515563965f, +0.91667908430099487305f,0.38268342614173889160f,0.92387950420379638672f, +0.36561298370361328125f,0.93076694011688232422f,0.34841868281364440918f, +0.93733900785446166992f,0.33110630512237548828f,0.94359344244003295898f, +0.31368175148963928223f,0.94952815771102905273f,0.29615089297294616699f, +0.95514118671417236328f,0.27851969003677368164f,0.96043050289154052734f, +0.26079410314559936523f,0.96539443731307983398f,0.24298018217086791992f, +0.97003126144409179688f,0.22508391737937927246f,0.97433936595916748047f, +0.20711137354373931885f,0.97831737995147705078f,0.18906866014003753662f, +0.98196387290954589844f,0.17096188664436340332f,0.98527765274047851562f, +0.15279719233512878418f,0.98825758695602416992f,0.13458070158958435059f, +0.99090266227722167969f,0.11631862819194793701f,0.99321192502975463867f, +0.09801714122295379639f,0.99518471956253051758f,0.07968243956565856934f, +0.99682027101516723633f,0.06132073700428009033f,0.99811810255050659180f, +0.04293825849890708923f,0.99907773733139038086f,0.02454122900962829590f, +0.99969881772994995117f,0.00613588467240333557f,0.99998116493225097656f, +-0.01227153837680816650f,0.99992471933364868164f,-0.03067480400204658508f, +0.99952942132949829102f,-0.04906767606735229492f,0.99879544973373413086f, +-0.06744392216205596924f,0.99772304296493530273f,-0.08579730987548828125f, +0.99631261825561523438f,-0.10412163287401199341f,0.99456459283828735352f, +-0.12241067737340927124f,0.99247956275939941406f,-0.14065824449062347412f, 
+0.99005818367004394531f,-0.15885815024375915527f,0.98730140924453735352f, +-0.17700421810150146484f,0.98421007394790649414f,-0.19509032368659973145f, +0.98078525066375732422f,-0.21311031281948089600f,0.97702813148498535156f, +-0.23105810582637786865f,0.97293996810913085938f,-0.24892760813236236572f, +0.96852207183837890625f,-0.26671275496482849121f,0.96377605199813842773f, +-0.28440752625465393066f,0.95870345830917358398f,-0.30200594663619995117f, +0.95330601930618286133f,-0.31950202584266662598f,0.94758558273315429688f, +-0.33688986301422119141f,0.94154405593872070312f,-0.35416352748870849609f, +0.93518352508544921875f,-0.37131720781326293945f,0.92850607633590698242f, +-0.38834503293037414551f,0.92151403427124023438f,-0.40524131059646606445f, +0.91420978307723999023f,-0.42200025916099548340f,0.90659570693969726562f, +-0.43861624598503112793f,0.89867448806762695312f,-0.45508357882499694824f, +0.89044874906539916992f,-0.47139674425125122070f,0.88192129135131835938f, +-0.48755016922950744629f,0.87309497594833374023f,-0.50353837013244628906f, +0.86397284269332885742f,-0.51935601234436035156f,0.85455799102783203125f, +-0.53499764204025268555f,0.84485357999801635742f,-0.55045795440673828125f, +0.83486288785934448242f,-0.56573182344436645508f,0.82458931207656860352f, +-0.58081394433975219727f,0.81403630971908569336f,-0.59569931030273437500f, +0.80320751667022705078f,-0.61038279533386230469f,0.79210656881332397461f, +-0.62485951185226440430f,0.78073722124099731445f,-0.63912445306777954102f, +0.76910334825515747070f,-0.65317285060882568359f,0.75720882415771484375f, +-0.66699993610382080078f,0.74505776166915893555f,-0.68060100078582763672f, +0.73265427350997924805f,-0.69397145509719848633f,0.72000253200531005859f, +-0.70710676908493041992f,0.70710676908493041992f,-0.72000253200531005859f, +0.69397145509719848633f,-0.73265427350997924805f,0.68060100078582763672f, +-0.74505776166915893555f,0.66699993610382080078f,-0.75720882415771484375f, +0.65317285060882568359f,-0.76910334825515747070f,0.63912445306777954102f, +-0.78073722124099731445f,0.62485951185226440430f,-0.79210656881332397461f, +0.61038279533386230469f,-0.80320751667022705078f,0.59569931030273437500f, +-0.81403630971908569336f,0.58081394433975219727f,-0.82458931207656860352f, +0.56573182344436645508f,-0.83486288785934448242f,0.55045795440673828125f, +-0.84485357999801635742f,0.53499764204025268555f,-0.85455799102783203125f, +0.51935601234436035156f,-0.86397284269332885742f,0.50353837013244628906f, +-0.87309497594833374023f,0.48755016922950744629f,-0.88192129135131835938f, +0.47139674425125122070f,-0.89044874906539916992f,0.45508357882499694824f, +-0.89867448806762695312f,0.43861624598503112793f,-0.90659570693969726562f, +0.42200025916099548340f,-0.91420978307723999023f,0.40524131059646606445f, +-0.92151403427124023438f,0.38834503293037414551f,-0.92850607633590698242f, +0.37131720781326293945f,-0.93518352508544921875f,0.35416352748870849609f, +-0.94154405593872070312f,0.33688986301422119141f,-0.94758558273315429688f, +0.31950202584266662598f,-0.95330601930618286133f,0.30200594663619995117f, +-0.95870345830917358398f,0.28440752625465393066f,-0.96377605199813842773f, +0.26671275496482849121f,-0.96852207183837890625f,0.24892760813236236572f, +-0.97293996810913085938f,0.23105810582637786865f,-0.97702813148498535156f, +0.21311031281948089600f,-0.98078525066375732422f,0.19509032368659973145f, +-0.98421007394790649414f,0.17700421810150146484f,-0.98730140924453735352f, +0.15885815024375915527f,-0.99005818367004394531f,0.14065824449062347412f, 
+-0.99247956275939941406f,0.12241067737340927124f,-0.99456459283828735352f, +0.10412163287401199341f,-0.99631261825561523438f,0.08579730987548828125f, +-0.99772304296493530273f,0.06744392216205596924f,-0.99879544973373413086f, +0.04906767606735229492f,-0.99952942132949829102f,0.03067480400204658508f, +-0.99992471933364868164f,0.01227153837680816650f,-0.99998116493225097656f, +-0.00613588467240333557f,-0.99969881772994995117f,-0.02454122900962829590f, +-0.99907773733139038086f,-0.04293825849890708923f,-0.99811810255050659180f, +-0.06132073700428009033f,-0.99682027101516723633f,-0.07968243956565856934f, +-0.99518471956253051758f,-0.09801714122295379639f,-0.99321192502975463867f, +-0.11631862819194793701f,-0.99090266227722167969f,-0.13458070158958435059f, +-0.98825758695602416992f,-0.15279719233512878418f,-0.98527765274047851562f, +-0.17096188664436340332f,-0.98196387290954589844f,-0.18906866014003753662f, +-0.97831737995147705078f,-0.20711137354373931885f,-0.97433936595916748047f, +-0.22508391737937927246f,-0.97003126144409179688f,-0.24298018217086791992f, +-0.96539443731307983398f,-0.26079410314559936523f,-0.96043050289154052734f, +-0.27851969003677368164f,-0.95514118671417236328f,-0.29615089297294616699f, +-0.94952815771102905273f,-0.31368175148963928223f,-0.94359344244003295898f, +-0.33110630512237548828f,-0.93733900785446166992f,-0.34841868281364440918f, +-0.93076694011688232422f,-0.36561298370361328125f,-0.92387950420379638672f, +-0.38268342614173889160f,-0.91667908430099487305f,-0.39962419867515563965f, +-0.90916800498962402344f,-0.41642954945564270020f,-0.90134882926940917969f, +-0.43309381604194641113f,-0.89322429895401000977f,-0.44961133599281311035f, +-0.88479709625244140625f,-0.46597650647163391113f,-0.87607008218765258789f, +-0.48218378424644470215f,-0.86704623699188232422f,-0.49822765588760375977f, +-0.85772860050201416016f,-0.51410275697708129883f,-0.84812033176422119141f, +-0.52980363368988037109f,-0.83822470903396606445f,-0.54532498121261596680f, +-0.82804507017135620117f,-0.56066155433654785156f,-0.81758481264114379883f, +-0.57580816745758056641f,-0.80684757232666015625f,-0.59075969457626342773f, +-0.79583692550659179688f,-0.60551106929779052734f,-0.78455656766891479492f, +-0.62005722522735595703f,-0.77301043272018432617f,-0.63439327478408813477f, +-0.76120239496231079102f,-0.64851438999176025391f,-0.74913638830184936523f, +-0.66241580247879028320f,-0.73681658506393432617f,-0.67609268426895141602f, +-0.72424709796905517578f,-0.68954056501388549805f,-0.71143221855163574219f, +-0.70275473594665527344f,-0.69837623834609985352f,-0.71573084592819213867f, +-0.68508368730545043945f,-0.72846436500549316406f,-0.67155897617340087891f, +-0.74095112085342407227f,-0.65780669450759887695f,-0.75318682193756103516f, +-0.64383155107498168945f,-0.76516723632812500000f,-0.62963825464248657227f, +-0.77688848972320556641f,-0.61523157358169555664f,-0.78834640979766845703f, +-0.60061645507812500000f,-0.79953724145889282227f,-0.58579784631729125977f, +-0.81045717000961303711f,-0.57078075408935546875f,-0.82110249996185302734f, +-0.55557024478912353516f,-0.83146959543228149414f,-0.54017144441604614258f, +-0.84155499935150146484f,-0.52458965778350830078f,-0.85135519504547119141f, +-0.50883013010025024414f,-0.86086696386337280273f,-0.49289819598197937012f, +-0.87008696794509887695f,-0.47679921984672546387f,-0.87901222705841064453f, +-0.46053871512413024902f,-0.88763964176177978516f,-0.44412213563919067383f, +-0.89596623182296752930f,-0.42755508422851562500f,-0.90398931503295898438f, 
+-0.41084316372871398926f,-0.91170603036880493164f,-0.39399203658103942871f, +-0.91911387443542480469f,-0.37700742483139038086f,-0.92621022462844848633f, +-0.35989505052566528320f,-0.93299281597137451172f,-0.34266072511672973633f, +-0.93945920467376708984f,-0.32531028985977172852f,-0.94560730457305908203f, +-0.30784964561462402344f,-0.95143502950668334961f,-0.29028466343879699707f, +-0.95694035291671752930f,-0.27262136340141296387f,-0.96212142705917358398f, +-0.25486564636230468750f,-0.96697646379470825195f,-0.23702360689640045166f, +-0.97150391340255737305f,-0.21910123527050018311f,-0.97570210695266723633f, +-0.20110464096069335938f,-0.97956979274749755859f,-0.18303988873958587646f, +-0.98310548067092895508f,-0.16491311788558959961f,-0.98630809783935546875f, +-0.14673046767711639404f,-0.98917651176452636719f,-0.12849810719490051270f, +-0.99170976877212524414f,-0.11022220551967620850f,-0.99390697479248046875f, +-0.09190895408391952515f,-0.99576741456985473633f,-0.07356456667184829712f, +-0.99729043245315551758f,-0.05519524589180946350f,-0.99847555160522460938f, +-0.03680722415447235107f,-0.99932235479354858398f,-0.01840673014521598816f, +-0.99983060359954833984f,1.00000000000000000000f,0.00000000000000000000f, +0.99729043245315551758f,0.07356456667184829712f,0.98917651176452636719f, +0.14673046767711639404f,0.97570210695266723633f,0.21910123527050018311f, +0.95694035291671752930f,0.29028466343879699707f,0.93299281597137451172f, +0.35989505052566528320f,0.90398931503295898438f,0.42755508422851562500f, +0.87008696794509887695f,0.49289819598197937012f,0.83146959543228149414f, +0.55557024478912353516f,0.78834640979766845703f,0.61523157358169555664f, +0.74095112085342407227f,0.67155897617340087891f,0.68954056501388549805f, +0.72424709796905517578f,0.63439327478408813477f,0.77301043272018432617f, +0.57580816745758056641f,0.81758481264114379883f,0.51410275697708129883f, +0.85772860050201416016f,0.44961133599281311035f,0.89322429895401000977f, +0.38268342614173889160f,0.92387950420379638672f,0.31368175148963928223f, +0.94952815771102905273f,0.24298018217086791992f,0.97003126144409179688f, +0.17096188664436340332f,0.98527765274047851562f,0.09801714122295379639f, +0.99518471956253051758f,0.02454122900962829590f,0.99969881772994995117f, +-0.04906767606735229492f,0.99879544973373413086f,-0.12241067737340927124f, +0.99247956275939941406f,-0.19509032368659973145f,0.98078525066375732422f, +-0.26671275496482849121f,0.96377605199813842773f,-0.33688986301422119141f, +0.94154405593872070312f,-0.40524131059646606445f,0.91420978307723999023f, +-0.47139674425125122070f,0.88192129135131835938f,-0.53499764204025268555f, +0.84485357999801635742f,-0.59569931030273437500f,0.80320751667022705078f, +-0.65317285060882568359f,0.75720882415771484375f,-0.70710676908493041992f, +0.70710676908493041992f,-0.75720882415771484375f,0.65317285060882568359f, +-0.80320751667022705078f,0.59569931030273437500f,-0.84485357999801635742f, +0.53499764204025268555f,-0.88192129135131835938f,0.47139674425125122070f, +-0.91420978307723999023f,0.40524131059646606445f,-0.94154405593872070312f, +0.33688986301422119141f,-0.96377605199813842773f,0.26671275496482849121f, +-0.98078525066375732422f,0.19509032368659973145f,-0.99247956275939941406f, +0.12241067737340927124f,-0.99879544973373413086f,0.04906767606735229492f, +-0.99969881772994995117f,-0.02454122900962829590f,-0.99518471956253051758f, +-0.09801714122295379639f,-0.98527765274047851562f,-0.17096188664436340332f, +-0.97003126144409179688f,-0.24298018217086791992f,-0.94952815771102905273f, 
+-0.31368175148963928223f,-0.92387950420379638672f,-0.38268342614173889160f, +-0.89322429895401000977f,-0.44961133599281311035f,-0.85772860050201416016f, +-0.51410275697708129883f,-0.81758481264114379883f,-0.57580816745758056641f, +-0.77301043272018432617f,-0.63439327478408813477f,-0.72424709796905517578f, +-0.68954056501388549805f,-0.67155897617340087891f,-0.74095112085342407227f, +-0.61523157358169555664f,-0.78834640979766845703f,-0.55557024478912353516f, +-0.83146959543228149414f,-0.49289819598197937012f,-0.87008696794509887695f, +-0.42755508422851562500f,-0.90398931503295898438f,-0.35989505052566528320f, +-0.93299281597137451172f,-0.29028466343879699707f,-0.95694035291671752930f, +-0.21910123527050018311f,-0.97570210695266723633f,-0.14673046767711639404f, +-0.98917651176452636719f,-0.07356456667184829712f,-0.99729043245315551758f, +1.00000000000000000000f,0.00000000000000000000f,0.95694035291671752930f, +0.29028466343879699707f,0.83146959543228149414f,0.55557024478912353516f, +0.63439327478408813477f,0.77301043272018432617f,0.38268342614173889160f, +0.92387950420379638672f,0.09801714122295379639f,0.99518471956253051758f, +-0.19509032368659973145f,0.98078525066375732422f,-0.47139674425125122070f, +0.88192129135131835938f,-0.70710676908493041992f,0.70710676908493041992f, +-0.88192129135131835938f,0.47139674425125122070f,-0.98078525066375732422f, +0.19509032368659973145f,-0.99518471956253051758f,-0.09801714122295379639f, +-0.92387950420379638672f,-0.38268342614173889160f,-0.77301043272018432617f, +-0.63439327478408813477f,-0.55557024478912353516f,-0.83146959543228149414f, +-0.29028466343879699707f,-0.95694035291671752930f,1.00000000000000000000f, +0.00000000000000000000f,0.38268342614173889160f,0.92387950420379638672f, +-0.70710676908493041992f,0.70710676908493041992f,-0.92387950420379638672f, +-0.38268342614173889160f,}; #endif @@ -3766,7 +3769,8 @@ float32_t rearranged_twiddle_stride3_4096_f32[2728]={ #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ -#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) @@ -5431,7 +5435,8 @@ q31_t rearranged_twiddle_stride3_4096_q31[2728]={ #endif /* defined(ARM_MATH_MVEI) */ -#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_mve_tables_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_mve_tables_f16.c index d3f2d34..56e3acd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_mve_tables_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/CommonTables/arm_mve_tables_f16.c @@ -6,12 +6,13 @@ * Description: common tables like fft twiddle factors, Bitreverse, reciprocal etc * used for MVE implementation only * - * $Date: 14. April 2020 + * @version V1.10.0 + * @date 04 October 2021 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. 
All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -28,10 +29,12 @@ * limitations under the License. */ -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h" + #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types_f16.h" + #if defined(ARM_FLOAT16_SUPPORTED) + #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) @@ -48,22 +51,22 @@ uint32_t rearranged_twiddle_tab_stride3_arr_16_f16[2]={ 0,0,}; float16_t rearranged_twiddle_stride1_16_f16[8]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f,}; float16_t rearranged_twiddle_stride2_16_f16[8]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f,}; float16_t rearranged_twiddle_stride3_16_f16[8]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f,}; #endif @@ -79,70 +82,70 @@ uint32_t rearranged_twiddle_tab_stride3_arr_64_f16[3]={ 0,32,0,}; float16_t rearranged_twiddle_stride1_64_f16[40]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, 
-(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f,}; float16_t rearranged_twiddle_stride2_64_f16[40]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, 
+(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f,}; float16_t rearranged_twiddle_stride3_64_f16[40]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f,}; #endif @@ -158,262 +161,262 @@ uint32_t rearranged_twiddle_tab_stride3_arr_256_f16[4]={ 0,128,160,0,}; float16_t rearranged_twiddle_stride1_256_f16[168]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, 
-(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, 
-(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, 
+(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, 
+(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f,}; float16_t rearranged_twiddle_stride2_256_f16[168]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.09801714032956064526f,(float16_t)0.99518472667219692873f, -(float16_t)-0.14673047445536163691f,(float16_t)0.98917650996478101444f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.24298017990326387094f,(float16_t)0.97003125319454397424f, -(float16_t)-0.29028467725446216452f,(float16_t)0.95694033573220893540f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.42755509343028186287f,(float16_t)0.90398929312344344922f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.51410274419322155026f,(float16_t)0.85772861000027211809f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, 
-(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.63439328416364537677f,(float16_t)0.77301045336273710440f, -(float16_t)-0.67155895484701844111f,(float16_t)0.74095112535495899486f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.74095112535495888384f,(float16_t)0.67155895484701855214f, -(float16_t)-0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.85772861000027200706f,(float16_t)0.51410274419322177231f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.90398929312344333820f,(float16_t)0.42755509343028202940f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.95694033573220882438f,(float16_t)0.29028467725446238656f, -(float16_t)-0.97003125319454397424f,(float16_t)0.24298017990326406523f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98917650996478101444f,(float16_t)0.14673047445536180344f, -(float16_t)-0.99518472667219681771f,(float16_t)0.09801714032956082567f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, 
+(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)-0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)-0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)-0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)-0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)-0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, 
+(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f,}; float16_t rearranged_twiddle_stride3_256_f16[168]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, 
-(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)-0.99969881869620424997f,(float16_t)-0.02454122852291207996f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.98527764238894133264f,(float16_t)-0.17096188876030096737f, -(float16_t)-0.97003125319454397424f,(float16_t)-0.24298017990326381543f, -(float16_t)-0.94952818059303678577f,(float16_t)-0.31368174039889118454f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.89322430119551532446f,(float16_t)-0.44961132965460665067f, -(float16_t)-0.85772861000027211809f,(float16_t)-0.51410274419322155026f, -(float16_t)-0.81758481315158371139f,(float16_t)-0.57580819141784533866f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.72424708295146700276f,(float16_t)-0.68954054473706682948f, -(float16_t)-0.67155895484701866316f,(float16_t)-0.74095112535495888384f, -(float16_t)-0.61523159058062726334f,(float16_t)-0.78834642762660589455f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.49289819222978420443f,(float16_t)-0.87008699110871134952f, -(float16_t)-0.42755509343028247349f,(float16_t)-0.90398929312344311615f, -(float16_t)-0.35989503653498794433f,(float16_t)-0.93299279883473895669f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)-0.21910124015687010290f,(float16_t)-0.97570213003852845901f, -(float16_t)-0.14673047445536230304f,(float16_t)-0.98917650996478090342f, -(float16_t)-0.07356456359966735692f,(float16_t)-0.99729045667869020697f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, 
+(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9995117187500f,(float16_t)-0.0245361328125f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9853515625000f,(float16_t)-0.1710205078125f, +(float16_t)-0.9702148437500f,(float16_t)-0.2429199218750f, +(float16_t)-0.9497070312500f,(float16_t)-0.3137207031250f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.8930664062500f,(float16_t)-0.4497070312500f, +(float16_t)-0.8579101562500f,(float16_t)-0.5141601562500f, +(float16_t)-0.8173828125000f,(float16_t)-0.5756835937500f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.7241210937500f,(float16_t)-0.6894531250000f, +(float16_t)-0.6713867187500f,(float16_t)-0.7407226562500f, +(float16_t)-0.6152343750000f,(float16_t)-0.7885742187500f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.4929199218750f,(float16_t)-0.8701171875000f, +(float16_t)-0.4274902343750f,(float16_t)-0.9038085937500f, +(float16_t)-0.3598632812500f,(float16_t)-0.9331054687500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)-0.2191162109375f,(float16_t)-0.9755859375000f, +(float16_t)-0.1467285156250f,(float16_t)-0.9892578125000f, 
+(float16_t)-0.0735473632812f,(float16_t)-0.9970703125000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f,}; #endif @@ -429,1030 +432,1030 @@ uint32_t rearranged_twiddle_tab_stride3_arr_1024_f16[5]={ 0,512,640,672,0,}; float16_t rearranged_twiddle_stride1_1024_f16[680]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99998117528260110909f,(float16_t)0.00613588464915447527f, -(float16_t)0.99992470183914450299f,(float16_t)0.01227153828571992539f, -(float16_t)0.99983058179582340319f,(float16_t)0.01840672990580482019f, -(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, -(float16_t)0.99952941750109314256f,(float16_t)0.03067480317663662595f, -(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99907772775264536147f,(float16_t)0.04293825693494082024f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99847558057329477421f,(float16_t)0.05519524434968993420f, -(float16_t)0.99811811290014917919f,(float16_t)0.06132073630220857829f, -(float16_t)0.99772306664419163624f,(float16_t)0.06744391956366405094f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99682029929116566791f,(float16_t)0.07968243797143012563f, -(float16_t)0.99631261218277800129f,(float16_t)0.08579731234443989385f, -(float16_t)0.99576741446765981713f,(float16_t)0.09190895649713272386f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99456457073425541537f,(float16_t)0.10412163387205458642f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99321194923479450001f,(float16_t)0.11631863091190475235f, -(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.99170975366909952520f,(float16_t)0.12849811079379316880f, -(float16_t)0.99090263542778000971f,(float16_t)0.13458070850712616773f, -(float16_t)0.99005821026229712256f,(float16_t)0.14065823933284921088f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98825756773074946437f,(float16_t)0.15279718525844343535f, -(float16_t)0.98730141815785843473f,(float16_t)0.15885814333386144570f, -(float16_t)0.98630809724459866938f,(float16_t)0.16491312048996989437f, -(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, 
-(float16_t)0.98421009238692902521f,(float16_t)0.17700422041214874946f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.98196386910955524296f,(float16_t)0.18906866414980619262f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97956976568544051887f,(float16_t)0.20110463484209190055f, -(float16_t)0.97831737071962765473f,(float16_t)0.20711137619221856032f, -(float16_t)0.97702814265775439484f,(float16_t)0.21311031991609136194f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97433938278557585821f,(float16_t)0.22508391135979283204f, -(float16_t)0.97293995220556017678f,(float16_t)0.23105810828067110951f, -(float16_t)0.97150389098625178352f,(float16_t)0.23702360599436719801f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96852209427441737777f,(float16_t)0.24892760574572014853f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96539444169768939830f,(float16_t)0.26079411791527551401f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.96212140426904158019f,(float16_t)0.27262135544994897662f, -(float16_t)0.96043051941556578655f,(float16_t)0.27851968938505305973f, -(float16_t)0.95870347489587159906f,(float16_t)0.28440753721127187692f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95514116830577078243f,(float16_t)0.29615088824362378883f, -(float16_t)0.95330604035419386211f,(float16_t)0.30200594931922808417f, -(float16_t)0.95143502096900833820f,(float16_t)0.30784964004153486661f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94758559101774109124f,(float16_t)0.31950203081601569188f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.94359345816196038559f,(float16_t)0.33110630575987642921f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.93945922360218991898f,(float16_t)0.34266071731199437833f, -(float16_t)0.93733901191257495977f,(float16_t)0.34841868024943456472f, -(float16_t)0.93518350993894761025f,(float16_t)0.35416352542049034380f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.93076696107898371224f,(float16_t)0.36561299780477385379f, -(float16_t)0.92850608047321558924f,(float16_t)0.37131719395183754306f, -(float16_t)0.92621024213831137928f,(float16_t)0.37700741021641825945f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.92151403934204190183f,(float16_t)0.38834504669882624617f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91667905992104270485f,(float16_t)0.39962419984564678810f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, -(float16_t)0.91170603200542987832f,(float16_t)0.41084317105790391089f, -(float16_t)0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)0.90659570451491533483f,(float16_t)0.42200027079979968159f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.90134884704602202810f,(float16_t)0.43309381885315195726f, -(float16_t)0.89867446569395381673f,(float16_t)0.43861623853852765853f, -(float16_t)0.89596624975618521791f,(float16_t)0.44412214457042920035f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.89044872324475787817f,(float16_t)0.45508358712634383592f, 
-(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.88479709843093778954f,(float16_t)0.46597649576796618121f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.87901222642863352519f,(float16_t)0.47679923006332208812f, -(float16_t)0.87607009419540660122f,(float16_t)0.48218377207912271887f, -(float16_t)0.87309497841829009079f,(float16_t)0.48755016014843599592f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.86704624551569264845f,(float16_t)0.49822766697278181303f, -(float16_t)0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)0.86086693863776730939f,(float16_t)0.50883014254310698909f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.85455798836540053376f,(float16_t)0.51935599016558964269f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.84812034480329723252f,(float16_t)0.52980362468629460526f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.84155497743689844370f,(float16_t)0.54017147272989285423f, -(float16_t)0.83822470555483807875f,(float16_t)0.54532498842204646383f, -(float16_t)0.83486287498638001026f,(float16_t)0.55045797293660481131f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.82804504525775579626f,(float16_t)0.56066157619733603124f, -(float16_t)0.82458930278502529099f,(float16_t)0.56573181078361312046f, -(float16_t)0.82110251499110464835f,(float16_t)0.57078074588696725566f, -(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.81403632970594841378f,(float16_t)0.58081395809576452649f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, -(float16_t)0.80684755354379933401f,(float16_t)0.59075970185887416442f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.79953726910790501314f,(float16_t)0.60061647938386897305f, -(float16_t)0.79583690460888356633f,(float16_t)0.60551104140432554512f, -(float16_t)0.79210657730021238887f,(float16_t)0.61038280627630947528f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.78455659715557524159f,(float16_t)0.62005721176328909561f, -(float16_t)0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)0.77688846567323244230f,(float16_t)0.62963823891492698426f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.76910333764557969882f,(float16_t)0.63912444486377573138f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.76120238548426177871f,(float16_t)0.64851440102211244110f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.75318679904361252042f,(float16_t)0.65780669329707863735f, -(float16_t)0.74913639452345937020f,(float16_t)0.66241577759017178373f, -(float16_t)0.74505778544146594733f,(float16_t)0.66699992230363747137f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.73681656887736979300f,(float16_t)0.67609270357531592310f, -(float16_t)0.73265427167241281570f,(float16_t)0.68060099779545302212f, -(float16_t)0.72846439044822519637f,(float16_t)0.68508366777270035541f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, -(float16_t)0.72000250796138165477f,(float16_t)0.69397146088965389055f, -(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, 
-(float16_t)0.71143219574521643356f,(float16_t)0.70275474445722529993f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.70275474445722529993f,(float16_t)0.71143219574521643356f, -(float16_t)0.69837624940897291559f,(float16_t)0.71573082528381859468f, -(float16_t)0.69397146088965400157f,(float16_t)0.72000250796138165477f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.68508366777270035541f,(float16_t)0.72846439044822519637f, -(float16_t)0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)0.67609270357531603413f,(float16_t)0.73681656887736979300f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.66699992230363747137f,(float16_t)0.74505778544146594733f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.65780669329707874837f,(float16_t)0.75318679904361252042f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.64851440102211255212f,(float16_t)0.76120238548426177871f, -(float16_t)0.64383154288979149715f,(float16_t)0.76516726562245895860f, -(float16_t)0.63912444486377573138f,(float16_t)0.76910333764557958780f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.62963823891492709528f,(float16_t)0.77688846567323244230f, -(float16_t)0.62485948814238645443f,(float16_t)0.78073722857209448822f, -(float16_t)0.62005721176328920663f,(float16_t)0.78455659715557524159f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.61038280627630947528f,(float16_t)0.79210657730021227785f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.60061647938386897305f,(float16_t)0.79953726910790501314f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.59075970185887427544f,(float16_t)0.80684755354379922299f, -(float16_t)0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)0.58081395809576452649f,(float16_t)0.81403632970594830276f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.57078074588696736669f,(float16_t)0.82110251499110464835f, -(float16_t)0.56573181078361323149f,(float16_t)0.82458930278502529099f, -(float16_t)0.56066157619733603124f,(float16_t)0.82804504525775579626f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.55045797293660481131f,(float16_t)0.83486287498638001026f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.54017147272989296525f,(float16_t)0.84155497743689833268f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.52980362468629482731f,(float16_t)0.84812034480329712149f, -(float16_t)0.52458968267846883826f,(float16_t)0.85135519310526519554f, -(float16_t)0.51935599016558953167f,(float16_t)0.85455798836540053376f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.50883014254310698909f,(float16_t)0.86086693863776730939f, -(float16_t)0.50353838372571757542f,(float16_t)0.86397285612158669643f, -(float16_t)0.49822766697278186854f,(float16_t)0.86704624551569264845f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.48755016014843605143f,(float16_t)0.87309497841829009079f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.47679923006332225466f,(float16_t)0.87901222642863341417f, 
-(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.46597649576796612569f,(float16_t)0.88479709843093778954f, -(float16_t)0.46053871095824000514f,(float16_t)0.88763962040285393496f, -(float16_t)0.45508358712634383592f,(float16_t)0.89044872324475787817f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.44412214457042925586f,(float16_t)0.89596624975618510689f, -(float16_t)0.43861623853852771404f,(float16_t)0.89867446569395381673f, -(float16_t)0.43309381885315201277f,(float16_t)0.90134884704602202810f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.42200027079979979261f,(float16_t)0.90659570451491533483f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.41084317105790391089f,(float16_t)0.91170603200542987832f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.39962419984564678810f,(float16_t)0.91667905992104270485f, -(float16_t)0.39399204006104809883f,(float16_t)0.91911385169005777040f, -(float16_t)0.38834504669882630168f,(float16_t)0.92151403934204190183f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.37700741021641831496f,(float16_t)0.92621024213831126826f, -(float16_t)0.37131719395183759858f,(float16_t)0.92850608047321558924f, -(float16_t)0.36561299780477396482f,(float16_t)0.93076696107898371224f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, -(float16_t)0.35416352542049051033f,(float16_t)0.93518350993894749923f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.34266071731199437833f,(float16_t)0.93945922360218991898f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.33110630575987642921f,(float16_t)0.94359345816196038559f, -(float16_t)0.32531029216226298173f,(float16_t)0.94560732538052127971f, -(float16_t)0.31950203081601574739f,(float16_t)0.94758559101774109124f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.30784964004153497763f,(float16_t)0.95143502096900833820f, -(float16_t)0.30200594931922819519f,(float16_t)0.95330604035419375109f, -(float16_t)0.29615088824362395536f,(float16_t)0.95514116830577067141f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.28440753721127182141f,(float16_t)0.95870347489587159906f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.27262135544994897662f,(float16_t)0.96212140426904158019f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.26079411791527556952f,(float16_t)0.96539444169768939830f, -(float16_t)0.25486565960451462720f,(float16_t)0.96697647104485207059f, -(float16_t)0.24892760574572025956f,(float16_t)0.96852209427441726675f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.23702360599436733679f,(float16_t)0.97150389098625178352f, -(float16_t)0.23105810828067127605f,(float16_t)0.97293995220556006576f, -(float16_t)0.22508391135979277653f,(float16_t)0.97433938278557585821f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.21311031991609136194f,(float16_t)0.97702814265775439484f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, -(float16_t)0.20110463484209195606f,(float16_t)0.97956976568544051887f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, 
-(float16_t)0.18906866414980627589f,(float16_t)0.98196386910955524296f, -(float16_t)0.18303988795514106180f,(float16_t)0.98310548743121628501f, -(float16_t)0.17700422041214886049f,(float16_t)0.98421009238692902521f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.16491312048997008866f,(float16_t)0.98630809724459866938f, -(float16_t)0.15885814333386139019f,(float16_t)0.98730141815785843473f, -(float16_t)0.15279718525844340760f,(float16_t)0.98825756773074946437f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.14065823933284923863f,(float16_t)0.99005821026229712256f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.12849811079379322432f,(float16_t)0.99170975366909952520f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.11631863091190487725f,(float16_t)0.99321194923479450001f, -(float16_t)0.11022220729388318428f,(float16_t)0.99390697000235606051f, -(float16_t)0.10412163387205472520f,(float16_t)0.99456457073425541537f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.09190895649713269611f,(float16_t)0.99576741446765981713f, -(float16_t)0.08579731234443987997f,(float16_t)0.99631261218277800129f, -(float16_t)0.07968243797143012563f,(float16_t)0.99682029929116566791f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.06744391956366410645f,(float16_t)0.99772306664419163624f, -(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.05519524434969003135f,(float16_t)0.99847558057329477421f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.04293825693494095902f,(float16_t)0.99907772775264536147f, -(float16_t)0.03680722294135899131f,(float16_t)0.99932238458834954375f, -(float16_t)0.03067480317663658085f,(float16_t)0.99952941750109314256f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.01840672990580482019f,(float16_t)0.99983058179582340319f, -(float16_t)0.01227153828571994447f,(float16_t)0.99992470183914450299f, -(float16_t)0.00613588464915451517f,(float16_t)0.99998117528260110909f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, 
-(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, 
-(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0061340332031f, +(float16_t)1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)1.0000000000000f,(float16_t)0.0184020996094f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9995117187500f,(float16_t)0.0306701660156f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)0.9990234375000f,(float16_t)0.0429382324219f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9985351562500f,(float16_t)0.0552062988281f, +(float16_t)0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)0.9975585937500f,(float16_t)0.0674438476562f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9965820312500f,(float16_t)0.0797119140625f, +(float16_t)0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)0.9956054687500f,(float16_t)0.0919189453125f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9946289062500f,(float16_t)0.1041259765625f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9931640625000f,(float16_t)0.1163330078125f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9916992187500f,(float16_t)0.1285400390625f, +(float16_t)0.9907226562500f,(float16_t)0.1345214843750f, +(float16_t)0.9902343750000f,(float16_t)0.1406250000000f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9882812500000f,(float16_t)0.1528320312500f, +(float16_t)0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)0.9863281250000f,(float16_t)0.1649169921875f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9843750000000f,(float16_t)0.1770019531250f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9819335937500f,(float16_t)0.1890869140625f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9794921875000f,(float16_t)0.2010498046875f, +(float16_t)0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)0.9770507812500f,(float16_t)0.2131347656250f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9741210937500f,(float16_t)0.2250976562500f, +(float16_t)0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)0.9716796875000f,(float16_t)0.2370605468750f, 
+(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9687500000000f,(float16_t)0.2489013671875f, +(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9653320312500f,(float16_t)0.2607421875000f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9619140625000f,(float16_t)0.2727050781250f, +(float16_t)0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)0.9584960937500f,(float16_t)0.2844238281250f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9550781250000f,(float16_t)0.2961425781250f, +(float16_t)0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)0.9516601562500f,(float16_t)0.3078613281250f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9477539062500f,(float16_t)0.3195800781250f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9433593750000f,(float16_t)0.3310546875000f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9394531250000f,(float16_t)0.3427734375000f, +(float16_t)0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)0.9350585937500f,(float16_t)0.3542480468750f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9306640625000f,(float16_t)0.3657226562500f, +(float16_t)0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)0.9262695312500f,(float16_t)0.3769531250000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9213867187500f,(float16_t)0.3884277343750f, +(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)0.9165039062500f,(float16_t)0.3996582031250f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9116210937500f,(float16_t)0.4108886718750f, +(float16_t)0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)0.9067382812500f,(float16_t)0.4221191406250f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.9013671875000f,(float16_t)0.4331054687500f, +(float16_t)0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)0.8959960937500f,(float16_t)0.4440917968750f, +(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8906250000000f,(float16_t)0.4550781250000f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8847656250000f,(float16_t)0.4660644531250f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8789062500000f,(float16_t)0.4768066406250f, +(float16_t)0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)0.8730468750000f,(float16_t)0.4875488281250f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8671875000000f,(float16_t)0.4982910156250f, +(float16_t)0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)0.8608398437500f,(float16_t)0.5087890625000f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8544921875000f,(float16_t)0.5195312500000f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8481445312500f,(float16_t)0.5297851562500f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8417968750000f,(float16_t)0.5400390625000f, +(float16_t)0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)0.8349609375000f,(float16_t)0.5502929687500f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8281250000000f,(float16_t)0.5605468750000f, +(float16_t)0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)0.8212890625000f,(float16_t)0.5708007812500f, +(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, 
+(float16_t)0.8139648437500f,(float16_t)0.5810546875000f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.8066406250000f,(float16_t)0.5908203125000f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7993164062500f,(float16_t)0.6005859375000f, +(float16_t)0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)0.7919921875000f,(float16_t)0.6103515625000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7846679687500f,(float16_t)0.6201171875000f, +(float16_t)0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)0.7768554687500f,(float16_t)0.6293945312500f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7690429687500f,(float16_t)0.6391601562500f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7612304687500f,(float16_t)0.6484375000000f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7534179687500f,(float16_t)0.6577148437500f, +(float16_t)0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)0.7451171875000f,(float16_t)0.6669921875000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7368164062500f,(float16_t)0.6762695312500f, +(float16_t)0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)0.7285156250000f,(float16_t)0.6850585937500f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7202148437500f,(float16_t)0.6938476562500f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7114257812500f,(float16_t)0.7026367187500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.7026367187500f,(float16_t)0.7114257812500f, +(float16_t)0.6982421875000f,(float16_t)0.7158203125000f, +(float16_t)0.6938476562500f,(float16_t)0.7202148437500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6850585937500f,(float16_t)0.7285156250000f, +(float16_t)0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)0.6762695312500f,(float16_t)0.7368164062500f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6669921875000f,(float16_t)0.7451171875000f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6577148437500f,(float16_t)0.7534179687500f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6484375000000f,(float16_t)0.7612304687500f, +(float16_t)0.6440429687500f,(float16_t)0.7651367187500f, +(float16_t)0.6391601562500f,(float16_t)0.7690429687500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6293945312500f,(float16_t)0.7768554687500f, +(float16_t)0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)0.6201171875000f,(float16_t)0.7846679687500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.6103515625000f,(float16_t)0.7919921875000f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.6005859375000f,(float16_t)0.7993164062500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5908203125000f,(float16_t)0.8066406250000f, +(float16_t)0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)0.5810546875000f,(float16_t)0.8139648437500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5708007812500f,(float16_t)0.8212890625000f, +(float16_t)0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)0.5605468750000f,(float16_t)0.8281250000000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5502929687500f,(float16_t)0.8349609375000f, 
+(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)0.5400390625000f,(float16_t)0.8417968750000f, +(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5297851562500f,(float16_t)0.8481445312500f, +(float16_t)0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)0.5195312500000f,(float16_t)0.8544921875000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.5087890625000f,(float16_t)0.8608398437500f, +(float16_t)0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)0.4982910156250f,(float16_t)0.8671875000000f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4875488281250f,(float16_t)0.8730468750000f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4768066406250f,(float16_t)0.8789062500000f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4660644531250f,(float16_t)0.8847656250000f, +(float16_t)0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)0.4550781250000f,(float16_t)0.8906250000000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4440917968750f,(float16_t)0.8959960937500f, +(float16_t)0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)0.4331054687500f,(float16_t)0.9013671875000f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4221191406250f,(float16_t)0.9067382812500f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.4108886718750f,(float16_t)0.9116210937500f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.3996582031250f,(float16_t)0.9165039062500f, +(float16_t)0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)0.3884277343750f,(float16_t)0.9213867187500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3769531250000f,(float16_t)0.9262695312500f, +(float16_t)0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)0.3657226562500f,(float16_t)0.9306640625000f, +(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3542480468750f,(float16_t)0.9350585937500f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3427734375000f,(float16_t)0.9394531250000f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3310546875000f,(float16_t)0.9433593750000f, +(float16_t)0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)0.3195800781250f,(float16_t)0.9477539062500f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.3078613281250f,(float16_t)0.9516601562500f, +(float16_t)0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)0.2961425781250f,(float16_t)0.9550781250000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2844238281250f,(float16_t)0.9584960937500f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)0.2727050781250f,(float16_t)0.9619140625000f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2607421875000f,(float16_t)0.9653320312500f, +(float16_t)0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)0.2489013671875f,(float16_t)0.9687500000000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2370605468750f,(float16_t)0.9716796875000f, +(float16_t)0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)0.2250976562500f,(float16_t)0.9741210937500f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.2131347656250f,(float16_t)0.9770507812500f, +(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, 
+(float16_t)0.2010498046875f,(float16_t)0.9794921875000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1890869140625f,(float16_t)0.9819335937500f, +(float16_t)0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)0.1770019531250f,(float16_t)0.9843750000000f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1649169921875f,(float16_t)0.9863281250000f, +(float16_t)0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)0.1528320312500f,(float16_t)0.9882812500000f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1406250000000f,(float16_t)0.9902343750000f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1285400390625f,(float16_t)0.9916992187500f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.1163330078125f,(float16_t)0.9931640625000f, +(float16_t)0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)0.1041259765625f,(float16_t)0.9946289062500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0919189453125f,(float16_t)0.9956054687500f, +(float16_t)0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)0.0797119140625f,(float16_t)0.9965820312500f, +(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0674438476562f,(float16_t)0.9975585937500f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)0.0552062988281f,(float16_t)0.9985351562500f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0429382324219f,(float16_t)0.9990234375000f, +(float16_t)0.0368041992188f,(float16_t)0.9995117187500f, +(float16_t)0.0306701660156f,(float16_t)0.9995117187500f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)0.0184020996094f,(float16_t)1.0000000000000f, +(float16_t)0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)0.0061340332031f,(float16_t)1.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, 
+(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f,}; float16_t rearranged_twiddle_stride2_1024_f16[680]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99992470183914450299f,(float16_t)0.01227153828571992539f, -(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, 
-(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99811811290014917919f,(float16_t)0.06132073630220857829f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99631261218277800129f,(float16_t)0.08579731234443989385f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.99090263542778000971f,(float16_t)0.13458070850712616773f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98730141815785843473f,(float16_t)0.15885814333386144570f, -(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97831737071962765473f,(float16_t)0.20711137619221856032f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97293995220556017678f,(float16_t)0.23105810828067110951f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.96043051941556578655f,(float16_t)0.27851968938505305973f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95330604035419386211f,(float16_t)0.30200594931922808417f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.93733901191257495977f,(float16_t)0.34841868024943456472f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.92850608047321558924f,(float16_t)0.37131719395183754306f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, -(float16_t)0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.89867446569395381673f,(float16_t)0.43861623853852765853f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.87607009419540660122f,(float16_t)0.48218377207912271887f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.83822470555483807875f,(float16_t)0.54532498842204646383f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.82458930278502529099f,(float16_t)0.56573181078361312046f, -(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, 
-(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.79583690460888356633f,(float16_t)0.60551104140432554512f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.74913639452345937020f,(float16_t)0.66241577759017178373f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.73265427167241281570f,(float16_t)0.68060099779545302212f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, -(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.69837624940897291559f,(float16_t)0.71573082528381859468f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.64383154288979149715f,(float16_t)0.76516726562245895860f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.62485948814238645443f,(float16_t)0.78073722857209448822f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.56573181078361323149f,(float16_t)0.82458930278502529099f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.52458968267846883826f,(float16_t)0.85135519310526519554f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.50353838372571757542f,(float16_t)0.86397285612158669643f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.46053871095824000514f,(float16_t)0.88763962040285393496f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.43861623853852771404f,(float16_t)0.89867446569395381673f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.39399204006104809883f,(float16_t)0.91911385169005777040f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.37131719395183759858f,(float16_t)0.92850608047321558924f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, 
-(float16_t)0.32531029216226298173f,(float16_t)0.94560732538052127971f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.30200594931922819519f,(float16_t)0.95330604035419375109f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.25486565960451462720f,(float16_t)0.96697647104485207059f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.23105810828067127605f,(float16_t)0.97293995220556006576f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.18303988795514106180f,(float16_t)0.98310548743121628501f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.15885814333386139019f,(float16_t)0.98730141815785843473f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.11022220729388318428f,(float16_t)0.99390697000235606051f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.08579731234443987997f,(float16_t)0.99631261218277800129f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.03680722294135899131f,(float16_t)0.99932238458834954375f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.01227153828571994447f,(float16_t)0.99992470183914450299f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.01227153828571982304f,(float16_t)0.99992470183914450299f, -(float16_t)-0.02454122852291214241f,(float16_t)0.99969881869620424997f, -(float16_t)-0.03680722294135886641f,(float16_t)0.99932238458834954375f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.06132073630220852972f,(float16_t)0.99811811290014917919f, -(float16_t)-0.07356456359966732916f,(float16_t)0.99729045667869020697f, -(float16_t)-0.08579731234443975507f,(float16_t)0.99631261218277800129f, -(float16_t)-0.09801714032956064526f,(float16_t)0.99518472667219692873f, -(float16_t)-0.11022220729388305938f,(float16_t)0.99390697000235606051f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.13458070850712611222f,(float16_t)0.99090263542778000971f, -(float16_t)-0.14673047445536163691f,(float16_t)0.98917650996478101444f, -(float16_t)-0.15885814333386127917f,(float16_t)0.98730141815785843473f, -(float16_t)-0.17096188876030124493f,(float16_t)0.98527764238894122162f, -(float16_t)-0.18303988795514092303f,(float16_t)0.98310548743121628501f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.20711137619221844930f,(float16_t)0.97831737071962765473f, -(float16_t)-0.21910124015686965881f,(float16_t)0.97570213003852857003f, -(float16_t)-0.23105810828067113727f,(float16_t)0.97293995220556017678f, -(float16_t)-0.24298017990326387094f,(float16_t)0.97003125319454397424f, -(float16_t)-0.25486565960451451618f,(float16_t)0.96697647104485207059f, 
-(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.27851968938505294870f,(float16_t)0.96043051941556589757f, -(float16_t)-0.29028467725446216452f,(float16_t)0.95694033573220893540f, -(float16_t)-0.30200594931922808417f,(float16_t)0.95330604035419386211f, -(float16_t)-0.31368174039889140658f,(float16_t)0.94952818059303667475f, -(float16_t)-0.32531029216226287071f,(float16_t)0.94560732538052139073f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.34841868024943439819f,(float16_t)0.93733901191257495977f, -(float16_t)-0.35989503653498816638f,(float16_t)0.93299279883473884567f, -(float16_t)-0.37131719395183748755f,(float16_t)0.92850608047321558924f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.39399204006104798781f,(float16_t)0.91911385169005777040f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.41642956009763698599f,(float16_t)0.90916798309052249127f, -(float16_t)-0.42755509343028186287f,(float16_t)0.90398929312344344922f, -(float16_t)-0.43861623853852738097f,(float16_t)0.89867446569395392775f, -(float16_t)-0.44961132965460670619f,(float16_t)0.89322430119551521344f, -(float16_t)-0.46053871095824006066f,(float16_t)0.88763962040285393496f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.48218377207912271887f,(float16_t)0.87607009419540660122f, -(float16_t)-0.49289819222978398239f,(float16_t)0.87008699110871146054f, -(float16_t)-0.50353838372571746440f,(float16_t)0.86397285612158680745f, -(float16_t)-0.51410274419322155026f,(float16_t)0.85772861000027211809f, -(float16_t)-0.52458968267846872724f,(float16_t)0.85135519310526519554f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, -(float16_t)-0.54532498842204624179f,(float16_t)0.83822470555483818977f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.56573181078361323149f,(float16_t)0.82458930278502517996f, -(float16_t)-0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)-0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.60551104140432543410f,(float16_t)0.79583690460888356633f, -(float16_t)-0.61523159058062670823f,(float16_t)0.78834642762660633863f, -(float16_t)-0.62485948814238623239f,(float16_t)0.78073722857209459924f, -(float16_t)-0.63439328416364537677f,(float16_t)0.77301045336273710440f, -(float16_t)-0.64383154288979127511f,(float16_t)0.76516726562245906962f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.66241577759017189475f,(float16_t)0.74913639452345925918f, -(float16_t)-0.67155895484701844111f,(float16_t)0.74095112535495899486f, -(float16_t)-0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)-0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)-0.69837624940897280457f,(float16_t)0.71573082528381870571f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.71573082528381859468f,(float16_t)0.69837624940897291559f, -(float16_t)-0.72424708295146678072f,(float16_t)0.68954054473706705153f, -(float16_t)-0.73265427167241270467f,(float16_t)0.68060099779545324417f, -(float16_t)-0.74095112535495888384f,(float16_t)0.67155895484701855214f, -(float16_t)-0.74913639452345914815f,(float16_t)0.66241577759017200577f, 
-(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)-0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)-0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)-0.78834642762660622761f,(float16_t)0.61523159058062693028f, -(float16_t)-0.79583690460888345530f,(float16_t)0.60551104140432565615f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.81045719825259465718f,(float16_t)0.58579785745643897510f, -(float16_t)-0.81758481315158360037f,(float16_t)0.57580819141784544968f, -(float16_t)-0.82458930278502506894f,(float16_t)0.56573181078361345353f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.83822470555483807875f,(float16_t)0.54532498842204635281f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)-0.85772861000027200706f,(float16_t)0.51410274419322177231f, -(float16_t)-0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)-0.87008699110871134952f,(float16_t)0.49289819222978414892f, -(float16_t)-0.87607009419540649020f,(float16_t)0.48218377207912288540f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.88763962040285382393f,(float16_t)0.46053871095824022719f, -(float16_t)-0.89322430119551521344f,(float16_t)0.44961132965460687272f, -(float16_t)-0.89867446569395392775f,(float16_t)0.43861623853852754751f, -(float16_t)-0.90398929312344333820f,(float16_t)0.42755509343028202940f, -(float16_t)-0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, -(float16_t)-0.91911385169005777040f,(float16_t)0.39399204006104815434f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.92850608047321547822f,(float16_t)0.37131719395183770960f, -(float16_t)-0.93299279883473884567f,(float16_t)0.35989503653498833291f, -(float16_t)-0.93733901191257484875f,(float16_t)0.34841868024943478677f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.94560732538052116869f,(float16_t)0.32531029216226325929f, -(float16_t)-0.94952818059303667475f,(float16_t)0.31368174039889140658f, -(float16_t)-0.95330604035419386211f,(float16_t)0.30200594931922802866f, -(float16_t)-0.95694033573220882438f,(float16_t)0.29028467725446238656f, -(float16_t)-0.96043051941556578655f,(float16_t)0.27851968938505317075f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.96697647104485207059f,(float16_t)0.25486565960451468271f, -(float16_t)-0.97003125319454397424f,(float16_t)0.24298017990326406523f, -(float16_t)-0.97293995220556006576f,(float16_t)0.23105810828067133156f, -(float16_t)-0.97570213003852845901f,(float16_t)0.21910124015687004739f, -(float16_t)-0.97831737071962754371f,(float16_t)0.20711137619221883788f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98310548743121628501f,(float16_t)0.18303988795514089527f, -(float16_t)-0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)-0.98730141815785843473f,(float16_t)0.15885814333386147346f, -(float16_t)-0.98917650996478101444f,(float16_t)0.14673047445536180344f, -(float16_t)-0.99090263542778000971f,(float16_t)0.13458070850712627875f, 
-(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, -(float16_t)-0.99390697000235606051f,(float16_t)0.11022220729388323979f, -(float16_t)-0.99518472667219681771f,(float16_t)0.09801714032956082567f, -(float16_t)-0.99631261218277800129f,(float16_t)0.08579731234444015753f, -(float16_t)-0.99729045667869020697f,(float16_t)0.07356456359966773162f, -(float16_t)-0.99811811290014917919f,(float16_t)0.06132073630220848809f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)-0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)-0.99969881869620424997f,(float16_t)0.02454122852291232629f, -(float16_t)-0.99992470183914450299f,(float16_t)0.01227153828572000692f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.09801714032956064526f,(float16_t)0.99518472667219692873f, -(float16_t)-0.14673047445536163691f,(float16_t)0.98917650996478101444f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.24298017990326387094f,(float16_t)0.97003125319454397424f, -(float16_t)-0.29028467725446216452f,(float16_t)0.95694033573220893540f, 
-(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.42755509343028186287f,(float16_t)0.90398929312344344922f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.51410274419322155026f,(float16_t)0.85772861000027211809f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.63439328416364537677f,(float16_t)0.77301045336273710440f, -(float16_t)-0.67155895484701844111f,(float16_t)0.74095112535495899486f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.74095112535495888384f,(float16_t)0.67155895484701855214f, -(float16_t)-0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.85772861000027200706f,(float16_t)0.51410274419322177231f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.90398929312344333820f,(float16_t)0.42755509343028202940f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.95694033573220882438f,(float16_t)0.29028467725446238656f, -(float16_t)-0.97003125319454397424f,(float16_t)0.24298017990326406523f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98917650996478101444f,(float16_t)0.14673047445536180344f, -(float16_t)-0.99518472667219681771f,(float16_t)0.09801714032956082567f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, 
+(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9907226562500f,(float16_t)0.1345214843750f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, 
+(float16_t)0.6982421875000f,(float16_t)0.7158203125000f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6440429687500f,(float16_t)0.7651367187500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0368041992188f,(float16_t)0.9995117187500f, 
+(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)-0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)-0.0368041992188f,(float16_t)0.9995117187500f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)-0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)-0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)-0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)-0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)-0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)-0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)-0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)-0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)-0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)-0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)-0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)-0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)-0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)-0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)-0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)-0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)-0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)-0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)-0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)-0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)-0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)-0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)-0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)-0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)-0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)-0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)-0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)-0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)-0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)-0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)-0.6440429687500f,(float16_t)0.7651367187500f, 
+(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)-0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)-0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)-0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)-0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)-0.6982421875000f,(float16_t)0.7158203125000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)-0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)-0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)-0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)-0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)-0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)-0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)-0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)-0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)-0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)-0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)-0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)-0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)-0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)-0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)-0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)-0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)-0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)-0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)-0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)-0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)-0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)-0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)-0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)-0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)-0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)-0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)-0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)-0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)-0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)-0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)-0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)-0.9907226562500f,(float16_t)0.1345214843750f, 
+(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)-0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)-0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)-0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)-0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)-0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)-0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)-1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)-0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)-0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7407226562500f,(float16_t)0.6713867187500f, 
+(float16_t)-0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)-0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)-0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f,}; float16_t rearranged_twiddle_stride3_1024_f16[680]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99983058179582340319f,(float16_t)0.01840672990580482019f, -(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99847558057329477421f,(float16_t)0.05519524434968993420f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99576741446765981713f,(float16_t)0.09190895649713272386f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99170975366909952520f,(float16_t)0.12849811079379316880f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98630809724459866938f,(float16_t)0.16491312048996989437f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.97956976568544051887f,(float16_t)0.20110463484209190055f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97150389098625178352f,(float16_t)0.23702360599436719801f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96212140426904158019f,(float16_t)0.27262135544994897662f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95143502096900833820f,(float16_t)0.30784964004153486661f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.93945922360218991898f,(float16_t)0.34266071731199437833f, 
-(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.92621024213831137928f,(float16_t)0.37700741021641825945f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91170603200542987832f,(float16_t)0.41084317105790391089f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.89596624975618521791f,(float16_t)0.44412214457042920035f, -(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.87901222642863352519f,(float16_t)0.47679923006332208812f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.86086693863776730939f,(float16_t)0.50883014254310698909f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.84155497743689844370f,(float16_t)0.54017147272989285423f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.82110251499110464835f,(float16_t)0.57078074588696725566f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, -(float16_t)0.79953726910790501314f,(float16_t)0.60061647938386897305f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.77688846567323244230f,(float16_t)0.62963823891492698426f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.75318679904361252042f,(float16_t)0.65780669329707863735f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.72846439044822519637f,(float16_t)0.68508366777270035541f, -(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, -(float16_t)0.70275474445722529993f,(float16_t)0.71143219574521643356f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.67609270357531603413f,(float16_t)0.73681656887736979300f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.64851440102211255212f,(float16_t)0.76120238548426177871f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.62005721176328920663f,(float16_t)0.78455659715557524159f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.59075970185887427544f,(float16_t)0.80684755354379922299f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.56066157619733603124f,(float16_t)0.82804504525775579626f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.52980362468629482731f,(float16_t)0.84812034480329712149f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.49822766697278186854f,(float16_t)0.86704624551569264845f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.46597649576796612569f,(float16_t)0.88479709843093778954f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.43309381885315201277f,(float16_t)0.90134884704602202810f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.39962419984564678810f,(float16_t)0.91667905992104270485f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.36561299780477396482f,(float16_t)0.93076696107898371224f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.33110630575987642921f,(float16_t)0.94359345816196038559f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, 
-(float16_t)0.29615088824362395536f,(float16_t)0.95514116830577067141f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.26079411791527556952f,(float16_t)0.96539444169768939830f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.22508391135979277653f,(float16_t)0.97433938278557585821f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, -(float16_t)0.18906866414980627589f,(float16_t)0.98196386910955524296f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.15279718525844340760f,(float16_t)0.98825756773074946437f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.11631863091190487725f,(float16_t)0.99321194923479450001f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.07968243797143012563f,(float16_t)0.99682029929116566791f, -(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.04293825693494095902f,(float16_t)0.99907772775264536147f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.00613588464915451517f,(float16_t)0.99998117528260110909f, -(float16_t)-0.01227153828571982304f,(float16_t)0.99992470183914450299f, -(float16_t)-0.03067480317663645942f,(float16_t)0.99952941750109314256f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.06744391956366398155f,(float16_t)0.99772306664419163624f, -(float16_t)-0.08579731234443975507f,(float16_t)0.99631261218277800129f, -(float16_t)-0.10412163387205460030f,(float16_t)0.99456457073425541537f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.14065823933284912761f,(float16_t)0.99005821026229712256f, -(float16_t)-0.15885814333386127917f,(float16_t)0.98730141815785843473f, -(float16_t)-0.17700422041214874946f,(float16_t)0.98421009238692902521f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.21311031991609125091f,(float16_t)0.97702814265775439484f, -(float16_t)-0.23105810828067113727f,(float16_t)0.97293995220556017678f, -(float16_t)-0.24892760574572012078f,(float16_t)0.96852209427441737777f, -(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.28440753721127171039f,(float16_t)0.95870347489587159906f, -(float16_t)-0.30200594931922808417f,(float16_t)0.95330604035419386211f, -(float16_t)-0.31950203081601563637f,(float16_t)0.94758559101774120226f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.35416352542049039931f,(float16_t)0.93518350993894761025f, -(float16_t)-0.37131719395183748755f,(float16_t)0.92850608047321558924f, -(float16_t)-0.38834504669882619066f,(float16_t)0.92151403934204201285f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.42200027079979968159f,(float16_t)0.90659570451491533483f, -(float16_t)-0.43861623853852738097f,(float16_t)0.89867446569395392775f, -(float16_t)-0.45508358712634372489f,(float16_t)0.89044872324475798919f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.48755016014843571837f,(float16_t)0.87309497841829020182f, -(float16_t)-0.50353838372571746440f,(float16_t)0.86397285612158680745f, -(float16_t)-0.51935599016558964269f,(float16_t)0.85455798836540053376f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, 
-(float16_t)-0.55045797293660470029f,(float16_t)0.83486287498638012128f, -(float16_t)-0.56573181078361323149f,(float16_t)0.82458930278502517996f, -(float16_t)-0.58081395809576441547f,(float16_t)0.81403632970594852480f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.61038280627630958630f,(float16_t)0.79210657730021227785f, -(float16_t)-0.62485948814238623239f,(float16_t)0.78073722857209459924f, -(float16_t)-0.63912444486377573138f,(float16_t)0.76910333764557958780f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.66699992230363736034f,(float16_t)0.74505778544146605835f, -(float16_t)-0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)-0.69397146088965377952f,(float16_t)0.72000250796138176579f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.72000250796138165477f,(float16_t)0.69397146088965389055f, -(float16_t)-0.73265427167241270467f,(float16_t)0.68060099779545324417f, -(float16_t)-0.74505778544146594733f,(float16_t)0.66699992230363758239f, -(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.76910333764557947678f,(float16_t)0.63912444486377584241f, -(float16_t)-0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)-0.79210657730021216683f,(float16_t)0.61038280627630969732f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.81403632970594841378f,(float16_t)0.58081395809576452649f, -(float16_t)-0.82458930278502506894f,(float16_t)0.56573181078361345353f, -(float16_t)-0.83486287498638001026f,(float16_t)0.55045797293660492233f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.85455798836540042274f,(float16_t)0.51935599016558975372f, -(float16_t)-0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)-0.87309497841829009079f,(float16_t)0.48755016014843588490f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.89044872324475787817f,(float16_t)0.45508358712634389143f, -(float16_t)-0.89867446569395392775f,(float16_t)0.43861623853852754751f, -(float16_t)-0.90659570451491533483f,(float16_t)0.42200027079979984812f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, -(float16_t)-0.92151403934204179080f,(float16_t)0.38834504669882657923f, -(float16_t)-0.92850608047321547822f,(float16_t)0.37131719395183770960f, -(float16_t)-0.93518350993894761025f,(float16_t)0.35416352542049039931f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.94758559101774109124f,(float16_t)0.31950203081601580291f, -(float16_t)-0.95330604035419386211f,(float16_t)0.30200594931922802866f, -(float16_t)-0.95870347489587148804f,(float16_t)0.28440753721127209896f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.96852209427441737777f,(float16_t)0.24892760574572009302f, -(float16_t)-0.97293995220556006576f,(float16_t)0.23105810828067133156f, -(float16_t)-0.97702814265775439484f,(float16_t)0.21311031991609141745f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98421009238692902521f,(float16_t)0.17700422041214894375f, -(float16_t)-0.98730141815785843473f,(float16_t)0.15885814333386147346f, -(float16_t)-0.99005821026229701154f,(float16_t)0.14065823933284954395f, -(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, 
-(float16_t)-0.99456457073425541537f,(float16_t)0.10412163387205457254f, -(float16_t)-0.99631261218277800129f,(float16_t)0.08579731234444015753f, -(float16_t)-0.99772306664419163624f,(float16_t)0.06744391956366417584f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)-0.99952941750109314256f,(float16_t)0.03067480317663686534f, -(float16_t)-0.99992470183914450299f,(float16_t)0.01227153828572000692f, -(float16_t)-0.99998117528260110909f,(float16_t)-0.00613588464915455420f, -(float16_t)-0.99969881869620424997f,(float16_t)-0.02454122852291207996f, -(float16_t)-0.99907772775264536147f,(float16_t)-0.04293825693494077861f, -(float16_t)-0.99811811290014917919f,(float16_t)-0.06132073630220824523f, -(float16_t)-0.99682029929116577893f,(float16_t)-0.07968243797142994522f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.99321194923479461103f,(float16_t)-0.11631863091190447479f, -(float16_t)-0.99090263542778000971f,(float16_t)-0.13458070850712605671f, -(float16_t)-0.98825756773074946437f,(float16_t)-0.15279718525844343535f, -(float16_t)-0.98527764238894133264f,(float16_t)-0.17096188876030096737f, -(float16_t)-0.98196386910955524296f,(float16_t)-0.18906866414980610935f, -(float16_t)-0.97831737071962765473f,(float16_t)-0.20711137619221858808f, -(float16_t)-0.97433938278557585821f,(float16_t)-0.22508391135979261000f, -(float16_t)-0.97003125319454397424f,(float16_t)-0.24298017990326381543f, -(float16_t)-0.96539444169768939830f,(float16_t)-0.26079411791527562503f, -(float16_t)-0.96043051941556589757f,(float16_t)-0.27851968938505289319f, -(float16_t)-0.95514116830577078243f,(float16_t)-0.29615088824362378883f, -(float16_t)-0.94952818059303678577f,(float16_t)-0.31368174039889118454f, -(float16_t)-0.94359345816196038559f,(float16_t)-0.33110630575987626267f, -(float16_t)-0.93733901191257495977f,(float16_t)-0.34841868024943456472f, -(float16_t)-0.93076696107898382326f,(float16_t)-0.36561299780477357624f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.91667905992104270485f,(float16_t)-0.39962419984564684361f, -(float16_t)-0.90916798309052249127f,(float16_t)-0.41642956009763693048f, -(float16_t)-0.90134884704602202810f,(float16_t)-0.43309381885315184624f, -(float16_t)-0.89322430119551532446f,(float16_t)-0.44961132965460665067f, -(float16_t)-0.88479709843093790056f,(float16_t)-0.46597649576796595916f, -(float16_t)-0.87607009419540660122f,(float16_t)-0.48218377207912266336f, -(float16_t)-0.86704624551569287050f,(float16_t)-0.49822766697278153547f, -(float16_t)-0.85772861000027211809f,(float16_t)-0.51410274419322155026f, -(float16_t)-0.84812034480329723252f,(float16_t)-0.52980362468629460526f, -(float16_t)-0.83822470555483818977f,(float16_t)-0.54532498842204613076f, -(float16_t)-0.82804504525775590729f,(float16_t)-0.56066157619733592021f, -(float16_t)-0.81758481315158371139f,(float16_t)-0.57580819141784533866f, -(float16_t)-0.80684755354379944503f,(float16_t)-0.59075970185887394237f, -(float16_t)-0.79583690460888356633f,(float16_t)-0.60551104140432543410f, -(float16_t)-0.78455659715557524159f,(float16_t)-0.62005721176328920663f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.76120238548426188974f,(float16_t)-0.64851440102211233008f, -(float16_t)-0.74913639452345925918f,(float16_t)-0.66241577759017178373f, -(float16_t)-0.73681656887737001504f,(float16_t)-0.67609270357531581208f, -(float16_t)-0.72424708295146700276f,(float16_t)-0.68954054473706682948f, 
-(float16_t)-0.71143219574521665560f,(float16_t)-0.70275474445722507788f, -(float16_t)-0.69837624940897302661f,(float16_t)-0.71573082528381848366f, -(float16_t)-0.68508366777270035541f,(float16_t)-0.72846439044822519637f, -(float16_t)-0.67155895484701866316f,(float16_t)-0.74095112535495888384f, -(float16_t)-0.65780669329707874837f,(float16_t)-0.75318679904361240940f, -(float16_t)-0.64383154288979149715f,(float16_t)-0.76516726562245895860f, -(float16_t)-0.62963823891492687324f,(float16_t)-0.77688846567323255332f, -(float16_t)-0.61523159058062726334f,(float16_t)-0.78834642762660589455f, -(float16_t)-0.60061647938386930612f,(float16_t)-0.79953726910790479110f, -(float16_t)-0.58579785745643908612f,(float16_t)-0.81045719825259465718f, -(float16_t)-0.57078074588696736669f,(float16_t)-0.82110251499110464835f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.54017147272989274320f,(float16_t)-0.84155497743689855472f, -(float16_t)-0.52458968267846928235f,(float16_t)-0.85135519310526486247f, -(float16_t)-0.50883014254310732216f,(float16_t)-0.86086693863776708735f, -(float16_t)-0.49289819222978420443f,(float16_t)-0.87008699110871134952f, -(float16_t)-0.47679923006332214364f,(float16_t)-0.87901222642863341417f, -(float16_t)-0.46053871095823989412f,(float16_t)-0.88763962040285404598f, -(float16_t)-0.44412214457042975546f,(float16_t)-0.89596624975618488484f, -(float16_t)-0.42755509343028247349f,(float16_t)-0.90398929312344311615f, -(float16_t)-0.41084317105790418845f,(float16_t)-0.91170603200542976730f, -(float16_t)-0.39399204006104820985f,(float16_t)-0.91911385169005765938f, -(float16_t)-0.37700741021641820394f,(float16_t)-0.92621024213831137928f, -(float16_t)-0.35989503653498794433f,(float16_t)-0.93299279883473895669f, -(float16_t)-0.34266071731199487793f,(float16_t)-0.93945922360218969693f, -(float16_t)-0.32531029216226331480f,(float16_t)-0.94560732538052116869f, -(float16_t)-0.30784964004153508865f,(float16_t)-0.95143502096900833820f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)-0.27262135544994886560f,(float16_t)-0.96212140426904158019f, -(float16_t)-0.25486565960451434965f,(float16_t)-0.96697647104485218161f, -(float16_t)-0.23702360599436766986f,(float16_t)-0.97150389098625167250f, -(float16_t)-0.21910124015687010290f,(float16_t)-0.97570213003852845901f, -(float16_t)-0.20110463484209206708f,(float16_t)-0.97956976568544051887f, -(float16_t)-0.18303988795514095078f,(float16_t)-0.98310548743121628501f, -(float16_t)-0.16491312048996975559f,(float16_t)-0.98630809724459866938f, -(float16_t)-0.14673047445536230304f,(float16_t)-0.98917650996478090342f, -(float16_t)-0.12849811079379358514f,(float16_t)-0.99170975366909952520f, -(float16_t)-0.11022220729388330918f,(float16_t)-0.99390697000235606051f, -(float16_t)-0.09190895649713282101f,(float16_t)-0.99576741446765981713f, -(float16_t)-0.07356456359966735692f,(float16_t)-0.99729045667869020697f, -(float16_t)-0.05519524434968971216f,(float16_t)-0.99847558057329477421f, -(float16_t)-0.03680722294135933131f,(float16_t)-0.99932238458834943273f, -(float16_t)-0.01840672990580516366f,(float16_t)-0.99983058179582340319f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, 
-(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)-0.99969881869620424997f,(float16_t)-0.02454122852291207996f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.98527764238894133264f,(float16_t)-0.17096188876030096737f, -(float16_t)-0.97003125319454397424f,(float16_t)-0.24298017990326381543f, -(float16_t)-0.94952818059303678577f,(float16_t)-0.31368174039889118454f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.89322430119551532446f,(float16_t)-0.44961132965460665067f, -(float16_t)-0.85772861000027211809f,(float16_t)-0.51410274419322155026f, -(float16_t)-0.81758481315158371139f,(float16_t)-0.57580819141784533866f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, 
-(float16_t)-0.72424708295146700276f,(float16_t)-0.68954054473706682948f, -(float16_t)-0.67155895484701866316f,(float16_t)-0.74095112535495888384f, -(float16_t)-0.61523159058062726334f,(float16_t)-0.78834642762660589455f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.49289819222978420443f,(float16_t)-0.87008699110871134952f, -(float16_t)-0.42755509343028247349f,(float16_t)-0.90398929312344311615f, -(float16_t)-0.35989503653498794433f,(float16_t)-0.93299279883473895669f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)-0.21910124015687010290f,(float16_t)-0.97570213003852845901f, -(float16_t)-0.14673047445536230304f,(float16_t)-0.98917650996478090342f, -(float16_t)-0.07356456359966735692f,(float16_t)-0.99729045667869020697f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0184020996094f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)0.9985351562500f,(float16_t)0.0552062988281f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9956054687500f,(float16_t)0.0919189453125f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9916992187500f,(float16_t)0.1285400390625f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9863281250000f,(float16_t)0.1649169921875f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9794921875000f,(float16_t)0.2010498046875f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9716796875000f,(float16_t)0.2370605468750f, +(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9619140625000f,(float16_t)0.2727050781250f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9516601562500f,(float16_t)0.3078613281250f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9394531250000f,(float16_t)0.3427734375000f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9262695312500f,(float16_t)0.3769531250000f, 
+(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)0.9116210937500f,(float16_t)0.4108886718750f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8959960937500f,(float16_t)0.4440917968750f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8789062500000f,(float16_t)0.4768066406250f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8608398437500f,(float16_t)0.5087890625000f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8417968750000f,(float16_t)0.5400390625000f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8212890625000f,(float16_t)0.5708007812500f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.7993164062500f,(float16_t)0.6005859375000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7768554687500f,(float16_t)0.6293945312500f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7534179687500f,(float16_t)0.6577148437500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7285156250000f,(float16_t)0.6850585937500f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7026367187500f,(float16_t)0.7114257812500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6762695312500f,(float16_t)0.7368164062500f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6484375000000f,(float16_t)0.7612304687500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6201171875000f,(float16_t)0.7846679687500f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.5908203125000f,(float16_t)0.8066406250000f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5605468750000f,(float16_t)0.8281250000000f, +(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)0.5297851562500f,(float16_t)0.8481445312500f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4982910156250f,(float16_t)0.8671875000000f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4660644531250f,(float16_t)0.8847656250000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4331054687500f,(float16_t)0.9013671875000f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.3996582031250f,(float16_t)0.9165039062500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3657226562500f,(float16_t)0.9306640625000f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3310546875000f,(float16_t)0.9433593750000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.2961425781250f,(float16_t)0.9550781250000f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)0.2607421875000f,(float16_t)0.9653320312500f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2250976562500f,(float16_t)0.9741210937500f, +(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)0.1890869140625f,(float16_t)0.9819335937500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1528320312500f,(float16_t)0.9882812500000f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1163330078125f,(float16_t)0.9931640625000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0797119140625f,(float16_t)0.9965820312500f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, 
+(float16_t)0.0429382324219f,(float16_t)0.9990234375000f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)0.0061340332031f,(float16_t)1.0000000000000f, +(float16_t)-0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)-0.0306701660156f,(float16_t)0.9995117187500f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0674438476562f,(float16_t)0.9975585937500f, +(float16_t)-0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)-0.1041259765625f,(float16_t)0.9946289062500f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1406250000000f,(float16_t)0.9902343750000f, +(float16_t)-0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)-0.1770019531250f,(float16_t)0.9843750000000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2131347656250f,(float16_t)0.9770507812500f, +(float16_t)-0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)-0.2489013671875f,(float16_t)0.9687500000000f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.2844238281250f,(float16_t)0.9584960937500f, +(float16_t)-0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)-0.3195800781250f,(float16_t)0.9477539062500f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.3542480468750f,(float16_t)0.9350585937500f, +(float16_t)-0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)-0.3884277343750f,(float16_t)0.9213867187500f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4221191406250f,(float16_t)0.9067382812500f, +(float16_t)-0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)-0.4550781250000f,(float16_t)0.8906250000000f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.4875488281250f,(float16_t)0.8730468750000f, +(float16_t)-0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)-0.5195312500000f,(float16_t)0.8544921875000f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5502929687500f,(float16_t)0.8349609375000f, +(float16_t)-0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)-0.5810546875000f,(float16_t)0.8139648437500f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6103515625000f,(float16_t)0.7919921875000f, +(float16_t)-0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)-0.6391601562500f,(float16_t)0.7690429687500f, +(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)-0.6669921875000f,(float16_t)0.7451171875000f, +(float16_t)-0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)-0.6938476562500f,(float16_t)0.7202148437500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7202148437500f,(float16_t)0.6938476562500f, +(float16_t)-0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)-0.7451171875000f,(float16_t)0.6669921875000f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.7690429687500f,(float16_t)0.6391601562500f, +(float16_t)-0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)-0.7919921875000f,(float16_t)0.6103515625000f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8139648437500f,(float16_t)0.5810546875000f, +(float16_t)-0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)-0.8349609375000f,(float16_t)0.5502929687500f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8544921875000f,(float16_t)0.5195312500000f, +(float16_t)-0.8637695312500f,(float16_t)0.5034179687500f, 
+(float16_t)-0.8730468750000f,(float16_t)0.4875488281250f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.8906250000000f,(float16_t)0.4550781250000f, +(float16_t)-0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)-0.9067382812500f,(float16_t)0.4221191406250f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9213867187500f,(float16_t)0.3884277343750f, +(float16_t)-0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)-0.9350585937500f,(float16_t)0.3542480468750f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9477539062500f,(float16_t)0.3195800781250f, +(float16_t)-0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)-0.9584960937500f,(float16_t)0.2844238281250f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9687500000000f,(float16_t)0.2489013671875f, +(float16_t)-0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)-0.9770507812500f,(float16_t)0.2131347656250f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9843750000000f,(float16_t)0.1770019531250f, +(float16_t)-0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)-0.9902343750000f,(float16_t)0.1406250000000f, +(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)-0.9946289062500f,(float16_t)0.1041259765625f, +(float16_t)-0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)-0.9975585937500f,(float16_t)0.0674438476562f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9995117187500f,(float16_t)0.0306701660156f, +(float16_t)-1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)-1.0000000000000f,(float16_t)-0.0061340332031f, +(float16_t)-0.9995117187500f,(float16_t)-0.0245361328125f, +(float16_t)-0.9990234375000f,(float16_t)-0.0429382324219f, +(float16_t)-0.9980468750000f,(float16_t)-0.0613098144531f, +(float16_t)-0.9965820312500f,(float16_t)-0.0797119140625f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9931640625000f,(float16_t)-0.1163330078125f, +(float16_t)-0.9907226562500f,(float16_t)-0.1345214843750f, +(float16_t)-0.9882812500000f,(float16_t)-0.1528320312500f, +(float16_t)-0.9853515625000f,(float16_t)-0.1710205078125f, +(float16_t)-0.9819335937500f,(float16_t)-0.1890869140625f, +(float16_t)-0.9785156250000f,(float16_t)-0.2071533203125f, +(float16_t)-0.9741210937500f,(float16_t)-0.2250976562500f, +(float16_t)-0.9702148437500f,(float16_t)-0.2429199218750f, +(float16_t)-0.9653320312500f,(float16_t)-0.2607421875000f, +(float16_t)-0.9604492187500f,(float16_t)-0.2785644531250f, +(float16_t)-0.9550781250000f,(float16_t)-0.2961425781250f, +(float16_t)-0.9497070312500f,(float16_t)-0.3137207031250f, +(float16_t)-0.9433593750000f,(float16_t)-0.3310546875000f, +(float16_t)-0.9375000000000f,(float16_t)-0.3483886718750f, +(float16_t)-0.9306640625000f,(float16_t)-0.3657226562500f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.9165039062500f,(float16_t)-0.3996582031250f, +(float16_t)-0.9091796875000f,(float16_t)-0.4165039062500f, +(float16_t)-0.9013671875000f,(float16_t)-0.4331054687500f, +(float16_t)-0.8930664062500f,(float16_t)-0.4497070312500f, +(float16_t)-0.8847656250000f,(float16_t)-0.4660644531250f, +(float16_t)-0.8759765625000f,(float16_t)-0.4821777343750f, +(float16_t)-0.8671875000000f,(float16_t)-0.4982910156250f, +(float16_t)-0.8579101562500f,(float16_t)-0.5141601562500f, +(float16_t)-0.8481445312500f,(float16_t)-0.5297851562500f, 
+(float16_t)-0.8383789062500f,(float16_t)-0.5454101562500f, +(float16_t)-0.8281250000000f,(float16_t)-0.5605468750000f, +(float16_t)-0.8173828125000f,(float16_t)-0.5756835937500f, +(float16_t)-0.8066406250000f,(float16_t)-0.5908203125000f, +(float16_t)-0.7958984375000f,(float16_t)-0.6054687500000f, +(float16_t)-0.7846679687500f,(float16_t)-0.6201171875000f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.7612304687500f,(float16_t)-0.6484375000000f, +(float16_t)-0.7490234375000f,(float16_t)-0.6625976562500f, +(float16_t)-0.7368164062500f,(float16_t)-0.6762695312500f, +(float16_t)-0.7241210937500f,(float16_t)-0.6894531250000f, +(float16_t)-0.7114257812500f,(float16_t)-0.7026367187500f, +(float16_t)-0.6982421875000f,(float16_t)-0.7158203125000f, +(float16_t)-0.6850585937500f,(float16_t)-0.7285156250000f, +(float16_t)-0.6713867187500f,(float16_t)-0.7407226562500f, +(float16_t)-0.6577148437500f,(float16_t)-0.7534179687500f, +(float16_t)-0.6440429687500f,(float16_t)-0.7651367187500f, +(float16_t)-0.6293945312500f,(float16_t)-0.7768554687500f, +(float16_t)-0.6152343750000f,(float16_t)-0.7885742187500f, +(float16_t)-0.6005859375000f,(float16_t)-0.7993164062500f, +(float16_t)-0.5859375000000f,(float16_t)-0.8105468750000f, +(float16_t)-0.5708007812500f,(float16_t)-0.8212890625000f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.5400390625000f,(float16_t)-0.8417968750000f, +(float16_t)-0.5244140625000f,(float16_t)-0.8515625000000f, +(float16_t)-0.5087890625000f,(float16_t)-0.8608398437500f, +(float16_t)-0.4929199218750f,(float16_t)-0.8701171875000f, +(float16_t)-0.4768066406250f,(float16_t)-0.8789062500000f, +(float16_t)-0.4604492187500f,(float16_t)-0.8876953125000f, +(float16_t)-0.4440917968750f,(float16_t)-0.8959960937500f, +(float16_t)-0.4274902343750f,(float16_t)-0.9038085937500f, +(float16_t)-0.4108886718750f,(float16_t)-0.9116210937500f, +(float16_t)-0.3940429687500f,(float16_t)-0.9189453125000f, +(float16_t)-0.3769531250000f,(float16_t)-0.9262695312500f, +(float16_t)-0.3598632812500f,(float16_t)-0.9331054687500f, +(float16_t)-0.3427734375000f,(float16_t)-0.9394531250000f, +(float16_t)-0.3251953125000f,(float16_t)-0.9458007812500f, +(float16_t)-0.3078613281250f,(float16_t)-0.9516601562500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)-0.2727050781250f,(float16_t)-0.9619140625000f, +(float16_t)-0.2548828125000f,(float16_t)-0.9667968750000f, +(float16_t)-0.2370605468750f,(float16_t)-0.9716796875000f, +(float16_t)-0.2191162109375f,(float16_t)-0.9755859375000f, +(float16_t)-0.2010498046875f,(float16_t)-0.9794921875000f, +(float16_t)-0.1829833984375f,(float16_t)-0.9829101562500f, +(float16_t)-0.1649169921875f,(float16_t)-0.9863281250000f, +(float16_t)-0.1467285156250f,(float16_t)-0.9892578125000f, +(float16_t)-0.1285400390625f,(float16_t)-0.9916992187500f, +(float16_t)-0.1102294921875f,(float16_t)-0.9941406250000f, +(float16_t)-0.0919189453125f,(float16_t)-0.9956054687500f, +(float16_t)-0.0735473632812f,(float16_t)-0.9970703125000f, +(float16_t)-0.0552062988281f,(float16_t)-0.9985351562500f, +(float16_t)-0.0368041992188f,(float16_t)-0.9995117187500f, +(float16_t)-0.0184020996094f,(float16_t)-1.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, 
+(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9995117187500f,(float16_t)-0.0245361328125f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9853515625000f,(float16_t)-0.1710205078125f, +(float16_t)-0.9702148437500f,(float16_t)-0.2429199218750f, +(float16_t)-0.9497070312500f,(float16_t)-0.3137207031250f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.8930664062500f,(float16_t)-0.4497070312500f, +(float16_t)-0.8579101562500f,(float16_t)-0.5141601562500f, +(float16_t)-0.8173828125000f,(float16_t)-0.5756835937500f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.7241210937500f,(float16_t)-0.6894531250000f, +(float16_t)-0.6713867187500f,(float16_t)-0.7407226562500f, +(float16_t)-0.6152343750000f,(float16_t)-0.7885742187500f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.4929199218750f,(float16_t)-0.8701171875000f, +(float16_t)-0.4274902343750f,(float16_t)-0.9038085937500f, +(float16_t)-0.3598632812500f,(float16_t)-0.9331054687500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)-0.2191162109375f,(float16_t)-0.9755859375000f, +(float16_t)-0.1467285156250f,(float16_t)-0.9892578125000f, +(float16_t)-0.0735473632812f,(float16_t)-0.9970703125000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, 
+(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f,}; #endif @@ -1468,4102 +1471,4102 @@ uint32_t rearranged_twiddle_tab_stride3_arr_4096_f16[6]={ 0,2048,2560,2688,2720,0,}; float16_t rearranged_twiddle_stride1_4096_f16[2728]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99999882345170187925f,(float16_t)0.00153398018628476550f, -(float16_t)0.99999529380957619118f,(float16_t)0.00306795676296597614f, -(float16_t)0.99998941108192840321f,(float16_t)0.00460192612044857050f, -(float16_t)0.99998117528260110909f,(float16_t)0.00613588464915447527f, -(float16_t)0.99997058643097413988f,(float16_t)0.00766982873953109701f, -(float16_t)0.99995764455196389786f,(float16_t)0.00920375478205981944f, -(float16_t)0.99994234967602391162f,(float16_t)0.01073765916726449055f, -(float16_t)0.99992470183914450299f,(float16_t)0.01227153828571992539f, -(float16_t)0.99990470108285289808f,(float16_t)0.01380538852806039059f, -(float16_t)0.99988234745421256111f,(float16_t)0.01533920628498810015f, -(float16_t)0.99985764100582386060f,(float16_t)0.01687298794728171042f, -(float16_t)0.99983058179582340319f,(float16_t)0.01840672990580482019f, -(float16_t)0.99980116988788425569f,(float16_t)0.01994042855151444138f, -(float16_t)0.99976940535121527898f,(float16_t)0.02147408027546950787f, -(float16_t)0.99973528826056168306f,(float16_t)0.02300768146883936868f, -(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, -(float16_t)0.99965999674395922270f,(float16_t)0.02607471782910390085f, -(float16_t)0.99961882249517863830f,(float16_t)0.02760814577896573974f, -(float16_t)0.99957529604674921764f,(float16_t)0.02914150876419372219f, -(float16_t)0.99952941750109314256f,(float16_t)0.03067480317663662595f, -(float16_t)0.99948118696616694567f,(float16_t)0.03220802540830458582f, -(float16_t)0.99943060455546173237f,(float16_t)0.03374117185137757990f, -(float16_t)0.99937767038800284780f,(float16_t)0.03527423889821394709f, -(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99926474728659442359f,(float16_t)0.03834012037355269409f, -(float16_t)0.99920475861836388631f,(float16_t)0.03987292758773981066f, -(float16_t)0.99914241872481690532f,(float16_t)0.04140564097707673946f, -(float16_t)0.99907772775264536147f,(float16_t)0.04293825693494082024f, -(float16_t)0.99901068585407337697f,(float16_t)0.04447077185493866769f, -(float16_t)0.99894129318685687124f,(float16_t)0.04600318213091462299f, 
-(float16_t)0.99886954991428356099f,(float16_t)0.04753548415695930257f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99871901223387293811f,(float16_t)0.05059974903689928166f, -(float16_t)0.99864021818026527111f,(float16_t)0.05213170468028332366f, -(float16_t)0.99855907422975931365f,(float16_t)0.05366353765273051968f, -(float16_t)0.99847558057329477421f,(float16_t)0.05519524434968993420f, -(float16_t)0.99838973740734016094f,(float16_t)0.05672682116690774823f, -(float16_t)0.99830154493389289261f,(float16_t)0.05825826450043575244f, -(float16_t)0.99821100336047818846f,(float16_t)0.05978957074663986820f, -(float16_t)0.99811811290014917919f,(float16_t)0.06132073630220857829f, -(float16_t)0.99802287377148624081f,(float16_t)0.06285175756416140624f, -(float16_t)0.99792528619859599548f,(float16_t)0.06438263092985746505f, -(float16_t)0.99782535041111164453f,(float16_t)0.06591335279700380467f, -(float16_t)0.99772306664419163624f,(float16_t)0.06744391956366405094f, -(float16_t)0.99761843513851955478f,(float16_t)0.06897432762826674613f, -(float16_t)0.99751145614030345410f,(float16_t)0.07050457338961385600f, -(float16_t)0.99740212990127530279f,(float16_t)0.07203465324688933247f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99717643673532618820f,(float16_t)0.07509430084792130533f, -(float16_t)0.99706007033948296225f,(float16_t)0.07662386139203149205f, -(float16_t)0.99694135776498216117f,(float16_t)0.07815324163279423197f, -(float16_t)0.99682029929116566791f,(float16_t)0.07968243797143012563f, -(float16_t)0.99669689520289606044f,(float16_t)0.08121144680959244133f, -(float16_t)0.99657114579055483539f,(float16_t)0.08274026454937569164f, -(float16_t)0.99644305135004263008f,(float16_t)0.08426888759332407108f, -(float16_t)0.99631261218277800129f,(float16_t)0.08579731234443989385f, -(float16_t)0.99617982859569698117f,(float16_t)0.08732553520619205922f, -(float16_t)0.99604470090125196702f,(float16_t)0.08885355258252460031f, -(float16_t)0.99590722941741172125f,(float16_t)0.09038136087786498296f, -(float16_t)0.99576741446765981713f,(float16_t)0.09190895649713272386f, -(float16_t)0.99562525638099430569f,(float16_t)0.09343633584574778661f, -(float16_t)0.99548075549192693856f,(float16_t)0.09496349532963899165f, -(float16_t)0.99533391214048227980f,(float16_t)0.09649043135525259274f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99503319943811863180f,(float16_t)0.09954361866006931903f, -(float16_t)0.99487933079480561638f,(float16_t)0.10106986275482782167f, -(float16_t)0.99472312110432570265f,(float16_t)0.10259586902243628126f, -(float16_t)0.99456457073425541537f,(float16_t)0.10412163387205458642f, -(float16_t)0.99440368005767909576f,(float16_t)0.10564715371341061589f, -(float16_t)0.99424044945318790223f,(float16_t)0.10717242495680884273f, -(float16_t)0.99407487930487936634f,(float16_t)0.10869744401313871651f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99373672194072459884f,(float16_t)0.11174671121112658700f, -(float16_t)0.99356413552059530403f,(float16_t)0.11327095217756434631f, -(float16_t)0.99338921114808065305f,(float16_t)0.11479492660651008373f, -(float16_t)0.99321194923479450001f,(float16_t)0.11631863091190475235f, -(float16_t)0.99303235019785141002f,(float16_t)0.11784206150832497728f, -(float16_t)0.99285041445986510489f,(float16_t)0.11936521481099135467f, -(float16_t)0.99266614244894801899f,(float16_t)0.12088808723577708359f, 
-(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.99229059134825736699f,(float16_t)0.12393297511851215920f, -(float16_t)0.99209931314219179654f,(float16_t)0.12545498341154623367f, -(float16_t)0.99190570043060932726f,(float16_t)0.12697669649688586579f, -(float16_t)0.99170975366909952520f,(float16_t)0.12849811079379316880f, -(float16_t)0.99151147331874389668f,(float16_t)0.13001922272223334631f, -(float16_t)0.99131085984611544415f,(float16_t)0.13154002870288311611f, -(float16_t)0.99110791372327688986f,(float16_t)0.13306052515713906459f, -(float16_t)0.99090263542778000971f,(float16_t)0.13458070850712616773f, -(float16_t)0.99069502544266463406f,(float16_t)0.13610057517570620100f, -(float16_t)0.99048508425645709341f,(float16_t)0.13762012158648603832f, -(float16_t)0.99027281236316910817f,(float16_t)0.13913934416382620074f, -(float16_t)0.99005821026229712256f,(float16_t)0.14065823933284921088f, -(float16_t)0.98984127845882052821f,(float16_t)0.14217680351944803063f, -(float16_t)0.98962201746320088702f,(float16_t)0.14369503315029447110f, -(float16_t)0.98940042779138037687f,(float16_t)0.14521292465284746376f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98895026451030298986f,(float16_t)0.14824767898689603096f, -(float16_t)0.98872169196032377858f,(float16_t)0.14976453467732150915f, -(float16_t)0.98849079285269658701f,(float16_t)0.15128103795733022219f, -(float16_t)0.98825756773074946437f,(float16_t)0.15279718525844343535f, -(float16_t)0.98802201714328352633f,(float16_t)0.15431297301302010494f, -(float16_t)0.98778414164457217783f,(float16_t)0.15582839765426523271f, -(float16_t)0.98754394179435922574f,(float16_t)0.15734345561623824805f, -(float16_t)0.98730141815785843473f,(float16_t)0.15885814333386144570f, -(float16_t)0.98705657130575097380f,(float16_t)0.16037245724292828464f, -(float16_t)0.98680940181418552726f,(float16_t)0.16188639378011182579f, -(float16_t)0.98655991026477540817f,(float16_t)0.16339994938297322524f, -(float16_t)0.98630809724459866938f,(float16_t)0.16491312048996989437f, -(float16_t)0.98605396334619543897f,(float16_t)0.16642590354046410406f, -(float16_t)0.98579750916756747614f,(float16_t)0.16793829497473117263f, -(float16_t)0.98553873531217606185f,(float16_t)0.16945029123396795900f, -(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)0.98501423101223983814f,(float16_t)0.17247308399679595059f, -(float16_t)0.98474850180190420801f,(float16_t)0.17398387338746382214f, -(float16_t)0.98448045538322093151f,(float16_t)0.17549425337727142526f, -(float16_t)0.98421009238692902521f,(float16_t)0.17700422041214874946f, -(float16_t)0.98393741344921892278f,(float16_t)0.17851377093899750692f, -(float16_t)0.98366241921173025453f,(float16_t)0.18002290140569951471f, -(float16_t)0.98338511032155118130f,(float16_t)0.18153160826112496595f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.98282355119870523641f,(float16_t)0.18454773693861961648f, -(float16_t)0.98253930228744124076f,(float16_t)0.18605515166344663291f, -(float16_t)0.98225274136628937249f,(float16_t)0.18756212858252960252f, -(float16_t)0.98196386910955524296f,(float16_t)0.18906866414980619262f, -(float16_t)0.98167268619698311305f,(float16_t)0.19057475482025273972f, -(float16_t)0.98137919331375456089f,(float16_t)0.19208039704989243734f, -(float16_t)0.98108339115048670553f,(float16_t)0.19358558729580360724f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, 
-(float16_t)0.98048486177346938497f,(float16_t)0.19659459767008022335f, -(float16_t)0.98018213596811742949f,(float16_t)0.19809841071795356027f, -(float16_t)0.97987710369951763756f,(float16_t)0.19960175762113097075f, -(float16_t)0.97956976568544051887f,(float16_t)0.20110463484209190055f, -(float16_t)0.97926012264908202098f,(float16_t)0.20260703884442113343f, -(float16_t)0.97894817531906219710f,(float16_t)0.20410896609281686809f, -(float16_t)0.97863392442942320759f,(float16_t)0.20561041305309923910f, -(float16_t)0.97831737071962765473f,(float16_t)0.20711137619221856032f, -(float16_t)0.97799851493455713936f,(float16_t)0.20861185197826348503f, -(float16_t)0.97767735782450992943f,(float16_t)0.21011183688046961016f, -(float16_t)0.97735390014519996082f,(float16_t)0.21161132736922755315f, -(float16_t)0.97702814265775439484f,(float16_t)0.21311031991609136194f, -(float16_t)0.97670008612871184184f,(float16_t)0.21460881099378675829f, -(float16_t)0.97636973133002114000f,(float16_t)0.21610679707621952006f, -(float16_t)0.97603707903903902388f,(float16_t)0.21760427463848364127f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97536488511665697665f,(float16_t)0.22059769010887350649f, -(float16_t)0.97502534506699412020f,(float16_t)0.22209362097320350937f, -(float16_t)0.97468351068851066810f,(float16_t)0.22358902922978998729f, -(float16_t)0.97433938278557585821f,(float16_t)0.22508391135979283204f, -(float16_t)0.97399296216795583359f,(float16_t)0.22657826384561000066f, -(float16_t)0.97364424965081197705f,(float16_t)0.22807208317088573102f, -(float16_t)0.97329324605469824672f,(float16_t)0.22956536582051886852f, -(float16_t)0.97293995220556017678f,(float16_t)0.23105810828067110951f, -(float16_t)0.97258436893473221296f,(float16_t)0.23255030703877524467f, -(float16_t)0.97222649707893626925f,(float16_t)0.23404195858354343018f, -(float16_t)0.97186633748027939639f,(float16_t)0.23553305940497548665f, -(float16_t)0.97150389098625178352f,(float16_t)0.23702360599436719801f, -(float16_t)0.97113915844972509284f,(float16_t)0.23851359484431841618f, -(float16_t)0.97077214072895035013f,(float16_t)0.24000302244874149871f, -(float16_t)0.97040283868755550234f,(float16_t)0.24149188530286933019f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96965738512429244800f,(float16_t)0.24446790274782415064f, -(float16_t)0.96928123535654853171f,(float16_t)0.24595505033579459497f, -(float16_t)0.96890280477642887202f,(float16_t)0.24744161916777326904f, -(float16_t)0.96852209427441737777f,(float16_t)0.24892760574572014853f, -(float16_t)0.96813910474636244441f,(float16_t)0.25041300657296522436f, -(float16_t)0.96775383709347551076f,(float16_t)0.25189781815421696809f, -(float16_t)0.96736629222232850545f,(float16_t)0.25338203699557015902f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96658437447833311928f,(float16_t)0.25634868248994291395f, -(float16_t)0.96619000344541250413f,(float16_t)0.25783110216215898713f, -(float16_t)0.96579335887408368500f,(float16_t)0.25931291513288623474f, -(float16_t)0.96539444169768939830f,(float16_t)0.26079411791527551401f, -(float16_t)0.96499325285492032478f,(float16_t)0.26227470702391358914f, -(float16_t)0.96458979328981275803f,(float16_t)0.26375467897483134694f, -(float16_t)0.96418406395174582890f,(float16_t)0.26523403028551179039f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.96336579978095404631f,(float16_t)0.26819085706340317632f, 
-(float16_t)0.96295326687368387741f,(float16_t)0.26966832557291509076f, -(float16_t)0.96253846804435916340f,(float16_t)0.27114515952680801059f, -(float16_t)0.96212140426904158019f,(float16_t)0.27262135544994897662f, -(float16_t)0.96170207652912254037f,(float16_t)0.27409690986870638429f, -(float16_t)0.96128048581132063966f,(float16_t)0.27557181931095814376f, -(float16_t)0.96085663310767965850f,(float16_t)0.27704608030609989555f, -(float16_t)0.96043051941556578655f,(float16_t)0.27851968938505305973f, -(float16_t)0.96000214573766595727f,(float16_t)0.27999264308027321801f, -(float16_t)0.95957151308198451733f,(float16_t)0.28146493792575794091f, -(float16_t)0.95913862246184189431f,(float16_t)0.28293657045705539188f, -(float16_t)0.95870347489587159906f,(float16_t)0.28440753721127187692f, -(float16_t)0.95826607140801767226f,(float16_t)0.28587783472708061527f, -(float16_t)0.95782641302753290802f,(float16_t)0.28734745954472951102f, -(float16_t)0.95738450078897585627f,(float16_t)0.28881640820604947972f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95649391890239510161f,(float16_t)0.29175226323498926195f, -(float16_t)0.95604525134999640557f,(float16_t)0.29321916269425862822f, -(float16_t)0.95559433413077110586f,(float16_t)0.29468537218051432669f, -(float16_t)0.95514116830577078243f,(float16_t)0.29615088824362378883f, -(float16_t)0.95468575494133833814f,(float16_t)0.29761570743508619641f, -(float16_t)0.95422809510910566733f,(float16_t)0.29907982630804047508f, -(float16_t)0.95376818988599032512f,(float16_t)0.30054324141727345454f, -(float16_t)0.95330604035419386211f,(float16_t)0.30200594931922808417f, -(float16_t)0.95284164760119871573f,(float16_t)0.30346794657201131562f, -(float16_t)0.95237501271976587880f,(float16_t)0.30492922973540237397f, -(float16_t)0.95190613680793234597f,(float16_t)0.30638979537086091787f, -(float16_t)0.95143502096900833820f,(float16_t)0.30784964004153486661f, -(float16_t)0.95096166631157508231f,(float16_t)0.30930876031226872680f, -(float16_t)0.95048607394948170235f,(float16_t)0.31076715274961147495f, -(float16_t)0.95000824500184299914f,(float16_t)0.31222481392182488413f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94904588185270055689f,(float16_t)0.31513792875252244485f, -(float16_t)0.94856134991573026749f,(float16_t)0.31659337555616584581f, -(float16_t)0.94807458592227622507f,(float16_t)0.31804807738501494896f, -(float16_t)0.94758559101774109124f,(float16_t)0.31950203081601569188f, -(float16_t)0.94709436635277721717f,(float16_t)0.32095523242787521445f, -(float16_t)0.94660091308328353499f,(float16_t)0.32240767880106985244f, -(float16_t)0.94610523237040344835f,(float16_t)0.32385936651785285356f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.94510719328526060501f,(float16_t)0.32676045232013173347f, -(float16_t)0.94460483726148025685f,(float16_t)0.32820984357909249729f, -(float16_t)0.94410025849127265918f,(float16_t)0.32965846252858749255f, -(float16_t)0.94359345816196038559f,(float16_t)0.33110630575987642921f, -(float16_t)0.94308443746609349478f,(float16_t)0.33255336986604422389f, -(float16_t)0.94257319760144686605f,(float16_t)0.33399965144200938205f, -(float16_t)0.94205973977101731265f,(float16_t)0.33544514708453160301f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.94102617505088925753f,(float16_t)0.33833376696554112728f, -(float16_t)0.94050607059326829518f,(float16_t)0.33977688440682685123f, 
-(float16_t)0.93998375303401404679f,(float16_t)0.34121920232028235542f, -(float16_t)0.93945922360218991898f,(float16_t)0.34266071731199437833f, -(float16_t)0.93893248353206459900f,(float16_t)0.34410142598993881391f, -(float16_t)0.93840353406310805795f,(float16_t)0.34554132496398909380f, -(float16_t)0.93787237643998988545f,(float16_t)0.34698041084592368133f, -(float16_t)0.93733901191257495977f,(float16_t)0.34841868024943456472f, -(float16_t)0.93680344173592156043f,(float16_t)0.34985612979013491763f, -(float16_t)0.93626566717027825959f,(float16_t)0.35129275608556709276f, -(float16_t)0.93572568948108036935f,(float16_t)0.35272855575521072646f, -(float16_t)0.93518350993894761025f,(float16_t)0.35416352542049034380f, -(float16_t)0.93463912981968078064f,(float16_t)0.35559766170478385172f, -(float16_t)0.93409255040425887007f,(float16_t)0.35703096123342997759f, -(float16_t)0.93354377297883617270f,(float16_t)0.35846342063373654030f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.93243962926846235550f,(float16_t)0.36132580556845428355f, -(float16_t)0.93188426558166814750f,(float16_t)0.36275572436739722537f, -(float16_t)0.93132670908118042608f,(float16_t)0.36418478956707989180f, -(float16_t)0.93076696107898371224f,(float16_t)0.36561299780477385379f, -(float16_t)0.93020502289221906889f,(float16_t)0.36704034571976718038f, -(float16_t)0.92964089584318121418f,(float16_t)0.36846682995337232125f, -(float16_t)0.92907458125931585702f,(float16_t)0.36989244714893410038f, -(float16_t)0.92850608047321558924f,(float16_t)0.37131719395183754306f, -(float16_t)0.92793539482261788720f,(float16_t)0.37274106700951575855f, -(float16_t)0.92736252565040111495f,(float16_t)0.37416406297145793358f, -(float16_t)0.92678747430458174872f,(float16_t)0.37558617848921721505f, -(float16_t)0.92621024213831137928f,(float16_t)0.37700741021641825945f, -(float16_t)0.92563083050987271516f,(float16_t)0.37842775480876555960f, -(float16_t)0.92504924078267758425f,(float16_t)0.37984720892405116066f, -(float16_t)0.92446547432526260391f,(float16_t)0.38126576922216237620f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.92329141671952763559f,(float16_t)0.38410019501693504207f, -(float16_t)0.92270112833387862850f,(float16_t)0.38551605384391884890f, -(float16_t)0.92210866874334518339f,(float16_t)0.38693100551438858181f, -(float16_t)0.92151403934204190183f,(float16_t)0.38834504669882624617f, -(float16_t)0.92091724152918941204f,(float16_t)0.38975817406985641123f, -(float16_t)0.92031827670911059425f,(float16_t)0.39117038430225387069f, -(float16_t)0.91971714629122736095f,(float16_t)0.39258167407295146978f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91850839432521225181f,(float16_t)0.39540147894781635385f, -(float16_t)0.91790077562139049672f,(float16_t)0.39680998741671030805f, -(float16_t)0.91729099700837790632f,(float16_t)0.39821756215337356100f, -(float16_t)0.91667905992104270485f,(float16_t)0.39962419984564678810f, -(float16_t)0.91606496579933172075f,(float16_t)0.40102989718357562321f, -(float16_t)0.91544871608826783316f,(float16_t)0.40243465085941843018f, -(float16_t)0.91483031223794619713f,(float16_t)0.40383845756765407442f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, -(float16_t)0.91358704794525080750f,(float16_t)0.40664321687036902864f, -(float16_t)0.91296219042839821256f,(float16_t)0.40804416286497868782f, -(float16_t)0.91233518462332274801f,(float16_t)0.40944414869225759235f, 
-(float16_t)0.91170603200542987832f,(float16_t)0.41084317105790391089f, -(float16_t)0.91107473405517636067f,(float16_t)0.41224122666988288755f, -(float16_t)0.91044129225806724737f,(float16_t)0.41363831223843450235f, -(float16_t)0.90980570810465222209f,(float16_t)0.41503442447608163146f, -(float16_t)0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)0.90852811871630612117f,(float16_t)0.41782371582021227141f, -(float16_t)0.90788611648766626150f,(float16_t)0.41921688836322390515f, -(float16_t)0.90724197791529581636f,(float16_t)0.42060907444840250902f, -(float16_t)0.90659570451491533483f,(float16_t)0.42200027079979968159f, -(float16_t)0.90594729780726845902f,(float16_t)0.42339047414379604728f, -(float16_t)0.90529675931811881551f,(float16_t)0.42477968120910880589f, -(float16_t)0.90464409057824624050f,(float16_t)0.42616788872679961520f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.90333236849451181705f,(float16_t)0.42894129205532949278f, -(float16_t)0.90267331823725882600f,(float16_t)0.43032648134008261165f, -(float16_t)0.90201214390249317976f,(float16_t)0.43171065802505725895f, -(float16_t)0.90134884704602202810f,(float16_t)0.43309381885315195726f, -(float16_t)0.90068342922864685907f,(float16_t)0.43447596056965565037f, -(float16_t)0.90001589201616016833f,(float16_t)0.43585707992225547480f, -(float16_t)0.89934623697934157338f,(float16_t)0.43723717366104408732f, -(float16_t)0.89867446569395381673f,(float16_t)0.43861623853852765853f, -(float16_t)0.89800057974073987932f,(float16_t)0.43999427130963325583f, -(float16_t)0.89732458070541831763f,(float16_t)0.44137126873171667052f, -(float16_t)0.89664647017868015499f,(float16_t)0.44274722756457002282f, -(float16_t)0.89596624975618521791f,(float16_t)0.44412214457042920035f, -(float16_t)0.89528392103855758410f,(float16_t)0.44549601651398174074f, -(float16_t)0.89459948563138269595f,(float16_t)0.44686884016237415906f, -(float16_t)0.89391294514520325265f,(float16_t)0.44824061228521988598f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.89253355540276457791f,(float16_t)0.45098098904510386387f, -(float16_t)0.89184070939234272313f,(float16_t)0.45234958723377088896f, -(float16_t)0.89114576479458318392f,(float16_t)0.45371712100016386993f, -(float16_t)0.89044872324475787817f,(float16_t)0.45508358712634383592f, -(float16_t)0.88974958638307277692f,(float16_t)0.45644898239688391772f, -(float16_t)0.88904835585466457371f,(float16_t)0.45781330359887717485f, -(float16_t)0.88834503330959635470f,(float16_t)0.45917654752194408951f, -(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.88693211879434219469f,(float16_t)0.46189979070246273141f, -(float16_t)0.88622253014888063838f,(float16_t)0.46325978355186014923f, -(float16_t)0.88551085613619995307f,(float16_t)0.46461868630623781584f, -(float16_t)0.88479709843093778954f,(float16_t)0.46597649576796618121f, -(float16_t)0.88408125871263498752f,(float16_t)0.46733320874198841510f, -(float16_t)0.88336333866573157891f,(float16_t)0.46868882203582790114f, -(float16_t)0.88264333997956279099f,(float16_t)0.47004333245959561971f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.88119711347122209322f,(float16_t)0.47274903195034279069f, -(float16_t)0.88047088905216075450f,(float16_t)0.47410021465054996703f, -(float16_t)0.87974259280004740713f,(float16_t)0.47545028174715586733f, -(float16_t)0.87901222642863352519f,(float16_t)0.47679923006332208812f, 
-(float16_t)0.87827979165654157523f,(float16_t)0.47814705642484300885f, -(float16_t)0.87754529020726135258f,(float16_t)0.47949375766015295275f, -(float16_t)0.87680872380914565145f,(float16_t)0.48083933060033395845f, -(float16_t)0.87607009419540660122f,(float16_t)0.48218377207912271887f, -(float16_t)0.87532940310411089246f,(float16_t)0.48352707893291868579f, -(float16_t)0.87458665227817611321f,(float16_t)0.48486924800079106435f, -(float16_t)0.87384184346536686316f,(float16_t)0.48621027612448641797f, -(float16_t)0.87309497841829009079f,(float16_t)0.48755016014843599592f, -(float16_t)0.87234605889439154058f,(float16_t)0.48888889691976317176f, -(float16_t)0.87159508665595097909f,(float16_t)0.49022648328829115938f, -(float16_t)0.87084206347007897531f,(float16_t)0.49156291610654989643f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.86932987134860684186f,(float16_t)0.49423230851595967295f, -(float16_t)0.86857070597134089507f,(float16_t)0.49556526182577254058f, -(float16_t)0.86780949676330332299f,(float16_t)0.49689704902265446895f, -(float16_t)0.86704624551569264845f,(float16_t)0.49822766697278181303f, -(float16_t)0.86628095402451299467f,(float16_t)0.49955711254508183838f, -(float16_t)0.86551362409056908920f,(float16_t)0.50088538261124071482f, -(float16_t)0.86474425751946237817f,(float16_t)0.50221247404571078832f, -(float16_t)0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)0.86319942171212415971f,(float16_t)0.50486310853126759035f, -(float16_t)0.86242395611104050168f,(float16_t)0.50618664534515522835f, -(float16_t)0.86164646114308129921f,(float16_t)0.50750899105297087033f, -(float16_t)0.86086693863776730939f,(float16_t)0.50883014254310698909f, -(float16_t)0.86008539042939013974f,(float16_t)0.51015009670676680908f, -(float16_t)0.85930181835700847337f,(float16_t)0.51146885043797030157f, -(float16_t)0.85851622426444273994f,(float16_t)0.51278640063356295542f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.85693897741782876221f,(float16_t)0.51541787801946292724f, -(float16_t)0.85614732837519447184f,(float16_t)0.51673179901764987321f, -(float16_t)0.85535366473519602870f,(float16_t)0.51804450409599933636f, -(float16_t)0.85455798836540053376f,(float16_t)0.51935599016558964269f, -(float16_t)0.85376030113811141042f,(float16_t)0.52066625414036715735f, -(float16_t)0.85296060493036363059f,(float16_t)0.52197529293715438925f, -(float16_t)0.85215890162391982887f,(float16_t)0.52328310347565643035f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.85054948126560347976f,(float16_t)0.52589502747108463065f, -(float16_t)0.84974176800085254868f,(float16_t)0.52719913478190127964f, -(float16_t)0.84893205521163961347f,(float16_t)0.52850200154222848337f, -(float16_t)0.84812034480329723252f,(float16_t)0.52980362468629460526f, -(float16_t)0.84730663868585831544f,(float16_t)0.53110400115125500076f, -(float16_t)0.84649093877405212627f,(float16_t)0.53240312787719790144f, -(float16_t)0.84567324698729906540f,(float16_t)0.53370100180715296379f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.84403189549006640835f,(float16_t)0.53629297906596318235f, -(float16_t)0.84320823964184543620f,(float16_t)0.53758707629564539410f, -(float16_t)0.84238259964318584760f,(float16_t)0.53887990853100842248f, -(float16_t)0.84155497743689844370f,(float16_t)0.54017147272989285423f, -(float16_t)0.84072537497045807253f,(float16_t)0.54146176585312344454f, 
-(float16_t)0.83989379419599952126f,(float16_t)0.54275078486451588944f, -(float16_t)0.83906023707031274217f,(float16_t)0.54403852673088382019f, -(float16_t)0.83822470555483807875f,(float16_t)0.54532498842204646383f, -(float16_t)0.83738720161566193578f,(float16_t)0.54661016691083486041f, -(float16_t)0.83654772722351200542f,(float16_t)0.54789405917310018967f, -(float16_t)0.83570628435375260423f,(float16_t)0.54917666218771965525f, -(float16_t)0.83486287498638001026f,(float16_t)0.55045797293660481131f, -(float16_t)0.83401750110601813315f,(float16_t)0.55173798840470733573f, -(float16_t)0.83317016470191318511f,(float16_t)0.55301670558002746780f, -(float16_t)0.83232086776792968408f,(float16_t)0.55429412145362000341f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.83061640030884631436f,(float16_t)0.55684503727516010407f, -(float16_t)0.82976123379452304540f,(float16_t)0.55811853122055610221f, -(float16_t)0.82890411477186487499f,(float16_t)0.55939071185913613604f, -(float16_t)0.82804504525775579626f,(float16_t)0.56066157619733603124f, -(float16_t)0.82718402727366913130f,(float16_t)0.56193112124468935775f, -(float16_t)0.82632106284566353427f,(float16_t)0.56319934401383409117f, -(float16_t)0.82545615400437755138f,(float16_t)0.56446624152051938506f, -(float16_t)0.82458930278502529099f,(float16_t)0.56573181078361312046f, -(float16_t)0.82372051122739142759f,(float16_t)0.56699604882510867832f, -(float16_t)0.82284978137582642788f,(float16_t)0.56825895267013148970f, -(float16_t)0.82197711527924155472f,(float16_t)0.56952051934694714053f, -(float16_t)0.82110251499110464835f,(float16_t)0.57078074588696725566f, -(float16_t)0.82022598256943468620f,(float16_t)0.57203962932475704850f, -(float16_t)0.81934752007679700903f,(float16_t)0.57329716669804220430f, -(float16_t)0.81846712958029865792f,(float16_t)0.57455335504771576360f, -(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.81670057286682784525f,(float16_t)0.57706167285567944170f, -(float16_t)0.81581441080673378075f,(float16_t)0.57831379641165558958f, -(float16_t)0.81492632905652662156f,(float16_t)0.57956455913940563285f, -(float16_t)0.81403632970594841378f,(float16_t)0.58081395809576452649f, -(float16_t)0.81314441484925359394f,(float16_t)0.58206199034077543697f, -(float16_t)0.81225058658520399302f,(float16_t)0.58330865293769829094f, -(float16_t)0.81135484701706372945f,(float16_t)0.58455394295301532637f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, -(float16_t)0.80955764240405125864f,(float16_t)0.58704039352091796911f, -(float16_t)0.80865618158817498262f,(float16_t)0.58828154822264522306f, -(float16_t)0.80775281792619035848f,(float16_t)0.58952131864106394055f, -(float16_t)0.80684755354379933401f,(float16_t)0.59075970185887416442f, -(float16_t)0.80594039057117627944f,(float16_t)0.59199669496204099239f, -(float16_t)0.80503133114296365758f,(float16_t)0.59323229503979979516f, -(float16_t)0.80412037739826569549f,(float16_t)0.59446649918466443197f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.80229279553811572168f,(float16_t)0.59693070806219639124f, -(float16_t)0.80137617172314024039f,(float16_t)0.59816070699634238395f, -(float16_t)0.80045766219262282082f,(float16_t)0.59938929840056454079f, -(float16_t)0.79953726910790501314f,(float16_t)0.60061647938386897305f, -(float16_t)0.79861499463476093297f,(float16_t)0.60184224705858002658f, -(float16_t)0.79769084094339115509f,(float16_t)0.60306659854034816437f, 
-(float16_t)0.79676481020841882774f,(float16_t)0.60428953094815596181f, -(float16_t)0.79583690460888356633f,(float16_t)0.60551104140432554512f, -(float16_t)0.79490712632823701256f,(float16_t)0.60673112703452447558f, -(float16_t)0.79397547755433717231f,(float16_t)0.60794978496777363208f, -(float16_t)0.79304196047944364167f,(float16_t)0.60916701233645320634f, -(float16_t)0.79210657730021238887f,(float16_t)0.61038280627630947528f, -(float16_t)0.79116933021769020318f,(float16_t)0.61159716392646190641f, -(float16_t)0.79023022143731003197f,(float16_t)0.61281008242940970820f, -(float16_t)0.78928925316888565167f,(float16_t)0.61402155893103849138f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.78740174702903142911f,(float16_t)0.61644017453085364622f, -(float16_t)0.78645521359908576731f,(float16_t)0.61764730793780386886f, -(float16_t)0.78550682956405393220f,(float16_t)0.61885298796097631957f, -(float16_t)0.78455659715557524159f,(float16_t)0.62005721176328909561f, -(float16_t)0.78360451860963820092f,(float16_t)0.62125997651108755271f, -(float16_t)0.78265059616657572938f,(float16_t)0.62246127937414996723f, -(float16_t)0.78169483207105938671f,(float16_t)0.62366111752569453053f, -(float16_t)0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)0.77977778792301455368f,(float16_t)0.62605638840434352232f, -(float16_t)0.77881651238147597827f,(float16_t)0.62725181549514408275f, -(float16_t)0.77785340420945314754f,(float16_t)0.62844576660183271155f, -(float16_t)0.77688846567323244230f,(float16_t)0.62963823891492698426f, -(float16_t)0.77592169904340768660f,(float16_t)0.63082922962842447046f, -(float16_t)0.77495310659487393057f,(float16_t)0.63201873593980906207f, -(float16_t)0.77398269060682289844f,(float16_t)0.63320675505005719064f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.77203639715038452351f,(float16_t)0.63557832048855611440f, -(float16_t)0.77106052426181381776f,(float16_t)0.63676186123628419899f, -(float16_t)0.77008283699334789674f,(float16_t)0.63794390362184405507f, -(float16_t)0.76910333764557969882f,(float16_t)0.63912444486377573138f, -(float16_t)0.76812202852336541881f,(float16_t)0.64030348218415167327f, -(float16_t)0.76713891193582040007f,(float16_t)0.64148101280858305095f, -(float16_t)0.76615399019631291733f,(float16_t)0.64265703396622686494f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.76417874053611667406f,(float16_t)0.64500453681554392737f, -(float16_t)0.76318841726338138010f,(float16_t)0.64617601298331628357f, -(float16_t)0.76219629813457900891f,(float16_t)0.64734596863651205911f, -(float16_t)0.76120238548426177871f,(float16_t)0.64851440102211244110f, -(float16_t)0.76020668165120242055f,(float16_t)0.64968130739068319368f, -(float16_t)0.75920918897838796102f,(float16_t)0.65084668499638087535f, -(float16_t)0.75820990981301528144f,(float16_t)0.65201053109695950027f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.75620600141439453523f,(float16_t)0.65433361783180044036f, -(float16_t)0.75520137689653654700f,(float16_t)0.65549285299961534967f, -(float16_t)0.75419497531688917125f,(float16_t)0.65665054572942893607f, -(float16_t)0.75318679904361252042f,(float16_t)0.65780669329707863735f, -(float16_t)0.75217685044904269986f,(float16_t)0.65896129298203731661f, -(float16_t)0.75116513190968636771f,(float16_t)0.66011434206742047870f, -(float16_t)0.75015164580621507273f,(float16_t)0.66126583783999226540f, 
-(float16_t)0.74913639452345937020f,(float16_t)0.66241577759017178373f, -(float16_t)0.74811938045040360379f,(float16_t)0.66356415861203976725f, -(float16_t)0.74710060598018013245f,(float16_t)0.66471097820334479334f, -(float16_t)0.74608007351006377927f,(float16_t)0.66585623366550972246f, -(float16_t)0.74505778544146594733f,(float16_t)0.66699992230363747137f, -(float16_t)0.74403374417992929057f,(float16_t)0.66814204142651845153f, -(float16_t)0.74300795213512171866f,(float16_t)0.66928258834663600929f, -(float16_t)0.74198041172083106787f,(float16_t)0.67042156038017308717f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.73992009545951620275f,(float16_t)0.67269476907077285777f, -(float16_t)0.73888732446061511361f,(float16_t)0.67382900037875603783f, -(float16_t)0.73785281478846598269f,(float16_t)0.67496164610201192513f, -(float16_t)0.73681656887736979300f,(float16_t)0.67609270357531592310f, -(float16_t)0.73577858916571359238f,(float16_t)0.67722217013718033485f, -(float16_t)0.73473887809596349907f,(float16_t)0.67835004312986146857f, -(float16_t)0.73369743811466026084f,(float16_t)0.67947631989936496666f, -(float16_t)0.73265427167241281570f,(float16_t)0.68060099779545302212f, -(float16_t)0.73160938122389262972f,(float16_t)0.68172407417164970767f, -(float16_t)0.73056276922782759087f,(float16_t)0.68284554638524808112f, -(float16_t)0.72951443814699701296f,(float16_t)0.68396541179731540350f, -(float16_t)0.72846439044822519637f,(float16_t)0.68508366777270035541f, -(float16_t)0.72741262860237576593f,(float16_t)0.68620031168003858824f, -(float16_t)0.72635915508434600873f,(float16_t)0.68731534089175905233f, -(float16_t)0.72530397237306076796f,(float16_t)0.68842875278409043638f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, -(float16_t)0.72318848930652745999f,(float16_t)0.69065071413453460458f, -(float16_t)0.72212819392921534511f,(float16_t)0.69175925836415774750f, -(float16_t)0.72106619931450810501f,(float16_t)0.69286617481742462932f, -(float16_t)0.72000250796138165477f,(float16_t)0.69397146088965389055f, -(float16_t)0.71893712237280449351f,(float16_t)0.69507511398000088043f, -(float16_t)0.71787004505573170920f,(float16_t)0.69617713149146298601f, -(float16_t)0.71680127852109953857f,(float16_t)0.69727751083088651551f, -(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, -(float16_t)0.71465868786276909308f,(float16_t)0.69947334464028376733f, -(float16_t)0.71358486878079352422f,(float16_t)0.70056879394324833576f, -(float16_t)0.71250937056469243469f,(float16_t)0.70166259474016845488f, -(float16_t)0.71143219574521643356f,(float16_t)0.70275474445722529993f, -(float16_t)0.71035334685706241764f,(float16_t)0.70384524052448493858f, -(float16_t)0.70927282643886568891f,(float16_t)0.70493408037590488124f, -(float16_t)0.70819063703319540259f,(float16_t)0.70602126144933974317f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.70602126144933974317f,(float16_t)0.70819063703319540259f, -(float16_t)0.70493408037590499227f,(float16_t)0.70927282643886568891f, -(float16_t)0.70384524052448493858f,(float16_t)0.71035334685706241764f, -(float16_t)0.70275474445722529993f,(float16_t)0.71143219574521643356f, -(float16_t)0.70166259474016845488f,(float16_t)0.71250937056469232367f, -(float16_t)0.70056879394324844679f,(float16_t)0.71358486878079352422f, -(float16_t)0.69947334464028376733f,(float16_t)0.71465868786276909308f, -(float16_t)0.69837624940897291559f,(float16_t)0.71573082528381859468f, 
-(float16_t)0.69727751083088662654f,(float16_t)0.71680127852109942754f, -(float16_t)0.69617713149146298601f,(float16_t)0.71787004505573170920f, -(float16_t)0.69507511398000088043f,(float16_t)0.71893712237280438249f, -(float16_t)0.69397146088965400157f,(float16_t)0.72000250796138165477f, -(float16_t)0.69286617481742474034f,(float16_t)0.72106619931450810501f, -(float16_t)0.69175925836415774750f,(float16_t)0.72212819392921534511f, -(float16_t)0.69065071413453460458f,(float16_t)0.72318848930652734897f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.68842875278409043638f,(float16_t)0.72530397237306076796f, -(float16_t)0.68731534089175905233f,(float16_t)0.72635915508434600873f, -(float16_t)0.68620031168003858824f,(float16_t)0.72741262860237576593f, -(float16_t)0.68508366777270035541f,(float16_t)0.72846439044822519637f, -(float16_t)0.68396541179731551452f,(float16_t)0.72951443814699690193f, -(float16_t)0.68284554638524808112f,(float16_t)0.73056276922782759087f, -(float16_t)0.68172407417164981869f,(float16_t)0.73160938122389262972f, -(float16_t)0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)0.67947631989936496666f,(float16_t)0.73369743811466026084f, -(float16_t)0.67835004312986146857f,(float16_t)0.73473887809596349907f, -(float16_t)0.67722217013718044587f,(float16_t)0.73577858916571348136f, -(float16_t)0.67609270357531603413f,(float16_t)0.73681656887736979300f, -(float16_t)0.67496164610201203615f,(float16_t)0.73785281478846598269f, -(float16_t)0.67382900037875614885f,(float16_t)0.73888732446061511361f, -(float16_t)0.67269476907077296879f,(float16_t)0.73992009545951609173f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.67042156038017308717f,(float16_t)0.74198041172083095685f, -(float16_t)0.66928258834663600929f,(float16_t)0.74300795213512171866f, -(float16_t)0.66814204142651856255f,(float16_t)0.74403374417992929057f, -(float16_t)0.66699992230363747137f,(float16_t)0.74505778544146594733f, -(float16_t)0.66585623366550972246f,(float16_t)0.74608007351006366825f, -(float16_t)0.66471097820334490436f,(float16_t)0.74710060598018013245f, -(float16_t)0.66356415861203987827f,(float16_t)0.74811938045040349277f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.66126583783999226540f,(float16_t)0.75015164580621496171f, -(float16_t)0.66011434206742047870f,(float16_t)0.75116513190968636771f, -(float16_t)0.65896129298203731661f,(float16_t)0.75217685044904269986f, -(float16_t)0.65780669329707874837f,(float16_t)0.75318679904361252042f, -(float16_t)0.65665054572942904709f,(float16_t)0.75419497531688917125f, -(float16_t)0.65549285299961546070f,(float16_t)0.75520137689653654700f, -(float16_t)0.65433361783180055138f,(float16_t)0.75620600141439453523f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.65201053109695950027f,(float16_t)0.75820990981301528144f, -(float16_t)0.65084668499638098638f,(float16_t)0.75920918897838796102f, -(float16_t)0.64968130739068319368f,(float16_t)0.76020668165120242055f, -(float16_t)0.64851440102211255212f,(float16_t)0.76120238548426177871f, -(float16_t)0.64734596863651205911f,(float16_t)0.76219629813457889789f, -(float16_t)0.64617601298331639459f,(float16_t)0.76318841726338126907f, -(float16_t)0.64500453681554403840f,(float16_t)0.76417874053611667406f, -(float16_t)0.64383154288979149715f,(float16_t)0.76516726562245895860f, -(float16_t)0.64265703396622686494f,(float16_t)0.76615399019631280630f, 
-(float16_t)0.64148101280858316198f,(float16_t)0.76713891193582040007f, -(float16_t)0.64030348218415167327f,(float16_t)0.76812202852336530778f, -(float16_t)0.63912444486377573138f,(float16_t)0.76910333764557958780f, -(float16_t)0.63794390362184416610f,(float16_t)0.77008283699334789674f, -(float16_t)0.63676186123628419899f,(float16_t)0.77106052426181381776f, -(float16_t)0.63557832048855622542f,(float16_t)0.77203639715038441249f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.63320675505005719064f,(float16_t)0.77398269060682278742f, -(float16_t)0.63201873593980906207f,(float16_t)0.77495310659487381955f, -(float16_t)0.63082922962842458148f,(float16_t)0.77592169904340757558f, -(float16_t)0.62963823891492709528f,(float16_t)0.77688846567323244230f, -(float16_t)0.62844576660183271155f,(float16_t)0.77785340420945303652f, -(float16_t)0.62725181549514419377f,(float16_t)0.77881651238147586724f, -(float16_t)0.62605638840434352232f,(float16_t)0.77977778792301444266f, -(float16_t)0.62485948814238645443f,(float16_t)0.78073722857209448822f, -(float16_t)0.62366111752569464155f,(float16_t)0.78169483207105938671f, -(float16_t)0.62246127937415007825f,(float16_t)0.78265059616657572938f, -(float16_t)0.62125997651108766373f,(float16_t)0.78360451860963820092f, -(float16_t)0.62005721176328920663f,(float16_t)0.78455659715557524159f, -(float16_t)0.61885298796097631957f,(float16_t)0.78550682956405393220f, -(float16_t)0.61764730793780397988f,(float16_t)0.78645521359908576731f, -(float16_t)0.61644017453085364622f,(float16_t)0.78740174702903131809f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.61402155893103849138f,(float16_t)0.78928925316888565167f, -(float16_t)0.61281008242940970820f,(float16_t)0.79023022143731003197f, -(float16_t)0.61159716392646201744f,(float16_t)0.79116933021769009216f, -(float16_t)0.61038280627630947528f,(float16_t)0.79210657730021227785f, -(float16_t)0.60916701233645320634f,(float16_t)0.79304196047944364167f, -(float16_t)0.60794978496777374311f,(float16_t)0.79397547755433717231f, -(float16_t)0.60673112703452447558f,(float16_t)0.79490712632823701256f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.60428953094815607283f,(float16_t)0.79676481020841871672f, -(float16_t)0.60306659854034827539f,(float16_t)0.79769084094339104407f, -(float16_t)0.60184224705858002658f,(float16_t)0.79861499463476082195f, -(float16_t)0.60061647938386897305f,(float16_t)0.79953726910790501314f, -(float16_t)0.59938929840056454079f,(float16_t)0.80045766219262270980f, -(float16_t)0.59816070699634238395f,(float16_t)0.80137617172314012937f, -(float16_t)0.59693070806219650226f,(float16_t)0.80229279553811572168f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.59446649918466454299f,(float16_t)0.80412037739826569549f, -(float16_t)0.59323229503979979516f,(float16_t)0.80503133114296365758f, -(float16_t)0.59199669496204099239f,(float16_t)0.80594039057117627944f, -(float16_t)0.59075970185887427544f,(float16_t)0.80684755354379922299f, -(float16_t)0.58952131864106394055f,(float16_t)0.80775281792619024746f, -(float16_t)0.58828154822264533408f,(float16_t)0.80865618158817498262f, -(float16_t)0.58704039352091808013f,(float16_t)0.80955764240405125864f, -(float16_t)0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)0.58455394295301532637f,(float16_t)0.81135484701706372945f, -(float16_t)0.58330865293769829094f,(float16_t)0.81225058658520388200f, 
-(float16_t)0.58206199034077554799f,(float16_t)0.81314441484925359394f, -(float16_t)0.58081395809576452649f,(float16_t)0.81403632970594830276f, -(float16_t)0.57956455913940574387f,(float16_t)0.81492632905652662156f, -(float16_t)0.57831379641165558958f,(float16_t)0.81581441080673378075f, -(float16_t)0.57706167285567955272f,(float16_t)0.81670057286682784525f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.57455335504771576360f,(float16_t)0.81846712958029865792f, -(float16_t)0.57329716669804231532f,(float16_t)0.81934752007679689800f, -(float16_t)0.57203962932475704850f,(float16_t)0.82022598256943468620f, -(float16_t)0.57078074588696736669f,(float16_t)0.82110251499110464835f, -(float16_t)0.56952051934694725155f,(float16_t)0.82197711527924155472f, -(float16_t)0.56825895267013148970f,(float16_t)0.82284978137582631685f, -(float16_t)0.56699604882510867832f,(float16_t)0.82372051122739131657f, -(float16_t)0.56573181078361323149f,(float16_t)0.82458930278502529099f, -(float16_t)0.56446624152051949608f,(float16_t)0.82545615400437744036f, -(float16_t)0.56319934401383409117f,(float16_t)0.82632106284566353427f, -(float16_t)0.56193112124468946877f,(float16_t)0.82718402727366913130f, -(float16_t)0.56066157619733603124f,(float16_t)0.82804504525775579626f, -(float16_t)0.55939071185913613604f,(float16_t)0.82890411477186487499f, -(float16_t)0.55811853122055610221f,(float16_t)0.82976123379452304540f, -(float16_t)0.55684503727516010407f,(float16_t)0.83061640030884620334f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.55429412145362011444f,(float16_t)0.83232086776792968408f, -(float16_t)0.55301670558002757883f,(float16_t)0.83317016470191318511f, -(float16_t)0.55173798840470744675f,(float16_t)0.83401750110601813315f, -(float16_t)0.55045797293660481131f,(float16_t)0.83486287498638001026f, -(float16_t)0.54917666218771976627f,(float16_t)0.83570628435375260423f, -(float16_t)0.54789405917310018967f,(float16_t)0.83654772722351189440f, -(float16_t)0.54661016691083486041f,(float16_t)0.83738720161566193578f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.54403852673088393122f,(float16_t)0.83906023707031263115f, -(float16_t)0.54275078486451600046f,(float16_t)0.83989379419599941023f, -(float16_t)0.54146176585312355556f,(float16_t)0.84072537497045796151f, -(float16_t)0.54017147272989296525f,(float16_t)0.84155497743689833268f, -(float16_t)0.53887990853100842248f,(float16_t)0.84238259964318584760f, -(float16_t)0.53758707629564550512f,(float16_t)0.84320823964184543620f, -(float16_t)0.53629297906596318235f,(float16_t)0.84403189549006640835f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.53370100180715296379f,(float16_t)0.84567324698729906540f, -(float16_t)0.53240312787719801246f,(float16_t)0.84649093877405212627f, -(float16_t)0.53110400115125500076f,(float16_t)0.84730663868585831544f, -(float16_t)0.52980362468629482731f,(float16_t)0.84812034480329712149f, -(float16_t)0.52850200154222848337f,(float16_t)0.84893205521163961347f, -(float16_t)0.52719913478190139067f,(float16_t)0.84974176800085243766f, -(float16_t)0.52589502747108474168f,(float16_t)0.85054948126560336874f, -(float16_t)0.52458968267846883826f,(float16_t)0.85135519310526519554f, -(float16_t)0.52328310347565643035f,(float16_t)0.85215890162391982887f, -(float16_t)0.52197529293715438925f,(float16_t)0.85296060493036363059f, -(float16_t)0.52066625414036726838f,(float16_t)0.85376030113811129940f, 
-(float16_t)0.51935599016558953167f,(float16_t)0.85455798836540053376f, -(float16_t)0.51804450409599933636f,(float16_t)0.85535366473519602870f, -(float16_t)0.51673179901764998423f,(float16_t)0.85614732837519447184f, -(float16_t)0.51541787801946314929f,(float16_t)0.85693897741782865118f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.51278640063356306644f,(float16_t)0.85851622426444273994f, -(float16_t)0.51146885043797052361f,(float16_t)0.85930181835700836235f, -(float16_t)0.51015009670676669806f,(float16_t)0.86008539042939025077f, -(float16_t)0.50883014254310698909f,(float16_t)0.86086693863776730939f, -(float16_t)0.50750899105297087033f,(float16_t)0.86164646114308129921f, -(float16_t)0.50618664534515533937f,(float16_t)0.86242395611104050168f, -(float16_t)0.50486310853126747933f,(float16_t)0.86319942171212415971f, -(float16_t)0.50353838372571757542f,(float16_t)0.86397285612158669643f, -(float16_t)0.50221247404571089934f,(float16_t)0.86474425751946237817f, -(float16_t)0.50088538261124093687f,(float16_t)0.86551362409056897818f, -(float16_t)0.49955711254508183838f,(float16_t)0.86628095402451299467f, -(float16_t)0.49822766697278186854f,(float16_t)0.86704624551569264845f, -(float16_t)0.49689704902265463549f,(float16_t)0.86780949676330321196f, -(float16_t)0.49556526182577248507f,(float16_t)0.86857070597134089507f, -(float16_t)0.49423230851595972846f,(float16_t)0.86932987134860673084f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.49156291610655006297f,(float16_t)0.87084206347007886428f, -(float16_t)0.49022648328829110387f,(float16_t)0.87159508665595109012f, -(float16_t)0.48888889691976322727f,(float16_t)0.87234605889439142956f, -(float16_t)0.48755016014843605143f,(float16_t)0.87309497841829009079f, -(float16_t)0.48621027612448652899f,(float16_t)0.87384184346536675214f, -(float16_t)0.48486924800079111986f,(float16_t)0.87458665227817611321f, -(float16_t)0.48352707893291874131f,(float16_t)0.87532940310411078144f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.48083933060033390294f,(float16_t)0.87680872380914576247f, -(float16_t)0.47949375766015300826f,(float16_t)0.87754529020726124156f, -(float16_t)0.47814705642484311987f,(float16_t)0.87827979165654146421f, -(float16_t)0.47679923006332225466f,(float16_t)0.87901222642863341417f, -(float16_t)0.47545028174715586733f,(float16_t)0.87974259280004740713f, -(float16_t)0.47410021465055002254f,(float16_t)0.88047088905216075450f, -(float16_t)0.47274903195034290171f,(float16_t)0.88119711347122198219f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.47004333245959561971f,(float16_t)0.88264333997956279099f, -(float16_t)0.46868882203582795665f,(float16_t)0.88336333866573157891f, -(float16_t)0.46733320874198852612f,(float16_t)0.88408125871263498752f, -(float16_t)0.46597649576796612569f,(float16_t)0.88479709843093778954f, -(float16_t)0.46461868630623781584f,(float16_t)0.88551085613619995307f, -(float16_t)0.46325978355186026025f,(float16_t)0.88622253014888063838f, -(float16_t)0.46189979070246284243f,(float16_t)0.88693211879434208367f, -(float16_t)0.46053871095824000514f,(float16_t)0.88763962040285393496f, -(float16_t)0.45917654752194414502f,(float16_t)0.88834503330959635470f, -(float16_t)0.45781330359887728587f,(float16_t)0.88904835585466457371f, -(float16_t)0.45644898239688386221f,(float16_t)0.88974958638307288794f, -(float16_t)0.45508358712634383592f,(float16_t)0.89044872324475787817f, 
-(float16_t)0.45371712100016392544f,(float16_t)0.89114576479458318392f, -(float16_t)0.45234958723377099998f,(float16_t)0.89184070939234272313f, -(float16_t)0.45098098904510380835f,(float16_t)0.89253355540276468894f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.44824061228521999700f,(float16_t)0.89391294514520325265f, -(float16_t)0.44686884016237432560f,(float16_t)0.89459948563138258493f, -(float16_t)0.44549601651398174074f,(float16_t)0.89528392103855758410f, -(float16_t)0.44412214457042925586f,(float16_t)0.89596624975618510689f, -(float16_t)0.44274722756457013384f,(float16_t)0.89664647017868015499f, -(float16_t)0.44137126873171661501f,(float16_t)0.89732458070541831763f, -(float16_t)0.43999427130963325583f,(float16_t)0.89800057974073987932f, -(float16_t)0.43861623853852771404f,(float16_t)0.89867446569395381673f, -(float16_t)0.43723717366104419835f,(float16_t)0.89934623697934146236f, -(float16_t)0.43585707992225547480f,(float16_t)0.90001589201616027935f, -(float16_t)0.43447596056965570588f,(float16_t)0.90068342922864685907f, -(float16_t)0.43309381885315201277f,(float16_t)0.90134884704602202810f, -(float16_t)0.43171065802505736997f,(float16_t)0.90201214390249306874f, -(float16_t)0.43032648134008261165f,(float16_t)0.90267331823725882600f, -(float16_t)0.42894129205532954829f,(float16_t)0.90333236849451181705f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.42616788872679961520f,(float16_t)0.90464409057824624050f, -(float16_t)0.42477968120910880589f,(float16_t)0.90529675931811881551f, -(float16_t)0.42339047414379610279f,(float16_t)0.90594729780726845902f, -(float16_t)0.42200027079979979261f,(float16_t)0.90659570451491533483f, -(float16_t)0.42060907444840250902f,(float16_t)0.90724197791529592738f, -(float16_t)0.41921688836322396066f,(float16_t)0.90788611648766626150f, -(float16_t)0.41782371582021238243f,(float16_t)0.90852811871630612117f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.41503442447608163146f,(float16_t)0.90980570810465222209f, -(float16_t)0.41363831223843455787f,(float16_t)0.91044129225806713634f, -(float16_t)0.41224122666988299857f,(float16_t)0.91107473405517624965f, -(float16_t)0.41084317105790391089f,(float16_t)0.91170603200542987832f, -(float16_t)0.40944414869225764786f,(float16_t)0.91233518462332274801f, -(float16_t)0.40804416286497874333f,(float16_t)0.91296219042839810154f, -(float16_t)0.40664321687036913966f,(float16_t)0.91358704794525080750f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.40383845756765412993f,(float16_t)0.91483031223794608611f, -(float16_t)0.40243465085941854120f,(float16_t)0.91544871608826783316f, -(float16_t)0.40102989718357578974f,(float16_t)0.91606496579933160973f, -(float16_t)0.39962419984564678810f,(float16_t)0.91667905992104270485f, -(float16_t)0.39821756215337361651f,(float16_t)0.91729099700837790632f, -(float16_t)0.39680998741671041907f,(float16_t)0.91790077562139038569f, -(float16_t)0.39540147894781629834f,(float16_t)0.91850839432521225181f, -(float16_t)0.39399204006104809883f,(float16_t)0.91911385169005777040f, -(float16_t)0.39258167407295152529f,(float16_t)0.91971714629122736095f, -(float16_t)0.39117038430225398171f,(float16_t)0.92031827670911048322f, -(float16_t)0.38975817406985641123f,(float16_t)0.92091724152918941204f, -(float16_t)0.38834504669882630168f,(float16_t)0.92151403934204190183f, -(float16_t)0.38693100551438869283f,(float16_t)0.92210866874334507237f, 
-(float16_t)0.38551605384391901543f,(float16_t)0.92270112833387851747f, -(float16_t)0.38410019501693504207f,(float16_t)0.92329141671952763559f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.38126576922216248722f,(float16_t)0.92446547432526260391f, -(float16_t)0.37984720892405110515f,(float16_t)0.92504924078267758425f, -(float16_t)0.37842775480876561511f,(float16_t)0.92563083050987271516f, -(float16_t)0.37700741021641831496f,(float16_t)0.92621024213831126826f, -(float16_t)0.37558617848921732607f,(float16_t)0.92678747430458174872f, -(float16_t)0.37416406297145798909f,(float16_t)0.92736252565040111495f, -(float16_t)0.37274106700951581406f,(float16_t)0.92793539482261788720f, -(float16_t)0.37131719395183759858f,(float16_t)0.92850608047321558924f, -(float16_t)0.36989244714893426691f,(float16_t)0.92907458125931574600f, -(float16_t)0.36846682995337232125f,(float16_t)0.92964089584318121418f, -(float16_t)0.36704034571976723589f,(float16_t)0.93020502289221906889f, -(float16_t)0.36561299780477396482f,(float16_t)0.93076696107898371224f, -(float16_t)0.36418478956707983629f,(float16_t)0.93132670908118042608f, -(float16_t)0.36275572436739722537f,(float16_t)0.93188426558166814750f, -(float16_t)0.36132580556845433906f,(float16_t)0.93243962926846235550f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, -(float16_t)0.35846342063373654030f,(float16_t)0.93354377297883617270f, -(float16_t)0.35703096123343003310f,(float16_t)0.93409255040425887007f, -(float16_t)0.35559766170478396274f,(float16_t)0.93463912981968078064f, -(float16_t)0.35416352542049051033f,(float16_t)0.93518350993894749923f, -(float16_t)0.35272855575521072646f,(float16_t)0.93572568948108036935f, -(float16_t)0.35129275608556714827f,(float16_t)0.93626566717027825959f, -(float16_t)0.34985612979013502866f,(float16_t)0.93680344173592156043f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.34698041084592368133f,(float16_t)0.93787237643998988545f, -(float16_t)0.34554132496398914931f,(float16_t)0.93840353406310805795f, -(float16_t)0.34410142598993898044f,(float16_t)0.93893248353206448797f, -(float16_t)0.34266071731199437833f,(float16_t)0.93945922360218991898f, -(float16_t)0.34121920232028241093f,(float16_t)0.93998375303401393577f, -(float16_t)0.33977688440682696225f,(float16_t)0.94050607059326829518f, -(float16_t)0.33833376696554129381f,(float16_t)0.94102617505088925753f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.33544514708453165852f,(float16_t)0.94205973977101731265f, -(float16_t)0.33399965144200949307f,(float16_t)0.94257319760144686605f, -(float16_t)0.33255336986604422389f,(float16_t)0.94308443746609349478f, -(float16_t)0.33110630575987642921f,(float16_t)0.94359345816196038559f, -(float16_t)0.32965846252858754806f,(float16_t)0.94410025849127265918f, -(float16_t)0.32820984357909266382f,(float16_t)0.94460483726148025685f, -(float16_t)0.32676045232013178898f,(float16_t)0.94510719328526060501f, -(float16_t)0.32531029216226298173f,(float16_t)0.94560732538052127971f, -(float16_t)0.32385936651785296458f,(float16_t)0.94610523237040333733f, -(float16_t)0.32240767880107001897f,(float16_t)0.94660091308328353499f, -(float16_t)0.32095523242787521445f,(float16_t)0.94709436635277721717f, -(float16_t)0.31950203081601574739f,(float16_t)0.94758559101774109124f, -(float16_t)0.31804807738501505998f,(float16_t)0.94807458592227622507f, -(float16_t)0.31659337555616584581f,(float16_t)0.94856134991573026749f, 
-(float16_t)0.31513792875252244485f,(float16_t)0.94904588185270055689f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.31222481392182505067f,(float16_t)0.95000824500184299914f, -(float16_t)0.31076715274961147495f,(float16_t)0.95048607394948170235f, -(float16_t)0.30930876031226878231f,(float16_t)0.95096166631157508231f, -(float16_t)0.30784964004153497763f,(float16_t)0.95143502096900833820f, -(float16_t)0.30638979537086108440f,(float16_t)0.95190613680793223494f, -(float16_t)0.30492922973540242948f,(float16_t)0.95237501271976587880f, -(float16_t)0.30346794657201137113f,(float16_t)0.95284164760119871573f, -(float16_t)0.30200594931922819519f,(float16_t)0.95330604035419375109f, -(float16_t)0.30054324141727339903f,(float16_t)0.95376818988599032512f, -(float16_t)0.29907982630804047508f,(float16_t)0.95422809510910566733f, -(float16_t)0.29761570743508630743f,(float16_t)0.95468575494133833814f, -(float16_t)0.29615088824362395536f,(float16_t)0.95514116830577067141f, -(float16_t)0.29468537218051432669f,(float16_t)0.95559433413077110586f, -(float16_t)0.29321916269425868373f,(float16_t)0.95604525134999640557f, -(float16_t)0.29175226323498937298f,(float16_t)0.95649391890239499059f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.28881640820604947972f,(float16_t)0.95738450078897585627f, -(float16_t)0.28734745954472956653f,(float16_t)0.95782641302753290802f, -(float16_t)0.28587783472708072630f,(float16_t)0.95826607140801767226f, -(float16_t)0.28440753721127182141f,(float16_t)0.95870347489587159906f, -(float16_t)0.28293657045705539188f,(float16_t)0.95913862246184189431f, -(float16_t)0.28146493792575805193f,(float16_t)0.95957151308198451733f, -(float16_t)0.27999264308027338455f,(float16_t)0.96000214573766584625f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.27704608030609995106f,(float16_t)0.96085663310767965850f, -(float16_t)0.27557181931095825478f,(float16_t)0.96128048581132063966f, -(float16_t)0.27409690986870632878f,(float16_t)0.96170207652912254037f, -(float16_t)0.27262135544994897662f,(float16_t)0.96212140426904158019f, -(float16_t)0.27114515952680806610f,(float16_t)0.96253846804435916340f, -(float16_t)0.26966832557291520178f,(float16_t)0.96295326687368387741f, -(float16_t)0.26819085706340317632f,(float16_t)0.96336579978095404631f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.26523403028551190141f,(float16_t)0.96418406395174571788f, -(float16_t)0.26375467897483151347f,(float16_t)0.96458979328981264700f, -(float16_t)0.26227470702391358914f,(float16_t)0.96499325285492032478f, -(float16_t)0.26079411791527556952f,(float16_t)0.96539444169768939830f, -(float16_t)0.25931291513288634576f,(float16_t)0.96579335887408357397f, -(float16_t)0.25783110216215893162f,(float16_t)0.96619000344541261516f, -(float16_t)0.25634868248994291395f,(float16_t)0.96658437447833311928f, -(float16_t)0.25486565960451462720f,(float16_t)0.96697647104485207059f, -(float16_t)0.25338203699557027004f,(float16_t)0.96736629222232850545f, -(float16_t)0.25189781815421691258f,(float16_t)0.96775383709347551076f, -(float16_t)0.25041300657296527987f,(float16_t)0.96813910474636244441f, -(float16_t)0.24892760574572025956f,(float16_t)0.96852209427441726675f, -(float16_t)0.24744161916777343557f,(float16_t)0.96890280477642887202f, -(float16_t)0.24595505033579459497f,(float16_t)0.96928123535654853171f, -(float16_t)0.24446790274782420616f,(float16_t)0.96965738512429244800f, 
-(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.24149188530286930243f,(float16_t)0.97040283868755550234f, -(float16_t)0.24000302244874149871f,(float16_t)0.97077214072895035013f, -(float16_t)0.23851359484431849944f,(float16_t)0.97113915844972509284f, -(float16_t)0.23702360599436733679f,(float16_t)0.97150389098625178352f, -(float16_t)0.23553305940497545889f,(float16_t)0.97186633748027939639f, -(float16_t)0.23404195858354345794f,(float16_t)0.97222649707893626925f, -(float16_t)0.23255030703877532794f,(float16_t)0.97258436893473221296f, -(float16_t)0.23105810828067127605f,(float16_t)0.97293995220556006576f, -(float16_t)0.22956536582051886852f,(float16_t)0.97329324605469824672f, -(float16_t)0.22807208317088578653f,(float16_t)0.97364424965081186603f, -(float16_t)0.22657826384561011168f,(float16_t)0.97399296216795583359f, -(float16_t)0.22508391135979277653f,(float16_t)0.97433938278557585821f, -(float16_t)0.22358902922979001504f,(float16_t)0.97468351068851066810f, -(float16_t)0.22209362097320359264f,(float16_t)0.97502534506699412020f, -(float16_t)0.22059769010887364526f,(float16_t)0.97536488511665686563f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.21760427463848366902f,(float16_t)0.97603707903903902388f, -(float16_t)0.21610679707621960333f,(float16_t)0.97636973133002114000f, -(float16_t)0.21460881099378692483f,(float16_t)0.97670008612871184184f, -(float16_t)0.21311031991609136194f,(float16_t)0.97702814265775439484f, -(float16_t)0.21161132736922760866f,(float16_t)0.97735390014519996082f, -(float16_t)0.21011183688046972118f,(float16_t)0.97767735782450992943f, -(float16_t)0.20861185197826345727f,(float16_t)0.97799851493455713936f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, -(float16_t)0.20561041305309932237f,(float16_t)0.97863392442942309657f, -(float16_t)0.20410896609281700687f,(float16_t)0.97894817531906219710f, -(float16_t)0.20260703884442110567f,(float16_t)0.97926012264908202098f, -(float16_t)0.20110463484209195606f,(float16_t)0.97956976568544051887f, -(float16_t)0.19960175762113105402f,(float16_t)0.97987710369951763756f, -(float16_t)0.19809841071795372680f,(float16_t)0.98018213596811731847f, -(float16_t)0.19659459767008022335f,(float16_t)0.98048486177346938497f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.19358558729580374602f,(float16_t)0.98108339115048659451f, -(float16_t)0.19208039704989238183f,(float16_t)0.98137919331375456089f, -(float16_t)0.19057475482025279523f,(float16_t)0.98167268619698311305f, -(float16_t)0.18906866414980627589f,(float16_t)0.98196386910955524296f, -(float16_t)0.18756212858252974129f,(float16_t)0.98225274136628937249f, -(float16_t)0.18605515166344663291f,(float16_t)0.98253930228744124076f, -(float16_t)0.18454773693861964423f,(float16_t)0.98282355119870523641f, -(float16_t)0.18303988795514106180f,(float16_t)0.98310548743121628501f, -(float16_t)0.18153160826112513249f,(float16_t)0.98338511032155118130f, -(float16_t)0.18002290140569951471f,(float16_t)0.98366241921173025453f, -(float16_t)0.17851377093899759019f,(float16_t)0.98393741344921892278f, -(float16_t)0.17700422041214886049f,(float16_t)0.98421009238692902521f, -(float16_t)0.17549425337727139751f,(float16_t)0.98448045538322093151f, -(float16_t)0.17398387338746384989f,(float16_t)0.98474850180190420801f, -(float16_t)0.17247308399679603386f,(float16_t)0.98501423101223983814f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, 
-(float16_t)0.16945029123396793125f,(float16_t)0.98553873531217606185f, -(float16_t)0.16793829497473122814f,(float16_t)0.98579750916756736512f, -(float16_t)0.16642590354046421508f,(float16_t)0.98605396334619543897f, -(float16_t)0.16491312048997008866f,(float16_t)0.98630809724459866938f, -(float16_t)0.16339994938297322524f,(float16_t)0.98655991026477540817f, -(float16_t)0.16188639378011188130f,(float16_t)0.98680940181418541624f, -(float16_t)0.16037245724292839566f,(float16_t)0.98705657130575097380f, -(float16_t)0.15885814333386139019f,(float16_t)0.98730141815785843473f, -(float16_t)0.15734345561623827581f,(float16_t)0.98754394179435922574f, -(float16_t)0.15582839765426531597f,(float16_t)0.98778414164457217783f, -(float16_t)0.15431297301302024372f,(float16_t)0.98802201714328352633f, -(float16_t)0.15279718525844340760f,(float16_t)0.98825756773074946437f, -(float16_t)0.15128103795733024994f,(float16_t)0.98849079285269658701f, -(float16_t)0.14976453467732162017f,(float16_t)0.98872169196032377858f, -(float16_t)0.14824767898689619749f,(float16_t)0.98895026451030298986f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.14521292465284751927f,(float16_t)0.98940042779138037687f, -(float16_t)0.14369503315029458212f,(float16_t)0.98962201746320077600f, -(float16_t)0.14217680351944800288f,(float16_t)0.98984127845882052821f, -(float16_t)0.14065823933284923863f,(float16_t)0.99005821026229712256f, -(float16_t)0.13913934416382628401f,(float16_t)0.99027281236316910817f, -(float16_t)0.13762012158648617710f,(float16_t)0.99048508425645698239f, -(float16_t)0.13610057517570620100f,(float16_t)0.99069502544266463406f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.13306052515713917561f,(float16_t)0.99110791372327677884f, -(float16_t)0.13154002870288328264f,(float16_t)0.99131085984611544415f, -(float16_t)0.13001922272223334631f,(float16_t)0.99151147331874389668f, -(float16_t)0.12849811079379322432f,(float16_t)0.99170975366909952520f, -(float16_t)0.12697669649688597682f,(float16_t)0.99190570043060932726f, -(float16_t)0.12545498341154620592f,(float16_t)0.99209931314219179654f, -(float16_t)0.12393297511851220083f,(float16_t)0.99229059134825736699f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.12088808723577722237f,(float16_t)0.99266614244894801899f, -(float16_t)0.11936521481099135467f,(float16_t)0.99285041445986510489f, -(float16_t)0.11784206150832501891f,(float16_t)0.99303235019785141002f, -(float16_t)0.11631863091190487725f,(float16_t)0.99321194923479450001f, -(float16_t)0.11479492660651025027f,(float16_t)0.99338921114808065305f, -(float16_t)0.11327095217756436019f,(float16_t)0.99356413552059530403f, -(float16_t)0.11174671121112665639f,(float16_t)0.99373672194072459884f, -(float16_t)0.11022220729388318428f,(float16_t)0.99390697000235606051f, -(float16_t)0.10869744401313867488f,(float16_t)0.99407487930487936634f, -(float16_t)0.10717242495680887049f,(float16_t)0.99424044945318790223f, -(float16_t)0.10564715371341069916f,(float16_t)0.99440368005767909576f, -(float16_t)0.10412163387205472520f,(float16_t)0.99456457073425541537f, -(float16_t)0.10259586902243628126f,(float16_t)0.99472312110432570265f, -(float16_t)0.10106986275482787718f,(float16_t)0.99487933079480561638f, -(float16_t)0.09954361866006944393f,(float16_t)0.99503319943811863180f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.09649043135525260662f,(float16_t)0.99533391214048227980f, 
-(float16_t)0.09496349532963906104f,(float16_t)0.99548075549192693856f, -(float16_t)0.09343633584574791151f,(float16_t)0.99562525638099430569f, -(float16_t)0.09190895649713269611f,(float16_t)0.99576741446765981713f, -(float16_t)0.09038136087786501072f,(float16_t)0.99590722941741172125f, -(float16_t)0.08885355258252468358f,(float16_t)0.99604470090125196702f, -(float16_t)0.08732553520619222576f,(float16_t)0.99617982859569687015f, -(float16_t)0.08579731234443987997f,(float16_t)0.99631261218277800129f, -(float16_t)0.08426888759332412659f,(float16_t)0.99644305135004263008f, -(float16_t)0.08274026454937580266f,(float16_t)0.99657114579055483539f, -(float16_t)0.08121144680959238582f,(float16_t)0.99669689520289606044f, -(float16_t)0.07968243797143012563f,(float16_t)0.99682029929116566791f, -(float16_t)0.07815324163279431524f,(float16_t)0.99694135776498216117f, -(float16_t)0.07662386139203161695f,(float16_t)0.99706007033948296225f, -(float16_t)0.07509430084792129145f,(float16_t)0.99717643673532618820f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.07203465324688941573f,(float16_t)0.99740212990127530279f, -(float16_t)0.07050457338961400866f,(float16_t)0.99751145614030345410f, -(float16_t)0.06897432762826673225f,(float16_t)0.99761843513851955478f, -(float16_t)0.06744391956366410645f,(float16_t)0.99772306664419163624f, -(float16_t)0.06591335279700392957f,(float16_t)0.99782535041111164453f, -(float16_t)0.06438263092985740954f,(float16_t)0.99792528619859599548f, -(float16_t)0.06285175756416142012f,(float16_t)0.99802287377148624081f, -(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.05978957074664000698f,(float16_t)0.99821100336047818846f, -(float16_t)0.05825826450043573163f,(float16_t)0.99830154493389289261f, -(float16_t)0.05672682116690778292f,(float16_t)0.99838973740734016094f, -(float16_t)0.05519524434969003135f,(float16_t)0.99847558057329477421f, -(float16_t)0.05366353765273067927f,(float16_t)0.99855907422975931365f, -(float16_t)0.05213170468028331672f,(float16_t)0.99864021818026527111f, -(float16_t)0.05059974903689933717f,(float16_t)0.99871901223387293811f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.04753548415695926094f,(float16_t)0.99886954991428356099f, -(float16_t)0.04600318213091464381f,(float16_t)0.99894129318685687124f, -(float16_t)0.04447077185493874402f,(float16_t)0.99901068585407337697f, -(float16_t)0.04293825693494095902f,(float16_t)0.99907772775264536147f, -(float16_t)0.04140564097707671171f,(float16_t)0.99914241872481690532f, -(float16_t)0.03987292758773984536f,(float16_t)0.99920475861836388631f, -(float16_t)0.03834012037355279123f,(float16_t)0.99926474728659442359f, -(float16_t)0.03680722294135899131f,(float16_t)0.99932238458834954375f, -(float16_t)0.03527423889821394709f,(float16_t)0.99937767038800284780f, -(float16_t)0.03374117185137764235f,(float16_t)0.99943060455546173237f, -(float16_t)0.03220802540830470378f,(float16_t)0.99948118696616694567f, -(float16_t)0.03067480317663658085f,(float16_t)0.99952941750109314256f, -(float16_t)0.02914150876419373953f,(float16_t)0.99957529604674921764f, -(float16_t)0.02760814577896581953f,(float16_t)0.99961882249517863830f, -(float16_t)0.02607471782910403962f,(float16_t)0.99965999674395922270f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.02300768146883941032f,(float16_t)0.99973528826056168306f, -(float16_t)0.02147408027546960502f,(float16_t)0.99976940535121527898f, 
-(float16_t)0.01994042855151459750f,(float16_t)0.99980116988788425569f, -(float16_t)0.01840672990580482019f,(float16_t)0.99983058179582340319f, -(float16_t)0.01687298794728177287f,(float16_t)0.99985764100582386060f, -(float16_t)0.01533920628498821985f,(float16_t)0.99988234745421256111f, -(float16_t)0.01380538852806034895f,(float16_t)0.99990470108285289808f, -(float16_t)0.01227153828571994447f,(float16_t)0.99992470183914450299f, -(float16_t)0.01073765916726457208f,(float16_t)0.99994234967602391162f, -(float16_t)0.00920375478205995995f,(float16_t)0.99995764455196389786f, -(float16_t)0.00766982873953107706f,(float16_t)0.99997058643097413988f, -(float16_t)0.00613588464915451517f,(float16_t)0.99998117528260110909f, -(float16_t)0.00460192612044867198f,(float16_t)0.99998941108192840321f, -(float16_t)0.00306795676296613791f,(float16_t)0.99999529380957619118f, -(float16_t)0.00153398018628476615f,(float16_t)0.99999882345170187925f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99998117528260110909f,(float16_t)0.00613588464915447527f, -(float16_t)0.99992470183914450299f,(float16_t)0.01227153828571992539f, -(float16_t)0.99983058179582340319f,(float16_t)0.01840672990580482019f, -(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, -(float16_t)0.99952941750109314256f,(float16_t)0.03067480317663662595f, -(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99907772775264536147f,(float16_t)0.04293825693494082024f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99847558057329477421f,(float16_t)0.05519524434968993420f, -(float16_t)0.99811811290014917919f,(float16_t)0.06132073630220857829f, -(float16_t)0.99772306664419163624f,(float16_t)0.06744391956366405094f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99682029929116566791f,(float16_t)0.07968243797143012563f, -(float16_t)0.99631261218277800129f,(float16_t)0.08579731234443989385f, -(float16_t)0.99576741446765981713f,(float16_t)0.09190895649713272386f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99456457073425541537f,(float16_t)0.10412163387205458642f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99321194923479450001f,(float16_t)0.11631863091190475235f, -(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.99170975366909952520f,(float16_t)0.12849811079379316880f, -(float16_t)0.99090263542778000971f,(float16_t)0.13458070850712616773f, -(float16_t)0.99005821026229712256f,(float16_t)0.14065823933284921088f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98825756773074946437f,(float16_t)0.15279718525844343535f, -(float16_t)0.98730141815785843473f,(float16_t)0.15885814333386144570f, -(float16_t)0.98630809724459866938f,(float16_t)0.16491312048996989437f, -(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)0.98421009238692902521f,(float16_t)0.17700422041214874946f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.98196386910955524296f,(float16_t)0.18906866414980619262f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97956976568544051887f,(float16_t)0.20110463484209190055f, -(float16_t)0.97831737071962765473f,(float16_t)0.20711137619221856032f, -(float16_t)0.97702814265775439484f,(float16_t)0.21311031991609136194f, 
-(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97433938278557585821f,(float16_t)0.22508391135979283204f, -(float16_t)0.97293995220556017678f,(float16_t)0.23105810828067110951f, -(float16_t)0.97150389098625178352f,(float16_t)0.23702360599436719801f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96852209427441737777f,(float16_t)0.24892760574572014853f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96539444169768939830f,(float16_t)0.26079411791527551401f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.96212140426904158019f,(float16_t)0.27262135544994897662f, -(float16_t)0.96043051941556578655f,(float16_t)0.27851968938505305973f, -(float16_t)0.95870347489587159906f,(float16_t)0.28440753721127187692f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95514116830577078243f,(float16_t)0.29615088824362378883f, -(float16_t)0.95330604035419386211f,(float16_t)0.30200594931922808417f, -(float16_t)0.95143502096900833820f,(float16_t)0.30784964004153486661f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94758559101774109124f,(float16_t)0.31950203081601569188f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.94359345816196038559f,(float16_t)0.33110630575987642921f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.93945922360218991898f,(float16_t)0.34266071731199437833f, -(float16_t)0.93733901191257495977f,(float16_t)0.34841868024943456472f, -(float16_t)0.93518350993894761025f,(float16_t)0.35416352542049034380f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.93076696107898371224f,(float16_t)0.36561299780477385379f, -(float16_t)0.92850608047321558924f,(float16_t)0.37131719395183754306f, -(float16_t)0.92621024213831137928f,(float16_t)0.37700741021641825945f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.92151403934204190183f,(float16_t)0.38834504669882624617f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91667905992104270485f,(float16_t)0.39962419984564678810f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, -(float16_t)0.91170603200542987832f,(float16_t)0.41084317105790391089f, -(float16_t)0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)0.90659570451491533483f,(float16_t)0.42200027079979968159f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.90134884704602202810f,(float16_t)0.43309381885315195726f, -(float16_t)0.89867446569395381673f,(float16_t)0.43861623853852765853f, -(float16_t)0.89596624975618521791f,(float16_t)0.44412214457042920035f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.89044872324475787817f,(float16_t)0.45508358712634383592f, -(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.88479709843093778954f,(float16_t)0.46597649576796618121f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.87901222642863352519f,(float16_t)0.47679923006332208812f, -(float16_t)0.87607009419540660122f,(float16_t)0.48218377207912271887f, -(float16_t)0.87309497841829009079f,(float16_t)0.48755016014843599592f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, 
-(float16_t)0.86704624551569264845f,(float16_t)0.49822766697278181303f, -(float16_t)0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)0.86086693863776730939f,(float16_t)0.50883014254310698909f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.85455798836540053376f,(float16_t)0.51935599016558964269f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.84812034480329723252f,(float16_t)0.52980362468629460526f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.84155497743689844370f,(float16_t)0.54017147272989285423f, -(float16_t)0.83822470555483807875f,(float16_t)0.54532498842204646383f, -(float16_t)0.83486287498638001026f,(float16_t)0.55045797293660481131f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.82804504525775579626f,(float16_t)0.56066157619733603124f, -(float16_t)0.82458930278502529099f,(float16_t)0.56573181078361312046f, -(float16_t)0.82110251499110464835f,(float16_t)0.57078074588696725566f, -(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.81403632970594841378f,(float16_t)0.58081395809576452649f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, -(float16_t)0.80684755354379933401f,(float16_t)0.59075970185887416442f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.79953726910790501314f,(float16_t)0.60061647938386897305f, -(float16_t)0.79583690460888356633f,(float16_t)0.60551104140432554512f, -(float16_t)0.79210657730021238887f,(float16_t)0.61038280627630947528f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.78455659715557524159f,(float16_t)0.62005721176328909561f, -(float16_t)0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)0.77688846567323244230f,(float16_t)0.62963823891492698426f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.76910333764557969882f,(float16_t)0.63912444486377573138f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.76120238548426177871f,(float16_t)0.64851440102211244110f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.75318679904361252042f,(float16_t)0.65780669329707863735f, -(float16_t)0.74913639452345937020f,(float16_t)0.66241577759017178373f, -(float16_t)0.74505778544146594733f,(float16_t)0.66699992230363747137f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.73681656887736979300f,(float16_t)0.67609270357531592310f, -(float16_t)0.73265427167241281570f,(float16_t)0.68060099779545302212f, -(float16_t)0.72846439044822519637f,(float16_t)0.68508366777270035541f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, -(float16_t)0.72000250796138165477f,(float16_t)0.69397146088965389055f, -(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, -(float16_t)0.71143219574521643356f,(float16_t)0.70275474445722529993f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.70275474445722529993f,(float16_t)0.71143219574521643356f, -(float16_t)0.69837624940897291559f,(float16_t)0.71573082528381859468f, -(float16_t)0.69397146088965400157f,(float16_t)0.72000250796138165477f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.68508366777270035541f,(float16_t)0.72846439044822519637f, 
-(float16_t)0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)0.67609270357531603413f,(float16_t)0.73681656887736979300f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.66699992230363747137f,(float16_t)0.74505778544146594733f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.65780669329707874837f,(float16_t)0.75318679904361252042f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.64851440102211255212f,(float16_t)0.76120238548426177871f, -(float16_t)0.64383154288979149715f,(float16_t)0.76516726562245895860f, -(float16_t)0.63912444486377573138f,(float16_t)0.76910333764557958780f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.62963823891492709528f,(float16_t)0.77688846567323244230f, -(float16_t)0.62485948814238645443f,(float16_t)0.78073722857209448822f, -(float16_t)0.62005721176328920663f,(float16_t)0.78455659715557524159f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.61038280627630947528f,(float16_t)0.79210657730021227785f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.60061647938386897305f,(float16_t)0.79953726910790501314f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.59075970185887427544f,(float16_t)0.80684755354379922299f, -(float16_t)0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)0.58081395809576452649f,(float16_t)0.81403632970594830276f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.57078074588696736669f,(float16_t)0.82110251499110464835f, -(float16_t)0.56573181078361323149f,(float16_t)0.82458930278502529099f, -(float16_t)0.56066157619733603124f,(float16_t)0.82804504525775579626f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.55045797293660481131f,(float16_t)0.83486287498638001026f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.54017147272989296525f,(float16_t)0.84155497743689833268f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.52980362468629482731f,(float16_t)0.84812034480329712149f, -(float16_t)0.52458968267846883826f,(float16_t)0.85135519310526519554f, -(float16_t)0.51935599016558953167f,(float16_t)0.85455798836540053376f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.50883014254310698909f,(float16_t)0.86086693863776730939f, -(float16_t)0.50353838372571757542f,(float16_t)0.86397285612158669643f, -(float16_t)0.49822766697278186854f,(float16_t)0.86704624551569264845f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.48755016014843605143f,(float16_t)0.87309497841829009079f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.47679923006332225466f,(float16_t)0.87901222642863341417f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.46597649576796612569f,(float16_t)0.88479709843093778954f, -(float16_t)0.46053871095824000514f,(float16_t)0.88763962040285393496f, -(float16_t)0.45508358712634383592f,(float16_t)0.89044872324475787817f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.44412214457042925586f,(float16_t)0.89596624975618510689f, -(float16_t)0.43861623853852771404f,(float16_t)0.89867446569395381673f, 
-(float16_t)0.43309381885315201277f,(float16_t)0.90134884704602202810f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.42200027079979979261f,(float16_t)0.90659570451491533483f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.41084317105790391089f,(float16_t)0.91170603200542987832f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.39962419984564678810f,(float16_t)0.91667905992104270485f, -(float16_t)0.39399204006104809883f,(float16_t)0.91911385169005777040f, -(float16_t)0.38834504669882630168f,(float16_t)0.92151403934204190183f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.37700741021641831496f,(float16_t)0.92621024213831126826f, -(float16_t)0.37131719395183759858f,(float16_t)0.92850608047321558924f, -(float16_t)0.36561299780477396482f,(float16_t)0.93076696107898371224f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, -(float16_t)0.35416352542049051033f,(float16_t)0.93518350993894749923f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.34266071731199437833f,(float16_t)0.93945922360218991898f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.33110630575987642921f,(float16_t)0.94359345816196038559f, -(float16_t)0.32531029216226298173f,(float16_t)0.94560732538052127971f, -(float16_t)0.31950203081601574739f,(float16_t)0.94758559101774109124f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.30784964004153497763f,(float16_t)0.95143502096900833820f, -(float16_t)0.30200594931922819519f,(float16_t)0.95330604035419375109f, -(float16_t)0.29615088824362395536f,(float16_t)0.95514116830577067141f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.28440753721127182141f,(float16_t)0.95870347489587159906f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.27262135544994897662f,(float16_t)0.96212140426904158019f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.26079411791527556952f,(float16_t)0.96539444169768939830f, -(float16_t)0.25486565960451462720f,(float16_t)0.96697647104485207059f, -(float16_t)0.24892760574572025956f,(float16_t)0.96852209427441726675f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.23702360599436733679f,(float16_t)0.97150389098625178352f, -(float16_t)0.23105810828067127605f,(float16_t)0.97293995220556006576f, -(float16_t)0.22508391135979277653f,(float16_t)0.97433938278557585821f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.21311031991609136194f,(float16_t)0.97702814265775439484f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, -(float16_t)0.20110463484209195606f,(float16_t)0.97956976568544051887f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.18906866414980627589f,(float16_t)0.98196386910955524296f, -(float16_t)0.18303988795514106180f,(float16_t)0.98310548743121628501f, -(float16_t)0.17700422041214886049f,(float16_t)0.98421009238692902521f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.16491312048997008866f,(float16_t)0.98630809724459866938f, -(float16_t)0.15885814333386139019f,(float16_t)0.98730141815785843473f, -(float16_t)0.15279718525844340760f,(float16_t)0.98825756773074946437f, 
-(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.14065823933284923863f,(float16_t)0.99005821026229712256f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.12849811079379322432f,(float16_t)0.99170975366909952520f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.11631863091190487725f,(float16_t)0.99321194923479450001f, -(float16_t)0.11022220729388318428f,(float16_t)0.99390697000235606051f, -(float16_t)0.10412163387205472520f,(float16_t)0.99456457073425541537f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.09190895649713269611f,(float16_t)0.99576741446765981713f, -(float16_t)0.08579731234443987997f,(float16_t)0.99631261218277800129f, -(float16_t)0.07968243797143012563f,(float16_t)0.99682029929116566791f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.06744391956366410645f,(float16_t)0.99772306664419163624f, -(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.05519524434969003135f,(float16_t)0.99847558057329477421f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.04293825693494095902f,(float16_t)0.99907772775264536147f, -(float16_t)0.03680722294135899131f,(float16_t)0.99932238458834954375f, -(float16_t)0.03067480317663658085f,(float16_t)0.99952941750109314256f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.01840672990580482019f,(float16_t)0.99983058179582340319f, -(float16_t)0.01227153828571994447f,(float16_t)0.99992470183914450299f, -(float16_t)0.00613588464915451517f,(float16_t)0.99998117528260110909f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, 
-(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, 
-(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0015335083008f, +(float16_t)1.0000000000000f,(float16_t)0.0030670166016f, +(float16_t)1.0000000000000f,(float16_t)0.0046005249023f, +(float16_t)1.0000000000000f,(float16_t)0.0061340332031f, +(float16_t)1.0000000000000f,(float16_t)0.0076713562012f, +(float16_t)1.0000000000000f,(float16_t)0.0092010498047f, +(float16_t)1.0000000000000f,(float16_t)0.0107345581055f, +(float16_t)1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)1.0000000000000f,(float16_t)0.0138015747070f, +(float16_t)1.0000000000000f,(float16_t)0.0153427124023f, +(float16_t)1.0000000000000f,(float16_t)0.0168762207031f, +(float16_t)1.0000000000000f,(float16_t)0.0184020996094f, +(float16_t)1.0000000000000f,(float16_t)0.0199432373047f, +(float16_t)1.0000000000000f,(float16_t)0.0214691162109f, +(float16_t)0.9995117187500f,(float16_t)0.0230102539062f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9995117187500f,(float16_t)0.0260772705078f, +(float16_t)0.9995117187500f,(float16_t)0.0276031494141f, +(float16_t)0.9995117187500f,(float16_t)0.0291442871094f, +(float16_t)0.9995117187500f,(float16_t)0.0306701660156f, +(float16_t)0.9995117187500f,(float16_t)0.0321960449219f, +(float16_t)0.9995117187500f,(float16_t)0.0337524414062f, +(float16_t)0.9995117187500f,(float16_t)0.0352783203125f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)0.9990234375000f,(float16_t)0.0383300781250f, +(float16_t)0.9990234375000f,(float16_t)0.0398864746094f, +(float16_t)0.9990234375000f,(float16_t)0.0414123535156f, +(float16_t)0.9990234375000f,(float16_t)0.0429382324219f, +(float16_t)0.9990234375000f,(float16_t)0.0444641113281f, +(float16_t)0.9990234375000f,(float16_t)0.0459899902344f, +(float16_t)0.9990234375000f,(float16_t)0.0475463867188f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9985351562500f,(float16_t)0.0505981445312f, +(float16_t)0.9985351562500f,(float16_t)0.0521240234375f, +(float16_t)0.9985351562500f,(float16_t)0.0536499023438f, +(float16_t)0.9985351562500f,(float16_t)0.0552062988281f, +(float16_t)0.9985351562500f,(float16_t)0.0567321777344f, +(float16_t)0.9985351562500f,(float16_t)0.0582580566406f, +(float16_t)0.9980468750000f,(float16_t)0.0597839355469f, +(float16_t)0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)0.9980468750000f,(float16_t)0.0628662109375f, +(float16_t)0.9980468750000f,(float16_t)0.0643920898438f, +(float16_t)0.9980468750000f,(float16_t)0.0659179687500f, +(float16_t)0.9975585937500f,(float16_t)0.0674438476562f, +(float16_t)0.9975585937500f,(float16_t)0.0689697265625f, +(float16_t)0.9975585937500f,(float16_t)0.0704956054688f, +(float16_t)0.9975585937500f,(float16_t)0.0720214843750f, 
+(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9970703125000f,(float16_t)0.0750732421875f, +(float16_t)0.9970703125000f,(float16_t)0.0765991210938f, +(float16_t)0.9970703125000f,(float16_t)0.0781250000000f, +(float16_t)0.9965820312500f,(float16_t)0.0797119140625f, +(float16_t)0.9965820312500f,(float16_t)0.0812377929688f, +(float16_t)0.9965820312500f,(float16_t)0.0827636718750f, +(float16_t)0.9965820312500f,(float16_t)0.0842895507812f, +(float16_t)0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)0.9960937500000f,(float16_t)0.0873413085938f, +(float16_t)0.9960937500000f,(float16_t)0.0888671875000f, +(float16_t)0.9960937500000f,(float16_t)0.0903930664062f, +(float16_t)0.9956054687500f,(float16_t)0.0919189453125f, +(float16_t)0.9956054687500f,(float16_t)0.0934448242188f, +(float16_t)0.9956054687500f,(float16_t)0.0949707031250f, +(float16_t)0.9951171875000f,(float16_t)0.0964965820312f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9951171875000f,(float16_t)0.0995483398438f, +(float16_t)0.9951171875000f,(float16_t)0.1010742187500f, +(float16_t)0.9946289062500f,(float16_t)0.1026000976562f, +(float16_t)0.9946289062500f,(float16_t)0.1041259765625f, +(float16_t)0.9946289062500f,(float16_t)0.1056518554688f, +(float16_t)0.9941406250000f,(float16_t)0.1071777343750f, +(float16_t)0.9941406250000f,(float16_t)0.1087036132812f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9936523437500f,(float16_t)0.1117553710938f, +(float16_t)0.9936523437500f,(float16_t)0.1132812500000f, +(float16_t)0.9931640625000f,(float16_t)0.1148071289062f, +(float16_t)0.9931640625000f,(float16_t)0.1163330078125f, +(float16_t)0.9931640625000f,(float16_t)0.1178588867188f, +(float16_t)0.9926757812500f,(float16_t)0.1193847656250f, +(float16_t)0.9926757812500f,(float16_t)0.1209106445312f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9921875000000f,(float16_t)0.1239624023438f, +(float16_t)0.9921875000000f,(float16_t)0.1254882812500f, +(float16_t)0.9916992187500f,(float16_t)0.1269531250000f, +(float16_t)0.9916992187500f,(float16_t)0.1285400390625f, +(float16_t)0.9916992187500f,(float16_t)0.1300048828125f, +(float16_t)0.9912109375000f,(float16_t)0.1315917968750f, +(float16_t)0.9912109375000f,(float16_t)0.1330566406250f, +(float16_t)0.9907226562500f,(float16_t)0.1345214843750f, +(float16_t)0.9907226562500f,(float16_t)0.1361083984375f, +(float16_t)0.9907226562500f,(float16_t)0.1375732421875f, +(float16_t)0.9902343750000f,(float16_t)0.1391601562500f, +(float16_t)0.9902343750000f,(float16_t)0.1406250000000f, +(float16_t)0.9897460937500f,(float16_t)0.1422119140625f, +(float16_t)0.9897460937500f,(float16_t)0.1436767578125f, +(float16_t)0.9892578125000f,(float16_t)0.1452636718750f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9887695312500f,(float16_t)0.1481933593750f, +(float16_t)0.9887695312500f,(float16_t)0.1497802734375f, +(float16_t)0.9882812500000f,(float16_t)0.1512451171875f, +(float16_t)0.9882812500000f,(float16_t)0.1528320312500f, +(float16_t)0.9877929687500f,(float16_t)0.1542968750000f, +(float16_t)0.9877929687500f,(float16_t)0.1558837890625f, +(float16_t)0.9873046875000f,(float16_t)0.1573486328125f, +(float16_t)0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)0.9868164062500f,(float16_t)0.1604003906250f, +(float16_t)0.9868164062500f,(float16_t)0.1618652343750f, +(float16_t)0.9863281250000f,(float16_t)0.1634521484375f, +(float16_t)0.9863281250000f,(float16_t)0.1649169921875f, 
+(float16_t)0.9858398437500f,(float16_t)0.1663818359375f, +(float16_t)0.9858398437500f,(float16_t)0.1679687500000f, +(float16_t)0.9853515625000f,(float16_t)0.1694335937500f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9848632812500f,(float16_t)0.1724853515625f, +(float16_t)0.9848632812500f,(float16_t)0.1739501953125f, +(float16_t)0.9843750000000f,(float16_t)0.1755371093750f, +(float16_t)0.9843750000000f,(float16_t)0.1770019531250f, +(float16_t)0.9838867187500f,(float16_t)0.1784667968750f, +(float16_t)0.9838867187500f,(float16_t)0.1800537109375f, +(float16_t)0.9833984375000f,(float16_t)0.1815185546875f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9829101562500f,(float16_t)0.1845703125000f, +(float16_t)0.9824218750000f,(float16_t)0.1860351562500f, +(float16_t)0.9824218750000f,(float16_t)0.1876220703125f, +(float16_t)0.9819335937500f,(float16_t)0.1890869140625f, +(float16_t)0.9814453125000f,(float16_t)0.1905517578125f, +(float16_t)0.9814453125000f,(float16_t)0.1921386718750f, +(float16_t)0.9809570312500f,(float16_t)0.1936035156250f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9804687500000f,(float16_t)0.1966552734375f, +(float16_t)0.9799804687500f,(float16_t)0.1981201171875f, +(float16_t)0.9799804687500f,(float16_t)0.1995849609375f, +(float16_t)0.9794921875000f,(float16_t)0.2010498046875f, +(float16_t)0.9794921875000f,(float16_t)0.2026367187500f, +(float16_t)0.9790039062500f,(float16_t)0.2041015625000f, +(float16_t)0.9785156250000f,(float16_t)0.2055664062500f, +(float16_t)0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)0.9780273437500f,(float16_t)0.2086181640625f, +(float16_t)0.9775390625000f,(float16_t)0.2100830078125f, +(float16_t)0.9775390625000f,(float16_t)0.2116699218750f, +(float16_t)0.9770507812500f,(float16_t)0.2131347656250f, +(float16_t)0.9765625000000f,(float16_t)0.2145996093750f, +(float16_t)0.9765625000000f,(float16_t)0.2160644531250f, +(float16_t)0.9760742187500f,(float16_t)0.2176513671875f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9755859375000f,(float16_t)0.2205810546875f, +(float16_t)0.9750976562500f,(float16_t)0.2220458984375f, +(float16_t)0.9746093750000f,(float16_t)0.2236328125000f, +(float16_t)0.9741210937500f,(float16_t)0.2250976562500f, +(float16_t)0.9741210937500f,(float16_t)0.2265625000000f, +(float16_t)0.9736328125000f,(float16_t)0.2280273437500f, +(float16_t)0.9731445312500f,(float16_t)0.2296142578125f, +(float16_t)0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)0.9726562500000f,(float16_t)0.2325439453125f, +(float16_t)0.9721679687500f,(float16_t)0.2340087890625f, +(float16_t)0.9716796875000f,(float16_t)0.2354736328125f, +(float16_t)0.9716796875000f,(float16_t)0.2370605468750f, +(float16_t)0.9711914062500f,(float16_t)0.2385253906250f, +(float16_t)0.9707031250000f,(float16_t)0.2399902343750f, +(float16_t)0.9702148437500f,(float16_t)0.2414550781250f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9697265625000f,(float16_t)0.2445068359375f, +(float16_t)0.9692382812500f,(float16_t)0.2459716796875f, +(float16_t)0.9687500000000f,(float16_t)0.2474365234375f, +(float16_t)0.9687500000000f,(float16_t)0.2489013671875f, +(float16_t)0.9682617187500f,(float16_t)0.2504882812500f, +(float16_t)0.9677734375000f,(float16_t)0.2519531250000f, +(float16_t)0.9672851562500f,(float16_t)0.2534179687500f, +(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9667968750000f,(float16_t)0.2563476562500f, 
+(float16_t)0.9663085937500f,(float16_t)0.2578125000000f, +(float16_t)0.9658203125000f,(float16_t)0.2592773437500f, +(float16_t)0.9653320312500f,(float16_t)0.2607421875000f, +(float16_t)0.9648437500000f,(float16_t)0.2622070312500f, +(float16_t)0.9643554687500f,(float16_t)0.2636718750000f, +(float16_t)0.9643554687500f,(float16_t)0.2651367187500f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9633789062500f,(float16_t)0.2683105468750f, +(float16_t)0.9628906250000f,(float16_t)0.2697753906250f, +(float16_t)0.9624023437500f,(float16_t)0.2712402343750f, +(float16_t)0.9619140625000f,(float16_t)0.2727050781250f, +(float16_t)0.9619140625000f,(float16_t)0.2741699218750f, +(float16_t)0.9614257812500f,(float16_t)0.2756347656250f, +(float16_t)0.9609375000000f,(float16_t)0.2770996093750f, +(float16_t)0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)0.9599609375000f,(float16_t)0.2800292968750f, +(float16_t)0.9594726562500f,(float16_t)0.2814941406250f, +(float16_t)0.9589843750000f,(float16_t)0.2829589843750f, +(float16_t)0.9584960937500f,(float16_t)0.2844238281250f, +(float16_t)0.9584960937500f,(float16_t)0.2858886718750f, +(float16_t)0.9580078125000f,(float16_t)0.2873535156250f, +(float16_t)0.9575195312500f,(float16_t)0.2888183593750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9565429687500f,(float16_t)0.2917480468750f, +(float16_t)0.9560546875000f,(float16_t)0.2932128906250f, +(float16_t)0.9555664062500f,(float16_t)0.2946777343750f, +(float16_t)0.9550781250000f,(float16_t)0.2961425781250f, +(float16_t)0.9545898437500f,(float16_t)0.2976074218750f, +(float16_t)0.9541015625000f,(float16_t)0.2990722656250f, +(float16_t)0.9536132812500f,(float16_t)0.3005371093750f, +(float16_t)0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)0.9526367187500f,(float16_t)0.3034667968750f, +(float16_t)0.9521484375000f,(float16_t)0.3049316406250f, +(float16_t)0.9521484375000f,(float16_t)0.3063964843750f, +(float16_t)0.9516601562500f,(float16_t)0.3078613281250f, +(float16_t)0.9511718750000f,(float16_t)0.3093261718750f, +(float16_t)0.9506835937500f,(float16_t)0.3107910156250f, +(float16_t)0.9501953125000f,(float16_t)0.3122558593750f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9492187500000f,(float16_t)0.3151855468750f, +(float16_t)0.9487304687500f,(float16_t)0.3166503906250f, +(float16_t)0.9482421875000f,(float16_t)0.3181152343750f, +(float16_t)0.9477539062500f,(float16_t)0.3195800781250f, +(float16_t)0.9472656250000f,(float16_t)0.3210449218750f, +(float16_t)0.9467773437500f,(float16_t)0.3225097656250f, +(float16_t)0.9462890625000f,(float16_t)0.3239746093750f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9453125000000f,(float16_t)0.3266601562500f, +(float16_t)0.9448242187500f,(float16_t)0.3281250000000f, +(float16_t)0.9443359375000f,(float16_t)0.3295898437500f, +(float16_t)0.9433593750000f,(float16_t)0.3310546875000f, +(float16_t)0.9428710937500f,(float16_t)0.3325195312500f, +(float16_t)0.9423828125000f,(float16_t)0.3339843750000f, +(float16_t)0.9418945312500f,(float16_t)0.3354492187500f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9409179687500f,(float16_t)0.3383789062500f, +(float16_t)0.9404296875000f,(float16_t)0.3398437500000f, +(float16_t)0.9399414062500f,(float16_t)0.3413085937500f, +(float16_t)0.9394531250000f,(float16_t)0.3427734375000f, +(float16_t)0.9389648437500f,(float16_t)0.3439941406250f, +(float16_t)0.9384765625000f,(float16_t)0.3454589843750f, 
+(float16_t)0.9379882812500f,(float16_t)0.3469238281250f, +(float16_t)0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)0.9370117187500f,(float16_t)0.3498535156250f, +(float16_t)0.9360351562500f,(float16_t)0.3513183593750f, +(float16_t)0.9355468750000f,(float16_t)0.3527832031250f, +(float16_t)0.9350585937500f,(float16_t)0.3542480468750f, +(float16_t)0.9345703125000f,(float16_t)0.3557128906250f, +(float16_t)0.9340820312500f,(float16_t)0.3569335937500f, +(float16_t)0.9335937500000f,(float16_t)0.3583984375000f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9326171875000f,(float16_t)0.3613281250000f, +(float16_t)0.9316406250000f,(float16_t)0.3627929687500f, +(float16_t)0.9311523437500f,(float16_t)0.3642578125000f, +(float16_t)0.9306640625000f,(float16_t)0.3657226562500f, +(float16_t)0.9301757812500f,(float16_t)0.3669433593750f, +(float16_t)0.9296875000000f,(float16_t)0.3684082031250f, +(float16_t)0.9291992187500f,(float16_t)0.3698730468750f, +(float16_t)0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)0.9277343750000f,(float16_t)0.3728027343750f, +(float16_t)0.9272460937500f,(float16_t)0.3742675781250f, +(float16_t)0.9267578125000f,(float16_t)0.3754882812500f, +(float16_t)0.9262695312500f,(float16_t)0.3769531250000f, +(float16_t)0.9257812500000f,(float16_t)0.3784179687500f, +(float16_t)0.9252929687500f,(float16_t)0.3798828125000f, +(float16_t)0.9243164062500f,(float16_t)0.3813476562500f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9233398437500f,(float16_t)0.3840332031250f, +(float16_t)0.9228515625000f,(float16_t)0.3854980468750f, +(float16_t)0.9218750000000f,(float16_t)0.3869628906250f, +(float16_t)0.9213867187500f,(float16_t)0.3884277343750f, +(float16_t)0.9208984375000f,(float16_t)0.3896484375000f, +(float16_t)0.9204101562500f,(float16_t)0.3911132812500f, +(float16_t)0.9199218750000f,(float16_t)0.3925781250000f, +(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)0.9184570312500f,(float16_t)0.3955078125000f, +(float16_t)0.9179687500000f,(float16_t)0.3967285156250f, +(float16_t)0.9174804687500f,(float16_t)0.3981933593750f, +(float16_t)0.9165039062500f,(float16_t)0.3996582031250f, +(float16_t)0.9160156250000f,(float16_t)0.4011230468750f, +(float16_t)0.9155273437500f,(float16_t)0.4023437500000f, +(float16_t)0.9150390625000f,(float16_t)0.4038085937500f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9135742187500f,(float16_t)0.4067382812500f, +(float16_t)0.9130859375000f,(float16_t)0.4079589843750f, +(float16_t)0.9121093750000f,(float16_t)0.4094238281250f, +(float16_t)0.9116210937500f,(float16_t)0.4108886718750f, +(float16_t)0.9111328125000f,(float16_t)0.4123535156250f, +(float16_t)0.9106445312500f,(float16_t)0.4135742187500f, +(float16_t)0.9096679687500f,(float16_t)0.4150390625000f, +(float16_t)0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)0.9086914062500f,(float16_t)0.4177246093750f, +(float16_t)0.9077148437500f,(float16_t)0.4191894531250f, +(float16_t)0.9072265625000f,(float16_t)0.4206542968750f, +(float16_t)0.9067382812500f,(float16_t)0.4221191406250f, +(float16_t)0.9057617187500f,(float16_t)0.4233398437500f, +(float16_t)0.9052734375000f,(float16_t)0.4248046875000f, +(float16_t)0.9047851562500f,(float16_t)0.4262695312500f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.9033203125000f,(float16_t)0.4289550781250f, +(float16_t)0.9028320312500f,(float16_t)0.4304199218750f, +(float16_t)0.9018554687500f,(float16_t)0.4316406250000f, 
+(float16_t)0.9013671875000f,(float16_t)0.4331054687500f, +(float16_t)0.9008789062500f,(float16_t)0.4345703125000f, +(float16_t)0.8999023437500f,(float16_t)0.4357910156250f, +(float16_t)0.8994140625000f,(float16_t)0.4372558593750f, +(float16_t)0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)0.8979492187500f,(float16_t)0.4399414062500f, +(float16_t)0.8974609375000f,(float16_t)0.4414062500000f, +(float16_t)0.8964843750000f,(float16_t)0.4426269531250f, +(float16_t)0.8959960937500f,(float16_t)0.4440917968750f, +(float16_t)0.8955078125000f,(float16_t)0.4455566406250f, +(float16_t)0.8945312500000f,(float16_t)0.4467773437500f, +(float16_t)0.8940429687500f,(float16_t)0.4482421875000f, +(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8925781250000f,(float16_t)0.4509277343750f, +(float16_t)0.8916015625000f,(float16_t)0.4523925781250f, +(float16_t)0.8911132812500f,(float16_t)0.4536132812500f, +(float16_t)0.8906250000000f,(float16_t)0.4550781250000f, +(float16_t)0.8896484375000f,(float16_t)0.4565429687500f, +(float16_t)0.8891601562500f,(float16_t)0.4577636718750f, +(float16_t)0.8881835937500f,(float16_t)0.4592285156250f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8867187500000f,(float16_t)0.4619140625000f, +(float16_t)0.8862304687500f,(float16_t)0.4633789062500f, +(float16_t)0.8857421875000f,(float16_t)0.4645996093750f, +(float16_t)0.8847656250000f,(float16_t)0.4660644531250f, +(float16_t)0.8842773437500f,(float16_t)0.4672851562500f, +(float16_t)0.8833007812500f,(float16_t)0.4687500000000f, +(float16_t)0.8828125000000f,(float16_t)0.4699707031250f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8813476562500f,(float16_t)0.4726562500000f, +(float16_t)0.8803710937500f,(float16_t)0.4741210937500f, +(float16_t)0.8798828125000f,(float16_t)0.4753417968750f, +(float16_t)0.8789062500000f,(float16_t)0.4768066406250f, +(float16_t)0.8784179687500f,(float16_t)0.4780273437500f, +(float16_t)0.8774414062500f,(float16_t)0.4794921875000f, +(float16_t)0.8769531250000f,(float16_t)0.4809570312500f, +(float16_t)0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)0.8754882812500f,(float16_t)0.4836425781250f, +(float16_t)0.8745117187500f,(float16_t)0.4848632812500f, +(float16_t)0.8740234375000f,(float16_t)0.4863281250000f, +(float16_t)0.8730468750000f,(float16_t)0.4875488281250f, +(float16_t)0.8725585937500f,(float16_t)0.4887695312500f, +(float16_t)0.8715820312500f,(float16_t)0.4902343750000f, +(float16_t)0.8706054687500f,(float16_t)0.4914550781250f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8691406250000f,(float16_t)0.4941406250000f, +(float16_t)0.8686523437500f,(float16_t)0.4956054687500f, +(float16_t)0.8676757812500f,(float16_t)0.4968261718750f, +(float16_t)0.8671875000000f,(float16_t)0.4982910156250f, +(float16_t)0.8662109375000f,(float16_t)0.4995117187500f, +(float16_t)0.8657226562500f,(float16_t)0.5009765625000f, +(float16_t)0.8647460937500f,(float16_t)0.5024414062500f, +(float16_t)0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)0.8632812500000f,(float16_t)0.5048828125000f, +(float16_t)0.8623046875000f,(float16_t)0.5063476562500f, +(float16_t)0.8618164062500f,(float16_t)0.5073242187500f, +(float16_t)0.8608398437500f,(float16_t)0.5087890625000f, +(float16_t)0.8598632812500f,(float16_t)0.5102539062500f, +(float16_t)0.8593750000000f,(float16_t)0.5112304687500f, +(float16_t)0.8583984375000f,(float16_t)0.5126953125000f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, 
+(float16_t)0.8569335937500f,(float16_t)0.5156250000000f, +(float16_t)0.8559570312500f,(float16_t)0.5166015625000f, +(float16_t)0.8554687500000f,(float16_t)0.5180664062500f, +(float16_t)0.8544921875000f,(float16_t)0.5195312500000f, +(float16_t)0.8540039062500f,(float16_t)0.5205078125000f, +(float16_t)0.8530273437500f,(float16_t)0.5219726562500f, +(float16_t)0.8520507812500f,(float16_t)0.5234375000000f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8505859375000f,(float16_t)0.5258789062500f, +(float16_t)0.8496093750000f,(float16_t)0.5273437500000f, +(float16_t)0.8491210937500f,(float16_t)0.5283203125000f, +(float16_t)0.8481445312500f,(float16_t)0.5297851562500f, +(float16_t)0.8471679687500f,(float16_t)0.5312500000000f, +(float16_t)0.8466796875000f,(float16_t)0.5322265625000f, +(float16_t)0.8457031250000f,(float16_t)0.5336914062500f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8442382812500f,(float16_t)0.5361328125000f, +(float16_t)0.8432617187500f,(float16_t)0.5375976562500f, +(float16_t)0.8422851562500f,(float16_t)0.5390625000000f, +(float16_t)0.8417968750000f,(float16_t)0.5400390625000f, +(float16_t)0.8408203125000f,(float16_t)0.5415039062500f, +(float16_t)0.8398437500000f,(float16_t)0.5429687500000f, +(float16_t)0.8388671875000f,(float16_t)0.5439453125000f, +(float16_t)0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)0.8374023437500f,(float16_t)0.5463867187500f, +(float16_t)0.8364257812500f,(float16_t)0.5478515625000f, +(float16_t)0.8359375000000f,(float16_t)0.5493164062500f, +(float16_t)0.8349609375000f,(float16_t)0.5502929687500f, +(float16_t)0.8339843750000f,(float16_t)0.5517578125000f, +(float16_t)0.8330078125000f,(float16_t)0.5532226562500f, +(float16_t)0.8325195312500f,(float16_t)0.5541992187500f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8305664062500f,(float16_t)0.5566406250000f, +(float16_t)0.8295898437500f,(float16_t)0.5581054687500f, +(float16_t)0.8291015625000f,(float16_t)0.5595703125000f, +(float16_t)0.8281250000000f,(float16_t)0.5605468750000f, +(float16_t)0.8271484375000f,(float16_t)0.5620117187500f, +(float16_t)0.8261718750000f,(float16_t)0.5629882812500f, +(float16_t)0.8256835937500f,(float16_t)0.5644531250000f, +(float16_t)0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)0.8237304687500f,(float16_t)0.5668945312500f, +(float16_t)0.8227539062500f,(float16_t)0.5683593750000f, +(float16_t)0.8217773437500f,(float16_t)0.5693359375000f, +(float16_t)0.8212890625000f,(float16_t)0.5708007812500f, +(float16_t)0.8203125000000f,(float16_t)0.5722656250000f, +(float16_t)0.8193359375000f,(float16_t)0.5732421875000f, +(float16_t)0.8183593750000f,(float16_t)0.5747070312500f, +(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)0.8168945312500f,(float16_t)0.5771484375000f, +(float16_t)0.8159179687500f,(float16_t)0.5781250000000f, +(float16_t)0.8149414062500f,(float16_t)0.5795898437500f, +(float16_t)0.8139648437500f,(float16_t)0.5810546875000f, +(float16_t)0.8129882812500f,(float16_t)0.5820312500000f, +(float16_t)0.8120117187500f,(float16_t)0.5834960937500f, +(float16_t)0.8115234375000f,(float16_t)0.5844726562500f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.8095703125000f,(float16_t)0.5869140625000f, +(float16_t)0.8085937500000f,(float16_t)0.5883789062500f, +(float16_t)0.8076171875000f,(float16_t)0.5893554687500f, +(float16_t)0.8066406250000f,(float16_t)0.5908203125000f, +(float16_t)0.8061523437500f,(float16_t)0.5917968750000f, 
+(float16_t)0.8051757812500f,(float16_t)0.5932617187500f, +(float16_t)0.8041992187500f,(float16_t)0.5942382812500f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.8022460937500f,(float16_t)0.5971679687500f, +(float16_t)0.8012695312500f,(float16_t)0.5981445312500f, +(float16_t)0.8002929687500f,(float16_t)0.5996093750000f, +(float16_t)0.7993164062500f,(float16_t)0.6005859375000f, +(float16_t)0.7988281250000f,(float16_t)0.6020507812500f, +(float16_t)0.7978515625000f,(float16_t)0.6030273437500f, +(float16_t)0.7968750000000f,(float16_t)0.6044921875000f, +(float16_t)0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)0.7949218750000f,(float16_t)0.6069335937500f, +(float16_t)0.7939453125000f,(float16_t)0.6079101562500f, +(float16_t)0.7929687500000f,(float16_t)0.6093750000000f, +(float16_t)0.7919921875000f,(float16_t)0.6103515625000f, +(float16_t)0.7910156250000f,(float16_t)0.6118164062500f, +(float16_t)0.7900390625000f,(float16_t)0.6127929687500f, +(float16_t)0.7890625000000f,(float16_t)0.6142578125000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7875976562500f,(float16_t)0.6162109375000f, +(float16_t)0.7866210937500f,(float16_t)0.6176757812500f, +(float16_t)0.7856445312500f,(float16_t)0.6186523437500f, +(float16_t)0.7846679687500f,(float16_t)0.6201171875000f, +(float16_t)0.7836914062500f,(float16_t)0.6210937500000f, +(float16_t)0.7827148437500f,(float16_t)0.6225585937500f, +(float16_t)0.7817382812500f,(float16_t)0.6235351562500f, +(float16_t)0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)0.7797851562500f,(float16_t)0.6259765625000f, +(float16_t)0.7788085937500f,(float16_t)0.6274414062500f, +(float16_t)0.7778320312500f,(float16_t)0.6284179687500f, +(float16_t)0.7768554687500f,(float16_t)0.6293945312500f, +(float16_t)0.7758789062500f,(float16_t)0.6308593750000f, +(float16_t)0.7749023437500f,(float16_t)0.6318359375000f, +(float16_t)0.7739257812500f,(float16_t)0.6333007812500f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7719726562500f,(float16_t)0.6357421875000f, +(float16_t)0.7709960937500f,(float16_t)0.6367187500000f, +(float16_t)0.7700195312500f,(float16_t)0.6381835937500f, +(float16_t)0.7690429687500f,(float16_t)0.6391601562500f, +(float16_t)0.7680664062500f,(float16_t)0.6401367187500f, +(float16_t)0.7670898437500f,(float16_t)0.6416015625000f, +(float16_t)0.7661132812500f,(float16_t)0.6425781250000f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7641601562500f,(float16_t)0.6450195312500f, +(float16_t)0.7631835937500f,(float16_t)0.6459960937500f, +(float16_t)0.7622070312500f,(float16_t)0.6474609375000f, +(float16_t)0.7612304687500f,(float16_t)0.6484375000000f, +(float16_t)0.7602539062500f,(float16_t)0.6499023437500f, +(float16_t)0.7592773437500f,(float16_t)0.6508789062500f, +(float16_t)0.7583007812500f,(float16_t)0.6518554687500f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7563476562500f,(float16_t)0.6542968750000f, +(float16_t)0.7553710937500f,(float16_t)0.6552734375000f, +(float16_t)0.7543945312500f,(float16_t)0.6567382812500f, +(float16_t)0.7534179687500f,(float16_t)0.6577148437500f, +(float16_t)0.7519531250000f,(float16_t)0.6591796875000f, +(float16_t)0.7509765625000f,(float16_t)0.6601562500000f, +(float16_t)0.7500000000000f,(float16_t)0.6611328125000f, +(float16_t)0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)0.7480468750000f,(float16_t)0.6635742187500f, +(float16_t)0.7470703125000f,(float16_t)0.6645507812500f, 
+(float16_t)0.7460937500000f,(float16_t)0.6660156250000f, +(float16_t)0.7451171875000f,(float16_t)0.6669921875000f, +(float16_t)0.7441406250000f,(float16_t)0.6679687500000f, +(float16_t)0.7431640625000f,(float16_t)0.6694335937500f, +(float16_t)0.7421875000000f,(float16_t)0.6704101562500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7397460937500f,(float16_t)0.6728515625000f, +(float16_t)0.7387695312500f,(float16_t)0.6738281250000f, +(float16_t)0.7377929687500f,(float16_t)0.6748046875000f, +(float16_t)0.7368164062500f,(float16_t)0.6762695312500f, +(float16_t)0.7358398437500f,(float16_t)0.6772460937500f, +(float16_t)0.7348632812500f,(float16_t)0.6782226562500f, +(float16_t)0.7338867187500f,(float16_t)0.6796875000000f, +(float16_t)0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)0.7314453125000f,(float16_t)0.6816406250000f, +(float16_t)0.7304687500000f,(float16_t)0.6826171875000f, +(float16_t)0.7294921875000f,(float16_t)0.6840820312500f, +(float16_t)0.7285156250000f,(float16_t)0.6850585937500f, +(float16_t)0.7275390625000f,(float16_t)0.6860351562500f, +(float16_t)0.7265625000000f,(float16_t)0.6875000000000f, +(float16_t)0.7250976562500f,(float16_t)0.6884765625000f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7231445312500f,(float16_t)0.6904296875000f, +(float16_t)0.7221679687500f,(float16_t)0.6918945312500f, +(float16_t)0.7211914062500f,(float16_t)0.6928710937500f, +(float16_t)0.7202148437500f,(float16_t)0.6938476562500f, +(float16_t)0.7187500000000f,(float16_t)0.6953125000000f, +(float16_t)0.7177734375000f,(float16_t)0.6962890625000f, +(float16_t)0.7167968750000f,(float16_t)0.6972656250000f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7148437500000f,(float16_t)0.6997070312500f, +(float16_t)0.7133789062500f,(float16_t)0.7006835937500f, +(float16_t)0.7124023437500f,(float16_t)0.7016601562500f, +(float16_t)0.7114257812500f,(float16_t)0.7026367187500f, +(float16_t)0.7104492187500f,(float16_t)0.7036132812500f, +(float16_t)0.7094726562500f,(float16_t)0.7050781250000f, +(float16_t)0.7080078125000f,(float16_t)0.7060546875000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.7060546875000f,(float16_t)0.7080078125000f, +(float16_t)0.7050781250000f,(float16_t)0.7094726562500f, +(float16_t)0.7036132812500f,(float16_t)0.7104492187500f, +(float16_t)0.7026367187500f,(float16_t)0.7114257812500f, +(float16_t)0.7016601562500f,(float16_t)0.7124023437500f, +(float16_t)0.7006835937500f,(float16_t)0.7133789062500f, +(float16_t)0.6997070312500f,(float16_t)0.7148437500000f, +(float16_t)0.6982421875000f,(float16_t)0.7158203125000f, +(float16_t)0.6972656250000f,(float16_t)0.7167968750000f, +(float16_t)0.6962890625000f,(float16_t)0.7177734375000f, +(float16_t)0.6953125000000f,(float16_t)0.7187500000000f, +(float16_t)0.6938476562500f,(float16_t)0.7202148437500f, +(float16_t)0.6928710937500f,(float16_t)0.7211914062500f, +(float16_t)0.6918945312500f,(float16_t)0.7221679687500f, +(float16_t)0.6904296875000f,(float16_t)0.7231445312500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6884765625000f,(float16_t)0.7250976562500f, +(float16_t)0.6875000000000f,(float16_t)0.7265625000000f, +(float16_t)0.6860351562500f,(float16_t)0.7275390625000f, +(float16_t)0.6850585937500f,(float16_t)0.7285156250000f, +(float16_t)0.6840820312500f,(float16_t)0.7294921875000f, +(float16_t)0.6826171875000f,(float16_t)0.7304687500000f, +(float16_t)0.6816406250000f,(float16_t)0.7314453125000f, 
+(float16_t)0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)0.6796875000000f,(float16_t)0.7338867187500f, +(float16_t)0.6782226562500f,(float16_t)0.7348632812500f, +(float16_t)0.6772460937500f,(float16_t)0.7358398437500f, +(float16_t)0.6762695312500f,(float16_t)0.7368164062500f, +(float16_t)0.6748046875000f,(float16_t)0.7377929687500f, +(float16_t)0.6738281250000f,(float16_t)0.7387695312500f, +(float16_t)0.6728515625000f,(float16_t)0.7397460937500f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6704101562500f,(float16_t)0.7421875000000f, +(float16_t)0.6694335937500f,(float16_t)0.7431640625000f, +(float16_t)0.6679687500000f,(float16_t)0.7441406250000f, +(float16_t)0.6669921875000f,(float16_t)0.7451171875000f, +(float16_t)0.6660156250000f,(float16_t)0.7460937500000f, +(float16_t)0.6645507812500f,(float16_t)0.7470703125000f, +(float16_t)0.6635742187500f,(float16_t)0.7480468750000f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6611328125000f,(float16_t)0.7500000000000f, +(float16_t)0.6601562500000f,(float16_t)0.7509765625000f, +(float16_t)0.6591796875000f,(float16_t)0.7519531250000f, +(float16_t)0.6577148437500f,(float16_t)0.7534179687500f, +(float16_t)0.6567382812500f,(float16_t)0.7543945312500f, +(float16_t)0.6552734375000f,(float16_t)0.7553710937500f, +(float16_t)0.6542968750000f,(float16_t)0.7563476562500f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6518554687500f,(float16_t)0.7583007812500f, +(float16_t)0.6508789062500f,(float16_t)0.7592773437500f, +(float16_t)0.6499023437500f,(float16_t)0.7602539062500f, +(float16_t)0.6484375000000f,(float16_t)0.7612304687500f, +(float16_t)0.6474609375000f,(float16_t)0.7622070312500f, +(float16_t)0.6459960937500f,(float16_t)0.7631835937500f, +(float16_t)0.6450195312500f,(float16_t)0.7641601562500f, +(float16_t)0.6440429687500f,(float16_t)0.7651367187500f, +(float16_t)0.6425781250000f,(float16_t)0.7661132812500f, +(float16_t)0.6416015625000f,(float16_t)0.7670898437500f, +(float16_t)0.6401367187500f,(float16_t)0.7680664062500f, +(float16_t)0.6391601562500f,(float16_t)0.7690429687500f, +(float16_t)0.6381835937500f,(float16_t)0.7700195312500f, +(float16_t)0.6367187500000f,(float16_t)0.7709960937500f, +(float16_t)0.6357421875000f,(float16_t)0.7719726562500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6333007812500f,(float16_t)0.7739257812500f, +(float16_t)0.6318359375000f,(float16_t)0.7749023437500f, +(float16_t)0.6308593750000f,(float16_t)0.7758789062500f, +(float16_t)0.6293945312500f,(float16_t)0.7768554687500f, +(float16_t)0.6284179687500f,(float16_t)0.7778320312500f, +(float16_t)0.6274414062500f,(float16_t)0.7788085937500f, +(float16_t)0.6259765625000f,(float16_t)0.7797851562500f, +(float16_t)0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)0.6235351562500f,(float16_t)0.7817382812500f, +(float16_t)0.6225585937500f,(float16_t)0.7827148437500f, +(float16_t)0.6210937500000f,(float16_t)0.7836914062500f, +(float16_t)0.6201171875000f,(float16_t)0.7846679687500f, +(float16_t)0.6186523437500f,(float16_t)0.7856445312500f, +(float16_t)0.6176757812500f,(float16_t)0.7866210937500f, +(float16_t)0.6162109375000f,(float16_t)0.7875976562500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.6142578125000f,(float16_t)0.7890625000000f, +(float16_t)0.6127929687500f,(float16_t)0.7900390625000f, +(float16_t)0.6118164062500f,(float16_t)0.7910156250000f, +(float16_t)0.6103515625000f,(float16_t)0.7919921875000f, 
+(float16_t)0.6093750000000f,(float16_t)0.7929687500000f, +(float16_t)0.6079101562500f,(float16_t)0.7939453125000f, +(float16_t)0.6069335937500f,(float16_t)0.7949218750000f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.6044921875000f,(float16_t)0.7968750000000f, +(float16_t)0.6030273437500f,(float16_t)0.7978515625000f, +(float16_t)0.6020507812500f,(float16_t)0.7988281250000f, +(float16_t)0.6005859375000f,(float16_t)0.7993164062500f, +(float16_t)0.5996093750000f,(float16_t)0.8002929687500f, +(float16_t)0.5981445312500f,(float16_t)0.8012695312500f, +(float16_t)0.5971679687500f,(float16_t)0.8022460937500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5942382812500f,(float16_t)0.8041992187500f, +(float16_t)0.5932617187500f,(float16_t)0.8051757812500f, +(float16_t)0.5917968750000f,(float16_t)0.8061523437500f, +(float16_t)0.5908203125000f,(float16_t)0.8066406250000f, +(float16_t)0.5893554687500f,(float16_t)0.8076171875000f, +(float16_t)0.5883789062500f,(float16_t)0.8085937500000f, +(float16_t)0.5869140625000f,(float16_t)0.8095703125000f, +(float16_t)0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)0.5844726562500f,(float16_t)0.8115234375000f, +(float16_t)0.5834960937500f,(float16_t)0.8120117187500f, +(float16_t)0.5820312500000f,(float16_t)0.8129882812500f, +(float16_t)0.5810546875000f,(float16_t)0.8139648437500f, +(float16_t)0.5795898437500f,(float16_t)0.8149414062500f, +(float16_t)0.5781250000000f,(float16_t)0.8159179687500f, +(float16_t)0.5771484375000f,(float16_t)0.8168945312500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5747070312500f,(float16_t)0.8183593750000f, +(float16_t)0.5732421875000f,(float16_t)0.8193359375000f, +(float16_t)0.5722656250000f,(float16_t)0.8203125000000f, +(float16_t)0.5708007812500f,(float16_t)0.8212890625000f, +(float16_t)0.5693359375000f,(float16_t)0.8217773437500f, +(float16_t)0.5683593750000f,(float16_t)0.8227539062500f, +(float16_t)0.5668945312500f,(float16_t)0.8237304687500f, +(float16_t)0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)0.5644531250000f,(float16_t)0.8256835937500f, +(float16_t)0.5629882812500f,(float16_t)0.8261718750000f, +(float16_t)0.5620117187500f,(float16_t)0.8271484375000f, +(float16_t)0.5605468750000f,(float16_t)0.8281250000000f, +(float16_t)0.5595703125000f,(float16_t)0.8291015625000f, +(float16_t)0.5581054687500f,(float16_t)0.8295898437500f, +(float16_t)0.5566406250000f,(float16_t)0.8305664062500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5541992187500f,(float16_t)0.8325195312500f, +(float16_t)0.5532226562500f,(float16_t)0.8330078125000f, +(float16_t)0.5517578125000f,(float16_t)0.8339843750000f, +(float16_t)0.5502929687500f,(float16_t)0.8349609375000f, +(float16_t)0.5493164062500f,(float16_t)0.8359375000000f, +(float16_t)0.5478515625000f,(float16_t)0.8364257812500f, +(float16_t)0.5463867187500f,(float16_t)0.8374023437500f, +(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)0.5439453125000f,(float16_t)0.8388671875000f, +(float16_t)0.5429687500000f,(float16_t)0.8398437500000f, +(float16_t)0.5415039062500f,(float16_t)0.8408203125000f, +(float16_t)0.5400390625000f,(float16_t)0.8417968750000f, +(float16_t)0.5390625000000f,(float16_t)0.8422851562500f, +(float16_t)0.5375976562500f,(float16_t)0.8432617187500f, +(float16_t)0.5361328125000f,(float16_t)0.8442382812500f, +(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5336914062500f,(float16_t)0.8457031250000f, 
+(float16_t)0.5322265625000f,(float16_t)0.8466796875000f, +(float16_t)0.5312500000000f,(float16_t)0.8471679687500f, +(float16_t)0.5297851562500f,(float16_t)0.8481445312500f, +(float16_t)0.5283203125000f,(float16_t)0.8491210937500f, +(float16_t)0.5273437500000f,(float16_t)0.8496093750000f, +(float16_t)0.5258789062500f,(float16_t)0.8505859375000f, +(float16_t)0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)0.5234375000000f,(float16_t)0.8520507812500f, +(float16_t)0.5219726562500f,(float16_t)0.8530273437500f, +(float16_t)0.5205078125000f,(float16_t)0.8540039062500f, +(float16_t)0.5195312500000f,(float16_t)0.8544921875000f, +(float16_t)0.5180664062500f,(float16_t)0.8554687500000f, +(float16_t)0.5166015625000f,(float16_t)0.8559570312500f, +(float16_t)0.5156250000000f,(float16_t)0.8569335937500f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.5126953125000f,(float16_t)0.8583984375000f, +(float16_t)0.5112304687500f,(float16_t)0.8593750000000f, +(float16_t)0.5102539062500f,(float16_t)0.8598632812500f, +(float16_t)0.5087890625000f,(float16_t)0.8608398437500f, +(float16_t)0.5073242187500f,(float16_t)0.8618164062500f, +(float16_t)0.5063476562500f,(float16_t)0.8623046875000f, +(float16_t)0.5048828125000f,(float16_t)0.8632812500000f, +(float16_t)0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)0.5024414062500f,(float16_t)0.8647460937500f, +(float16_t)0.5009765625000f,(float16_t)0.8657226562500f, +(float16_t)0.4995117187500f,(float16_t)0.8662109375000f, +(float16_t)0.4982910156250f,(float16_t)0.8671875000000f, +(float16_t)0.4968261718750f,(float16_t)0.8676757812500f, +(float16_t)0.4956054687500f,(float16_t)0.8686523437500f, +(float16_t)0.4941406250000f,(float16_t)0.8691406250000f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4914550781250f,(float16_t)0.8706054687500f, +(float16_t)0.4902343750000f,(float16_t)0.8715820312500f, +(float16_t)0.4887695312500f,(float16_t)0.8725585937500f, +(float16_t)0.4875488281250f,(float16_t)0.8730468750000f, +(float16_t)0.4863281250000f,(float16_t)0.8740234375000f, +(float16_t)0.4848632812500f,(float16_t)0.8745117187500f, +(float16_t)0.4836425781250f,(float16_t)0.8754882812500f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4809570312500f,(float16_t)0.8769531250000f, +(float16_t)0.4794921875000f,(float16_t)0.8774414062500f, +(float16_t)0.4780273437500f,(float16_t)0.8784179687500f, +(float16_t)0.4768066406250f,(float16_t)0.8789062500000f, +(float16_t)0.4753417968750f,(float16_t)0.8798828125000f, +(float16_t)0.4741210937500f,(float16_t)0.8803710937500f, +(float16_t)0.4726562500000f,(float16_t)0.8813476562500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4699707031250f,(float16_t)0.8828125000000f, +(float16_t)0.4687500000000f,(float16_t)0.8833007812500f, +(float16_t)0.4672851562500f,(float16_t)0.8842773437500f, +(float16_t)0.4660644531250f,(float16_t)0.8847656250000f, +(float16_t)0.4645996093750f,(float16_t)0.8857421875000f, +(float16_t)0.4633789062500f,(float16_t)0.8862304687500f, +(float16_t)0.4619140625000f,(float16_t)0.8867187500000f, +(float16_t)0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)0.4592285156250f,(float16_t)0.8881835937500f, +(float16_t)0.4577636718750f,(float16_t)0.8891601562500f, +(float16_t)0.4565429687500f,(float16_t)0.8896484375000f, +(float16_t)0.4550781250000f,(float16_t)0.8906250000000f, +(float16_t)0.4536132812500f,(float16_t)0.8911132812500f, +(float16_t)0.4523925781250f,(float16_t)0.8916015625000f, 
+(float16_t)0.4509277343750f,(float16_t)0.8925781250000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4482421875000f,(float16_t)0.8940429687500f, +(float16_t)0.4467773437500f,(float16_t)0.8945312500000f, +(float16_t)0.4455566406250f,(float16_t)0.8955078125000f, +(float16_t)0.4440917968750f,(float16_t)0.8959960937500f, +(float16_t)0.4426269531250f,(float16_t)0.8964843750000f, +(float16_t)0.4414062500000f,(float16_t)0.8974609375000f, +(float16_t)0.4399414062500f,(float16_t)0.8979492187500f, +(float16_t)0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)0.4372558593750f,(float16_t)0.8994140625000f, +(float16_t)0.4357910156250f,(float16_t)0.8999023437500f, +(float16_t)0.4345703125000f,(float16_t)0.9008789062500f, +(float16_t)0.4331054687500f,(float16_t)0.9013671875000f, +(float16_t)0.4316406250000f,(float16_t)0.9018554687500f, +(float16_t)0.4304199218750f,(float16_t)0.9028320312500f, +(float16_t)0.4289550781250f,(float16_t)0.9033203125000f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4262695312500f,(float16_t)0.9047851562500f, +(float16_t)0.4248046875000f,(float16_t)0.9052734375000f, +(float16_t)0.4233398437500f,(float16_t)0.9057617187500f, +(float16_t)0.4221191406250f,(float16_t)0.9067382812500f, +(float16_t)0.4206542968750f,(float16_t)0.9072265625000f, +(float16_t)0.4191894531250f,(float16_t)0.9077148437500f, +(float16_t)0.4177246093750f,(float16_t)0.9086914062500f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.4150390625000f,(float16_t)0.9096679687500f, +(float16_t)0.4135742187500f,(float16_t)0.9106445312500f, +(float16_t)0.4123535156250f,(float16_t)0.9111328125000f, +(float16_t)0.4108886718750f,(float16_t)0.9116210937500f, +(float16_t)0.4094238281250f,(float16_t)0.9121093750000f, +(float16_t)0.4079589843750f,(float16_t)0.9130859375000f, +(float16_t)0.4067382812500f,(float16_t)0.9135742187500f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.4038085937500f,(float16_t)0.9150390625000f, +(float16_t)0.4023437500000f,(float16_t)0.9155273437500f, +(float16_t)0.4011230468750f,(float16_t)0.9160156250000f, +(float16_t)0.3996582031250f,(float16_t)0.9165039062500f, +(float16_t)0.3981933593750f,(float16_t)0.9174804687500f, +(float16_t)0.3967285156250f,(float16_t)0.9179687500000f, +(float16_t)0.3955078125000f,(float16_t)0.9184570312500f, +(float16_t)0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)0.3925781250000f,(float16_t)0.9199218750000f, +(float16_t)0.3911132812500f,(float16_t)0.9204101562500f, +(float16_t)0.3896484375000f,(float16_t)0.9208984375000f, +(float16_t)0.3884277343750f,(float16_t)0.9213867187500f, +(float16_t)0.3869628906250f,(float16_t)0.9218750000000f, +(float16_t)0.3854980468750f,(float16_t)0.9228515625000f, +(float16_t)0.3840332031250f,(float16_t)0.9233398437500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3813476562500f,(float16_t)0.9243164062500f, +(float16_t)0.3798828125000f,(float16_t)0.9252929687500f, +(float16_t)0.3784179687500f,(float16_t)0.9257812500000f, +(float16_t)0.3769531250000f,(float16_t)0.9262695312500f, +(float16_t)0.3754882812500f,(float16_t)0.9267578125000f, +(float16_t)0.3742675781250f,(float16_t)0.9272460937500f, +(float16_t)0.3728027343750f,(float16_t)0.9277343750000f, +(float16_t)0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)0.3698730468750f,(float16_t)0.9291992187500f, +(float16_t)0.3684082031250f,(float16_t)0.9296875000000f, +(float16_t)0.3669433593750f,(float16_t)0.9301757812500f, 
+(float16_t)0.3657226562500f,(float16_t)0.9306640625000f, +(float16_t)0.3642578125000f,(float16_t)0.9311523437500f, +(float16_t)0.3627929687500f,(float16_t)0.9316406250000f, +(float16_t)0.3613281250000f,(float16_t)0.9326171875000f, +(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3583984375000f,(float16_t)0.9335937500000f, +(float16_t)0.3569335937500f,(float16_t)0.9340820312500f, +(float16_t)0.3557128906250f,(float16_t)0.9345703125000f, +(float16_t)0.3542480468750f,(float16_t)0.9350585937500f, +(float16_t)0.3527832031250f,(float16_t)0.9355468750000f, +(float16_t)0.3513183593750f,(float16_t)0.9360351562500f, +(float16_t)0.3498535156250f,(float16_t)0.9370117187500f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3469238281250f,(float16_t)0.9379882812500f, +(float16_t)0.3454589843750f,(float16_t)0.9384765625000f, +(float16_t)0.3439941406250f,(float16_t)0.9389648437500f, +(float16_t)0.3427734375000f,(float16_t)0.9394531250000f, +(float16_t)0.3413085937500f,(float16_t)0.9399414062500f, +(float16_t)0.3398437500000f,(float16_t)0.9404296875000f, +(float16_t)0.3383789062500f,(float16_t)0.9409179687500f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3354492187500f,(float16_t)0.9418945312500f, +(float16_t)0.3339843750000f,(float16_t)0.9423828125000f, +(float16_t)0.3325195312500f,(float16_t)0.9428710937500f, +(float16_t)0.3310546875000f,(float16_t)0.9433593750000f, +(float16_t)0.3295898437500f,(float16_t)0.9443359375000f, +(float16_t)0.3281250000000f,(float16_t)0.9448242187500f, +(float16_t)0.3266601562500f,(float16_t)0.9453125000000f, +(float16_t)0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)0.3239746093750f,(float16_t)0.9462890625000f, +(float16_t)0.3225097656250f,(float16_t)0.9467773437500f, +(float16_t)0.3210449218750f,(float16_t)0.9472656250000f, +(float16_t)0.3195800781250f,(float16_t)0.9477539062500f, +(float16_t)0.3181152343750f,(float16_t)0.9482421875000f, +(float16_t)0.3166503906250f,(float16_t)0.9487304687500f, +(float16_t)0.3151855468750f,(float16_t)0.9492187500000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.3122558593750f,(float16_t)0.9501953125000f, +(float16_t)0.3107910156250f,(float16_t)0.9506835937500f, +(float16_t)0.3093261718750f,(float16_t)0.9511718750000f, +(float16_t)0.3078613281250f,(float16_t)0.9516601562500f, +(float16_t)0.3063964843750f,(float16_t)0.9521484375000f, +(float16_t)0.3049316406250f,(float16_t)0.9521484375000f, +(float16_t)0.3034667968750f,(float16_t)0.9526367187500f, +(float16_t)0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)0.3005371093750f,(float16_t)0.9536132812500f, +(float16_t)0.2990722656250f,(float16_t)0.9541015625000f, +(float16_t)0.2976074218750f,(float16_t)0.9545898437500f, +(float16_t)0.2961425781250f,(float16_t)0.9550781250000f, +(float16_t)0.2946777343750f,(float16_t)0.9555664062500f, +(float16_t)0.2932128906250f,(float16_t)0.9560546875000f, +(float16_t)0.2917480468750f,(float16_t)0.9565429687500f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2888183593750f,(float16_t)0.9575195312500f, +(float16_t)0.2873535156250f,(float16_t)0.9580078125000f, +(float16_t)0.2858886718750f,(float16_t)0.9584960937500f, +(float16_t)0.2844238281250f,(float16_t)0.9584960937500f, +(float16_t)0.2829589843750f,(float16_t)0.9589843750000f, +(float16_t)0.2814941406250f,(float16_t)0.9594726562500f, +(float16_t)0.2800292968750f,(float16_t)0.9599609375000f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, 
+(float16_t)0.2770996093750f,(float16_t)0.9609375000000f, +(float16_t)0.2756347656250f,(float16_t)0.9614257812500f, +(float16_t)0.2741699218750f,(float16_t)0.9619140625000f, +(float16_t)0.2727050781250f,(float16_t)0.9619140625000f, +(float16_t)0.2712402343750f,(float16_t)0.9624023437500f, +(float16_t)0.2697753906250f,(float16_t)0.9628906250000f, +(float16_t)0.2683105468750f,(float16_t)0.9633789062500f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2651367187500f,(float16_t)0.9643554687500f, +(float16_t)0.2636718750000f,(float16_t)0.9643554687500f, +(float16_t)0.2622070312500f,(float16_t)0.9648437500000f, +(float16_t)0.2607421875000f,(float16_t)0.9653320312500f, +(float16_t)0.2592773437500f,(float16_t)0.9658203125000f, +(float16_t)0.2578125000000f,(float16_t)0.9663085937500f, +(float16_t)0.2563476562500f,(float16_t)0.9667968750000f, +(float16_t)0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)0.2534179687500f,(float16_t)0.9672851562500f, +(float16_t)0.2519531250000f,(float16_t)0.9677734375000f, +(float16_t)0.2504882812500f,(float16_t)0.9682617187500f, +(float16_t)0.2489013671875f,(float16_t)0.9687500000000f, +(float16_t)0.2474365234375f,(float16_t)0.9687500000000f, +(float16_t)0.2459716796875f,(float16_t)0.9692382812500f, +(float16_t)0.2445068359375f,(float16_t)0.9697265625000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2414550781250f,(float16_t)0.9702148437500f, +(float16_t)0.2399902343750f,(float16_t)0.9707031250000f, +(float16_t)0.2385253906250f,(float16_t)0.9711914062500f, +(float16_t)0.2370605468750f,(float16_t)0.9716796875000f, +(float16_t)0.2354736328125f,(float16_t)0.9716796875000f, +(float16_t)0.2340087890625f,(float16_t)0.9721679687500f, +(float16_t)0.2325439453125f,(float16_t)0.9726562500000f, +(float16_t)0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)0.2296142578125f,(float16_t)0.9731445312500f, +(float16_t)0.2280273437500f,(float16_t)0.9736328125000f, +(float16_t)0.2265625000000f,(float16_t)0.9741210937500f, +(float16_t)0.2250976562500f,(float16_t)0.9741210937500f, +(float16_t)0.2236328125000f,(float16_t)0.9746093750000f, +(float16_t)0.2220458984375f,(float16_t)0.9750976562500f, +(float16_t)0.2205810546875f,(float16_t)0.9755859375000f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.2176513671875f,(float16_t)0.9760742187500f, +(float16_t)0.2160644531250f,(float16_t)0.9765625000000f, +(float16_t)0.2145996093750f,(float16_t)0.9765625000000f, +(float16_t)0.2131347656250f,(float16_t)0.9770507812500f, +(float16_t)0.2116699218750f,(float16_t)0.9775390625000f, +(float16_t)0.2100830078125f,(float16_t)0.9775390625000f, +(float16_t)0.2086181640625f,(float16_t)0.9780273437500f, +(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)0.2055664062500f,(float16_t)0.9785156250000f, +(float16_t)0.2041015625000f,(float16_t)0.9790039062500f, +(float16_t)0.2026367187500f,(float16_t)0.9794921875000f, +(float16_t)0.2010498046875f,(float16_t)0.9794921875000f, +(float16_t)0.1995849609375f,(float16_t)0.9799804687500f, +(float16_t)0.1981201171875f,(float16_t)0.9799804687500f, +(float16_t)0.1966552734375f,(float16_t)0.9804687500000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1936035156250f,(float16_t)0.9809570312500f, +(float16_t)0.1921386718750f,(float16_t)0.9814453125000f, +(float16_t)0.1905517578125f,(float16_t)0.9814453125000f, +(float16_t)0.1890869140625f,(float16_t)0.9819335937500f, +(float16_t)0.1876220703125f,(float16_t)0.9824218750000f, 
+(float16_t)0.1860351562500f,(float16_t)0.9824218750000f, +(float16_t)0.1845703125000f,(float16_t)0.9829101562500f, +(float16_t)0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)0.1815185546875f,(float16_t)0.9833984375000f, +(float16_t)0.1800537109375f,(float16_t)0.9838867187500f, +(float16_t)0.1784667968750f,(float16_t)0.9838867187500f, +(float16_t)0.1770019531250f,(float16_t)0.9843750000000f, +(float16_t)0.1755371093750f,(float16_t)0.9843750000000f, +(float16_t)0.1739501953125f,(float16_t)0.9848632812500f, +(float16_t)0.1724853515625f,(float16_t)0.9848632812500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1694335937500f,(float16_t)0.9853515625000f, +(float16_t)0.1679687500000f,(float16_t)0.9858398437500f, +(float16_t)0.1663818359375f,(float16_t)0.9858398437500f, +(float16_t)0.1649169921875f,(float16_t)0.9863281250000f, +(float16_t)0.1634521484375f,(float16_t)0.9863281250000f, +(float16_t)0.1618652343750f,(float16_t)0.9868164062500f, +(float16_t)0.1604003906250f,(float16_t)0.9868164062500f, +(float16_t)0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)0.1573486328125f,(float16_t)0.9873046875000f, +(float16_t)0.1558837890625f,(float16_t)0.9877929687500f, +(float16_t)0.1542968750000f,(float16_t)0.9877929687500f, +(float16_t)0.1528320312500f,(float16_t)0.9882812500000f, +(float16_t)0.1512451171875f,(float16_t)0.9882812500000f, +(float16_t)0.1497802734375f,(float16_t)0.9887695312500f, +(float16_t)0.1481933593750f,(float16_t)0.9887695312500f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1452636718750f,(float16_t)0.9892578125000f, +(float16_t)0.1436767578125f,(float16_t)0.9897460937500f, +(float16_t)0.1422119140625f,(float16_t)0.9897460937500f, +(float16_t)0.1406250000000f,(float16_t)0.9902343750000f, +(float16_t)0.1391601562500f,(float16_t)0.9902343750000f, +(float16_t)0.1375732421875f,(float16_t)0.9907226562500f, +(float16_t)0.1361083984375f,(float16_t)0.9907226562500f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1330566406250f,(float16_t)0.9912109375000f, +(float16_t)0.1315917968750f,(float16_t)0.9912109375000f, +(float16_t)0.1300048828125f,(float16_t)0.9916992187500f, +(float16_t)0.1285400390625f,(float16_t)0.9916992187500f, +(float16_t)0.1269531250000f,(float16_t)0.9916992187500f, +(float16_t)0.1254882812500f,(float16_t)0.9921875000000f, +(float16_t)0.1239624023438f,(float16_t)0.9921875000000f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.1209106445312f,(float16_t)0.9926757812500f, +(float16_t)0.1193847656250f,(float16_t)0.9926757812500f, +(float16_t)0.1178588867188f,(float16_t)0.9931640625000f, +(float16_t)0.1163330078125f,(float16_t)0.9931640625000f, +(float16_t)0.1148071289062f,(float16_t)0.9931640625000f, +(float16_t)0.1132812500000f,(float16_t)0.9936523437500f, +(float16_t)0.1117553710938f,(float16_t)0.9936523437500f, +(float16_t)0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)0.1087036132812f,(float16_t)0.9941406250000f, +(float16_t)0.1071777343750f,(float16_t)0.9941406250000f, +(float16_t)0.1056518554688f,(float16_t)0.9946289062500f, +(float16_t)0.1041259765625f,(float16_t)0.9946289062500f, +(float16_t)0.1026000976562f,(float16_t)0.9946289062500f, +(float16_t)0.1010742187500f,(float16_t)0.9951171875000f, +(float16_t)0.0995483398438f,(float16_t)0.9951171875000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0964965820312f,(float16_t)0.9951171875000f, +(float16_t)0.0949707031250f,(float16_t)0.9956054687500f, 
+(float16_t)0.0934448242188f,(float16_t)0.9956054687500f, +(float16_t)0.0919189453125f,(float16_t)0.9956054687500f, +(float16_t)0.0903930664062f,(float16_t)0.9960937500000f, +(float16_t)0.0888671875000f,(float16_t)0.9960937500000f, +(float16_t)0.0873413085938f,(float16_t)0.9960937500000f, +(float16_t)0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)0.0842895507812f,(float16_t)0.9965820312500f, +(float16_t)0.0827636718750f,(float16_t)0.9965820312500f, +(float16_t)0.0812377929688f,(float16_t)0.9965820312500f, +(float16_t)0.0797119140625f,(float16_t)0.9965820312500f, +(float16_t)0.0781250000000f,(float16_t)0.9970703125000f, +(float16_t)0.0765991210938f,(float16_t)0.9970703125000f, +(float16_t)0.0750732421875f,(float16_t)0.9970703125000f, +(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0720214843750f,(float16_t)0.9975585937500f, +(float16_t)0.0704956054688f,(float16_t)0.9975585937500f, +(float16_t)0.0689697265625f,(float16_t)0.9975585937500f, +(float16_t)0.0674438476562f,(float16_t)0.9975585937500f, +(float16_t)0.0659179687500f,(float16_t)0.9980468750000f, +(float16_t)0.0643920898438f,(float16_t)0.9980468750000f, +(float16_t)0.0628662109375f,(float16_t)0.9980468750000f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)0.0597839355469f,(float16_t)0.9980468750000f, +(float16_t)0.0582580566406f,(float16_t)0.9985351562500f, +(float16_t)0.0567321777344f,(float16_t)0.9985351562500f, +(float16_t)0.0552062988281f,(float16_t)0.9985351562500f, +(float16_t)0.0536499023438f,(float16_t)0.9985351562500f, +(float16_t)0.0521240234375f,(float16_t)0.9985351562500f, +(float16_t)0.0505981445312f,(float16_t)0.9985351562500f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0475463867188f,(float16_t)0.9990234375000f, +(float16_t)0.0459899902344f,(float16_t)0.9990234375000f, +(float16_t)0.0444641113281f,(float16_t)0.9990234375000f, +(float16_t)0.0429382324219f,(float16_t)0.9990234375000f, +(float16_t)0.0414123535156f,(float16_t)0.9990234375000f, +(float16_t)0.0398864746094f,(float16_t)0.9990234375000f, +(float16_t)0.0383300781250f,(float16_t)0.9990234375000f, +(float16_t)0.0368041992188f,(float16_t)0.9995117187500f, +(float16_t)0.0352783203125f,(float16_t)0.9995117187500f, +(float16_t)0.0337524414062f,(float16_t)0.9995117187500f, +(float16_t)0.0321960449219f,(float16_t)0.9995117187500f, +(float16_t)0.0306701660156f,(float16_t)0.9995117187500f, +(float16_t)0.0291442871094f,(float16_t)0.9995117187500f, +(float16_t)0.0276031494141f,(float16_t)0.9995117187500f, +(float16_t)0.0260772705078f,(float16_t)0.9995117187500f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)0.0230102539062f,(float16_t)0.9995117187500f, +(float16_t)0.0214691162109f,(float16_t)1.0000000000000f, +(float16_t)0.0199432373047f,(float16_t)1.0000000000000f, +(float16_t)0.0184020996094f,(float16_t)1.0000000000000f, +(float16_t)0.0168762207031f,(float16_t)1.0000000000000f, +(float16_t)0.0153427124023f,(float16_t)1.0000000000000f, +(float16_t)0.0138015747070f,(float16_t)1.0000000000000f, +(float16_t)0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)0.0107345581055f,(float16_t)1.0000000000000f, +(float16_t)0.0092010498047f,(float16_t)1.0000000000000f, +(float16_t)0.0076713562012f,(float16_t)1.0000000000000f, +(float16_t)0.0061340332031f,(float16_t)1.0000000000000f, +(float16_t)0.0046005249023f,(float16_t)1.0000000000000f, +(float16_t)0.0030670166016f,(float16_t)1.0000000000000f, +(float16_t)0.0015335083008f,(float16_t)1.0000000000000f, 
+(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0061340332031f, +(float16_t)1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)1.0000000000000f,(float16_t)0.0184020996094f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9995117187500f,(float16_t)0.0306701660156f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)0.9990234375000f,(float16_t)0.0429382324219f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9985351562500f,(float16_t)0.0552062988281f, +(float16_t)0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)0.9975585937500f,(float16_t)0.0674438476562f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9965820312500f,(float16_t)0.0797119140625f, +(float16_t)0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)0.9956054687500f,(float16_t)0.0919189453125f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9946289062500f,(float16_t)0.1041259765625f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9931640625000f,(float16_t)0.1163330078125f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9916992187500f,(float16_t)0.1285400390625f, +(float16_t)0.9907226562500f,(float16_t)0.1345214843750f, +(float16_t)0.9902343750000f,(float16_t)0.1406250000000f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9882812500000f,(float16_t)0.1528320312500f, +(float16_t)0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)0.9863281250000f,(float16_t)0.1649169921875f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9843750000000f,(float16_t)0.1770019531250f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9819335937500f,(float16_t)0.1890869140625f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9794921875000f,(float16_t)0.2010498046875f, +(float16_t)0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)0.9770507812500f,(float16_t)0.2131347656250f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9741210937500f,(float16_t)0.2250976562500f, +(float16_t)0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)0.9716796875000f,(float16_t)0.2370605468750f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9687500000000f,(float16_t)0.2489013671875f, +(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9653320312500f,(float16_t)0.2607421875000f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9619140625000f,(float16_t)0.2727050781250f, +(float16_t)0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)0.9584960937500f,(float16_t)0.2844238281250f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9550781250000f,(float16_t)0.2961425781250f, +(float16_t)0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)0.9516601562500f,(float16_t)0.3078613281250f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9477539062500f,(float16_t)0.3195800781250f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9433593750000f,(float16_t)0.3310546875000f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9394531250000f,(float16_t)0.3427734375000f, +(float16_t)0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)0.9350585937500f,(float16_t)0.3542480468750f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, 
+(float16_t)0.9306640625000f,(float16_t)0.3657226562500f, +(float16_t)0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)0.9262695312500f,(float16_t)0.3769531250000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9213867187500f,(float16_t)0.3884277343750f, +(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)0.9165039062500f,(float16_t)0.3996582031250f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9116210937500f,(float16_t)0.4108886718750f, +(float16_t)0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)0.9067382812500f,(float16_t)0.4221191406250f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.9013671875000f,(float16_t)0.4331054687500f, +(float16_t)0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)0.8959960937500f,(float16_t)0.4440917968750f, +(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8906250000000f,(float16_t)0.4550781250000f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8847656250000f,(float16_t)0.4660644531250f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8789062500000f,(float16_t)0.4768066406250f, +(float16_t)0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)0.8730468750000f,(float16_t)0.4875488281250f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8671875000000f,(float16_t)0.4982910156250f, +(float16_t)0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)0.8608398437500f,(float16_t)0.5087890625000f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8544921875000f,(float16_t)0.5195312500000f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8481445312500f,(float16_t)0.5297851562500f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8417968750000f,(float16_t)0.5400390625000f, +(float16_t)0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)0.8349609375000f,(float16_t)0.5502929687500f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8281250000000f,(float16_t)0.5605468750000f, +(float16_t)0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)0.8212890625000f,(float16_t)0.5708007812500f, +(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)0.8139648437500f,(float16_t)0.5810546875000f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.8066406250000f,(float16_t)0.5908203125000f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7993164062500f,(float16_t)0.6005859375000f, +(float16_t)0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)0.7919921875000f,(float16_t)0.6103515625000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7846679687500f,(float16_t)0.6201171875000f, +(float16_t)0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)0.7768554687500f,(float16_t)0.6293945312500f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7690429687500f,(float16_t)0.6391601562500f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7612304687500f,(float16_t)0.6484375000000f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7534179687500f,(float16_t)0.6577148437500f, +(float16_t)0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)0.7451171875000f,(float16_t)0.6669921875000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7368164062500f,(float16_t)0.6762695312500f, 
+(float16_t)0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)0.7285156250000f,(float16_t)0.6850585937500f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7202148437500f,(float16_t)0.6938476562500f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7114257812500f,(float16_t)0.7026367187500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.7026367187500f,(float16_t)0.7114257812500f, +(float16_t)0.6982421875000f,(float16_t)0.7158203125000f, +(float16_t)0.6938476562500f,(float16_t)0.7202148437500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6850585937500f,(float16_t)0.7285156250000f, +(float16_t)0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)0.6762695312500f,(float16_t)0.7368164062500f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6669921875000f,(float16_t)0.7451171875000f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6577148437500f,(float16_t)0.7534179687500f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6484375000000f,(float16_t)0.7612304687500f, +(float16_t)0.6440429687500f,(float16_t)0.7651367187500f, +(float16_t)0.6391601562500f,(float16_t)0.7690429687500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6293945312500f,(float16_t)0.7768554687500f, +(float16_t)0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)0.6201171875000f,(float16_t)0.7846679687500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.6103515625000f,(float16_t)0.7919921875000f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.6005859375000f,(float16_t)0.7993164062500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5908203125000f,(float16_t)0.8066406250000f, +(float16_t)0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)0.5810546875000f,(float16_t)0.8139648437500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5708007812500f,(float16_t)0.8212890625000f, +(float16_t)0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)0.5605468750000f,(float16_t)0.8281250000000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5502929687500f,(float16_t)0.8349609375000f, +(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)0.5400390625000f,(float16_t)0.8417968750000f, +(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5297851562500f,(float16_t)0.8481445312500f, +(float16_t)0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)0.5195312500000f,(float16_t)0.8544921875000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.5087890625000f,(float16_t)0.8608398437500f, +(float16_t)0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)0.4982910156250f,(float16_t)0.8671875000000f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4875488281250f,(float16_t)0.8730468750000f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4768066406250f,(float16_t)0.8789062500000f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4660644531250f,(float16_t)0.8847656250000f, +(float16_t)0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)0.4550781250000f,(float16_t)0.8906250000000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4440917968750f,(float16_t)0.8959960937500f, +(float16_t)0.4387207031250f,(float16_t)0.8984375000000f, 
+(float16_t)0.4331054687500f,(float16_t)0.9013671875000f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4221191406250f,(float16_t)0.9067382812500f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.4108886718750f,(float16_t)0.9116210937500f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.3996582031250f,(float16_t)0.9165039062500f, +(float16_t)0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)0.3884277343750f,(float16_t)0.9213867187500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3769531250000f,(float16_t)0.9262695312500f, +(float16_t)0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)0.3657226562500f,(float16_t)0.9306640625000f, +(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3542480468750f,(float16_t)0.9350585937500f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3427734375000f,(float16_t)0.9394531250000f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3310546875000f,(float16_t)0.9433593750000f, +(float16_t)0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)0.3195800781250f,(float16_t)0.9477539062500f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.3078613281250f,(float16_t)0.9516601562500f, +(float16_t)0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)0.2961425781250f,(float16_t)0.9550781250000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2844238281250f,(float16_t)0.9584960937500f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)0.2727050781250f,(float16_t)0.9619140625000f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2607421875000f,(float16_t)0.9653320312500f, +(float16_t)0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)0.2489013671875f,(float16_t)0.9687500000000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2370605468750f,(float16_t)0.9716796875000f, +(float16_t)0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)0.2250976562500f,(float16_t)0.9741210937500f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.2131347656250f,(float16_t)0.9770507812500f, +(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)0.2010498046875f,(float16_t)0.9794921875000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1890869140625f,(float16_t)0.9819335937500f, +(float16_t)0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)0.1770019531250f,(float16_t)0.9843750000000f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1649169921875f,(float16_t)0.9863281250000f, +(float16_t)0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)0.1528320312500f,(float16_t)0.9882812500000f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1406250000000f,(float16_t)0.9902343750000f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1285400390625f,(float16_t)0.9916992187500f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.1163330078125f,(float16_t)0.9931640625000f, +(float16_t)0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)0.1041259765625f,(float16_t)0.9946289062500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0919189453125f,(float16_t)0.9956054687500f, +(float16_t)0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)0.0797119140625f,(float16_t)0.9965820312500f, 
+(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0674438476562f,(float16_t)0.9975585937500f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)0.0552062988281f,(float16_t)0.9985351562500f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0429382324219f,(float16_t)0.9990234375000f, +(float16_t)0.0368041992188f,(float16_t)0.9995117187500f, +(float16_t)0.0306701660156f,(float16_t)0.9995117187500f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)0.0184020996094f,(float16_t)1.0000000000000f, +(float16_t)0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)0.0061340332031f,(float16_t)1.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, 
+(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f,}; float16_t rearranged_twiddle_stride2_4096_f16[2728]={ -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99999529380957619118f,(float16_t)0.00306795676296597614f, -(float16_t)0.99998117528260110909f,(float16_t)0.00613588464915447527f, -(float16_t)0.99995764455196389786f,(float16_t)0.00920375478205981944f, -(float16_t)0.99992470183914450299f,(float16_t)0.01227153828571992539f, -(float16_t)0.99988234745421256111f,(float16_t)0.01533920628498810015f, -(float16_t)0.99983058179582340319f,(float16_t)0.01840672990580482019f, -(float16_t)0.99976940535121527898f,(float16_t)0.02147408027546950787f, -(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, -(float16_t)0.99961882249517863830f,(float16_t)0.02760814577896573974f, -(float16_t)0.99952941750109314256f,(float16_t)0.03067480317663662595f, -(float16_t)0.99943060455546173237f,(float16_t)0.03374117185137757990f, -(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99920475861836388631f,(float16_t)0.03987292758773981066f, -(float16_t)0.99907772775264536147f,(float16_t)0.04293825693494082024f, -(float16_t)0.99894129318685687124f,(float16_t)0.04600318213091462299f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99864021818026527111f,(float16_t)0.05213170468028332366f, -(float16_t)0.99847558057329477421f,(float16_t)0.05519524434968993420f, -(float16_t)0.99830154493389289261f,(float16_t)0.05825826450043575244f, 
-(float16_t)0.99811811290014917919f,(float16_t)0.06132073630220857829f, -(float16_t)0.99792528619859599548f,(float16_t)0.06438263092985746505f, -(float16_t)0.99772306664419163624f,(float16_t)0.06744391956366405094f, -(float16_t)0.99751145614030345410f,(float16_t)0.07050457338961385600f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99706007033948296225f,(float16_t)0.07662386139203149205f, -(float16_t)0.99682029929116566791f,(float16_t)0.07968243797143012563f, -(float16_t)0.99657114579055483539f,(float16_t)0.08274026454937569164f, -(float16_t)0.99631261218277800129f,(float16_t)0.08579731234443989385f, -(float16_t)0.99604470090125196702f,(float16_t)0.08885355258252460031f, -(float16_t)0.99576741446765981713f,(float16_t)0.09190895649713272386f, -(float16_t)0.99548075549192693856f,(float16_t)0.09496349532963899165f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99487933079480561638f,(float16_t)0.10106986275482782167f, -(float16_t)0.99456457073425541537f,(float16_t)0.10412163387205458642f, -(float16_t)0.99424044945318790223f,(float16_t)0.10717242495680884273f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99356413552059530403f,(float16_t)0.11327095217756434631f, -(float16_t)0.99321194923479450001f,(float16_t)0.11631863091190475235f, -(float16_t)0.99285041445986510489f,(float16_t)0.11936521481099135467f, -(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.99209931314219179654f,(float16_t)0.12545498341154623367f, -(float16_t)0.99170975366909952520f,(float16_t)0.12849811079379316880f, -(float16_t)0.99131085984611544415f,(float16_t)0.13154002870288311611f, -(float16_t)0.99090263542778000971f,(float16_t)0.13458070850712616773f, -(float16_t)0.99048508425645709341f,(float16_t)0.13762012158648603832f, -(float16_t)0.99005821026229712256f,(float16_t)0.14065823933284921088f, -(float16_t)0.98962201746320088702f,(float16_t)0.14369503315029447110f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98872169196032377858f,(float16_t)0.14976453467732150915f, -(float16_t)0.98825756773074946437f,(float16_t)0.15279718525844343535f, -(float16_t)0.98778414164457217783f,(float16_t)0.15582839765426523271f, -(float16_t)0.98730141815785843473f,(float16_t)0.15885814333386144570f, -(float16_t)0.98680940181418552726f,(float16_t)0.16188639378011182579f, -(float16_t)0.98630809724459866938f,(float16_t)0.16491312048996989437f, -(float16_t)0.98579750916756747614f,(float16_t)0.16793829497473117263f, -(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)0.98474850180190420801f,(float16_t)0.17398387338746382214f, -(float16_t)0.98421009238692902521f,(float16_t)0.17700422041214874946f, -(float16_t)0.98366241921173025453f,(float16_t)0.18002290140569951471f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.98253930228744124076f,(float16_t)0.18605515166344663291f, -(float16_t)0.98196386910955524296f,(float16_t)0.18906866414980619262f, -(float16_t)0.98137919331375456089f,(float16_t)0.19208039704989243734f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.98018213596811742949f,(float16_t)0.19809841071795356027f, -(float16_t)0.97956976568544051887f,(float16_t)0.20110463484209190055f, -(float16_t)0.97894817531906219710f,(float16_t)0.20410896609281686809f, -(float16_t)0.97831737071962765473f,(float16_t)0.20711137619221856032f, 
-(float16_t)0.97767735782450992943f,(float16_t)0.21011183688046961016f, -(float16_t)0.97702814265775439484f,(float16_t)0.21311031991609136194f, -(float16_t)0.97636973133002114000f,(float16_t)0.21610679707621952006f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97502534506699412020f,(float16_t)0.22209362097320350937f, -(float16_t)0.97433938278557585821f,(float16_t)0.22508391135979283204f, -(float16_t)0.97364424965081197705f,(float16_t)0.22807208317088573102f, -(float16_t)0.97293995220556017678f,(float16_t)0.23105810828067110951f, -(float16_t)0.97222649707893626925f,(float16_t)0.23404195858354343018f, -(float16_t)0.97150389098625178352f,(float16_t)0.23702360599436719801f, -(float16_t)0.97077214072895035013f,(float16_t)0.24000302244874149871f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96928123535654853171f,(float16_t)0.24595505033579459497f, -(float16_t)0.96852209427441737777f,(float16_t)0.24892760574572014853f, -(float16_t)0.96775383709347551076f,(float16_t)0.25189781815421696809f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96619000344541250413f,(float16_t)0.25783110216215898713f, -(float16_t)0.96539444169768939830f,(float16_t)0.26079411791527551401f, -(float16_t)0.96458979328981275803f,(float16_t)0.26375467897483134694f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.96295326687368387741f,(float16_t)0.26966832557291509076f, -(float16_t)0.96212140426904158019f,(float16_t)0.27262135544994897662f, -(float16_t)0.96128048581132063966f,(float16_t)0.27557181931095814376f, -(float16_t)0.96043051941556578655f,(float16_t)0.27851968938505305973f, -(float16_t)0.95957151308198451733f,(float16_t)0.28146493792575794091f, -(float16_t)0.95870347489587159906f,(float16_t)0.28440753721127187692f, -(float16_t)0.95782641302753290802f,(float16_t)0.28734745954472951102f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95604525134999640557f,(float16_t)0.29321916269425862822f, -(float16_t)0.95514116830577078243f,(float16_t)0.29615088824362378883f, -(float16_t)0.95422809510910566733f,(float16_t)0.29907982630804047508f, -(float16_t)0.95330604035419386211f,(float16_t)0.30200594931922808417f, -(float16_t)0.95237501271976587880f,(float16_t)0.30492922973540237397f, -(float16_t)0.95143502096900833820f,(float16_t)0.30784964004153486661f, -(float16_t)0.95048607394948170235f,(float16_t)0.31076715274961147495f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94856134991573026749f,(float16_t)0.31659337555616584581f, -(float16_t)0.94758559101774109124f,(float16_t)0.31950203081601569188f, -(float16_t)0.94660091308328353499f,(float16_t)0.32240767880106985244f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.94460483726148025685f,(float16_t)0.32820984357909249729f, -(float16_t)0.94359345816196038559f,(float16_t)0.33110630575987642921f, -(float16_t)0.94257319760144686605f,(float16_t)0.33399965144200938205f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.94050607059326829518f,(float16_t)0.33977688440682685123f, -(float16_t)0.93945922360218991898f,(float16_t)0.34266071731199437833f, -(float16_t)0.93840353406310805795f,(float16_t)0.34554132496398909380f, -(float16_t)0.93733901191257495977f,(float16_t)0.34841868024943456472f, -(float16_t)0.93626566717027825959f,(float16_t)0.35129275608556709276f, 
-(float16_t)0.93518350993894761025f,(float16_t)0.35416352542049034380f, -(float16_t)0.93409255040425887007f,(float16_t)0.35703096123342997759f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.93188426558166814750f,(float16_t)0.36275572436739722537f, -(float16_t)0.93076696107898371224f,(float16_t)0.36561299780477385379f, -(float16_t)0.92964089584318121418f,(float16_t)0.36846682995337232125f, -(float16_t)0.92850608047321558924f,(float16_t)0.37131719395183754306f, -(float16_t)0.92736252565040111495f,(float16_t)0.37416406297145793358f, -(float16_t)0.92621024213831137928f,(float16_t)0.37700741021641825945f, -(float16_t)0.92504924078267758425f,(float16_t)0.37984720892405116066f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.92270112833387862850f,(float16_t)0.38551605384391884890f, -(float16_t)0.92151403934204190183f,(float16_t)0.38834504669882624617f, -(float16_t)0.92031827670911059425f,(float16_t)0.39117038430225387069f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91790077562139049672f,(float16_t)0.39680998741671030805f, -(float16_t)0.91667905992104270485f,(float16_t)0.39962419984564678810f, -(float16_t)0.91544871608826783316f,(float16_t)0.40243465085941843018f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, -(float16_t)0.91296219042839821256f,(float16_t)0.40804416286497868782f, -(float16_t)0.91170603200542987832f,(float16_t)0.41084317105790391089f, -(float16_t)0.91044129225806724737f,(float16_t)0.41363831223843450235f, -(float16_t)0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)0.90788611648766626150f,(float16_t)0.41921688836322390515f, -(float16_t)0.90659570451491533483f,(float16_t)0.42200027079979968159f, -(float16_t)0.90529675931811881551f,(float16_t)0.42477968120910880589f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.90267331823725882600f,(float16_t)0.43032648134008261165f, -(float16_t)0.90134884704602202810f,(float16_t)0.43309381885315195726f, -(float16_t)0.90001589201616016833f,(float16_t)0.43585707992225547480f, -(float16_t)0.89867446569395381673f,(float16_t)0.43861623853852765853f, -(float16_t)0.89732458070541831763f,(float16_t)0.44137126873171667052f, -(float16_t)0.89596624975618521791f,(float16_t)0.44412214457042920035f, -(float16_t)0.89459948563138269595f,(float16_t)0.44686884016237415906f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.89184070939234272313f,(float16_t)0.45234958723377088896f, -(float16_t)0.89044872324475787817f,(float16_t)0.45508358712634383592f, -(float16_t)0.88904835585466457371f,(float16_t)0.45781330359887717485f, -(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.88622253014888063838f,(float16_t)0.46325978355186014923f, -(float16_t)0.88479709843093778954f,(float16_t)0.46597649576796618121f, -(float16_t)0.88336333866573157891f,(float16_t)0.46868882203582790114f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.88047088905216075450f,(float16_t)0.47410021465054996703f, -(float16_t)0.87901222642863352519f,(float16_t)0.47679923006332208812f, -(float16_t)0.87754529020726135258f,(float16_t)0.47949375766015295275f, -(float16_t)0.87607009419540660122f,(float16_t)0.48218377207912271887f, -(float16_t)0.87458665227817611321f,(float16_t)0.48486924800079106435f, -(float16_t)0.87309497841829009079f,(float16_t)0.48755016014843599592f, 
-(float16_t)0.87159508665595097909f,(float16_t)0.49022648328829115938f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.86857070597134089507f,(float16_t)0.49556526182577254058f, -(float16_t)0.86704624551569264845f,(float16_t)0.49822766697278181303f, -(float16_t)0.86551362409056908920f,(float16_t)0.50088538261124071482f, -(float16_t)0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)0.86242395611104050168f,(float16_t)0.50618664534515522835f, -(float16_t)0.86086693863776730939f,(float16_t)0.50883014254310698909f, -(float16_t)0.85930181835700847337f,(float16_t)0.51146885043797030157f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.85614732837519447184f,(float16_t)0.51673179901764987321f, -(float16_t)0.85455798836540053376f,(float16_t)0.51935599016558964269f, -(float16_t)0.85296060493036363059f,(float16_t)0.52197529293715438925f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.84974176800085254868f,(float16_t)0.52719913478190127964f, -(float16_t)0.84812034480329723252f,(float16_t)0.52980362468629460526f, -(float16_t)0.84649093877405212627f,(float16_t)0.53240312787719790144f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.84320823964184543620f,(float16_t)0.53758707629564539410f, -(float16_t)0.84155497743689844370f,(float16_t)0.54017147272989285423f, -(float16_t)0.83989379419599952126f,(float16_t)0.54275078486451588944f, -(float16_t)0.83822470555483807875f,(float16_t)0.54532498842204646383f, -(float16_t)0.83654772722351200542f,(float16_t)0.54789405917310018967f, -(float16_t)0.83486287498638001026f,(float16_t)0.55045797293660481131f, -(float16_t)0.83317016470191318511f,(float16_t)0.55301670558002746780f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.82976123379452304540f,(float16_t)0.55811853122055610221f, -(float16_t)0.82804504525775579626f,(float16_t)0.56066157619733603124f, -(float16_t)0.82632106284566353427f,(float16_t)0.56319934401383409117f, -(float16_t)0.82458930278502529099f,(float16_t)0.56573181078361312046f, -(float16_t)0.82284978137582642788f,(float16_t)0.56825895267013148970f, -(float16_t)0.82110251499110464835f,(float16_t)0.57078074588696725566f, -(float16_t)0.81934752007679700903f,(float16_t)0.57329716669804220430f, -(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.81581441080673378075f,(float16_t)0.57831379641165558958f, -(float16_t)0.81403632970594841378f,(float16_t)0.58081395809576452649f, -(float16_t)0.81225058658520399302f,(float16_t)0.58330865293769829094f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, -(float16_t)0.80865618158817498262f,(float16_t)0.58828154822264522306f, -(float16_t)0.80684755354379933401f,(float16_t)0.59075970185887416442f, -(float16_t)0.80503133114296365758f,(float16_t)0.59323229503979979516f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.80137617172314024039f,(float16_t)0.59816070699634238395f, -(float16_t)0.79953726910790501314f,(float16_t)0.60061647938386897305f, -(float16_t)0.79769084094339115509f,(float16_t)0.60306659854034816437f, -(float16_t)0.79583690460888356633f,(float16_t)0.60551104140432554512f, -(float16_t)0.79397547755433717231f,(float16_t)0.60794978496777363208f, -(float16_t)0.79210657730021238887f,(float16_t)0.61038280627630947528f, -(float16_t)0.79023022143731003197f,(float16_t)0.61281008242940970820f, 
-(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.78645521359908576731f,(float16_t)0.61764730793780386886f, -(float16_t)0.78455659715557524159f,(float16_t)0.62005721176328909561f, -(float16_t)0.78265059616657572938f,(float16_t)0.62246127937414996723f, -(float16_t)0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)0.77881651238147597827f,(float16_t)0.62725181549514408275f, -(float16_t)0.77688846567323244230f,(float16_t)0.62963823891492698426f, -(float16_t)0.77495310659487393057f,(float16_t)0.63201873593980906207f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.77106052426181381776f,(float16_t)0.63676186123628419899f, -(float16_t)0.76910333764557969882f,(float16_t)0.63912444486377573138f, -(float16_t)0.76713891193582040007f,(float16_t)0.64148101280858305095f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.76318841726338138010f,(float16_t)0.64617601298331628357f, -(float16_t)0.76120238548426177871f,(float16_t)0.64851440102211244110f, -(float16_t)0.75920918897838796102f,(float16_t)0.65084668499638087535f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.75520137689653654700f,(float16_t)0.65549285299961534967f, -(float16_t)0.75318679904361252042f,(float16_t)0.65780669329707863735f, -(float16_t)0.75116513190968636771f,(float16_t)0.66011434206742047870f, -(float16_t)0.74913639452345937020f,(float16_t)0.66241577759017178373f, -(float16_t)0.74710060598018013245f,(float16_t)0.66471097820334479334f, -(float16_t)0.74505778544146594733f,(float16_t)0.66699992230363747137f, -(float16_t)0.74300795213512171866f,(float16_t)0.66928258834663600929f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.73888732446061511361f,(float16_t)0.67382900037875603783f, -(float16_t)0.73681656887736979300f,(float16_t)0.67609270357531592310f, -(float16_t)0.73473887809596349907f,(float16_t)0.67835004312986146857f, -(float16_t)0.73265427167241281570f,(float16_t)0.68060099779545302212f, -(float16_t)0.73056276922782759087f,(float16_t)0.68284554638524808112f, -(float16_t)0.72846439044822519637f,(float16_t)0.68508366777270035541f, -(float16_t)0.72635915508434600873f,(float16_t)0.68731534089175905233f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, -(float16_t)0.72212819392921534511f,(float16_t)0.69175925836415774750f, -(float16_t)0.72000250796138165477f,(float16_t)0.69397146088965389055f, -(float16_t)0.71787004505573170920f,(float16_t)0.69617713149146298601f, -(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, -(float16_t)0.71358486878079352422f,(float16_t)0.70056879394324833576f, -(float16_t)0.71143219574521643356f,(float16_t)0.70275474445722529993f, -(float16_t)0.70927282643886568891f,(float16_t)0.70493408037590488124f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.70493408037590499227f,(float16_t)0.70927282643886568891f, -(float16_t)0.70275474445722529993f,(float16_t)0.71143219574521643356f, -(float16_t)0.70056879394324844679f,(float16_t)0.71358486878079352422f, -(float16_t)0.69837624940897291559f,(float16_t)0.71573082528381859468f, -(float16_t)0.69617713149146298601f,(float16_t)0.71787004505573170920f, -(float16_t)0.69397146088965400157f,(float16_t)0.72000250796138165477f, -(float16_t)0.69175925836415774750f,(float16_t)0.72212819392921534511f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, 
-(float16_t)0.68731534089175905233f,(float16_t)0.72635915508434600873f, -(float16_t)0.68508366777270035541f,(float16_t)0.72846439044822519637f, -(float16_t)0.68284554638524808112f,(float16_t)0.73056276922782759087f, -(float16_t)0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)0.67835004312986146857f,(float16_t)0.73473887809596349907f, -(float16_t)0.67609270357531603413f,(float16_t)0.73681656887736979300f, -(float16_t)0.67382900037875614885f,(float16_t)0.73888732446061511361f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.66928258834663600929f,(float16_t)0.74300795213512171866f, -(float16_t)0.66699992230363747137f,(float16_t)0.74505778544146594733f, -(float16_t)0.66471097820334490436f,(float16_t)0.74710060598018013245f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.66011434206742047870f,(float16_t)0.75116513190968636771f, -(float16_t)0.65780669329707874837f,(float16_t)0.75318679904361252042f, -(float16_t)0.65549285299961546070f,(float16_t)0.75520137689653654700f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.65084668499638098638f,(float16_t)0.75920918897838796102f, -(float16_t)0.64851440102211255212f,(float16_t)0.76120238548426177871f, -(float16_t)0.64617601298331639459f,(float16_t)0.76318841726338126907f, -(float16_t)0.64383154288979149715f,(float16_t)0.76516726562245895860f, -(float16_t)0.64148101280858316198f,(float16_t)0.76713891193582040007f, -(float16_t)0.63912444486377573138f,(float16_t)0.76910333764557958780f, -(float16_t)0.63676186123628419899f,(float16_t)0.77106052426181381776f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.63201873593980906207f,(float16_t)0.77495310659487381955f, -(float16_t)0.62963823891492709528f,(float16_t)0.77688846567323244230f, -(float16_t)0.62725181549514419377f,(float16_t)0.77881651238147586724f, -(float16_t)0.62485948814238645443f,(float16_t)0.78073722857209448822f, -(float16_t)0.62246127937415007825f,(float16_t)0.78265059616657572938f, -(float16_t)0.62005721176328920663f,(float16_t)0.78455659715557524159f, -(float16_t)0.61764730793780397988f,(float16_t)0.78645521359908576731f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.61281008242940970820f,(float16_t)0.79023022143731003197f, -(float16_t)0.61038280627630947528f,(float16_t)0.79210657730021227785f, -(float16_t)0.60794978496777374311f,(float16_t)0.79397547755433717231f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.60306659854034827539f,(float16_t)0.79769084094339104407f, -(float16_t)0.60061647938386897305f,(float16_t)0.79953726910790501314f, -(float16_t)0.59816070699634238395f,(float16_t)0.80137617172314012937f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.59323229503979979516f,(float16_t)0.80503133114296365758f, -(float16_t)0.59075970185887427544f,(float16_t)0.80684755354379922299f, -(float16_t)0.58828154822264533408f,(float16_t)0.80865618158817498262f, -(float16_t)0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)0.58330865293769829094f,(float16_t)0.81225058658520388200f, -(float16_t)0.58081395809576452649f,(float16_t)0.81403632970594830276f, -(float16_t)0.57831379641165558958f,(float16_t)0.81581441080673378075f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.57329716669804231532f,(float16_t)0.81934752007679689800f, 
-(float16_t)0.57078074588696736669f,(float16_t)0.82110251499110464835f, -(float16_t)0.56825895267013148970f,(float16_t)0.82284978137582631685f, -(float16_t)0.56573181078361323149f,(float16_t)0.82458930278502529099f, -(float16_t)0.56319934401383409117f,(float16_t)0.82632106284566353427f, -(float16_t)0.56066157619733603124f,(float16_t)0.82804504525775579626f, -(float16_t)0.55811853122055610221f,(float16_t)0.82976123379452304540f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.55301670558002757883f,(float16_t)0.83317016470191318511f, -(float16_t)0.55045797293660481131f,(float16_t)0.83486287498638001026f, -(float16_t)0.54789405917310018967f,(float16_t)0.83654772722351189440f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.54275078486451600046f,(float16_t)0.83989379419599941023f, -(float16_t)0.54017147272989296525f,(float16_t)0.84155497743689833268f, -(float16_t)0.53758707629564550512f,(float16_t)0.84320823964184543620f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.53240312787719801246f,(float16_t)0.84649093877405212627f, -(float16_t)0.52980362468629482731f,(float16_t)0.84812034480329712149f, -(float16_t)0.52719913478190139067f,(float16_t)0.84974176800085243766f, -(float16_t)0.52458968267846883826f,(float16_t)0.85135519310526519554f, -(float16_t)0.52197529293715438925f,(float16_t)0.85296060493036363059f, -(float16_t)0.51935599016558953167f,(float16_t)0.85455798836540053376f, -(float16_t)0.51673179901764998423f,(float16_t)0.85614732837519447184f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.51146885043797052361f,(float16_t)0.85930181835700836235f, -(float16_t)0.50883014254310698909f,(float16_t)0.86086693863776730939f, -(float16_t)0.50618664534515533937f,(float16_t)0.86242395611104050168f, -(float16_t)0.50353838372571757542f,(float16_t)0.86397285612158669643f, -(float16_t)0.50088538261124093687f,(float16_t)0.86551362409056897818f, -(float16_t)0.49822766697278186854f,(float16_t)0.86704624551569264845f, -(float16_t)0.49556526182577248507f,(float16_t)0.86857070597134089507f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.49022648328829110387f,(float16_t)0.87159508665595109012f, -(float16_t)0.48755016014843605143f,(float16_t)0.87309497841829009079f, -(float16_t)0.48486924800079111986f,(float16_t)0.87458665227817611321f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.47949375766015300826f,(float16_t)0.87754529020726124156f, -(float16_t)0.47679923006332225466f,(float16_t)0.87901222642863341417f, -(float16_t)0.47410021465055002254f,(float16_t)0.88047088905216075450f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.46868882203582795665f,(float16_t)0.88336333866573157891f, -(float16_t)0.46597649576796612569f,(float16_t)0.88479709843093778954f, -(float16_t)0.46325978355186026025f,(float16_t)0.88622253014888063838f, -(float16_t)0.46053871095824000514f,(float16_t)0.88763962040285393496f, -(float16_t)0.45781330359887728587f,(float16_t)0.88904835585466457371f, -(float16_t)0.45508358712634383592f,(float16_t)0.89044872324475787817f, -(float16_t)0.45234958723377099998f,(float16_t)0.89184070939234272313f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.44686884016237432560f,(float16_t)0.89459948563138258493f, -(float16_t)0.44412214457042925586f,(float16_t)0.89596624975618510689f, 
-(float16_t)0.44137126873171661501f,(float16_t)0.89732458070541831763f, -(float16_t)0.43861623853852771404f,(float16_t)0.89867446569395381673f, -(float16_t)0.43585707992225547480f,(float16_t)0.90001589201616027935f, -(float16_t)0.43309381885315201277f,(float16_t)0.90134884704602202810f, -(float16_t)0.43032648134008261165f,(float16_t)0.90267331823725882600f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.42477968120910880589f,(float16_t)0.90529675931811881551f, -(float16_t)0.42200027079979979261f,(float16_t)0.90659570451491533483f, -(float16_t)0.41921688836322396066f,(float16_t)0.90788611648766626150f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.41363831223843455787f,(float16_t)0.91044129225806713634f, -(float16_t)0.41084317105790391089f,(float16_t)0.91170603200542987832f, -(float16_t)0.40804416286497874333f,(float16_t)0.91296219042839810154f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.40243465085941854120f,(float16_t)0.91544871608826783316f, -(float16_t)0.39962419984564678810f,(float16_t)0.91667905992104270485f, -(float16_t)0.39680998741671041907f,(float16_t)0.91790077562139038569f, -(float16_t)0.39399204006104809883f,(float16_t)0.91911385169005777040f, -(float16_t)0.39117038430225398171f,(float16_t)0.92031827670911048322f, -(float16_t)0.38834504669882630168f,(float16_t)0.92151403934204190183f, -(float16_t)0.38551605384391901543f,(float16_t)0.92270112833387851747f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.37984720892405110515f,(float16_t)0.92504924078267758425f, -(float16_t)0.37700741021641831496f,(float16_t)0.92621024213831126826f, -(float16_t)0.37416406297145798909f,(float16_t)0.92736252565040111495f, -(float16_t)0.37131719395183759858f,(float16_t)0.92850608047321558924f, -(float16_t)0.36846682995337232125f,(float16_t)0.92964089584318121418f, -(float16_t)0.36561299780477396482f,(float16_t)0.93076696107898371224f, -(float16_t)0.36275572436739722537f,(float16_t)0.93188426558166814750f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, -(float16_t)0.35703096123343003310f,(float16_t)0.93409255040425887007f, -(float16_t)0.35416352542049051033f,(float16_t)0.93518350993894749923f, -(float16_t)0.35129275608556714827f,(float16_t)0.93626566717027825959f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.34554132496398914931f,(float16_t)0.93840353406310805795f, -(float16_t)0.34266071731199437833f,(float16_t)0.93945922360218991898f, -(float16_t)0.33977688440682696225f,(float16_t)0.94050607059326829518f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.33399965144200949307f,(float16_t)0.94257319760144686605f, -(float16_t)0.33110630575987642921f,(float16_t)0.94359345816196038559f, -(float16_t)0.32820984357909266382f,(float16_t)0.94460483726148025685f, -(float16_t)0.32531029216226298173f,(float16_t)0.94560732538052127971f, -(float16_t)0.32240767880107001897f,(float16_t)0.94660091308328353499f, -(float16_t)0.31950203081601574739f,(float16_t)0.94758559101774109124f, -(float16_t)0.31659337555616584581f,(float16_t)0.94856134991573026749f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.31076715274961147495f,(float16_t)0.95048607394948170235f, -(float16_t)0.30784964004153497763f,(float16_t)0.95143502096900833820f, -(float16_t)0.30492922973540242948f,(float16_t)0.95237501271976587880f, 
-(float16_t)0.30200594931922819519f,(float16_t)0.95330604035419375109f, -(float16_t)0.29907982630804047508f,(float16_t)0.95422809510910566733f, -(float16_t)0.29615088824362395536f,(float16_t)0.95514116830577067141f, -(float16_t)0.29321916269425868373f,(float16_t)0.95604525134999640557f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.28734745954472956653f,(float16_t)0.95782641302753290802f, -(float16_t)0.28440753721127182141f,(float16_t)0.95870347489587159906f, -(float16_t)0.28146493792575805193f,(float16_t)0.95957151308198451733f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.27557181931095825478f,(float16_t)0.96128048581132063966f, -(float16_t)0.27262135544994897662f,(float16_t)0.96212140426904158019f, -(float16_t)0.26966832557291520178f,(float16_t)0.96295326687368387741f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.26375467897483151347f,(float16_t)0.96458979328981264700f, -(float16_t)0.26079411791527556952f,(float16_t)0.96539444169768939830f, -(float16_t)0.25783110216215893162f,(float16_t)0.96619000344541261516f, -(float16_t)0.25486565960451462720f,(float16_t)0.96697647104485207059f, -(float16_t)0.25189781815421691258f,(float16_t)0.96775383709347551076f, -(float16_t)0.24892760574572025956f,(float16_t)0.96852209427441726675f, -(float16_t)0.24595505033579459497f,(float16_t)0.96928123535654853171f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.24000302244874149871f,(float16_t)0.97077214072895035013f, -(float16_t)0.23702360599436733679f,(float16_t)0.97150389098625178352f, -(float16_t)0.23404195858354345794f,(float16_t)0.97222649707893626925f, -(float16_t)0.23105810828067127605f,(float16_t)0.97293995220556006576f, -(float16_t)0.22807208317088578653f,(float16_t)0.97364424965081186603f, -(float16_t)0.22508391135979277653f,(float16_t)0.97433938278557585821f, -(float16_t)0.22209362097320359264f,(float16_t)0.97502534506699412020f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.21610679707621960333f,(float16_t)0.97636973133002114000f, -(float16_t)0.21311031991609136194f,(float16_t)0.97702814265775439484f, -(float16_t)0.21011183688046972118f,(float16_t)0.97767735782450992943f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, -(float16_t)0.20410896609281700687f,(float16_t)0.97894817531906219710f, -(float16_t)0.20110463484209195606f,(float16_t)0.97956976568544051887f, -(float16_t)0.19809841071795372680f,(float16_t)0.98018213596811731847f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.19208039704989238183f,(float16_t)0.98137919331375456089f, -(float16_t)0.18906866414980627589f,(float16_t)0.98196386910955524296f, -(float16_t)0.18605515166344663291f,(float16_t)0.98253930228744124076f, -(float16_t)0.18303988795514106180f,(float16_t)0.98310548743121628501f, -(float16_t)0.18002290140569951471f,(float16_t)0.98366241921173025453f, -(float16_t)0.17700422041214886049f,(float16_t)0.98421009238692902521f, -(float16_t)0.17398387338746384989f,(float16_t)0.98474850180190420801f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.16793829497473122814f,(float16_t)0.98579750916756736512f, -(float16_t)0.16491312048997008866f,(float16_t)0.98630809724459866938f, -(float16_t)0.16188639378011188130f,(float16_t)0.98680940181418541624f, -(float16_t)0.15885814333386139019f,(float16_t)0.98730141815785843473f, 
-(float16_t)0.15582839765426531597f,(float16_t)0.98778414164457217783f, -(float16_t)0.15279718525844340760f,(float16_t)0.98825756773074946437f, -(float16_t)0.14976453467732162017f,(float16_t)0.98872169196032377858f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.14369503315029458212f,(float16_t)0.98962201746320077600f, -(float16_t)0.14065823933284923863f,(float16_t)0.99005821026229712256f, -(float16_t)0.13762012158648617710f,(float16_t)0.99048508425645698239f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.13154002870288328264f,(float16_t)0.99131085984611544415f, -(float16_t)0.12849811079379322432f,(float16_t)0.99170975366909952520f, -(float16_t)0.12545498341154620592f,(float16_t)0.99209931314219179654f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.11936521481099135467f,(float16_t)0.99285041445986510489f, -(float16_t)0.11631863091190487725f,(float16_t)0.99321194923479450001f, -(float16_t)0.11327095217756436019f,(float16_t)0.99356413552059530403f, -(float16_t)0.11022220729388318428f,(float16_t)0.99390697000235606051f, -(float16_t)0.10717242495680887049f,(float16_t)0.99424044945318790223f, -(float16_t)0.10412163387205472520f,(float16_t)0.99456457073425541537f, -(float16_t)0.10106986275482787718f,(float16_t)0.99487933079480561638f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.09496349532963906104f,(float16_t)0.99548075549192693856f, -(float16_t)0.09190895649713269611f,(float16_t)0.99576741446765981713f, -(float16_t)0.08885355258252468358f,(float16_t)0.99604470090125196702f, -(float16_t)0.08579731234443987997f,(float16_t)0.99631261218277800129f, -(float16_t)0.08274026454937580266f,(float16_t)0.99657114579055483539f, -(float16_t)0.07968243797143012563f,(float16_t)0.99682029929116566791f, -(float16_t)0.07662386139203161695f,(float16_t)0.99706007033948296225f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.07050457338961400866f,(float16_t)0.99751145614030345410f, -(float16_t)0.06744391956366410645f,(float16_t)0.99772306664419163624f, -(float16_t)0.06438263092985740954f,(float16_t)0.99792528619859599548f, -(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.05825826450043573163f,(float16_t)0.99830154493389289261f, -(float16_t)0.05519524434969003135f,(float16_t)0.99847558057329477421f, -(float16_t)0.05213170468028331672f,(float16_t)0.99864021818026527111f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.04600318213091464381f,(float16_t)0.99894129318685687124f, -(float16_t)0.04293825693494095902f,(float16_t)0.99907772775264536147f, -(float16_t)0.03987292758773984536f,(float16_t)0.99920475861836388631f, -(float16_t)0.03680722294135899131f,(float16_t)0.99932238458834954375f, -(float16_t)0.03374117185137764235f,(float16_t)0.99943060455546173237f, -(float16_t)0.03067480317663658085f,(float16_t)0.99952941750109314256f, -(float16_t)0.02760814577896581953f,(float16_t)0.99961882249517863830f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.02147408027546960502f,(float16_t)0.99976940535121527898f, -(float16_t)0.01840672990580482019f,(float16_t)0.99983058179582340319f, -(float16_t)0.01533920628498821985f,(float16_t)0.99988234745421256111f, -(float16_t)0.01227153828571994447f,(float16_t)0.99992470183914450299f, -(float16_t)0.00920375478205995995f,(float16_t)0.99995764455196389786f, 
-(float16_t)0.00613588464915451517f,(float16_t)0.99998117528260110909f, -(float16_t)0.00306795676296613791f,(float16_t)0.99999529380957619118f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.00306795676296601561f,(float16_t)0.99999529380957619118f, -(float16_t)-0.00613588464915439287f,(float16_t)0.99998117528260110909f, -(float16_t)-0.00920375478205983678f,(float16_t)0.99995764455196389786f, -(float16_t)-0.01227153828571982304f,(float16_t)0.99992470183914450299f, -(float16_t)-0.01533920628498809842f,(float16_t)0.99988234745421256111f, -(float16_t)-0.01840672990580469529f,(float16_t)0.99983058179582340319f, -(float16_t)-0.02147408027546948359f,(float16_t)0.99976940535121527898f, -(float16_t)-0.02454122852291214241f,(float16_t)0.99969881869620424997f, -(float16_t)-0.02760814577896569810f,(float16_t)0.99961882249517863830f, -(float16_t)-0.03067480317663645942f,(float16_t)0.99952941750109314256f, -(float16_t)-0.03374117185137751745f,(float16_t)0.99943060455546173237f, -(float16_t)-0.03680722294135886641f,(float16_t)0.99932238458834954375f, -(float16_t)-0.03987292758773972740f,(float16_t)0.99920475861836388631f, -(float16_t)-0.04293825693494083412f,(float16_t)0.99907772775264536147f, -(float16_t)-0.04600318213091451891f,(float16_t)0.99894129318685687124f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.05213170468028319182f,(float16_t)0.99864021818026527111f, -(float16_t)-0.05519524434968991339f,(float16_t)0.99847558057329477421f, -(float16_t)-0.05825826450043560673f,(float16_t)0.99830154493389289261f, -(float16_t)-0.06132073630220852972f,(float16_t)0.99811811290014917919f, -(float16_t)-0.06438263092985728464f,(float16_t)0.99792528619859599548f, -(float16_t)-0.06744391956366398155f,(float16_t)0.99772306664419163624f, -(float16_t)-0.07050457338961389764f,(float16_t)0.99751145614030345410f, -(float16_t)-0.07356456359966732916f,(float16_t)0.99729045667869020697f, -(float16_t)-0.07662386139203150592f,(float16_t)0.99706007033948296225f, -(float16_t)-0.07968243797143001461f,(float16_t)0.99682029929116577893f, -(float16_t)-0.08274026454937567776f,(float16_t)0.99657114579055483539f, -(float16_t)-0.08579731234443975507f,(float16_t)0.99631261218277800129f, -(float16_t)-0.08885355258252455868f,(float16_t)0.99604470090125196702f, -(float16_t)-0.09190895649713257121f,(float16_t)0.99576741446765981713f, -(float16_t)-0.09496349532963895002f,(float16_t)0.99548075549192693856f, -(float16_t)-0.09801714032956064526f,(float16_t)0.99518472667219692873f, -(float16_t)-0.10106986275482775228f,(float16_t)0.99487933079480561638f, -(float16_t)-0.10412163387205460030f,(float16_t)0.99456457073425541537f, -(float16_t)-0.10717242495680875947f,(float16_t)0.99424044945318790223f, -(float16_t)-0.11022220729388305938f,(float16_t)0.99390697000235606051f, -(float16_t)-0.11327095217756423529f,(float16_t)0.99356413552059530403f, -(float16_t)-0.11631863091190475235f,(float16_t)0.99321194923479450001f, -(float16_t)-0.11936521481099122977f,(float16_t)0.99285041445986510489f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.12545498341154606714f,(float16_t)0.99209931314219179654f, -(float16_t)-0.12849811079379311329f,(float16_t)0.99170975366909952520f, -(float16_t)-0.13154002870288314386f,(float16_t)0.99131085984611544415f, -(float16_t)-0.13458070850712611222f,(float16_t)0.99090263542778000971f, -(float16_t)-0.13762012158648606608f,(float16_t)0.99048508425645698239f, 
-(float16_t)-0.14065823933284912761f,(float16_t)0.99005821026229712256f, -(float16_t)-0.14369503315029444335f,(float16_t)0.98962201746320088702f, -(float16_t)-0.14673047445536163691f,(float16_t)0.98917650996478101444f, -(float16_t)-0.14976453467732150915f,(float16_t)0.98872169196032377858f, -(float16_t)-0.15279718525844329657f,(float16_t)0.98825756773074946437f, -(float16_t)-0.15582839765426520495f,(float16_t)0.98778414164457217783f, -(float16_t)-0.15885814333386127917f,(float16_t)0.98730141815785843473f, -(float16_t)-0.16188639378011177028f,(float16_t)0.98680940181418552726f, -(float16_t)-0.16491312048996994988f,(float16_t)0.98630809724459866938f, -(float16_t)-0.16793829497473108936f,(float16_t)0.98579750916756747614f, -(float16_t)-0.17096188876030124493f,(float16_t)0.98527764238894122162f, -(float16_t)-0.17398387338746371111f,(float16_t)0.98474850180190420801f, -(float16_t)-0.17700422041214874946f,(float16_t)0.98421009238692902521f, -(float16_t)-0.18002290140569940369f,(float16_t)0.98366241921173025453f, -(float16_t)-0.18303988795514092303f,(float16_t)0.98310548743121628501f, -(float16_t)-0.18605515166344649414f,(float16_t)0.98253930228744124076f, -(float16_t)-0.18906866414980616486f,(float16_t)0.98196386910955524296f, -(float16_t)-0.19208039704989227081f,(float16_t)0.98137919331375456089f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.19809841071795361578f,(float16_t)0.98018213596811742949f, -(float16_t)-0.20110463484209181728f,(float16_t)0.97956976568544051887f, -(float16_t)-0.20410896609281689584f,(float16_t)0.97894817531906219710f, -(float16_t)-0.20711137619221844930f,(float16_t)0.97831737071962765473f, -(float16_t)-0.21011183688046961016f,(float16_t)0.97767735782450992943f, -(float16_t)-0.21311031991609125091f,(float16_t)0.97702814265775439484f, -(float16_t)-0.21610679707621949230f,(float16_t)0.97636973133002114000f, -(float16_t)-0.21910124015686965881f,(float16_t)0.97570213003852857003f, -(float16_t)-0.22209362097320348162f,(float16_t)0.97502534506699412020f, -(float16_t)-0.22508391135979266551f,(float16_t)0.97433938278557585821f, -(float16_t)-0.22807208317088567551f,(float16_t)0.97364424965081197705f, -(float16_t)-0.23105810828067113727f,(float16_t)0.97293995220556017678f, -(float16_t)-0.23404195858354331916f,(float16_t)0.97222649707893638027f, -(float16_t)-0.23702360599436722577f,(float16_t)0.97150389098625178352f, -(float16_t)-0.24000302244874138768f,(float16_t)0.97077214072895035013f, -(float16_t)-0.24298017990326387094f,(float16_t)0.97003125319454397424f, -(float16_t)-0.24595505033579448395f,(float16_t)0.96928123535654853171f, -(float16_t)-0.24892760574572012078f,(float16_t)0.96852209427441737777f, -(float16_t)-0.25189781815421680156f,(float16_t)0.96775383709347551076f, -(float16_t)-0.25486565960451451618f,(float16_t)0.96697647104485207059f, -(float16_t)-0.25783110216215882060f,(float16_t)0.96619000344541261516f, -(float16_t)-0.26079411791527545850f,(float16_t)0.96539444169768939830f, -(float16_t)-0.26375467897483140245f,(float16_t)0.96458979328981275803f, -(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.26966832557291509076f,(float16_t)0.96295326687368387741f, -(float16_t)-0.27262135544994886560f,(float16_t)0.96212140426904158019f, -(float16_t)-0.27557181931095814376f,(float16_t)0.96128048581132063966f, -(float16_t)-0.27851968938505294870f,(float16_t)0.96043051941556589757f, -(float16_t)-0.28146493792575794091f,(float16_t)0.95957151308198451733f, 
-(float16_t)-0.28440753721127171039f,(float16_t)0.95870347489587159906f, -(float16_t)-0.28734745954472945551f,(float16_t)0.95782641302753290802f, -(float16_t)-0.29028467725446216452f,(float16_t)0.95694033573220893540f, -(float16_t)-0.29321916269425857271f,(float16_t)0.95604525134999651659f, -(float16_t)-0.29615088824362384434f,(float16_t)0.95514116830577067141f, -(float16_t)-0.29907982630804036406f,(float16_t)0.95422809510910566733f, -(float16_t)-0.30200594931922808417f,(float16_t)0.95330604035419386211f, -(float16_t)-0.30492922973540226295f,(float16_t)0.95237501271976587880f, -(float16_t)-0.30784964004153486661f,(float16_t)0.95143502096900833820f, -(float16_t)-0.31076715274961136393f,(float16_t)0.95048607394948181337f, -(float16_t)-0.31368174039889140658f,(float16_t)0.94952818059303667475f, -(float16_t)-0.31659337555616573479f,(float16_t)0.94856134991573037851f, -(float16_t)-0.31950203081601563637f,(float16_t)0.94758559101774120226f, -(float16_t)-0.32240767880106985244f,(float16_t)0.94660091308328353499f, -(float16_t)-0.32531029216226287071f,(float16_t)0.94560732538052139073f, -(float16_t)-0.32820984357909255280f,(float16_t)0.94460483726148025685f, -(float16_t)-0.33110630575987631818f,(float16_t)0.94359345816196038559f, -(float16_t)-0.33399965144200938205f,(float16_t)0.94257319760144686605f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.33977688440682685123f,(float16_t)0.94050607059326829518f, -(float16_t)-0.34266071731199426731f,(float16_t)0.93945922360218991898f, -(float16_t)-0.34554132496398903829f,(float16_t)0.93840353406310816897f, -(float16_t)-0.34841868024943439819f,(float16_t)0.93733901191257495977f, -(float16_t)-0.35129275608556703725f,(float16_t)0.93626566717027825959f, -(float16_t)-0.35416352542049039931f,(float16_t)0.93518350993894761025f, -(float16_t)-0.35703096123342992207f,(float16_t)0.93409255040425898109f, -(float16_t)-0.35989503653498816638f,(float16_t)0.93299279883473884567f, -(float16_t)-0.36275572436739711435f,(float16_t)0.93188426558166814750f, -(float16_t)-0.36561299780477385379f,(float16_t)0.93076696107898371224f, -(float16_t)-0.36846682995337221023f,(float16_t)0.92964089584318132520f, -(float16_t)-0.37131719395183748755f,(float16_t)0.92850608047321558924f, -(float16_t)-0.37416406297145787807f,(float16_t)0.92736252565040111495f, -(float16_t)-0.37700741021641820394f,(float16_t)0.92621024213831137928f, -(float16_t)-0.37984720892405099413f,(float16_t)0.92504924078267769527f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.38551605384391890441f,(float16_t)0.92270112833387851747f, -(float16_t)-0.38834504669882619066f,(float16_t)0.92151403934204201285f, -(float16_t)-0.39117038430225387069f,(float16_t)0.92031827670911059425f, -(float16_t)-0.39399204006104798781f,(float16_t)0.91911385169005777040f, -(float16_t)-0.39680998741671030805f,(float16_t)0.91790077562139049672f, -(float16_t)-0.39962419984564667708f,(float16_t)0.91667905992104270485f, -(float16_t)-0.40243465085941843018f,(float16_t)0.91544871608826783316f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.40804416286497863231f,(float16_t)0.91296219042839821256f, -(float16_t)-0.41084317105790379987f,(float16_t)0.91170603200542987832f, -(float16_t)-0.41363831223843450235f,(float16_t)0.91044129225806724737f, -(float16_t)-0.41642956009763698599f,(float16_t)0.90916798309052249127f, -(float16_t)-0.41921688836322407168f,(float16_t)0.90788611648766615048f, 
-(float16_t)-0.42200027079979968159f,(float16_t)0.90659570451491533483f, -(float16_t)-0.42477968120910869487f,(float16_t)0.90529675931811881551f, -(float16_t)-0.42755509343028186287f,(float16_t)0.90398929312344344922f, -(float16_t)-0.43032648134008272267f,(float16_t)0.90267331823725871498f, -(float16_t)-0.43309381885315190175f,(float16_t)0.90134884704602202810f, -(float16_t)-0.43585707992225536378f,(float16_t)0.90001589201616027935f, -(float16_t)-0.43861623853852738097f,(float16_t)0.89867446569395392775f, -(float16_t)-0.44137126873171672603f,(float16_t)0.89732458070541831763f, -(float16_t)-0.44412214457042914484f,(float16_t)0.89596624975618521791f, -(float16_t)-0.44686884016237399253f,(float16_t)0.89459948563138280697f, -(float16_t)-0.44961132965460670619f,(float16_t)0.89322430119551521344f, -(float16_t)-0.45234958723377088896f,(float16_t)0.89184070939234272313f, -(float16_t)-0.45508358712634372489f,(float16_t)0.89044872324475798919f, -(float16_t)-0.45781330359887700832f,(float16_t)0.88904835585466468473f, -(float16_t)-0.46053871095824006066f,(float16_t)0.88763962040285393496f, -(float16_t)-0.46325978355186014923f,(float16_t)0.88622253014888063838f, -(float16_t)-0.46597649576796601467f,(float16_t)0.88479709843093790056f, -(float16_t)-0.46868882203582767909f,(float16_t)0.88336333866573168994f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.47410021465054991152f,(float16_t)0.88047088905216086552f, -(float16_t)-0.47679923006332192159f,(float16_t)0.87901222642863352519f, -(float16_t)-0.47949375766015311928f,(float16_t)0.87754529020726124156f, -(float16_t)-0.48218377207912271887f,(float16_t)0.87607009419540660122f, -(float16_t)-0.48486924800079100883f,(float16_t)0.87458665227817622423f, -(float16_t)-0.48755016014843571837f,(float16_t)0.87309497841829020182f, -(float16_t)-0.49022648328829121489f,(float16_t)0.87159508665595097909f, -(float16_t)-0.49289819222978398239f,(float16_t)0.87008699110871146054f, -(float16_t)-0.49556526182577237405f,(float16_t)0.86857070597134100609f, -(float16_t)-0.49822766697278159098f,(float16_t)0.86704624551569275948f, -(float16_t)-0.50088538261124082585f,(float16_t)0.86551362409056908920f, -(float16_t)-0.50353838372571746440f,(float16_t)0.86397285612158680745f, -(float16_t)-0.50618664534515511733f,(float16_t)0.86242395611104061270f, -(float16_t)-0.50883014254310710012f,(float16_t)0.86086693863776719837f, -(float16_t)-0.51146885043797041259f,(float16_t)0.85930181835700847337f, -(float16_t)-0.51410274419322155026f,(float16_t)0.85772861000027211809f, -(float16_t)-0.51673179901764965116f,(float16_t)0.85614732837519458286f, -(float16_t)-0.51935599016558964269f,(float16_t)0.85455798836540053376f, -(float16_t)-0.52197529293715427823f,(float16_t)0.85296060493036374162f, -(float16_t)-0.52458968267846872724f,(float16_t)0.85135519310526519554f, -(float16_t)-0.52719913478190105760f,(float16_t)0.84974176800085265970f, -(float16_t)-0.52980362468629471628f,(float16_t)0.84812034480329723252f, -(float16_t)-0.53240312787719790144f,(float16_t)0.84649093877405212627f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, -(float16_t)-0.53758707629564561614f,(float16_t)0.84320823964184532517f, -(float16_t)-0.54017147272989285423f,(float16_t)0.84155497743689844370f, -(float16_t)-0.54275078486451577842f,(float16_t)0.83989379419599952126f, -(float16_t)-0.54532498842204624179f,(float16_t)0.83822470555483818977f, -(float16_t)-0.54789405917310018967f,(float16_t)0.83654772722351200542f, 
-(float16_t)-0.55045797293660470029f,(float16_t)0.83486287498638012128f, -(float16_t)-0.55301670558002735678f,(float16_t)0.83317016470191329613f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.55811853122055610221f,(float16_t)0.82976123379452304540f, -(float16_t)-0.56066157619733592021f,(float16_t)0.82804504525775579626f, -(float16_t)-0.56319934401383386913f,(float16_t)0.82632106284566364529f, -(float16_t)-0.56573181078361323149f,(float16_t)0.82458930278502517996f, -(float16_t)-0.56825895267013148970f,(float16_t)0.82284978137582631685f, -(float16_t)-0.57078074588696714464f,(float16_t)0.82110251499110475937f, -(float16_t)-0.57329716669804198226f,(float16_t)0.81934752007679712005f, -(float16_t)-0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)-0.57831379641165547856f,(float16_t)0.81581441080673378075f, -(float16_t)-0.58081395809576441547f,(float16_t)0.81403632970594852480f, -(float16_t)-0.58330865293769840196f,(float16_t)0.81225058658520388200f, -(float16_t)-0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)-0.58828154822264522306f,(float16_t)0.80865618158817509364f, -(float16_t)-0.59075970185887405339f,(float16_t)0.80684755354379944503f, -(float16_t)-0.59323229503979990618f,(float16_t)0.80503133114296354655f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.59816070699634216190f,(float16_t)0.80137617172314024039f, -(float16_t)-0.60061647938386875101f,(float16_t)0.79953726910790523519f, -(float16_t)-0.60306659854034827539f,(float16_t)0.79769084094339104407f, -(float16_t)-0.60551104140432543410f,(float16_t)0.79583690460888356633f, -(float16_t)-0.60794978496777352106f,(float16_t)0.79397547755433728334f, -(float16_t)-0.61038280627630958630f,(float16_t)0.79210657730021227785f, -(float16_t)-0.61281008242940970820f,(float16_t)0.79023022143731003197f, -(float16_t)-0.61523159058062670823f,(float16_t)0.78834642762660633863f, -(float16_t)-0.61764730793780375784f,(float16_t)0.78645521359908587833f, -(float16_t)-0.62005721176328920663f,(float16_t)0.78455659715557513056f, -(float16_t)-0.62246127937414996723f,(float16_t)0.78265059616657572938f, -(float16_t)-0.62485948814238623239f,(float16_t)0.78073722857209459924f, -(float16_t)-0.62725181549514386070f,(float16_t)0.77881651238147608929f, -(float16_t)-0.62963823891492709528f,(float16_t)0.77688846567323244230f, -(float16_t)-0.63201873593980895105f,(float16_t)0.77495310659487393057f, -(float16_t)-0.63439328416364537677f,(float16_t)0.77301045336273710440f, -(float16_t)-0.63676186123628431002f,(float16_t)0.77106052426181370674f, -(float16_t)-0.63912444486377573138f,(float16_t)0.76910333764557958780f, -(float16_t)-0.64148101280858305095f,(float16_t)0.76713891193582040007f, -(float16_t)-0.64383154288979127511f,(float16_t)0.76516726562245906962f, -(float16_t)-0.64617601298331639459f,(float16_t)0.76318841726338115805f, -(float16_t)-0.64851440102211244110f,(float16_t)0.76120238548426188974f, -(float16_t)-0.65084668499638076433f,(float16_t)0.75920918897838807204f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.65549285299961546070f,(float16_t)0.75520137689653643598f, -(float16_t)-0.65780669329707852633f,(float16_t)0.75318679904361252042f, -(float16_t)-0.66011434206742036768f,(float16_t)0.75116513190968658975f, -(float16_t)-0.66241577759017189475f,(float16_t)0.74913639452345925918f, -(float16_t)-0.66471097820334490436f,(float16_t)0.74710060598018013245f, 
-(float16_t)-0.66699992230363736034f,(float16_t)0.74505778544146605835f, -(float16_t)-0.66928258834663589827f,(float16_t)0.74300795213512182968f, -(float16_t)-0.67155895484701844111f,(float16_t)0.74095112535495899486f, -(float16_t)-0.67382900037875603783f,(float16_t)0.73888732446061522463f, -(float16_t)-0.67609270357531581208f,(float16_t)0.73681656887737001504f, -(float16_t)-0.67835004312986124653f,(float16_t)0.73473887809596372112f, -(float16_t)-0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)-0.68284554638524797010f,(float16_t)0.73056276922782759087f, -(float16_t)-0.68508366777270024439f,(float16_t)0.72846439044822530740f, -(float16_t)-0.68731534089175916336f,(float16_t)0.72635915508434589771f, -(float16_t)-0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)-0.69175925836415763648f,(float16_t)0.72212819392921545614f, -(float16_t)-0.69397146088965377952f,(float16_t)0.72000250796138176579f, -(float16_t)-0.69617713149146298601f,(float16_t)0.71787004505573170920f, -(float16_t)-0.69837624940897280457f,(float16_t)0.71573082528381870571f, -(float16_t)-0.70056879394324822474f,(float16_t)0.71358486878079363525f, -(float16_t)-0.70275474445722507788f,(float16_t)0.71143219574521665560f, -(float16_t)-0.70493408037590488124f,(float16_t)0.70927282643886557789f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.70927282643886546687f,(float16_t)0.70493408037590510329f, -(float16_t)-0.71143219574521654458f,(float16_t)0.70275474445722518890f, -(float16_t)-0.71358486878079352422f,(float16_t)0.70056879394324833576f, -(float16_t)-0.71573082528381859468f,(float16_t)0.69837624940897291559f, -(float16_t)-0.71787004505573159818f,(float16_t)0.69617713149146309703f, -(float16_t)-0.72000250796138165477f,(float16_t)0.69397146088965389055f, -(float16_t)-0.72212819392921523409f,(float16_t)0.69175925836415785852f, -(float16_t)-0.72424708295146678072f,(float16_t)0.68954054473706705153f, -(float16_t)-0.72635915508434578669f,(float16_t)0.68731534089175927438f, -(float16_t)-0.72846439044822519637f,(float16_t)0.68508366777270035541f, -(float16_t)-0.73056276922782747985f,(float16_t)0.68284554638524808112f, -(float16_t)-0.73265427167241270467f,(float16_t)0.68060099779545324417f, -(float16_t)-0.73473887809596349907f,(float16_t)0.67835004312986135755f, -(float16_t)-0.73681656887736979300f,(float16_t)0.67609270357531592310f, -(float16_t)-0.73888732446061511361f,(float16_t)0.67382900037875614885f, -(float16_t)-0.74095112535495888384f,(float16_t)0.67155895484701855214f, -(float16_t)-0.74300795213512171866f,(float16_t)0.66928258834663600929f, -(float16_t)-0.74505778544146594733f,(float16_t)0.66699992230363758239f, -(float16_t)-0.74710060598018002143f,(float16_t)0.66471097820334501538f, -(float16_t)-0.74913639452345914815f,(float16_t)0.66241577759017200577f, -(float16_t)-0.75116513190968636771f,(float16_t)0.66011434206742047870f, -(float16_t)-0.75318679904361240940f,(float16_t)0.65780669329707874837f, -(float16_t)-0.75520137689653643598f,(float16_t)0.65549285299961557172f, -(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.75920918897838796102f,(float16_t)0.65084668499638098638f, -(float16_t)-0.76120238548426166769f,(float16_t)0.64851440102211255212f, -(float16_t)-0.76318841726338115805f,(float16_t)0.64617601298331661663f, -(float16_t)-0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)-0.76713891193582040007f,(float16_t)0.64148101280858316198f, 
-(float16_t)-0.76910333764557947678f,(float16_t)0.63912444486377584241f, -(float16_t)-0.77106052426181359571f,(float16_t)0.63676186123628442104f, -(float16_t)-0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)-0.77495310659487381955f,(float16_t)0.63201873593980906207f, -(float16_t)-0.77688846567323233128f,(float16_t)0.62963823891492720630f, -(float16_t)-0.77881651238147597827f,(float16_t)0.62725181549514408275f, -(float16_t)-0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)-0.78265059616657561836f,(float16_t)0.62246127937415007825f, -(float16_t)-0.78455659715557501954f,(float16_t)0.62005721176328942867f, -(float16_t)-0.78645521359908576731f,(float16_t)0.61764730793780386886f, -(float16_t)-0.78834642762660622761f,(float16_t)0.61523159058062693028f, -(float16_t)-0.79023022143730992095f,(float16_t)0.61281008242940981923f, -(float16_t)-0.79210657730021216683f,(float16_t)0.61038280627630969732f, -(float16_t)-0.79397547755433717231f,(float16_t)0.60794978496777363208f, -(float16_t)-0.79583690460888345530f,(float16_t)0.60551104140432565615f, -(float16_t)-0.79769084094339093305f,(float16_t)0.60306659854034838641f, -(float16_t)-0.79953726910790512417f,(float16_t)0.60061647938386886203f, -(float16_t)-0.80137617172314024039f,(float16_t)0.59816070699634238395f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.80503133114296343553f,(float16_t)0.59323229503980001720f, -(float16_t)-0.80684755354379933401f,(float16_t)0.59075970185887416442f, -(float16_t)-0.80865618158817498262f,(float16_t)0.58828154822264533408f, -(float16_t)-0.81045719825259465718f,(float16_t)0.58579785745643897510f, -(float16_t)-0.81225058658520377097f,(float16_t)0.58330865293769851299f, -(float16_t)-0.81403632970594841378f,(float16_t)0.58081395809576452649f, -(float16_t)-0.81581441080673378075f,(float16_t)0.57831379641165570060f, -(float16_t)-0.81758481315158360037f,(float16_t)0.57580819141784544968f, -(float16_t)-0.81934752007679700903f,(float16_t)0.57329716669804209328f, -(float16_t)-0.82110251499110464835f,(float16_t)0.57078074588696725566f, -(float16_t)-0.82284978137582620583f,(float16_t)0.56825895267013171175f, -(float16_t)-0.82458930278502506894f,(float16_t)0.56573181078361345353f, -(float16_t)-0.82632106284566353427f,(float16_t)0.56319934401383409117f, -(float16_t)-0.82804504525775568524f,(float16_t)0.56066157619733614226f, -(float16_t)-0.82976123379452293438f,(float16_t)0.55811853122055632426f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.83317016470191318511f,(float16_t)0.55301670558002746780f, -(float16_t)-0.83486287498638001026f,(float16_t)0.55045797293660492233f, -(float16_t)-0.83654772722351189440f,(float16_t)0.54789405917310041172f, -(float16_t)-0.83822470555483807875f,(float16_t)0.54532498842204635281f, -(float16_t)-0.83989379419599952126f,(float16_t)0.54275078486451588944f, -(float16_t)-0.84155497743689833268f,(float16_t)0.54017147272989296525f, -(float16_t)-0.84320823964184532517f,(float16_t)0.53758707629564572716f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.84649093877405201525f,(float16_t)0.53240312787719801246f, -(float16_t)-0.84812034480329712149f,(float16_t)0.52980362468629482731f, -(float16_t)-0.84974176800085254868f,(float16_t)0.52719913478190127964f, -(float16_t)-0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)-0.85296060493036363059f,(float16_t)0.52197529293715438925f, 
-(float16_t)-0.85455798836540042274f,(float16_t)0.51935599016558975372f, -(float16_t)-0.85614732837519447184f,(float16_t)0.51673179901764976218f, -(float16_t)-0.85772861000027200706f,(float16_t)0.51410274419322177231f, -(float16_t)-0.85930181835700836235f,(float16_t)0.51146885043797052361f, -(float16_t)-0.86086693863776719837f,(float16_t)0.50883014254310732216f, -(float16_t)-0.86242395611104050168f,(float16_t)0.50618664534515522835f, -(float16_t)-0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)-0.86551362409056897818f,(float16_t)0.50088538261124093687f, -(float16_t)-0.86704624551569264845f,(float16_t)0.49822766697278175752f, -(float16_t)-0.86857070597134089507f,(float16_t)0.49556526182577254058f, -(float16_t)-0.87008699110871134952f,(float16_t)0.49289819222978414892f, -(float16_t)-0.87159508665595086807f,(float16_t)0.49022648328829138142f, -(float16_t)-0.87309497841829009079f,(float16_t)0.48755016014843588490f, -(float16_t)-0.87458665227817611321f,(float16_t)0.48486924800079111986f, -(float16_t)-0.87607009419540649020f,(float16_t)0.48218377207912288540f, -(float16_t)-0.87754529020726113053f,(float16_t)0.47949375766015328582f, -(float16_t)-0.87901222642863352519f,(float16_t)0.47679923006332208812f, -(float16_t)-0.88047088905216075450f,(float16_t)0.47410021465055007805f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.88336333866573168994f,(float16_t)0.46868882203582784562f, -(float16_t)-0.88479709843093778954f,(float16_t)0.46597649576796618121f, -(float16_t)-0.88622253014888052736f,(float16_t)0.46325978355186031576f, -(float16_t)-0.88763962040285382393f,(float16_t)0.46053871095824022719f, -(float16_t)-0.88904835585466457371f,(float16_t)0.45781330359887717485f, -(float16_t)-0.89044872324475787817f,(float16_t)0.45508358712634389143f, -(float16_t)-0.89184070939234261211f,(float16_t)0.45234958723377105549f, -(float16_t)-0.89322430119551521344f,(float16_t)0.44961132965460687272f, -(float16_t)-0.89459948563138269595f,(float16_t)0.44686884016237415906f, -(float16_t)-0.89596624975618510689f,(float16_t)0.44412214457042931137f, -(float16_t)-0.89732458070541820661f,(float16_t)0.44137126873171689256f, -(float16_t)-0.89867446569395392775f,(float16_t)0.43861623853852754751f, -(float16_t)-0.90001589201616016833f,(float16_t)0.43585707992225553031f, -(float16_t)-0.90134884704602191707f,(float16_t)0.43309381885315206828f, -(float16_t)-0.90267331823725871498f,(float16_t)0.43032648134008288920f, -(float16_t)-0.90398929312344333820f,(float16_t)0.42755509343028202940f, -(float16_t)-0.90529675931811870448f,(float16_t)0.42477968120910886141f, -(float16_t)-0.90659570451491533483f,(float16_t)0.42200027079979984812f, -(float16_t)-0.90788611648766603945f,(float16_t)0.41921688836322423821f, -(float16_t)-0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)-0.91044129225806713634f,(float16_t)0.41363831223843466889f, -(float16_t)-0.91170603200542976730f,(float16_t)0.41084317105790413294f, -(float16_t)-0.91296219042839821256f,(float16_t)0.40804416286497857680f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, -(float16_t)-0.91544871608826772214f,(float16_t)0.40243465085941859671f, -(float16_t)-0.91667905992104259383f,(float16_t)0.39962419984564706565f, -(float16_t)-0.91790077562139049672f,(float16_t)0.39680998741671025254f, -(float16_t)-0.91911385169005777040f,(float16_t)0.39399204006104815434f, -(float16_t)-0.92031827670911048322f,(float16_t)0.39117038430225403722f, 
-(float16_t)-0.92151403934204179080f,(float16_t)0.38834504669882657923f, -(float16_t)-0.92270112833387862850f,(float16_t)0.38551605384391884890f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.92504924078267747323f,(float16_t)0.37984720892405138271f, -(float16_t)-0.92621024213831137928f,(float16_t)0.37700741021641814843f, -(float16_t)-0.92736252565040111495f,(float16_t)0.37416406297145804460f, -(float16_t)-0.92850608047321547822f,(float16_t)0.37131719395183770960f, -(float16_t)-0.92964089584318121418f,(float16_t)0.36846682995337259880f, -(float16_t)-0.93076696107898371224f,(float16_t)0.36561299780477379828f, -(float16_t)-0.93188426558166803648f,(float16_t)0.36275572436739728088f, -(float16_t)-0.93299279883473884567f,(float16_t)0.35989503653498833291f, -(float16_t)-0.93409255040425875904f,(float16_t)0.35703096123343031065f, -(float16_t)-0.93518350993894761025f,(float16_t)0.35416352542049039931f, -(float16_t)-0.93626566717027825959f,(float16_t)0.35129275608556720378f, -(float16_t)-0.93733901191257484875f,(float16_t)0.34841868024943478677f, -(float16_t)-0.93840353406310816897f,(float16_t)0.34554132496398898278f, -(float16_t)-0.93945922360218991898f,(float16_t)0.34266071731199443384f, -(float16_t)-0.94050607059326829518f,(float16_t)0.33977688440682701776f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.94257319760144686605f,(float16_t)0.33399965144200938205f, -(float16_t)-0.94359345816196038559f,(float16_t)0.33110630575987648472f, -(float16_t)-0.94460483726148014583f,(float16_t)0.32820984357909271933f, -(float16_t)-0.94560732538052116869f,(float16_t)0.32531029216226325929f, -(float16_t)-0.94660091308328353499f,(float16_t)0.32240767880106985244f, -(float16_t)-0.94758559101774109124f,(float16_t)0.31950203081601580291f, -(float16_t)-0.94856134991573026749f,(float16_t)0.31659337555616606785f, -(float16_t)-0.94952818059303667475f,(float16_t)0.31368174039889140658f, -(float16_t)-0.95048607394948170235f,(float16_t)0.31076715274961153046f, -(float16_t)-0.95143502096900833820f,(float16_t)0.30784964004153503314f, -(float16_t)-0.95237501271976576778f,(float16_t)0.30492922973540265152f, -(float16_t)-0.95330604035419386211f,(float16_t)0.30200594931922802866f, -(float16_t)-0.95422809510910555630f,(float16_t)0.29907982630804053059f, -(float16_t)-0.95514116830577067141f,(float16_t)0.29615088824362401088f, -(float16_t)-0.95604525134999629454f,(float16_t)0.29321916269425896129f, -(float16_t)-0.95694033573220882438f,(float16_t)0.29028467725446238656f, -(float16_t)-0.95782641302753290802f,(float16_t)0.28734745954472962204f, -(float16_t)-0.95870347489587148804f,(float16_t)0.28440753721127209896f, -(float16_t)-0.95957151308198451733f,(float16_t)0.28146493792575788540f, -(float16_t)-0.96043051941556578655f,(float16_t)0.27851968938505317075f, -(float16_t)-0.96128048581132063966f,(float16_t)0.27557181931095831029f, -(float16_t)-0.96212140426904146917f,(float16_t)0.27262135544994925418f, -(float16_t)-0.96295326687368387741f,(float16_t)0.26966832557291509076f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.96458979328981264700f,(float16_t)0.26375467897483156898f, -(float16_t)-0.96539444169768928727f,(float16_t)0.26079411791527584707f, -(float16_t)-0.96619000344541250413f,(float16_t)0.25783110216215898713f, -(float16_t)-0.96697647104485207059f,(float16_t)0.25486565960451468271f, -(float16_t)-0.96775383709347539973f,(float16_t)0.25189781815421719013f, 
-(float16_t)-0.96852209427441737777f,(float16_t)0.24892760574572009302f, -(float16_t)-0.96928123535654842069f,(float16_t)0.24595505033579465048f, -(float16_t)-0.97003125319454397424f,(float16_t)0.24298017990326406523f, -(float16_t)-0.97077214072895023911f,(float16_t)0.24000302244874177626f, -(float16_t)-0.97150389098625178352f,(float16_t)0.23702360599436717026f, -(float16_t)-0.97222649707893626925f,(float16_t)0.23404195858354351345f, -(float16_t)-0.97293995220556006576f,(float16_t)0.23105810828067133156f, -(float16_t)-0.97364424965081186603f,(float16_t)0.22807208317088606409f, -(float16_t)-0.97433938278557585821f,(float16_t)0.22508391135979283204f, -(float16_t)-0.97502534506699412020f,(float16_t)0.22209362097320364815f, -(float16_t)-0.97570213003852845901f,(float16_t)0.21910124015687004739f, -(float16_t)-0.97636973133002114000f,(float16_t)0.21610679707621943679f, -(float16_t)-0.97702814265775439484f,(float16_t)0.21311031991609141745f, -(float16_t)-0.97767735782450992943f,(float16_t)0.21011183688046980444f, -(float16_t)-0.97831737071962754371f,(float16_t)0.20711137619221883788f, -(float16_t)-0.97894817531906219710f,(float16_t)0.20410896609281684033f, -(float16_t)-0.97956976568544051887f,(float16_t)0.20110463484209201157f, -(float16_t)-0.98018213596811731847f,(float16_t)0.19809841071795381007f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98137919331375456089f,(float16_t)0.19208039704989246510f, -(float16_t)-0.98196386910955524296f,(float16_t)0.18906866414980635915f, -(float16_t)-0.98253930228744124076f,(float16_t)0.18605515166344691047f, -(float16_t)-0.98310548743121628501f,(float16_t)0.18303988795514089527f, -(float16_t)-0.98366241921173025453f,(float16_t)0.18002290140569957022f, -(float16_t)-0.98421009238692902521f,(float16_t)0.17700422041214894375f, -(float16_t)-0.98474850180190420801f,(float16_t)0.17398387338746412745f, -(float16_t)-0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)-0.98579750916756736512f,(float16_t)0.16793829497473128365f, -(float16_t)-0.98630809724459855836f,(float16_t)0.16491312048997014417f, -(float16_t)-0.98680940181418552726f,(float16_t)0.16188639378011174252f, -(float16_t)-0.98730141815785843473f,(float16_t)0.15885814333386147346f, -(float16_t)-0.98778414164457217783f,(float16_t)0.15582839765426537149f, -(float16_t)-0.98825756773074946437f,(float16_t)0.15279718525844368515f, -(float16_t)-0.98872169196032377858f,(float16_t)0.14976453467732145364f, -(float16_t)-0.98917650996478101444f,(float16_t)0.14673047445536180344f, -(float16_t)-0.98962201746320077600f,(float16_t)0.14369503315029463764f, -(float16_t)-0.99005821026229701154f,(float16_t)0.14065823933284954395f, -(float16_t)-0.99048508425645709341f,(float16_t)0.13762012158648603832f, -(float16_t)-0.99090263542778000971f,(float16_t)0.13458070850712627875f, -(float16_t)-0.99131085984611544415f,(float16_t)0.13154002870288333815f, -(float16_t)-0.99170975366909952520f,(float16_t)0.12849811079379308554f, -(float16_t)-0.99209931314219179654f,(float16_t)0.12545498341154626143f, -(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, -(float16_t)-0.99285041445986510489f,(float16_t)0.11936521481099163222f, -(float16_t)-0.99321194923479450001f,(float16_t)0.11631863091190471071f, -(float16_t)-0.99356413552059530403f,(float16_t)0.11327095217756441570f, -(float16_t)-0.99390697000235606051f,(float16_t)0.11022220729388323979f, -(float16_t)-0.99424044945318790223f,(float16_t)0.10717242495680916192f, 
-(float16_t)-0.99456457073425541537f,(float16_t)0.10412163387205457254f, -(float16_t)-0.99487933079480561638f,(float16_t)0.10106986275482793269f, -(float16_t)-0.99518472667219681771f,(float16_t)0.09801714032956082567f, -(float16_t)-0.99548075549192693856f,(float16_t)0.09496349532963890838f, -(float16_t)-0.99576741446765981713f,(float16_t)0.09190895649713275162f, -(float16_t)-0.99604470090125196702f,(float16_t)0.08885355258252475297f, -(float16_t)-0.99631261218277800129f,(float16_t)0.08579731234444015753f, -(float16_t)-0.99657114579055483539f,(float16_t)0.08274026454937563613f, -(float16_t)-0.99682029929116566791f,(float16_t)0.07968243797143019502f, -(float16_t)-0.99706007033948296225f,(float16_t)0.07662386139203168633f, -(float16_t)-0.99729045667869020697f,(float16_t)0.07356456359966773162f, -(float16_t)-0.99751145614030345410f,(float16_t)0.07050457338961385600f, -(float16_t)-0.99772306664419163624f,(float16_t)0.06744391956366417584f, -(float16_t)-0.99792528619859599548f,(float16_t)0.06438263092985770097f, -(float16_t)-0.99811811290014917919f,(float16_t)0.06132073630220848809f, -(float16_t)-0.99830154493389289261f,(float16_t)0.05825826450043579408f, -(float16_t)-0.99847558057329477421f,(float16_t)0.05519524434969009380f, -(float16_t)-0.99864021818026516009f,(float16_t)0.05213170468028359428f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)-0.99894129318685687124f,(float16_t)0.04600318213091470626f, -(float16_t)-0.99907772775264536147f,(float16_t)0.04293825693494102147f, -(float16_t)-0.99920475861836388631f,(float16_t)0.03987292758774012985f, -(float16_t)-0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)-0.99943060455546173237f,(float16_t)0.03374117185137770480f, -(float16_t)-0.99952941750109314256f,(float16_t)0.03067480317663686534f, -(float16_t)-0.99961882249517863830f,(float16_t)0.02760814577896565994f, -(float16_t)-0.99969881869620424997f,(float16_t)0.02454122852291232629f, -(float16_t)-0.99976940535121527898f,(float16_t)0.02147408027546966747f, -(float16_t)-0.99983058179582340319f,(float16_t)0.01840672990580510121f, -(float16_t)-0.99988234745421256111f,(float16_t)0.01533920628498806026f, -(float16_t)-0.99992470183914450299f,(float16_t)0.01227153828572000692f, -(float16_t)-0.99995764455196389786f,(float16_t)0.00920375478206002066f, -(float16_t)-0.99998117528260110909f,(float16_t)0.00613588464915479880f, -(float16_t)-0.99999529380957619118f,(float16_t)0.00306795676296597701f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99992470183914450299f,(float16_t)0.01227153828571992539f, -(float16_t)0.99969881869620424997f,(float16_t)0.02454122852291228812f, -(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99811811290014917919f,(float16_t)0.06132073630220857829f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99631261218277800129f,(float16_t)0.08579731234443989385f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99247953459870996706f,(float16_t)0.12241067519921619566f, -(float16_t)0.99090263542778000971f,(float16_t)0.13458070850712616773f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98730141815785843473f,(float16_t)0.15885814333386144570f, 
-(float16_t)0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97831737071962765473f,(float16_t)0.20711137619221856032f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97293995220556017678f,(float16_t)0.23105810828067110951f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96377606579543984022f,(float16_t)0.26671275747489836538f, -(float16_t)0.96043051941556578655f,(float16_t)0.27851968938505305973f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95330604035419386211f,(float16_t)0.30200594931922808417f, -(float16_t)0.94952818059303667475f,(float16_t)0.31368174039889151761f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.93733901191257495977f,(float16_t)0.34841868024943456472f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.92850608047321558924f,(float16_t)0.37131719395183754306f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91420975570353069095f,(float16_t)0.40524131400498986100f, -(float16_t)0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.89867446569395381673f,(float16_t)0.43861623853852765853f, -(float16_t)0.89322430119551532446f,(float16_t)0.44961132965460653965f, -(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.87607009419540660122f,(float16_t)0.48218377207912271887f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)0.83822470555483807875f,(float16_t)0.54532498842204646383f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.82458930278502529099f,(float16_t)0.56573181078361312046f, -(float16_t)0.81758481315158371139f,(float16_t)0.57580819141784533866f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.79583690460888356633f,(float16_t)0.60551104140432554512f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.75720884650648456748f,(float16_t)0.65317284295377675551f, -(float16_t)0.74913639452345937020f,(float16_t)0.66241577759017178373f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.73265427167241281570f,(float16_t)0.68060099779545302212f, -(float16_t)0.72424708295146700276f,(float16_t)0.68954054473706682948f, 
-(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.69837624940897291559f,(float16_t)0.71573082528381859468f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.65317284295377686654f,(float16_t)0.75720884650648456748f, -(float16_t)0.64383154288979149715f,(float16_t)0.76516726562245895860f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.62485948814238645443f,(float16_t)0.78073722857209448822f, -(float16_t)0.61523159058062681925f,(float16_t)0.78834642762660622761f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.56573181078361323149f,(float16_t)0.82458930278502529099f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.53499761988709726435f,(float16_t)0.84485356524970700587f, -(float16_t)0.52458968267846883826f,(float16_t)0.85135519310526519554f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.50353838372571757542f,(float16_t)0.86397285612158669643f, -(float16_t)0.49289819222978409341f,(float16_t)0.87008699110871134952f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.46053871095824000514f,(float16_t)0.88763962040285393496f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.43861623853852771404f,(float16_t)0.89867446569395381673f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.40524131400498986100f,(float16_t)0.91420975570353069095f, -(float16_t)0.39399204006104809883f,(float16_t)0.91911385169005777040f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.37131719395183759858f,(float16_t)0.92850608047321558924f, -(float16_t)0.35989503653498827740f,(float16_t)0.93299279883473884567f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.32531029216226298173f,(float16_t)0.94560732538052127971f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.30200594931922819519f,(float16_t)0.95330604035419375109f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.26671275747489842090f,(float16_t)0.96377606579543984022f, -(float16_t)0.25486565960451462720f,(float16_t)0.96697647104485207059f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.23105810828067127605f,(float16_t)0.97293995220556006576f, -(float16_t)0.21910124015686976984f,(float16_t)0.97570213003852857003f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, 
-(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.18303988795514106180f,(float16_t)0.98310548743121628501f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.15885814333386139019f,(float16_t)0.98730141815785843473f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.12241067519921627893f,(float16_t)0.99247953459870996706f, -(float16_t)0.11022220729388318428f,(float16_t)0.99390697000235606051f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.08579731234443987997f,(float16_t)0.99631261218277800129f, -(float16_t)0.07356456359966745406f,(float16_t)0.99729045667869020697f, -(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.03680722294135899131f,(float16_t)0.99932238458834954375f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.01227153828571994447f,(float16_t)0.99992470183914450299f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.01227153828571982304f,(float16_t)0.99992470183914450299f, -(float16_t)-0.02454122852291214241f,(float16_t)0.99969881869620424997f, -(float16_t)-0.03680722294135886641f,(float16_t)0.99932238458834954375f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.06132073630220852972f,(float16_t)0.99811811290014917919f, -(float16_t)-0.07356456359966732916f,(float16_t)0.99729045667869020697f, -(float16_t)-0.08579731234443975507f,(float16_t)0.99631261218277800129f, -(float16_t)-0.09801714032956064526f,(float16_t)0.99518472667219692873f, -(float16_t)-0.11022220729388305938f,(float16_t)0.99390697000235606051f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.13458070850712611222f,(float16_t)0.99090263542778000971f, -(float16_t)-0.14673047445536163691f,(float16_t)0.98917650996478101444f, -(float16_t)-0.15885814333386127917f,(float16_t)0.98730141815785843473f, -(float16_t)-0.17096188876030124493f,(float16_t)0.98527764238894122162f, -(float16_t)-0.18303988795514092303f,(float16_t)0.98310548743121628501f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.20711137619221844930f,(float16_t)0.97831737071962765473f, -(float16_t)-0.21910124015686965881f,(float16_t)0.97570213003852857003f, -(float16_t)-0.23105810828067113727f,(float16_t)0.97293995220556017678f, -(float16_t)-0.24298017990326387094f,(float16_t)0.97003125319454397424f, -(float16_t)-0.25486565960451451618f,(float16_t)0.96697647104485207059f, -(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.27851968938505294870f,(float16_t)0.96043051941556589757f, -(float16_t)-0.29028467725446216452f,(float16_t)0.95694033573220893540f, -(float16_t)-0.30200594931922808417f,(float16_t)0.95330604035419386211f, -(float16_t)-0.31368174039889140658f,(float16_t)0.94952818059303667475f, -(float16_t)-0.32531029216226287071f,(float16_t)0.94560732538052139073f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.34841868024943439819f,(float16_t)0.93733901191257495977f, -(float16_t)-0.35989503653498816638f,(float16_t)0.93299279883473884567f, -(float16_t)-0.37131719395183748755f,(float16_t)0.92850608047321558924f, 
-(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.39399204006104798781f,(float16_t)0.91911385169005777040f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.41642956009763698599f,(float16_t)0.90916798309052249127f, -(float16_t)-0.42755509343028186287f,(float16_t)0.90398929312344344922f, -(float16_t)-0.43861623853852738097f,(float16_t)0.89867446569395392775f, -(float16_t)-0.44961132965460670619f,(float16_t)0.89322430119551521344f, -(float16_t)-0.46053871095824006066f,(float16_t)0.88763962040285393496f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.48218377207912271887f,(float16_t)0.87607009419540660122f, -(float16_t)-0.49289819222978398239f,(float16_t)0.87008699110871146054f, -(float16_t)-0.50353838372571746440f,(float16_t)0.86397285612158680745f, -(float16_t)-0.51410274419322155026f,(float16_t)0.85772861000027211809f, -(float16_t)-0.52458968267846872724f,(float16_t)0.85135519310526519554f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, -(float16_t)-0.54532498842204624179f,(float16_t)0.83822470555483818977f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.56573181078361323149f,(float16_t)0.82458930278502517996f, -(float16_t)-0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)-0.58579785745643886408f,(float16_t)0.81045719825259476821f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.60551104140432543410f,(float16_t)0.79583690460888356633f, -(float16_t)-0.61523159058062670823f,(float16_t)0.78834642762660633863f, -(float16_t)-0.62485948814238623239f,(float16_t)0.78073722857209459924f, -(float16_t)-0.63439328416364537677f,(float16_t)0.77301045336273710440f, -(float16_t)-0.64383154288979127511f,(float16_t)0.76516726562245906962f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.66241577759017189475f,(float16_t)0.74913639452345925918f, -(float16_t)-0.67155895484701844111f,(float16_t)0.74095112535495899486f, -(float16_t)-0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)-0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)-0.69837624940897280457f,(float16_t)0.71573082528381870571f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.71573082528381859468f,(float16_t)0.69837624940897291559f, -(float16_t)-0.72424708295146678072f,(float16_t)0.68954054473706705153f, -(float16_t)-0.73265427167241270467f,(float16_t)0.68060099779545324417f, -(float16_t)-0.74095112535495888384f,(float16_t)0.67155895484701855214f, -(float16_t)-0.74913639452345914815f,(float16_t)0.66241577759017200577f, -(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)-0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)-0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)-0.78834642762660622761f,(float16_t)0.61523159058062693028f, -(float16_t)-0.79583690460888345530f,(float16_t)0.60551104140432565615f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.81045719825259465718f,(float16_t)0.58579785745643897510f, -(float16_t)-0.81758481315158360037f,(float16_t)0.57580819141784544968f, -(float16_t)-0.82458930278502506894f,(float16_t)0.56573181078361345353f, 
-(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.83822470555483807875f,(float16_t)0.54532498842204635281f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)-0.85772861000027200706f,(float16_t)0.51410274419322177231f, -(float16_t)-0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)-0.87008699110871134952f,(float16_t)0.49289819222978414892f, -(float16_t)-0.87607009419540649020f,(float16_t)0.48218377207912288540f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.88763962040285382393f,(float16_t)0.46053871095824022719f, -(float16_t)-0.89322430119551521344f,(float16_t)0.44961132965460687272f, -(float16_t)-0.89867446569395392775f,(float16_t)0.43861623853852754751f, -(float16_t)-0.90398929312344333820f,(float16_t)0.42755509343028202940f, -(float16_t)-0.90916798309052238025f,(float16_t)0.41642956009763715253f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, -(float16_t)-0.91911385169005777040f,(float16_t)0.39399204006104815434f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.92850608047321547822f,(float16_t)0.37131719395183770960f, -(float16_t)-0.93299279883473884567f,(float16_t)0.35989503653498833291f, -(float16_t)-0.93733901191257484875f,(float16_t)0.34841868024943478677f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.94560732538052116869f,(float16_t)0.32531029216226325929f, -(float16_t)-0.94952818059303667475f,(float16_t)0.31368174039889140658f, -(float16_t)-0.95330604035419386211f,(float16_t)0.30200594931922802866f, -(float16_t)-0.95694033573220882438f,(float16_t)0.29028467725446238656f, -(float16_t)-0.96043051941556578655f,(float16_t)0.27851968938505317075f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.96697647104485207059f,(float16_t)0.25486565960451468271f, -(float16_t)-0.97003125319454397424f,(float16_t)0.24298017990326406523f, -(float16_t)-0.97293995220556006576f,(float16_t)0.23105810828067133156f, -(float16_t)-0.97570213003852845901f,(float16_t)0.21910124015687004739f, -(float16_t)-0.97831737071962754371f,(float16_t)0.20711137619221883788f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98310548743121628501f,(float16_t)0.18303988795514089527f, -(float16_t)-0.98527764238894122162f,(float16_t)0.17096188876030121717f, -(float16_t)-0.98730141815785843473f,(float16_t)0.15885814333386147346f, -(float16_t)-0.98917650996478101444f,(float16_t)0.14673047445536180344f, -(float16_t)-0.99090263542778000971f,(float16_t)0.13458070850712627875f, -(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, -(float16_t)-0.99390697000235606051f,(float16_t)0.11022220729388323979f, -(float16_t)-0.99518472667219681771f,(float16_t)0.09801714032956082567f, -(float16_t)-0.99631261218277800129f,(float16_t)0.08579731234444015753f, -(float16_t)-0.99729045667869020697f,(float16_t)0.07356456359966773162f, -(float16_t)-0.99811811290014917919f,(float16_t)0.06132073630220848809f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)-0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)-0.99969881869620424997f,(float16_t)0.02454122852291232629f, -(float16_t)-0.99992470183914450299f,(float16_t)0.01227153828572000692f, 
-(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99879545620517240501f,(float16_t)0.04906767432741801493f, -(float16_t)0.99518472667219692873f,(float16_t)0.09801714032956060363f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.97003125319454397424f,(float16_t)0.24298017990326387094f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.94154406518302080631f,(float16_t)0.33688985339222005111f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.88192126434835504956f,(float16_t)0.47139673682599764204f, -(float16_t)0.85772861000027211809f,(float16_t)0.51410274419322166128f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.80320753148064494287f,(float16_t)0.59569930449243335691f, -(float16_t)0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.67155895484701833009f,(float16_t)0.74095112535495910588f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.59569930449243346793f,(float16_t)0.80320753148064483184f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.47139673682599780857f,(float16_t)0.88192126434835493853f, -(float16_t)0.42755509343028219593f,(float16_t)0.90398929312344333820f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.33688985339222005111f,(float16_t)0.94154406518302080631f, -(float16_t)0.29028467725446233105f,(float16_t)0.95694033573220893540f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.14673047445536174793f,(float16_t)0.98917650996478101444f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.04906767432741812596f,(float16_t)0.99879545620517240501f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.09801714032956064526f,(float16_t)0.99518472667219692873f, -(float16_t)-0.14673047445536163691f,(float16_t)0.98917650996478101444f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.24298017990326387094f,(float16_t)0.97003125319454397424f, -(float16_t)-0.29028467725446216452f,(float16_t)0.95694033573220893540f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.42755509343028186287f,(float16_t)0.90398929312344344922f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.51410274419322155026f,(float16_t)0.85772861000027211809f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.63439328416364537677f,(float16_t)0.77301045336273710440f, -(float16_t)-0.67155895484701844111f,(float16_t)0.74095112535495899486f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, 
-(float16_t)-0.74095112535495888384f,(float16_t)0.67155895484701855214f, -(float16_t)-0.77301045336273699338f,(float16_t)0.63439328416364548779f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.85772861000027200706f,(float16_t)0.51410274419322177231f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.90398929312344333820f,(float16_t)0.42755509343028202940f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.95694033573220882438f,(float16_t)0.29028467725446238656f, -(float16_t)-0.97003125319454397424f,(float16_t)0.24298017990326406523f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98917650996478101444f,(float16_t)0.14673047445536180344f, -(float16_t)-0.99518472667219681771f,(float16_t)0.09801714032956082567f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.98078528040323043058f,(float16_t)0.19509032201612824808f, -(float16_t)0.92387953251128673848f,(float16_t)0.38268343236508978178f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.55557023301960228867f,(float16_t)0.83146961230254523567f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.19509032201612833135f,(float16_t)0.98078528040323043058f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.38268343236508972627f,(float16_t)0.92387953251128673848f, -(float16_t)-0.55557023301960195560f,(float16_t)0.83146961230254534669f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.83146961230254534669f,(float16_t)0.55557023301960217765f, -(float16_t)-0.92387953251128673848f,(float16_t)0.38268343236508989280f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.70710678118654757274f,(float16_t)0.70710678118654757274f, -(float16_t)0.00000000000000006123f,(float16_t)1.00000000000000000000f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0030670166016f, +(float16_t)1.0000000000000f,(float16_t)0.0061340332031f, +(float16_t)1.0000000000000f,(float16_t)0.0092010498047f, +(float16_t)1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)1.0000000000000f,(float16_t)0.0153427124023f, +(float16_t)1.0000000000000f,(float16_t)0.0184020996094f, +(float16_t)1.0000000000000f,(float16_t)0.0214691162109f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9995117187500f,(float16_t)0.0276031494141f, +(float16_t)0.9995117187500f,(float16_t)0.0306701660156f, +(float16_t)0.9995117187500f,(float16_t)0.0337524414062f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)0.9990234375000f,(float16_t)0.0398864746094f, +(float16_t)0.9990234375000f,(float16_t)0.0429382324219f, +(float16_t)0.9990234375000f,(float16_t)0.0459899902344f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, 
+(float16_t)0.9985351562500f,(float16_t)0.0521240234375f, +(float16_t)0.9985351562500f,(float16_t)0.0552062988281f, +(float16_t)0.9985351562500f,(float16_t)0.0582580566406f, +(float16_t)0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)0.9980468750000f,(float16_t)0.0643920898438f, +(float16_t)0.9975585937500f,(float16_t)0.0674438476562f, +(float16_t)0.9975585937500f,(float16_t)0.0704956054688f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9970703125000f,(float16_t)0.0765991210938f, +(float16_t)0.9965820312500f,(float16_t)0.0797119140625f, +(float16_t)0.9965820312500f,(float16_t)0.0827636718750f, +(float16_t)0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)0.9960937500000f,(float16_t)0.0888671875000f, +(float16_t)0.9956054687500f,(float16_t)0.0919189453125f, +(float16_t)0.9956054687500f,(float16_t)0.0949707031250f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9951171875000f,(float16_t)0.1010742187500f, +(float16_t)0.9946289062500f,(float16_t)0.1041259765625f, +(float16_t)0.9941406250000f,(float16_t)0.1071777343750f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9936523437500f,(float16_t)0.1132812500000f, +(float16_t)0.9931640625000f,(float16_t)0.1163330078125f, +(float16_t)0.9926757812500f,(float16_t)0.1193847656250f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9921875000000f,(float16_t)0.1254882812500f, +(float16_t)0.9916992187500f,(float16_t)0.1285400390625f, +(float16_t)0.9912109375000f,(float16_t)0.1315917968750f, +(float16_t)0.9907226562500f,(float16_t)0.1345214843750f, +(float16_t)0.9907226562500f,(float16_t)0.1375732421875f, +(float16_t)0.9902343750000f,(float16_t)0.1406250000000f, +(float16_t)0.9897460937500f,(float16_t)0.1436767578125f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9887695312500f,(float16_t)0.1497802734375f, +(float16_t)0.9882812500000f,(float16_t)0.1528320312500f, +(float16_t)0.9877929687500f,(float16_t)0.1558837890625f, +(float16_t)0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)0.9868164062500f,(float16_t)0.1618652343750f, +(float16_t)0.9863281250000f,(float16_t)0.1649169921875f, +(float16_t)0.9858398437500f,(float16_t)0.1679687500000f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9848632812500f,(float16_t)0.1739501953125f, +(float16_t)0.9843750000000f,(float16_t)0.1770019531250f, +(float16_t)0.9838867187500f,(float16_t)0.1800537109375f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9824218750000f,(float16_t)0.1860351562500f, +(float16_t)0.9819335937500f,(float16_t)0.1890869140625f, +(float16_t)0.9814453125000f,(float16_t)0.1921386718750f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9799804687500f,(float16_t)0.1981201171875f, +(float16_t)0.9794921875000f,(float16_t)0.2010498046875f, +(float16_t)0.9790039062500f,(float16_t)0.2041015625000f, +(float16_t)0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)0.9775390625000f,(float16_t)0.2100830078125f, +(float16_t)0.9770507812500f,(float16_t)0.2131347656250f, +(float16_t)0.9765625000000f,(float16_t)0.2160644531250f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9750976562500f,(float16_t)0.2220458984375f, +(float16_t)0.9741210937500f,(float16_t)0.2250976562500f, +(float16_t)0.9736328125000f,(float16_t)0.2280273437500f, +(float16_t)0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)0.9721679687500f,(float16_t)0.2340087890625f, 
+(float16_t)0.9716796875000f,(float16_t)0.2370605468750f, +(float16_t)0.9707031250000f,(float16_t)0.2399902343750f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9692382812500f,(float16_t)0.2459716796875f, +(float16_t)0.9687500000000f,(float16_t)0.2489013671875f, +(float16_t)0.9677734375000f,(float16_t)0.2519531250000f, +(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9663085937500f,(float16_t)0.2578125000000f, +(float16_t)0.9653320312500f,(float16_t)0.2607421875000f, +(float16_t)0.9643554687500f,(float16_t)0.2636718750000f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9628906250000f,(float16_t)0.2697753906250f, +(float16_t)0.9619140625000f,(float16_t)0.2727050781250f, +(float16_t)0.9614257812500f,(float16_t)0.2756347656250f, +(float16_t)0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)0.9594726562500f,(float16_t)0.2814941406250f, +(float16_t)0.9584960937500f,(float16_t)0.2844238281250f, +(float16_t)0.9580078125000f,(float16_t)0.2873535156250f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9560546875000f,(float16_t)0.2932128906250f, +(float16_t)0.9550781250000f,(float16_t)0.2961425781250f, +(float16_t)0.9541015625000f,(float16_t)0.2990722656250f, +(float16_t)0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)0.9521484375000f,(float16_t)0.3049316406250f, +(float16_t)0.9516601562500f,(float16_t)0.3078613281250f, +(float16_t)0.9506835937500f,(float16_t)0.3107910156250f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9487304687500f,(float16_t)0.3166503906250f, +(float16_t)0.9477539062500f,(float16_t)0.3195800781250f, +(float16_t)0.9467773437500f,(float16_t)0.3225097656250f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9448242187500f,(float16_t)0.3281250000000f, +(float16_t)0.9433593750000f,(float16_t)0.3310546875000f, +(float16_t)0.9423828125000f,(float16_t)0.3339843750000f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9404296875000f,(float16_t)0.3398437500000f, +(float16_t)0.9394531250000f,(float16_t)0.3427734375000f, +(float16_t)0.9384765625000f,(float16_t)0.3454589843750f, +(float16_t)0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)0.9360351562500f,(float16_t)0.3513183593750f, +(float16_t)0.9350585937500f,(float16_t)0.3542480468750f, +(float16_t)0.9340820312500f,(float16_t)0.3569335937500f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9316406250000f,(float16_t)0.3627929687500f, +(float16_t)0.9306640625000f,(float16_t)0.3657226562500f, +(float16_t)0.9296875000000f,(float16_t)0.3684082031250f, +(float16_t)0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)0.9272460937500f,(float16_t)0.3742675781250f, +(float16_t)0.9262695312500f,(float16_t)0.3769531250000f, +(float16_t)0.9252929687500f,(float16_t)0.3798828125000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9228515625000f,(float16_t)0.3854980468750f, +(float16_t)0.9213867187500f,(float16_t)0.3884277343750f, +(float16_t)0.9204101562500f,(float16_t)0.3911132812500f, +(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)0.9179687500000f,(float16_t)0.3967285156250f, +(float16_t)0.9165039062500f,(float16_t)0.3996582031250f, +(float16_t)0.9155273437500f,(float16_t)0.4023437500000f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9130859375000f,(float16_t)0.4079589843750f, +(float16_t)0.9116210937500f,(float16_t)0.4108886718750f, 
+(float16_t)0.9106445312500f,(float16_t)0.4135742187500f, +(float16_t)0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)0.9077148437500f,(float16_t)0.4191894531250f, +(float16_t)0.9067382812500f,(float16_t)0.4221191406250f, +(float16_t)0.9052734375000f,(float16_t)0.4248046875000f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.9028320312500f,(float16_t)0.4304199218750f, +(float16_t)0.9013671875000f,(float16_t)0.4331054687500f, +(float16_t)0.8999023437500f,(float16_t)0.4357910156250f, +(float16_t)0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)0.8974609375000f,(float16_t)0.4414062500000f, +(float16_t)0.8959960937500f,(float16_t)0.4440917968750f, +(float16_t)0.8945312500000f,(float16_t)0.4467773437500f, +(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8916015625000f,(float16_t)0.4523925781250f, +(float16_t)0.8906250000000f,(float16_t)0.4550781250000f, +(float16_t)0.8891601562500f,(float16_t)0.4577636718750f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8862304687500f,(float16_t)0.4633789062500f, +(float16_t)0.8847656250000f,(float16_t)0.4660644531250f, +(float16_t)0.8833007812500f,(float16_t)0.4687500000000f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8803710937500f,(float16_t)0.4741210937500f, +(float16_t)0.8789062500000f,(float16_t)0.4768066406250f, +(float16_t)0.8774414062500f,(float16_t)0.4794921875000f, +(float16_t)0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)0.8745117187500f,(float16_t)0.4848632812500f, +(float16_t)0.8730468750000f,(float16_t)0.4875488281250f, +(float16_t)0.8715820312500f,(float16_t)0.4902343750000f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8686523437500f,(float16_t)0.4956054687500f, +(float16_t)0.8671875000000f,(float16_t)0.4982910156250f, +(float16_t)0.8657226562500f,(float16_t)0.5009765625000f, +(float16_t)0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)0.8623046875000f,(float16_t)0.5063476562500f, +(float16_t)0.8608398437500f,(float16_t)0.5087890625000f, +(float16_t)0.8593750000000f,(float16_t)0.5112304687500f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8559570312500f,(float16_t)0.5166015625000f, +(float16_t)0.8544921875000f,(float16_t)0.5195312500000f, +(float16_t)0.8530273437500f,(float16_t)0.5219726562500f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8496093750000f,(float16_t)0.5273437500000f, +(float16_t)0.8481445312500f,(float16_t)0.5297851562500f, +(float16_t)0.8466796875000f,(float16_t)0.5322265625000f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8432617187500f,(float16_t)0.5375976562500f, +(float16_t)0.8417968750000f,(float16_t)0.5400390625000f, +(float16_t)0.8398437500000f,(float16_t)0.5429687500000f, +(float16_t)0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)0.8364257812500f,(float16_t)0.5478515625000f, +(float16_t)0.8349609375000f,(float16_t)0.5502929687500f, +(float16_t)0.8330078125000f,(float16_t)0.5532226562500f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8295898437500f,(float16_t)0.5581054687500f, +(float16_t)0.8281250000000f,(float16_t)0.5605468750000f, +(float16_t)0.8261718750000f,(float16_t)0.5629882812500f, +(float16_t)0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)0.8227539062500f,(float16_t)0.5683593750000f, +(float16_t)0.8212890625000f,(float16_t)0.5708007812500f, +(float16_t)0.8193359375000f,(float16_t)0.5732421875000f, 
+(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)0.8159179687500f,(float16_t)0.5781250000000f, +(float16_t)0.8139648437500f,(float16_t)0.5810546875000f, +(float16_t)0.8120117187500f,(float16_t)0.5834960937500f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.8085937500000f,(float16_t)0.5883789062500f, +(float16_t)0.8066406250000f,(float16_t)0.5908203125000f, +(float16_t)0.8051757812500f,(float16_t)0.5932617187500f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.8012695312500f,(float16_t)0.5981445312500f, +(float16_t)0.7993164062500f,(float16_t)0.6005859375000f, +(float16_t)0.7978515625000f,(float16_t)0.6030273437500f, +(float16_t)0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)0.7939453125000f,(float16_t)0.6079101562500f, +(float16_t)0.7919921875000f,(float16_t)0.6103515625000f, +(float16_t)0.7900390625000f,(float16_t)0.6127929687500f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7866210937500f,(float16_t)0.6176757812500f, +(float16_t)0.7846679687500f,(float16_t)0.6201171875000f, +(float16_t)0.7827148437500f,(float16_t)0.6225585937500f, +(float16_t)0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)0.7788085937500f,(float16_t)0.6274414062500f, +(float16_t)0.7768554687500f,(float16_t)0.6293945312500f, +(float16_t)0.7749023437500f,(float16_t)0.6318359375000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7709960937500f,(float16_t)0.6367187500000f, +(float16_t)0.7690429687500f,(float16_t)0.6391601562500f, +(float16_t)0.7670898437500f,(float16_t)0.6416015625000f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7631835937500f,(float16_t)0.6459960937500f, +(float16_t)0.7612304687500f,(float16_t)0.6484375000000f, +(float16_t)0.7592773437500f,(float16_t)0.6508789062500f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7553710937500f,(float16_t)0.6552734375000f, +(float16_t)0.7534179687500f,(float16_t)0.6577148437500f, +(float16_t)0.7509765625000f,(float16_t)0.6601562500000f, +(float16_t)0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)0.7470703125000f,(float16_t)0.6645507812500f, +(float16_t)0.7451171875000f,(float16_t)0.6669921875000f, +(float16_t)0.7431640625000f,(float16_t)0.6694335937500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7387695312500f,(float16_t)0.6738281250000f, +(float16_t)0.7368164062500f,(float16_t)0.6762695312500f, +(float16_t)0.7348632812500f,(float16_t)0.6782226562500f, +(float16_t)0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)0.7304687500000f,(float16_t)0.6826171875000f, +(float16_t)0.7285156250000f,(float16_t)0.6850585937500f, +(float16_t)0.7265625000000f,(float16_t)0.6875000000000f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7221679687500f,(float16_t)0.6918945312500f, +(float16_t)0.7202148437500f,(float16_t)0.6938476562500f, +(float16_t)0.7177734375000f,(float16_t)0.6962890625000f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7133789062500f,(float16_t)0.7006835937500f, +(float16_t)0.7114257812500f,(float16_t)0.7026367187500f, +(float16_t)0.7094726562500f,(float16_t)0.7050781250000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.7050781250000f,(float16_t)0.7094726562500f, +(float16_t)0.7026367187500f,(float16_t)0.7114257812500f, +(float16_t)0.7006835937500f,(float16_t)0.7133789062500f, +(float16_t)0.6982421875000f,(float16_t)0.7158203125000f, 
+(float16_t)0.6962890625000f,(float16_t)0.7177734375000f, +(float16_t)0.6938476562500f,(float16_t)0.7202148437500f, +(float16_t)0.6918945312500f,(float16_t)0.7221679687500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6875000000000f,(float16_t)0.7265625000000f, +(float16_t)0.6850585937500f,(float16_t)0.7285156250000f, +(float16_t)0.6826171875000f,(float16_t)0.7304687500000f, +(float16_t)0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)0.6782226562500f,(float16_t)0.7348632812500f, +(float16_t)0.6762695312500f,(float16_t)0.7368164062500f, +(float16_t)0.6738281250000f,(float16_t)0.7387695312500f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6694335937500f,(float16_t)0.7431640625000f, +(float16_t)0.6669921875000f,(float16_t)0.7451171875000f, +(float16_t)0.6645507812500f,(float16_t)0.7470703125000f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6601562500000f,(float16_t)0.7509765625000f, +(float16_t)0.6577148437500f,(float16_t)0.7534179687500f, +(float16_t)0.6552734375000f,(float16_t)0.7553710937500f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6508789062500f,(float16_t)0.7592773437500f, +(float16_t)0.6484375000000f,(float16_t)0.7612304687500f, +(float16_t)0.6459960937500f,(float16_t)0.7631835937500f, +(float16_t)0.6440429687500f,(float16_t)0.7651367187500f, +(float16_t)0.6416015625000f,(float16_t)0.7670898437500f, +(float16_t)0.6391601562500f,(float16_t)0.7690429687500f, +(float16_t)0.6367187500000f,(float16_t)0.7709960937500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6318359375000f,(float16_t)0.7749023437500f, +(float16_t)0.6293945312500f,(float16_t)0.7768554687500f, +(float16_t)0.6274414062500f,(float16_t)0.7788085937500f, +(float16_t)0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)0.6225585937500f,(float16_t)0.7827148437500f, +(float16_t)0.6201171875000f,(float16_t)0.7846679687500f, +(float16_t)0.6176757812500f,(float16_t)0.7866210937500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.6127929687500f,(float16_t)0.7900390625000f, +(float16_t)0.6103515625000f,(float16_t)0.7919921875000f, +(float16_t)0.6079101562500f,(float16_t)0.7939453125000f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.6030273437500f,(float16_t)0.7978515625000f, +(float16_t)0.6005859375000f,(float16_t)0.7993164062500f, +(float16_t)0.5981445312500f,(float16_t)0.8012695312500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5932617187500f,(float16_t)0.8051757812500f, +(float16_t)0.5908203125000f,(float16_t)0.8066406250000f, +(float16_t)0.5883789062500f,(float16_t)0.8085937500000f, +(float16_t)0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)0.5834960937500f,(float16_t)0.8120117187500f, +(float16_t)0.5810546875000f,(float16_t)0.8139648437500f, +(float16_t)0.5781250000000f,(float16_t)0.8159179687500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5732421875000f,(float16_t)0.8193359375000f, +(float16_t)0.5708007812500f,(float16_t)0.8212890625000f, +(float16_t)0.5683593750000f,(float16_t)0.8227539062500f, +(float16_t)0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)0.5629882812500f,(float16_t)0.8261718750000f, +(float16_t)0.5605468750000f,(float16_t)0.8281250000000f, +(float16_t)0.5581054687500f,(float16_t)0.8295898437500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5532226562500f,(float16_t)0.8330078125000f, 
+(float16_t)0.5502929687500f,(float16_t)0.8349609375000f, +(float16_t)0.5478515625000f,(float16_t)0.8364257812500f, +(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)0.5429687500000f,(float16_t)0.8398437500000f, +(float16_t)0.5400390625000f,(float16_t)0.8417968750000f, +(float16_t)0.5375976562500f,(float16_t)0.8432617187500f, +(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5322265625000f,(float16_t)0.8466796875000f, +(float16_t)0.5297851562500f,(float16_t)0.8481445312500f, +(float16_t)0.5273437500000f,(float16_t)0.8496093750000f, +(float16_t)0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)0.5219726562500f,(float16_t)0.8530273437500f, +(float16_t)0.5195312500000f,(float16_t)0.8544921875000f, +(float16_t)0.5166015625000f,(float16_t)0.8559570312500f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.5112304687500f,(float16_t)0.8593750000000f, +(float16_t)0.5087890625000f,(float16_t)0.8608398437500f, +(float16_t)0.5063476562500f,(float16_t)0.8623046875000f, +(float16_t)0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)0.5009765625000f,(float16_t)0.8657226562500f, +(float16_t)0.4982910156250f,(float16_t)0.8671875000000f, +(float16_t)0.4956054687500f,(float16_t)0.8686523437500f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4902343750000f,(float16_t)0.8715820312500f, +(float16_t)0.4875488281250f,(float16_t)0.8730468750000f, +(float16_t)0.4848632812500f,(float16_t)0.8745117187500f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4794921875000f,(float16_t)0.8774414062500f, +(float16_t)0.4768066406250f,(float16_t)0.8789062500000f, +(float16_t)0.4741210937500f,(float16_t)0.8803710937500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4687500000000f,(float16_t)0.8833007812500f, +(float16_t)0.4660644531250f,(float16_t)0.8847656250000f, +(float16_t)0.4633789062500f,(float16_t)0.8862304687500f, +(float16_t)0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)0.4577636718750f,(float16_t)0.8891601562500f, +(float16_t)0.4550781250000f,(float16_t)0.8906250000000f, +(float16_t)0.4523925781250f,(float16_t)0.8916015625000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4467773437500f,(float16_t)0.8945312500000f, +(float16_t)0.4440917968750f,(float16_t)0.8959960937500f, +(float16_t)0.4414062500000f,(float16_t)0.8974609375000f, +(float16_t)0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)0.4357910156250f,(float16_t)0.8999023437500f, +(float16_t)0.4331054687500f,(float16_t)0.9013671875000f, +(float16_t)0.4304199218750f,(float16_t)0.9028320312500f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4248046875000f,(float16_t)0.9052734375000f, +(float16_t)0.4221191406250f,(float16_t)0.9067382812500f, +(float16_t)0.4191894531250f,(float16_t)0.9077148437500f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.4135742187500f,(float16_t)0.9106445312500f, +(float16_t)0.4108886718750f,(float16_t)0.9116210937500f, +(float16_t)0.4079589843750f,(float16_t)0.9130859375000f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.4023437500000f,(float16_t)0.9155273437500f, +(float16_t)0.3996582031250f,(float16_t)0.9165039062500f, +(float16_t)0.3967285156250f,(float16_t)0.9179687500000f, +(float16_t)0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)0.3911132812500f,(float16_t)0.9204101562500f, +(float16_t)0.3884277343750f,(float16_t)0.9213867187500f, 
+(float16_t)0.3854980468750f,(float16_t)0.9228515625000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3798828125000f,(float16_t)0.9252929687500f, +(float16_t)0.3769531250000f,(float16_t)0.9262695312500f, +(float16_t)0.3742675781250f,(float16_t)0.9272460937500f, +(float16_t)0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)0.3684082031250f,(float16_t)0.9296875000000f, +(float16_t)0.3657226562500f,(float16_t)0.9306640625000f, +(float16_t)0.3627929687500f,(float16_t)0.9316406250000f, +(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3569335937500f,(float16_t)0.9340820312500f, +(float16_t)0.3542480468750f,(float16_t)0.9350585937500f, +(float16_t)0.3513183593750f,(float16_t)0.9360351562500f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3454589843750f,(float16_t)0.9384765625000f, +(float16_t)0.3427734375000f,(float16_t)0.9394531250000f, +(float16_t)0.3398437500000f,(float16_t)0.9404296875000f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3339843750000f,(float16_t)0.9423828125000f, +(float16_t)0.3310546875000f,(float16_t)0.9433593750000f, +(float16_t)0.3281250000000f,(float16_t)0.9448242187500f, +(float16_t)0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)0.3225097656250f,(float16_t)0.9467773437500f, +(float16_t)0.3195800781250f,(float16_t)0.9477539062500f, +(float16_t)0.3166503906250f,(float16_t)0.9487304687500f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.3107910156250f,(float16_t)0.9506835937500f, +(float16_t)0.3078613281250f,(float16_t)0.9516601562500f, +(float16_t)0.3049316406250f,(float16_t)0.9521484375000f, +(float16_t)0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)0.2990722656250f,(float16_t)0.9541015625000f, +(float16_t)0.2961425781250f,(float16_t)0.9550781250000f, +(float16_t)0.2932128906250f,(float16_t)0.9560546875000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2873535156250f,(float16_t)0.9580078125000f, +(float16_t)0.2844238281250f,(float16_t)0.9584960937500f, +(float16_t)0.2814941406250f,(float16_t)0.9594726562500f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)0.2756347656250f,(float16_t)0.9614257812500f, +(float16_t)0.2727050781250f,(float16_t)0.9619140625000f, +(float16_t)0.2697753906250f,(float16_t)0.9628906250000f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2636718750000f,(float16_t)0.9643554687500f, +(float16_t)0.2607421875000f,(float16_t)0.9653320312500f, +(float16_t)0.2578125000000f,(float16_t)0.9663085937500f, +(float16_t)0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)0.2519531250000f,(float16_t)0.9677734375000f, +(float16_t)0.2489013671875f,(float16_t)0.9687500000000f, +(float16_t)0.2459716796875f,(float16_t)0.9692382812500f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2399902343750f,(float16_t)0.9707031250000f, +(float16_t)0.2370605468750f,(float16_t)0.9716796875000f, +(float16_t)0.2340087890625f,(float16_t)0.9721679687500f, +(float16_t)0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)0.2280273437500f,(float16_t)0.9736328125000f, +(float16_t)0.2250976562500f,(float16_t)0.9741210937500f, +(float16_t)0.2220458984375f,(float16_t)0.9750976562500f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.2160644531250f,(float16_t)0.9765625000000f, +(float16_t)0.2131347656250f,(float16_t)0.9770507812500f, +(float16_t)0.2100830078125f,(float16_t)0.9775390625000f, 
+(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)0.2041015625000f,(float16_t)0.9790039062500f, +(float16_t)0.2010498046875f,(float16_t)0.9794921875000f, +(float16_t)0.1981201171875f,(float16_t)0.9799804687500f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1921386718750f,(float16_t)0.9814453125000f, +(float16_t)0.1890869140625f,(float16_t)0.9819335937500f, +(float16_t)0.1860351562500f,(float16_t)0.9824218750000f, +(float16_t)0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)0.1800537109375f,(float16_t)0.9838867187500f, +(float16_t)0.1770019531250f,(float16_t)0.9843750000000f, +(float16_t)0.1739501953125f,(float16_t)0.9848632812500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1679687500000f,(float16_t)0.9858398437500f, +(float16_t)0.1649169921875f,(float16_t)0.9863281250000f, +(float16_t)0.1618652343750f,(float16_t)0.9868164062500f, +(float16_t)0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)0.1558837890625f,(float16_t)0.9877929687500f, +(float16_t)0.1528320312500f,(float16_t)0.9882812500000f, +(float16_t)0.1497802734375f,(float16_t)0.9887695312500f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1436767578125f,(float16_t)0.9897460937500f, +(float16_t)0.1406250000000f,(float16_t)0.9902343750000f, +(float16_t)0.1375732421875f,(float16_t)0.9907226562500f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1315917968750f,(float16_t)0.9912109375000f, +(float16_t)0.1285400390625f,(float16_t)0.9916992187500f, +(float16_t)0.1254882812500f,(float16_t)0.9921875000000f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.1193847656250f,(float16_t)0.9926757812500f, +(float16_t)0.1163330078125f,(float16_t)0.9931640625000f, +(float16_t)0.1132812500000f,(float16_t)0.9936523437500f, +(float16_t)0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)0.1071777343750f,(float16_t)0.9941406250000f, +(float16_t)0.1041259765625f,(float16_t)0.9946289062500f, +(float16_t)0.1010742187500f,(float16_t)0.9951171875000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0949707031250f,(float16_t)0.9956054687500f, +(float16_t)0.0919189453125f,(float16_t)0.9956054687500f, +(float16_t)0.0888671875000f,(float16_t)0.9960937500000f, +(float16_t)0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)0.0827636718750f,(float16_t)0.9965820312500f, +(float16_t)0.0797119140625f,(float16_t)0.9965820312500f, +(float16_t)0.0765991210938f,(float16_t)0.9970703125000f, +(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0704956054688f,(float16_t)0.9975585937500f, +(float16_t)0.0674438476562f,(float16_t)0.9975585937500f, +(float16_t)0.0643920898438f,(float16_t)0.9980468750000f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)0.0582580566406f,(float16_t)0.9985351562500f, +(float16_t)0.0552062988281f,(float16_t)0.9985351562500f, +(float16_t)0.0521240234375f,(float16_t)0.9985351562500f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0459899902344f,(float16_t)0.9990234375000f, +(float16_t)0.0429382324219f,(float16_t)0.9990234375000f, +(float16_t)0.0398864746094f,(float16_t)0.9990234375000f, +(float16_t)0.0368041992188f,(float16_t)0.9995117187500f, +(float16_t)0.0337524414062f,(float16_t)0.9995117187500f, +(float16_t)0.0306701660156f,(float16_t)0.9995117187500f, +(float16_t)0.0276031494141f,(float16_t)0.9995117187500f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, 
+(float16_t)0.0214691162109f,(float16_t)1.0000000000000f, +(float16_t)0.0184020996094f,(float16_t)1.0000000000000f, +(float16_t)0.0153427124023f,(float16_t)1.0000000000000f, +(float16_t)0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)0.0092010498047f,(float16_t)1.0000000000000f, +(float16_t)0.0061340332031f,(float16_t)1.0000000000000f, +(float16_t)0.0030670166016f,(float16_t)1.0000000000000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.0030670166016f,(float16_t)1.0000000000000f, +(float16_t)-0.0061340332031f,(float16_t)1.0000000000000f, +(float16_t)-0.0092010498047f,(float16_t)1.0000000000000f, +(float16_t)-0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)-0.0153427124023f,(float16_t)1.0000000000000f, +(float16_t)-0.0184020996094f,(float16_t)1.0000000000000f, +(float16_t)-0.0214691162109f,(float16_t)1.0000000000000f, +(float16_t)-0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)-0.0276031494141f,(float16_t)0.9995117187500f, +(float16_t)-0.0306701660156f,(float16_t)0.9995117187500f, +(float16_t)-0.0337524414062f,(float16_t)0.9995117187500f, +(float16_t)-0.0368041992188f,(float16_t)0.9995117187500f, +(float16_t)-0.0398864746094f,(float16_t)0.9990234375000f, +(float16_t)-0.0429382324219f,(float16_t)0.9990234375000f, +(float16_t)-0.0459899902344f,(float16_t)0.9990234375000f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0521240234375f,(float16_t)0.9985351562500f, +(float16_t)-0.0552062988281f,(float16_t)0.9985351562500f, +(float16_t)-0.0582580566406f,(float16_t)0.9985351562500f, +(float16_t)-0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)-0.0643920898438f,(float16_t)0.9980468750000f, +(float16_t)-0.0674438476562f,(float16_t)0.9975585937500f, +(float16_t)-0.0704956054688f,(float16_t)0.9975585937500f, +(float16_t)-0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)-0.0765991210938f,(float16_t)0.9970703125000f, +(float16_t)-0.0797119140625f,(float16_t)0.9965820312500f, +(float16_t)-0.0827636718750f,(float16_t)0.9965820312500f, +(float16_t)-0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)-0.0888671875000f,(float16_t)0.9960937500000f, +(float16_t)-0.0919189453125f,(float16_t)0.9956054687500f, +(float16_t)-0.0949707031250f,(float16_t)0.9956054687500f, +(float16_t)-0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1010742187500f,(float16_t)0.9951171875000f, +(float16_t)-0.1041259765625f,(float16_t)0.9946289062500f, +(float16_t)-0.1071777343750f,(float16_t)0.9941406250000f, +(float16_t)-0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)-0.1132812500000f,(float16_t)0.9936523437500f, +(float16_t)-0.1163330078125f,(float16_t)0.9931640625000f, +(float16_t)-0.1193847656250f,(float16_t)0.9926757812500f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1254882812500f,(float16_t)0.9921875000000f, +(float16_t)-0.1285400390625f,(float16_t)0.9916992187500f, +(float16_t)-0.1315917968750f,(float16_t)0.9912109375000f, +(float16_t)-0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)-0.1375732421875f,(float16_t)0.9907226562500f, +(float16_t)-0.1406250000000f,(float16_t)0.9902343750000f, +(float16_t)-0.1436767578125f,(float16_t)0.9897460937500f, +(float16_t)-0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)-0.1497802734375f,(float16_t)0.9887695312500f, +(float16_t)-0.1528320312500f,(float16_t)0.9882812500000f, +(float16_t)-0.1558837890625f,(float16_t)0.9877929687500f, +(float16_t)-0.1588134765625f,(float16_t)0.9873046875000f, 
+(float16_t)-0.1618652343750f,(float16_t)0.9868164062500f, +(float16_t)-0.1649169921875f,(float16_t)0.9863281250000f, +(float16_t)-0.1679687500000f,(float16_t)0.9858398437500f, +(float16_t)-0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)-0.1739501953125f,(float16_t)0.9848632812500f, +(float16_t)-0.1770019531250f,(float16_t)0.9843750000000f, +(float16_t)-0.1800537109375f,(float16_t)0.9838867187500f, +(float16_t)-0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)-0.1860351562500f,(float16_t)0.9824218750000f, +(float16_t)-0.1890869140625f,(float16_t)0.9819335937500f, +(float16_t)-0.1921386718750f,(float16_t)0.9814453125000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.1981201171875f,(float16_t)0.9799804687500f, +(float16_t)-0.2010498046875f,(float16_t)0.9794921875000f, +(float16_t)-0.2041015625000f,(float16_t)0.9790039062500f, +(float16_t)-0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)-0.2100830078125f,(float16_t)0.9775390625000f, +(float16_t)-0.2131347656250f,(float16_t)0.9770507812500f, +(float16_t)-0.2160644531250f,(float16_t)0.9765625000000f, +(float16_t)-0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)-0.2220458984375f,(float16_t)0.9750976562500f, +(float16_t)-0.2250976562500f,(float16_t)0.9741210937500f, +(float16_t)-0.2280273437500f,(float16_t)0.9736328125000f, +(float16_t)-0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)-0.2340087890625f,(float16_t)0.9721679687500f, +(float16_t)-0.2370605468750f,(float16_t)0.9716796875000f, +(float16_t)-0.2399902343750f,(float16_t)0.9707031250000f, +(float16_t)-0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)-0.2459716796875f,(float16_t)0.9692382812500f, +(float16_t)-0.2489013671875f,(float16_t)0.9687500000000f, +(float16_t)-0.2519531250000f,(float16_t)0.9677734375000f, +(float16_t)-0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)-0.2578125000000f,(float16_t)0.9663085937500f, +(float16_t)-0.2607421875000f,(float16_t)0.9653320312500f, +(float16_t)-0.2636718750000f,(float16_t)0.9643554687500f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.2697753906250f,(float16_t)0.9628906250000f, +(float16_t)-0.2727050781250f,(float16_t)0.9619140625000f, +(float16_t)-0.2756347656250f,(float16_t)0.9614257812500f, +(float16_t)-0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)-0.2814941406250f,(float16_t)0.9594726562500f, +(float16_t)-0.2844238281250f,(float16_t)0.9584960937500f, +(float16_t)-0.2873535156250f,(float16_t)0.9580078125000f, +(float16_t)-0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)-0.2932128906250f,(float16_t)0.9560546875000f, +(float16_t)-0.2961425781250f,(float16_t)0.9550781250000f, +(float16_t)-0.2990722656250f,(float16_t)0.9541015625000f, +(float16_t)-0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)-0.3049316406250f,(float16_t)0.9521484375000f, +(float16_t)-0.3078613281250f,(float16_t)0.9516601562500f, +(float16_t)-0.3107910156250f,(float16_t)0.9506835937500f, +(float16_t)-0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)-0.3166503906250f,(float16_t)0.9487304687500f, +(float16_t)-0.3195800781250f,(float16_t)0.9477539062500f, +(float16_t)-0.3225097656250f,(float16_t)0.9467773437500f, +(float16_t)-0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)-0.3281250000000f,(float16_t)0.9448242187500f, +(float16_t)-0.3310546875000f,(float16_t)0.9433593750000f, +(float16_t)-0.3339843750000f,(float16_t)0.9423828125000f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, 
+(float16_t)-0.3398437500000f,(float16_t)0.9404296875000f, +(float16_t)-0.3427734375000f,(float16_t)0.9394531250000f, +(float16_t)-0.3454589843750f,(float16_t)0.9384765625000f, +(float16_t)-0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)-0.3513183593750f,(float16_t)0.9360351562500f, +(float16_t)-0.3542480468750f,(float16_t)0.9350585937500f, +(float16_t)-0.3569335937500f,(float16_t)0.9340820312500f, +(float16_t)-0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)-0.3627929687500f,(float16_t)0.9316406250000f, +(float16_t)-0.3657226562500f,(float16_t)0.9306640625000f, +(float16_t)-0.3684082031250f,(float16_t)0.9296875000000f, +(float16_t)-0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)-0.3742675781250f,(float16_t)0.9272460937500f, +(float16_t)-0.3769531250000f,(float16_t)0.9262695312500f, +(float16_t)-0.3798828125000f,(float16_t)0.9252929687500f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.3854980468750f,(float16_t)0.9228515625000f, +(float16_t)-0.3884277343750f,(float16_t)0.9213867187500f, +(float16_t)-0.3911132812500f,(float16_t)0.9204101562500f, +(float16_t)-0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)-0.3967285156250f,(float16_t)0.9179687500000f, +(float16_t)-0.3996582031250f,(float16_t)0.9165039062500f, +(float16_t)-0.4023437500000f,(float16_t)0.9155273437500f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4079589843750f,(float16_t)0.9130859375000f, +(float16_t)-0.4108886718750f,(float16_t)0.9116210937500f, +(float16_t)-0.4135742187500f,(float16_t)0.9106445312500f, +(float16_t)-0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)-0.4191894531250f,(float16_t)0.9077148437500f, +(float16_t)-0.4221191406250f,(float16_t)0.9067382812500f, +(float16_t)-0.4248046875000f,(float16_t)0.9052734375000f, +(float16_t)-0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)-0.4304199218750f,(float16_t)0.9028320312500f, +(float16_t)-0.4331054687500f,(float16_t)0.9013671875000f, +(float16_t)-0.4357910156250f,(float16_t)0.8999023437500f, +(float16_t)-0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)-0.4414062500000f,(float16_t)0.8974609375000f, +(float16_t)-0.4440917968750f,(float16_t)0.8959960937500f, +(float16_t)-0.4467773437500f,(float16_t)0.8945312500000f, +(float16_t)-0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)-0.4523925781250f,(float16_t)0.8916015625000f, +(float16_t)-0.4550781250000f,(float16_t)0.8906250000000f, +(float16_t)-0.4577636718750f,(float16_t)0.8891601562500f, +(float16_t)-0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)-0.4633789062500f,(float16_t)0.8862304687500f, +(float16_t)-0.4660644531250f,(float16_t)0.8847656250000f, +(float16_t)-0.4687500000000f,(float16_t)0.8833007812500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.4741210937500f,(float16_t)0.8803710937500f, +(float16_t)-0.4768066406250f,(float16_t)0.8789062500000f, +(float16_t)-0.4794921875000f,(float16_t)0.8774414062500f, +(float16_t)-0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)-0.4848632812500f,(float16_t)0.8745117187500f, +(float16_t)-0.4875488281250f,(float16_t)0.8730468750000f, +(float16_t)-0.4902343750000f,(float16_t)0.8715820312500f, +(float16_t)-0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)-0.4956054687500f,(float16_t)0.8686523437500f, +(float16_t)-0.4982910156250f,(float16_t)0.8671875000000f, +(float16_t)-0.5009765625000f,(float16_t)0.8657226562500f, +(float16_t)-0.5034179687500f,(float16_t)0.8637695312500f, 
+(float16_t)-0.5063476562500f,(float16_t)0.8623046875000f, +(float16_t)-0.5087890625000f,(float16_t)0.8608398437500f, +(float16_t)-0.5112304687500f,(float16_t)0.8593750000000f, +(float16_t)-0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)-0.5166015625000f,(float16_t)0.8559570312500f, +(float16_t)-0.5195312500000f,(float16_t)0.8544921875000f, +(float16_t)-0.5219726562500f,(float16_t)0.8530273437500f, +(float16_t)-0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)-0.5273437500000f,(float16_t)0.8496093750000f, +(float16_t)-0.5297851562500f,(float16_t)0.8481445312500f, +(float16_t)-0.5322265625000f,(float16_t)0.8466796875000f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5375976562500f,(float16_t)0.8432617187500f, +(float16_t)-0.5400390625000f,(float16_t)0.8417968750000f, +(float16_t)-0.5429687500000f,(float16_t)0.8398437500000f, +(float16_t)-0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)-0.5478515625000f,(float16_t)0.8364257812500f, +(float16_t)-0.5502929687500f,(float16_t)0.8349609375000f, +(float16_t)-0.5532226562500f,(float16_t)0.8330078125000f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.5581054687500f,(float16_t)0.8295898437500f, +(float16_t)-0.5605468750000f,(float16_t)0.8281250000000f, +(float16_t)-0.5629882812500f,(float16_t)0.8261718750000f, +(float16_t)-0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)-0.5683593750000f,(float16_t)0.8227539062500f, +(float16_t)-0.5708007812500f,(float16_t)0.8212890625000f, +(float16_t)-0.5732421875000f,(float16_t)0.8193359375000f, +(float16_t)-0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)-0.5781250000000f,(float16_t)0.8159179687500f, +(float16_t)-0.5810546875000f,(float16_t)0.8139648437500f, +(float16_t)-0.5834960937500f,(float16_t)0.8120117187500f, +(float16_t)-0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)-0.5883789062500f,(float16_t)0.8085937500000f, +(float16_t)-0.5908203125000f,(float16_t)0.8066406250000f, +(float16_t)-0.5932617187500f,(float16_t)0.8051757812500f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.5981445312500f,(float16_t)0.8012695312500f, +(float16_t)-0.6005859375000f,(float16_t)0.7993164062500f, +(float16_t)-0.6030273437500f,(float16_t)0.7978515625000f, +(float16_t)-0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)-0.6079101562500f,(float16_t)0.7939453125000f, +(float16_t)-0.6103515625000f,(float16_t)0.7919921875000f, +(float16_t)-0.6127929687500f,(float16_t)0.7900390625000f, +(float16_t)-0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)-0.6176757812500f,(float16_t)0.7866210937500f, +(float16_t)-0.6201171875000f,(float16_t)0.7846679687500f, +(float16_t)-0.6225585937500f,(float16_t)0.7827148437500f, +(float16_t)-0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)-0.6274414062500f,(float16_t)0.7788085937500f, +(float16_t)-0.6293945312500f,(float16_t)0.7768554687500f, +(float16_t)-0.6318359375000f,(float16_t)0.7749023437500f, +(float16_t)-0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)-0.6367187500000f,(float16_t)0.7709960937500f, +(float16_t)-0.6391601562500f,(float16_t)0.7690429687500f, +(float16_t)-0.6416015625000f,(float16_t)0.7670898437500f, +(float16_t)-0.6440429687500f,(float16_t)0.7651367187500f, +(float16_t)-0.6459960937500f,(float16_t)0.7631835937500f, +(float16_t)-0.6484375000000f,(float16_t)0.7612304687500f, +(float16_t)-0.6508789062500f,(float16_t)0.7592773437500f, +(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, 
+(float16_t)-0.6552734375000f,(float16_t)0.7553710937500f, +(float16_t)-0.6577148437500f,(float16_t)0.7534179687500f, +(float16_t)-0.6601562500000f,(float16_t)0.7509765625000f, +(float16_t)-0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)-0.6645507812500f,(float16_t)0.7470703125000f, +(float16_t)-0.6669921875000f,(float16_t)0.7451171875000f, +(float16_t)-0.6694335937500f,(float16_t)0.7431640625000f, +(float16_t)-0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)-0.6738281250000f,(float16_t)0.7387695312500f, +(float16_t)-0.6762695312500f,(float16_t)0.7368164062500f, +(float16_t)-0.6782226562500f,(float16_t)0.7348632812500f, +(float16_t)-0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)-0.6826171875000f,(float16_t)0.7304687500000f, +(float16_t)-0.6850585937500f,(float16_t)0.7285156250000f, +(float16_t)-0.6875000000000f,(float16_t)0.7265625000000f, +(float16_t)-0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)-0.6918945312500f,(float16_t)0.7221679687500f, +(float16_t)-0.6938476562500f,(float16_t)0.7202148437500f, +(float16_t)-0.6962890625000f,(float16_t)0.7177734375000f, +(float16_t)-0.6982421875000f,(float16_t)0.7158203125000f, +(float16_t)-0.7006835937500f,(float16_t)0.7133789062500f, +(float16_t)-0.7026367187500f,(float16_t)0.7114257812500f, +(float16_t)-0.7050781250000f,(float16_t)0.7094726562500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7094726562500f,(float16_t)0.7050781250000f, +(float16_t)-0.7114257812500f,(float16_t)0.7026367187500f, +(float16_t)-0.7133789062500f,(float16_t)0.7006835937500f, +(float16_t)-0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)-0.7177734375000f,(float16_t)0.6962890625000f, +(float16_t)-0.7202148437500f,(float16_t)0.6938476562500f, +(float16_t)-0.7221679687500f,(float16_t)0.6918945312500f, +(float16_t)-0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)-0.7265625000000f,(float16_t)0.6875000000000f, +(float16_t)-0.7285156250000f,(float16_t)0.6850585937500f, +(float16_t)-0.7304687500000f,(float16_t)0.6826171875000f, +(float16_t)-0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)-0.7348632812500f,(float16_t)0.6782226562500f, +(float16_t)-0.7368164062500f,(float16_t)0.6762695312500f, +(float16_t)-0.7387695312500f,(float16_t)0.6738281250000f, +(float16_t)-0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)-0.7431640625000f,(float16_t)0.6694335937500f, +(float16_t)-0.7451171875000f,(float16_t)0.6669921875000f, +(float16_t)-0.7470703125000f,(float16_t)0.6645507812500f, +(float16_t)-0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)-0.7509765625000f,(float16_t)0.6601562500000f, +(float16_t)-0.7534179687500f,(float16_t)0.6577148437500f, +(float16_t)-0.7553710937500f,(float16_t)0.6552734375000f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.7592773437500f,(float16_t)0.6508789062500f, +(float16_t)-0.7612304687500f,(float16_t)0.6484375000000f, +(float16_t)-0.7631835937500f,(float16_t)0.6459960937500f, +(float16_t)-0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)-0.7670898437500f,(float16_t)0.6416015625000f, +(float16_t)-0.7690429687500f,(float16_t)0.6391601562500f, +(float16_t)-0.7709960937500f,(float16_t)0.6367187500000f, +(float16_t)-0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)-0.7749023437500f,(float16_t)0.6318359375000f, +(float16_t)-0.7768554687500f,(float16_t)0.6293945312500f, +(float16_t)-0.7788085937500f,(float16_t)0.6274414062500f, +(float16_t)-0.7807617187500f,(float16_t)0.6250000000000f, 
+(float16_t)-0.7827148437500f,(float16_t)0.6225585937500f, +(float16_t)-0.7846679687500f,(float16_t)0.6201171875000f, +(float16_t)-0.7866210937500f,(float16_t)0.6176757812500f, +(float16_t)-0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)-0.7900390625000f,(float16_t)0.6127929687500f, +(float16_t)-0.7919921875000f,(float16_t)0.6103515625000f, +(float16_t)-0.7939453125000f,(float16_t)0.6079101562500f, +(float16_t)-0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)-0.7978515625000f,(float16_t)0.6030273437500f, +(float16_t)-0.7993164062500f,(float16_t)0.6005859375000f, +(float16_t)-0.8012695312500f,(float16_t)0.5981445312500f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8051757812500f,(float16_t)0.5932617187500f, +(float16_t)-0.8066406250000f,(float16_t)0.5908203125000f, +(float16_t)-0.8085937500000f,(float16_t)0.5883789062500f, +(float16_t)-0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)-0.8120117187500f,(float16_t)0.5834960937500f, +(float16_t)-0.8139648437500f,(float16_t)0.5810546875000f, +(float16_t)-0.8159179687500f,(float16_t)0.5781250000000f, +(float16_t)-0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)-0.8193359375000f,(float16_t)0.5732421875000f, +(float16_t)-0.8212890625000f,(float16_t)0.5708007812500f, +(float16_t)-0.8227539062500f,(float16_t)0.5683593750000f, +(float16_t)-0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)-0.8261718750000f,(float16_t)0.5629882812500f, +(float16_t)-0.8281250000000f,(float16_t)0.5605468750000f, +(float16_t)-0.8295898437500f,(float16_t)0.5581054687500f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.8330078125000f,(float16_t)0.5532226562500f, +(float16_t)-0.8349609375000f,(float16_t)0.5502929687500f, +(float16_t)-0.8364257812500f,(float16_t)0.5478515625000f, +(float16_t)-0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)-0.8398437500000f,(float16_t)0.5429687500000f, +(float16_t)-0.8417968750000f,(float16_t)0.5400390625000f, +(float16_t)-0.8432617187500f,(float16_t)0.5375976562500f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8466796875000f,(float16_t)0.5322265625000f, +(float16_t)-0.8481445312500f,(float16_t)0.5297851562500f, +(float16_t)-0.8496093750000f,(float16_t)0.5273437500000f, +(float16_t)-0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)-0.8530273437500f,(float16_t)0.5219726562500f, +(float16_t)-0.8544921875000f,(float16_t)0.5195312500000f, +(float16_t)-0.8559570312500f,(float16_t)0.5166015625000f, +(float16_t)-0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)-0.8593750000000f,(float16_t)0.5112304687500f, +(float16_t)-0.8608398437500f,(float16_t)0.5087890625000f, +(float16_t)-0.8623046875000f,(float16_t)0.5063476562500f, +(float16_t)-0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)-0.8657226562500f,(float16_t)0.5009765625000f, +(float16_t)-0.8671875000000f,(float16_t)0.4982910156250f, +(float16_t)-0.8686523437500f,(float16_t)0.4956054687500f, +(float16_t)-0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)-0.8715820312500f,(float16_t)0.4902343750000f, +(float16_t)-0.8730468750000f,(float16_t)0.4875488281250f, +(float16_t)-0.8745117187500f,(float16_t)0.4848632812500f, +(float16_t)-0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)-0.8774414062500f,(float16_t)0.4794921875000f, +(float16_t)-0.8789062500000f,(float16_t)0.4768066406250f, +(float16_t)-0.8803710937500f,(float16_t)0.4741210937500f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, 
+(float16_t)-0.8833007812500f,(float16_t)0.4687500000000f, +(float16_t)-0.8847656250000f,(float16_t)0.4660644531250f, +(float16_t)-0.8862304687500f,(float16_t)0.4633789062500f, +(float16_t)-0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)-0.8891601562500f,(float16_t)0.4577636718750f, +(float16_t)-0.8906250000000f,(float16_t)0.4550781250000f, +(float16_t)-0.8916015625000f,(float16_t)0.4523925781250f, +(float16_t)-0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)-0.8945312500000f,(float16_t)0.4467773437500f, +(float16_t)-0.8959960937500f,(float16_t)0.4440917968750f, +(float16_t)-0.8974609375000f,(float16_t)0.4414062500000f, +(float16_t)-0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)-0.8999023437500f,(float16_t)0.4357910156250f, +(float16_t)-0.9013671875000f,(float16_t)0.4331054687500f, +(float16_t)-0.9028320312500f,(float16_t)0.4304199218750f, +(float16_t)-0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)-0.9052734375000f,(float16_t)0.4248046875000f, +(float16_t)-0.9067382812500f,(float16_t)0.4221191406250f, +(float16_t)-0.9077148437500f,(float16_t)0.4191894531250f, +(float16_t)-0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)-0.9106445312500f,(float16_t)0.4135742187500f, +(float16_t)-0.9116210937500f,(float16_t)0.4108886718750f, +(float16_t)-0.9130859375000f,(float16_t)0.4079589843750f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9155273437500f,(float16_t)0.4023437500000f, +(float16_t)-0.9165039062500f,(float16_t)0.3996582031250f, +(float16_t)-0.9179687500000f,(float16_t)0.3967285156250f, +(float16_t)-0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)-0.9204101562500f,(float16_t)0.3911132812500f, +(float16_t)-0.9213867187500f,(float16_t)0.3884277343750f, +(float16_t)-0.9228515625000f,(float16_t)0.3854980468750f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9252929687500f,(float16_t)0.3798828125000f, +(float16_t)-0.9262695312500f,(float16_t)0.3769531250000f, +(float16_t)-0.9272460937500f,(float16_t)0.3742675781250f, +(float16_t)-0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)-0.9296875000000f,(float16_t)0.3684082031250f, +(float16_t)-0.9306640625000f,(float16_t)0.3657226562500f, +(float16_t)-0.9316406250000f,(float16_t)0.3627929687500f, +(float16_t)-0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)-0.9340820312500f,(float16_t)0.3569335937500f, +(float16_t)-0.9350585937500f,(float16_t)0.3542480468750f, +(float16_t)-0.9360351562500f,(float16_t)0.3513183593750f, +(float16_t)-0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)-0.9384765625000f,(float16_t)0.3454589843750f, +(float16_t)-0.9394531250000f,(float16_t)0.3427734375000f, +(float16_t)-0.9404296875000f,(float16_t)0.3398437500000f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9423828125000f,(float16_t)0.3339843750000f, +(float16_t)-0.9433593750000f,(float16_t)0.3310546875000f, +(float16_t)-0.9448242187500f,(float16_t)0.3281250000000f, +(float16_t)-0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)-0.9467773437500f,(float16_t)0.3225097656250f, +(float16_t)-0.9477539062500f,(float16_t)0.3195800781250f, +(float16_t)-0.9487304687500f,(float16_t)0.3166503906250f, +(float16_t)-0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)-0.9506835937500f,(float16_t)0.3107910156250f, +(float16_t)-0.9516601562500f,(float16_t)0.3078613281250f, +(float16_t)-0.9521484375000f,(float16_t)0.3049316406250f, +(float16_t)-0.9531250000000f,(float16_t)0.3020019531250f, 
+(float16_t)-0.9541015625000f,(float16_t)0.2990722656250f, +(float16_t)-0.9550781250000f,(float16_t)0.2961425781250f, +(float16_t)-0.9560546875000f,(float16_t)0.2932128906250f, +(float16_t)-0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)-0.9580078125000f,(float16_t)0.2873535156250f, +(float16_t)-0.9584960937500f,(float16_t)0.2844238281250f, +(float16_t)-0.9594726562500f,(float16_t)0.2814941406250f, +(float16_t)-0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)-0.9614257812500f,(float16_t)0.2756347656250f, +(float16_t)-0.9619140625000f,(float16_t)0.2727050781250f, +(float16_t)-0.9628906250000f,(float16_t)0.2697753906250f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9643554687500f,(float16_t)0.2636718750000f, +(float16_t)-0.9653320312500f,(float16_t)0.2607421875000f, +(float16_t)-0.9663085937500f,(float16_t)0.2578125000000f, +(float16_t)-0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)-0.9677734375000f,(float16_t)0.2519531250000f, +(float16_t)-0.9687500000000f,(float16_t)0.2489013671875f, +(float16_t)-0.9692382812500f,(float16_t)0.2459716796875f, +(float16_t)-0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)-0.9707031250000f,(float16_t)0.2399902343750f, +(float16_t)-0.9716796875000f,(float16_t)0.2370605468750f, +(float16_t)-0.9721679687500f,(float16_t)0.2340087890625f, +(float16_t)-0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)-0.9736328125000f,(float16_t)0.2280273437500f, +(float16_t)-0.9741210937500f,(float16_t)0.2250976562500f, +(float16_t)-0.9750976562500f,(float16_t)0.2220458984375f, +(float16_t)-0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)-0.9765625000000f,(float16_t)0.2160644531250f, +(float16_t)-0.9770507812500f,(float16_t)0.2131347656250f, +(float16_t)-0.9775390625000f,(float16_t)0.2100830078125f, +(float16_t)-0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)-0.9790039062500f,(float16_t)0.2041015625000f, +(float16_t)-0.9794921875000f,(float16_t)0.2010498046875f, +(float16_t)-0.9799804687500f,(float16_t)0.1981201171875f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9814453125000f,(float16_t)0.1921386718750f, +(float16_t)-0.9819335937500f,(float16_t)0.1890869140625f, +(float16_t)-0.9824218750000f,(float16_t)0.1860351562500f, +(float16_t)-0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)-0.9838867187500f,(float16_t)0.1800537109375f, +(float16_t)-0.9843750000000f,(float16_t)0.1770019531250f, +(float16_t)-0.9848632812500f,(float16_t)0.1739501953125f, +(float16_t)-0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)-0.9858398437500f,(float16_t)0.1679687500000f, +(float16_t)-0.9863281250000f,(float16_t)0.1649169921875f, +(float16_t)-0.9868164062500f,(float16_t)0.1618652343750f, +(float16_t)-0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)-0.9877929687500f,(float16_t)0.1558837890625f, +(float16_t)-0.9882812500000f,(float16_t)0.1528320312500f, +(float16_t)-0.9887695312500f,(float16_t)0.1497802734375f, +(float16_t)-0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)-0.9897460937500f,(float16_t)0.1436767578125f, +(float16_t)-0.9902343750000f,(float16_t)0.1406250000000f, +(float16_t)-0.9907226562500f,(float16_t)0.1375732421875f, +(float16_t)-0.9907226562500f,(float16_t)0.1345214843750f, +(float16_t)-0.9912109375000f,(float16_t)0.1315917968750f, +(float16_t)-0.9916992187500f,(float16_t)0.1285400390625f, +(float16_t)-0.9921875000000f,(float16_t)0.1254882812500f, +(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, 
+(float16_t)-0.9926757812500f,(float16_t)0.1193847656250f, +(float16_t)-0.9931640625000f,(float16_t)0.1163330078125f, +(float16_t)-0.9936523437500f,(float16_t)0.1132812500000f, +(float16_t)-0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)-0.9941406250000f,(float16_t)0.1071777343750f, +(float16_t)-0.9946289062500f,(float16_t)0.1041259765625f, +(float16_t)-0.9951171875000f,(float16_t)0.1010742187500f, +(float16_t)-0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)-0.9956054687500f,(float16_t)0.0949707031250f, +(float16_t)-0.9956054687500f,(float16_t)0.0919189453125f, +(float16_t)-0.9960937500000f,(float16_t)0.0888671875000f, +(float16_t)-0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)-0.9965820312500f,(float16_t)0.0827636718750f, +(float16_t)-0.9965820312500f,(float16_t)0.0797119140625f, +(float16_t)-0.9970703125000f,(float16_t)0.0765991210938f, +(float16_t)-0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)-0.9975585937500f,(float16_t)0.0704956054688f, +(float16_t)-0.9975585937500f,(float16_t)0.0674438476562f, +(float16_t)-0.9980468750000f,(float16_t)0.0643920898438f, +(float16_t)-0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)-0.9985351562500f,(float16_t)0.0582580566406f, +(float16_t)-0.9985351562500f,(float16_t)0.0552062988281f, +(float16_t)-0.9985351562500f,(float16_t)0.0521240234375f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9990234375000f,(float16_t)0.0459899902344f, +(float16_t)-0.9990234375000f,(float16_t)0.0429382324219f, +(float16_t)-0.9990234375000f,(float16_t)0.0398864746094f, +(float16_t)-0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)-0.9995117187500f,(float16_t)0.0337524414062f, +(float16_t)-0.9995117187500f,(float16_t)0.0306701660156f, +(float16_t)-0.9995117187500f,(float16_t)0.0276031494141f, +(float16_t)-0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)-1.0000000000000f,(float16_t)0.0214691162109f, +(float16_t)-1.0000000000000f,(float16_t)0.0184020996094f, +(float16_t)-1.0000000000000f,(float16_t)0.0153427124023f, +(float16_t)-1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)-1.0000000000000f,(float16_t)0.0092010498047f, +(float16_t)-1.0000000000000f,(float16_t)0.0061340332031f, +(float16_t)-1.0000000000000f,(float16_t)0.0030670166016f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)0.9907226562500f,(float16_t)0.1345214843750f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, 
+(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6982421875000f,(float16_t)0.7158203125000f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)0.6440429687500f,(float16_t)0.7651367187500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, 
+(float16_t)0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0368041992188f,(float16_t)0.9995117187500f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)-0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)-0.0368041992188f,(float16_t)0.9995117187500f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)-0.0735473632812f,(float16_t)0.9970703125000f, +(float16_t)-0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)-0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1102294921875f,(float16_t)0.9941406250000f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)-0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)-0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)-0.1710205078125f,(float16_t)0.9853515625000f, 
+(float16_t)-0.1829833984375f,(float16_t)0.9829101562500f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)-0.2191162109375f,(float16_t)0.9755859375000f, +(float16_t)-0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)-0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)-0.2548828125000f,(float16_t)0.9667968750000f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)-0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)-0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)-0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)-0.3251953125000f,(float16_t)0.9458007812500f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)-0.3598632812500f,(float16_t)0.9331054687500f, +(float16_t)-0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.3940429687500f,(float16_t)0.9189453125000f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)-0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)-0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)-0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)-0.4604492187500f,(float16_t)0.8876953125000f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)-0.4929199218750f,(float16_t)0.8701171875000f, +(float16_t)-0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)-0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)-0.5244140625000f,(float16_t)0.8515625000000f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)-0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)-0.5859375000000f,(float16_t)0.8105468750000f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)-0.6152343750000f,(float16_t)0.7885742187500f, +(float16_t)-0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)-0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)-0.6440429687500f,(float16_t)0.7651367187500f, +(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)-0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)-0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)-0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)-0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)-0.6982421875000f,(float16_t)0.7158203125000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)-0.7241210937500f,(float16_t)0.6894531250000f, +(float16_t)-0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)-0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)-0.7490234375000f,(float16_t)0.6625976562500f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)-0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)-0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)-0.7885742187500f,(float16_t)0.6152343750000f, 
+(float16_t)-0.7958984375000f,(float16_t)0.6054687500000f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)-0.8173828125000f,(float16_t)0.5756835937500f, +(float16_t)-0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.8383789062500f,(float16_t)0.5454101562500f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)-0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)-0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)-0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)-0.8759765625000f,(float16_t)0.4821777343750f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)-0.8930664062500f,(float16_t)0.4497070312500f, +(float16_t)-0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)-0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)-0.9091796875000f,(float16_t)0.4165039062500f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)-0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)-0.9375000000000f,(float16_t)0.3483886718750f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)-0.9497070312500f,(float16_t)0.3137207031250f, +(float16_t)-0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)-0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)-0.9604492187500f,(float16_t)0.2785644531250f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)-0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)-0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)-0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)-0.9785156250000f,(float16_t)0.2071533203125f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)-0.9853515625000f,(float16_t)0.1710205078125f, +(float16_t)-0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)-0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)-0.9907226562500f,(float16_t)0.1345214843750f, +(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)-0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)-0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)-0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)-0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)-0.9980468750000f,(float16_t)0.0613098144531f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)-0.9995117187500f,(float16_t)0.0245361328125f, +(float16_t)-1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, 
+(float16_t)0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1467285156250f,(float16_t)0.9892578125000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)-0.2902832031250f,(float16_t)0.9570312500000f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.4274902343750f,(float16_t)0.9038085937500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)-0.6713867187500f,(float16_t)0.7407226562500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)-0.7729492187500f,(float16_t)0.6342773437500f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.8579101562500f,(float16_t)0.5141601562500f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)-0.9702148437500f,(float16_t)0.2429199218750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)-0.9951171875000f,(float16_t)0.0980224609375f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)0.9238281250000f,(float16_t)0.3825683593750f, 
+(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.5556640625000f,(float16_t)0.8315429687500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)-0.9238281250000f,(float16_t)0.3825683593750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)0.0000000000000f,(float16_t)1.0000000000000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f,};
float16_t rearranged_twiddle_stride3_4096_f16[2728]={
-(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99998941108192840321f,(float16_t)0.00460192612044857050f, -(float16_t)0.99995764455196389786f,(float16_t)0.00920375478205981944f, -(float16_t)0.99990470108285289808f,(float16_t)0.01380538852806039059f, -(float16_t)0.99983058179582340319f,(float16_t)0.01840672990580482019f, -(float16_t)0.99973528826056168306f,(float16_t)0.02300768146883936868f, -(float16_t)0.99961882249517863830f,(float16_t)0.02760814577896573974f, -(float16_t)0.99948118696616694567f,(float16_t)0.03220802540830458582f, -(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99914241872481690532f,(float16_t)0.04140564097707673946f, -(float16_t)0.99894129318685687124f,(float16_t)0.04600318213091462299f, -(float16_t)0.99871901223387293811f,(float16_t)0.05059974903689928166f, -(float16_t)0.99847558057329477421f,(float16_t)0.05519524434968993420f, -(float16_t)0.99821100336047818846f,(float16_t)0.05978957074663986820f, -(float16_t)0.99792528619859599548f,(float16_t)0.06438263092985746505f, -(float16_t)0.99761843513851955478f,(float16_t)0.06897432762826674613f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99694135776498216117f,(float16_t)0.07815324163279423197f, -(float16_t)0.99657114579055483539f,(float16_t)0.08274026454937569164f, -(float16_t)0.99617982859569698117f,(float16_t)0.08732553520619205922f, -(float16_t)0.99576741446765981713f,(float16_t)0.09190895649713272386f, -(float16_t)0.99533391214048227980f,(float16_t)0.09649043135525259274f, -(float16_t)0.99487933079480561638f,(float16_t)0.10106986275482782167f, -(float16_t)0.99440368005767909576f,(float16_t)0.10564715371341061589f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99338921114808065305f,(float16_t)0.11479492660651008373f, -(float16_t)0.99285041445986510489f,(float16_t)0.11936521481099135467f, -(float16_t)0.99229059134825736699f,(float16_t)0.12393297511851215920f, -(float16_t)0.99170975366909952520f,(float16_t)0.12849811079379316880f, -(float16_t)0.99110791372327688986f,(float16_t)0.13306052515713906459f, -(float16_t)0.99048508425645709341f,(float16_t)0.13762012158648603832f, -(float16_t)0.98984127845882052821f,(float16_t)0.14217680351944803063f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98849079285269658701f,(float16_t)0.15128103795733022219f,
-(float16_t)0.98778414164457217783f,(float16_t)0.15582839765426523271f, -(float16_t)0.98705657130575097380f,(float16_t)0.16037245724292828464f, -(float16_t)0.98630809724459866938f,(float16_t)0.16491312048996989437f, -(float16_t)0.98553873531217606185f,(float16_t)0.16945029123396795900f, -(float16_t)0.98474850180190420801f,(float16_t)0.17398387338746382214f, -(float16_t)0.98393741344921892278f,(float16_t)0.17851377093899750692f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.98225274136628937249f,(float16_t)0.18756212858252960252f, -(float16_t)0.98137919331375456089f,(float16_t)0.19208039704989243734f, -(float16_t)0.98048486177346938497f,(float16_t)0.19659459767008022335f, -(float16_t)0.97956976568544051887f,(float16_t)0.20110463484209190055f, -(float16_t)0.97863392442942320759f,(float16_t)0.20561041305309923910f, -(float16_t)0.97767735782450992943f,(float16_t)0.21011183688046961016f, -(float16_t)0.97670008612871184184f,(float16_t)0.21460881099378675829f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97468351068851066810f,(float16_t)0.22358902922978998729f, -(float16_t)0.97364424965081197705f,(float16_t)0.22807208317088573102f, -(float16_t)0.97258436893473221296f,(float16_t)0.23255030703877524467f, -(float16_t)0.97150389098625178352f,(float16_t)0.23702360599436719801f, -(float16_t)0.97040283868755550234f,(float16_t)0.24149188530286933019f, -(float16_t)0.96928123535654853171f,(float16_t)0.24595505033579459497f, -(float16_t)0.96813910474636244441f,(float16_t)0.25041300657296522436f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96579335887408368500f,(float16_t)0.25931291513288623474f, -(float16_t)0.96458979328981275803f,(float16_t)0.26375467897483134694f, -(float16_t)0.96336579978095404631f,(float16_t)0.26819085706340317632f, -(float16_t)0.96212140426904158019f,(float16_t)0.27262135544994897662f, -(float16_t)0.96085663310767965850f,(float16_t)0.27704608030609989555f, -(float16_t)0.95957151308198451733f,(float16_t)0.28146493792575794091f, -(float16_t)0.95826607140801767226f,(float16_t)0.28587783472708061527f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95559433413077110586f,(float16_t)0.29468537218051432669f, -(float16_t)0.95422809510910566733f,(float16_t)0.29907982630804047508f, -(float16_t)0.95284164760119871573f,(float16_t)0.30346794657201131562f, -(float16_t)0.95143502096900833820f,(float16_t)0.30784964004153486661f, -(float16_t)0.95000824500184299914f,(float16_t)0.31222481392182488413f, -(float16_t)0.94856134991573026749f,(float16_t)0.31659337555616584581f, -(float16_t)0.94709436635277721717f,(float16_t)0.32095523242787521445f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.94410025849127265918f,(float16_t)0.32965846252858749255f, -(float16_t)0.94257319760144686605f,(float16_t)0.33399965144200938205f, -(float16_t)0.94102617505088925753f,(float16_t)0.33833376696554112728f, -(float16_t)0.93945922360218991898f,(float16_t)0.34266071731199437833f, -(float16_t)0.93787237643998988545f,(float16_t)0.34698041084592368133f, -(float16_t)0.93626566717027825959f,(float16_t)0.35129275608556709276f, -(float16_t)0.93463912981968078064f,(float16_t)0.35559766170478385172f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.93132670908118042608f,(float16_t)0.36418478956707989180f, -(float16_t)0.92964089584318121418f,(float16_t)0.36846682995337232125f, 
-(float16_t)0.92793539482261788720f,(float16_t)0.37274106700951575855f, -(float16_t)0.92621024213831137928f,(float16_t)0.37700741021641825945f, -(float16_t)0.92446547432526260391f,(float16_t)0.38126576922216237620f, -(float16_t)0.92270112833387862850f,(float16_t)0.38551605384391884890f, -(float16_t)0.92091724152918941204f,(float16_t)0.38975817406985641123f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91729099700837790632f,(float16_t)0.39821756215337356100f, -(float16_t)0.91544871608826783316f,(float16_t)0.40243465085941843018f, -(float16_t)0.91358704794525080750f,(float16_t)0.40664321687036902864f, -(float16_t)0.91170603200542987832f,(float16_t)0.41084317105790391089f, -(float16_t)0.90980570810465222209f,(float16_t)0.41503442447608163146f, -(float16_t)0.90788611648766626150f,(float16_t)0.41921688836322390515f, -(float16_t)0.90594729780726845902f,(float16_t)0.42339047414379604728f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.90201214390249317976f,(float16_t)0.43171065802505725895f, -(float16_t)0.90001589201616016833f,(float16_t)0.43585707992225547480f, -(float16_t)0.89800057974073987932f,(float16_t)0.43999427130963325583f, -(float16_t)0.89596624975618521791f,(float16_t)0.44412214457042920035f, -(float16_t)0.89391294514520325265f,(float16_t)0.44824061228521988598f, -(float16_t)0.89184070939234272313f,(float16_t)0.45234958723377088896f, -(float16_t)0.88974958638307277692f,(float16_t)0.45644898239688391772f, -(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.88551085613619995307f,(float16_t)0.46461868630623781584f, -(float16_t)0.88336333866573157891f,(float16_t)0.46868882203582790114f, -(float16_t)0.88119711347122209322f,(float16_t)0.47274903195034279069f, -(float16_t)0.87901222642863352519f,(float16_t)0.47679923006332208812f, -(float16_t)0.87680872380914565145f,(float16_t)0.48083933060033395845f, -(float16_t)0.87458665227817611321f,(float16_t)0.48486924800079106435f, -(float16_t)0.87234605889439154058f,(float16_t)0.48888889691976317176f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.86780949676330332299f,(float16_t)0.49689704902265446895f, -(float16_t)0.86551362409056908920f,(float16_t)0.50088538261124071482f, -(float16_t)0.86319942171212415971f,(float16_t)0.50486310853126759035f, -(float16_t)0.86086693863776730939f,(float16_t)0.50883014254310698909f, -(float16_t)0.85851622426444273994f,(float16_t)0.51278640063356295542f, -(float16_t)0.85614732837519447184f,(float16_t)0.51673179901764987321f, -(float16_t)0.85376030113811141042f,(float16_t)0.52066625414036715735f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.84893205521163961347f,(float16_t)0.52850200154222848337f, -(float16_t)0.84649093877405212627f,(float16_t)0.53240312787719790144f, -(float16_t)0.84403189549006640835f,(float16_t)0.53629297906596318235f, -(float16_t)0.84155497743689844370f,(float16_t)0.54017147272989285423f, -(float16_t)0.83906023707031274217f,(float16_t)0.54403852673088382019f, -(float16_t)0.83654772722351200542f,(float16_t)0.54789405917310018967f, -(float16_t)0.83401750110601813315f,(float16_t)0.55173798840470733573f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.82890411477186487499f,(float16_t)0.55939071185913613604f, -(float16_t)0.82632106284566353427f,(float16_t)0.56319934401383409117f, -(float16_t)0.82372051122739142759f,(float16_t)0.56699604882510867832f, 
-(float16_t)0.82110251499110464835f,(float16_t)0.57078074588696725566f, -(float16_t)0.81846712958029865792f,(float16_t)0.57455335504771576360f, -(float16_t)0.81581441080673378075f,(float16_t)0.57831379641165558958f, -(float16_t)0.81314441484925359394f,(float16_t)0.58206199034077543697f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, -(float16_t)0.80775281792619035848f,(float16_t)0.58952131864106394055f, -(float16_t)0.80503133114296365758f,(float16_t)0.59323229503979979516f, -(float16_t)0.80229279553811572168f,(float16_t)0.59693070806219639124f, -(float16_t)0.79953726910790501314f,(float16_t)0.60061647938386897305f, -(float16_t)0.79676481020841882774f,(float16_t)0.60428953094815596181f, -(float16_t)0.79397547755433717231f,(float16_t)0.60794978496777363208f, -(float16_t)0.79116933021769020318f,(float16_t)0.61159716392646190641f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.78550682956405393220f,(float16_t)0.61885298796097631957f, -(float16_t)0.78265059616657572938f,(float16_t)0.62246127937414996723f, -(float16_t)0.77977778792301455368f,(float16_t)0.62605638840434352232f, -(float16_t)0.77688846567323244230f,(float16_t)0.62963823891492698426f, -(float16_t)0.77398269060682289844f,(float16_t)0.63320675505005719064f, -(float16_t)0.77106052426181381776f,(float16_t)0.63676186123628419899f, -(float16_t)0.76812202852336541881f,(float16_t)0.64030348218415167327f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.76219629813457900891f,(float16_t)0.64734596863651205911f, -(float16_t)0.75920918897838796102f,(float16_t)0.65084668499638087535f, -(float16_t)0.75620600141439453523f,(float16_t)0.65433361783180044036f, -(float16_t)0.75318679904361252042f,(float16_t)0.65780669329707863735f, -(float16_t)0.75015164580621507273f,(float16_t)0.66126583783999226540f, -(float16_t)0.74710060598018013245f,(float16_t)0.66471097820334479334f, -(float16_t)0.74403374417992929057f,(float16_t)0.66814204142651845153f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.73785281478846598269f,(float16_t)0.67496164610201192513f, -(float16_t)0.73473887809596349907f,(float16_t)0.67835004312986146857f, -(float16_t)0.73160938122389262972f,(float16_t)0.68172407417164970767f, -(float16_t)0.72846439044822519637f,(float16_t)0.68508366777270035541f, -(float16_t)0.72530397237306076796f,(float16_t)0.68842875278409043638f, -(float16_t)0.72212819392921534511f,(float16_t)0.69175925836415774750f, -(float16_t)0.71893712237280449351f,(float16_t)0.69507511398000088043f, -(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, -(float16_t)0.71250937056469243469f,(float16_t)0.70166259474016845488f, -(float16_t)0.70927282643886568891f,(float16_t)0.70493408037590488124f, -(float16_t)0.70602126144933974317f,(float16_t)0.70819063703319540259f, -(float16_t)0.70275474445722529993f,(float16_t)0.71143219574521643356f, -(float16_t)0.69947334464028376733f,(float16_t)0.71465868786276909308f, -(float16_t)0.69617713149146298601f,(float16_t)0.71787004505573170920f, -(float16_t)0.69286617481742474034f,(float16_t)0.72106619931450810501f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.68620031168003858824f,(float16_t)0.72741262860237576593f, -(float16_t)0.68284554638524808112f,(float16_t)0.73056276922782759087f, -(float16_t)0.67947631989936496666f,(float16_t)0.73369743811466026084f, -(float16_t)0.67609270357531603413f,(float16_t)0.73681656887736979300f, 
-(float16_t)0.67269476907077296879f,(float16_t)0.73992009545951609173f, -(float16_t)0.66928258834663600929f,(float16_t)0.74300795213512171866f, -(float16_t)0.66585623366550972246f,(float16_t)0.74608007351006366825f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.65896129298203731661f,(float16_t)0.75217685044904269986f, -(float16_t)0.65549285299961546070f,(float16_t)0.75520137689653654700f, -(float16_t)0.65201053109695950027f,(float16_t)0.75820990981301528144f, -(float16_t)0.64851440102211255212f,(float16_t)0.76120238548426177871f, -(float16_t)0.64500453681554403840f,(float16_t)0.76417874053611667406f, -(float16_t)0.64148101280858316198f,(float16_t)0.76713891193582040007f, -(float16_t)0.63794390362184416610f,(float16_t)0.77008283699334789674f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.63082922962842458148f,(float16_t)0.77592169904340757558f, -(float16_t)0.62725181549514419377f,(float16_t)0.77881651238147586724f, -(float16_t)0.62366111752569464155f,(float16_t)0.78169483207105938671f, -(float16_t)0.62005721176328920663f,(float16_t)0.78455659715557524159f, -(float16_t)0.61644017453085364622f,(float16_t)0.78740174702903131809f, -(float16_t)0.61281008242940970820f,(float16_t)0.79023022143731003197f, -(float16_t)0.60916701233645320634f,(float16_t)0.79304196047944364167f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.60184224705858002658f,(float16_t)0.79861499463476082195f, -(float16_t)0.59816070699634238395f,(float16_t)0.80137617172314012937f, -(float16_t)0.59446649918466454299f,(float16_t)0.80412037739826569549f, -(float16_t)0.59075970185887427544f,(float16_t)0.80684755354379922299f, -(float16_t)0.58704039352091808013f,(float16_t)0.80955764240405125864f, -(float16_t)0.58330865293769829094f,(float16_t)0.81225058658520388200f, -(float16_t)0.57956455913940574387f,(float16_t)0.81492632905652662156f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.57203962932475704850f,(float16_t)0.82022598256943468620f, -(float16_t)0.56825895267013148970f,(float16_t)0.82284978137582631685f, -(float16_t)0.56446624152051949608f,(float16_t)0.82545615400437744036f, -(float16_t)0.56066157619733603124f,(float16_t)0.82804504525775579626f, -(float16_t)0.55684503727516010407f,(float16_t)0.83061640030884620334f, -(float16_t)0.55301670558002757883f,(float16_t)0.83317016470191318511f, -(float16_t)0.54917666218771976627f,(float16_t)0.83570628435375260423f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.54146176585312355556f,(float16_t)0.84072537497045796151f, -(float16_t)0.53758707629564550512f,(float16_t)0.84320823964184543620f, -(float16_t)0.53370100180715296379f,(float16_t)0.84567324698729906540f, -(float16_t)0.52980362468629482731f,(float16_t)0.84812034480329712149f, -(float16_t)0.52589502747108474168f,(float16_t)0.85054948126560336874f, -(float16_t)0.52197529293715438925f,(float16_t)0.85296060493036363059f, -(float16_t)0.51804450409599933636f,(float16_t)0.85535366473519602870f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.51015009670676669806f,(float16_t)0.86008539042939025077f, -(float16_t)0.50618664534515533937f,(float16_t)0.86242395611104050168f, -(float16_t)0.50221247404571089934f,(float16_t)0.86474425751946237817f, -(float16_t)0.49822766697278186854f,(float16_t)0.86704624551569264845f, -(float16_t)0.49423230851595972846f,(float16_t)0.86932987134860673084f, 
-(float16_t)0.49022648328829110387f,(float16_t)0.87159508665595109012f, -(float16_t)0.48621027612448652899f,(float16_t)0.87384184346536675214f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.47814705642484311987f,(float16_t)0.87827979165654146421f, -(float16_t)0.47410021465055002254f,(float16_t)0.88047088905216075450f, -(float16_t)0.47004333245959561971f,(float16_t)0.88264333997956279099f, -(float16_t)0.46597649576796612569f,(float16_t)0.88479709843093778954f, -(float16_t)0.46189979070246284243f,(float16_t)0.88693211879434208367f, -(float16_t)0.45781330359887728587f,(float16_t)0.88904835585466457371f, -(float16_t)0.45371712100016392544f,(float16_t)0.89114576479458318392f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.44549601651398174074f,(float16_t)0.89528392103855758410f, -(float16_t)0.44137126873171661501f,(float16_t)0.89732458070541831763f, -(float16_t)0.43723717366104419835f,(float16_t)0.89934623697934146236f, -(float16_t)0.43309381885315201277f,(float16_t)0.90134884704602202810f, -(float16_t)0.42894129205532954829f,(float16_t)0.90333236849451181705f, -(float16_t)0.42477968120910880589f,(float16_t)0.90529675931811881551f, -(float16_t)0.42060907444840250902f,(float16_t)0.90724197791529592738f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.41224122666988299857f,(float16_t)0.91107473405517624965f, -(float16_t)0.40804416286497874333f,(float16_t)0.91296219042839810154f, -(float16_t)0.40383845756765412993f,(float16_t)0.91483031223794608611f, -(float16_t)0.39962419984564678810f,(float16_t)0.91667905992104270485f, -(float16_t)0.39540147894781629834f,(float16_t)0.91850839432521225181f, -(float16_t)0.39117038430225398171f,(float16_t)0.92031827670911048322f, -(float16_t)0.38693100551438869283f,(float16_t)0.92210866874334507237f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.37842775480876561511f,(float16_t)0.92563083050987271516f, -(float16_t)0.37416406297145798909f,(float16_t)0.92736252565040111495f, -(float16_t)0.36989244714893426691f,(float16_t)0.92907458125931574600f, -(float16_t)0.36561299780477396482f,(float16_t)0.93076696107898371224f, -(float16_t)0.36132580556845433906f,(float16_t)0.93243962926846235550f, -(float16_t)0.35703096123343003310f,(float16_t)0.93409255040425887007f, -(float16_t)0.35272855575521072646f,(float16_t)0.93572568948108036935f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.34410142598993898044f,(float16_t)0.93893248353206448797f, -(float16_t)0.33977688440682696225f,(float16_t)0.94050607059326829518f, -(float16_t)0.33544514708453165852f,(float16_t)0.94205973977101731265f, -(float16_t)0.33110630575987642921f,(float16_t)0.94359345816196038559f, -(float16_t)0.32676045232013178898f,(float16_t)0.94510719328526060501f, -(float16_t)0.32240767880107001897f,(float16_t)0.94660091308328353499f, -(float16_t)0.31804807738501505998f,(float16_t)0.94807458592227622507f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.30930876031226878231f,(float16_t)0.95096166631157508231f, -(float16_t)0.30492922973540242948f,(float16_t)0.95237501271976587880f, -(float16_t)0.30054324141727339903f,(float16_t)0.95376818988599032512f, -(float16_t)0.29615088824362395536f,(float16_t)0.95514116830577067141f, -(float16_t)0.29175226323498937298f,(float16_t)0.95649391890239499059f, -(float16_t)0.28734745954472956653f,(float16_t)0.95782641302753290802f, 
-(float16_t)0.28293657045705539188f,(float16_t)0.95913862246184189431f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.27409690986870632878f,(float16_t)0.96170207652912254037f, -(float16_t)0.26966832557291520178f,(float16_t)0.96295326687368387741f, -(float16_t)0.26523403028551190141f,(float16_t)0.96418406395174571788f, -(float16_t)0.26079411791527556952f,(float16_t)0.96539444169768939830f, -(float16_t)0.25634868248994291395f,(float16_t)0.96658437447833311928f, -(float16_t)0.25189781815421691258f,(float16_t)0.96775383709347551076f, -(float16_t)0.24744161916777343557f,(float16_t)0.96890280477642887202f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.23851359484431849944f,(float16_t)0.97113915844972509284f, -(float16_t)0.23404195858354345794f,(float16_t)0.97222649707893626925f, -(float16_t)0.22956536582051886852f,(float16_t)0.97329324605469824672f, -(float16_t)0.22508391135979277653f,(float16_t)0.97433938278557585821f, -(float16_t)0.22059769010887364526f,(float16_t)0.97536488511665686563f, -(float16_t)0.21610679707621960333f,(float16_t)0.97636973133002114000f, -(float16_t)0.21161132736922760866f,(float16_t)0.97735390014519996082f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, -(float16_t)0.20260703884442110567f,(float16_t)0.97926012264908202098f, -(float16_t)0.19809841071795372680f,(float16_t)0.98018213596811731847f, -(float16_t)0.19358558729580374602f,(float16_t)0.98108339115048659451f, -(float16_t)0.18906866414980627589f,(float16_t)0.98196386910955524296f, -(float16_t)0.18454773693861964423f,(float16_t)0.98282355119870523641f, -(float16_t)0.18002290140569951471f,(float16_t)0.98366241921173025453f, -(float16_t)0.17549425337727139751f,(float16_t)0.98448045538322093151f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.16642590354046421508f,(float16_t)0.98605396334619543897f, -(float16_t)0.16188639378011188130f,(float16_t)0.98680940181418541624f, -(float16_t)0.15734345561623827581f,(float16_t)0.98754394179435922574f, -(float16_t)0.15279718525844340760f,(float16_t)0.98825756773074946437f, -(float16_t)0.14824767898689619749f,(float16_t)0.98895026451030298986f, -(float16_t)0.14369503315029458212f,(float16_t)0.98962201746320077600f, -(float16_t)0.13913934416382628401f,(float16_t)0.99027281236316910817f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.13001922272223334631f,(float16_t)0.99151147331874389668f, -(float16_t)0.12545498341154620592f,(float16_t)0.99209931314219179654f, -(float16_t)0.12088808723577722237f,(float16_t)0.99266614244894801899f, -(float16_t)0.11631863091190487725f,(float16_t)0.99321194923479450001f, -(float16_t)0.11174671121112665639f,(float16_t)0.99373672194072459884f, -(float16_t)0.10717242495680887049f,(float16_t)0.99424044945318790223f, -(float16_t)0.10259586902243628126f,(float16_t)0.99472312110432570265f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.09343633584574791151f,(float16_t)0.99562525638099430569f, -(float16_t)0.08885355258252468358f,(float16_t)0.99604470090125196702f, -(float16_t)0.08426888759332412659f,(float16_t)0.99644305135004263008f, -(float16_t)0.07968243797143012563f,(float16_t)0.99682029929116566791f, -(float16_t)0.07509430084792129145f,(float16_t)0.99717643673532618820f, -(float16_t)0.07050457338961400866f,(float16_t)0.99751145614030345410f, -(float16_t)0.06591335279700392957f,(float16_t)0.99782535041111164453f, 
-(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.05672682116690778292f,(float16_t)0.99838973740734016094f, -(float16_t)0.05213170468028331672f,(float16_t)0.99864021818026527111f, -(float16_t)0.04753548415695926094f,(float16_t)0.99886954991428356099f, -(float16_t)0.04293825693494095902f,(float16_t)0.99907772775264536147f, -(float16_t)0.03834012037355279123f,(float16_t)0.99926474728659442359f, -(float16_t)0.03374117185137764235f,(float16_t)0.99943060455546173237f, -(float16_t)0.02914150876419373953f,(float16_t)0.99957529604674921764f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.01994042855151459750f,(float16_t)0.99980116988788425569f, -(float16_t)0.01533920628498821985f,(float16_t)0.99988234745421256111f, -(float16_t)0.01073765916726457208f,(float16_t)0.99994234967602391162f, -(float16_t)0.00613588464915451517f,(float16_t)0.99998117528260110909f, -(float16_t)0.00153398018628476615f,(float16_t)0.99999882345170187925f, -(float16_t)-0.00306795676296601561f,(float16_t)0.99999529380957619118f, -(float16_t)-0.00766982873953095477f,(float16_t)0.99997058643097413988f, -(float16_t)-0.01227153828571982304f,(float16_t)0.99992470183914450299f, -(float16_t)-0.01687298794728165144f,(float16_t)0.99985764100582386060f, -(float16_t)-0.02147408027546948359f,(float16_t)0.99976940535121527898f, -(float16_t)-0.02607471782910391472f,(float16_t)0.99965999674395922270f, -(float16_t)-0.03067480317663645942f,(float16_t)0.99952941750109314256f, -(float16_t)-0.03527423889821382219f,(float16_t)0.99937767038800284780f, -(float16_t)-0.03987292758773972740f,(float16_t)0.99920475861836388631f, -(float16_t)-0.04447077185493861912f,(float16_t)0.99901068585407337697f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.05366353765273055437f,(float16_t)0.99855907422975931365f, -(float16_t)-0.05825826450043560673f,(float16_t)0.99830154493389289261f, -(float16_t)-0.06285175756416130910f,(float16_t)0.99802287377148624081f, -(float16_t)-0.06744391956366398155f,(float16_t)0.99772306664419163624f, -(float16_t)-0.07203465324688929083f,(float16_t)0.99740212990127530279f, -(float16_t)-0.07662386139203150592f,(float16_t)0.99706007033948296225f, -(float16_t)-0.08121144680959226092f,(float16_t)0.99669689520289606044f, -(float16_t)-0.08579731234443975507f,(float16_t)0.99631261218277800129f, -(float16_t)-0.09038136087786488582f,(float16_t)0.99590722941741172125f, -(float16_t)-0.09496349532963895002f,(float16_t)0.99548075549192693856f, -(float16_t)-0.09954361866006931903f,(float16_t)0.99503319943811863180f, -(float16_t)-0.10412163387205460030f,(float16_t)0.99456457073425541537f, -(float16_t)-0.10869744401313856386f,(float16_t)0.99407487930487947736f, -(float16_t)-0.11327095217756423529f,(float16_t)0.99356413552059530403f, -(float16_t)-0.11784206150832489401f,(float16_t)0.99303235019785141002f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.12697669649688586579f,(float16_t)0.99190570043060932726f, -(float16_t)-0.13154002870288314386f,(float16_t)0.99131085984611544415f, -(float16_t)-0.13610057517570606223f,(float16_t)0.99069502544266463406f, -(float16_t)-0.14065823933284912761f,(float16_t)0.99005821026229712256f, -(float16_t)-0.14521292465284740825f,(float16_t)0.98940042779138037687f, -(float16_t)-0.14976453467732150915f,(float16_t)0.98872169196032377858f, -(float16_t)-0.15431297301302013270f,(float16_t)0.98802201714328352633f, 
-(float16_t)-0.15885814333386127917f,(float16_t)0.98730141815785843473f, -(float16_t)-0.16339994938297311422f,(float16_t)0.98655991026477551920f, -(float16_t)-0.16793829497473108936f,(float16_t)0.98579750916756747614f, -(float16_t)-0.17247308399679592283f,(float16_t)0.98501423101223983814f, -(float16_t)-0.17700422041214874946f,(float16_t)0.98421009238692902521f, -(float16_t)-0.18153160826112502146f,(float16_t)0.98338511032155118130f, -(float16_t)-0.18605515166344649414f,(float16_t)0.98253930228744124076f, -(float16_t)-0.19057475482025265645f,(float16_t)0.98167268619698311305f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.19960175762113094300f,(float16_t)0.97987710369951763756f, -(float16_t)-0.20410896609281689584f,(float16_t)0.97894817531906219710f, -(float16_t)-0.20861185197826331850f,(float16_t)0.97799851493455713936f, -(float16_t)-0.21311031991609125091f,(float16_t)0.97702814265775439484f, -(float16_t)-0.21760427463848355800f,(float16_t)0.97603707903903913490f, -(float16_t)-0.22209362097320348162f,(float16_t)0.97502534506699412020f, -(float16_t)-0.22657826384560997290f,(float16_t)0.97399296216795583359f, -(float16_t)-0.23105810828067113727f,(float16_t)0.97293995220556017678f, -(float16_t)-0.23553305940497534787f,(float16_t)0.97186633748027939639f, -(float16_t)-0.24000302244874138768f,(float16_t)0.97077214072895035013f, -(float16_t)-0.24446790274782409513f,(float16_t)0.96965738512429244800f, -(float16_t)-0.24892760574572012078f,(float16_t)0.96852209427441737777f, -(float16_t)-0.25338203699557015902f,(float16_t)0.96736629222232850545f, -(float16_t)-0.25783110216215882060f,(float16_t)0.96619000344541261516f, -(float16_t)-0.26227470702391347812f,(float16_t)0.96499325285492043580f, -(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.27114515952680795507f,(float16_t)0.96253846804435916340f, -(float16_t)-0.27557181931095814376f,(float16_t)0.96128048581132063966f, -(float16_t)-0.27999264308027327353f,(float16_t)0.96000214573766584625f, -(float16_t)-0.28440753721127171039f,(float16_t)0.95870347489587159906f, -(float16_t)-0.28881640820604936870f,(float16_t)0.95738450078897596729f, -(float16_t)-0.29321916269425857271f,(float16_t)0.95604525134999651659f, -(float16_t)-0.29761570743508619641f,(float16_t)0.95468575494133833814f, -(float16_t)-0.30200594931922808417f,(float16_t)0.95330604035419386211f, -(float16_t)-0.30638979537086097338f,(float16_t)0.95190613680793234597f, -(float16_t)-0.31076715274961136393f,(float16_t)0.95048607394948181337f, -(float16_t)-0.31513792875252233383f,(float16_t)0.94904588185270055689f, -(float16_t)-0.31950203081601563637f,(float16_t)0.94758559101774120226f, -(float16_t)-0.32385936651785285356f,(float16_t)0.94610523237040344835f, -(float16_t)-0.32820984357909255280f,(float16_t)0.94460483726148025685f, -(float16_t)-0.33255336986604405736f,(float16_t)0.94308443746609349478f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.34121920232028229991f,(float16_t)0.93998375303401404679f, -(float16_t)-0.34554132496398903829f,(float16_t)0.93840353406310816897f, -(float16_t)-0.34985612979013491763f,(float16_t)0.93680344173592156043f, -(float16_t)-0.35416352542049039931f,(float16_t)0.93518350993894761025f, -(float16_t)-0.35846342063373642928f,(float16_t)0.93354377297883628373f, -(float16_t)-0.36275572436739711435f,(float16_t)0.93188426558166814750f, -(float16_t)-0.36704034571976712487f,(float16_t)0.93020502289221906889f, 
-(float16_t)-0.37131719395183748755f,(float16_t)0.92850608047321558924f, -(float16_t)-0.37558617848921721505f,(float16_t)0.92678747430458174872f, -(float16_t)-0.37984720892405099413f,(float16_t)0.92504924078267769527f, -(float16_t)-0.38410019501693493105f,(float16_t)0.92329141671952774661f, -(float16_t)-0.38834504669882619066f,(float16_t)0.92151403934204201285f, -(float16_t)-0.39258167407295141427f,(float16_t)0.91971714629122736095f, -(float16_t)-0.39680998741671030805f,(float16_t)0.91790077562139049672f, -(float16_t)-0.40102989718357567872f,(float16_t)0.91606496579933172075f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.40944414869225753684f,(float16_t)0.91233518462332285903f, -(float16_t)-0.41363831223843450235f,(float16_t)0.91044129225806724737f, -(float16_t)-0.41782371582021227141f,(float16_t)0.90852811871630612117f, -(float16_t)-0.42200027079979968159f,(float16_t)0.90659570451491533483f, -(float16_t)-0.42616788872679967071f,(float16_t)0.90464409057824612947f, -(float16_t)-0.43032648134008272267f,(float16_t)0.90267331823725871498f, -(float16_t)-0.43447596056965581690f,(float16_t)0.90068342922864685907f, -(float16_t)-0.43861623853852738097f,(float16_t)0.89867446569395392775f, -(float16_t)-0.44274722756456980077f,(float16_t)0.89664647017868026602f, -(float16_t)-0.44686884016237399253f,(float16_t)0.89459948563138280697f, -(float16_t)-0.45098098904510369733f,(float16_t)0.89253355540276468894f, -(float16_t)-0.45508358712634372489f,(float16_t)0.89044872324475798919f, -(float16_t)-0.45917654752194403400f,(float16_t)0.88834503330959635470f, -(float16_t)-0.46325978355186014923f,(float16_t)0.88622253014888063838f, -(float16_t)-0.46733320874198841510f,(float16_t)0.88408125871263498752f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.47545028174715592284f,(float16_t)0.87974259280004740713f, -(float16_t)-0.47949375766015311928f,(float16_t)0.87754529020726124156f, -(float16_t)-0.48352707893291846375f,(float16_t)0.87532940310411100349f, -(float16_t)-0.48755016014843571837f,(float16_t)0.87309497841829020182f, -(float16_t)-0.49156291610654972990f,(float16_t)0.87084206347007897531f, -(float16_t)-0.49556526182577237405f,(float16_t)0.86857070597134100609f, -(float16_t)-0.49955711254508178287f,(float16_t)0.86628095402451310569f, -(float16_t)-0.50353838372571746440f,(float16_t)0.86397285612158680745f, -(float16_t)-0.50750899105297075931f,(float16_t)0.86164646114308141023f, -(float16_t)-0.51146885043797041259f,(float16_t)0.85930181835700847337f, -(float16_t)-0.51541787801946303826f,(float16_t)0.85693897741782865118f, -(float16_t)-0.51935599016558964269f,(float16_t)0.85455798836540053376f, -(float16_t)-0.52328310347565654137f,(float16_t)0.85215890162391971785f, -(float16_t)-0.52719913478190105760f,(float16_t)0.84974176800085265970f, -(float16_t)-0.53110400115125477871f,(float16_t)0.84730663868585853749f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, -(float16_t)-0.53887990853100831146f,(float16_t)0.84238259964318595863f, -(float16_t)-0.54275078486451577842f,(float16_t)0.83989379419599952126f, -(float16_t)-0.54661016691083474939f,(float16_t)0.83738720161566193578f, -(float16_t)-0.55045797293660470029f,(float16_t)0.83486287498638012128f, -(float16_t)-0.55429412145362011444f,(float16_t)0.83232086776792968408f, -(float16_t)-0.55811853122055610221f,(float16_t)0.82976123379452304540f, -(float16_t)-0.56193112124468946877f,(float16_t)0.82718402727366902027f, 
-(float16_t)-0.56573181078361323149f,(float16_t)0.82458930278502517996f, -(float16_t)-0.56952051934694725155f,(float16_t)0.82197711527924144370f, -(float16_t)-0.57329716669804198226f,(float16_t)0.81934752007679712005f, -(float16_t)-0.57706167285567933067f,(float16_t)0.81670057286682795628f, -(float16_t)-0.58081395809576441547f,(float16_t)0.81403632970594852480f, -(float16_t)-0.58455394295301521534f,(float16_t)0.81135484701706384048f, -(float16_t)-0.58828154822264522306f,(float16_t)0.80865618158817509364f, -(float16_t)-0.59199669496204088137f,(float16_t)0.80594039057117639047f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.59938929840056454079f,(float16_t)0.80045766219262282082f, -(float16_t)-0.60306659854034827539f,(float16_t)0.79769084094339104407f, -(float16_t)-0.60673112703452458661f,(float16_t)0.79490712632823690154f, -(float16_t)-0.61038280627630958630f,(float16_t)0.79210657730021227785f, -(float16_t)-0.61402155893103815831f,(float16_t)0.78928925316888587371f, -(float16_t)-0.61764730793780375784f,(float16_t)0.78645521359908587833f, -(float16_t)-0.62125997651108744169f,(float16_t)0.78360451860963831194f, -(float16_t)-0.62485948814238623239f,(float16_t)0.78073722857209459924f, -(float16_t)-0.62844576660183260053f,(float16_t)0.77785340420945314754f, -(float16_t)-0.63201873593980895105f,(float16_t)0.77495310659487393057f, -(float16_t)-0.63557832048855611440f,(float16_t)0.77203639715038452351f, -(float16_t)-0.63912444486377573138f,(float16_t)0.76910333764557958780f, -(float16_t)-0.64265703396622686494f,(float16_t)0.76615399019631280630f, -(float16_t)-0.64617601298331639459f,(float16_t)0.76318841726338115805f, -(float16_t)-0.64968130739068330470f,(float16_t)0.76020668165120230952f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.65665054572942882505f,(float16_t)0.75419497531688928227f, -(float16_t)-0.66011434206742036768f,(float16_t)0.75116513190968658975f, -(float16_t)-0.66356415861203965623f,(float16_t)0.74811938045040371481f, -(float16_t)-0.66699992230363736034f,(float16_t)0.74505778544146605835f, -(float16_t)-0.67042156038017308717f,(float16_t)0.74198041172083106787f, -(float16_t)-0.67382900037875603783f,(float16_t)0.73888732446061522463f, -(float16_t)-0.67722217013718044587f,(float16_t)0.73577858916571359238f, -(float16_t)-0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)-0.68396541179731551452f,(float16_t)0.72951443814699701296f, -(float16_t)-0.68731534089175916336f,(float16_t)0.72635915508434589771f, -(float16_t)-0.69065071413453438254f,(float16_t)0.72318848930652757101f, -(float16_t)-0.69397146088965377952f,(float16_t)0.72000250796138176579f, -(float16_t)-0.69727751083088640449f,(float16_t)0.71680127852109964959f, -(float16_t)-0.70056879394324822474f,(float16_t)0.71358486878079363525f, -(float16_t)-0.70384524052448482756f,(float16_t)0.71035334685706241764f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.71035334685706230662f,(float16_t)0.70384524052448504960f, -(float16_t)-0.71358486878079352422f,(float16_t)0.70056879394324833576f, -(float16_t)-0.71680127852109953857f,(float16_t)0.69727751083088651551f, -(float16_t)-0.72000250796138165477f,(float16_t)0.69397146088965389055f, -(float16_t)-0.72318848930652745999f,(float16_t)0.69065071413453460458f, -(float16_t)-0.72635915508434578669f,(float16_t)0.68731534089175927438f, -(float16_t)-0.72951443814699679091f,(float16_t)0.68396541179731562554f, 
-(float16_t)-0.73265427167241270467f,(float16_t)0.68060099779545324417f, -(float16_t)-0.73577858916571337033f,(float16_t)0.67722217013718055689f, -(float16_t)-0.73888732446061511361f,(float16_t)0.67382900037875614885f, -(float16_t)-0.74198041172083095685f,(float16_t)0.67042156038017319819f, -(float16_t)-0.74505778544146594733f,(float16_t)0.66699992230363758239f, -(float16_t)-0.74811938045040360379f,(float16_t)0.66356415861203976725f, -(float16_t)-0.75116513190968636771f,(float16_t)0.66011434206742047870f, -(float16_t)-0.75419497531688917125f,(float16_t)0.65665054572942904709f, -(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.76020668165120219850f,(float16_t)0.64968130739068341573f, -(float16_t)-0.76318841726338115805f,(float16_t)0.64617601298331661663f, -(float16_t)-0.76615399019631280630f,(float16_t)0.64265703396622708699f, -(float16_t)-0.76910333764557947678f,(float16_t)0.63912444486377584241f, -(float16_t)-0.77203639715038441249f,(float16_t)0.63557832048855622542f, -(float16_t)-0.77495310659487381955f,(float16_t)0.63201873593980906207f, -(float16_t)-0.77785340420945303652f,(float16_t)0.62844576660183271155f, -(float16_t)-0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)-0.78360451860963820092f,(float16_t)0.62125997651108755271f, -(float16_t)-0.78645521359908576731f,(float16_t)0.61764730793780386886f, -(float16_t)-0.78928925316888576269f,(float16_t)0.61402155893103838036f, -(float16_t)-0.79210657730021216683f,(float16_t)0.61038280627630969732f, -(float16_t)-0.79490712632823679051f,(float16_t)0.60673112703452469763f, -(float16_t)-0.79769084094339093305f,(float16_t)0.60306659854034838641f, -(float16_t)-0.80045766219262259877f,(float16_t)0.59938929840056465181f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.80594039057117627944f,(float16_t)0.59199669496204099239f, -(float16_t)-0.80865618158817498262f,(float16_t)0.58828154822264533408f, -(float16_t)-0.81135484701706372945f,(float16_t)0.58455394295301532637f, -(float16_t)-0.81403632970594841378f,(float16_t)0.58081395809576452649f, -(float16_t)-0.81670057286682784525f,(float16_t)0.57706167285567944170f, -(float16_t)-0.81934752007679700903f,(float16_t)0.57329716669804209328f, -(float16_t)-0.82197711527924133268f,(float16_t)0.56952051934694747359f, -(float16_t)-0.82458930278502506894f,(float16_t)0.56573181078361345353f, -(float16_t)-0.82718402727366902027f,(float16_t)0.56193112124468957980f, -(float16_t)-0.82976123379452293438f,(float16_t)0.55811853122055632426f, -(float16_t)-0.83232086776792957306f,(float16_t)0.55429412145362022546f, -(float16_t)-0.83486287498638001026f,(float16_t)0.55045797293660492233f, -(float16_t)-0.83738720161566182476f,(float16_t)0.54661016691083497143f, -(float16_t)-0.83989379419599952126f,(float16_t)0.54275078486451588944f, -(float16_t)-0.84238259964318584760f,(float16_t)0.53887990853100842248f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.84730663868585842646f,(float16_t)0.53110400115125488973f, -(float16_t)-0.84974176800085254868f,(float16_t)0.52719913478190127964f, -(float16_t)-0.85215890162391960683f,(float16_t)0.52328310347565665239f, -(float16_t)-0.85455798836540042274f,(float16_t)0.51935599016558975372f, -(float16_t)-0.85693897741782865118f,(float16_t)0.51541787801946314929f, -(float16_t)-0.85930181835700836235f,(float16_t)0.51146885043797052361f, -(float16_t)-0.86164646114308129921f,(float16_t)0.50750899105297098135f, 
-(float16_t)-0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)-0.86628095402451299467f,(float16_t)0.49955711254508189390f, -(float16_t)-0.86857070597134089507f,(float16_t)0.49556526182577254058f, -(float16_t)-0.87084206347007886428f,(float16_t)0.49156291610654989643f, -(float16_t)-0.87309497841829009079f,(float16_t)0.48755016014843588490f, -(float16_t)-0.87532940310411089246f,(float16_t)0.48352707893291863028f, -(float16_t)-0.87754529020726113053f,(float16_t)0.47949375766015328582f, -(float16_t)-0.87974259280004729611f,(float16_t)0.47545028174715608937f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.88408125871263487650f,(float16_t)0.46733320874198858164f, -(float16_t)-0.88622253014888052736f,(float16_t)0.46325978355186031576f, -(float16_t)-0.88834503330959624368f,(float16_t)0.45917654752194420054f, -(float16_t)-0.89044872324475787817f,(float16_t)0.45508358712634389143f, -(float16_t)-0.89253355540276457791f,(float16_t)0.45098098904510386387f, -(float16_t)-0.89459948563138269595f,(float16_t)0.44686884016237415906f, -(float16_t)-0.89664647017868026602f,(float16_t)0.44274722756456996731f, -(float16_t)-0.89867446569395392775f,(float16_t)0.43861623853852754751f, -(float16_t)-0.90068342922864674804f,(float16_t)0.43447596056965598343f, -(float16_t)-0.90267331823725871498f,(float16_t)0.43032648134008288920f, -(float16_t)-0.90464409057824612947f,(float16_t)0.42616788872679983724f, -(float16_t)-0.90659570451491533483f,(float16_t)0.42200027079979984812f, -(float16_t)-0.90852811871630612117f,(float16_t)0.41782371582021243794f, -(float16_t)-0.91044129225806713634f,(float16_t)0.41363831223843466889f, -(float16_t)-0.91233518462332274801f,(float16_t)0.40944414869225770337f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, -(float16_t)-0.91606496579933172075f,(float16_t)0.40102989718357562321f, -(float16_t)-0.91790077562139049672f,(float16_t)0.39680998741671025254f, -(float16_t)-0.91971714629122736095f,(float16_t)0.39258167407295141427f, -(float16_t)-0.92151403934204179080f,(float16_t)0.38834504669882657923f, -(float16_t)-0.92329141671952752457f,(float16_t)0.38410019501693531963f, -(float16_t)-0.92504924078267747323f,(float16_t)0.37984720892405138271f, -(float16_t)-0.92678747430458174872f,(float16_t)0.37558617848921738158f, -(float16_t)-0.92850608047321547822f,(float16_t)0.37131719395183770960f, -(float16_t)-0.93020502289221906889f,(float16_t)0.36704034571976729140f, -(float16_t)-0.93188426558166803648f,(float16_t)0.36275572436739728088f, -(float16_t)-0.93354377297883617270f,(float16_t)0.35846342063373659581f, -(float16_t)-0.93518350993894761025f,(float16_t)0.35416352542049039931f, -(float16_t)-0.93680344173592167145f,(float16_t)0.34985612979013486212f, -(float16_t)-0.93840353406310816897f,(float16_t)0.34554132496398898278f, -(float16_t)-0.93998375303401382475f,(float16_t)0.34121920232028268849f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.94308443746609338376f,(float16_t)0.33255336986604444593f, -(float16_t)-0.94460483726148014583f,(float16_t)0.32820984357909271933f, -(float16_t)-0.94610523237040333733f,(float16_t)0.32385936651785302010f, -(float16_t)-0.94758559101774109124f,(float16_t)0.31950203081601580291f, -(float16_t)-0.94904588185270055689f,(float16_t)0.31513792875252250036f, -(float16_t)-0.95048607394948170235f,(float16_t)0.31076715274961153046f, -(float16_t)-0.95190613680793234597f,(float16_t)0.30638979537086091787f, 
-(float16_t)-0.95330604035419386211f,(float16_t)0.30200594931922802866f, -(float16_t)-0.95468575494133833814f,(float16_t)0.29761570743508614090f, -(float16_t)-0.95604525134999629454f,(float16_t)0.29321916269425896129f, -(float16_t)-0.95738450078897585627f,(float16_t)0.28881640820604975728f, -(float16_t)-0.95870347489587148804f,(float16_t)0.28440753721127209896f, -(float16_t)-0.96000214573766584625f,(float16_t)0.27999264308027344006f, -(float16_t)-0.96128048581132063966f,(float16_t)0.27557181931095831029f, -(float16_t)-0.96253846804435916340f,(float16_t)0.27114515952680812161f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.96499325285492032478f,(float16_t)0.26227470702391370017f, -(float16_t)-0.96619000344541250413f,(float16_t)0.25783110216215898713f, -(float16_t)-0.96736629222232850545f,(float16_t)0.25338203699557010351f, -(float16_t)-0.96852209427441737777f,(float16_t)0.24892760574572009302f, -(float16_t)-0.96965738512429233698f,(float16_t)0.24446790274782448371f, -(float16_t)-0.97077214072895023911f,(float16_t)0.24000302244874177626f, -(float16_t)-0.97186633748027928537f,(float16_t)0.23553305940497573645f, -(float16_t)-0.97293995220556006576f,(float16_t)0.23105810828067133156f, -(float16_t)-0.97399296216795583359f,(float16_t)0.22657826384561016719f, -(float16_t)-0.97502534506699412020f,(float16_t)0.22209362097320364815f, -(float16_t)-0.97603707903903902388f,(float16_t)0.21760427463848372454f, -(float16_t)-0.97702814265775439484f,(float16_t)0.21311031991609141745f, -(float16_t)-0.97799851493455713936f,(float16_t)0.20861185197826351279f, -(float16_t)-0.97894817531906219710f,(float16_t)0.20410896609281684033f, -(float16_t)-0.97987710369951763756f,(float16_t)0.19960175762113091524f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98167268619698311305f,(float16_t)0.19057475482025307278f, -(float16_t)-0.98253930228744124076f,(float16_t)0.18605515166344691047f, -(float16_t)-0.98338511032155118130f,(float16_t)0.18153160826112521575f, -(float16_t)-0.98421009238692902521f,(float16_t)0.17700422041214894375f, -(float16_t)-0.98501423101223983814f,(float16_t)0.17247308399679611712f, -(float16_t)-0.98579750916756736512f,(float16_t)0.16793829497473128365f, -(float16_t)-0.98655991026477540817f,(float16_t)0.16339994938297328075f, -(float16_t)-0.98730141815785843473f,(float16_t)0.15885814333386147346f, -(float16_t)-0.98802201714328352633f,(float16_t)0.15431297301302007718f, -(float16_t)-0.98872169196032377858f,(float16_t)0.14976453467732145364f, -(float16_t)-0.98940042779138037687f,(float16_t)0.14521292465284735274f, -(float16_t)-0.99005821026229701154f,(float16_t)0.14065823933284954395f, -(float16_t)-0.99069502544266463406f,(float16_t)0.13610057517570647856f, -(float16_t)-0.99131085984611544415f,(float16_t)0.13154002870288333815f, -(float16_t)-0.99190570043060932726f,(float16_t)0.12697669649688606008f, -(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, -(float16_t)-0.99303235019785141002f,(float16_t)0.11784206150832508830f, -(float16_t)-0.99356413552059530403f,(float16_t)0.11327095217756441570f, -(float16_t)-0.99407487930487936634f,(float16_t)0.10869744401313874427f, -(float16_t)-0.99456457073425541537f,(float16_t)0.10412163387205457254f, -(float16_t)-0.99503319943811863180f,(float16_t)0.09954361866006927739f, -(float16_t)-0.99548075549192693856f,(float16_t)0.09496349532963890838f, -(float16_t)-0.99590722941741172125f,(float16_t)0.09038136087786528827f, 
-(float16_t)-0.99631261218277800129f,(float16_t)0.08579731234444015753f, -(float16_t)-0.99669689520289606044f,(float16_t)0.08121144680959266338f, -(float16_t)-0.99706007033948296225f,(float16_t)0.07662386139203168633f, -(float16_t)-0.99740212990127530279f,(float16_t)0.07203465324688947125f, -(float16_t)-0.99772306664419163624f,(float16_t)0.06744391956366417584f, -(float16_t)-0.99802287377148624081f,(float16_t)0.06285175756416148951f, -(float16_t)-0.99830154493389289261f,(float16_t)0.05825826450043579408f, -(float16_t)-0.99855907422975931365f,(float16_t)0.05366353765273051968f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)-0.99901068585407337697f,(float16_t)0.04447077185493858442f, -(float16_t)-0.99920475861836388631f,(float16_t)0.03987292758774012985f, -(float16_t)-0.99937767038800284780f,(float16_t)0.03527423889821423159f, -(float16_t)-0.99952941750109314256f,(float16_t)0.03067480317663686534f, -(float16_t)-0.99965999674395922270f,(float16_t)0.02607471782910409860f, -(float16_t)-0.99976940535121527898f,(float16_t)0.02147408027546966747f, -(float16_t)-0.99985764100582386060f,(float16_t)0.01687298794728183532f, -(float16_t)-0.99992470183914450299f,(float16_t)0.01227153828572000692f, -(float16_t)-0.99997058643097413988f,(float16_t)0.00766982873953113778f, -(float16_t)-0.99999529380957619118f,(float16_t)0.00306795676296597701f, -(float16_t)-0.99999882345170187925f,(float16_t)-0.00153398018628480431f, -(float16_t)-0.99998117528260110909f,(float16_t)-0.00613588464915455420f, -(float16_t)-0.99994234967602391162f,(float16_t)-0.01073765916726416615f, -(float16_t)-0.99988234745421256111f,(float16_t)-0.01533920628498781566f, -(float16_t)-0.99980116988788425569f,(float16_t)-0.01994042855151419158f, -(float16_t)-0.99969881869620424997f,(float16_t)-0.02454122852291207996f, -(float16_t)-0.99957529604674921764f,(float16_t)-0.02914150876419355565f, -(float16_t)-0.99943060455546173237f,(float16_t)-0.03374117185137745500f, -(float16_t)-0.99926474728659442359f,(float16_t)-0.03834012037355261082f, -(float16_t)-0.99907772775264536147f,(float16_t)-0.04293825693494077861f, -(float16_t)-0.99886954991428356099f,(float16_t)-0.04753548415695929563f, -(float16_t)-0.99864021818026527111f,(float16_t)-0.05213170468028335142f, -(float16_t)-0.99838973740734016094f,(float16_t)-0.05672682116690781762f, -(float16_t)-0.99811811290014917919f,(float16_t)-0.06132073630220824523f, -(float16_t)-0.99782535041111164453f,(float16_t)-0.06591335279700352712f, -(float16_t)-0.99751145614030345410f,(float16_t)-0.07050457338961360620f, -(float16_t)-0.99717643673532618820f,(float16_t)-0.07509430084792109716f, -(float16_t)-0.99682029929116577893f,(float16_t)-0.07968243797142994522f, -(float16_t)-0.99644305135004263008f,(float16_t)-0.08426888759332393231f, -(float16_t)-0.99604470090125196702f,(float16_t)-0.08885355258252450317f, -(float16_t)-0.99562525638099430569f,(float16_t)-0.09343633584574773110f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.99472312110432570265f,(float16_t)-0.10259586902243630901f, -(float16_t)-0.99424044945318790223f,(float16_t)-0.10717242495680891212f, -(float16_t)-0.99373672194072470987f,(float16_t)-0.11174671121112625394f, -(float16_t)-0.99321194923479461103f,(float16_t)-0.11631863091190447479f, -(float16_t)-0.99266614244894801899f,(float16_t)-0.12088808723577681992f, -(float16_t)-0.99209931314219179654f,(float16_t)-0.12545498341154601163f, -(float16_t)-0.99151147331874400770f,(float16_t)-0.13001922272223317978f, 
-(float16_t)-0.99090263542778000971f,(float16_t)-0.13458070850712605671f, -(float16_t)-0.99027281236316910817f,(float16_t)-0.13913934416382611747f, -(float16_t)-0.98962201746320088702f,(float16_t)-0.14369503315029438784f, -(float16_t)-0.98895026451030298986f,(float16_t)-0.14824767898689603096f, -(float16_t)-0.98825756773074946437f,(float16_t)-0.15279718525844343535f, -(float16_t)-0.98754394179435922574f,(float16_t)-0.15734345561623830356f, -(float16_t)-0.98680940181418552726f,(float16_t)-0.16188639378011149272f, -(float16_t)-0.98605396334619543897f,(float16_t)-0.16642590354046382650f, -(float16_t)-0.98527764238894133264f,(float16_t)-0.17096188876030096737f, -(float16_t)-0.98448045538322093151f,(float16_t)-0.17549425337727120322f, -(float16_t)-0.98366241921173025453f,(float16_t)-0.18002290140569934818f, -(float16_t)-0.98282355119870534743f,(float16_t)-0.18454773693861947770f, -(float16_t)-0.98196386910955524296f,(float16_t)-0.18906866414980610935f, -(float16_t)-0.98108339115048670553f,(float16_t)-0.19358558729580355173f, -(float16_t)-0.98018213596811742949f,(float16_t)-0.19809841071795356027f, -(float16_t)-0.97926012264908202098f,(float16_t)-0.20260703884442113343f, -(float16_t)-0.97831737071962765473f,(float16_t)-0.20711137619221858808f, -(float16_t)-0.97735390014519996082f,(float16_t)-0.21161132736922766417f, -(float16_t)-0.97636973133002125103f,(float16_t)-0.21610679707621921475f, -(float16_t)-0.97536488511665697665f,(float16_t)-0.22059769010887325669f, -(float16_t)-0.97433938278557585821f,(float16_t)-0.22508391135979261000f, -(float16_t)-0.97329324605469824672f,(float16_t)-0.22956536582051870199f, -(float16_t)-0.97222649707893638027f,(float16_t)-0.23404195858354326365f, -(float16_t)-0.97113915844972520386f,(float16_t)-0.23851359484431830515f, -(float16_t)-0.97003125319454397424f,(float16_t)-0.24298017990326381543f, -(float16_t)-0.96890280477642887202f,(float16_t)-0.24744161916777326904f, -(float16_t)-0.96775383709347551076f,(float16_t)-0.25189781815421696809f, -(float16_t)-0.96658437447833311928f,(float16_t)-0.25634868248994291395f, -(float16_t)-0.96539444169768939830f,(float16_t)-0.26079411791527562503f, -(float16_t)-0.96418406395174582890f,(float16_t)-0.26523403028551151284f, -(float16_t)-0.96295326687368398844f,(float16_t)-0.26966832557291481320f, -(float16_t)-0.96170207652912265139f,(float16_t)-0.27409690986870616225f, -(float16_t)-0.96043051941556589757f,(float16_t)-0.27851968938505289319f, -(float16_t)-0.95913862246184200533f,(float16_t)-0.28293657045705516984f, -(float16_t)-0.95782641302753290802f,(float16_t)-0.28734745954472939999f, -(float16_t)-0.95649391890239510161f,(float16_t)-0.29175226323498920644f, -(float16_t)-0.95514116830577078243f,(float16_t)-0.29615088824362378883f, -(float16_t)-0.95376818988599032512f,(float16_t)-0.30054324141727345454f, -(float16_t)-0.95237501271976587880f,(float16_t)-0.30492922973540242948f, -(float16_t)-0.95096166631157508231f,(float16_t)-0.30930876031226878231f, -(float16_t)-0.94952818059303678577f,(float16_t)-0.31368174039889118454f, -(float16_t)-0.94807458592227633609f,(float16_t)-0.31804807738501467140f, -(float16_t)-0.94660091308328364601f,(float16_t)-0.32240767880106963039f, -(float16_t)-0.94510719328526060501f,(float16_t)-0.32676045232013156694f, -(float16_t)-0.94359345816196038559f,(float16_t)-0.33110630575987626267f, -(float16_t)-0.94205973977101742367f,(float16_t)-0.33544514708453149199f, -(float16_t)-0.94050607059326840620f,(float16_t)-0.33977688440682679571f, -(float16_t)-0.93893248353206459900f,(float16_t)-0.34410142598993881391f, 
-(float16_t)-0.93733901191257495977f,(float16_t)-0.34841868024943456472f, -(float16_t)-0.93572568948108036935f,(float16_t)-0.35272855575521072646f, -(float16_t)-0.93409255040425887007f,(float16_t)-0.35703096123343008861f, -(float16_t)-0.93243962926846246653f,(float16_t)-0.36132580556845395048f, -(float16_t)-0.93076696107898382326f,(float16_t)-0.36561299780477357624f, -(float16_t)-0.92907458125931585702f,(float16_t)-0.36989244714893387833f, -(float16_t)-0.92736252565040111495f,(float16_t)-0.37416406297145782256f, -(float16_t)-0.92563083050987282618f,(float16_t)-0.37842775480876539307f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.92210866874334518339f,(float16_t)-0.38693100551438852630f, -(float16_t)-0.92031827670911059425f,(float16_t)-0.39117038430225381518f, -(float16_t)-0.91850839432521225181f,(float16_t)-0.39540147894781629834f, -(float16_t)-0.91667905992104270485f,(float16_t)-0.39962419984564684361f, -(float16_t)-0.91483031223794608611f,(float16_t)-0.40383845756765418544f, -(float16_t)-0.91296219042839832358f,(float16_t)-0.40804416286497835475f, -(float16_t)-0.91107473405517647169f,(float16_t)-0.41224122666988260999f, -(float16_t)-0.90916798309052249127f,(float16_t)-0.41642956009763693048f, -(float16_t)-0.90724197791529592738f,(float16_t)-0.42060907444840234248f, -(float16_t)-0.90529675931811881551f,(float16_t)-0.42477968120910863936f, -(float16_t)-0.90333236849451192807f,(float16_t)-0.42894129205532938176f, -(float16_t)-0.90134884704602202810f,(float16_t)-0.43309381885315184624f, -(float16_t)-0.89934623697934157338f,(float16_t)-0.43723717366104403181f, -(float16_t)-0.89732458070541831763f,(float16_t)-0.44137126873171667052f, -(float16_t)-0.89528392103855747308f,(float16_t)-0.44549601651398174074f, -(float16_t)-0.89322430119551532446f,(float16_t)-0.44961132965460665067f, -(float16_t)-0.89114576479458340597f,(float16_t)-0.45371712100016353686f, -(float16_t)-0.88904835585466468473f,(float16_t)-0.45781330359887695280f, -(float16_t)-0.88693211879434230571f,(float16_t)-0.46189979070246250936f, -(float16_t)-0.88479709843093790056f,(float16_t)-0.46597649576796595916f, -(float16_t)-0.88264333997956290201f,(float16_t)-0.47004333245959545318f, -(float16_t)-0.88047088905216086552f,(float16_t)-0.47410021465054985601f, -(float16_t)-0.87827979165654157523f,(float16_t)-0.47814705642484295334f, -(float16_t)-0.87607009419540660122f,(float16_t)-0.48218377207912266336f, -(float16_t)-0.87384184346536686316f,(float16_t)-0.48621027612448636246f, -(float16_t)-0.87159508665595109012f,(float16_t)-0.49022648328829115938f, -(float16_t)-0.86932987134860673084f,(float16_t)-0.49423230851595978397f, -(float16_t)-0.86704624551569287050f,(float16_t)-0.49822766697278153547f, -(float16_t)-0.86474425751946248919f,(float16_t)-0.50221247404571056627f, -(float16_t)-0.86242395611104072373f,(float16_t)-0.50618664534515500630f, -(float16_t)-0.86008539042939025077f,(float16_t)-0.51015009670676658704f, -(float16_t)-0.85772861000027211809f,(float16_t)-0.51410274419322155026f, -(float16_t)-0.85535366473519613972f,(float16_t)-0.51804450409599922533f, -(float16_t)-0.85296060493036374162f,(float16_t)-0.52197529293715427823f, -(float16_t)-0.85054948126560347976f,(float16_t)-0.52589502747108463065f, -(float16_t)-0.84812034480329723252f,(float16_t)-0.52980362468629460526f, -(float16_t)-0.84567324698729906540f,(float16_t)-0.53370100180715296379f, -(float16_t)-0.84320823964184543620f,(float16_t)-0.53758707629564550512f, -(float16_t)-0.84072537497045818355f,(float16_t)-0.54146176585312322249f, 
-(float16_t)-0.83822470555483818977f,(float16_t)-0.54532498842204613076f, -(float16_t)-0.83570628435375271525f,(float16_t)-0.54917666218771943321f, -(float16_t)-0.83317016470191329613f,(float16_t)-0.55301670558002735678f, -(float16_t)-0.83061640030884642538f,(float16_t)-0.55684503727515988203f, -(float16_t)-0.82804504525775590729f,(float16_t)-0.56066157619733592021f, -(float16_t)-0.82545615400437755138f,(float16_t)-0.56446624152051938506f, -(float16_t)-0.82284978137582642788f,(float16_t)-0.56825895267013148970f, -(float16_t)-0.82022598256943468620f,(float16_t)-0.57203962932475704850f, -(float16_t)-0.81758481315158371139f,(float16_t)-0.57580819141784533866f, -(float16_t)-0.81492632905652662156f,(float16_t)-0.57956455913940574387f, -(float16_t)-0.81225058658520388200f,(float16_t)-0.58330865293769829094f, -(float16_t)-0.80955764240405148069f,(float16_t)-0.58704039352091774706f, -(float16_t)-0.80684755354379944503f,(float16_t)-0.59075970185887394237f, -(float16_t)-0.80412037739826591753f,(float16_t)-0.59446649918466420992f, -(float16_t)-0.80137617172314035141f,(float16_t)-0.59816070699634216190f, -(float16_t)-0.79861499463476093297f,(float16_t)-0.60184224705857991555f, -(float16_t)-0.79583690460888356633f,(float16_t)-0.60551104140432543410f, -(float16_t)-0.79304196047944375270f,(float16_t)-0.60916701233645309532f, -(float16_t)-0.79023022143731003197f,(float16_t)-0.61281008242940970820f, -(float16_t)-0.78740174702903142911f,(float16_t)-0.61644017453085364622f, -(float16_t)-0.78455659715557524159f,(float16_t)-0.62005721176328920663f, -(float16_t)-0.78169483207105938671f,(float16_t)-0.62366111752569464155f, -(float16_t)-0.77881651238147620031f,(float16_t)-0.62725181549514386070f, -(float16_t)-0.77592169904340779762f,(float16_t)-0.63082922962842424841f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.77008283699334811878f,(float16_t)-0.63794390362184394405f, -(float16_t)-0.76713891193582051109f,(float16_t)-0.64148101280858305095f, -(float16_t)-0.76417874053611678509f,(float16_t)-0.64500453681554381635f, -(float16_t)-0.76120238548426188974f,(float16_t)-0.64851440102211233008f, -(float16_t)-0.75820990981301539247f,(float16_t)-0.65201053109695950027f, -(float16_t)-0.75520137689653654700f,(float16_t)-0.65549285299961534967f, -(float16_t)-0.75217685044904269986f,(float16_t)-0.65896129298203731661f, -(float16_t)-0.74913639452345925918f,(float16_t)-0.66241577759017178373f, -(float16_t)-0.74608007351006400132f,(float16_t)-0.66585623366550938940f, -(float16_t)-0.74300795213512194071f,(float16_t)-0.66928258834663578725f, -(float16_t)-0.73992009545951631377f,(float16_t)-0.67269476907077274674f, -(float16_t)-0.73681656887737001504f,(float16_t)-0.67609270357531581208f, -(float16_t)-0.73369743811466037187f,(float16_t)-0.67947631989936485564f, -(float16_t)-0.73056276922782770189f,(float16_t)-0.68284554638524797010f, -(float16_t)-0.72741262860237587695f,(float16_t)-0.68620031168003847721f, -(float16_t)-0.72424708295146700276f,(float16_t)-0.68954054473706682948f, -(float16_t)-0.72106619931450810501f,(float16_t)-0.69286617481742462932f, -(float16_t)-0.71787004505573170920f,(float16_t)-0.69617713149146298601f, -(float16_t)-0.71465868786276898206f,(float16_t)-0.69947334464028387835f, -(float16_t)-0.71143219574521665560f,(float16_t)-0.70275474445722507788f, -(float16_t)-0.70819063703319551362f,(float16_t)-0.70602126144933952112f, -(float16_t)-0.70493408037590510329f,(float16_t)-0.70927282643886546687f, -(float16_t)-0.70166259474016867692f,(float16_t)-0.71250937056469221265f, 
-(float16_t)-0.69837624940897302661f,(float16_t)-0.71573082528381848366f, -(float16_t)-0.69507511398000099145f,(float16_t)-0.71893712237280438249f, -(float16_t)-0.69175925836415785852f,(float16_t)-0.72212819392921523409f, -(float16_t)-0.68842875278409054740f,(float16_t)-0.72530397237306065694f, -(float16_t)-0.68508366777270035541f,(float16_t)-0.72846439044822519637f, -(float16_t)-0.68172407417164981869f,(float16_t)-0.73160938122389251870f, -(float16_t)-0.67835004312986146857f,(float16_t)-0.73473887809596349907f, -(float16_t)-0.67496164610201225820f,(float16_t)-0.73785281478846576064f, -(float16_t)-0.67155895484701866316f,(float16_t)-0.74095112535495888384f, -(float16_t)-0.66814204142651867357f,(float16_t)-0.74403374417992906853f, -(float16_t)-0.66471097820334501538f,(float16_t)-0.74710060598017991040f, -(float16_t)-0.66126583783999237642f,(float16_t)-0.75015164580621496171f, -(float16_t)-0.65780669329707874837f,(float16_t)-0.75318679904361240940f, -(float16_t)-0.65433361783180066240f,(float16_t)-0.75620600141439442421f, -(float16_t)-0.65084668499638098638f,(float16_t)-0.75920918897838796102f, -(float16_t)-0.64734596863651250320f,(float16_t)-0.76219629813457856482f, -(float16_t)-0.64383154288979149715f,(float16_t)-0.76516726562245895860f, -(float16_t)-0.64030348218415200634f,(float16_t)-0.76812202852336519676f, -(float16_t)-0.63676186123628419899f,(float16_t)-0.77106052426181381776f, -(float16_t)-0.63320675505005752370f,(float16_t)-0.77398269060682256537f, -(float16_t)-0.62963823891492687324f,(float16_t)-0.77688846567323255332f, -(float16_t)-0.62605638840434374437f,(float16_t)-0.77977778792301433164f, -(float16_t)-0.62246127937414974518f,(float16_t)-0.78265059616657584041f, -(float16_t)-0.61885298796097643059f,(float16_t)-0.78550682956405382118f, -(float16_t)-0.61523159058062726334f,(float16_t)-0.78834642762660589455f, -(float16_t)-0.61159716392646201744f,(float16_t)-0.79116933021769009216f, -(float16_t)-0.60794978496777407617f,(float16_t)-0.79397547755433683925f, -(float16_t)-0.60428953094815607283f,(float16_t)-0.79676481020841871672f, -(float16_t)-0.60061647938386930612f,(float16_t)-0.79953726910790479110f, -(float16_t)-0.59693070806219639124f,(float16_t)-0.80229279553811572168f, -(float16_t)-0.59323229503980012822f,(float16_t)-0.80503133114296343553f, -(float16_t)-0.58952131864106382952f,(float16_t)-0.80775281792619046950f, -(float16_t)-0.58579785745643908612f,(float16_t)-0.81045719825259465718f, -(float16_t)-0.58206199034077532595f,(float16_t)-0.81314441484925370496f, -(float16_t)-0.57831379641165570060f,(float16_t)-0.81581441080673366972f, -(float16_t)-0.57455335504771631872f,(float16_t)-0.81846712958029832485f, -(float16_t)-0.57078074588696736669f,(float16_t)-0.82110251499110464835f, -(float16_t)-0.56699604882510901138f,(float16_t)-0.82372051122739109452f, -(float16_t)-0.56319934401383409117f,(float16_t)-0.82632106284566342325f, -(float16_t)-0.55939071185913646911f,(float16_t)-0.82890411477186465294f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.55173798840470766880f,(float16_t)-0.83401750110601791111f, -(float16_t)-0.54789405917310007865f,(float16_t)-0.83654772722351211645f, -(float16_t)-0.54403852673088415326f,(float16_t)-0.83906023707031252012f, -(float16_t)-0.54017147272989274320f,(float16_t)-0.84155497743689855472f, -(float16_t)-0.53629297906596329337f,(float16_t)-0.84403189549006629733f, -(float16_t)-0.53240312787719845655f,(float16_t)-0.84649093877405179320f, -(float16_t)-0.52850200154222859439f,(float16_t)-0.84893205521163961347f, 
-(float16_t)-0.52458968267846928235f,(float16_t)-0.85135519310526486247f, -(float16_t)-0.52066625414036715735f,(float16_t)-0.85376030113811141042f, -(float16_t)-0.51673179901765020627f,(float16_t)-0.85614732837519424979f, -(float16_t)-0.51278640063356295542f,(float16_t)-0.85851622426444285097f, -(float16_t)-0.50883014254310732216f,(float16_t)-0.86086693863776708735f, -(float16_t)-0.50486310853126736831f,(float16_t)-0.86319942171212427073f, -(float16_t)-0.50088538261124104789f,(float16_t)-0.86551362409056897818f, -(float16_t)-0.49689704902265435793f,(float16_t)-0.86780949676330332299f, -(float16_t)-0.49289819222978420443f,(float16_t)-0.87008699110871134952f, -(float16_t)-0.48888889691976367136f,(float16_t)-0.87234605889439120752f, -(float16_t)-0.48486924800079117537f,(float16_t)-0.87458665227817611321f, -(float16_t)-0.48083933060033440254f,(float16_t)-0.87680872380914542941f, -(float16_t)-0.47679923006332214364f,(float16_t)-0.87901222642863341417f, -(float16_t)-0.47274903195034317926f,(float16_t)-0.88119711347122187117f, -(float16_t)-0.46868882203582790114f,(float16_t)-0.88336333866573157891f, -(float16_t)-0.46461868630623814891f,(float16_t)-0.88551085613619973103f, -(float16_t)-0.46053871095823989412f,(float16_t)-0.88763962040285404598f, -(float16_t)-0.45644898239688419528f,(float16_t)-0.88974958638307266590f, -(float16_t)-0.45234958723377066692f,(float16_t)-0.89184070939234283415f, -(float16_t)-0.44824061228522010802f,(float16_t)-0.89391294514520314163f, -(float16_t)-0.44412214457042975546f,(float16_t)-0.89596624975618488484f, -(float16_t)-0.43999427130963336685f,(float16_t)-0.89800057974073976830f, -(float16_t)-0.43585707992225597440f,(float16_t)-0.90001589201615994629f, -(float16_t)-0.43171065802505731446f,(float16_t)-0.90201214390249317976f, -(float16_t)-0.42755509343028247349f,(float16_t)-0.90398929312344311615f, -(float16_t)-0.42339047414379599177f,(float16_t)-0.90594729780726845902f, -(float16_t)-0.41921688836322429372f,(float16_t)-0.90788611648766603945f, -(float16_t)-0.41503442447608152044f,(float16_t)-0.90980570810465233311f, -(float16_t)-0.41084317105790418845f,(float16_t)-0.91170603200542976730f, -(float16_t)-0.40664321687036886210f,(float16_t)-0.91358704794525091852f, -(float16_t)-0.40243465085941865222f,(float16_t)-0.91544871608826772214f, -(float16_t)-0.39821756215337417162f,(float16_t)-0.91729099700837768427f, -(float16_t)-0.39399204006104820985f,(float16_t)-0.91911385169005765938f, -(float16_t)-0.38975817406985696634f,(float16_t)-0.92091724152918930102f, -(float16_t)-0.38551605384391890441f,(float16_t)-0.92270112833387851747f, -(float16_t)-0.38126576922216276477f,(float16_t)-0.92446547432526249288f, -(float16_t)-0.37700741021641820394f,(float16_t)-0.92621024213831137928f, -(float16_t)-0.37274106700951614712f,(float16_t)-0.92793539482261766516f, -(float16_t)-0.36846682995337221023f,(float16_t)-0.92964089584318132520f, -(float16_t)-0.36418478956708016936f,(float16_t)-0.93132670908118031505f, -(float16_t)-0.35989503653498794433f,(float16_t)-0.93299279883473895669f, -(float16_t)-0.35559766170478407377f,(float16_t)-0.93463912981968066962f, -(float16_t)-0.35129275608556687072f,(float16_t)-0.93626566717027837061f, -(float16_t)-0.34698041084592379235f,(float16_t)-0.93787237643998977443f, -(float16_t)-0.34266071731199487793f,(float16_t)-0.93945922360218969693f, -(float16_t)-0.33833376696554123830f,(float16_t)-0.94102617505088925753f, -(float16_t)-0.33399965144200982614f,(float16_t)-0.94257319760144675502f, -(float16_t)-0.32965846252858749255f,(float16_t)-0.94410025849127265918f, 
-(float16_t)-0.32531029216226331480f,(float16_t)-0.94560732538052116869f, -(float16_t)-0.32095523242787515894f,(float16_t)-0.94709436635277721717f, -(float16_t)-0.31659337555616617887f,(float16_t)-0.94856134991573015647f, -(float16_t)-0.31222481392182477311f,(float16_t)-0.95000824500184311017f, -(float16_t)-0.30784964004153508865f,(float16_t)-0.95143502096900833820f, -(float16_t)-0.30346794657201103806f,(float16_t)-0.95284164760119871573f, -(float16_t)-0.29907982630804058610f,(float16_t)-0.95422809510910555630f, -(float16_t)-0.29468537218051488180f,(float16_t)-0.95559433413077088382f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)-0.28587783472708105936f,(float16_t)-0.95826607140801756124f, -(float16_t)-0.28146493792575794091f,(float16_t)-0.95957151308198451733f, -(float16_t)-0.27704608030610028413f,(float16_t)-0.96085663310767954748f, -(float16_t)-0.27262135544994886560f,(float16_t)-0.96212140426904158019f, -(float16_t)-0.26819085706340350939f,(float16_t)-0.96336579978095393528f, -(float16_t)-0.26375467897483123592f,(float16_t)-0.96458979328981275803f, -(float16_t)-0.25931291513288645678f,(float16_t)-0.96579335887408357397f, -(float16_t)-0.25486565960451434965f,(float16_t)-0.96697647104485218161f, -(float16_t)-0.25041300657296539089f,(float16_t)-0.96813910474636233339f, -(float16_t)-0.24595505033579515008f,(float16_t)-0.96928123535654830967f, -(float16_t)-0.24149188530286941345f,(float16_t)-0.97040283868755550234f, -(float16_t)-0.23702360599436766986f,(float16_t)-0.97150389098625167250f, -(float16_t)-0.23255030703877521692f,(float16_t)-0.97258436893473221296f, -(float16_t)-0.22807208317088611960f,(float16_t)-0.97364424965081186603f, -(float16_t)-0.22358902922978990402f,(float16_t)-0.97468351068851066810f, -(float16_t)-0.21910124015687010290f,(float16_t)-0.97570213003852845901f, -(float16_t)-0.21460881099378659176f,(float16_t)-0.97670008612871184184f, -(float16_t)-0.21011183688046985996f,(float16_t)-0.97767735782450992943f, -(float16_t)-0.20561041305309901706f,(float16_t)-0.97863392442942320759f, -(float16_t)-0.20110463484209206708f,(float16_t)-0.97956976568544051887f, -(float16_t)-0.19659459767008077846f,(float16_t)-0.98048486177346927395f, -(float16_t)-0.19208039704989252061f,(float16_t)-0.98137919331375456089f, -(float16_t)-0.18756212858253007436f,(float16_t)-0.98225274136628937249f, -(float16_t)-0.18303988795514095078f,(float16_t)-0.98310548743121628501f, -(float16_t)-0.17851377093899792325f,(float16_t)-0.98393741344921881176f, -(float16_t)-0.17398387338746373887f,(float16_t)-0.98474850180190420801f, -(float16_t)-0.16945029123396829207f,(float16_t)-0.98553873531217606185f, -(float16_t)-0.16491312048996975559f,(float16_t)-0.98630809724459866938f, -(float16_t)-0.16037245724292850668f,(float16_t)-0.98705657130575097380f, -(float16_t)-0.15582839765426498291f,(float16_t)-0.98778414164457217783f, -(float16_t)-0.15128103795733036097f,(float16_t)-0.98849079285269658701f, -(float16_t)-0.14673047445536230304f,(float16_t)-0.98917650996478090342f, -(float16_t)-0.14217680351944814165f,(float16_t)-0.98984127845882052821f, -(float16_t)-0.13762012158648653792f,(float16_t)-0.99048508425645698239f, -(float16_t)-0.13306052515713906459f,(float16_t)-0.99110791372327688986f, -(float16_t)-0.12849811079379358514f,(float16_t)-0.99170975366909952520f, -(float16_t)-0.12393297511851208981f,(float16_t)-0.99229059134825736699f, -(float16_t)-0.11936521481099168773f,(float16_t)-0.99285041445986510489f, -(float16_t)-0.11479492660650993108f,(float16_t)-0.99338921114808065305f, 
-(float16_t)-0.11022220729388330918f,(float16_t)-0.99390697000235606051f, -(float16_t)-0.10564715371341037997f,(float16_t)-0.99440368005767909576f, -(float16_t)-0.10106986275482798820f,(float16_t)-0.99487933079480561638f, -(float16_t)-0.09649043135525316173f,(float16_t)-0.99533391214048216877f, -(float16_t)-0.09190895649713282101f,(float16_t)-0.99576741446765981713f, -(float16_t)-0.08732553520619255882f,(float16_t)-0.99617982859569687015f, -(float16_t)-0.08274026454937570552f,(float16_t)-0.99657114579055483539f, -(float16_t)-0.07815324163279464831f,(float16_t)-0.99694135776498205015f, -(float16_t)-0.07356456359966735692f,(float16_t)-0.99729045667869020697f, -(float16_t)-0.06897432762826707919f,(float16_t)-0.99761843513851955478f, -(float16_t)-0.06438263092985731240f,(float16_t)-0.99792528619859599548f, -(float16_t)-0.05978957074664013188f,(float16_t)-0.99821100336047818846f, -(float16_t)-0.05519524434968971216f,(float16_t)-0.99847558057329477421f, -(float16_t)-0.05059974903689945513f,(float16_t)-0.99871901223387293811f, -(float16_t)-0.04600318213091520586f,(float16_t)-0.99894129318685687124f, -(float16_t)-0.04140564097707683661f,(float16_t)-0.99914241872481690532f, -(float16_t)-0.03680722294135933131f,(float16_t)-0.99932238458834943273f, -(float16_t)-0.03220802540830459970f,(float16_t)-0.99948118696616694567f, -(float16_t)-0.02760814577896616301f,(float16_t)-0.99961882249517863830f, -(float16_t)-0.02300768146883930970f,(float16_t)-0.99973528826056168306f, -(float16_t)-0.01840672990580516366f,(float16_t)-0.99983058179582340319f, -(float16_t)-0.01380538852806025008f,(float16_t)-0.99990470108285289808f, -(float16_t)-0.00920375478206008311f,(float16_t)-0.99995764455196389786f, -(float16_t)-0.00460192612044835019f,(float16_t)-0.99998941108192840321f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99983058179582340319f,(float16_t)0.01840672990580482019f, -(float16_t)0.99932238458834954375f,(float16_t)0.03680722294135883171f, -(float16_t)0.99847558057329477421f,(float16_t)0.05519524434968993420f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.99576741446765981713f,(float16_t)0.09190895649713272386f, -(float16_t)0.99390697000235606051f,(float16_t)0.11022220729388305938f, -(float16_t)0.99170975366909952520f,(float16_t)0.12849811079379316880f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.98630809724459866938f,(float16_t)0.16491312048996989437f, -(float16_t)0.98310548743121628501f,(float16_t)0.18303988795514095078f, -(float16_t)0.97956976568544051887f,(float16_t)0.20110463484209190055f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.97150389098625178352f,(float16_t)0.23702360599436719801f, -(float16_t)0.96697647104485207059f,(float16_t)0.25486565960451457169f, -(float16_t)0.96212140426904158019f,(float16_t)0.27262135544994897662f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.95143502096900833820f,(float16_t)0.30784964004153486661f, -(float16_t)0.94560732538052127971f,(float16_t)0.32531029216226292622f, -(float16_t)0.93945922360218991898f,(float16_t)0.34266071731199437833f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.92621024213831137928f,(float16_t)0.37700741021641825945f, -(float16_t)0.91911385169005777040f,(float16_t)0.39399204006104809883f, -(float16_t)0.91170603200542987832f,(float16_t)0.41084317105790391089f, 
-(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.89596624975618521791f,(float16_t)0.44412214457042920035f, -(float16_t)0.88763962040285393496f,(float16_t)0.46053871095824000514f, -(float16_t)0.87901222642863352519f,(float16_t)0.47679923006332208812f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.86086693863776730939f,(float16_t)0.50883014254310698909f, -(float16_t)0.85135519310526519554f,(float16_t)0.52458968267846894928f, -(float16_t)0.84155497743689844370f,(float16_t)0.54017147272989285423f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.82110251499110464835f,(float16_t)0.57078074588696725566f, -(float16_t)0.81045719825259476821f,(float16_t)0.58579785745643886408f, -(float16_t)0.79953726910790501314f,(float16_t)0.60061647938386897305f, -(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.77688846567323244230f,(float16_t)0.62963823891492698426f, -(float16_t)0.76516726562245895860f,(float16_t)0.64383154288979138613f, -(float16_t)0.75318679904361252042f,(float16_t)0.65780669329707863735f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.72846439044822519637f,(float16_t)0.68508366777270035541f, -(float16_t)0.71573082528381870571f,(float16_t)0.69837624940897280457f, -(float16_t)0.70275474445722529993f,(float16_t)0.71143219574521643356f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.67609270357531603413f,(float16_t)0.73681656887736979300f, -(float16_t)0.66241577759017178373f,(float16_t)0.74913639452345925918f, -(float16_t)0.64851440102211255212f,(float16_t)0.76120238548426177871f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.62005721176328920663f,(float16_t)0.78455659715557524159f, -(float16_t)0.60551104140432554512f,(float16_t)0.79583690460888345530f, -(float16_t)0.59075970185887427544f,(float16_t)0.80684755354379922299f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.56066157619733603124f,(float16_t)0.82804504525775579626f, -(float16_t)0.54532498842204646383f,(float16_t)0.83822470555483796772f, -(float16_t)0.52980362468629482731f,(float16_t)0.84812034480329712149f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.49822766697278186854f,(float16_t)0.86704624551569264845f, -(float16_t)0.48218377207912282989f,(float16_t)0.87607009419540660122f, -(float16_t)0.46597649576796612569f,(float16_t)0.88479709843093778954f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.43309381885315201277f,(float16_t)0.90134884704602202810f, -(float16_t)0.41642956009763731906f,(float16_t)0.90916798309052226923f, -(float16_t)0.39962419984564678810f,(float16_t)0.91667905992104270485f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.36561299780477396482f,(float16_t)0.93076696107898371224f, -(float16_t)0.34841868024943450921f,(float16_t)0.93733901191257495977f, -(float16_t)0.33110630575987642921f,(float16_t)0.94359345816196038559f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.29615088824362395536f,(float16_t)0.95514116830577067141f, -(float16_t)0.27851968938505305973f,(float16_t)0.96043051941556578655f, -(float16_t)0.26079411791527556952f,(float16_t)0.96539444169768939830f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, 
-(float16_t)0.22508391135979277653f,(float16_t)0.97433938278557585821f, -(float16_t)0.20711137619221856032f,(float16_t)0.97831737071962765473f, -(float16_t)0.18906866414980627589f,(float16_t)0.98196386910955524296f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.15279718525844340760f,(float16_t)0.98825756773074946437f, -(float16_t)0.13458070850712622324f,(float16_t)0.99090263542778000971f, -(float16_t)0.11631863091190487725f,(float16_t)0.99321194923479450001f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.07968243797143012563f,(float16_t)0.99682029929116566791f, -(float16_t)0.06132073630220864768f,(float16_t)0.99811811290014917919f, -(float16_t)0.04293825693494095902f,(float16_t)0.99907772775264536147f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)0.00613588464915451517f,(float16_t)0.99998117528260110909f, -(float16_t)-0.01227153828571982304f,(float16_t)0.99992470183914450299f, -(float16_t)-0.03067480317663645942f,(float16_t)0.99952941750109314256f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.06744391956366398155f,(float16_t)0.99772306664419163624f, -(float16_t)-0.08579731234443975507f,(float16_t)0.99631261218277800129f, -(float16_t)-0.10412163387205460030f,(float16_t)0.99456457073425541537f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.14065823933284912761f,(float16_t)0.99005821026229712256f, -(float16_t)-0.15885814333386127917f,(float16_t)0.98730141815785843473f, -(float16_t)-0.17700422041214874946f,(float16_t)0.98421009238692902521f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.21311031991609125091f,(float16_t)0.97702814265775439484f, -(float16_t)-0.23105810828067113727f,(float16_t)0.97293995220556017678f, -(float16_t)-0.24892760574572012078f,(float16_t)0.96852209427441737777f, -(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.28440753721127171039f,(float16_t)0.95870347489587159906f, -(float16_t)-0.30200594931922808417f,(float16_t)0.95330604035419386211f, -(float16_t)-0.31950203081601563637f,(float16_t)0.94758559101774120226f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.35416352542049039931f,(float16_t)0.93518350993894761025f, -(float16_t)-0.37131719395183748755f,(float16_t)0.92850608047321558924f, -(float16_t)-0.38834504669882619066f,(float16_t)0.92151403934204201285f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.42200027079979968159f,(float16_t)0.90659570451491533483f, -(float16_t)-0.43861623853852738097f,(float16_t)0.89867446569395392775f, -(float16_t)-0.45508358712634372489f,(float16_t)0.89044872324475798919f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.48755016014843571837f,(float16_t)0.87309497841829020182f, -(float16_t)-0.50353838372571746440f,(float16_t)0.86397285612158680745f, -(float16_t)-0.51935599016558964269f,(float16_t)0.85455798836540053376f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, -(float16_t)-0.55045797293660470029f,(float16_t)0.83486287498638012128f, -(float16_t)-0.56573181078361323149f,(float16_t)0.82458930278502517996f, -(float16_t)-0.58081395809576441547f,(float16_t)0.81403632970594852480f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, 
-(float16_t)-0.61038280627630958630f,(float16_t)0.79210657730021227785f, -(float16_t)-0.62485948814238623239f,(float16_t)0.78073722857209459924f, -(float16_t)-0.63912444486377573138f,(float16_t)0.76910333764557958780f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.66699992230363736034f,(float16_t)0.74505778544146605835f, -(float16_t)-0.68060099779545302212f,(float16_t)0.73265427167241281570f, -(float16_t)-0.69397146088965377952f,(float16_t)0.72000250796138176579f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.72000250796138165477f,(float16_t)0.69397146088965389055f, -(float16_t)-0.73265427167241270467f,(float16_t)0.68060099779545324417f, -(float16_t)-0.74505778544146594733f,(float16_t)0.66699992230363758239f, -(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.76910333764557947678f,(float16_t)0.63912444486377584241f, -(float16_t)-0.78073722857209448822f,(float16_t)0.62485948814238634341f, -(float16_t)-0.79210657730021216683f,(float16_t)0.61038280627630969732f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.81403632970594841378f,(float16_t)0.58081395809576452649f, -(float16_t)-0.82458930278502506894f,(float16_t)0.56573181078361345353f, -(float16_t)-0.83486287498638001026f,(float16_t)0.55045797293660492233f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.85455798836540042274f,(float16_t)0.51935599016558975372f, -(float16_t)-0.86397285612158669643f,(float16_t)0.50353838372571757542f, -(float16_t)-0.87309497841829009079f,(float16_t)0.48755016014843588490f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.89044872324475787817f,(float16_t)0.45508358712634389143f, -(float16_t)-0.89867446569395392775f,(float16_t)0.43861623853852754751f, -(float16_t)-0.90659570451491533483f,(float16_t)0.42200027079979984812f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, -(float16_t)-0.92151403934204179080f,(float16_t)0.38834504669882657923f, -(float16_t)-0.92850608047321547822f,(float16_t)0.37131719395183770960f, -(float16_t)-0.93518350993894761025f,(float16_t)0.35416352542049039931f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.94758559101774109124f,(float16_t)0.31950203081601580291f, -(float16_t)-0.95330604035419386211f,(float16_t)0.30200594931922802866f, -(float16_t)-0.95870347489587148804f,(float16_t)0.28440753721127209896f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.96852209427441737777f,(float16_t)0.24892760574572009302f, -(float16_t)-0.97293995220556006576f,(float16_t)0.23105810828067133156f, -(float16_t)-0.97702814265775439484f,(float16_t)0.21311031991609141745f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.98421009238692902521f,(float16_t)0.17700422041214894375f, -(float16_t)-0.98730141815785843473f,(float16_t)0.15885814333386147346f, -(float16_t)-0.99005821026229701154f,(float16_t)0.14065823933284954395f, -(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, -(float16_t)-0.99456457073425541537f,(float16_t)0.10412163387205457254f, -(float16_t)-0.99631261218277800129f,(float16_t)0.08579731234444015753f, -(float16_t)-0.99772306664419163624f,(float16_t)0.06744391956366417584f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, 
-(float16_t)-0.99952941750109314256f,(float16_t)0.03067480317663686534f, -(float16_t)-0.99992470183914450299f,(float16_t)0.01227153828572000692f, -(float16_t)-0.99998117528260110909f,(float16_t)-0.00613588464915455420f, -(float16_t)-0.99969881869620424997f,(float16_t)-0.02454122852291207996f, -(float16_t)-0.99907772775264536147f,(float16_t)-0.04293825693494077861f, -(float16_t)-0.99811811290014917919f,(float16_t)-0.06132073630220824523f, -(float16_t)-0.99682029929116577893f,(float16_t)-0.07968243797142994522f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.99321194923479461103f,(float16_t)-0.11631863091190447479f, -(float16_t)-0.99090263542778000971f,(float16_t)-0.13458070850712605671f, -(float16_t)-0.98825756773074946437f,(float16_t)-0.15279718525844343535f, -(float16_t)-0.98527764238894133264f,(float16_t)-0.17096188876030096737f, -(float16_t)-0.98196386910955524296f,(float16_t)-0.18906866414980610935f, -(float16_t)-0.97831737071962765473f,(float16_t)-0.20711137619221858808f, -(float16_t)-0.97433938278557585821f,(float16_t)-0.22508391135979261000f, -(float16_t)-0.97003125319454397424f,(float16_t)-0.24298017990326381543f, -(float16_t)-0.96539444169768939830f,(float16_t)-0.26079411791527562503f, -(float16_t)-0.96043051941556589757f,(float16_t)-0.27851968938505289319f, -(float16_t)-0.95514116830577078243f,(float16_t)-0.29615088824362378883f, -(float16_t)-0.94952818059303678577f,(float16_t)-0.31368174039889118454f, -(float16_t)-0.94359345816196038559f,(float16_t)-0.33110630575987626267f, -(float16_t)-0.93733901191257495977f,(float16_t)-0.34841868024943456472f, -(float16_t)-0.93076696107898382326f,(float16_t)-0.36561299780477357624f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.91667905992104270485f,(float16_t)-0.39962419984564684361f, -(float16_t)-0.90916798309052249127f,(float16_t)-0.41642956009763693048f, -(float16_t)-0.90134884704602202810f,(float16_t)-0.43309381885315184624f, -(float16_t)-0.89322430119551532446f,(float16_t)-0.44961132965460665067f, -(float16_t)-0.88479709843093790056f,(float16_t)-0.46597649576796595916f, -(float16_t)-0.87607009419540660122f,(float16_t)-0.48218377207912266336f, -(float16_t)-0.86704624551569287050f,(float16_t)-0.49822766697278153547f, -(float16_t)-0.85772861000027211809f,(float16_t)-0.51410274419322155026f, -(float16_t)-0.84812034480329723252f,(float16_t)-0.52980362468629460526f, -(float16_t)-0.83822470555483818977f,(float16_t)-0.54532498842204613076f, -(float16_t)-0.82804504525775590729f,(float16_t)-0.56066157619733592021f, -(float16_t)-0.81758481315158371139f,(float16_t)-0.57580819141784533866f, -(float16_t)-0.80684755354379944503f,(float16_t)-0.59075970185887394237f, -(float16_t)-0.79583690460888356633f,(float16_t)-0.60551104140432543410f, -(float16_t)-0.78455659715557524159f,(float16_t)-0.62005721176328920663f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.76120238548426188974f,(float16_t)-0.64851440102211233008f, -(float16_t)-0.74913639452345925918f,(float16_t)-0.66241577759017178373f, -(float16_t)-0.73681656887737001504f,(float16_t)-0.67609270357531581208f, -(float16_t)-0.72424708295146700276f,(float16_t)-0.68954054473706682948f, -(float16_t)-0.71143219574521665560f,(float16_t)-0.70275474445722507788f, -(float16_t)-0.69837624940897302661f,(float16_t)-0.71573082528381848366f, -(float16_t)-0.68508366777270035541f,(float16_t)-0.72846439044822519637f, -(float16_t)-0.67155895484701866316f,(float16_t)-0.74095112535495888384f, 
-(float16_t)-0.65780669329707874837f,(float16_t)-0.75318679904361240940f, -(float16_t)-0.64383154288979149715f,(float16_t)-0.76516726562245895860f, -(float16_t)-0.62963823891492687324f,(float16_t)-0.77688846567323255332f, -(float16_t)-0.61523159058062726334f,(float16_t)-0.78834642762660589455f, -(float16_t)-0.60061647938386930612f,(float16_t)-0.79953726910790479110f, -(float16_t)-0.58579785745643908612f,(float16_t)-0.81045719825259465718f, -(float16_t)-0.57078074588696736669f,(float16_t)-0.82110251499110464835f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.54017147272989274320f,(float16_t)-0.84155497743689855472f, -(float16_t)-0.52458968267846928235f,(float16_t)-0.85135519310526486247f, -(float16_t)-0.50883014254310732216f,(float16_t)-0.86086693863776708735f, -(float16_t)-0.49289819222978420443f,(float16_t)-0.87008699110871134952f, -(float16_t)-0.47679923006332214364f,(float16_t)-0.87901222642863341417f, -(float16_t)-0.46053871095823989412f,(float16_t)-0.88763962040285404598f, -(float16_t)-0.44412214457042975546f,(float16_t)-0.89596624975618488484f, -(float16_t)-0.42755509343028247349f,(float16_t)-0.90398929312344311615f, -(float16_t)-0.41084317105790418845f,(float16_t)-0.91170603200542976730f, -(float16_t)-0.39399204006104820985f,(float16_t)-0.91911385169005765938f, -(float16_t)-0.37700741021641820394f,(float16_t)-0.92621024213831137928f, -(float16_t)-0.35989503653498794433f,(float16_t)-0.93299279883473895669f, -(float16_t)-0.34266071731199487793f,(float16_t)-0.93945922360218969693f, -(float16_t)-0.32531029216226331480f,(float16_t)-0.94560732538052116869f, -(float16_t)-0.30784964004153508865f,(float16_t)-0.95143502096900833820f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)-0.27262135544994886560f,(float16_t)-0.96212140426904158019f, -(float16_t)-0.25486565960451434965f,(float16_t)-0.96697647104485218161f, -(float16_t)-0.23702360599436766986f,(float16_t)-0.97150389098625167250f, -(float16_t)-0.21910124015687010290f,(float16_t)-0.97570213003852845901f, -(float16_t)-0.20110463484209206708f,(float16_t)-0.97956976568544051887f, -(float16_t)-0.18303988795514095078f,(float16_t)-0.98310548743121628501f, -(float16_t)-0.16491312048996975559f,(float16_t)-0.98630809724459866938f, -(float16_t)-0.14673047445536230304f,(float16_t)-0.98917650996478090342f, -(float16_t)-0.12849811079379358514f,(float16_t)-0.99170975366909952520f, -(float16_t)-0.11022220729388330918f,(float16_t)-0.99390697000235606051f, -(float16_t)-0.09190895649713282101f,(float16_t)-0.99576741446765981713f, -(float16_t)-0.07356456359966735692f,(float16_t)-0.99729045667869020697f, -(float16_t)-0.05519524434968971216f,(float16_t)-0.99847558057329477421f, -(float16_t)-0.03680722294135933131f,(float16_t)-0.99932238458834943273f, -(float16_t)-0.01840672990580516366f,(float16_t)-0.99983058179582340319f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.99729045667869020697f,(float16_t)0.07356456359966742631f, -(float16_t)0.98917650996478101444f,(float16_t)0.14673047445536174793f, -(float16_t)0.97570213003852857003f,(float16_t)0.21910124015686979759f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.93299279883473895669f,(float16_t)0.35989503653498811087f, -(float16_t)0.90398929312344333820f,(float16_t)0.42755509343028208491f, -(float16_t)0.87008699110871146054f,(float16_t)0.49289819222978403790f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, 
-(float16_t)0.78834642762660622761f,(float16_t)0.61523159058062681925f, -(float16_t)0.74095112535495921691f,(float16_t)0.67155895484701833009f, -(float16_t)0.68954054473706694051f,(float16_t)0.72424708295146689174f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.57580819141784533866f,(float16_t)0.81758481315158371139f, -(float16_t)0.51410274419322166128f,(float16_t)0.85772861000027211809f, -(float16_t)0.44961132965460659516f,(float16_t)0.89322430119551532446f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.31368174039889157312f,(float16_t)0.94952818059303667475f, -(float16_t)0.24298017990326398197f,(float16_t)0.97003125319454397424f, -(float16_t)0.17096188876030135595f,(float16_t)0.98527764238894122162f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)0.02454122852291226384f,(float16_t)0.99969881869620424997f, -(float16_t)-0.04906767432741800800f,(float16_t)0.99879545620517240501f, -(float16_t)-0.12241067519921615403f,(float16_t)0.99247953459870996706f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.26671275747489830987f,(float16_t)0.96377606579543984022f, -(float16_t)-0.33688985339221994009f,(float16_t)0.94154406518302080631f, -(float16_t)-0.40524131400498974998f,(float16_t)0.91420975570353069095f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.53499761988709704230f,(float16_t)0.84485356524970722791f, -(float16_t)-0.59569930449243335691f,(float16_t)0.80320753148064494287f, -(float16_t)-0.65317284295377653347f,(float16_t)0.75720884650648467851f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.75720884650648467851f,(float16_t)0.65317284295377664449f, -(float16_t)-0.80320753148064483184f,(float16_t)0.59569930449243346793f, -(float16_t)-0.84485356524970711689f,(float16_t)0.53499761988709715332f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.91420975570353069095f,(float16_t)0.40524131400498991651f, -(float16_t)-0.94154406518302069529f,(float16_t)0.33688985339222032867f, -(float16_t)-0.96377606579543984022f,(float16_t)0.26671275747489847641f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.99247953459870996706f,(float16_t)0.12241067519921634832f, -(float16_t)-0.99879545620517240501f,(float16_t)0.04906767432741796636f, -(float16_t)-0.99969881869620424997f,(float16_t)-0.02454122852291207996f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.98527764238894133264f,(float16_t)-0.17096188876030096737f, -(float16_t)-0.97003125319454397424f,(float16_t)-0.24298017990326381543f, -(float16_t)-0.94952818059303678577f,(float16_t)-0.31368174039889118454f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.89322430119551532446f,(float16_t)-0.44961132965460665067f, -(float16_t)-0.85772861000027211809f,(float16_t)-0.51410274419322155026f, -(float16_t)-0.81758481315158371139f,(float16_t)-0.57580819141784533866f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.72424708295146700276f,(float16_t)-0.68954054473706682948f, -(float16_t)-0.67155895484701866316f,(float16_t)-0.74095112535495888384f, -(float16_t)-0.61523159058062726334f,(float16_t)-0.78834642762660589455f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, 
-(float16_t)-0.49289819222978420443f,(float16_t)-0.87008699110871134952f, -(float16_t)-0.42755509343028247349f,(float16_t)-0.90398929312344311615f, -(float16_t)-0.35989503653498794433f,(float16_t)-0.93299279883473895669f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)-0.21910124015687010290f,(float16_t)-0.97570213003852845901f, -(float16_t)-0.14673047445536230304f,(float16_t)-0.98917650996478090342f, -(float16_t)-0.07356456359966735692f,(float16_t)-0.99729045667869020697f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.95694033573220882438f,(float16_t)0.29028467725446233105f, -(float16_t)0.83146961230254523567f,(float16_t)0.55557023301960217765f, -(float16_t)0.63439328416364548779f,(float16_t)0.77301045336273688235f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)0.09801714032956077016f,(float16_t)0.99518472667219681771f, -(float16_t)-0.19509032201612819257f,(float16_t)0.98078528040323043058f, -(float16_t)-0.47139673682599769755f,(float16_t)0.88192126434835504956f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.88192126434835493853f,(float16_t)0.47139673682599780857f, -(float16_t)-0.98078528040323043058f,(float16_t)0.19509032201612860891f, -(float16_t)-0.99518472667219692873f,(float16_t)-0.09801714032956058975f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f, -(float16_t)-0.77301045336273710440f,(float16_t)-0.63439328416364526575f, -(float16_t)-0.55557023301960217765f,(float16_t)-0.83146961230254523567f, -(float16_t)-0.29028467725446244208f,(float16_t)-0.95694033573220882438f, -(float16_t)1.00000000000000000000f,(float16_t)0.00000000000000000000f, -(float16_t)0.38268343236508983729f,(float16_t)0.92387953251128673848f, -(float16_t)-0.70710678118654746172f,(float16_t)0.70710678118654757274f, -(float16_t)-0.92387953251128684951f,(float16_t)-0.38268343236508967076f,}; +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0046005249023f, +(float16_t)1.0000000000000f,(float16_t)0.0092010498047f, +(float16_t)1.0000000000000f,(float16_t)0.0138015747070f, +(float16_t)1.0000000000000f,(float16_t)0.0184020996094f, +(float16_t)0.9995117187500f,(float16_t)0.0230102539062f, +(float16_t)0.9995117187500f,(float16_t)0.0276031494141f, +(float16_t)0.9995117187500f,(float16_t)0.0321960449219f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)0.9990234375000f,(float16_t)0.0414123535156f, +(float16_t)0.9990234375000f,(float16_t)0.0459899902344f, +(float16_t)0.9985351562500f,(float16_t)0.0505981445312f, +(float16_t)0.9985351562500f,(float16_t)0.0552062988281f, +(float16_t)0.9980468750000f,(float16_t)0.0597839355469f, +(float16_t)0.9980468750000f,(float16_t)0.0643920898438f, +(float16_t)0.9975585937500f,(float16_t)0.0689697265625f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9970703125000f,(float16_t)0.0781250000000f, +(float16_t)0.9965820312500f,(float16_t)0.0827636718750f, +(float16_t)0.9960937500000f,(float16_t)0.0873413085938f, +(float16_t)0.9956054687500f,(float16_t)0.0919189453125f, +(float16_t)0.9951171875000f,(float16_t)0.0964965820312f, +(float16_t)0.9951171875000f,(float16_t)0.1010742187500f, +(float16_t)0.9946289062500f,(float16_t)0.1056518554688f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9931640625000f,(float16_t)0.1148071289062f, +(float16_t)0.9926757812500f,(float16_t)0.1193847656250f, 
+(float16_t)0.9921875000000f,(float16_t)0.1239624023438f, +(float16_t)0.9916992187500f,(float16_t)0.1285400390625f, +(float16_t)0.9912109375000f,(float16_t)0.1330566406250f, +(float16_t)0.9907226562500f,(float16_t)0.1375732421875f, +(float16_t)0.9897460937500f,(float16_t)0.1422119140625f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9882812500000f,(float16_t)0.1512451171875f, +(float16_t)0.9877929687500f,(float16_t)0.1558837890625f, +(float16_t)0.9868164062500f,(float16_t)0.1604003906250f, +(float16_t)0.9863281250000f,(float16_t)0.1649169921875f, +(float16_t)0.9853515625000f,(float16_t)0.1694335937500f, +(float16_t)0.9848632812500f,(float16_t)0.1739501953125f, +(float16_t)0.9838867187500f,(float16_t)0.1784667968750f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9824218750000f,(float16_t)0.1876220703125f, +(float16_t)0.9814453125000f,(float16_t)0.1921386718750f, +(float16_t)0.9804687500000f,(float16_t)0.1966552734375f, +(float16_t)0.9794921875000f,(float16_t)0.2010498046875f, +(float16_t)0.9785156250000f,(float16_t)0.2055664062500f, +(float16_t)0.9775390625000f,(float16_t)0.2100830078125f, +(float16_t)0.9765625000000f,(float16_t)0.2145996093750f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9746093750000f,(float16_t)0.2236328125000f, +(float16_t)0.9736328125000f,(float16_t)0.2280273437500f, +(float16_t)0.9726562500000f,(float16_t)0.2325439453125f, +(float16_t)0.9716796875000f,(float16_t)0.2370605468750f, +(float16_t)0.9702148437500f,(float16_t)0.2414550781250f, +(float16_t)0.9692382812500f,(float16_t)0.2459716796875f, +(float16_t)0.9682617187500f,(float16_t)0.2504882812500f, +(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9658203125000f,(float16_t)0.2592773437500f, +(float16_t)0.9643554687500f,(float16_t)0.2636718750000f, +(float16_t)0.9633789062500f,(float16_t)0.2683105468750f, +(float16_t)0.9619140625000f,(float16_t)0.2727050781250f, +(float16_t)0.9609375000000f,(float16_t)0.2770996093750f, +(float16_t)0.9594726562500f,(float16_t)0.2814941406250f, +(float16_t)0.9584960937500f,(float16_t)0.2858886718750f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9555664062500f,(float16_t)0.2946777343750f, +(float16_t)0.9541015625000f,(float16_t)0.2990722656250f, +(float16_t)0.9526367187500f,(float16_t)0.3034667968750f, +(float16_t)0.9516601562500f,(float16_t)0.3078613281250f, +(float16_t)0.9501953125000f,(float16_t)0.3122558593750f, +(float16_t)0.9487304687500f,(float16_t)0.3166503906250f, +(float16_t)0.9472656250000f,(float16_t)0.3210449218750f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9443359375000f,(float16_t)0.3295898437500f, +(float16_t)0.9423828125000f,(float16_t)0.3339843750000f, +(float16_t)0.9409179687500f,(float16_t)0.3383789062500f, +(float16_t)0.9394531250000f,(float16_t)0.3427734375000f, +(float16_t)0.9379882812500f,(float16_t)0.3469238281250f, +(float16_t)0.9360351562500f,(float16_t)0.3513183593750f, +(float16_t)0.9345703125000f,(float16_t)0.3557128906250f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9311523437500f,(float16_t)0.3642578125000f, +(float16_t)0.9296875000000f,(float16_t)0.3684082031250f, +(float16_t)0.9277343750000f,(float16_t)0.3728027343750f, +(float16_t)0.9262695312500f,(float16_t)0.3769531250000f, +(float16_t)0.9243164062500f,(float16_t)0.3813476562500f, +(float16_t)0.9228515625000f,(float16_t)0.3854980468750f, +(float16_t)0.9208984375000f,(float16_t)0.3896484375000f, 
+(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, +(float16_t)0.9174804687500f,(float16_t)0.3981933593750f, +(float16_t)0.9155273437500f,(float16_t)0.4023437500000f, +(float16_t)0.9135742187500f,(float16_t)0.4067382812500f, +(float16_t)0.9116210937500f,(float16_t)0.4108886718750f, +(float16_t)0.9096679687500f,(float16_t)0.4150390625000f, +(float16_t)0.9077148437500f,(float16_t)0.4191894531250f, +(float16_t)0.9057617187500f,(float16_t)0.4233398437500f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.9018554687500f,(float16_t)0.4316406250000f, +(float16_t)0.8999023437500f,(float16_t)0.4357910156250f, +(float16_t)0.8979492187500f,(float16_t)0.4399414062500f, +(float16_t)0.8959960937500f,(float16_t)0.4440917968750f, +(float16_t)0.8940429687500f,(float16_t)0.4482421875000f, +(float16_t)0.8916015625000f,(float16_t)0.4523925781250f, +(float16_t)0.8896484375000f,(float16_t)0.4565429687500f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8857421875000f,(float16_t)0.4645996093750f, +(float16_t)0.8833007812500f,(float16_t)0.4687500000000f, +(float16_t)0.8813476562500f,(float16_t)0.4726562500000f, +(float16_t)0.8789062500000f,(float16_t)0.4768066406250f, +(float16_t)0.8769531250000f,(float16_t)0.4809570312500f, +(float16_t)0.8745117187500f,(float16_t)0.4848632812500f, +(float16_t)0.8725585937500f,(float16_t)0.4887695312500f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8676757812500f,(float16_t)0.4968261718750f, +(float16_t)0.8657226562500f,(float16_t)0.5009765625000f, +(float16_t)0.8632812500000f,(float16_t)0.5048828125000f, +(float16_t)0.8608398437500f,(float16_t)0.5087890625000f, +(float16_t)0.8583984375000f,(float16_t)0.5126953125000f, +(float16_t)0.8559570312500f,(float16_t)0.5166015625000f, +(float16_t)0.8540039062500f,(float16_t)0.5205078125000f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8491210937500f,(float16_t)0.5283203125000f, +(float16_t)0.8466796875000f,(float16_t)0.5322265625000f, +(float16_t)0.8442382812500f,(float16_t)0.5361328125000f, +(float16_t)0.8417968750000f,(float16_t)0.5400390625000f, +(float16_t)0.8388671875000f,(float16_t)0.5439453125000f, +(float16_t)0.8364257812500f,(float16_t)0.5478515625000f, +(float16_t)0.8339843750000f,(float16_t)0.5517578125000f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8291015625000f,(float16_t)0.5595703125000f, +(float16_t)0.8261718750000f,(float16_t)0.5629882812500f, +(float16_t)0.8237304687500f,(float16_t)0.5668945312500f, +(float16_t)0.8212890625000f,(float16_t)0.5708007812500f, +(float16_t)0.8183593750000f,(float16_t)0.5747070312500f, +(float16_t)0.8159179687500f,(float16_t)0.5781250000000f, +(float16_t)0.8129882812500f,(float16_t)0.5820312500000f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.8076171875000f,(float16_t)0.5893554687500f, +(float16_t)0.8051757812500f,(float16_t)0.5932617187500f, +(float16_t)0.8022460937500f,(float16_t)0.5971679687500f, +(float16_t)0.7993164062500f,(float16_t)0.6005859375000f, +(float16_t)0.7968750000000f,(float16_t)0.6044921875000f, +(float16_t)0.7939453125000f,(float16_t)0.6079101562500f, +(float16_t)0.7910156250000f,(float16_t)0.6118164062500f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7856445312500f,(float16_t)0.6186523437500f, +(float16_t)0.7827148437500f,(float16_t)0.6225585937500f, +(float16_t)0.7797851562500f,(float16_t)0.6259765625000f, +(float16_t)0.7768554687500f,(float16_t)0.6293945312500f, 
+(float16_t)0.7739257812500f,(float16_t)0.6333007812500f, +(float16_t)0.7709960937500f,(float16_t)0.6367187500000f, +(float16_t)0.7680664062500f,(float16_t)0.6401367187500f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7622070312500f,(float16_t)0.6474609375000f, +(float16_t)0.7592773437500f,(float16_t)0.6508789062500f, +(float16_t)0.7563476562500f,(float16_t)0.6542968750000f, +(float16_t)0.7534179687500f,(float16_t)0.6577148437500f, +(float16_t)0.7500000000000f,(float16_t)0.6611328125000f, +(float16_t)0.7470703125000f,(float16_t)0.6645507812500f, +(float16_t)0.7441406250000f,(float16_t)0.6679687500000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7377929687500f,(float16_t)0.6748046875000f, +(float16_t)0.7348632812500f,(float16_t)0.6782226562500f, +(float16_t)0.7314453125000f,(float16_t)0.6816406250000f, +(float16_t)0.7285156250000f,(float16_t)0.6850585937500f, +(float16_t)0.7250976562500f,(float16_t)0.6884765625000f, +(float16_t)0.7221679687500f,(float16_t)0.6918945312500f, +(float16_t)0.7187500000000f,(float16_t)0.6953125000000f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7124023437500f,(float16_t)0.7016601562500f, +(float16_t)0.7094726562500f,(float16_t)0.7050781250000f, +(float16_t)0.7060546875000f,(float16_t)0.7080078125000f, +(float16_t)0.7026367187500f,(float16_t)0.7114257812500f, +(float16_t)0.6997070312500f,(float16_t)0.7148437500000f, +(float16_t)0.6962890625000f,(float16_t)0.7177734375000f, +(float16_t)0.6928710937500f,(float16_t)0.7211914062500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6860351562500f,(float16_t)0.7275390625000f, +(float16_t)0.6826171875000f,(float16_t)0.7304687500000f, +(float16_t)0.6796875000000f,(float16_t)0.7338867187500f, +(float16_t)0.6762695312500f,(float16_t)0.7368164062500f, +(float16_t)0.6728515625000f,(float16_t)0.7397460937500f, +(float16_t)0.6694335937500f,(float16_t)0.7431640625000f, +(float16_t)0.6660156250000f,(float16_t)0.7460937500000f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6591796875000f,(float16_t)0.7519531250000f, +(float16_t)0.6552734375000f,(float16_t)0.7553710937500f, +(float16_t)0.6518554687500f,(float16_t)0.7583007812500f, +(float16_t)0.6484375000000f,(float16_t)0.7612304687500f, +(float16_t)0.6450195312500f,(float16_t)0.7641601562500f, +(float16_t)0.6416015625000f,(float16_t)0.7670898437500f, +(float16_t)0.6381835937500f,(float16_t)0.7700195312500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6308593750000f,(float16_t)0.7758789062500f, +(float16_t)0.6274414062500f,(float16_t)0.7788085937500f, +(float16_t)0.6235351562500f,(float16_t)0.7817382812500f, +(float16_t)0.6201171875000f,(float16_t)0.7846679687500f, +(float16_t)0.6162109375000f,(float16_t)0.7875976562500f, +(float16_t)0.6127929687500f,(float16_t)0.7900390625000f, +(float16_t)0.6093750000000f,(float16_t)0.7929687500000f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.6020507812500f,(float16_t)0.7988281250000f, +(float16_t)0.5981445312500f,(float16_t)0.8012695312500f, +(float16_t)0.5942382812500f,(float16_t)0.8041992187500f, +(float16_t)0.5908203125000f,(float16_t)0.8066406250000f, +(float16_t)0.5869140625000f,(float16_t)0.8095703125000f, +(float16_t)0.5834960937500f,(float16_t)0.8120117187500f, +(float16_t)0.5795898437500f,(float16_t)0.8149414062500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5722656250000f,(float16_t)0.8203125000000f, 
+(float16_t)0.5683593750000f,(float16_t)0.8227539062500f, +(float16_t)0.5644531250000f,(float16_t)0.8256835937500f, +(float16_t)0.5605468750000f,(float16_t)0.8281250000000f, +(float16_t)0.5566406250000f,(float16_t)0.8305664062500f, +(float16_t)0.5532226562500f,(float16_t)0.8330078125000f, +(float16_t)0.5493164062500f,(float16_t)0.8359375000000f, +(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)0.5415039062500f,(float16_t)0.8408203125000f, +(float16_t)0.5375976562500f,(float16_t)0.8432617187500f, +(float16_t)0.5336914062500f,(float16_t)0.8457031250000f, +(float16_t)0.5297851562500f,(float16_t)0.8481445312500f, +(float16_t)0.5258789062500f,(float16_t)0.8505859375000f, +(float16_t)0.5219726562500f,(float16_t)0.8530273437500f, +(float16_t)0.5180664062500f,(float16_t)0.8554687500000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.5102539062500f,(float16_t)0.8598632812500f, +(float16_t)0.5063476562500f,(float16_t)0.8623046875000f, +(float16_t)0.5024414062500f,(float16_t)0.8647460937500f, +(float16_t)0.4982910156250f,(float16_t)0.8671875000000f, +(float16_t)0.4941406250000f,(float16_t)0.8691406250000f, +(float16_t)0.4902343750000f,(float16_t)0.8715820312500f, +(float16_t)0.4863281250000f,(float16_t)0.8740234375000f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4780273437500f,(float16_t)0.8784179687500f, +(float16_t)0.4741210937500f,(float16_t)0.8803710937500f, +(float16_t)0.4699707031250f,(float16_t)0.8828125000000f, +(float16_t)0.4660644531250f,(float16_t)0.8847656250000f, +(float16_t)0.4619140625000f,(float16_t)0.8867187500000f, +(float16_t)0.4577636718750f,(float16_t)0.8891601562500f, +(float16_t)0.4536132812500f,(float16_t)0.8911132812500f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4455566406250f,(float16_t)0.8955078125000f, +(float16_t)0.4414062500000f,(float16_t)0.8974609375000f, +(float16_t)0.4372558593750f,(float16_t)0.8994140625000f, +(float16_t)0.4331054687500f,(float16_t)0.9013671875000f, +(float16_t)0.4289550781250f,(float16_t)0.9033203125000f, +(float16_t)0.4248046875000f,(float16_t)0.9052734375000f, +(float16_t)0.4206542968750f,(float16_t)0.9072265625000f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.4123535156250f,(float16_t)0.9111328125000f, +(float16_t)0.4079589843750f,(float16_t)0.9130859375000f, +(float16_t)0.4038085937500f,(float16_t)0.9150390625000f, +(float16_t)0.3996582031250f,(float16_t)0.9165039062500f, +(float16_t)0.3955078125000f,(float16_t)0.9184570312500f, +(float16_t)0.3911132812500f,(float16_t)0.9204101562500f, +(float16_t)0.3869628906250f,(float16_t)0.9218750000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3784179687500f,(float16_t)0.9257812500000f, +(float16_t)0.3742675781250f,(float16_t)0.9272460937500f, +(float16_t)0.3698730468750f,(float16_t)0.9291992187500f, +(float16_t)0.3657226562500f,(float16_t)0.9306640625000f, +(float16_t)0.3613281250000f,(float16_t)0.9326171875000f, +(float16_t)0.3569335937500f,(float16_t)0.9340820312500f, +(float16_t)0.3527832031250f,(float16_t)0.9355468750000f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3439941406250f,(float16_t)0.9389648437500f, +(float16_t)0.3398437500000f,(float16_t)0.9404296875000f, +(float16_t)0.3354492187500f,(float16_t)0.9418945312500f, +(float16_t)0.3310546875000f,(float16_t)0.9433593750000f, +(float16_t)0.3266601562500f,(float16_t)0.9453125000000f, +(float16_t)0.3225097656250f,(float16_t)0.9467773437500f, 
+(float16_t)0.3181152343750f,(float16_t)0.9482421875000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.3093261718750f,(float16_t)0.9511718750000f, +(float16_t)0.3049316406250f,(float16_t)0.9521484375000f, +(float16_t)0.3005371093750f,(float16_t)0.9536132812500f, +(float16_t)0.2961425781250f,(float16_t)0.9550781250000f, +(float16_t)0.2917480468750f,(float16_t)0.9565429687500f, +(float16_t)0.2873535156250f,(float16_t)0.9580078125000f, +(float16_t)0.2829589843750f,(float16_t)0.9589843750000f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)0.2741699218750f,(float16_t)0.9619140625000f, +(float16_t)0.2697753906250f,(float16_t)0.9628906250000f, +(float16_t)0.2651367187500f,(float16_t)0.9643554687500f, +(float16_t)0.2607421875000f,(float16_t)0.9653320312500f, +(float16_t)0.2563476562500f,(float16_t)0.9667968750000f, +(float16_t)0.2519531250000f,(float16_t)0.9677734375000f, +(float16_t)0.2474365234375f,(float16_t)0.9687500000000f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2385253906250f,(float16_t)0.9711914062500f, +(float16_t)0.2340087890625f,(float16_t)0.9721679687500f, +(float16_t)0.2296142578125f,(float16_t)0.9731445312500f, +(float16_t)0.2250976562500f,(float16_t)0.9741210937500f, +(float16_t)0.2205810546875f,(float16_t)0.9755859375000f, +(float16_t)0.2160644531250f,(float16_t)0.9765625000000f, +(float16_t)0.2116699218750f,(float16_t)0.9775390625000f, +(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)0.2026367187500f,(float16_t)0.9794921875000f, +(float16_t)0.1981201171875f,(float16_t)0.9799804687500f, +(float16_t)0.1936035156250f,(float16_t)0.9809570312500f, +(float16_t)0.1890869140625f,(float16_t)0.9819335937500f, +(float16_t)0.1845703125000f,(float16_t)0.9829101562500f, +(float16_t)0.1800537109375f,(float16_t)0.9838867187500f, +(float16_t)0.1755371093750f,(float16_t)0.9843750000000f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1663818359375f,(float16_t)0.9858398437500f, +(float16_t)0.1618652343750f,(float16_t)0.9868164062500f, +(float16_t)0.1573486328125f,(float16_t)0.9873046875000f, +(float16_t)0.1528320312500f,(float16_t)0.9882812500000f, +(float16_t)0.1481933593750f,(float16_t)0.9887695312500f, +(float16_t)0.1436767578125f,(float16_t)0.9897460937500f, +(float16_t)0.1391601562500f,(float16_t)0.9902343750000f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1300048828125f,(float16_t)0.9916992187500f, +(float16_t)0.1254882812500f,(float16_t)0.9921875000000f, +(float16_t)0.1209106445312f,(float16_t)0.9926757812500f, +(float16_t)0.1163330078125f,(float16_t)0.9931640625000f, +(float16_t)0.1117553710938f,(float16_t)0.9936523437500f, +(float16_t)0.1071777343750f,(float16_t)0.9941406250000f, +(float16_t)0.1026000976562f,(float16_t)0.9946289062500f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0934448242188f,(float16_t)0.9956054687500f, +(float16_t)0.0888671875000f,(float16_t)0.9960937500000f, +(float16_t)0.0842895507812f,(float16_t)0.9965820312500f, +(float16_t)0.0797119140625f,(float16_t)0.9965820312500f, +(float16_t)0.0750732421875f,(float16_t)0.9970703125000f, +(float16_t)0.0704956054688f,(float16_t)0.9975585937500f, +(float16_t)0.0659179687500f,(float16_t)0.9980468750000f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)0.0567321777344f,(float16_t)0.9985351562500f, +(float16_t)0.0521240234375f,(float16_t)0.9985351562500f, +(float16_t)0.0475463867188f,(float16_t)0.9990234375000f, 
+(float16_t)0.0429382324219f,(float16_t)0.9990234375000f, +(float16_t)0.0383300781250f,(float16_t)0.9990234375000f, +(float16_t)0.0337524414062f,(float16_t)0.9995117187500f, +(float16_t)0.0291442871094f,(float16_t)0.9995117187500f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)0.0199432373047f,(float16_t)1.0000000000000f, +(float16_t)0.0153427124023f,(float16_t)1.0000000000000f, +(float16_t)0.0107345581055f,(float16_t)1.0000000000000f, +(float16_t)0.0061340332031f,(float16_t)1.0000000000000f, +(float16_t)0.0015335083008f,(float16_t)1.0000000000000f, +(float16_t)-0.0030670166016f,(float16_t)1.0000000000000f, +(float16_t)-0.0076713562012f,(float16_t)1.0000000000000f, +(float16_t)-0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)-0.0168762207031f,(float16_t)1.0000000000000f, +(float16_t)-0.0214691162109f,(float16_t)1.0000000000000f, +(float16_t)-0.0260772705078f,(float16_t)0.9995117187500f, +(float16_t)-0.0306701660156f,(float16_t)0.9995117187500f, +(float16_t)-0.0352783203125f,(float16_t)0.9995117187500f, +(float16_t)-0.0398864746094f,(float16_t)0.9990234375000f, +(float16_t)-0.0444641113281f,(float16_t)0.9990234375000f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0536499023438f,(float16_t)0.9985351562500f, +(float16_t)-0.0582580566406f,(float16_t)0.9985351562500f, +(float16_t)-0.0628662109375f,(float16_t)0.9980468750000f, +(float16_t)-0.0674438476562f,(float16_t)0.9975585937500f, +(float16_t)-0.0720214843750f,(float16_t)0.9975585937500f, +(float16_t)-0.0765991210938f,(float16_t)0.9970703125000f, +(float16_t)-0.0812377929688f,(float16_t)0.9965820312500f, +(float16_t)-0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)-0.0903930664062f,(float16_t)0.9960937500000f, +(float16_t)-0.0949707031250f,(float16_t)0.9956054687500f, +(float16_t)-0.0995483398438f,(float16_t)0.9951171875000f, +(float16_t)-0.1041259765625f,(float16_t)0.9946289062500f, +(float16_t)-0.1087036132812f,(float16_t)0.9941406250000f, +(float16_t)-0.1132812500000f,(float16_t)0.9936523437500f, +(float16_t)-0.1178588867188f,(float16_t)0.9931640625000f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1269531250000f,(float16_t)0.9916992187500f, +(float16_t)-0.1315917968750f,(float16_t)0.9912109375000f, +(float16_t)-0.1361083984375f,(float16_t)0.9907226562500f, +(float16_t)-0.1406250000000f,(float16_t)0.9902343750000f, +(float16_t)-0.1452636718750f,(float16_t)0.9892578125000f, +(float16_t)-0.1497802734375f,(float16_t)0.9887695312500f, +(float16_t)-0.1542968750000f,(float16_t)0.9877929687500f, +(float16_t)-0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)-0.1634521484375f,(float16_t)0.9863281250000f, +(float16_t)-0.1679687500000f,(float16_t)0.9858398437500f, +(float16_t)-0.1724853515625f,(float16_t)0.9848632812500f, +(float16_t)-0.1770019531250f,(float16_t)0.9843750000000f, +(float16_t)-0.1815185546875f,(float16_t)0.9833984375000f, +(float16_t)-0.1860351562500f,(float16_t)0.9824218750000f, +(float16_t)-0.1905517578125f,(float16_t)0.9814453125000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.1995849609375f,(float16_t)0.9799804687500f, +(float16_t)-0.2041015625000f,(float16_t)0.9790039062500f, +(float16_t)-0.2086181640625f,(float16_t)0.9780273437500f, +(float16_t)-0.2131347656250f,(float16_t)0.9770507812500f, +(float16_t)-0.2176513671875f,(float16_t)0.9760742187500f, +(float16_t)-0.2220458984375f,(float16_t)0.9750976562500f, +(float16_t)-0.2265625000000f,(float16_t)0.9741210937500f, 
+(float16_t)-0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)-0.2354736328125f,(float16_t)0.9716796875000f, +(float16_t)-0.2399902343750f,(float16_t)0.9707031250000f, +(float16_t)-0.2445068359375f,(float16_t)0.9697265625000f, +(float16_t)-0.2489013671875f,(float16_t)0.9687500000000f, +(float16_t)-0.2534179687500f,(float16_t)0.9672851562500f, +(float16_t)-0.2578125000000f,(float16_t)0.9663085937500f, +(float16_t)-0.2622070312500f,(float16_t)0.9648437500000f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.2712402343750f,(float16_t)0.9624023437500f, +(float16_t)-0.2756347656250f,(float16_t)0.9614257812500f, +(float16_t)-0.2800292968750f,(float16_t)0.9599609375000f, +(float16_t)-0.2844238281250f,(float16_t)0.9584960937500f, +(float16_t)-0.2888183593750f,(float16_t)0.9575195312500f, +(float16_t)-0.2932128906250f,(float16_t)0.9560546875000f, +(float16_t)-0.2976074218750f,(float16_t)0.9545898437500f, +(float16_t)-0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)-0.3063964843750f,(float16_t)0.9521484375000f, +(float16_t)-0.3107910156250f,(float16_t)0.9506835937500f, +(float16_t)-0.3151855468750f,(float16_t)0.9492187500000f, +(float16_t)-0.3195800781250f,(float16_t)0.9477539062500f, +(float16_t)-0.3239746093750f,(float16_t)0.9462890625000f, +(float16_t)-0.3281250000000f,(float16_t)0.9448242187500f, +(float16_t)-0.3325195312500f,(float16_t)0.9428710937500f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.3413085937500f,(float16_t)0.9399414062500f, +(float16_t)-0.3454589843750f,(float16_t)0.9384765625000f, +(float16_t)-0.3498535156250f,(float16_t)0.9370117187500f, +(float16_t)-0.3542480468750f,(float16_t)0.9350585937500f, +(float16_t)-0.3583984375000f,(float16_t)0.9335937500000f, +(float16_t)-0.3627929687500f,(float16_t)0.9316406250000f, +(float16_t)-0.3669433593750f,(float16_t)0.9301757812500f, +(float16_t)-0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)-0.3754882812500f,(float16_t)0.9267578125000f, +(float16_t)-0.3798828125000f,(float16_t)0.9252929687500f, +(float16_t)-0.3840332031250f,(float16_t)0.9233398437500f, +(float16_t)-0.3884277343750f,(float16_t)0.9213867187500f, +(float16_t)-0.3925781250000f,(float16_t)0.9199218750000f, +(float16_t)-0.3967285156250f,(float16_t)0.9179687500000f, +(float16_t)-0.4011230468750f,(float16_t)0.9160156250000f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4094238281250f,(float16_t)0.9121093750000f, +(float16_t)-0.4135742187500f,(float16_t)0.9106445312500f, +(float16_t)-0.4177246093750f,(float16_t)0.9086914062500f, +(float16_t)-0.4221191406250f,(float16_t)0.9067382812500f, +(float16_t)-0.4262695312500f,(float16_t)0.9047851562500f, +(float16_t)-0.4304199218750f,(float16_t)0.9028320312500f, +(float16_t)-0.4345703125000f,(float16_t)0.9008789062500f, +(float16_t)-0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)-0.4426269531250f,(float16_t)0.8964843750000f, +(float16_t)-0.4467773437500f,(float16_t)0.8945312500000f, +(float16_t)-0.4509277343750f,(float16_t)0.8925781250000f, +(float16_t)-0.4550781250000f,(float16_t)0.8906250000000f, +(float16_t)-0.4592285156250f,(float16_t)0.8881835937500f, +(float16_t)-0.4633789062500f,(float16_t)0.8862304687500f, +(float16_t)-0.4672851562500f,(float16_t)0.8842773437500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.4753417968750f,(float16_t)0.8798828125000f, +(float16_t)-0.4794921875000f,(float16_t)0.8774414062500f, +(float16_t)-0.4836425781250f,(float16_t)0.8754882812500f, 
+(float16_t)-0.4875488281250f,(float16_t)0.8730468750000f, +(float16_t)-0.4914550781250f,(float16_t)0.8706054687500f, +(float16_t)-0.4956054687500f,(float16_t)0.8686523437500f, +(float16_t)-0.4995117187500f,(float16_t)0.8662109375000f, +(float16_t)-0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)-0.5073242187500f,(float16_t)0.8618164062500f, +(float16_t)-0.5112304687500f,(float16_t)0.8593750000000f, +(float16_t)-0.5156250000000f,(float16_t)0.8569335937500f, +(float16_t)-0.5195312500000f,(float16_t)0.8544921875000f, +(float16_t)-0.5234375000000f,(float16_t)0.8520507812500f, +(float16_t)-0.5273437500000f,(float16_t)0.8496093750000f, +(float16_t)-0.5312500000000f,(float16_t)0.8471679687500f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5390625000000f,(float16_t)0.8422851562500f, +(float16_t)-0.5429687500000f,(float16_t)0.8398437500000f, +(float16_t)-0.5463867187500f,(float16_t)0.8374023437500f, +(float16_t)-0.5502929687500f,(float16_t)0.8349609375000f, +(float16_t)-0.5541992187500f,(float16_t)0.8325195312500f, +(float16_t)-0.5581054687500f,(float16_t)0.8295898437500f, +(float16_t)-0.5620117187500f,(float16_t)0.8271484375000f, +(float16_t)-0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)-0.5693359375000f,(float16_t)0.8217773437500f, +(float16_t)-0.5732421875000f,(float16_t)0.8193359375000f, +(float16_t)-0.5771484375000f,(float16_t)0.8168945312500f, +(float16_t)-0.5810546875000f,(float16_t)0.8139648437500f, +(float16_t)-0.5844726562500f,(float16_t)0.8115234375000f, +(float16_t)-0.5883789062500f,(float16_t)0.8085937500000f, +(float16_t)-0.5917968750000f,(float16_t)0.8061523437500f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.5996093750000f,(float16_t)0.8002929687500f, +(float16_t)-0.6030273437500f,(float16_t)0.7978515625000f, +(float16_t)-0.6069335937500f,(float16_t)0.7949218750000f, +(float16_t)-0.6103515625000f,(float16_t)0.7919921875000f, +(float16_t)-0.6142578125000f,(float16_t)0.7890625000000f, +(float16_t)-0.6176757812500f,(float16_t)0.7866210937500f, +(float16_t)-0.6210937500000f,(float16_t)0.7836914062500f, +(float16_t)-0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)-0.6284179687500f,(float16_t)0.7778320312500f, +(float16_t)-0.6318359375000f,(float16_t)0.7749023437500f, +(float16_t)-0.6357421875000f,(float16_t)0.7719726562500f, +(float16_t)-0.6391601562500f,(float16_t)0.7690429687500f, +(float16_t)-0.6425781250000f,(float16_t)0.7661132812500f, +(float16_t)-0.6459960937500f,(float16_t)0.7631835937500f, +(float16_t)-0.6499023437500f,(float16_t)0.7602539062500f, +(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)-0.6567382812500f,(float16_t)0.7543945312500f, +(float16_t)-0.6601562500000f,(float16_t)0.7509765625000f, +(float16_t)-0.6635742187500f,(float16_t)0.7480468750000f, +(float16_t)-0.6669921875000f,(float16_t)0.7451171875000f, +(float16_t)-0.6704101562500f,(float16_t)0.7421875000000f, +(float16_t)-0.6738281250000f,(float16_t)0.7387695312500f, +(float16_t)-0.6772460937500f,(float16_t)0.7358398437500f, +(float16_t)-0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)-0.6840820312500f,(float16_t)0.7294921875000f, +(float16_t)-0.6875000000000f,(float16_t)0.7265625000000f, +(float16_t)-0.6904296875000f,(float16_t)0.7231445312500f, +(float16_t)-0.6938476562500f,(float16_t)0.7202148437500f, +(float16_t)-0.6972656250000f,(float16_t)0.7167968750000f, +(float16_t)-0.7006835937500f,(float16_t)0.7133789062500f, +(float16_t)-0.7036132812500f,(float16_t)0.7104492187500f, 
+(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7104492187500f,(float16_t)0.7036132812500f, +(float16_t)-0.7133789062500f,(float16_t)0.7006835937500f, +(float16_t)-0.7167968750000f,(float16_t)0.6972656250000f, +(float16_t)-0.7202148437500f,(float16_t)0.6938476562500f, +(float16_t)-0.7231445312500f,(float16_t)0.6904296875000f, +(float16_t)-0.7265625000000f,(float16_t)0.6875000000000f, +(float16_t)-0.7294921875000f,(float16_t)0.6840820312500f, +(float16_t)-0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)-0.7358398437500f,(float16_t)0.6772460937500f, +(float16_t)-0.7387695312500f,(float16_t)0.6738281250000f, +(float16_t)-0.7421875000000f,(float16_t)0.6704101562500f, +(float16_t)-0.7451171875000f,(float16_t)0.6669921875000f, +(float16_t)-0.7480468750000f,(float16_t)0.6635742187500f, +(float16_t)-0.7509765625000f,(float16_t)0.6601562500000f, +(float16_t)-0.7543945312500f,(float16_t)0.6567382812500f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.7602539062500f,(float16_t)0.6499023437500f, +(float16_t)-0.7631835937500f,(float16_t)0.6459960937500f, +(float16_t)-0.7661132812500f,(float16_t)0.6425781250000f, +(float16_t)-0.7690429687500f,(float16_t)0.6391601562500f, +(float16_t)-0.7719726562500f,(float16_t)0.6357421875000f, +(float16_t)-0.7749023437500f,(float16_t)0.6318359375000f, +(float16_t)-0.7778320312500f,(float16_t)0.6284179687500f, +(float16_t)-0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)-0.7836914062500f,(float16_t)0.6210937500000f, +(float16_t)-0.7866210937500f,(float16_t)0.6176757812500f, +(float16_t)-0.7890625000000f,(float16_t)0.6142578125000f, +(float16_t)-0.7919921875000f,(float16_t)0.6103515625000f, +(float16_t)-0.7949218750000f,(float16_t)0.6069335937500f, +(float16_t)-0.7978515625000f,(float16_t)0.6030273437500f, +(float16_t)-0.8002929687500f,(float16_t)0.5996093750000f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8061523437500f,(float16_t)0.5917968750000f, +(float16_t)-0.8085937500000f,(float16_t)0.5883789062500f, +(float16_t)-0.8115234375000f,(float16_t)0.5844726562500f, +(float16_t)-0.8139648437500f,(float16_t)0.5810546875000f, +(float16_t)-0.8168945312500f,(float16_t)0.5771484375000f, +(float16_t)-0.8193359375000f,(float16_t)0.5732421875000f, +(float16_t)-0.8217773437500f,(float16_t)0.5693359375000f, +(float16_t)-0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)-0.8271484375000f,(float16_t)0.5620117187500f, +(float16_t)-0.8295898437500f,(float16_t)0.5581054687500f, +(float16_t)-0.8325195312500f,(float16_t)0.5541992187500f, +(float16_t)-0.8349609375000f,(float16_t)0.5502929687500f, +(float16_t)-0.8374023437500f,(float16_t)0.5463867187500f, +(float16_t)-0.8398437500000f,(float16_t)0.5429687500000f, +(float16_t)-0.8422851562500f,(float16_t)0.5390625000000f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8471679687500f,(float16_t)0.5312500000000f, +(float16_t)-0.8496093750000f,(float16_t)0.5273437500000f, +(float16_t)-0.8520507812500f,(float16_t)0.5234375000000f, +(float16_t)-0.8544921875000f,(float16_t)0.5195312500000f, +(float16_t)-0.8569335937500f,(float16_t)0.5156250000000f, +(float16_t)-0.8593750000000f,(float16_t)0.5112304687500f, +(float16_t)-0.8618164062500f,(float16_t)0.5073242187500f, +(float16_t)-0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)-0.8662109375000f,(float16_t)0.4995117187500f, +(float16_t)-0.8686523437500f,(float16_t)0.4956054687500f, +(float16_t)-0.8706054687500f,(float16_t)0.4914550781250f, 
+(float16_t)-0.8730468750000f,(float16_t)0.4875488281250f, +(float16_t)-0.8754882812500f,(float16_t)0.4836425781250f, +(float16_t)-0.8774414062500f,(float16_t)0.4794921875000f, +(float16_t)-0.8798828125000f,(float16_t)0.4753417968750f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.8842773437500f,(float16_t)0.4672851562500f, +(float16_t)-0.8862304687500f,(float16_t)0.4633789062500f, +(float16_t)-0.8881835937500f,(float16_t)0.4592285156250f, +(float16_t)-0.8906250000000f,(float16_t)0.4550781250000f, +(float16_t)-0.8925781250000f,(float16_t)0.4509277343750f, +(float16_t)-0.8945312500000f,(float16_t)0.4467773437500f, +(float16_t)-0.8964843750000f,(float16_t)0.4426269531250f, +(float16_t)-0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)-0.9008789062500f,(float16_t)0.4345703125000f, +(float16_t)-0.9028320312500f,(float16_t)0.4304199218750f, +(float16_t)-0.9047851562500f,(float16_t)0.4262695312500f, +(float16_t)-0.9067382812500f,(float16_t)0.4221191406250f, +(float16_t)-0.9086914062500f,(float16_t)0.4177246093750f, +(float16_t)-0.9106445312500f,(float16_t)0.4135742187500f, +(float16_t)-0.9121093750000f,(float16_t)0.4094238281250f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9160156250000f,(float16_t)0.4011230468750f, +(float16_t)-0.9179687500000f,(float16_t)0.3967285156250f, +(float16_t)-0.9199218750000f,(float16_t)0.3925781250000f, +(float16_t)-0.9213867187500f,(float16_t)0.3884277343750f, +(float16_t)-0.9233398437500f,(float16_t)0.3840332031250f, +(float16_t)-0.9252929687500f,(float16_t)0.3798828125000f, +(float16_t)-0.9267578125000f,(float16_t)0.3754882812500f, +(float16_t)-0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)-0.9301757812500f,(float16_t)0.3669433593750f, +(float16_t)-0.9316406250000f,(float16_t)0.3627929687500f, +(float16_t)-0.9335937500000f,(float16_t)0.3583984375000f, +(float16_t)-0.9350585937500f,(float16_t)0.3542480468750f, +(float16_t)-0.9370117187500f,(float16_t)0.3498535156250f, +(float16_t)-0.9384765625000f,(float16_t)0.3454589843750f, +(float16_t)-0.9399414062500f,(float16_t)0.3413085937500f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9428710937500f,(float16_t)0.3325195312500f, +(float16_t)-0.9448242187500f,(float16_t)0.3281250000000f, +(float16_t)-0.9462890625000f,(float16_t)0.3239746093750f, +(float16_t)-0.9477539062500f,(float16_t)0.3195800781250f, +(float16_t)-0.9492187500000f,(float16_t)0.3151855468750f, +(float16_t)-0.9506835937500f,(float16_t)0.3107910156250f, +(float16_t)-0.9521484375000f,(float16_t)0.3063964843750f, +(float16_t)-0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)-0.9545898437500f,(float16_t)0.2976074218750f, +(float16_t)-0.9560546875000f,(float16_t)0.2932128906250f, +(float16_t)-0.9575195312500f,(float16_t)0.2888183593750f, +(float16_t)-0.9584960937500f,(float16_t)0.2844238281250f, +(float16_t)-0.9599609375000f,(float16_t)0.2800292968750f, +(float16_t)-0.9614257812500f,(float16_t)0.2756347656250f, +(float16_t)-0.9624023437500f,(float16_t)0.2712402343750f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9648437500000f,(float16_t)0.2622070312500f, +(float16_t)-0.9663085937500f,(float16_t)0.2578125000000f, +(float16_t)-0.9672851562500f,(float16_t)0.2534179687500f, +(float16_t)-0.9687500000000f,(float16_t)0.2489013671875f, +(float16_t)-0.9697265625000f,(float16_t)0.2445068359375f, +(float16_t)-0.9707031250000f,(float16_t)0.2399902343750f, +(float16_t)-0.9716796875000f,(float16_t)0.2354736328125f, 
+(float16_t)-0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)-0.9741210937500f,(float16_t)0.2265625000000f, +(float16_t)-0.9750976562500f,(float16_t)0.2220458984375f, +(float16_t)-0.9760742187500f,(float16_t)0.2176513671875f, +(float16_t)-0.9770507812500f,(float16_t)0.2131347656250f, +(float16_t)-0.9780273437500f,(float16_t)0.2086181640625f, +(float16_t)-0.9790039062500f,(float16_t)0.2041015625000f, +(float16_t)-0.9799804687500f,(float16_t)0.1995849609375f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9814453125000f,(float16_t)0.1905517578125f, +(float16_t)-0.9824218750000f,(float16_t)0.1860351562500f, +(float16_t)-0.9833984375000f,(float16_t)0.1815185546875f, +(float16_t)-0.9843750000000f,(float16_t)0.1770019531250f, +(float16_t)-0.9848632812500f,(float16_t)0.1724853515625f, +(float16_t)-0.9858398437500f,(float16_t)0.1679687500000f, +(float16_t)-0.9863281250000f,(float16_t)0.1634521484375f, +(float16_t)-0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)-0.9877929687500f,(float16_t)0.1542968750000f, +(float16_t)-0.9887695312500f,(float16_t)0.1497802734375f, +(float16_t)-0.9892578125000f,(float16_t)0.1452636718750f, +(float16_t)-0.9902343750000f,(float16_t)0.1406250000000f, +(float16_t)-0.9907226562500f,(float16_t)0.1361083984375f, +(float16_t)-0.9912109375000f,(float16_t)0.1315917968750f, +(float16_t)-0.9916992187500f,(float16_t)0.1269531250000f, +(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)-0.9931640625000f,(float16_t)0.1178588867188f, +(float16_t)-0.9936523437500f,(float16_t)0.1132812500000f, +(float16_t)-0.9941406250000f,(float16_t)0.1087036132812f, +(float16_t)-0.9946289062500f,(float16_t)0.1041259765625f, +(float16_t)-0.9951171875000f,(float16_t)0.0995483398438f, +(float16_t)-0.9956054687500f,(float16_t)0.0949707031250f, +(float16_t)-0.9960937500000f,(float16_t)0.0903930664062f, +(float16_t)-0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)-0.9965820312500f,(float16_t)0.0812377929688f, +(float16_t)-0.9970703125000f,(float16_t)0.0765991210938f, +(float16_t)-0.9975585937500f,(float16_t)0.0720214843750f, +(float16_t)-0.9975585937500f,(float16_t)0.0674438476562f, +(float16_t)-0.9980468750000f,(float16_t)0.0628662109375f, +(float16_t)-0.9985351562500f,(float16_t)0.0582580566406f, +(float16_t)-0.9985351562500f,(float16_t)0.0536499023438f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9990234375000f,(float16_t)0.0444641113281f, +(float16_t)-0.9990234375000f,(float16_t)0.0398864746094f, +(float16_t)-0.9995117187500f,(float16_t)0.0352783203125f, +(float16_t)-0.9995117187500f,(float16_t)0.0306701660156f, +(float16_t)-0.9995117187500f,(float16_t)0.0260772705078f, +(float16_t)-1.0000000000000f,(float16_t)0.0214691162109f, +(float16_t)-1.0000000000000f,(float16_t)0.0168762207031f, +(float16_t)-1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)-1.0000000000000f,(float16_t)0.0076713562012f, +(float16_t)-1.0000000000000f,(float16_t)0.0030670166016f, +(float16_t)-1.0000000000000f,(float16_t)-0.0015335083008f, +(float16_t)-1.0000000000000f,(float16_t)-0.0061340332031f, +(float16_t)-1.0000000000000f,(float16_t)-0.0107345581055f, +(float16_t)-1.0000000000000f,(float16_t)-0.0153427124023f, +(float16_t)-1.0000000000000f,(float16_t)-0.0199432373047f, +(float16_t)-0.9995117187500f,(float16_t)-0.0245361328125f, +(float16_t)-0.9995117187500f,(float16_t)-0.0291442871094f, +(float16_t)-0.9995117187500f,(float16_t)-0.0337524414062f, +(float16_t)-0.9990234375000f,(float16_t)-0.0383300781250f, 
+(float16_t)-0.9990234375000f,(float16_t)-0.0429382324219f, +(float16_t)-0.9990234375000f,(float16_t)-0.0475463867188f, +(float16_t)-0.9985351562500f,(float16_t)-0.0521240234375f, +(float16_t)-0.9985351562500f,(float16_t)-0.0567321777344f, +(float16_t)-0.9980468750000f,(float16_t)-0.0613098144531f, +(float16_t)-0.9980468750000f,(float16_t)-0.0659179687500f, +(float16_t)-0.9975585937500f,(float16_t)-0.0704956054688f, +(float16_t)-0.9970703125000f,(float16_t)-0.0750732421875f, +(float16_t)-0.9965820312500f,(float16_t)-0.0797119140625f, +(float16_t)-0.9965820312500f,(float16_t)-0.0842895507812f, +(float16_t)-0.9960937500000f,(float16_t)-0.0888671875000f, +(float16_t)-0.9956054687500f,(float16_t)-0.0934448242188f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9946289062500f,(float16_t)-0.1026000976562f, +(float16_t)-0.9941406250000f,(float16_t)-0.1071777343750f, +(float16_t)-0.9936523437500f,(float16_t)-0.1117553710938f, +(float16_t)-0.9931640625000f,(float16_t)-0.1163330078125f, +(float16_t)-0.9926757812500f,(float16_t)-0.1209106445312f, +(float16_t)-0.9921875000000f,(float16_t)-0.1254882812500f, +(float16_t)-0.9916992187500f,(float16_t)-0.1300048828125f, +(float16_t)-0.9907226562500f,(float16_t)-0.1345214843750f, +(float16_t)-0.9902343750000f,(float16_t)-0.1391601562500f, +(float16_t)-0.9897460937500f,(float16_t)-0.1436767578125f, +(float16_t)-0.9887695312500f,(float16_t)-0.1481933593750f, +(float16_t)-0.9882812500000f,(float16_t)-0.1528320312500f, +(float16_t)-0.9873046875000f,(float16_t)-0.1573486328125f, +(float16_t)-0.9868164062500f,(float16_t)-0.1618652343750f, +(float16_t)-0.9858398437500f,(float16_t)-0.1663818359375f, +(float16_t)-0.9853515625000f,(float16_t)-0.1710205078125f, +(float16_t)-0.9843750000000f,(float16_t)-0.1755371093750f, +(float16_t)-0.9838867187500f,(float16_t)-0.1800537109375f, +(float16_t)-0.9829101562500f,(float16_t)-0.1845703125000f, +(float16_t)-0.9819335937500f,(float16_t)-0.1890869140625f, +(float16_t)-0.9809570312500f,(float16_t)-0.1936035156250f, +(float16_t)-0.9799804687500f,(float16_t)-0.1981201171875f, +(float16_t)-0.9794921875000f,(float16_t)-0.2026367187500f, +(float16_t)-0.9785156250000f,(float16_t)-0.2071533203125f, +(float16_t)-0.9775390625000f,(float16_t)-0.2116699218750f, +(float16_t)-0.9765625000000f,(float16_t)-0.2160644531250f, +(float16_t)-0.9755859375000f,(float16_t)-0.2205810546875f, +(float16_t)-0.9741210937500f,(float16_t)-0.2250976562500f, +(float16_t)-0.9731445312500f,(float16_t)-0.2296142578125f, +(float16_t)-0.9721679687500f,(float16_t)-0.2340087890625f, +(float16_t)-0.9711914062500f,(float16_t)-0.2385253906250f, +(float16_t)-0.9702148437500f,(float16_t)-0.2429199218750f, +(float16_t)-0.9687500000000f,(float16_t)-0.2474365234375f, +(float16_t)-0.9677734375000f,(float16_t)-0.2519531250000f, +(float16_t)-0.9667968750000f,(float16_t)-0.2563476562500f, +(float16_t)-0.9653320312500f,(float16_t)-0.2607421875000f, +(float16_t)-0.9643554687500f,(float16_t)-0.2651367187500f, +(float16_t)-0.9628906250000f,(float16_t)-0.2697753906250f, +(float16_t)-0.9619140625000f,(float16_t)-0.2741699218750f, +(float16_t)-0.9604492187500f,(float16_t)-0.2785644531250f, +(float16_t)-0.9589843750000f,(float16_t)-0.2829589843750f, +(float16_t)-0.9580078125000f,(float16_t)-0.2873535156250f, +(float16_t)-0.9565429687500f,(float16_t)-0.2917480468750f, +(float16_t)-0.9550781250000f,(float16_t)-0.2961425781250f, +(float16_t)-0.9536132812500f,(float16_t)-0.3005371093750f, +(float16_t)-0.9521484375000f,(float16_t)-0.3049316406250f, 
+(float16_t)-0.9511718750000f,(float16_t)-0.3093261718750f, +(float16_t)-0.9497070312500f,(float16_t)-0.3137207031250f, +(float16_t)-0.9482421875000f,(float16_t)-0.3181152343750f, +(float16_t)-0.9467773437500f,(float16_t)-0.3225097656250f, +(float16_t)-0.9453125000000f,(float16_t)-0.3266601562500f, +(float16_t)-0.9433593750000f,(float16_t)-0.3310546875000f, +(float16_t)-0.9418945312500f,(float16_t)-0.3354492187500f, +(float16_t)-0.9404296875000f,(float16_t)-0.3398437500000f, +(float16_t)-0.9389648437500f,(float16_t)-0.3439941406250f, +(float16_t)-0.9375000000000f,(float16_t)-0.3483886718750f, +(float16_t)-0.9355468750000f,(float16_t)-0.3527832031250f, +(float16_t)-0.9340820312500f,(float16_t)-0.3569335937500f, +(float16_t)-0.9326171875000f,(float16_t)-0.3613281250000f, +(float16_t)-0.9306640625000f,(float16_t)-0.3657226562500f, +(float16_t)-0.9291992187500f,(float16_t)-0.3698730468750f, +(float16_t)-0.9272460937500f,(float16_t)-0.3742675781250f, +(float16_t)-0.9257812500000f,(float16_t)-0.3784179687500f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.9218750000000f,(float16_t)-0.3869628906250f, +(float16_t)-0.9204101562500f,(float16_t)-0.3911132812500f, +(float16_t)-0.9184570312500f,(float16_t)-0.3955078125000f, +(float16_t)-0.9165039062500f,(float16_t)-0.3996582031250f, +(float16_t)-0.9150390625000f,(float16_t)-0.4038085937500f, +(float16_t)-0.9130859375000f,(float16_t)-0.4079589843750f, +(float16_t)-0.9111328125000f,(float16_t)-0.4123535156250f, +(float16_t)-0.9091796875000f,(float16_t)-0.4165039062500f, +(float16_t)-0.9072265625000f,(float16_t)-0.4206542968750f, +(float16_t)-0.9052734375000f,(float16_t)-0.4248046875000f, +(float16_t)-0.9033203125000f,(float16_t)-0.4289550781250f, +(float16_t)-0.9013671875000f,(float16_t)-0.4331054687500f, +(float16_t)-0.8994140625000f,(float16_t)-0.4372558593750f, +(float16_t)-0.8974609375000f,(float16_t)-0.4414062500000f, +(float16_t)-0.8955078125000f,(float16_t)-0.4455566406250f, +(float16_t)-0.8930664062500f,(float16_t)-0.4497070312500f, +(float16_t)-0.8911132812500f,(float16_t)-0.4536132812500f, +(float16_t)-0.8891601562500f,(float16_t)-0.4577636718750f, +(float16_t)-0.8867187500000f,(float16_t)-0.4619140625000f, +(float16_t)-0.8847656250000f,(float16_t)-0.4660644531250f, +(float16_t)-0.8828125000000f,(float16_t)-0.4699707031250f, +(float16_t)-0.8803710937500f,(float16_t)-0.4741210937500f, +(float16_t)-0.8784179687500f,(float16_t)-0.4780273437500f, +(float16_t)-0.8759765625000f,(float16_t)-0.4821777343750f, +(float16_t)-0.8740234375000f,(float16_t)-0.4863281250000f, +(float16_t)-0.8715820312500f,(float16_t)-0.4902343750000f, +(float16_t)-0.8691406250000f,(float16_t)-0.4941406250000f, +(float16_t)-0.8671875000000f,(float16_t)-0.4982910156250f, +(float16_t)-0.8647460937500f,(float16_t)-0.5024414062500f, +(float16_t)-0.8623046875000f,(float16_t)-0.5063476562500f, +(float16_t)-0.8598632812500f,(float16_t)-0.5102539062500f, +(float16_t)-0.8579101562500f,(float16_t)-0.5141601562500f, +(float16_t)-0.8554687500000f,(float16_t)-0.5180664062500f, +(float16_t)-0.8530273437500f,(float16_t)-0.5219726562500f, +(float16_t)-0.8505859375000f,(float16_t)-0.5258789062500f, +(float16_t)-0.8481445312500f,(float16_t)-0.5297851562500f, +(float16_t)-0.8457031250000f,(float16_t)-0.5336914062500f, +(float16_t)-0.8432617187500f,(float16_t)-0.5375976562500f, +(float16_t)-0.8408203125000f,(float16_t)-0.5415039062500f, +(float16_t)-0.8383789062500f,(float16_t)-0.5454101562500f, +(float16_t)-0.8359375000000f,(float16_t)-0.5493164062500f, 
+(float16_t)-0.8330078125000f,(float16_t)-0.5532226562500f, +(float16_t)-0.8305664062500f,(float16_t)-0.5566406250000f, +(float16_t)-0.8281250000000f,(float16_t)-0.5605468750000f, +(float16_t)-0.8256835937500f,(float16_t)-0.5644531250000f, +(float16_t)-0.8227539062500f,(float16_t)-0.5683593750000f, +(float16_t)-0.8203125000000f,(float16_t)-0.5722656250000f, +(float16_t)-0.8173828125000f,(float16_t)-0.5756835937500f, +(float16_t)-0.8149414062500f,(float16_t)-0.5795898437500f, +(float16_t)-0.8120117187500f,(float16_t)-0.5834960937500f, +(float16_t)-0.8095703125000f,(float16_t)-0.5869140625000f, +(float16_t)-0.8066406250000f,(float16_t)-0.5908203125000f, +(float16_t)-0.8041992187500f,(float16_t)-0.5942382812500f, +(float16_t)-0.8012695312500f,(float16_t)-0.5981445312500f, +(float16_t)-0.7988281250000f,(float16_t)-0.6020507812500f, +(float16_t)-0.7958984375000f,(float16_t)-0.6054687500000f, +(float16_t)-0.7929687500000f,(float16_t)-0.6093750000000f, +(float16_t)-0.7900390625000f,(float16_t)-0.6127929687500f, +(float16_t)-0.7875976562500f,(float16_t)-0.6162109375000f, +(float16_t)-0.7846679687500f,(float16_t)-0.6201171875000f, +(float16_t)-0.7817382812500f,(float16_t)-0.6235351562500f, +(float16_t)-0.7788085937500f,(float16_t)-0.6274414062500f, +(float16_t)-0.7758789062500f,(float16_t)-0.6308593750000f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.7700195312500f,(float16_t)-0.6381835937500f, +(float16_t)-0.7670898437500f,(float16_t)-0.6416015625000f, +(float16_t)-0.7641601562500f,(float16_t)-0.6450195312500f, +(float16_t)-0.7612304687500f,(float16_t)-0.6484375000000f, +(float16_t)-0.7583007812500f,(float16_t)-0.6518554687500f, +(float16_t)-0.7553710937500f,(float16_t)-0.6552734375000f, +(float16_t)-0.7519531250000f,(float16_t)-0.6591796875000f, +(float16_t)-0.7490234375000f,(float16_t)-0.6625976562500f, +(float16_t)-0.7460937500000f,(float16_t)-0.6660156250000f, +(float16_t)-0.7431640625000f,(float16_t)-0.6694335937500f, +(float16_t)-0.7397460937500f,(float16_t)-0.6728515625000f, +(float16_t)-0.7368164062500f,(float16_t)-0.6762695312500f, +(float16_t)-0.7338867187500f,(float16_t)-0.6796875000000f, +(float16_t)-0.7304687500000f,(float16_t)-0.6826171875000f, +(float16_t)-0.7275390625000f,(float16_t)-0.6860351562500f, +(float16_t)-0.7241210937500f,(float16_t)-0.6894531250000f, +(float16_t)-0.7211914062500f,(float16_t)-0.6928710937500f, +(float16_t)-0.7177734375000f,(float16_t)-0.6962890625000f, +(float16_t)-0.7148437500000f,(float16_t)-0.6997070312500f, +(float16_t)-0.7114257812500f,(float16_t)-0.7026367187500f, +(float16_t)-0.7080078125000f,(float16_t)-0.7060546875000f, +(float16_t)-0.7050781250000f,(float16_t)-0.7094726562500f, +(float16_t)-0.7016601562500f,(float16_t)-0.7124023437500f, +(float16_t)-0.6982421875000f,(float16_t)-0.7158203125000f, +(float16_t)-0.6953125000000f,(float16_t)-0.7187500000000f, +(float16_t)-0.6918945312500f,(float16_t)-0.7221679687500f, +(float16_t)-0.6884765625000f,(float16_t)-0.7250976562500f, +(float16_t)-0.6850585937500f,(float16_t)-0.7285156250000f, +(float16_t)-0.6816406250000f,(float16_t)-0.7314453125000f, +(float16_t)-0.6782226562500f,(float16_t)-0.7348632812500f, +(float16_t)-0.6748046875000f,(float16_t)-0.7377929687500f, +(float16_t)-0.6713867187500f,(float16_t)-0.7407226562500f, +(float16_t)-0.6679687500000f,(float16_t)-0.7441406250000f, +(float16_t)-0.6645507812500f,(float16_t)-0.7470703125000f, +(float16_t)-0.6611328125000f,(float16_t)-0.7500000000000f, +(float16_t)-0.6577148437500f,(float16_t)-0.7534179687500f, 
+(float16_t)-0.6542968750000f,(float16_t)-0.7563476562500f, +(float16_t)-0.6508789062500f,(float16_t)-0.7592773437500f, +(float16_t)-0.6474609375000f,(float16_t)-0.7622070312500f, +(float16_t)-0.6440429687500f,(float16_t)-0.7651367187500f, +(float16_t)-0.6401367187500f,(float16_t)-0.7680664062500f, +(float16_t)-0.6367187500000f,(float16_t)-0.7709960937500f, +(float16_t)-0.6333007812500f,(float16_t)-0.7739257812500f, +(float16_t)-0.6293945312500f,(float16_t)-0.7768554687500f, +(float16_t)-0.6259765625000f,(float16_t)-0.7797851562500f, +(float16_t)-0.6225585937500f,(float16_t)-0.7827148437500f, +(float16_t)-0.6186523437500f,(float16_t)-0.7856445312500f, +(float16_t)-0.6152343750000f,(float16_t)-0.7885742187500f, +(float16_t)-0.6118164062500f,(float16_t)-0.7910156250000f, +(float16_t)-0.6079101562500f,(float16_t)-0.7939453125000f, +(float16_t)-0.6044921875000f,(float16_t)-0.7968750000000f, +(float16_t)-0.6005859375000f,(float16_t)-0.7993164062500f, +(float16_t)-0.5971679687500f,(float16_t)-0.8022460937500f, +(float16_t)-0.5932617187500f,(float16_t)-0.8051757812500f, +(float16_t)-0.5893554687500f,(float16_t)-0.8076171875000f, +(float16_t)-0.5859375000000f,(float16_t)-0.8105468750000f, +(float16_t)-0.5820312500000f,(float16_t)-0.8129882812500f, +(float16_t)-0.5781250000000f,(float16_t)-0.8159179687500f, +(float16_t)-0.5747070312500f,(float16_t)-0.8183593750000f, +(float16_t)-0.5708007812500f,(float16_t)-0.8212890625000f, +(float16_t)-0.5668945312500f,(float16_t)-0.8237304687500f, +(float16_t)-0.5629882812500f,(float16_t)-0.8261718750000f, +(float16_t)-0.5595703125000f,(float16_t)-0.8291015625000f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.5517578125000f,(float16_t)-0.8339843750000f, +(float16_t)-0.5478515625000f,(float16_t)-0.8364257812500f, +(float16_t)-0.5439453125000f,(float16_t)-0.8388671875000f, +(float16_t)-0.5400390625000f,(float16_t)-0.8417968750000f, +(float16_t)-0.5361328125000f,(float16_t)-0.8442382812500f, +(float16_t)-0.5322265625000f,(float16_t)-0.8466796875000f, +(float16_t)-0.5283203125000f,(float16_t)-0.8491210937500f, +(float16_t)-0.5244140625000f,(float16_t)-0.8515625000000f, +(float16_t)-0.5205078125000f,(float16_t)-0.8540039062500f, +(float16_t)-0.5166015625000f,(float16_t)-0.8559570312500f, +(float16_t)-0.5126953125000f,(float16_t)-0.8583984375000f, +(float16_t)-0.5087890625000f,(float16_t)-0.8608398437500f, +(float16_t)-0.5048828125000f,(float16_t)-0.8632812500000f, +(float16_t)-0.5009765625000f,(float16_t)-0.8657226562500f, +(float16_t)-0.4968261718750f,(float16_t)-0.8676757812500f, +(float16_t)-0.4929199218750f,(float16_t)-0.8701171875000f, +(float16_t)-0.4887695312500f,(float16_t)-0.8725585937500f, +(float16_t)-0.4848632812500f,(float16_t)-0.8745117187500f, +(float16_t)-0.4809570312500f,(float16_t)-0.8769531250000f, +(float16_t)-0.4768066406250f,(float16_t)-0.8789062500000f, +(float16_t)-0.4726562500000f,(float16_t)-0.8813476562500f, +(float16_t)-0.4687500000000f,(float16_t)-0.8833007812500f, +(float16_t)-0.4645996093750f,(float16_t)-0.8857421875000f, +(float16_t)-0.4604492187500f,(float16_t)-0.8876953125000f, +(float16_t)-0.4565429687500f,(float16_t)-0.8896484375000f, +(float16_t)-0.4523925781250f,(float16_t)-0.8916015625000f, +(float16_t)-0.4482421875000f,(float16_t)-0.8940429687500f, +(float16_t)-0.4440917968750f,(float16_t)-0.8959960937500f, +(float16_t)-0.4399414062500f,(float16_t)-0.8979492187500f, +(float16_t)-0.4357910156250f,(float16_t)-0.8999023437500f, +(float16_t)-0.4316406250000f,(float16_t)-0.9018554687500f, 
+(float16_t)-0.4274902343750f,(float16_t)-0.9038085937500f, +(float16_t)-0.4233398437500f,(float16_t)-0.9057617187500f, +(float16_t)-0.4191894531250f,(float16_t)-0.9077148437500f, +(float16_t)-0.4150390625000f,(float16_t)-0.9096679687500f, +(float16_t)-0.4108886718750f,(float16_t)-0.9116210937500f, +(float16_t)-0.4067382812500f,(float16_t)-0.9135742187500f, +(float16_t)-0.4023437500000f,(float16_t)-0.9155273437500f, +(float16_t)-0.3981933593750f,(float16_t)-0.9174804687500f, +(float16_t)-0.3940429687500f,(float16_t)-0.9189453125000f, +(float16_t)-0.3896484375000f,(float16_t)-0.9208984375000f, +(float16_t)-0.3854980468750f,(float16_t)-0.9228515625000f, +(float16_t)-0.3813476562500f,(float16_t)-0.9243164062500f, +(float16_t)-0.3769531250000f,(float16_t)-0.9262695312500f, +(float16_t)-0.3728027343750f,(float16_t)-0.9277343750000f, +(float16_t)-0.3684082031250f,(float16_t)-0.9296875000000f, +(float16_t)-0.3642578125000f,(float16_t)-0.9311523437500f, +(float16_t)-0.3598632812500f,(float16_t)-0.9331054687500f, +(float16_t)-0.3557128906250f,(float16_t)-0.9345703125000f, +(float16_t)-0.3513183593750f,(float16_t)-0.9360351562500f, +(float16_t)-0.3469238281250f,(float16_t)-0.9379882812500f, +(float16_t)-0.3427734375000f,(float16_t)-0.9394531250000f, +(float16_t)-0.3383789062500f,(float16_t)-0.9409179687500f, +(float16_t)-0.3339843750000f,(float16_t)-0.9423828125000f, +(float16_t)-0.3295898437500f,(float16_t)-0.9443359375000f, +(float16_t)-0.3251953125000f,(float16_t)-0.9458007812500f, +(float16_t)-0.3210449218750f,(float16_t)-0.9472656250000f, +(float16_t)-0.3166503906250f,(float16_t)-0.9487304687500f, +(float16_t)-0.3122558593750f,(float16_t)-0.9501953125000f, +(float16_t)-0.3078613281250f,(float16_t)-0.9516601562500f, +(float16_t)-0.3034667968750f,(float16_t)-0.9526367187500f, +(float16_t)-0.2990722656250f,(float16_t)-0.9541015625000f, +(float16_t)-0.2946777343750f,(float16_t)-0.9555664062500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)-0.2858886718750f,(float16_t)-0.9584960937500f, +(float16_t)-0.2814941406250f,(float16_t)-0.9594726562500f, +(float16_t)-0.2770996093750f,(float16_t)-0.9609375000000f, +(float16_t)-0.2727050781250f,(float16_t)-0.9619140625000f, +(float16_t)-0.2683105468750f,(float16_t)-0.9633789062500f, +(float16_t)-0.2636718750000f,(float16_t)-0.9643554687500f, +(float16_t)-0.2592773437500f,(float16_t)-0.9658203125000f, +(float16_t)-0.2548828125000f,(float16_t)-0.9667968750000f, +(float16_t)-0.2504882812500f,(float16_t)-0.9682617187500f, +(float16_t)-0.2459716796875f,(float16_t)-0.9692382812500f, +(float16_t)-0.2414550781250f,(float16_t)-0.9702148437500f, +(float16_t)-0.2370605468750f,(float16_t)-0.9716796875000f, +(float16_t)-0.2325439453125f,(float16_t)-0.9726562500000f, +(float16_t)-0.2280273437500f,(float16_t)-0.9736328125000f, +(float16_t)-0.2236328125000f,(float16_t)-0.9746093750000f, +(float16_t)-0.2191162109375f,(float16_t)-0.9755859375000f, +(float16_t)-0.2145996093750f,(float16_t)-0.9765625000000f, +(float16_t)-0.2100830078125f,(float16_t)-0.9775390625000f, +(float16_t)-0.2055664062500f,(float16_t)-0.9785156250000f, +(float16_t)-0.2010498046875f,(float16_t)-0.9794921875000f, +(float16_t)-0.1966552734375f,(float16_t)-0.9804687500000f, +(float16_t)-0.1921386718750f,(float16_t)-0.9814453125000f, +(float16_t)-0.1876220703125f,(float16_t)-0.9824218750000f, +(float16_t)-0.1829833984375f,(float16_t)-0.9829101562500f, +(float16_t)-0.1784667968750f,(float16_t)-0.9838867187500f, +(float16_t)-0.1739501953125f,(float16_t)-0.9848632812500f, 
+(float16_t)-0.1694335937500f,(float16_t)-0.9853515625000f, +(float16_t)-0.1649169921875f,(float16_t)-0.9863281250000f, +(float16_t)-0.1604003906250f,(float16_t)-0.9868164062500f, +(float16_t)-0.1558837890625f,(float16_t)-0.9877929687500f, +(float16_t)-0.1512451171875f,(float16_t)-0.9882812500000f, +(float16_t)-0.1467285156250f,(float16_t)-0.9892578125000f, +(float16_t)-0.1422119140625f,(float16_t)-0.9897460937500f, +(float16_t)-0.1375732421875f,(float16_t)-0.9907226562500f, +(float16_t)-0.1330566406250f,(float16_t)-0.9912109375000f, +(float16_t)-0.1285400390625f,(float16_t)-0.9916992187500f, +(float16_t)-0.1239624023438f,(float16_t)-0.9921875000000f, +(float16_t)-0.1193847656250f,(float16_t)-0.9926757812500f, +(float16_t)-0.1148071289062f,(float16_t)-0.9931640625000f, +(float16_t)-0.1102294921875f,(float16_t)-0.9941406250000f, +(float16_t)-0.1056518554688f,(float16_t)-0.9946289062500f, +(float16_t)-0.1010742187500f,(float16_t)-0.9951171875000f, +(float16_t)-0.0964965820312f,(float16_t)-0.9951171875000f, +(float16_t)-0.0919189453125f,(float16_t)-0.9956054687500f, +(float16_t)-0.0873413085938f,(float16_t)-0.9960937500000f, +(float16_t)-0.0827636718750f,(float16_t)-0.9965820312500f, +(float16_t)-0.0781250000000f,(float16_t)-0.9970703125000f, +(float16_t)-0.0735473632812f,(float16_t)-0.9970703125000f, +(float16_t)-0.0689697265625f,(float16_t)-0.9975585937500f, +(float16_t)-0.0643920898438f,(float16_t)-0.9980468750000f, +(float16_t)-0.0597839355469f,(float16_t)-0.9980468750000f, +(float16_t)-0.0552062988281f,(float16_t)-0.9985351562500f, +(float16_t)-0.0505981445312f,(float16_t)-0.9985351562500f, +(float16_t)-0.0459899902344f,(float16_t)-0.9990234375000f, +(float16_t)-0.0414123535156f,(float16_t)-0.9990234375000f, +(float16_t)-0.0368041992188f,(float16_t)-0.9995117187500f, +(float16_t)-0.0321960449219f,(float16_t)-0.9995117187500f, +(float16_t)-0.0276031494141f,(float16_t)-0.9995117187500f, +(float16_t)-0.0230102539062f,(float16_t)-0.9995117187500f, +(float16_t)-0.0184020996094f,(float16_t)-1.0000000000000f, +(float16_t)-0.0138015747070f,(float16_t)-1.0000000000000f, +(float16_t)-0.0092010498047f,(float16_t)-1.0000000000000f, +(float16_t)-0.0046005249023f,(float16_t)-1.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0184020996094f, +(float16_t)0.9995117187500f,(float16_t)0.0368041992188f, +(float16_t)0.9985351562500f,(float16_t)0.0552062988281f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9956054687500f,(float16_t)0.0919189453125f, +(float16_t)0.9941406250000f,(float16_t)0.1102294921875f, +(float16_t)0.9916992187500f,(float16_t)0.1285400390625f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9863281250000f,(float16_t)0.1649169921875f, +(float16_t)0.9829101562500f,(float16_t)0.1829833984375f, +(float16_t)0.9794921875000f,(float16_t)0.2010498046875f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9716796875000f,(float16_t)0.2370605468750f, +(float16_t)0.9667968750000f,(float16_t)0.2548828125000f, +(float16_t)0.9619140625000f,(float16_t)0.2727050781250f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9516601562500f,(float16_t)0.3078613281250f, +(float16_t)0.9458007812500f,(float16_t)0.3251953125000f, +(float16_t)0.9394531250000f,(float16_t)0.3427734375000f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, +(float16_t)0.9262695312500f,(float16_t)0.3769531250000f, +(float16_t)0.9189453125000f,(float16_t)0.3940429687500f, 
+(float16_t)0.9116210937500f,(float16_t)0.4108886718750f, +(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8959960937500f,(float16_t)0.4440917968750f, +(float16_t)0.8876953125000f,(float16_t)0.4604492187500f, +(float16_t)0.8789062500000f,(float16_t)0.4768066406250f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8608398437500f,(float16_t)0.5087890625000f, +(float16_t)0.8515625000000f,(float16_t)0.5244140625000f, +(float16_t)0.8417968750000f,(float16_t)0.5400390625000f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.8212890625000f,(float16_t)0.5708007812500f, +(float16_t)0.8105468750000f,(float16_t)0.5859375000000f, +(float16_t)0.7993164062500f,(float16_t)0.6005859375000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7768554687500f,(float16_t)0.6293945312500f, +(float16_t)0.7651367187500f,(float16_t)0.6440429687500f, +(float16_t)0.7534179687500f,(float16_t)0.6577148437500f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.7285156250000f,(float16_t)0.6850585937500f, +(float16_t)0.7158203125000f,(float16_t)0.6982421875000f, +(float16_t)0.7026367187500f,(float16_t)0.7114257812500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6762695312500f,(float16_t)0.7368164062500f, +(float16_t)0.6625976562500f,(float16_t)0.7490234375000f, +(float16_t)0.6484375000000f,(float16_t)0.7612304687500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.6201171875000f,(float16_t)0.7846679687500f, +(float16_t)0.6054687500000f,(float16_t)0.7958984375000f, +(float16_t)0.5908203125000f,(float16_t)0.8066406250000f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5605468750000f,(float16_t)0.8281250000000f, +(float16_t)0.5454101562500f,(float16_t)0.8383789062500f, +(float16_t)0.5297851562500f,(float16_t)0.8481445312500f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4982910156250f,(float16_t)0.8671875000000f, +(float16_t)0.4821777343750f,(float16_t)0.8759765625000f, +(float16_t)0.4660644531250f,(float16_t)0.8847656250000f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.4331054687500f,(float16_t)0.9013671875000f, +(float16_t)0.4165039062500f,(float16_t)0.9091796875000f, +(float16_t)0.3996582031250f,(float16_t)0.9165039062500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3657226562500f,(float16_t)0.9306640625000f, +(float16_t)0.3483886718750f,(float16_t)0.9375000000000f, +(float16_t)0.3310546875000f,(float16_t)0.9433593750000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.2961425781250f,(float16_t)0.9550781250000f, +(float16_t)0.2785644531250f,(float16_t)0.9604492187500f, +(float16_t)0.2607421875000f,(float16_t)0.9653320312500f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.2250976562500f,(float16_t)0.9741210937500f, +(float16_t)0.2071533203125f,(float16_t)0.9785156250000f, +(float16_t)0.1890869140625f,(float16_t)0.9819335937500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.1528320312500f,(float16_t)0.9882812500000f, +(float16_t)0.1345214843750f,(float16_t)0.9907226562500f, +(float16_t)0.1163330078125f,(float16_t)0.9931640625000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0797119140625f,(float16_t)0.9965820312500f, +(float16_t)0.0613098144531f,(float16_t)0.9980468750000f, +(float16_t)0.0429382324219f,(float16_t)0.9990234375000f, 
+(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)0.0061340332031f,(float16_t)1.0000000000000f, +(float16_t)-0.0122680664062f,(float16_t)1.0000000000000f, +(float16_t)-0.0306701660156f,(float16_t)0.9995117187500f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.0674438476562f,(float16_t)0.9975585937500f, +(float16_t)-0.0858154296875f,(float16_t)0.9960937500000f, +(float16_t)-0.1041259765625f,(float16_t)0.9946289062500f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1406250000000f,(float16_t)0.9902343750000f, +(float16_t)-0.1588134765625f,(float16_t)0.9873046875000f, +(float16_t)-0.1770019531250f,(float16_t)0.9843750000000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2131347656250f,(float16_t)0.9770507812500f, +(float16_t)-0.2310791015625f,(float16_t)0.9731445312500f, +(float16_t)-0.2489013671875f,(float16_t)0.9687500000000f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.2844238281250f,(float16_t)0.9584960937500f, +(float16_t)-0.3020019531250f,(float16_t)0.9531250000000f, +(float16_t)-0.3195800781250f,(float16_t)0.9477539062500f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.3542480468750f,(float16_t)0.9350585937500f, +(float16_t)-0.3713378906250f,(float16_t)0.9287109375000f, +(float16_t)-0.3884277343750f,(float16_t)0.9213867187500f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4221191406250f,(float16_t)0.9067382812500f, +(float16_t)-0.4387207031250f,(float16_t)0.8984375000000f, +(float16_t)-0.4550781250000f,(float16_t)0.8906250000000f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.4875488281250f,(float16_t)0.8730468750000f, +(float16_t)-0.5034179687500f,(float16_t)0.8637695312500f, +(float16_t)-0.5195312500000f,(float16_t)0.8544921875000f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5502929687500f,(float16_t)0.8349609375000f, +(float16_t)-0.5659179687500f,(float16_t)0.8247070312500f, +(float16_t)-0.5810546875000f,(float16_t)0.8139648437500f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6103515625000f,(float16_t)0.7919921875000f, +(float16_t)-0.6250000000000f,(float16_t)0.7807617187500f, +(float16_t)-0.6391601562500f,(float16_t)0.7690429687500f, +(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)-0.6669921875000f,(float16_t)0.7451171875000f, +(float16_t)-0.6806640625000f,(float16_t)0.7324218750000f, +(float16_t)-0.6938476562500f,(float16_t)0.7202148437500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7202148437500f,(float16_t)0.6938476562500f, +(float16_t)-0.7324218750000f,(float16_t)0.6806640625000f, +(float16_t)-0.7451171875000f,(float16_t)0.6669921875000f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.7690429687500f,(float16_t)0.6391601562500f, +(float16_t)-0.7807617187500f,(float16_t)0.6250000000000f, +(float16_t)-0.7919921875000f,(float16_t)0.6103515625000f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8139648437500f,(float16_t)0.5810546875000f, +(float16_t)-0.8247070312500f,(float16_t)0.5659179687500f, +(float16_t)-0.8349609375000f,(float16_t)0.5502929687500f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8544921875000f,(float16_t)0.5195312500000f, +(float16_t)-0.8637695312500f,(float16_t)0.5034179687500f, +(float16_t)-0.8730468750000f,(float16_t)0.4875488281250f, 
+(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.8906250000000f,(float16_t)0.4550781250000f, +(float16_t)-0.8984375000000f,(float16_t)0.4387207031250f, +(float16_t)-0.9067382812500f,(float16_t)0.4221191406250f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9213867187500f,(float16_t)0.3884277343750f, +(float16_t)-0.9287109375000f,(float16_t)0.3713378906250f, +(float16_t)-0.9350585937500f,(float16_t)0.3542480468750f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9477539062500f,(float16_t)0.3195800781250f, +(float16_t)-0.9531250000000f,(float16_t)0.3020019531250f, +(float16_t)-0.9584960937500f,(float16_t)0.2844238281250f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9687500000000f,(float16_t)0.2489013671875f, +(float16_t)-0.9731445312500f,(float16_t)0.2310791015625f, +(float16_t)-0.9770507812500f,(float16_t)0.2131347656250f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9843750000000f,(float16_t)0.1770019531250f, +(float16_t)-0.9873046875000f,(float16_t)0.1588134765625f, +(float16_t)-0.9902343750000f,(float16_t)0.1406250000000f, +(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)-0.9946289062500f,(float16_t)0.1041259765625f, +(float16_t)-0.9960937500000f,(float16_t)0.0858154296875f, +(float16_t)-0.9975585937500f,(float16_t)0.0674438476562f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9995117187500f,(float16_t)0.0306701660156f, +(float16_t)-1.0000000000000f,(float16_t)0.0122680664062f, +(float16_t)-1.0000000000000f,(float16_t)-0.0061340332031f, +(float16_t)-0.9995117187500f,(float16_t)-0.0245361328125f, +(float16_t)-0.9990234375000f,(float16_t)-0.0429382324219f, +(float16_t)-0.9980468750000f,(float16_t)-0.0613098144531f, +(float16_t)-0.9965820312500f,(float16_t)-0.0797119140625f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9931640625000f,(float16_t)-0.1163330078125f, +(float16_t)-0.9907226562500f,(float16_t)-0.1345214843750f, +(float16_t)-0.9882812500000f,(float16_t)-0.1528320312500f, +(float16_t)-0.9853515625000f,(float16_t)-0.1710205078125f, +(float16_t)-0.9819335937500f,(float16_t)-0.1890869140625f, +(float16_t)-0.9785156250000f,(float16_t)-0.2071533203125f, +(float16_t)-0.9741210937500f,(float16_t)-0.2250976562500f, +(float16_t)-0.9702148437500f,(float16_t)-0.2429199218750f, +(float16_t)-0.9653320312500f,(float16_t)-0.2607421875000f, +(float16_t)-0.9604492187500f,(float16_t)-0.2785644531250f, +(float16_t)-0.9550781250000f,(float16_t)-0.2961425781250f, +(float16_t)-0.9497070312500f,(float16_t)-0.3137207031250f, +(float16_t)-0.9433593750000f,(float16_t)-0.3310546875000f, +(float16_t)-0.9375000000000f,(float16_t)-0.3483886718750f, +(float16_t)-0.9306640625000f,(float16_t)-0.3657226562500f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.9165039062500f,(float16_t)-0.3996582031250f, +(float16_t)-0.9091796875000f,(float16_t)-0.4165039062500f, +(float16_t)-0.9013671875000f,(float16_t)-0.4331054687500f, +(float16_t)-0.8930664062500f,(float16_t)-0.4497070312500f, +(float16_t)-0.8847656250000f,(float16_t)-0.4660644531250f, +(float16_t)-0.8759765625000f,(float16_t)-0.4821777343750f, +(float16_t)-0.8671875000000f,(float16_t)-0.4982910156250f, +(float16_t)-0.8579101562500f,(float16_t)-0.5141601562500f, +(float16_t)-0.8481445312500f,(float16_t)-0.5297851562500f, +(float16_t)-0.8383789062500f,(float16_t)-0.5454101562500f, 
+(float16_t)-0.8281250000000f,(float16_t)-0.5605468750000f, +(float16_t)-0.8173828125000f,(float16_t)-0.5756835937500f, +(float16_t)-0.8066406250000f,(float16_t)-0.5908203125000f, +(float16_t)-0.7958984375000f,(float16_t)-0.6054687500000f, +(float16_t)-0.7846679687500f,(float16_t)-0.6201171875000f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.7612304687500f,(float16_t)-0.6484375000000f, +(float16_t)-0.7490234375000f,(float16_t)-0.6625976562500f, +(float16_t)-0.7368164062500f,(float16_t)-0.6762695312500f, +(float16_t)-0.7241210937500f,(float16_t)-0.6894531250000f, +(float16_t)-0.7114257812500f,(float16_t)-0.7026367187500f, +(float16_t)-0.6982421875000f,(float16_t)-0.7158203125000f, +(float16_t)-0.6850585937500f,(float16_t)-0.7285156250000f, +(float16_t)-0.6713867187500f,(float16_t)-0.7407226562500f, +(float16_t)-0.6577148437500f,(float16_t)-0.7534179687500f, +(float16_t)-0.6440429687500f,(float16_t)-0.7651367187500f, +(float16_t)-0.6293945312500f,(float16_t)-0.7768554687500f, +(float16_t)-0.6152343750000f,(float16_t)-0.7885742187500f, +(float16_t)-0.6005859375000f,(float16_t)-0.7993164062500f, +(float16_t)-0.5859375000000f,(float16_t)-0.8105468750000f, +(float16_t)-0.5708007812500f,(float16_t)-0.8212890625000f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.5400390625000f,(float16_t)-0.8417968750000f, +(float16_t)-0.5244140625000f,(float16_t)-0.8515625000000f, +(float16_t)-0.5087890625000f,(float16_t)-0.8608398437500f, +(float16_t)-0.4929199218750f,(float16_t)-0.8701171875000f, +(float16_t)-0.4768066406250f,(float16_t)-0.8789062500000f, +(float16_t)-0.4604492187500f,(float16_t)-0.8876953125000f, +(float16_t)-0.4440917968750f,(float16_t)-0.8959960937500f, +(float16_t)-0.4274902343750f,(float16_t)-0.9038085937500f, +(float16_t)-0.4108886718750f,(float16_t)-0.9116210937500f, +(float16_t)-0.3940429687500f,(float16_t)-0.9189453125000f, +(float16_t)-0.3769531250000f,(float16_t)-0.9262695312500f, +(float16_t)-0.3598632812500f,(float16_t)-0.9331054687500f, +(float16_t)-0.3427734375000f,(float16_t)-0.9394531250000f, +(float16_t)-0.3251953125000f,(float16_t)-0.9458007812500f, +(float16_t)-0.3078613281250f,(float16_t)-0.9516601562500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)-0.2727050781250f,(float16_t)-0.9619140625000f, +(float16_t)-0.2548828125000f,(float16_t)-0.9667968750000f, +(float16_t)-0.2370605468750f,(float16_t)-0.9716796875000f, +(float16_t)-0.2191162109375f,(float16_t)-0.9755859375000f, +(float16_t)-0.2010498046875f,(float16_t)-0.9794921875000f, +(float16_t)-0.1829833984375f,(float16_t)-0.9829101562500f, +(float16_t)-0.1649169921875f,(float16_t)-0.9863281250000f, +(float16_t)-0.1467285156250f,(float16_t)-0.9892578125000f, +(float16_t)-0.1285400390625f,(float16_t)-0.9916992187500f, +(float16_t)-0.1102294921875f,(float16_t)-0.9941406250000f, +(float16_t)-0.0919189453125f,(float16_t)-0.9956054687500f, +(float16_t)-0.0735473632812f,(float16_t)-0.9970703125000f, +(float16_t)-0.0552062988281f,(float16_t)-0.9985351562500f, +(float16_t)-0.0368041992188f,(float16_t)-0.9995117187500f, +(float16_t)-0.0184020996094f,(float16_t)-1.0000000000000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9970703125000f,(float16_t)0.0735473632812f, +(float16_t)0.9892578125000f,(float16_t)0.1467285156250f, +(float16_t)0.9755859375000f,(float16_t)0.2191162109375f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, +(float16_t)0.9331054687500f,(float16_t)0.3598632812500f, 
+(float16_t)0.9038085937500f,(float16_t)0.4274902343750f, +(float16_t)0.8701171875000f,(float16_t)0.4929199218750f, +(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.7885742187500f,(float16_t)0.6152343750000f, +(float16_t)0.7407226562500f,(float16_t)0.6713867187500f, +(float16_t)0.6894531250000f,(float16_t)0.7241210937500f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.5756835937500f,(float16_t)0.8173828125000f, +(float16_t)0.5141601562500f,(float16_t)0.8579101562500f, +(float16_t)0.4497070312500f,(float16_t)0.8930664062500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.3137207031250f,(float16_t)0.9497070312500f, +(float16_t)0.2429199218750f,(float16_t)0.9702148437500f, +(float16_t)0.1710205078125f,(float16_t)0.9853515625000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)0.0245361328125f,(float16_t)0.9995117187500f, +(float16_t)-0.0490722656250f,(float16_t)0.9990234375000f, +(float16_t)-0.1224365234375f,(float16_t)0.9926757812500f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.2666015625000f,(float16_t)0.9638671875000f, +(float16_t)-0.3369140625000f,(float16_t)0.9414062500000f, +(float16_t)-0.4052734375000f,(float16_t)0.9140625000000f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.5351562500000f,(float16_t)0.8447265625000f, +(float16_t)-0.5957031250000f,(float16_t)0.8032226562500f, +(float16_t)-0.6533203125000f,(float16_t)0.7573242187500f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.7573242187500f,(float16_t)0.6533203125000f, +(float16_t)-0.8032226562500f,(float16_t)0.5957031250000f, +(float16_t)-0.8447265625000f,(float16_t)0.5351562500000f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9140625000000f,(float16_t)0.4052734375000f, +(float16_t)-0.9414062500000f,(float16_t)0.3369140625000f, +(float16_t)-0.9638671875000f,(float16_t)0.2666015625000f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9926757812500f,(float16_t)0.1224365234375f, +(float16_t)-0.9990234375000f,(float16_t)0.0490722656250f, +(float16_t)-0.9995117187500f,(float16_t)-0.0245361328125f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9853515625000f,(float16_t)-0.1710205078125f, +(float16_t)-0.9702148437500f,(float16_t)-0.2429199218750f, +(float16_t)-0.9497070312500f,(float16_t)-0.3137207031250f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.8930664062500f,(float16_t)-0.4497070312500f, +(float16_t)-0.8579101562500f,(float16_t)-0.5141601562500f, +(float16_t)-0.8173828125000f,(float16_t)-0.5756835937500f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.7241210937500f,(float16_t)-0.6894531250000f, +(float16_t)-0.6713867187500f,(float16_t)-0.7407226562500f, +(float16_t)-0.6152343750000f,(float16_t)-0.7885742187500f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.4929199218750f,(float16_t)-0.8701171875000f, +(float16_t)-0.4274902343750f,(float16_t)-0.9038085937500f, +(float16_t)-0.3598632812500f,(float16_t)-0.9331054687500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)-0.2191162109375f,(float16_t)-0.9755859375000f, +(float16_t)-0.1467285156250f,(float16_t)-0.9892578125000f, +(float16_t)-0.0735473632812f,(float16_t)-0.9970703125000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.9570312500000f,(float16_t)0.2902832031250f, 
+(float16_t)0.8315429687500f,(float16_t)0.5556640625000f, +(float16_t)0.6342773437500f,(float16_t)0.7729492187500f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)0.0980224609375f,(float16_t)0.9951171875000f, +(float16_t)-0.1950683593750f,(float16_t)0.9809570312500f, +(float16_t)-0.4714355468750f,(float16_t)0.8818359375000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.8818359375000f,(float16_t)0.4714355468750f, +(float16_t)-0.9809570312500f,(float16_t)0.1950683593750f, +(float16_t)-0.9951171875000f,(float16_t)-0.0980224609375f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f, +(float16_t)-0.7729492187500f,(float16_t)-0.6342773437500f, +(float16_t)-0.5556640625000f,(float16_t)-0.8315429687500f, +(float16_t)-0.2902832031250f,(float16_t)-0.9570312500000f, +(float16_t)1.0000000000000f,(float16_t)0.0000000000000f, +(float16_t)0.3825683593750f,(float16_t)0.9238281250000f, +(float16_t)-0.7070312500000f,(float16_t)0.7070312500000f, +(float16_t)-0.9238281250000f,(float16_t)-0.3825683593750f,}; #endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/ComplexMathFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/ComplexMathFunctionsF16.c deleted file mode 100644 index f03e262..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/ComplexMathFunctionsF16.c +++ /dev/null @@ -1,36 +0,0 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: CompexMathFunctionsF16.c - * Description: Combination of all complex math function f16 source files. - * - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2020 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "arm_cmplx_conj_f16.c" -#include "arm_cmplx_dot_prod_f16.c" -#include "arm_cmplx_mag_f16.c" -#include "arm_cmplx_mag_squared_f16.c" -#include "arm_cmplx_mult_cmplx_f16.c" -#include "arm_cmplx_mult_real_f16.c" - -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_f16.c index 3fefe25..9b10a4a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_f16.c @@ -5,11 +5,13 @@ * Title: arm_cmplx_conj_f16.c * Description: Floating-point complex conjugate * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -33,28 +35,6 @@ @ingroup groupCmplxMath */ -/** - @defgroup cmplx_conj Complex Conjugate - - Conjugates the elements of a complex data vector. - - The pSrc points to the source data and - pDst points to the destination data where the result should be written. - numSamples specifies the number of complex samples - and the data in each array is stored in an interleaved fashion - (real, imag, real, imag, ...). - Each array has a total of 2*numSamples values. - - The underlying algorithm is used: -
-  for (n = 0; n < numSamples; n++) {
-      pDst[(2*n)  ] =  pSrc[(2*n)  ];    // real part
-      pDst[(2*n)+1] = -pSrc[(2*n)+1];    // imag part
-  }
-  
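For reference, a minimal usage sketch of the f32 flavour of this kernel (the wrapper function and sample values are illustrative only, assuming the standard CMSIS-DSP arm_math.h header is on the include path):

  #include "arm_math.h"

  /* Conjugate four interleaved complex samples: dst[2n] = src[2n], dst[2n+1] = -src[2n+1]. */
  static void conj_example(void)
  {
      const float32_t src[8] = { 1.0f, 2.0f,   -0.5f, 0.25f,   3.0f, -1.0f,   0.0f, 4.0f };
      float32_t dst[8];

      arm_cmplx_conj_f32(src, dst, 4);   /* dst = { 1, -2,   -0.5, -0.25,   3, 1,   0, -4 } */
  }

The f16 hunks that follow add explicit (_Float16) casts on the negated imaginary samples, presumably so the negation stays in half precision rather than being promoted.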
- - There are separate functions for floating-point, Q15, and Q31 data types. - */ /** @addtogroup cmplx_conj @@ -112,7 +92,7 @@ void arm_cmplx_conj_f16( /* Calculate Complex Conjugate and store result in destination buffer. */ *pDst++ = *pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; /* Decrement loop counter */ blkCnt--; @@ -139,16 +119,16 @@ void arm_cmplx_conj_f16( /* Calculate Complex Conjugate and store result in destination buffer. */ *pDst++ = *pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; *pDst++ = *pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; *pDst++ = *pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; *pDst++ = *pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; /* Decrement loop counter */ blkCnt--; @@ -170,7 +150,7 @@ void arm_cmplx_conj_f16( /* Calculate Complex Conjugate and store result in destination buffer. */ *pDst++ = *pSrc++; - *pDst++ = -*pSrc++; + *pDst++ = -(_Float16)*pSrc++; /* Decrement loop counter */ blkCnt--; @@ -183,4 +163,5 @@ void arm_cmplx_conj_f16( @} end of cmplx_conj group */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_f32.c index dcb276d..89cbe5b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_f32.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_conj_f32.c * Description: Floating-point complex conjugate * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_q15.c index 3764614..b13e16d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_q15.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_conj_q15.c * Description: Q15 complex conjugate * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -124,11 +124,11 @@ void arm_cmplx_conj_q15( /* Calculate Complex Conjugate and store result in destination buffer. 
*/ - #if defined (ARM_MATH_DSP) - in1 = read_q15x2_ia ((q15_t **) &pSrc); - in2 = read_q15x2_ia ((q15_t **) &pSrc); - in3 = read_q15x2_ia ((q15_t **) &pSrc); - in4 = read_q15x2_ia ((q15_t **) &pSrc); +#if defined (ARM_MATH_DSP) + in1 = read_q15x2_ia (&pSrc); + in2 = read_q15x2_ia (&pSrc); + in3 = read_q15x2_ia (&pSrc); + in4 = read_q15x2_ia (&pSrc); #ifndef ARM_MATH_BIG_ENDIAN in1 = __QASX(0, in1); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_q31.c index aaf8707..879d679 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_conj_q31.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_conj_q31.c * Description: Q31 complex conjugate * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_f16.c index 44ea9aa..6066f61 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_f16.c @@ -5,11 +5,13 @@ * Title: arm_cmplx_dot_prod_f16.c * Description: Floating-point complex dot product * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -35,32 +37,6 @@ @ingroup groupCmplxMath */ -/** - @defgroup cmplx_dot_prod Complex Dot Product - - Computes the dot product of two complex vectors. - The vectors are multiplied element-by-element and then summed. - - The pSrcA points to the first complex input vector and - pSrcB points to the second complex input vector. - numSamples specifies the number of complex samples - and the data in each array is stored in an interleaved fashion - (real, imag, real, imag, ...). - Each array has a total of 2*numSamples values. - - The underlying algorithm is used: - -
-  realResult = 0;
-  imagResult = 0;
-  for (n = 0; n < numSamples; n++) {
-      realResult += pSrcA[(2*n)+0] * pSrcB[(2*n)+0] - pSrcA[(2*n)+1] * pSrcB[(2*n)+1];
-      imagResult += pSrcA[(2*n)+0] * pSrcB[(2*n)+1] + pSrcA[(2*n)+1] * pSrcB[(2*n)+0];
-  }
-  
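A minimal usage sketch of the f32 flavour of this dot product (the wrapper and sample values are illustrative only; the fixed-point variants accumulate into wider intermediate formats):

  #include "arm_math.h"

  /* Dot product of two 3-sample interleaved complex vectors (neither input is conjugated). */
  static void dot_prod_example(void)
  {
      const float32_t a[6] = { 1.0f, 0.0f,   0.0f, 1.0f,   2.0f, -1.0f };
      const float32_t b[6] = { 1.0f, 1.0f,   2.0f, 0.0f,   0.5f,  0.5f };
      float32_t re, im;

      arm_cmplx_dot_prod_f32(a, b, 3, &re, &im);   /* re = 2.5, im = 3.5 */
  }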
- - There are separate functions for floating-point, Q15, and Q31 data types. - */ /** @addtogroup cmplx_dot_prod @@ -286,4 +262,5 @@ void arm_cmplx_dot_prod_f16( */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_f32.c index ddc0f6e..8282d6f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_f32.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_dot_prod_f32.c * Description: Floating-point complex dot product * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_q15.c index 4ae4d05..2c93864 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_q15.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_dot_prod_q15.c * Description: Processing function for the Q15 Complex Dot product * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_q31.c index 3e1ec7a..bd5e894 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_dot_prod_q31.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_dot_prod_q31.c * Description: Q31 complex dot product * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. 
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f16.c index 8cad742..a4c859d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f16.c @@ -5,11 +5,13 @@ * Title: arm_cmplx_mag_f16.c * Description: Floating-point complex magnitude * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -33,29 +35,7 @@ @ingroup groupCmplxMath */ -/** - @defgroup cmplx_mag Complex Magnitude - - Computes the magnitude of the elements of a complex data vector. - - The pSrc points to the source data and - pDst points to the where the result should be written. - numSamples specifies the number of complex samples - in the input array and the data is stored in an interleaved fashion - (real, imag, real, imag, ...). - The input array has a total of 2*numSamples values; - the output array has a total of numSamples values. - - The underlying algorithm is used: -
-  for (n = 0; n < numSamples; n++) {
-      pDst[n] = sqrt(pSrc[(2*n)+0]^2 + pSrc[(2*n)+1]^2);
-  }
-  
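A minimal usage sketch of the f32 flavour of this kernel (the wrapper and sample values are illustrative only); the arm_cmplx_mag_f64 and arm_cmplx_mag_fast_q15 sources added further down in this patch take the same interleaved input and write one magnitude per complex sample:

  #include "arm_math.h"

  /* Magnitudes of four interleaved complex samples: mag[n] = sqrt(re[n]^2 + im[n]^2). */
  static void mag_example(void)
  {
      const float32_t src[8] = { 3.0f, 4.0f,   1.0f, 0.0f,   0.0f, -2.0f,   -5.0f, 12.0f };
      float32_t mag[4];

      arm_cmplx_mag_f32(src, mag, 4);   /* mag = { 5, 1, 2, 13 } */
  }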
- - There are separate functions for floating-point, Q15, and Q31 data types. - */ /** @addtogroup cmplx_mag @@ -239,4 +219,5 @@ void arm_cmplx_mag_f16( */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f32.c index 8209fce..b2c9230 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f32.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mag_f32.c * Description: Floating-point complex magnitude * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f64.c new file mode 100644 index 0000000..2d651ac --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_f64.c @@ -0,0 +1,82 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_cmplx_mag_f64.c + * Description: Floating-point complex magnitude + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h" + +/** + @ingroup groupCmplxMath + */ + + + +/** + @addtogroup cmplx_mag + @{ + */ + +/** + @brief Floating-point complex magnitude. 
+ @param[in] pSrc points to input vector + @param[out] pDst points to output vector + @param[in] numSamples number of samples in each vector + @return none + */ +void arm_cmplx_mag_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t numSamples) +{ + uint32_t blkCnt; /* loop counter */ + float64_t real, imag; /* Temporary variables to hold input values */ + + /* Initialize blkCnt with number of samples */ + blkCnt = numSamples; + + while (blkCnt > 0U) + { + /* C[0] = sqrt(A[0] * A[0] + A[1] * A[1]) */ + + real = *pSrc++; + imag = *pSrc++; + + /* store result in destination buffer. */ + *pDst++ = sqrt((real * real) + (imag * imag)); + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of cmplx_mag group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_fast_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_fast_q15.c new file mode 100644 index 0000000..6a78a7e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_fast_q15.c @@ -0,0 +1,227 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_cmplx_mag_fast_q15.c + * Description: Q15 complex magnitude + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h" + +/** + @ingroup groupCmplxMath + */ + +/** + @addtogroup cmplx_mag + @{ + */ + +/** + @brief Q15 complex magnitude. + @param[in] pSrc points to input vector + @param[out] pDst points to output vector + @param[in] numSamples number of samples in each vector + @return none + + @par Scaling and Overflow Behavior + The function implements 1.15 by 1.15 multiplications and finally output is converted into 2.14 format. + Fast functions are less accurate. This function will tend to clamp to 0 + the too small values. So sqrt(x*x) = x will not always be true. 
+ */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_cmplx_mag_fast_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t numSamples) +{ + + int32_t blockSize = numSamples; /* loop counters */ + uint32_t blkCnt; /* loop counters */ + q15x8x2_t vecSrc; + q15x8_t sum; + q31_t in; + q31_t acc0; + + blkCnt = blockSize >> 3; + while (blkCnt > 0U) + { + vecSrc = vld2q(pSrc); + pSrc += 16; + sum = vqaddq(vmulhq(vecSrc.val[0], vecSrc.val[0]), + vmulhq(vecSrc.val[1], vecSrc.val[1])); + + sum = vshrq(sum, 1); + + sum = FAST_VSQRT_Q15(sum); + + vst1q(pDst, sum); + pDst += 8; + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + + /* + * tail + */ + blkCnt = blockSize & 7; + + while (blkCnt > 0U) + { + /* C[0] = sqrt(A[0] * A[0] + A[1] * A[1]) */ + + in = read_q15x2_ia ((q15_t **) &pSrc); + acc0 = __SMUAD(in, in); + + /* store result in 2.14 format in destination buffer. */ + arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + + + /* Decrement loop counter */ + blkCnt--; + } +} + +#else +void arm_cmplx_mag_fast_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t numSamples) +{ + uint32_t blkCnt; /* Loop counter */ + +#if defined (ARM_MATH_DSP) + q31_t in; + q31_t acc0; /* Accumulators */ +#else + q15_t real, imag; /* Temporary input variables */ + q31_t acc0, acc1; /* Accumulators */ +#endif + +#if defined (ARM_MATH_LOOPUNROLL) + + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = numSamples >> 2U; + + while (blkCnt > 0U) + { + /* C[0] = sqrt(A[0] * A[0] + A[1] * A[1]) */ + +#if defined (ARM_MATH_DSP) + in = read_q15x2_ia (&pSrc); + acc0 = __SMUAD(in, in); + /* store result in 2.14 format in destination buffer. */ + arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + + in = read_q15x2_ia (&pSrc); + acc0 = __SMUAD(in, in); + arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + + in = read_q15x2_ia (&pSrc); + acc0 = __SMUAD(in, in); + arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + + in = read_q15x2_ia (&pSrc); + acc0 = __SMUAD(in, in); + arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); +#else + real = *pSrc++; + imag = *pSrc++; + acc0 = ((q31_t) real * real); + acc1 = ((q31_t) imag * imag); + + /* store result in 2.14 format in destination buffer. */ + arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); + + real = *pSrc++; + imag = *pSrc++; + acc0 = ((q31_t) real * real); + acc1 = ((q31_t) imag * imag); + arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); + + real = *pSrc++; + imag = *pSrc++; + acc0 = ((q31_t) real * real); + acc1 = ((q31_t) imag * imag); + arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); + + real = *pSrc++; + imag = *pSrc++; + acc0 = ((q31_t) real * real); + acc1 = ((q31_t) imag * imag); + arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); +#endif /* #if defined (ARM_MATH_DSP) */ + + /* Decrement loop counter */ + blkCnt--; + } + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = numSamples % 0x4U; + +#else + + /* Initialize blkCnt with number of samples */ + blkCnt = numSamples; + +#endif /* #if defined (ARM_MATH_LOOPUNROLL) */ + + while (blkCnt > 0U) + { + /* C[0] = sqrt(A[0] * A[0] + A[1] * A[1]) */ + +#if defined (ARM_MATH_DSP) + in = read_q15x2_ia (&pSrc); + acc0 = __SMUAD(in, in); + + /* store result in 2.14 format in destination buffer. 
*/ + arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); +#else + real = *pSrc++; + imag = *pSrc++; + acc0 = ((q31_t) real * real); + acc1 = ((q31_t) imag * imag); + + /* store result in 2.14 format in destination buffer. */ + arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); +#endif + + /* Decrement loop counter */ + blkCnt--; + } + +} +#endif /* defined(ARM_MATH_MVEI) */ + +/** + @} end of cmplx_mag group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_q15.c index 473ef07..9c06477 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_q15.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mag_q15.c * Description: Q15 complex magnitude * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -49,6 +49,11 @@ @par Scaling and Overflow Behavior The function implements 1.15 by 1.15 multiplications and finally output is converted into 2.14 format. */ + +/* Sqrt q31 is used otherwise accuracy is not good enough + for small values and for some applications it is + an issue. + */ #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" @@ -62,23 +67,52 @@ void arm_cmplx_mag_q15( int32_t blockSize = numSamples; /* loop counters */ uint32_t blkCnt; /* loop counters */ q15x8x2_t vecSrc; - q15x8_t sum; + q31x4_t prod0; + q31x4_t prod1; + q31_t in; q31_t acc0; + q31x4_t acc0V; + q31x4_t acc1V; + + q31_t res; + q15x8_t resV; blkCnt = blockSize >> 3; while (blkCnt > 0U) { vecSrc = vld2q(pSrc); pSrc += 16; - sum = vqaddq(vmulhq(vecSrc.val[0], vecSrc.val[0]), - vmulhq(vecSrc.val[1], vecSrc.val[1])); - sum = vshrq(sum, 1); + acc0V = vdupq_n_s32(0); + acc1V = vdupq_n_s32(0); + + prod0 = vmullbq_int_s16(vecSrc.val[0], vecSrc.val[0]); + acc0V = vqaddq_s32(acc0V,prod0); + + prod0 = vmullbq_int_s16(vecSrc.val[1], vecSrc.val[1]); + acc0V = vqaddq_s32(acc0V,prod0); + + + prod1 = vmulltq_int_s16(vecSrc.val[0], vecSrc.val[0]); + acc1V = vqaddq_s32(acc1V,prod1); + + prod1 = vmulltq_int_s16(vecSrc.val[1], vecSrc.val[1]); + acc1V = vqaddq_s32(acc1V,prod1); + + + + acc0V = vshrq(acc0V, 1); + acc1V = vshrq(acc1V, 1); + + acc0V = FAST_VSQRT_Q31(acc0V); + acc1V = FAST_VSQRT_Q31(acc1V); - sum = FAST_VSQRT_Q15(sum); + resV = vdupq_n_s16(0); + resV = vqshrnbq_n_s32(resV,acc0V,16); + resV = vqshrntq_n_s32(resV,acc1V,16); - vst1q(pDst, sum); + vst1q(pDst, resV); pDst += 8; /* * Decrement the blockSize loop counter @@ -99,7 +133,8 @@ void arm_cmplx_mag_q15( acc0 = __SMUAD(in, in); /* store result in 2.14 format in destination buffer. 
*/ - arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + arm_sqrt_q31(acc0 >> 1 , &res); + *pDst++ = res >> 16; /* Decrement loop counter */ @@ -113,6 +148,7 @@ void arm_cmplx_mag_q15( q15_t * pDst, uint32_t numSamples) { + q31_t res; /* temporary result */ uint32_t blkCnt; /* Loop counter */ #if defined (ARM_MATH_DSP) @@ -133,22 +169,26 @@ void arm_cmplx_mag_q15( /* C[0] = sqrt(A[0] * A[0] + A[1] * A[1]) */ #if defined (ARM_MATH_DSP) - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); /* store result in 2.14 format in destination buffer. */ - arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + arm_sqrt_q31(acc0 >> 1 , &res); + *pDst++ = res >> 16; - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); - arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + arm_sqrt_q31(acc0 >> 1 , &res); + *pDst++ = res >> 16; - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); - arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + arm_sqrt_q31(acc0 >> 1 , &res); + *pDst++ = res >> 16; - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); - arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + arm_sqrt_q31(acc0 >> 1 , &res); + *pDst++ = res >> 16; #else real = *pSrc++; imag = *pSrc++; @@ -156,25 +196,29 @@ void arm_cmplx_mag_q15( acc1 = ((q31_t) imag * imag); /* store result in 2.14 format in destination buffer. */ - arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); + arm_sqrt_q31((acc0 + acc1) >> 1 , &res); + *pDst++ = res >> 16; real = *pSrc++; imag = *pSrc++; acc0 = ((q31_t) real * real); acc1 = ((q31_t) imag * imag); - arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); + arm_sqrt_q31((acc0 + acc1) >> 1 , &res); + *pDst++ = res >> 16; real = *pSrc++; imag = *pSrc++; acc0 = ((q31_t) real * real); acc1 = ((q31_t) imag * imag); - arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); + arm_sqrt_q31((acc0 + acc1) >> 1 , &res); + *pDst++ = res >> 16; real = *pSrc++; imag = *pSrc++; acc0 = ((q31_t) real * real); acc1 = ((q31_t) imag * imag); - arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); + arm_sqrt_q31((acc0 + acc1) >> 1 , &res); + *pDst++ = res >> 16; #endif /* #if defined (ARM_MATH_DSP) */ /* Decrement loop counter */ @@ -196,11 +240,12 @@ void arm_cmplx_mag_q15( /* C[0] = sqrt(A[0] * A[0] + A[1] * A[1]) */ #if defined (ARM_MATH_DSP) - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); /* store result in 2.14 format in destination buffer. */ - arm_sqrt_q15((q15_t) (acc0 >> 17), pDst++); + arm_sqrt_q31(acc0 >> 1 , &res); + *pDst++ = res >> 16; #else real = *pSrc++; imag = *pSrc++; @@ -208,7 +253,9 @@ void arm_cmplx_mag_q15( acc1 = ((q31_t) imag * imag); /* store result in 2.14 format in destination buffer. 
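/*
 * Illustrative arithmetic for the change in these hunks, where
 * arm_sqrt_q15((q15_t)(acc >> 17)) is replaced by arm_sqrt_q31(acc >> 1)
 * followed by ">> 16": both produce a Q2.14 magnitude, but the Q31 route
 * keeps the low-order bits that the ">> 17" shift throws away. Sketch only;
 * the helper names are hypothetical and double-precision sqrt() stands in
 * for the fixed-point square roots.
 */
#include <stdint.h>
#include <stdio.h>
#include <math.h>

static int16_t mag_via_q15_sqrt(int32_t acc)   /* old scaling */
{
    int16_t x = (int16_t)(acc >> 17);          /* Q1.15 */
    return (int16_t)lrint(sqrt((double)x / 32768.0) * 32768.0);
}

static int16_t mag_via_q31_sqrt(int32_t acc)   /* new scaling */
{
    int32_t x   = acc >> 1;                    /* Q1.31 */
    int32_t res = (int32_t)lrint(sqrt((double)x / 2147483648.0) * 2147483648.0);
    return (int16_t)(res >> 16);               /* Q2.14 */
}

int main(void)
{
    int32_t acc = 16 * 16 + 16 * 16;           /* (16 + 16j) in Q1.15: sum of squares */
    printf("%d %d\n", mag_via_q15_sqrt(acc), mag_via_q31_sqrt(acc));
    /* prints "0 11": the exact Q2.14 magnitude is ~11.3, which the old
     * scaling loses entirely for inputs this small.                     */
    return 0;
}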
*/ - arm_sqrt_q15((q15_t) (((q63_t) acc0 + acc1) >> 17), pDst++); + arm_sqrt_q31((acc0 + acc1) >> 1 , &res); + *pDst++ = res >> 16; + #endif /* Decrement loop counter */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_q31.c index fa5a4e4..0041620 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_q31.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mag_q31.c * Description: Q31 complex magnitude * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f16.c index 1449000..5fd3af1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f16.c @@ -5,11 +5,13 @@ * Title: arm_cmplx_mag_squared_f16.c * Description: Floating-point complex magnitude squared * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,29 +36,6 @@ @ingroup groupCmplxMath */ -/** - @defgroup cmplx_mag_squared Complex Magnitude Squared - - Computes the magnitude squared of the elements of a complex data vector. - - The pSrc points to the source data and - pDst points to the where the result should be written. - numSamples specifies the number of complex samples - in the input array and the data is stored in an interleaved fashion - (real, imag, real, imag, ...). - The input array has a total of 2*numSamples values; - the output array has a total of numSamples values. - - The underlying algorithm is used: - -
-  for (n = 0; n < numSamples; n++) {
-      pDst[n] = pSrc[(2*n)+0]^2 + pSrc[(2*n)+1]^2;
-  }
-  
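/*
 * Minimal usage sketch for the interleaved layout described by the removed
 * pseudocode above (real, imag, real, imag, ...): 2*numSamples inputs give
 * numSamples squared magnitudes. It calls the arm_cmplx_mag_squared_f64
 * kernel this patch adds below; the include path is taken from the diff and
 * the example assumes the Edge Impulse SDK sources are on the include path.
 */
#include <stdio.h>
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h"

int main(void)
{
    const float64_t src[4] = { 3.0, 4.0, 1.0, -2.0 };  /* 3+4j, 1-2j */
    float64_t dst[2];

    arm_cmplx_mag_squared_f64(src, dst, 2);

    printf("%f %f\n", dst[0], dst[1]);  /* 25.000000 5.000000 */
    return 0;
}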
- - There are separate functions for floating-point, Q15, and Q31 data types. - */ /** @addtogroup cmplx_mag_squared @@ -172,4 +151,5 @@ void arm_cmplx_mag_squared_f16( */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f32.c index e611194..eaadf1c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f32.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mag_squared_f32.c * Description: Floating-point complex magnitude squared * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f64.c new file mode 100644 index 0000000..d2a2b36 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_f64.c @@ -0,0 +1,80 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_cmplx_mag_squared_f64.c + * Description: Floating-point complex magnitude squared + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h" + +/** + @ingroup groupCmplxMath + */ + +/** + @addtogroup cmplx_mag_squared + @{ + */ + +/** + @brief Floating-point complex magnitude squared. 
+ @param[in] pSrc points to input vector + @param[out] pDst points to output vector + @param[in] numSamples number of samples in each vector + @return none + */ +void arm_cmplx_mag_squared_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t numSamples) +{ + uint32_t blkCnt; /* Loop counter */ + float64_t real, imag; /* Temporary input variables */ + + /* Initialize blkCnt with number of samples */ + blkCnt = numSamples; + + while (blkCnt > 0U) + { + /* C[0] = (A[0] * A[0] + A[1] * A[1]) */ + + real = *pSrc++; + imag = *pSrc++; + + /* store result in destination buffer. */ + *pDst++ = (real * real) + (imag * imag); + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of cmplx_mag_squared group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_q15.c index 5163b22..0e2b2ec 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_q15.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mag_squared_q15.c * Description: Q15 complex magnitude squared * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -133,20 +133,20 @@ void arm_cmplx_mag_squared_q15( /* C[0] = (A[0] * A[0] + A[1] * A[1]) */ #if defined (ARM_MATH_DSP) - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); /* store result in 3.13 format in destination buffer. */ *pDst++ = (q15_t) (acc0 >> 17); - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); *pDst++ = (q15_t) (acc0 >> 17); - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); *pDst++ = (q15_t) (acc0 >> 17); - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); *pDst++ = (q15_t) (acc0 >> 17); #else @@ -195,7 +195,7 @@ void arm_cmplx_mag_squared_q15( /* C[0] = (A[0] * A[0] + A[1] * A[1]) */ #if defined (ARM_MATH_DSP) - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); acc0 = __SMUAD(in, in); /* store result in 3.13 format in destination buffer. */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_q31.c index e9a7649..b533a60 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mag_squared_q31.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mag_squared_q31.c * Description: Q31 complex magnitude squared * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f16.c index 79f48e9..75fefa3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f16.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mult_cmplx_f16.c * Description: Floating-point complex-by-complex multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -36,27 +36,7 @@ @ingroup groupCmplxMath */ -/** - @defgroup CmplxByCmplxMult Complex-by-Complex Multiplication - - Multiplies a complex vector by another complex vector and generates a complex result. - The data in the complex arrays is stored in an interleaved fashion - (real, imag, real, imag, ...). - The parameter numSamples represents the number of complex - samples processed. The complex arrays have a total of 2*numSamples - real values. - The underlying algorithm is used: - -
-  for (n = 0; n < numSamples; n++) {
-      pDst[(2*n)+0] = pSrcA[(2*n)+0] * pSrcB[(2*n)+0] - pSrcA[(2*n)+1] * pSrcB[(2*n)+1];
-      pDst[(2*n)+1] = pSrcA[(2*n)+0] * pSrcB[(2*n)+1] + pSrcA[(2*n)+1] * pSrcB[(2*n)+0];
-  }
-  
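/*
 * Plain-C restatement of the complex-by-complex multiply pseudocode removed
 * above. It is only a sketch with hypothetical names; the real kernels
 * (including the arm_cmplx_mult_cmplx_f64 source added below) carry the
 * vectorized and fixed-point variants.
 */
#include <stdio.h>

static void cmplx_mult_cmplx_ref(const double *a, const double *b,
                                 double *dst, unsigned n)
{
    for (unsigned i = 0; i < n; i++) {
        double ar = a[2*i], ai = a[2*i + 1];
        double br = b[2*i], bi = b[2*i + 1];
        dst[2*i]     = ar * br - ai * bi;  /* real part      */
        dst[2*i + 1] = ar * bi + ai * br;  /* imaginary part */
    }
}

int main(void)
{
    const double x[2] = { 1.0, 2.0 };  /* 1 + 2j */
    const double y[2] = { 3.0, 4.0 };  /* 3 + 4j */
    double z[2];

    cmplx_mult_cmplx_ref(x, y, z, 1);
    printf("%g %g\n", z[0], z[1]);     /* -5 10 */
    return 0;
}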
- - There are separate functions for floating-point, Q15, and Q31 data types. - */ /** @addtogroup CmplxByCmplxMult @@ -271,4 +251,5 @@ void arm_cmplx_mult_cmplx_f16( */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f32.c index 672ed89..d6ec828 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f32.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mult_cmplx_f32.c * Description: Floating-point complex-by-complex multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f64.c new file mode 100644 index 0000000..603de64 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_f64.c @@ -0,0 +1,87 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_cmplx_mult_cmplx_f64.c + * Description: Floating-point complex-by-complex multiplication + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h" + +/** + @ingroup groupCmplxMath + */ + +/** + @addtogroup CmplxByCmplxMult + @{ + */ + +/** + @brief Floating-point complex-by-complex multiplication. 
+ @param[in] pSrcA points to first input vector + @param[in] pSrcB points to second input vector + @param[out] pDst points to output vector + @param[in] numSamples number of samples in each vector + @return none + */ + +void arm_cmplx_mult_cmplx_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + float64_t * pDst, + uint32_t numSamples) +{ + uint32_t blkCnt; /* Loop counter */ + float64_t a, b, c, d; /* Temporary variables to store real and imaginary values */ + + /* Initialize blkCnt with number of samples */ + blkCnt = numSamples; + + while (blkCnt > 0U) + { + /* C[2 * i ] = A[2 * i] * B[2 * i ] - A[2 * i + 1] * B[2 * i + 1]. */ + /* C[2 * i + 1] = A[2 * i] * B[2 * i + 1] + A[2 * i + 1] * B[2 * i ]. */ + + a = *pSrcA++; + b = *pSrcA++; + c = *pSrcB++; + d = *pSrcB++; + + /* store result in destination buffer. */ + *pDst++ = (a * c) - (b * d); + *pDst++ = (a * d) + (b * c); + + /* Decrement loop counter */ + blkCnt--; + } + +} + +/** + @} end of CmplxByCmplxMult group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_q15.c index 759b917..0790341 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_q15.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mult_cmplx_q15.c * Description: Q15 complex-by-complex multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_q31.c index 6280603..cbfc505 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_cmplx_q31.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mult_cmplx_q31.c * Description: Q31 complex-by-complex multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_f16.c index 1bc40d2..740639e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_f16.c @@ -5,11 +5,13 @@ * Title: arm_cmplx_mult_real_f16.c * Description: Floating-point complex by real multiplication * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,28 +36,6 @@ @ingroup groupCmplxMath */ -/** - @defgroup CmplxByRealMult Complex-by-Real Multiplication - - Multiplies a complex vector by a real vector and generates a complex result. - The data in the complex arrays is stored in an interleaved fashion - (real, imag, real, imag, ...). - The parameter numSamples represents the number of complex - samples processed. The complex arrays have a total of 2*numSamples - real values while the real array has a total of numSamples - real values. - - The underlying algorithm is used: - -
-  for (n = 0; n < numSamples; n++) {
-      pCmplxDst[(2*n)+0] = pSrcCmplx[(2*n)+0] * pSrcReal[n];
-      pCmplxDst[(2*n)+1] = pSrcCmplx[(2*n)+1] * pSrcReal[n];
-  }
-  
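/*
 * Plain-C restatement of the complex-by-real multiply pseudocode removed
 * above: each real factor scales one interleaved (re, im) pair. Sketch only,
 * with hypothetical names; the f16 hunks below additionally spell the
 * half-precision arithmetic out with explicit (_Float16) casts.
 */
#include <stdio.h>

static void cmplx_mult_real_ref(const float *cmplx, const float *real,
                                float *dst, unsigned n)
{
    for (unsigned i = 0; i < n; i++) {
        dst[2*i]     = cmplx[2*i]     * real[i];
        dst[2*i + 1] = cmplx[2*i + 1] * real[i];
    }
}

int main(void)
{
    const float z[4] = { 1.0f, -2.0f, 0.5f, 4.0f };  /* 1-2j, 0.5+4j */
    const float k[2] = { 2.0f, 10.0f };
    float out[4];

    cmplx_mult_real_ref(z, k, out, 2);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);  /* 2 -4 5 40 */
    return 0;
}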
- - There are separate functions for floating-point, Q15, and Q31 data types. - */ /** @addtogroup CmplxByRealMult @@ -79,7 +59,7 @@ void arm_cmplx_mult_real_f16( float16_t * pCmplxDst, uint32_t numSamples) { - const static uint16_t stride_cmplx_x_real_16[8] = { + static const uint16_t stride_cmplx_x_real_16[8] = { 0, 0, 1, 1, 2, 2, 3, 3 }; uint32_t blockSizeC = numSamples * CMPLX_DIM; /* loop counters */ @@ -141,20 +121,20 @@ void arm_cmplx_mult_real_f16( in = *pSrcReal++; /* store result in destination buffer. */ - *pCmplxDst++ = *pSrcCmplx++ * in; - *pCmplxDst++ = *pSrcCmplx++ * in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; in = *pSrcReal++; - *pCmplxDst++ = *pSrcCmplx++ * in; - *pCmplxDst++ = *pSrcCmplx++ * in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; in = *pSrcReal++; - *pCmplxDst++ = *pSrcCmplx++ * in; - *pCmplxDst++ = *pSrcCmplx++ * in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; in = *pSrcReal++; - *pCmplxDst++ = *pSrcCmplx++* in; - *pCmplxDst++ = *pSrcCmplx++ * in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; /* Decrement loop counter */ blkCnt--; @@ -177,8 +157,8 @@ void arm_cmplx_mult_real_f16( in = *pSrcReal++; /* store result in destination buffer. */ - *pCmplxDst++ = *pSrcCmplx++ * in; - *pCmplxDst++ = *pSrcCmplx++ * in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; + *pCmplxDst++ = (_Float16)*pSrcCmplx++ * (_Float16)in; /* Decrement loop counter */ blkCnt--; @@ -192,4 +172,5 @@ void arm_cmplx_mult_real_f16( */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_f32.c index c946dfa..af346be 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_f32.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mult_real_f32.c * Description: Floating-point complex by real multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -79,7 +79,7 @@ void arm_cmplx_mult_real_f32( float32_t * pCmplxDst, uint32_t numSamples) { - const static uint32_t stride_cmplx_x_real_32[4] = { 0, 0, 1, 1 }; + static const uint32_t stride_cmplx_x_real_32[4] = { 0, 0, 1, 1 }; uint32_t blockSizeC = numSamples * CMPLX_DIM; /* loop counters */ uint32_t blkCnt; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_q15.c index 9495dcb..c2aab63 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_q15.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mult_real_q15.c * Description: Q15 complex by real multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -59,7 +59,7 @@ void arm_cmplx_mult_real_q15( q15_t * pCmplxDst, uint32_t numSamples) { - const static uint16_t stride_cmplx_x_real_16[8] = { + static const uint16_t stride_cmplx_x_real_16[8] = { 0, 0, 1, 1, 2, 2, 3, 3 }; q15x8_t rVec; @@ -135,10 +135,10 @@ void arm_cmplx_mult_real_q15( #if defined (ARM_MATH_DSP) /* read 2 complex numbers both real and imaginary from complex input buffer */ - inA1 = read_q15x2_ia ((q15_t **) &pSrcCmplx); - inA2 = read_q15x2_ia ((q15_t **) &pSrcCmplx); + inA1 = read_q15x2_ia (&pSrcCmplx); + inA2 = read_q15x2_ia (&pSrcCmplx); /* read 2 real values at a time from real input buffer */ - inB1 = read_q15x2_ia ((q15_t **) &pSrcReal); + inB1 = read_q15x2_ia (&pSrcReal); /* multiply complex number with real numbers */ #ifndef ARM_MATH_BIG_ENDIAN @@ -163,9 +163,9 @@ void arm_cmplx_mult_real_q15( write_q15x2_ia (&pCmplxDst, __PKHBT(out1, out2, 16)); write_q15x2_ia (&pCmplxDst, __PKHBT(out3, out4, 16)); - inA1 = read_q15x2_ia ((q15_t **) &pSrcCmplx); - inA2 = read_q15x2_ia ((q15_t **) &pSrcCmplx); - inB1 = read_q15x2_ia ((q15_t **) &pSrcReal); + inA1 = read_q15x2_ia (&pSrcCmplx); + inA2 = read_q15x2_ia (&pSrcCmplx); + inB1 = read_q15x2_ia (&pSrcReal); #ifndef ARM_MATH_BIG_ENDIAN mul1 = (q31_t) ((q15_t) (inA1) * (q15_t) (inB1)); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_q31.c index 8303420..700468d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ComplexMathFunctions/arm_cmplx_mult_real_q31.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mult_real_q31.c * Description: Q31 complex by real multiplication * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -60,7 +60,7 @@ void arm_cmplx_mult_real_q31( uint32_t numSamples) { - const static uint32_t stride_cmplx_x_real_32[4] = { + static const uint32_t stride_cmplx_x_real_32[4] = { 0, 0, 1, 1 }; q31x4_t rVec; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_f32.c index 40892c1..976e91f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_pid_init_f32.c * Description: Floating-point PID Control initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_q15.c index 1c8e160..79f5f0d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_pid_init_q15.c * Description: Q15 PID Control initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_q31.c index d38c740..df5415c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_pid_init_q31.c * Description: Q31 PID Control initialization function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_f32.c index fa29131..b0e6abb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_f32.c @@ -5,13 +5,13 @@ * Title: arm_pid_reset_f32.c * Description: Floating-point PID Control reset function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_q15.c index bcd451a..c42f45a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_q15.c @@ -5,13 +5,13 @@ * Title: arm_pid_reset_q15.c * Description: Q15 PID Control reset function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_q31.c index c13df84..472a2c1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_pid_reset_q31.c @@ -5,13 +5,13 @@ * Title: arm_pid_reset_q31.c * Description: Q31 PID Control reset function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_sin_cos_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_sin_cos_f32.c index 97a3e39..4c85db6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_sin_cos_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_sin_cos_f32.c @@ -5,13 +5,13 @@ * Title: arm_sin_cos_f32.c * Description: Sine and Cosine calculation for floating-point values * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -31,35 +31,6 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/controller_functions.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" -/** - @ingroup groupController - */ - -/** - @defgroup SinCos Sine Cosine - - Computes the trigonometric sine and cosine values using a combination of table lookup - and linear interpolation. - There are separate functions for Q31 and floating-point data types. - The input to the floating-point version is in degrees while the - fixed-point Q31 have a scaled input with the range - [-1 0.9999] mapping to [-180 +180] degrees. - - The floating point function also allows values that are out of the usual range. When this happens, the function will - take extra time to adjust the input value to the range of [-180 180]. - - The result is accurate to 5 digits after the decimal point. - - The implementation is based on table lookup using 360 values together with linear interpolation. - The steps used are: - -# Calculation of the nearest integer table index. - -# Compute the fractional portion (fract) of the input. - -# Fetch the value corresponding to \c index from sine table to \c y0 and also value from \c index+1 to \c y1. - -# Sine value is computed as *psinVal = y0 + (fract * (y1 - y0)). - -# Fetch the value corresponding to \c index from cosine table to \c y0 and also value from \c index+1 to \c y1. - -# Cosine value is computed as *pcosVal = y0 + (fract * (y1 - y0)). 
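/*
 * Sketch of the table-lookup-plus-linear-interpolation scheme the removed
 * overview text describes (360 sine values, degree input). It is not the
 * library implementation: arm_sin_cos_f32 in the hunks below uses the shared
 * sinTable_f32 and an interpolation that also involves derivative terms
 * (Dn, Df, d1, d2), and the dropped "temp = (1.0f - fract) * f1 ..." lines
 * appear to have been dead stores. All names here are hypothetical, and the
 * table is built at run time only so the example is self-contained.
 */
#include <stdio.h>
#include <math.h>

#define TABLE_SIZE 360                     /* one entry per degree */

static float sin_table[TABLE_SIZE + 1];    /* +1 entry so idx + 1 never wraps */

static void init_table(void)
{
    for (int i = 0; i <= TABLE_SIZE; i++)
        sin_table[i] = sinf((float)i * 3.14159265f / 180.0f);
}

static void sin_cos_demo(float deg, float *s, float *c)
{
    deg = fmodf(deg, 360.0f);              /* fold into [0, 360) */
    if (deg < 0.0f) deg += 360.0f;

    int   idx   = (int)deg;                /* lower table index */
    float fract = deg - (float)idx;        /* fractional part   */
    *s = sin_table[idx] + fract * (sin_table[idx + 1] - sin_table[idx]);

    float cdeg = deg + 90.0f;              /* cos(x) = sin(x + 90 deg) */
    if (cdeg >= 360.0f) cdeg -= 360.0f;
    int   cidx = (int)cdeg;
    float cfr  = cdeg - (float)cidx;
    *c = sin_table[cidx] + cfr * (sin_table[cidx + 1] - sin_table[cidx]);
}

int main(void)
{
    float s, c;
    init_table();
    sin_cos_demo(30.5f, &s, &c);
    printf("%f %f\n", s, c);               /* ~0.5075 ~0.8616 */
    return 0;
}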
- */ - /** @addtogroup SinCos @{ @@ -109,8 +80,6 @@ void arm_sin_cos_f32( d1 = -sinTable_f32[indexS ]; d2 = -sinTable_f32[indexS+1]; - temp = (1.0f - fract) * f1 + fract * f2; - Dn = 0.0122718463030f; /* delta between the two points (fixed), in this case 2*pi/FAST_MATH_TABLE_SIZE */ Df = f2 - f1; /* delta between the values of the functions */ @@ -127,7 +96,6 @@ void arm_sin_cos_f32( d1 = sinTable_f32[indexC ]; d2 = sinTable_f32[indexC+1]; - temp = (1.0f - fract) * f1 + fract * f2; Df = f2 - f1; // delta between the values of the functions temp = Dn * (d1 + d2) - 2 * Df; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_sin_cos_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_sin_cos_q31.c index 7e7c881..4198307 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_sin_cos_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/ControllerFunctions/arm_sin_cos_q31.c @@ -5,13 +5,13 @@ * Title: arm_sin_cos_q31.c * Description: Cosine & Sine calculation for Q31 values * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/DistanceFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/DistanceFunctionsF16.c deleted file mode 100644 index f40eb12..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/DistanceFunctionsF16.c +++ /dev/null @@ -1,40 +0,0 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: DistanceFunctions.c - * Description: Combination of all distance function f16 source files. - * - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2020 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "arm_braycurtis_distance_f16.c" -#include "arm_canberra_distance_f16.c" -#include "arm_chebyshev_distance_f16.c" -#include "arm_cityblock_distance_f16.c" -#include "arm_correlation_distance_f16.c" -#include "arm_cosine_distance_f16.c" -#include "arm_euclidean_distance_f16.c" -#include "arm_jensenshannon_distance_f16.c" -#include "arm_minkowski_distance_f16.c" - - -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_boolean_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_boolean_distance.c index df49f29..921d039 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_boolean_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_boolean_distance.c @@ -6,11 +6,13 @@ * Title: arm_svm_linear_init_f32.c * Description: SVM Linear Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_boolean_distance_template.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_boolean_distance_template.h index 70a96cd..b50c739 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_boolean_distance_template.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_boolean_distance_template.h @@ -4,11 +4,13 @@ * Title: arm_boolean_distance.c * Description: Templates for boolean distances * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_braycurtis_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_braycurtis_distance_f16.c index bc899da..1c056f2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_braycurtis_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_braycurtis_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_braycurtis_distance_f16.c * Description: Bray-Curtis distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. 
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -133,8 +135,8 @@ float16_t arm_braycurtis_distance_f16(const float16_t *pA,const float16_t *pB, u { tmpA = *pA++; tmpB = *pB++; - accumDiff += (_Float16)fabsf(tmpA - tmpB); - accumSum += (_Float16)fabsf(tmpA + tmpB); + accumDiff += (_Float16)fabsf((float32_t)((_Float16)tmpA - (_Float16)tmpB)); + accumSum += (_Float16)fabsf((float32_t)((_Float16)tmpA + (_Float16)tmpB)); blockSize --; } /* diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_braycurtis_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_braycurtis_distance_f32.c index b616cd1..4a8fd6b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_braycurtis_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_braycurtis_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_braycurtis_distance_f32.c * Description: Bray-Curtis distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_canberra_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_canberra_distance_f16.c index ef0f411..7cfffc1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_canberra_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_canberra_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_canberra_distance_f16.c * Description: Canberra distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -150,11 +152,11 @@ float16_t arm_canberra_distance_f16(const float16_t *pA,const float16_t *pB, uin tmpA = *pA++; tmpB = *pB++; - diff = fabsf(tmpA - tmpB); - sum = fabsf(tmpA) + fabsf(tmpB); - if ((tmpA != 0.0f16) || (tmpB != 0.0f16)) + diff = fabsf((float32_t)((_Float16)tmpA - (_Float16)tmpB)); + sum = (_Float16)fabsf((float32_t)tmpA) + (_Float16)fabsf((float32_t)tmpB); + if (((_Float16)tmpA != 0.0f16) || ((_Float16)tmpB != 0.0f16)) { - accum += (diff / sum); + accum += ((_Float16)diff / (_Float16)sum); } blockSize --; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_canberra_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_canberra_distance_f32.c index 153124c..78d1353 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_canberra_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_canberra_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_canberra_distance_f32.c * Description: Canberra distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f16.c index f825ac2..bbf41dc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_chebyshev_distance_f16.c * Description: Chebyshev distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -117,7 +119,7 @@ float16_t arm_chebyshev_distance_f16(const float16_t *pA,const float16_t *pB, ui tmpA = *pA++; tmpB = *pB++; - diff = fabsf(tmpA - tmpB); + diff = (_Float16)fabsf((float32_t)((_Float16)tmpA - (_Float16)tmpB)); maxVal = diff; blockSize--; @@ -125,8 +127,8 @@ float16_t arm_chebyshev_distance_f16(const float16_t *pA,const float16_t *pB, ui { tmpA = *pA++; tmpB = *pB++; - diff = fabsf(tmpA - tmpB); - if (diff > maxVal) + diff = (_Float16)fabsf((float32_t)((_Float16)tmpA - (_Float16)tmpB)); + if ((_Float16)diff > (_Float16)maxVal) { maxVal = diff; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f32.c index e306011..ee45e3d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_chebyshev_distance_f32.c * Description: Chebyshev distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f64.c new file mode 100644 index 0000000..0b64f72 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_chebyshev_distance_f64.c @@ -0,0 +1,80 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES + +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_chebyshev_distance_f64.c + * Description: Chebyshev distance between two vectors + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h" +#include +#include + + +/** + @addtogroup Chebyshev + @{ + */ + + +/** + * @brief Chebyshev distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float64_t arm_chebyshev_distance_f64(const float64_t *pA,const float64_t *pB, uint32_t blockSize) +{ + float64_t diff=0., maxVal,tmpA, tmpB; + + tmpA = *pA++; + tmpB = *pB++; + diff = fabs(tmpA - tmpB); + maxVal = diff; + blockSize--; + + while(blockSize > 0) + { + tmpA = *pA++; + tmpB = *pB++; + diff = fabs(tmpA - tmpB); + if (diff > maxVal) + { + maxVal = diff; + } + blockSize --; + } + + return(maxVal); +} + +/** + * @} end of Chebyshev group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f16.c index 876da7d..0c9cc2f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_cityblock_distance_f16.c * Description: Cityblock (Manhattan) distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -111,7 +113,7 @@ float16_t arm_cityblock_distance_f16(const float16_t *pA,const float16_t *pB, ui { tmpA = *pA++; tmpB = *pB++; - accum += (_Float16)fabsf(tmpA - tmpB); + accum += (_Float16)fabsf((float32_t)((_Float16)tmpA - (_Float16)tmpB)); blockSize --; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f32.c index d35239b..a749055 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_cityblock_distance_f32.c * Description: Cityblock (Manhattan) distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
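The new arm_chebyshev_distance_f64 above reduces to the largest per-element |pA[i] - pB[i]|. A quick sketch of calling it; the header path is the one used by the new file, while the array contents are purely illustrative:

#include <stdio.h>
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h"

int main(void)
{
    const float64_t a[3] = { 1.0, 4.0, -2.0 };
    const float64_t b[3] = { 2.0, 1.5, -2.0 };
    /* per-element |diff| = 1.0, 2.5, 0.0 -> Chebyshev distance is the maximum, 2.5 */
    printf("chebyshev = %f\n", arm_chebyshev_distance_f64(a, b, 3));
    return 0;
}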
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f64.c new file mode 100644 index 0000000..e07e7a7 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cityblock_distance_f64.c @@ -0,0 +1,71 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES + +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_cityblock_distance_f64.c + * Description: Cityblock (Manhattan) distance between two vectors + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h" +#include +#include + +/** + @addtogroup Manhattan + @{ + */ + + +/** + * @brief Cityblock (Manhattan) distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float64_t arm_cityblock_distance_f64(const float64_t *pA,const float64_t *pB, uint32_t blockSize) +{ + float64_t accum,tmpA, tmpB; + + accum = 0.; + while(blockSize > 0) + { + tmpA = *pA++; + tmpB = *pB++; + accum += fabs(tmpA - tmpB); + + blockSize --; + } + + return(accum); +} + +/** + * @} end of Manhattan group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_correlation_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_correlation_distance_f16.c index e7d3638..715484b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_correlation_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_correlation_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_correlation_distance_f16.c * Description: Correlation distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
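Likewise, the new arm_cityblock_distance_f64 simply sums |pA[i] - pB[i]|. A hypothetical caller, with values chosen so the result is easy to verify by hand:

#include <stdio.h>
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h"

int main(void)
{
    const float64_t a[3] = { 1.0, -2.0, 3.0 };
    const float64_t b[3] = { 4.0,  0.0, 3.0 };
    /* 3 + 2 + 0 = 5 */
    printf("cityblock = %f\n", arm_cityblock_distance_f64(a, b, 3));
    return 0;
}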
* * SPDX-License-Identifier: Apache-2.0 * @@ -70,21 +72,21 @@ float16_t arm_correlation_distance_f16(float16_t *pA,float16_t *pB, uint32_t blo arm_mean_f16(pA, blockSize, &ma); arm_mean_f16(pB, blockSize, &mb); - arm_offset_f16(pA, -ma, pA, blockSize); - arm_offset_f16(pB, -mb, pB, blockSize); + arm_offset_f16(pA, -(_Float16)ma, pA, blockSize); + arm_offset_f16(pB, -(_Float16)mb, pB, blockSize); arm_power_f16(pA, blockSize, &pwra); arm_power_f16(pB, blockSize, &pwrb); arm_dot_prod_f16(pA,pB,blockSize,&dot); - dot = dot / blockSize; - pwra = pwra / blockSize; - pwrb = pwrb / blockSize; + dot = (_Float16)dot / (_Float16)blockSize; + pwra = (_Float16)pwra / (_Float16)blockSize; + pwrb = (_Float16)pwrb / (_Float16)blockSize; - arm_sqrt_f16(pwra * pwrb,&tmp); + arm_sqrt_f16((_Float16)pwra * (_Float16)pwrb,&tmp); - return(1.0f - dot / tmp); + return(1.0f16 - (_Float16)dot / (_Float16)tmp); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_correlation_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_correlation_distance_f32.c index e71fd7f..79d26a9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_correlation_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_correlation_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_correlation_distance_f32.c * Description: Correlation distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f16.c index 0046263..453aebf 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_cosine_distance_f16.c * Description: Cosine distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -60,6 +62,8 @@ * @param[in] blockSize vector length * @return distance * + * @par Description + * cosine_distance(u,v) is 1 - u . 
v / (Norm(u) Norm(v)) */ float16_t arm_cosine_distance_f16(const float16_t *pA,const float16_t *pB, uint32_t blockSize) @@ -71,8 +75,8 @@ float16_t arm_cosine_distance_f16(const float16_t *pA,const float16_t *pB, uint3 arm_dot_prod_f16(pA,pB,blockSize,&dot); - arm_sqrt_f16(pwra * pwrb, &tmp); - return(1.0f - dot / tmp); + arm_sqrt_f16((_Float16)pwra * (_Float16)pwrb, &tmp); + return(1.0f16 - (_Float16)dot / (_Float16)tmp); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f32.c index 1ad6cc7..871c7af 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_cosine_distance_f32.c * Description: Cosine distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -47,6 +49,8 @@ * @param[in] blockSize vector length * @return distance * + * @par Description + * cosine_distance(u,v) is 1 - u . v / (Norm(u) Norm(v)) */ float32_t arm_cosine_distance_f32(const float32_t *pA,const float32_t *pB, uint32_t blockSize) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f64.c new file mode 100644 index 0000000..ea5e654 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_cosine_distance_f64.c @@ -0,0 +1,74 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES + +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_cosine_distance_f64.c + * Description: Cosine distance between two vectors + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h" +#include +#include + + +/** + @addtogroup CosineDist + @{ + */ + + + +/** + * @brief Cosine distance between two vectors + * + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ + +float64_t arm_cosine_distance_f64(const float64_t *pA,const float64_t *pB, uint32_t blockSize) +{ + float64_t pwra,pwrb,dot,tmp; + + arm_power_f64(pA, blockSize, &pwra); + arm_power_f64(pB, blockSize, &pwrb); + + arm_dot_prod_f64(pA,pB,blockSize,&dot); + + tmp = sqrt(pwra * pwrb); + return(1. - dot / tmp); + +} + + + +/** + * @} end of CosineDist group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_dice_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_dice_distance.c index 4fd2963..d27dfc9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_dice_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_dice_distance.c @@ -6,11 +6,13 @@ * Title: arm_dice_distance.c * Description: Dice distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f16.c index 67a703e..dd1d9ca 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_euclidean_distance_f16.c * Description: Euclidean distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
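The @par Description lines added above spell out the formula: cosine distance is 1 - (u . v) / (Norm(u) Norm(v)), so orthogonal vectors score 1 and parallel vectors score 0. A small check against the new f64 kernel; the sample vectors are arbitrary:

#include <stdio.h>
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h"

int main(void)
{
    const float64_t u[2] = { 1.0, 0.0 };
    const float64_t v[2] = { 0.0, 1.0 };
    const float64_t w[2] = { 2.0, 0.0 };
    printf("orthogonal: %f\n", arm_cosine_distance_f64(u, v, 2));  /* ~1.0 */
    printf("parallel:   %f\n", arm_cosine_distance_f64(u, w, 2));  /* ~0.0 */
    return 0;
}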
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f32.c index 101151e..ccbdc77 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_euclidean_distance_f32.c * Description: Euclidean distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f64.c new file mode 100644 index 0000000..04c42f7 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_euclidean_distance_f64.c @@ -0,0 +1,70 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES + +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_euclidean_distance_f64.c + * Description: Euclidean distance between two vectors + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h" +#include +#include + + + +/** + @addtogroup Euclidean + @{ + */ + + +/** + * @brief Euclidean distance between two vectors + * @param[in] pA First vector + * @param[in] pB Second vector + * @param[in] blockSize vector length + * @return distance + * + */ +float64_t arm_euclidean_distance_f64(const float64_t *pA,const float64_t *pB, uint32_t blockSize) +{ + float64_t accum=0.,tmp; + + while(blockSize > 0) + { + tmp = *pA++ - *pB++; + accum += SQ(tmp); + blockSize --; + } + tmp = sqrt(accum); + return(tmp); +} + +/** + * @} end of Euclidean group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_hamming_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_hamming_distance.c index 8a6e4f7..28f2733 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_hamming_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_hamming_distance.c @@ -6,11 +6,13 @@ * Title: arm_hamming_distance.c * Description: Hamming distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jaccard_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jaccard_distance.c index d3dc3bb..30d061b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jaccard_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jaccard_distance.c @@ -6,11 +6,13 @@ * Title: arm_jaccard_distance.c * Description: Jaccard distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
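arm_euclidean_distance_f64 above accumulates squared differences and returns the square root, so a 3-4-5 triangle makes an easy sanity check. Sketch only; the values are illustrative:

#include <stdio.h>
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h"

int main(void)
{
    const float64_t a[2] = { 0.0, 0.0 };
    const float64_t b[2] = { 3.0, 4.0 };
    printf("euclidean = %f\n", arm_euclidean_distance_f64(a, b, 2));  /* 5.0 */
    return 0;
}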
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jensenshannon_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jensenshannon_distance_f16.c index 87a14d8..14bd4b0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jensenshannon_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jensenshannon_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_jensenshannon_distance_f16.c * Description: Jensen-Shannon distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -54,7 +56,7 @@ /// @private __STATIC_INLINE float16_t rel_entr(float16_t x, float16_t y) { - return (x * logf(x / y)); + return ((_Float16)x * (_Float16)logf((float32_t)((_Float16)x / (_Float16)y))); } #endif @@ -117,7 +119,7 @@ float16_t arm_jensenshannon_distance_f16(const float16_t *pA,const float16_t *pB } - arm_sqrt_f16(vecAddAcrossF16Mve(accumV) / 2.0f, &tmp); + arm_sqrt_f16((_Float16)vecAddAcrossF16Mve(accumV) / 2.0f16, &tmp); return (tmp); } @@ -162,7 +164,7 @@ float16_t arm_jensenshannon_distance_f16(const float16_t *pA,const float16_t *pB sum = left + right; - arm_sqrt_f16(sum/2.0f, &result); + arm_sqrt_f16((_Float16)sum/2.0f16, &result); return(result); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jensenshannon_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jensenshannon_distance_f32.c index 56af92e..6aeb797 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jensenshannon_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_jensenshannon_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_jensenshannon_distance_f32.c * Description: Jensen-Shannon distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
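rel_entr(x, y) above is x * log(x / y), so the Jensen-Shannon distance of a distribution against itself collapses to sqrt(0 / 2) = 0. A sketch using the f32 variant; inputs are expected to be probability vectors, and these particular values are made up:

#include <stdio.h>
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/distance_functions.h"

int main(void)
{
    const float32_t p[3] = { 0.25f, 0.25f, 0.5f };
    printf("jsd(p, p) = %f\n", arm_jensenshannon_distance_f32(p, p, 3));  /* ~0.0 */
    return 0;
}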
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_kulsinski_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_kulsinski_distance.c index 2941de9..1bcb2ef 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_kulsinski_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_kulsinski_distance.c @@ -6,11 +6,13 @@ * Title: arm_kulsinski_distance.c * Description: Kulsinski distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_minkowski_distance_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_minkowski_distance_f16.c index 79d1b8a..ae9c3cb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_minkowski_distance_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_minkowski_distance_f16.c @@ -6,11 +6,13 @@ * Title: arm_minkowski_distance_f16.c * Description: Minkowski distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -69,10 +71,9 @@ float16_t arm_minkowski_distance_f16(const float16_t *pA,const float16_t *pB, int32_t order, uint32_t blockSize) { uint32_t blkCnt; - f16x8_t a, b, tmpV, accumV, sumV; + f16x8_t a, b, tmpV, sumV; sumV = vdupq_n_f16(0.0f); - accumV = vdupq_n_f16(0.0f); blkCnt = blockSize >> 3; while (blkCnt > 0U) { @@ -104,7 +105,7 @@ float16_t arm_minkowski_distance_f16(const float16_t *pA,const float16_t *pB, in sumV = vaddq_m(sumV, sumV, tmpV, p0); } - return (powf(vecAddAcrossF16Mve(sumV), (1.0f / (float16_t) order))); + return (powf((float32_t)vecAddAcrossF16Mve(sumV), (1.0f / (float32_t) order))); } @@ -116,14 +117,14 @@ float16_t arm_minkowski_distance_f16(const float16_t *pA,const float16_t *pB, in _Float16 sum; uint32_t i; - sum = 0.0f; + sum = 0.0f16; for(i=0; i < blockSize; i++) { - sum += (_Float16)powf(fabsf(pA[i] - pB[i]),order); + sum += (_Float16)powf(fabsf((float32_t)((_Float16)pA[i] - (_Float16)pB[i])),order); } - return(powf(sum,(1.0f/order))); + return(_Float16)(powf((float32_t)sum,(1.0f/(float32_t)order))); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_minkowski_distance_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_minkowski_distance_f32.c index 51a904a..e29d8e1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_minkowski_distance_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_minkowski_distance_f32.c @@ -6,11 +6,13 @@ * Title: arm_minkowski_distance_f32.c * Description: Minkowski distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -76,10 +78,9 @@ __attribute__((weak)) float __powisf2(float a, int b) float32_t arm_minkowski_distance_f32(const float32_t *pA,const float32_t *pB, int32_t order, uint32_t blockSize) { uint32_t blkCnt; - f32x4_t a, b, tmpV, accumV, sumV; + f32x4_t a, b, tmpV, sumV; sumV = vdupq_n_f32(0.0f); - accumV = vdupq_n_f32(0.0f); blkCnt = blockSize >> 2; while (blkCnt > 0U) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_rogerstanimoto_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_rogerstanimoto_distance.c index 2f923dd..eb7820d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_rogerstanimoto_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_rogerstanimoto_distance.c @@ -6,11 +6,13 @@ * Title: arm_rogerstanimoto_distance.c * Description: Roger Stanimoto distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. 
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_russellrao_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_russellrao_distance.c index d924ea2..0be143e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_russellrao_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_russellrao_distance.c @@ -6,11 +6,13 @@ * Title: arm_russellrao_distance.c * Description: Russell-Rao distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -63,7 +65,7 @@ float32_t arm_russellrao_distance(const uint32_t *pA, const uint32_t *pB, uint32 arm_boolean_distance_TT(pA, pB, numberOfBools, &ctt); - return(1.0*(numberOfBools - ctt) / ((float32_t)numberOfBools)); + return(1.0f*(numberOfBools - ctt) / ((float32_t)numberOfBools)); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_sokalmichener_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_sokalmichener_distance.c index d18904e..3b7fd14 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_sokalmichener_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_sokalmichener_distance.c @@ -6,11 +6,13 @@ * Title: arm_sokalmichener_distance.c * Description: Sokal-Michener distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_sokalsneath_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_sokalsneath_distance.c index 48b24fc..707466e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_sokalsneath_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_sokalsneath_distance.c @@ -6,11 +6,13 @@ * Title: arm_sokalsneath_distance.c * Description: Sokal-Sneath distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. 
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_yule_distance.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_yule_distance.c index 0535e5b..cf52c90 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_yule_distance.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/DistanceFunctions/arm_yule_distance.c @@ -6,11 +6,13 @@ * Title: arm_yule_distance.c * Description: Yule distance between two vectors * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/FastMathFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/FastMathFunctionsF16.c deleted file mode 100644 index 128e14c..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/FastMathFunctionsF16.c +++ /dev/null @@ -1,35 +0,0 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: FastMathFunctions.c - * Description: Combination of all fast math function source files. - * - * $Date: 16. March 2020 - * $Revision: V1.1.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2019-2020 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "arm_vexp_f16.c" -#include "arm_vlog_f16.c" -#include "arm_vinverse_f16.c" - -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_f16.c new file mode 100644 index 0000000..93c898c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_f16.c @@ -0,0 +1,175 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_atan2_f16.c + * Description: float16 Arc tangent of y/x + * + * $Date: 22 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + +/* + +atan for argument between in [0, 1.0] + + +*/ + +#define PIF16 3.14f16 +#define PI16HALF 1.571f16 + +#define ATANHALFF16 0.463648f16 + +#define ATAN2_NB_COEFS_F16 5 + +static const float16_t atan2_coefs_f16[ATAN2_NB_COEFS_F16]={0.f16 +,1.f16 +,0.f16 +,-0.367f16 +,0.152f16 +}; + +__STATIC_FORCEINLINE float16_t arm_atan_limited_f16(float16_t x) +{ + float16_t res=atan2_coefs_f16[ATAN2_NB_COEFS_F16-1]; + int i=1; + for(i=1;i 1.0f16) + { + x = 1.0f16 / (_Float16)x; + res = (_Float16)PI16HALF - (_Float16)arm_atan_limited_f16(x); + } + else + { + res += (_Float16)arm_atan_limited_f16(x); + } + + + if (sign) + { + res = -(_Float16)res; + } + + return(res); +} + +/** + @ingroup groupFastMath + */ + + +/** + @addtogroup atan2 + @{ + */ + +/** + @brief Arc Tangent of y/x using sign of y and x to get right quadrant + @param[in] y y coordinate + @param[in] x x coordinate + @param[out] result Result + @return error status. + + @par Compute the Arc tangent of y/x: + The sign of y and x are used to determine the right quadrant + and compute the right angle. 
+ +*/ +arm_status arm_atan2_f16(float16_t y,float16_t x,float16_t *result) +{ + if ((_Float16)x > 0.0f16) + { + *result=arm_atan_f16((_Float16)y/(_Float16)x); + return(ARM_MATH_SUCCESS); + } + if ((_Float16)x < 0.0f16) + { + if ((_Float16)y > 0.0f16) + { + *result=(_Float16)arm_atan_f16((_Float16)y/(_Float16)x) + (_Float16)PIF16; + } + else if ((_Float16)y < 0.0f16) + { + *result=(_Float16)arm_atan_f16((_Float16)y/(_Float16)x) - (_Float16)PIF16; + } + else + { + if (signbit((float)y)) + { + *result= -(_Float16)PIF16; + } + else + { + *result= PIF16; + } + } + return(ARM_MATH_SUCCESS); + } + if ((_Float16)x == 0.0f16) + { + if ((_Float16)y > 0.0f16) + { + *result=PI16HALF; + return(ARM_MATH_SUCCESS); + } + if ((_Float16)y < 0.0f16) + { + *result=-(_Float16)PI16HALF; + return(ARM_MATH_SUCCESS); + } + } + + + return(ARM_MATH_NANINF); + +} + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ +/** + @} end of atan2 group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_f32.c new file mode 100644 index 0000000..51f6812 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_f32.c @@ -0,0 +1,187 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_atan2_f32.c + * Description: float32 Arc tangent of y/x + * + * $Date: 22 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" + +/* + +atan for argument between in [0, 1.0] + + +*/ + +#define ATANHALFF32 0.463648f +#define PIHALFF32 1.5707963267948966192313f + +#define ATAN2_NB_COEFS_F32 10 + +static const float32_t atan2_coefs_f32[ATAN2_NB_COEFS_F32]={0.0f +,1.0000001638308195518f +,-0.0000228941363602264f +,-0.3328086544578890873f +,-0.004404814619311061f +,0.2162217461808173258f +,-0.0207504842057097504f +,-0.1745263362250363339f +,0.1340557235283553386f +,-0.0323664125927477625f +}; + +__STATIC_FORCEINLINE float32_t arm_atan_limited_f32(float32_t x) +{ + float32_t res=atan2_coefs_f32[ATAN2_NB_COEFS_F32-1]; + int i=1; + for(i=1;i 1.0f) + { + x = 1.0f / x; + res = PIHALFF32 - arm_atan_limited_f32(x); + } + else + { + res += arm_atan_limited_f32(x); + } + + + if (sign) + { + res = -res; + } + + return(res); +} + + +/** + @ingroup groupFastMath + */ + +/** + @defgroup atan2 ArcTan2 + + Computing Arc tangent only using the ratio y/x is not enough to determine the angle + since there is an indeterminacy. Opposite quadrants are giving the same ratio. + + ArcTan2 is not using y/x to compute the angle but y and x and use the sign of y and x + to determine the quadrant. + + */ + +/** + @addtogroup atan2 + @{ + */ + +/** + @brief Arc Tangent of y/x using sign of y and x to get right quadrant + @param[in] y y coordinate + @param[in] x x coordinate + @param[out] result Result + @return error status. + + @par Compute the Arc tangent of y/x: + The sign of y and x are used to determine the right quadrant + and compute the right angle. +*/ + + +arm_status arm_atan2_f32(float32_t y,float32_t x,float32_t *result) +{ + if (x > 0.0f) + { + *result=arm_atan_f32(y/x); + return(ARM_MATH_SUCCESS); + } + if (x < 0.0f) + { + if (y > 0.0f) + { + *result=arm_atan_f32(y/x) + PI; + } + else if (y < 0.0f) + { + *result=arm_atan_f32(y/x) - PI; + } + else + { + if (signbit(y)) + { + *result= -PI; + } + else + { + *result= PI; + } + } + return(ARM_MATH_SUCCESS); + } + if (x == 0.0f) + { + if (y > 0.0f) + { + *result=PIHALFF32; + return(ARM_MATH_SUCCESS); + } + if (y < 0.0f) + { + *result=-PIHALFF32; + return(ARM_MATH_SUCCESS); + } + } + + + return(ARM_MATH_NANINF); + +} + +/** + @} end of atan2 group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_q15.c new file mode 100644 index 0000000..c334bee --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_q15.c @@ -0,0 +1,239 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_atan2_q15.c + * Description: float32 Arc tangent of y/x + * + * $Date: 22 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
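The defgroup text added above makes the key point: the ratio y/x alone cannot distinguish opposite quadrants, which is why arm_atan2_f32 takes y and x separately. A quick illustration with two points that share the same ratio; the numbers are only examples:

#include <stdio.h>
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h"

int main(void)
{
    float32_t q1, q3;
    arm_atan2_f32( 1.0f,  1.0f, &q1);  /* first quadrant: ~ +0.785 rad */
    arm_atan2_f32(-1.0f, -1.0f, &q3);  /* third quadrant: ~ -2.356 rad, same y/x ratio */
    printf("%f %f\n", q1, q3);
    return 0;
}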
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h" + +/* + +atan for argument between in [0, 1.0] + +*/ + + +/* Q2.13 */ +#define ATANHALFQ13 0xed6 +#define PIHALFQ13 0x3244 +#define PIQ13 0x6488 + +#define ATAN2_NB_COEFS_Q15 10 + +static const q15_t atan2_coefs_q15[ATAN2_NB_COEFS_Q15]={0x0000 +,0x7fff +,0xffff +,0xd567 +,0xff70 +,0x1bad +,0xfd58 +,0xe9a9 +,0x1129 +,0xfbdb +}; + +__STATIC_FORCEINLINE q15_t arm_atan_limited_q15(q15_t x) +{ + q31_t res=(q31_t)atan2_coefs_q15[ATAN2_NB_COEFS_Q15-1]; + int i=1; + for(i=1;i> 15U; + res = res + ((q31_t) atan2_coefs_q15[ATAN2_NB_COEFS_Q15-1-i]) ; + } + + res = __SSAT(res>>2,16); + + + return(res); +} + + +__STATIC_FORCEINLINE q15_t arm_atan_q15(q15_t y,q15_t x) +{ + int sign=0; + q15_t res=0; + + if (y<0) + { + /* Negate y */ +#if defined (ARM_MATH_DSP) + y = __QSUB16(0, y); +#else + y = (y == (q15_t) 0x8000) ? (q15_t) 0x7fff : -y; +#endif + + sign=1-sign; + } + + if (x < 0) + { + sign=1 - sign; + + /* Negate x */ +#if defined (ARM_MATH_DSP) + x = __QSUB16(0, x); +#else + x = (x == (q15_t) 0x8000) ? (q15_t) 0x7fff : -x; +#endif + } + + if (y > x) + { + q15_t ratio; + int16_t shift; + + arm_divide_q15(x,y,&ratio,&shift); + + /* Shift ratio by shift */ + if (shift >=0) + { + ratio = __SSAT(((q31_t) ratio << shift), 16); + } + else + { + ratio = (ratio >> -shift); + } + + res = PIHALFQ13 - arm_atan_limited_q15(ratio); + + } + else + { + q15_t ratio; + int16_t shift; + + arm_divide_q15(y,x,&ratio,&shift); + + /* Shift ratio by shift */ + if (shift >=0) + { + ratio = __SSAT(((q31_t) ratio << shift), 16); + } + else + { + ratio = (ratio >> -shift); + } + + + res = arm_atan_limited_q15(ratio); + + } + + + if (sign) + { + /* Negate res */ +#if defined (ARM_MATH_DSP) + res = __QSUB16(0, res); +#else + res = (res == (q15_t) 0x8000) ? (q15_t) 0x7fff : -res; +#endif + } + + return(res); +} + + +/** + @ingroup groupFastMath + */ + + +/** + @addtogroup atan2 + @{ + */ + +/** + @brief Arc Tangent of y/x using sign of y and x to get right quadrant + @param[in] y y coordinate + @param[in] x x coordinate + @param[out] result Result in Q2.13 + @return error status. + + @par Compute the Arc tangent of y/x: + The sign of y and x are used to determine the right quadrant + and compute the right angle. 
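As documented below, the fixed-point variant returns the angle in Q2.13, so a result r corresponds to r / 2^13 radians (consistent with PIQ13 = 0x6488 = 25736, roughly pi * 8192). A hypothetical caller-side conversion helper, not part of the patch:

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h"

float angle_radians(q15_t y, q15_t x)
{
    q15_t r = 0;
    if (arm_atan2_q15(y, x, &r) != ARM_MATH_SUCCESS) {
        return 0.0f;                /* x == y == 0: angle undefined */
    }
    return (float)r / 8192.0f;      /* Q2.13 -> radians */
}
/* angle_radians(0x4000, 0x4000) comes out near 0.785 (45 degrees) */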
+*/ + + +arm_status arm_atan2_q15(q15_t y,q15_t x,q15_t *result) +{ + if (x > 0) + { + *result=arm_atan_q15(y,x); + return(ARM_MATH_SUCCESS); + } + if (x < 0) + { + if (y > 0) + { + *result=arm_atan_q15(y,x) + PIQ13; + } + else if (y < 0) + { + *result=arm_atan_q15(y,x) - PIQ13; + } + else + { + if (y<0) + { + *result= -PIQ13; + } + else + { + *result= PIQ13; + } + } + return(ARM_MATH_SUCCESS); + } + if (x == 0) + { + if (y > 0) + { + *result=PIHALFQ13; + return(ARM_MATH_SUCCESS); + } + if (y < 0) + { + *result=-PIHALFQ13; + return(ARM_MATH_SUCCESS); + } + } + + + return(ARM_MATH_NANINF); + +} + +/** + @} end of atan2 group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_q31.c new file mode 100644 index 0000000..6eba0ce --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_atan2_q31.c @@ -0,0 +1,240 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_atan2_q31.c + * Description: float32 Arc tangent of y/x + * + * $Date: 22 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/utils.h" + +/* + +atan for argument between in [0, 1.0] + +*/ + + +/* Q2.29 */ +#define ATANHALF_Q29 0xed63383 +#define PIHALF_Q29 0x3243f6a9 +#define PIQ29 0x6487ed51 + +#define ATAN2_NB_COEFS_Q31 13 + +static const q31_t atan2_coefs_q31[ATAN2_NB_COEFS_Q31]={0x00000000 +,0x7ffffffe +,0x000001b6 +,0xd555158e +,0x00036463 +,0x1985f617 +,0x001992ae +,0xeed53a7f +,0xf8f15245 +,0x2215a3a4 +,0xe0fab004 +,0x0cdd4825 +,0xfddbc054 +}; + + +__STATIC_FORCEINLINE q31_t arm_atan_limited_q31(q31_t x) +{ + q63_t res=(q63_t)atan2_coefs_q31[ATAN2_NB_COEFS_Q31-1]; + int i=1; + for(i=1;i> 31U; + res = res + ((q63_t) atan2_coefs_q31[ATAN2_NB_COEFS_Q31-1-i]) ; + } + + return(clip_q63_to_q31(res>>2)); +} + + +__STATIC_FORCEINLINE q31_t arm_atan_q31(q31_t y,q31_t x) +{ + int sign=0; + q31_t res=0; + + if (y<0) + { + /* Negate y */ +#if defined (ARM_MATH_DSP) + y = __QSUB(0, y); +#else + y = (y == INT32_MIN) ? INT32_MAX : -y; +#endif + + sign=1-sign; + } + + if (x < 0) + { + sign=1 - sign; + + /* Negate x */ +#if defined (ARM_MATH_DSP) + x = __QSUB(0, x); +#else + x = (x == INT32_MIN) ? 
INT32_MAX : -x; +#endif + } + + if (y > x) + { + q31_t ratio; + int16_t shift; + + arm_divide_q31(x,y,&ratio,&shift); + + /* Shift ratio by shift */ + if (shift >= 0) + { + ratio = clip_q63_to_q31((q63_t) ratio << shift); + } + else + { + ratio = (ratio >> -shift); + } + + res = PIHALF_Q29 - arm_atan_limited_q31(ratio); + + } + else + { + q31_t ratio; + int16_t shift; + + arm_divide_q31(y,x,&ratio,&shift); + + /* Shift ratio by shift */ + if (shift >= 0) + { + ratio = clip_q63_to_q31((q63_t) ratio << shift); + } + else + { + ratio = (ratio >> -shift); + } + + + res = arm_atan_limited_q31(ratio); + + } + + + if (sign) + { + /* Negate res */ +#if defined (ARM_MATH_DSP) + res = __QSUB(0, res); +#else + res = (res == INT32_MIN) ? INT32_MAX : -res; +#endif + } + + return(res); +} + + +/** + @ingroup groupFastMath + */ + + +/** + @addtogroup atan2 + @{ + */ + +/** + @brief Arc Tangent of y/x using sign of y and x to get right quadrant + @param[in] y y coordinate + @param[in] x x coordinate + @param[out] result Result in Q2.29 + @return error status. + + @par Compute the Arc tangent of y/x: + The sign of y and x are used to determine the right quadrant + and compute the right angle. +*/ + + +arm_status arm_atan2_q31(q31_t y,q31_t x,q31_t *result) +{ + if (x > 0) + { + *result=arm_atan_q31(y,x); + return(ARM_MATH_SUCCESS); + } + if (x < 0) + { + if (y > 0) + { + *result=arm_atan_q31(y,x) + PIQ29; + } + else if (y < 0) + { + *result=arm_atan_q31(y,x) - PIQ29; + } + else + { + if (y<0) + { + *result= -PIQ29; + } + else + { + *result= PIQ29; + } + } + return(ARM_MATH_SUCCESS); + } + if (x == 0) + { + if (y > 0) + { + *result=PIHALF_Q29; + return(ARM_MATH_SUCCESS); + } + if (y < 0) + { + *result=-PIHALF_Q29; + return(ARM_MATH_SUCCESS); + } + } + + + return(ARM_MATH_NANINF); + +} + +/** + @} end of atan2 group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_f32.c index ff7f0a2..ac428dc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_f32.c @@ -5,13 +5,13 @@ * Title: arm_cos_f32.c * Description: Fast cosine calculation for floating-point values * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_q15.c index ea995fd..c423b06 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_q15.c @@ -5,13 +5,13 @@ * Title: arm_cos_q15.c * Description: Fast cosine calculation for Q15 values * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_q31.c index ab02d2b..749dd0e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_cos_q31.c @@ -5,13 +5,13 @@ * Title: arm_cos_q31.c * Description: Fast cosine calculation for Q31 values * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_divide_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_divide_q15.c new file mode 100644 index 0000000..c53a379 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_divide_q15.c @@ -0,0 +1,114 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_cos_q15.c + * Description: Fast cosine calculation for Q15 values + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" + +#include + +/** + @ingroup groupFastMath + */ + +/** + @defgroup divide Fixed point division + + */ + +/** + @addtogroup divide + @{ + */ + +/** + @brief Fixed point division + @param[in] numerator Numerator + @param[in] denominator Denominator + @param[out] quotient Quotient value normalized between -1.0 and 1.0 + @param[out] shift Shift left value to get the unnormalized quotient + @return error status + + When dividing by 0, an error ARM_MATH_NANINF is returned. And the quotient is forced + to the saturated negative or positive value. + */ + +arm_status arm_divide_q15(q15_t numerator, + q15_t denominator, + q15_t *quotient, + int16_t *shift) +{ + int16_t sign=0; + q31_t temp; + int16_t shiftForNormalizing; + + *shift = 0; + + sign = (numerator>>15) ^ (denominator>>15); + + if (denominator == 0) + { + if (sign) + { + *quotient = 0x8000; + } + else + { + *quotient = 0x7FFF; + } + return(ARM_MATH_NANINF); + } + + arm_abs_q15(&numerator,&numerator,1); + arm_abs_q15(&denominator,&denominator,1); + + temp = ((q31_t)numerator << 15) / ((q31_t)denominator); + + shiftForNormalizing= 17 - __CLZ(temp); + if (shiftForNormalizing > 0) + { + *shift = shiftForNormalizing; + temp = temp >> shiftForNormalizing; + } + + if (sign) + { + temp = -temp; + } + + *quotient=temp; + + return(ARM_MATH_SUCCESS); +} + +/** + @} end of divide group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_divide_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_divide_q31.c new file mode 100644 index 0000000..b1ae866 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_divide_q31.c @@ -0,0 +1,109 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_cos_q31.c + * Description: Fast cosine calculation for Q31 values + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" + +#include + +/** + @ingroup groupFastMath + */ + +/** + @addtogroup divide + @{ + */ + +/** + @brief Fixed point division + @param[in] numerator Numerator + @param[in] denominator Denominator + @param[out] quotient Quotient value normalized between -1.0 and 1.0 + @param[out] shift Shift left value to get the unnormalized quotient + @return error status + + When dividing by 0, an error ARM_MATH_NANINF is returned. And the quotient is forced + to the saturated negative or positive value. + */ + +arm_status arm_divide_q31(q31_t numerator, + q31_t denominator, + q31_t *quotient, + int16_t *shift) +{ + int16_t sign=0; + q63_t temp; + int16_t shiftForNormalizing; + + *shift = 0; + + sign = (numerator>>31) ^ (denominator>>31); + + if (denominator == 0) + { + if (sign) + { + *quotient = 0x80000000; + } + else + { + *quotient = 0x7FFFFFFF; + } + return(ARM_MATH_NANINF); + } + + arm_abs_q31(&numerator,&numerator,1); + arm_abs_q31(&denominator,&denominator,1); + + temp = ((q63_t)numerator << 31) / ((q63_t)denominator); + + shiftForNormalizing= 32 - __CLZ(temp >> 31); + if (shiftForNormalizing > 0) + { + *shift = shiftForNormalizing; + temp = temp >> shiftForNormalizing; + } + + if (sign) + { + temp = -temp; + } + + *quotient=(q31_t)temp; + + return(ARM_MATH_SUCCESS); +} + +/** + @} end of divide group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_f32.c index 08d326d..89cc8b1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_f32.c @@ -5,13 +5,13 @@ * Title: arm_sin_f32.c * Description: Fast sine calculation for floating-point values * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_q15.c index 439b33a..7d99d9f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_q15.c @@ -5,13 +5,13 @@ * Title: arm_sin_q15.c * Description: Fast sine calculation for Q15 values * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. 
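The new arm_divide_q15 and arm_divide_q31 return a normalized quotient plus a left-shift, i.e. the true ratio is (quotient / 2^15) * 2^shift in the Q15 case (and / 2^31 for Q31). Below is a minimal standalone sketch of that convention using only the standard library; q15_divide and the portable clz32 loop are invented helpers, not CMSIS calls.

/* Illustrative sketch of the (quotient, shift) convention of arm_divide_q15:
 * true ratio ~= (quotient / 2^15) * 2^shift. */
#include <stdint.h>
#include <stdio.h>

static int clz32(uint32_t v)           /* portable count-leading-zeros */
{
    int n = 32;
    while (v) { v >>= 1; n--; }
    return n;
}

static int q15_divide(int16_t num, int16_t den, int16_t *quotient, int16_t *shift)
{
    *shift = 0;
    if (den == 0) { *quotient = (num < 0) ? INT16_MIN : INT16_MAX; return -1; }

    int32_t n = num < 0 ? -num : num;
    int32_t d = den < 0 ? -den : den;
    int32_t temp = (n << 15) / d;              /* Q15 quotient, may exceed Q15 range */

    int16_t norm = (int16_t)(17 - clz32((uint32_t)temp));
    if (norm > 0) { *shift = norm; temp >>= norm; }   /* renormalize into [-1, 1) */

    if ((num < 0) ^ (den < 0)) temp = -temp;
    *quotient = (int16_t)temp;
    return 0;
}

int main(void)
{
    int16_t q, s;
    q15_divide(30000, 70, &q, &s);             /* ratio ~ 428.57 */
    printf("reconstructed = %f, exact = %f\n",
           (q / 32768.0) * (double)(1 << s), 30000.0 / 70.0);
    return 0;
}

arm_atan2_q31 above un-normalizes the (ratio, shift) pair in exactly this way before passing the ratio to its limited-range arctangent.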
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_q31.c index 01d9c6c..92f2ba6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sin_q31.c @@ -5,13 +5,13 @@ * Title: arm_sin_q31.c * Description: Fast sine calculation for Q31 values * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sqrt_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sqrt_q15.c index e499f2b..bfcb9b2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sqrt_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sqrt_q15.c @@ -5,13 +5,13 @@ * Title: arm_sqrt_q15.c * Description: Q15 square root function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -31,6 +31,8 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" +#define Q12QUARTER 0x2000 + /** @ingroup groupFastMath */ @@ -53,14 +55,7 @@ arm_status arm_sqrt_q15( q15_t in, q15_t * pOut) { - q31_t bits_val1; - q15_t number, temp1, var1, signBits1, half; - float32_t temp_float1; - union - { - q31_t fracval; - float32_t floatval; - } tempconv; + q15_t number, var1, signBits1,temp; number = in; @@ -78,46 +73,30 @@ arm_status arm_sqrt_q15( { number = number << (signBits1 - 1); } + /* Start value for 1/sqrt(x) for the Newton iteration */ + var1 = sqrt_initial_lut_q15[(number>> 11) - (Q12QUARTER >> 11)]; - /* Calculate half value of the number */ - half = number >> 1; - /* Store the number for later use */ - temp1 = number; - - /* Convert to float */ - temp_float1 = number * 3.051757812500000e-005f; - /* Store as integer */ - tempconv.floatval = temp_float1; - bits_val1 = tempconv.fracval; - /* Subtract the shifted value from the magic number to give intial guess */ - bits_val1 = 0x5f3759df - (bits_val1 >> 1); /* gives initial guess */ - /* Store as float */ - tempconv.fracval = bits_val1; - temp_float1 = tempconv.floatval; - /* Convert to integer format */ - var1 = (q31_t) (temp_float1 * 16384); - + /* 0.5 var1 * (3 - number * var1 * var1) */ /* 1st iteration */ - var1 = ((q15_t) ((q31_t) var1 * (0x3000 - - ((q15_t) - ((((q15_t) - (((q31_t) var1 * var1) >> 15)) * - (q31_t) half) >> 15))) >> 15)) << 2; - /* 2nd iteration */ - var1 = ((q15_t) ((q31_t) var1 * (0x3000 - - ((q15_t) - ((((q15_t) - (((q31_t) var1 * var1) >> 15)) * - (q31_t) half) >> 15))) >> 15)) << 2; - /* 3rd iteration */ - var1 = ((q15_t) ((q31_t) var1 * (0x3000 - - ((q15_t) - ((((q15_t) - (((q31_t) var1 * var1) >> 15)) * - (q31_t) half) >> 15))) >> 15)) << 2; + + temp = ((q31_t) var1 * var1) >> 12; + temp = ((q31_t) number * temp) >> 15; + temp = 0x3000 - temp; + var1 = ((q31_t) var1 * temp) >> 13; + + temp = ((q31_t) var1 * var1) >> 12; + temp = ((q31_t) number * temp) >> 15; + temp = 0x3000 - temp; + var1 = ((q31_t) var1 * temp) >> 13; + + temp = ((q31_t) var1 * var1) >> 12; + temp = ((q31_t) number * temp) >> 15; + temp = 0x3000 - temp; + var1 = ((q31_t) var1 * temp) >> 13; /* Multiply the inverse square root with the original value */ - var1 = ((q15_t) (((q31_t) temp1 * var1) >> 15)) << 1; + + var1 = ((q15_t) (((q31_t) number * var1) >> 12)); /* Shift the output down accordingly */ if ((signBits1 % 2) == 0) @@ -130,6 +109,7 @@ arm_status arm_sqrt_q15( } *pOut = var1; + return (ARM_MATH_SUCCESS); } /* If the number is a negative number then store zero as its square root value */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sqrt_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sqrt_q31.c index 0dbb6af..0b8954a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sqrt_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_sqrt_q31.c @@ -5,13 +5,13 @@ * Title: arm_sqrt_q31.c * Description: Q31 square root function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -31,6 +31,8 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" +#define Q28QUARTER 0x20000000 + /** @ingroup groupFastMath */ @@ -53,15 +55,8 @@ arm_status arm_sqrt_q31( q31_t in, q31_t * pOut) { - q31_t bits_val1; - q31_t number, temp1, var1, signBits1, half; - float32_t temp_float1; - union - { - q31_t fracval; - float32_t floatval; - } tempconv; - + q31_t number, var1, signBits1 ,temp; + number = in; /* If the input is a positive number then compute the signBits. */ @@ -79,45 +74,33 @@ arm_status arm_sqrt_q31( number = number << (signBits1 - 1); } - /* Calculate half value of the number */ - half = number >> 1; - /* Store the number for later use */ - temp1 = number; - - /* Convert to float */ - temp_float1 = number * 4.6566128731e-010f; - /* Store as integer */ - tempconv.floatval = temp_float1; - bits_val1 = tempconv.fracval; - /* Subtract the shifted value from the magic number to give intial guess */ - bits_val1 = 0x5f3759df - (bits_val1 >> 1); /* gives initial guess */ - /* Store as float */ - tempconv.fracval = bits_val1; - temp_float1 = tempconv.floatval; - /* Convert to integer format */ - var1 = (q31_t) (temp_float1 * 1073741824); + /* Start value for 1/sqrt(x) for the Newton iteration */ + var1 = sqrt_initial_lut_q31[(number>> 26) - (Q28QUARTER >> 26)]; + + /* 0.5 var1 * (3 - number * var1 * var1) */ /* 1st iteration */ - var1 = ((q31_t) ((q63_t) var1 * (0x30000000 - - ((q31_t) - ((((q31_t) - (((q63_t) var1 * var1) >> 31)) * - (q63_t) half) >> 31))) >> 31)) << 2; + + temp = ((q63_t) var1 * var1) >> 28; + temp = ((q63_t) number * temp) >> 31; + temp = 0x30000000 - temp; + var1 = ((q63_t) var1 * temp) >> 29; + + /* 2nd iteration */ - var1 = ((q31_t) ((q63_t) var1 * (0x30000000 - - ((q31_t) - ((((q31_t) - (((q63_t) var1 * var1) >> 31)) * - (q63_t) half) >> 31))) >> 31)) << 2; - /* 3rd iteration */ - var1 = ((q31_t) ((q63_t) var1 * (0x30000000 - - ((q31_t) - ((((q31_t) - (((q63_t) var1 * var1) >> 31)) * - (q63_t) half) >> 31))) >> 31)) << 2; + temp = ((q63_t) var1 * var1) >> 28; + temp = ((q63_t) number * temp) >> 31; + temp = 0x30000000 - temp; + var1 = ((q63_t) var1 * temp) >> 29; + + /* 3nd iteration */ + temp = ((q63_t) var1 * var1) >> 28; + temp = ((q63_t) number * temp) >> 31; + temp = 0x30000000 - temp; + var1 = ((q63_t) var1 * temp) >> 29; /* Multiply the inverse square root with the original value */ - var1 = ((q31_t) (((q63_t) temp1 * var1) >> 31)) << 1; + var1 = ((q31_t) (((q63_t) number * var1) >> 28)); /* Shift the output down accordingly */ if ((signBits1 % 2) == 0) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f16.c index 02864e5..dffb4de 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f16.c +++ 
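The rewritten arm_sqrt_q15/arm_sqrt_q31 replace the float "magic constant" reciprocal-square-root trick with a table seed (sqrt_initial_lut_q15/q31) refined by three Newton steps of y <- 0.5 * y * (3 - x*y*y), then multiply by x to recover sqrt(x). The float sketch below shows the same structure; the four-entry seed table and the [0.25, 1) input range are stand-ins for the library's LUT and its even-shift normalization, not the actual values.

/* Float-domain sketch of the Newton iteration used by the new sqrt kernels (illustrative). */
#include <math.h>
#include <stdio.h>

static const float seed_lut[4] = { 1.71f, 1.37f, 1.18f, 1.05f };   /* ~1/sqrt(x) per bin */

static float sqrt_newton(float x)          /* assumes 0.25 <= x < 1, like the normalized input */
{
    int idx = (int)((x - 0.25f) / 0.1875f);     /* 4 bins over [0.25, 1) */
    if (idx > 3) idx = 3;
    float y = seed_lut[idx];
    for (int i = 0; i < 3; i++)                 /* three iterations, as in the patch */
        y = 0.5f * y * (3.0f - x * y * y);
    return x * y;                               /* x * (1/sqrt(x)) = sqrt(x) */
}

int main(void)
{
    for (float x = 0.25f; x < 1.0f; x += 0.125f)
        printf("x=%.3f  newton=%.6f  libm=%.6f\n", x, sqrt_newton(x), sqrtf(x));
    return 0;
}

The patch's LUT seed is much tighter than the crude one here, which is why three iterations suffice before the final even-shift denormalization in the fixed-point code.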
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f16.c @@ -5,13 +5,13 @@ * Title: arm_vlog_f16.c * Description: Fast vectorized log * - * $Date: 15. Octoboer 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -36,7 +36,18 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h" +/** + @addtogroup vexp + @{ + */ +/** + @brief Floating-point vector of exp values. + @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ void arm_vexp_f16( const float16_t * pSrc, float16_t * pDst, @@ -73,7 +84,7 @@ void arm_vexp_f16( /* C = log(A) */ /* Calculate log and store result in destination buffer. */ - *pDst++ = expf(*pSrc++); + *pDst++ = (_Float16)expf((float32_t)*pSrc++); /* Decrement loop counter */ blkCnt--; @@ -82,5 +93,7 @@ void arm_vexp_f16( #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ - +/** + @} end of vexp group + */ #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f32.c index cde8efe..3f23825 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f32.c @@ -5,13 +5,13 @@ * Title: arm_vlog_f32.c * Description: Fast vectorized log * - * $Date: 15. Octoboer 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -35,6 +35,28 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math.h" #endif +/** + @ingroup groupFastMath + */ + +/** + @defgroup vexp Vector Exponential + + Compute the exp values of a vector of samples. +*/ + +/** + @addtogroup vexp + @{ + */ + +/** + @brief Floating-point vector of exp values. 
+ @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ void arm_vexp_f32( const float32_t * pSrc, float32_t * pDst, @@ -98,4 +120,7 @@ void arm_vexp_f32( } } +/** + @} end of vexp group + */ #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f64.c new file mode 100644 index 0000000..950c0a5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vexp_f64.c @@ -0,0 +1,70 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_vlog_f64.c + * Description: Fast vectorized log + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" + +/** + @addtogroup vexp + @{ + */ + +/** + @brief Floating-point vector of exp values. + @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ +void arm_vexp_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; + + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = log(A) */ + + /* Calculate log and store result in destination buffer. */ + *pDst++ = exp(*pSrc++); + + /* Decrement loop counter */ + blkCnt--; + } +} + +/** + @} end of vexp group + */ +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vinverse_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vinverse_f16.c index 11f0e8d..ec9e842 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vinverse_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vinverse_f16.c @@ -5,13 +5,13 @@ * Title: arm_vinverse_f16.c * Description: Fast vectorized inverse * - * $Date: 15. 
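The arm_vexp_f32/f16/f64 kernels apply exp element-wise over a buffer (the f16 variant now routes through expf in float32, since the C library has no half-precision expf). A usage sketch, assuming the edge-impulse-sdk include path is configured as it is elsewhere in this firmware tree:

/* Usage sketch for the vectorized exp kernel shown above (illustrative). */
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h"
#include <stdio.h>

int main(void)
{
    const float32_t in[4]  = { 0.0f, 1.0f, -1.0f, 2.0f };
    float32_t       out[4];

    arm_vexp_f32(in, out, 4);                 /* out[i] = exp(in[i]) */

    for (int i = 0; i < 4; i++)
        printf("exp(%+.1f) = %f\n", in[i], out[i]);
    return 0;
}

arm_vexp_f16 and arm_vexp_f64 share the same (pSrc, pDst, blockSize) shape with their respective element types.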
Octoboer 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -70,7 +70,7 @@ void arm_vinverse_f16( while (blkCnt > 0U) { - *pDst++ = 1.0 / *pSrc++; + *pDst++ = 1.0f16 / (_Float16)*pSrc++; /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f16.c index b05f8e6..60b4af3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f16.c @@ -5,13 +5,13 @@ * Title: arm_vlog_f16.c * Description: Fast vectorized log * - * $Date: 15. Octoboer 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -29,21 +29,157 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions_f16.h" #if defined(ARM_FLOAT16_SUPPORTED) +/* Degree of the polynomial approximation */ +#define NB_DEG_LOGF16 3 + +/* +Related to the Log2 of the number of approximations. 
+For instance, with 3 there are 1 + 2^3 polynomials +*/ +#define NB_DIV_LOGF16 3 + +/* Length of the LUT table */ +#define NB_LUT_LOGF16 (NB_DEG_LOGF16+1)*(1 + (1< 1000][[2, 1]], {i, 1, 2, (1.0/2^nb)}]; +coefs = Chop@Flatten[CoefficientList[lut, x]]; + +*/ +static float16_t lut_logf16[NB_LUT_LOGF16]={ + 0,0.125,-0.00781197,0.00063974,0.117783, + 0.111111,-0.00617212,0.000447935,0.223144, + 0.1,-0.00499952,0.000327193,0.318454,0.0909091, + -0.00413191,0.000246234,0.405465,0.0833333, + -0.00347199,0.000189928,0.485508,0.0769231, + -0.00295841,0.00014956,0.559616,0.0714286, + -0.0025509,0.000119868,0.628609,0.0666667, + -0.00222213,0.0000975436,0.693147, + 0.0625,-0.00195305,0.0000804357}; + + +float16_t logf16_scalar(float16_t x) +{ + int16_t i = arm_typecast_s16_f16(x); + + int32_t vecExpUnBiased = (i >> 10) - 15; + i = i - (vecExpUnBiased << 10); + float16_t vecTmpFlt1 = arm_typecast_f16_s16(i); + + float16_t *lut; + int n; + float16_t tmp,v; + + tmp = ((_Float16)vecTmpFlt1 - 1.0f16) * (1 << NB_DIV_LOGF16); + n = (int)floor((double)tmp); + v = (_Float16)tmp - (_Float16)n; + + lut = lut_logf16 + n * (1+NB_DEG_LOGF16); + + float16_t res = lut[NB_DEG_LOGF16-1]; + for(int j=NB_DEG_LOGF16-2; j >=0 ; j--) + { + res = (_Float16)lut[j] + (_Float16)v * (_Float16)res; + } + + res = (_Float16)res + 0.693147f16 * (_Float16)vecExpUnBiased; + + + return(res); +} + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) + #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h" + +float16x8_t vlogq_lut_f16(float16x8_t vecIn) +{ + int16x8_t i = vreinterpretq_s16_f16(vecIn); + + int16x8_t vecExpUnBiased = vsubq_n_s16(vshrq_n_s16(i,10), 15); + i = vsubq_s16(i,vshlq_n_s16(vecExpUnBiased,10)); + float16x8_t vecTmpFlt1 = vreinterpretq_f16_s16(i); + + + float16x8_t lutV; + int16x8_t n; + int16x8_t offset; + + float16x8_t tmp,v,res; + + tmp = vmulq_n_f16(vsubq_n_f16(vecTmpFlt1,1.0f16),(_Float16)(1 << NB_DIV_LOGF16)); + + n = vcvtq_s16_f16(tmp); + v = vsubq_f16(tmp,vcvtq_f16_s16(n)); + + + offset = vmulq_n_s16(n,(1+NB_DEG_LOGF16)); + offset = vaddq_n_s16(offset,NB_DEG_LOGF16-1); + + res = vldrhq_gather_shifted_offset_f16(lut_logf16,(uint16x8_t)offset); + offset = vsubq_n_s16(offset,1); + + for(int j=NB_DEG_LOGF16-2; j >=0 ; j--) + { + lutV = vldrhq_gather_shifted_offset_f16(lut_logf16,(uint16x8_t)offset); + res = vfmaq_f16(lutV,v,res); + offset = vsubq_n_s16(offset,1); + + } + + res = vfmaq_n_f16(res,vcvtq_f16_s16(vecExpUnBiased),0.693147f16); + + + return(res); + +} + +#endif + +/** + @ingroup groupFastMath + */ + +/** + @addtogroup vlog + @{ + */ + +/** + @brief Floating-point vector of log values. + @param[in] pSrc points to the input vector + @param[out] pDst points to the output vector + @param[in] blockSize number of samples in each vector + @return none + */ + + void arm_vlog_f16( const float16_t * pSrc, float16_t * pDst, uint32_t blockSize) { - uint32_t blkCnt; + uint32_t blkCnt; #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) - f16x8_t src; f16x8_t dst; @@ -52,7 +188,7 @@ void arm_vlog_f16( while (blkCnt > 0U) { src = vld1q(pSrc); - dst = vlogq_f16(src); + dst = vlogq_lut_f16(src); vst1q(pDst, dst); pSrc += 8; @@ -69,16 +205,22 @@ void arm_vlog_f16( while (blkCnt > 0U) { /* C = log(A) */ - + /* Calculate log and store result in destination buffer. 
*/ - *pDst++ = logf(*pSrc++); - + *pDst++ = logf16_scalar(*pSrc++); + /* Decrement loop counter */ blkCnt--; } } -#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ +/** + @} end of vlog group + */ + + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f32.c index 5e92635..7c59553 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f32.c @@ -5,13 +5,13 @@ * Title: arm_vlog_f32.c * Description: Fast vectorized log * - * $Date: 15. Octoboer 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -31,6 +31,24 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" + +/** + @ingroup groupFastMath + */ + + +/** + @defgroup vlog Vector Log + + Compute the log values of a vector of samples. + + */ + +/** + @addtogroup vlog + @{ + */ + #if (defined(ARM_MATH_MVEF) || defined(ARM_MATH_HELIUM) || defined(ARM_MATH_NEON) || defined(ARM_MATH_NEON_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE) #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math.h" #endif @@ -98,4 +116,8 @@ void arm_vlog_f32( } } +/** + @} end of vlog group + */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/SupportFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f64.c similarity index 55% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/SupportFunctionsF16.c rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f64.c index 084e48e..fae58bb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/SupportFunctionsF16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_f64.c @@ -2,16 +2,16 @@ #if EIDSP_LOAD_CMSIS_DSP_SOURCES /* ---------------------------------------------------------------------- * Project: CMSIS DSP Library - * Title: SupportFunctions.c - * Description: Combination of all support function source files. + * Title: arm_vlog_f64.c + * Description: Fast vectorized log * - * $Date: 16. March 2020 - * $Revision: V1.1.0 + * $Date: 13 September 2021 + * $Revision: V1.10.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2019-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
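The new half-precision log above replaces the libm call with a classic range reduction: reinterpret the f16 bits to peel off the unbiased exponent, leave a mantissa in [1, 2), evaluate one of the piecewise cubic polynomials in lut_logf16 by Horner's rule, and add exponent * ln(2) at the end. The sketch below shows the same decomposition in float; frexpf and a truncated atanh series stand in for the bit manipulation and the LUT polynomials, so the numbers are illustrative rather than the patch's.

/* Illustrative sketch of the structure behind logf16_scalar / vlogq_lut_f16. */
#include <math.h>
#include <stdio.h>

static float log_by_range_reduction(float x)
{
    int e;
    float m = 2.0f * frexpf(x, &e);        /* x = m * 2^(e-1), with m in [1, 2) */
    e -= 1;

    float t  = (m - 1.0f) / (m + 1.0f);    /* log(m) = 2*atanh(t) */
    float t2 = t * t;
    float lm = 2.0f * t * (1.0f + t2 * (1.0f / 3.0f + t2 * (1.0f / 5.0f)));

    return lm + (float)e * 0.6931472f;     /* add e*ln(2), as the patch does */
}

int main(void)
{
    const float xs[] = { 0.05f, 0.5f, 1.0f, 3.0f, 100.0f };
    for (int i = 0; i < 5; i++)
        printf("x=%8.3f  approx=%+.5f  libm=%+.5f\n",
               xs[i], log_by_range_reduction(xs[i]), logf(xs[i]));
    return 0;
}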
* * SPDX-License-Identifier: Apache-2.0 * @@ -28,13 +28,28 @@ * limitations under the License. */ -#include "arm_copy_f16.c" -#include "arm_fill_f16.c" -#include "arm_f16_to_q15.c" -#include "arm_f16_to_float.c" -#include "arm_q15_to_f16.c" -#include "arm_float_to_f16.c" -#include "arm_weighted_sum_f16.c" -#include "arm_barycenter_f16.c" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" + +void arm_vlog_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; + + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = log(A) */ + + /* Calculate log and store result in destination buffer. */ + *pDst++ = log(*pSrc++); + + /* Decrement loop counter */ + blkCnt--; + } +} #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_q15.c new file mode 100644 index 0000000..15d332e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_q15.c @@ -0,0 +1,268 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_vlog_q15 + * Description: Q15 vector log + * + * $Date: 19 July 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" + + +#define LOG_Q15_ACCURACY 15 + +/* Bit to represent the normalization factor + It is Ceiling[Log2[LOG_Q15_ACCURACY]] of the previous value. + The Log2 algorithm is assuming that the value x is + 1 <= x < 2. + + But input value could be as small a 2^-LOG_Q15_ACCURACY + which would give an integer part of -15. 
+*/ +#define LOG_Q15_INTEGER_PART 4 + +/* 2.0 in q14 */ +#define LOQ_Q15_THRESHOLD (1u << LOG_Q15_ACCURACY) + +/* HALF */ +#define LOQ_Q15_Q16_HALF LOQ_Q15_THRESHOLD +#define LOQ_Q15_Q14_HALF (LOQ_Q15_Q16_HALF >> 2) + + +/* 1.0 / Log2[Exp[1]] in q15 */ +#define LOG_Q15_INVLOG2EXP 0x58b9u + + +/* Clay Turner algorithm */ +static uint16_t arm_scalar_log_q15(uint16_t src) +{ + int i; + + int16_t c = __CLZ(src)-16; + int16_t normalization=0; + + /* 0.5 in q11 */ + uint16_t inc = LOQ_Q15_Q16_HALF >> (LOG_Q15_INTEGER_PART + 1); + + /* Will compute y = log2(x) for 1 <= x < 2.0 */ + uint16_t x; + + /* q11 */ + uint16_t y=0; + + /* q11 */ + int16_t tmp; + + + /* Normalize and convert to q14 format */ + x = src; + if ((c-1) < 0) + { + x = x >> (1-c); + } + else + { + x = x << (c-1); + } + normalization = c; + + + + /* Compute the Log2. Result is in q11 instead of q16 + because we know 0 <= y < 1.0 but + we want a result allowing to do a + product on int16 rather than having to go + through int32 + */ + for(i = 0; i < LOG_Q15_ACCURACY ; i++) + { + x = (((int32_t)x*x)) >> (LOG_Q15_ACCURACY - 1); + + if (x >= LOQ_Q15_THRESHOLD) + { + y += inc ; + x = x >> 1; + } + inc = inc >> 1; + } + + + /* + Convert the Log2 to Log and apply normalization. + We compute (y - normalisation) * (1 / Log2[e]). + + */ + + /* q11 */ + //tmp = y - ((int32_t)normalization << (LOG_Q15_ACCURACY + 1)); + tmp = (int16_t)y - (normalization << (LOG_Q15_ACCURACY - LOG_Q15_INTEGER_PART)); + + /* q4.11 */ + y = ((int32_t)tmp * LOG_Q15_INVLOG2EXP) >> 15; + + return(y); + +} + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + + +q15x8_t vlogq_q15(q15x8_t src) +{ + + int i; + + int16x8_t c = vclzq_s16(src); + int16x8_t normalization = c; + + + /* 0.5 in q11 */ + uint16_t inc = LOQ_Q15_Q16_HALF >> (LOG_Q15_INTEGER_PART + 1); + + /* Will compute y = log2(x) for 1 <= x < 2.0 */ + uint16x8_t x; + + + /* q11 */ + uint16x8_t y = vdupq_n_u16(0); + + + /* q11 */ + int16x8_t vtmp; + + + mve_pred16_t p; + + /* Normalize and convert to q14 format */ + + + vtmp = vsubq_n_s16(c,1); + x = vshlq_u16((uint16x8_t)src,vtmp); + + + /* Compute the Log2. Result is in q11 instead of q16 + because we know 0 <= y < 1.0 but + we want a result allowing to do a + product on int16 rather than having to go + through int32 + */ + for(i = 0; i < LOG_Q15_ACCURACY ; i++) + { + x = vmulhq_u16(x,x); + x = vshlq_n_u16(x,2); + + + p = vcmphiq_u16(x,vdupq_n_u16(LOQ_Q15_THRESHOLD)); + y = vaddq_m_n_u16(y, y,inc,p); + x = vshrq_m_n_u16(x,x,1,p); + + inc = inc >> 1; + } + + + /* + Convert the Log2 to Log and apply normalization. + We compute (y - normalisation) * (1 / Log2[e]). + + */ + + /* q11 */ + // tmp = (int16_t)y - (normalization << (LOG_Q15_ACCURACY - LOG_Q15_INTEGER_PART)); + vtmp = vshlq_n_s16(normalization,LOG_Q15_ACCURACY - LOG_Q15_INTEGER_PART); + vtmp = vsubq_s16((int16x8_t)y,vtmp); + + + + /* q4.11 */ + // y = ((int32_t)tmp * LOG_Q15_INVLOG2EXP) >> 15; + vtmp = vqdmulhq_n_s16(vtmp,LOG_Q15_INVLOG2EXP); + + return(vtmp); +} +#endif + +/** + @ingroup groupFastMath + */ + +/** + @addtogroup vlog + @{ + */ + +/** + @brief q15 vector of log values. 
+ @param[in] pSrc points to the input vector in q15 + @param[out] pDst points to the output vector in q4.11 + @param[in] blockSize number of samples in each vector + @return none + + */ + +void arm_vlog_q15( + const q15_t * pSrc, + q15_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* loop counters */ + + #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + q15x8_t src; + q15x8_t dst; + + blkCnt = blockSize >> 3; + + while (blkCnt > 0U) + { + src = vld1q(pSrc); + dst = vlogq_q15(src); + vst1q(pDst, dst); + + pSrc += 8; + pDst += 8; + /* Decrement loop counter */ + blkCnt--; + } + + blkCnt = blockSize & 7; + #else + blkCnt = blockSize; + #endif + + while (blkCnt > 0U) + { + *pDst++ = arm_scalar_log_q15(*pSrc++); + + /* Decrement loop counter */ + blkCnt--; + } +} + +/** + @} end of vlog group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_q31.c new file mode 100644 index 0000000..5be5b72 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FastMathFunctions/arm_vlog_q31.c @@ -0,0 +1,262 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_vlog_q31 + * Description: Q31 vector log + * + * $Date: 19 July 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" + +#define LOG_Q31_ACCURACY 31 + +/* Bit to represent the normalization factor + It is Ceiling[Log2[LOG_Q31_ACCURACY]] of the previous value. + The Log2 algorithm is assuming that the value x is + 1 <= x < 2. + + But input value could be as small a 2^-LOG_Q31_ACCURACY + which would give an integer part of -31. 
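arm_vlog_q15 and arm_vlog_q31 use the bit-by-bit (Clay Turner) log2: normalize the input into [1, 2), then square repeatedly, emitting one fractional bit of log2 whenever the square crosses 2, and finally scale by 1/Log2[e] and fold the normalization back in. The double-precision sketch below follows the same steps so the Q-format shifts of the fixed-point code become ordinary arithmetic; it is illustrative only and not part of the patch.

/* Bit-by-bit log2, then scaled to a natural log, mirroring arm_scalar_log_q15/q31. */
#include <math.h>
#include <stdio.h>

static double log_bit_by_bit(double x, int nbits)
{
    int e = 0;
    while (x >= 2.0) { x *= 0.5; e++; }    /* normalize into [1, 2) ...            */
    while (x <  1.0) { x *= 2.0; e--; }    /* ...the fixed-point code uses __CLZ   */

    double y = 0.0, inc = 0.5;             /* fractional bits of log2(x) */
    for (int i = 0; i < nbits; i++) {
        x *= x;                            /* squaring doubles the remaining log2 */
        if (x >= 2.0) { y += inc; x *= 0.5; }
        inc *= 0.5;
    }
    return (y + (double)e) * 0.6931471805599453;   /* log2 -> ln */
}

int main(void)
{
    const double xs[] = { 0.001, 0.25, 0.9, 1.0 };   /* q15/q31 inputs live in (0, 1) */
    for (int i = 0; i < 4; i++)
        printf("x=%6.3f  bitwise=%+.6f  libm=%+.6f\n",
               xs[i], log_bit_by_bit(xs[i], 15), log(xs[i]));
    return 0;
}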
+*/ +#define LOG_Q31_INTEGER_PART 5 + +/* 2.0 in Q30 */ +#define LOQ_Q31_THRESHOLD (1u << LOG_Q31_ACCURACY) + +/* HALF */ +#define LOQ_Q31_Q32_HALF LOQ_Q31_THRESHOLD +#define LOQ_Q31_Q30_HALF (LOQ_Q31_Q32_HALF >> 2) + + +/* 1.0 / Log2[Exp[1]] in Q31 */ +#define LOG_Q31_INVLOG2EXP 0x58b90bfbuL + +/* Clay Turner algorithm */ +static uint32_t arm_scalar_log_q31(uint32_t src) +{ + int32_t i; + + int32_t c = __CLZ(src); + int32_t normalization=0; + + /* 0.5 in q26 */ + uint32_t inc = LOQ_Q31_Q32_HALF >> (LOG_Q31_INTEGER_PART + 1); + + /* Will compute y = log2(x) for 1 <= x < 2.0 */ + uint32_t x; + + /* q26 */ + uint32_t y=0; + + /* q26 */ + int32_t tmp; + + + /* Normalize and convert to q30 format */ + x = src; + if ((c-1) < 0) + { + x = x >> (1-c); + } + else + { + x = x << (c-1); + } + normalization = c; + + /* Compute the Log2. Result is in q26 + because we know 0 <= y < 1.0 but + do not want to use q32 to allow + following computation with less instructions. + */ + for(i = 0; i < LOG_Q31_ACCURACY ; i++) + { + x = ((int64_t)x*x) >> (LOG_Q31_ACCURACY - 1); + + if (x >= LOQ_Q31_THRESHOLD) + { + y += inc ; + x = x >> 1; + } + inc = inc >> 1; + } + + /* + Convert the Log2 to Log and apply normalization. + We compute (y - normalisation) * (1 / Log2[e]). + + */ + + /* q26 */ + tmp = (int32_t)y - (normalization << (LOG_Q31_ACCURACY - LOG_Q31_INTEGER_PART)); + + + /* q5.26 */ + y = ((int64_t)tmp * LOG_Q31_INVLOG2EXP) >> 31; + + + + return(y); + +} + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + + +q31x4_t vlogq_q31(q31x4_t src) +{ + + int32_t i; + + int32x4_t c = vclzq_s32(src); + int32x4_t normalization = c; + + + /* 0.5 in q11 */ + uint32_t inc = LOQ_Q31_Q32_HALF >> (LOG_Q31_INTEGER_PART + 1); + + /* Will compute y = log2(x) for 1 <= x < 2.0 */ + uint32x4_t x; + + + /* q11 */ + uint32x4_t y = vdupq_n_u32(0); + + + /* q11 */ + int32x4_t vtmp; + + + mve_pred16_t p; + + /* Normalize and convert to q14 format */ + + + vtmp = vsubq_n_s32(c,1); + x = vshlq_u32((uint32x4_t)src,vtmp); + + + /* Compute the Log2. Result is in Q26 + because we know 0 <= y < 1.0 but + do not want to use Q32 to allow + following computation with less instructions. + */ + for(i = 0; i < LOG_Q31_ACCURACY ; i++) + { + x = vmulhq_u32(x,x); + x = vshlq_n_u32(x,2); + + + p = vcmphiq_u32(x,vdupq_n_u32(LOQ_Q31_THRESHOLD)); + y = vaddq_m_n_u32(y, y,inc,p); + x = vshrq_m_n_u32(x,x,1,p); + + inc = inc >> 1; + } + + + /* + Convert the Log2 to Log and apply normalization. + We compute (y - normalisation) * (1 / Log2[e]). + + */ + + /* q11 */ + // tmp = (int16_t)y - (normalization << (LOG_Q15_ACCURACY - LOG_Q15_INTEGER_PART)); + vtmp = vshlq_n_s32(normalization,LOG_Q31_ACCURACY - LOG_Q31_INTEGER_PART); + vtmp = vsubq_s32((int32x4_t)y,vtmp); + + + + /* q4.11 */ + // y = ((int32_t)tmp * LOG_Q15_INVLOG2EXP) >> 15; + vtmp = vqdmulhq_n_s32(vtmp,LOG_Q31_INVLOG2EXP); + + return(vtmp); +} +#endif + +/** + @ingroup groupFastMath + */ + +/** + @addtogroup vlog + @{ + */ + +/** + @brief q31 vector of log values. 
+ @param[in] pSrc points to the input vector in q31 + @param[out] pDst points to the output vector q5.26 + @param[in] blockSize number of samples in each vector + @return none + + */ +void arm_vlog_q31( + const q31_t * pSrc, + q31_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* loop counters */ + + #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + + q31x4_t src; + q31x4_t dst; + + blkCnt = blockSize >> 2; + + while (blkCnt > 0U) + { + src = vld1q(pSrc); + dst = vlogq_q31(src); + vst1q(pDst, dst); + + pSrc += 4; + pDst += 4; + /* Decrement loop counter */ + blkCnt--; + } + + blkCnt = blockSize & 3; + #else + blkCnt = blockSize; + #endif + + while (blkCnt > 0U) + { + *pDst++=arm_scalar_log_q31(*pSrc++); + + blkCnt--; + } + +} + +/** + @} end of vlog group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/FilteringFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/FilteringFunctionsF16.c deleted file mode 100644 index c696e41..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/FilteringFunctionsF16.c +++ /dev/null @@ -1,39 +0,0 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: FilteringFunctions.c - * Description: Combination of all filtering function f16 source files. - * - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2020 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
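Since the outputs above are documented as q4.11 (arm_vlog_q15) and q5.26 (arm_vlog_q31), the natural-log value is recovered by dividing the raw integer by 2^11 or 2^26. A small illustration with made-up raw values:

/* Interpreting the fixed-point log outputs (illustrative; raw values are hypothetical). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int16_t y_q15 = -1420;                           /* hypothetical raw q4.11 output */
    int32_t y_q31 = -46516187;                       /* hypothetical raw q5.26 output */

    printf("q4.11 -> %f\n", y_q15 / 2048.0);         /* divide by 2^11 */
    printf("q5.26 -> %f\n", y_q31 / 67108864.0);     /* divide by 2^26 */
    return 0;
}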
- */ - -#include "arm_fir_f16.c" -#include "arm_fir_init_f16.c" -#include "arm_biquad_cascade_df1_f16.c" -#include "arm_biquad_cascade_df1_init_f16.c" -#include "arm_biquad_cascade_df2T_f16.c" -#include "arm_biquad_cascade_df2T_init_f16.c" -#include "arm_biquad_cascade_stereo_df2T_f16.c" -#include "arm_biquad_cascade_stereo_df2T_init_f16.c" -#include "arm_correlate_f16.c" - -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_32x64_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_32x64_init_q31.c index 4c1d91a..64d61f1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_32x64_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_32x64_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_32x64_init_q31.c * Description: High precision Q31 Biquad cascade filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_32x64_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_32x64_q31.c index 2c01a9c..1111311 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_32x64_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_32x64_q31.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_32x64_q31.c * Description: High precision Q31 Biquad cascade filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -298,7 +298,7 @@ void arm_biquad_cas_df1_32x64_q31( q31_t b0, b1, b2, a1, a2; /* Filter coefficients */ int32_t shift = (int32_t) S->postShift + 1; /* Shift to be applied to the output */ uint32_t sample, stage = S->numStages; /* loop counters */ - q31x4_t vecCoef, vecIn; + q31x4_t vecCoef = { 0 }, vecIn; q63_t acc; if (blockSize <= 3) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_f16.c index 4986e95..c38e37b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_f16.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_f16.c * Description: Processing function for the floating-point Biquad cascade DirectFormI(DF1) filter * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -65,7 +65,7 @@ void arm_biquad_cascade_df1_f16( const float16_t *pCoeffs = S->pCoeffs; /* coefficient pointer */ float16_t Xn1, Xn2, Yn1, Yn2; /* Filter pState variables */ float16_t X0, X1, X2, X3; /* temporary input */ - float16_t X4, X5, X6, X7; /* temporary input */ + float16_t X4, X5, X6, X7 = 0; /* temporary input */ _Float16 lastX, lastY; /* X,Y history for tail handling */ f16x8_t coeffs; f16x8_t accVec; /* accumultor vector */ @@ -491,4 +491,5 @@ void arm_biquad_cascade_df1_f16( #endif /* #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */ #endif /*#if defined(ARM_FLOAT16_SUPPORTED)*/ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_f32.c index ae17c46..931a6f0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_f32.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_f32.c * Description: Processing function for the floating-point Biquad cascade DirectFormI(DF1) filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -178,7 +178,7 @@ void arm_biquad_cascade_df1_f32( const float32_t *pCoeffs = S->pCoeffs; /* coefficient pointer */ float32_t Xn1, Xn2, Yn1, Yn2; /* Filter pState variables */ float32_t lastX, lastY; /* X,Y history for tail handling */ - float32_t X0, X1, X2, X3; /* temporary input */ + float32_t X0, X1, X2, X3 = 0; /* temporary input */ f32x4_t coeffs; f32x4_t accVec; /* accumultor vector */ uint32_t sample, stage = S->numStages; /* loop counters */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_fast_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_fast_q15.c index e42af39..f6d7243 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_fast_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_fast_q15.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_fast_q15.c * Description: Fast processing function for the Q15 Biquad cascade filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -81,13 +81,13 @@ void arm_biquad_cascade_df1_fast_q15( do { /* Read the b0 and 0 coefficients using SIMD */ - b0 = read_q15x2_ia ((q15_t **) &pCoeffs); + b0 = read_q15x2_ia (&pCoeffs); /* Read the b1 and b2 coefficients using SIMD */ - b1 = read_q15x2_ia ((q15_t **) &pCoeffs); + b1 = read_q15x2_ia (&pCoeffs); /* Read the a1 and a2 coefficients using SIMD */ - a1 = read_q15x2_ia ((q15_t **) &pCoeffs); + a1 = read_q15x2_ia (&pCoeffs); /* Read the input state values from the state buffer: x[n-1], x[n-2] */ state_in = read_q15x2_ia (&pState); @@ -111,7 +111,7 @@ void arm_biquad_cascade_df1_fast_q15( { /* Read the input */ - in = read_q15x2_ia ((q15_t **) &pIn); + in = read_q15x2_ia (&pIn); /* out = b0 * x[n] + 0 * 0 */ out = __SMUAD(b0, in); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_fast_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_fast_q31.c index dbf2d01..1ddff4d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_fast_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_fast_q31.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_fast_q31.c * Description: Processing function for the Q31 Fast Biquad cascade DirectFormI(DF1) filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. 
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_f16.c index 4f291fe..0cbe6f6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_f16.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_init_f16.c * Description: Floating-point Biquad cascade DirectFormI(DF1) filter initialization function * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -98,29 +98,35 @@ void arm_biquad_cascade_df1_init_f16( #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) +/* + +The computation of the coefficients is done in float32 otherwise the +resulting filter is too different from the expected one. + +*/ static void generateCoefsFastBiquadF16(float16_t b0, float16_t b1, float16_t b2, float16_t a1, float16_t a2, arm_biquad_mod_coef_f16 * newCoef) { float32_t coeffs[8][12] = { - {0, 0, 0, 0, 0, 0, 0, b0, b1, b2, a1, a2}, - {0, 0, 0, 0, 0, 0, b0, b1, b2, 0, a2, 0}, - {0, 0, 0, 0, 0, b0, b1, b2, 0, 0, 0, 0}, - {0, 0, 0, 0, b0, b1, b2, 0, 0, 0, 0, 0}, - {0, 0, 0, b0, b1, b2, 0, 0, 0, 0, 0, 0}, - {0, 0, b0, b1, b2, 0, 0, 0, 0, 0, 0, 0}, - {0, b0, b1, b2, 0, 0, 0, 0, 0, 0, 0, 0}, - {b0, b1, b2, 0, 0, 0, 0, 0, 0, 0, 0, 0} + {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, (float32_t)b0, (float32_t)b1, (float32_t)b2, (float32_t)a1, (float32_t)a2}, + {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, (float32_t)b0, (float32_t)b1, (float32_t)b2, 0.0f, (float32_t)a2, 0.0f}, + {0.0f, 0.0f, 0.0f, 0.0f, 0.0f, (float32_t)b0, (float32_t)b1, (float32_t)b2, 0.0f, 0.0f, 0.0f, 0.0f}, + {0.0f, 0.0f, 0.0f, 0.0f, (float32_t)b0, (float32_t)b1, (float32_t)b2, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, + {0.0f, 0.0f, 0.0f, (float32_t)b0, (float32_t)b1, (float32_t)b2, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, + {0.0f, 0.0f, (float32_t)b0, (float32_t)b1, (float32_t)b2, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, + {0.0f, (float32_t)b0, (float32_t)b1, (float32_t)b2, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f}, + {(float32_t)b0, (float32_t)b1, (float32_t)b2, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f, 0.0f} }; for (int i = 0; i < 12; i++) { - coeffs[1][i] += (a1 * coeffs[0][i]); - coeffs[2][i] += (a1 * coeffs[1][i]) + (a2 * coeffs[0][i]); - coeffs[3][i] += (a1 * coeffs[2][i]) + (a2 * coeffs[1][i]); - coeffs[4][i] += (a1 * coeffs[3][i]) + (a2 * coeffs[2][i]); - coeffs[5][i] += (a1 * coeffs[4][i]) + (a2 * coeffs[3][i]); - coeffs[6][i] += (a1 * coeffs[5][i]) + (a2 * coeffs[4][i]); - coeffs[7][i] += (a1 * coeffs[6][i]) + (a2 * coeffs[5][i]); + coeffs[1][i] += ((float32_t)a1 * coeffs[0][i]); + coeffs[2][i] += ((float32_t)a1 * coeffs[1][i]) + ((float32_t)a2 * coeffs[0][i]); + coeffs[3][i] += 
((float32_t)a1 * coeffs[2][i]) + ((float32_t)a2 * coeffs[1][i]); + coeffs[4][i] += ((float32_t)a1 * coeffs[3][i]) + ((float32_t)a2 * coeffs[2][i]); + coeffs[5][i] += ((float32_t)a1 * coeffs[4][i]) + ((float32_t)a2 * coeffs[3][i]); + coeffs[6][i] += ((float32_t)a1 * coeffs[5][i]) + ((float32_t)a2 * coeffs[4][i]); + coeffs[7][i] += ((float32_t)a1 * coeffs[6][i]) + ((float32_t)a2 * coeffs[5][i]); /* * transpose @@ -159,5 +165,6 @@ void arm_biquad_cascade_df1_mve_init_f16(arm_biquad_casd_df1_inst_f16 * S, /** @} end of BiquadCascadeDF1 group */ -#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ +#endif /* #if defined(ARMfloat16_t_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_f32.c index e904fd9..91b079b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_init_f32.c * Description: Floating-point Biquad cascade DirectFormI(DF1) filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_q15.c index 54aa5b0..8f3020e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_init_q15.c * Description: Q15 Biquad cascade DirectFormI(DF1) filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
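The comment introduced in the hunk above explains that generateCoefsFastBiquadF16 accumulates the modified coefficients in float32 because the same recursion evaluated in half precision drifts too far from the intended filter. A minimal standalone sketch of that effect, not part of this patch, assuming a toolchain with _Float16 support and using hypothetical a1/a2 values:

/* Evaluates the row recursion from generateCoefsFastBiquadF16 in both precisions
 * so the rounding drift can be compared. Not part of this patch; a1/a2 are
 * placeholder pole coefficients, and _Float16 support is assumed. */
#include <stdio.h>

int main(void)
{
    const float a1 = 1.9f, a2 = -0.95f;            /* hypothetical coefficients */
    float     y32 = 1.0f, y32_prev = 0.0f;
    _Float16  y16 = (_Float16)1.0f, y16_prev = (_Float16)0.0f;

    for (int i = 0; i < 8; i++) {                  /* 8 rows, as in the coefficient table above */
        float    n32 = a1 * y32 + a2 * y32_prev;
        _Float16 n16 = (_Float16)a1 * y16 + (_Float16)a2 * y16_prev;
        y32_prev = y32;  y32 = n32;
        y16_prev = y16;  y16 = n16;
    }

    printf("float32 accumulation: %f\n", y32);
    printf("float16 accumulation: %f\n", (double)y16);
    return 0;
}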
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_q31.c index ee65719..0cc7acc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_init_q31.c * Description: Q31 Biquad cascade DirectFormI(DF1) filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_q15.c index 0791bbc..df7d114 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_q15.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_q15.c * Description: Processing function for the Q15 Biquad cascade DirectFormI(DF1) filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_q31.c index 5dbf177..ca2fce9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df1_q31.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df1_q31.c * Description: Processing function for the Q31 Biquad cascade filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -71,7 +71,7 @@ void arm_biquad_cascade_df1_q31( uint32_t stages = S->numStages; /* loop counters */ int postShift = S->postShift; q31x4_t b0Coeffs, b1Coeffs, a0Coeffs, a1Coeffs; /* Coefficients vector */ - q31x4_t stateVec; + q31x4_t stateVec = { 0 }; q31_t *pState = S->pState; /* pState pointer initialization */ q31x4_t inVec0; int64_t acc; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f16.c index ea24338..a9ef2e7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f16.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df2T_f16.c * Description: Processing function for floating-point transposed direct form II Biquad cascade filter * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -49,7 +49,7 @@ @return none */ -#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) +#if (defined(ARM_MATH_MVE_FLOAT16) && defined(ARM_MATH_HELIUM_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE) void arm_biquad_cascade_df2T_f16( const arm_biquad_cascade_df2T_instance_f16 * S, const float16_t * pSrc, @@ -188,7 +188,7 @@ void arm_biquad_cascade_df2T_f16( while (stage > 0U); } #else -LOW_OPTIMIZATION_ENTER + void arm_biquad_cascade_df2T_f16( const arm_biquad_cascade_df2T_instance_f16 * S, const float16_t * pSrc, @@ -488,7 +488,6 @@ void arm_biquad_cascade_df2T_f16( } while (stage > 0U); } -LOW_OPTIMIZATION_EXIT #endif /* #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ /** @} end of BiquadCascadeDF2T group diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f32.c index 1398842..f75a614 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f32.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df2T_f32.c * Description: Processing function for floating-point transposed direct form II Biquad cascade filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -47,7 +47,7 @@ @param[in] blockSize number of samples to process @return none */ -#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) +#if (defined(ARM_MATH_MVEF) && defined(ARM_MATH_HELIUM_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE) #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" void arm_biquad_cascade_df2T_f32( @@ -345,7 +345,7 @@ void arm_biquad_cascade_df2T_f32( } } #else -LOW_OPTIMIZATION_ENTER + void arm_biquad_cascade_df2T_f32( const arm_biquad_cascade_df2T_instance_f32 * S, const float32_t * pSrc, @@ -645,7 +645,7 @@ void arm_biquad_cascade_df2T_f32( } while (stage > 0U); } -LOW_OPTIMIZATION_EXIT + #endif /* #if defined(ARM_MATH_NEON) */ #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f64.c index f935a1b..6d72a5a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_f64.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df2T_f64.c * Description: Processing function for floating-point transposed direct form II Biquad cascade filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -107,7 +107,7 @@ To do this manually without calling the init function, assign the follow subfields of the instance structure: numStages, pCoeffs, pState. Also set all of the values in pState to zero. @par - Use of the initialization function is optional. + Use of the initialization function is optional except for the vectorized versions (Helium and Neon). However, if the initialization function is used, then the instance structure cannot be placed into a const data section. To place an instance structure into a const data section, the instance structure must be manually initialized. Set the values in the state buffer to zeros before static initialization. @@ -119,6 +119,12 @@ where numStages is the number of Biquad stages in the filter; pState is the address of the state buffer. pCoeffs is the address of the coefficient buffer; + @par Neon version + For Neon version, the function arm_biquad_cascade_df2T_compute_coefs_x must be + used in addition to arm_biquad_cascade_df2T_init_x. + + See the documentation of arm_biquad_cascade_df2T_init_x for more details. 
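The documentation context above describes the manual-initialization path for the DF2T filter: fill in numStages, pCoeffs and pState yourself and zero the state buffer, which also allows the instance to sit in a const data section. A minimal sketch of that pattern for a single f64 stage, not part of this patch; the coefficient values are placeholders, the include path may differ in this tree, and the field order (numStages, pState, pCoeffs) is assumed to match the stock CMSIS-DSP headers:

#include "arm_math.h"                      /* CMSIS-DSP */

#define NUM_STAGES 1U

/* {b10, b11, b12, a11, a12} - 5 coefficients per stage (placeholder values) */
static const float64_t biquadCoeffs[5U * NUM_STAGES] =
{
    0.25, 0.50, 0.25, 0.10, -0.05
};

/* 2 state variables per stage; static storage starts zeroed, as required */
static float64_t biquadState[2U * NUM_STAGES];

/* Manually initialized instance placed in const data instead of calling
 * arm_biquad_cascade_df2T_init_f64() */
static const arm_biquad_cascade_df2T_instance_f64 S =
{
    NUM_STAGES,
    biquadState,
    biquadCoeffs
};

void filter_block(const float64_t *pIn, float64_t *pOut, uint32_t blockSize)
{
    arm_biquad_cascade_df2T_f64(&S, pIn, pOut, blockSize);
}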
+ */ /** @@ -135,7 +141,7 @@ @return none */ -LOW_OPTIMIZATION_ENTER + void arm_biquad_cascade_df2T_f64( const arm_biquad_cascade_df2T_instance_f64 * S, const float64_t * pSrc, @@ -438,7 +444,7 @@ void arm_biquad_cascade_df2T_f64( } while (stage > 0U); } -LOW_OPTIMIZATION_EXIT + /** @} end of BiquadCascadeDF2T group diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f16.c index ebd0fc4..fa07f91 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f16.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df2T_init_f16.c * Description: Initialization function for floating-point transposed direct form II Biquad cascade filter * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -114,4 +114,5 @@ void arm_biquad_cascade_df2T_init_f16( */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f32.c index 00375d8..988d6ca 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df2T_init_f32.c * Description: Initialization function for floating-point transposed direct form II Biquad cascade filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -39,78 +39,32 @@ @{ */ -/** - @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. - @param[in,out] S points to an instance of the filter data structure. - @param[in] numStages number of 2nd order stages in the filter. - @param[in] pCoeffs points to the filter coefficients. - @param[in] pState points to the state buffer. - @return none - @par Coefficient and State Ordering - The coefficients are stored in the array pCoeffs in the following order - in the not Neon version. -
-      {b10, b11, b12, a11, a12, b20, b21, b22, a21, a22, ...}
-  
- - @par - where b1x and a1x are the coefficients for the first stage, - b2x and a2x are the coefficients for the second stage, - and so on. The pCoeffs array contains a total of 5*numStages values. - - For Neon version, this array is bigger. If numstages = 4x + y, then the array has size: - 32*x + 5*y - and it must be initialized using the function - arm_biquad_cascade_df2T_compute_coefs_f32 which is taking the - standard array coefficient as parameters. - - But, an array of 8*numstages is a good approximation. - - Then, the initialization can be done with: -
-                   arm_biquad_cascade_df2T_init_f32(&SNeon, nbCascade, neonCoefs, stateNeon);
-                   arm_biquad_cascade_df2T_compute_coefs_f32(&SNeon,nbCascade,coefs);
-  
- - @par In this example, neonCoefs is a bigger array of size 8 * numStages. - coefs is the standard array: - -
-      {b10, b11, b12, a11, a12, b20, b21, b22, a21, a22, ...}
-  
- - - @par - The pState is a pointer to state array. - Each Biquad stage has 2 state variables d1, and d2. - The 2 state variables for stage 1 are first, then the 2 state variables for stage 2, and so on. - The state array has a total length of 2*numStages values. - The state variables are updated after each block of data is processed; the coefficients are untouched. - */ #if defined(ARM_MATH_NEON) -/* +/** + @brief Compute new coefficient arrays for use in vectorized filter (Neon only). + @param[in] numStages number of 2nd order stages in the filter. + @param[in] pCoeffs points to the original filter coefficients. + @param[in] pComputedCoeffs points to the new computed coefficients for the vectorized Neon version. + @return none + + @par Size of coefficient arrays: + pCoeffs has size 5 * numStages -Must be called after initializing the biquad instance. -pCoeffs has size 5 * nbCascade -Whereas the pCoeffs for the init has size (4*4 + 4*4)* nbCascade + pComputedCoeffs has size 8 * numStages -So this pCoeffs is the one which would be used for the not Neon version. -The pCoeffs passed in init is bigger than the one for the not Neon version. + pComputedCoeffs is the array to be used in arm_biquad_cascade_df2T_init_f32. */ void arm_biquad_cascade_df2T_compute_coefs_f32( - arm_biquad_cascade_df2T_instance_f32 * S, uint8_t numStages, - float32_t * pCoeffs) + const float32_t * pCoeffs, + float32_t * pComputedCoeffs) { uint8_t cnt; - float32_t *pDstCoeffs; float32_t b0[4],b1[4],b2[4],a1[4],a2[4]; - pDstCoeffs = (float32_t*)S->pCoeffs; - cnt = numStages >> 2; while(cnt > 0) { @@ -125,52 +79,52 @@ void arm_biquad_cascade_df2T_compute_coefs_f32( } /* Vec 1 */ - *pDstCoeffs++ = 0; - *pDstCoeffs++ = b0[1]; - *pDstCoeffs++ = b0[2]; - *pDstCoeffs++ = b0[3]; + *pComputedCoeffs++ = 0; + *pComputedCoeffs++ = b0[1]; + *pComputedCoeffs++ = b0[2]; + *pComputedCoeffs++ = b0[3]; /* Vec 2 */ - *pDstCoeffs++ = 0; - *pDstCoeffs++ = 0; - *pDstCoeffs++ = b0[1] * b0[2]; - *pDstCoeffs++ = b0[2] * b0[3]; + *pComputedCoeffs++ = 0; + *pComputedCoeffs++ = 0; + *pComputedCoeffs++ = b0[1] * b0[2]; + *pComputedCoeffs++ = b0[2] * b0[3]; /* Vec 3 */ - *pDstCoeffs++ = 0; - *pDstCoeffs++ = 0; - *pDstCoeffs++ = 0; - *pDstCoeffs++ = b0[1] * b0[2] * b0[3]; + *pComputedCoeffs++ = 0; + *pComputedCoeffs++ = 0; + *pComputedCoeffs++ = 0; + *pComputedCoeffs++ = b0[1] * b0[2] * b0[3]; /* Vec 4 */ - *pDstCoeffs++ = b0[0]; - *pDstCoeffs++ = b0[0] * b0[1]; - *pDstCoeffs++ = b0[0] * b0[1] * b0[2]; - *pDstCoeffs++ = b0[0] * b0[1] * b0[2] * b0[3]; + *pComputedCoeffs++ = b0[0]; + *pComputedCoeffs++ = b0[0] * b0[1]; + *pComputedCoeffs++ = b0[0] * b0[1] * b0[2]; + *pComputedCoeffs++ = b0[0] * b0[1] * b0[2] * b0[3]; /* Vec 5 */ - *pDstCoeffs++ = b1[0]; - *pDstCoeffs++ = b1[1]; - *pDstCoeffs++ = b1[2]; - *pDstCoeffs++ = b1[3]; + *pComputedCoeffs++ = b1[0]; + *pComputedCoeffs++ = b1[1]; + *pComputedCoeffs++ = b1[2]; + *pComputedCoeffs++ = b1[3]; /* Vec 6 */ - *pDstCoeffs++ = b2[0]; - *pDstCoeffs++ = b2[1]; - *pDstCoeffs++ = b2[2]; - *pDstCoeffs++ = b2[3]; + *pComputedCoeffs++ = b2[0]; + *pComputedCoeffs++ = b2[1]; + *pComputedCoeffs++ = b2[2]; + *pComputedCoeffs++ = b2[3]; /* Vec 7 */ - *pDstCoeffs++ = a1[0]; - *pDstCoeffs++ = a1[1]; - *pDstCoeffs++ = a1[2]; - *pDstCoeffs++ = a1[3]; + *pComputedCoeffs++ = a1[0]; + *pComputedCoeffs++ = a1[1]; + *pComputedCoeffs++ = a1[2]; + *pComputedCoeffs++ = a1[3]; /* Vec 8 */ - *pDstCoeffs++ = a2[0]; - *pDstCoeffs++ = a2[1]; - *pDstCoeffs++ = a2[2]; - *pDstCoeffs++ = a2[3]; + *pComputedCoeffs++ = a2[0]; + 
*pComputedCoeffs++ = a2[1]; + *pComputedCoeffs++ = a2[2]; + *pComputedCoeffs++ = a2[3]; cnt--; } @@ -178,17 +132,66 @@ void arm_biquad_cascade_df2T_compute_coefs_f32( cnt = numStages & 0x3; while(cnt > 0) { - *pDstCoeffs++ = *pCoeffs++; - *pDstCoeffs++ = *pCoeffs++; - *pDstCoeffs++ = *pCoeffs++; - *pDstCoeffs++ = *pCoeffs++; - *pDstCoeffs++ = *pCoeffs++; + *pComputedCoeffs++ = *pCoeffs++; + *pComputedCoeffs++ = *pCoeffs++; + *pComputedCoeffs++ = *pCoeffs++; + *pComputedCoeffs++ = *pCoeffs++; + *pComputedCoeffs++ = *pCoeffs++; cnt--; } } #endif +/** + @brief Initialization function for the floating-point transposed direct form II Biquad cascade filter. + @param[in,out] S points to an instance of the filter data structure. + @param[in] numStages number of 2nd order stages in the filter. + @param[in] pCoeffs points to the filter coefficients. + @param[in] pState points to the state buffer. + @return none + + @par Coefficient and State Ordering + The coefficients are stored in the array pCoeffs in the following order + in the not Neon version. +
+      {b10, b11, b12, a11, a12, b20, b21, b22, a21, a22, ...}
+  
+ + @par + where b1x and a1x are the coefficients for the first stage, + b2x and a2x are the coefficients for the second stage, + and so on. The pCoeffs array contains a total of 5*numStages values. + + For Neon version, this array is bigger. If numstages = 4x + y, then the array has size: + 32*x + 5*y + and it must be initialized using the function + arm_biquad_cascade_df2T_compute_coefs_f32 which is taking the + standard array coefficient as parameters. + + But, an array of 8*numstages is a good approximation. + + Then, the initialization can be done with: +
+                   arm_biquad_cascade_df2T_compute_coefs_f32(nbCascade,coefs,computedCoefs);
+                   arm_biquad_cascade_df2T_init_f32(&SNeon, nbCascade, computedCoefs, stateNeon);
+  
+ + @par In this example, computedCoefs is a bigger array of size 8 * numStages. + coefs is the standard array: + +
+      {b10, b11, b12, a11, a12, b20, b21, b22, a21, a22, ...}
+  
+ + + @par + The pState is a pointer to state array. + Each Biquad stage has 2 state variables d1, and d2. + The 2 state variables for stage 1 are first, then the 2 state variables for stage 2, and so on. + The state array has a total length of 2*numStages values. + The state variables are updated after each block of data is processed; the coefficients are untouched. + */ void arm_biquad_cascade_df2T_init_f32( arm_biquad_cascade_df2T_instance_f32 * S, uint8_t numStages, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f64.c index c33c915..e06f35e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_df2T_init_f64.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_df2T_init_f64.c * Description: Initialization function for floating-point transposed direct form II Biquad cascade filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_f16.c index 2767bc1..ef6b4cb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_f16.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_stereo_df2T_f16.c * Description: Processing function for floating-point transposed direct form II Biquad cascade filter. 2 channels * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -53,7 +53,7 @@ #pragma GCC warning "Scalar version of arm_biquad_cascade_stereo_df2T_f16 built. Helium version has build issues with gcc." 
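The hunks above change arm_biquad_cascade_df2T_compute_coefs_f32 so that it no longer writes through the instance but into a caller-supplied pComputedCoeffs array, and the updated documentation now calls it before arm_biquad_cascade_df2T_init_f32. A short usage sketch of that new ordering, not part of this patch; the stage count and coefficients are placeholders, the include path may differ in this tree, and the 8 * numStages sizing is the approximation the documentation itself suggests:

#include "arm_math.h"                      /* CMSIS-DSP; include path may differ */

#define NB_CASCADE 2U

/* Standard layout: {b10, b11, b12, a11, a12, b20, ...}, 5 per stage (placeholder values) */
static float32_t coefs[5U * NB_CASCADE] =
{
    0.25f, 0.50f, 0.25f, 0.10f, -0.05f,
    0.20f, 0.40f, 0.20f, 0.05f, -0.02f
};

static float32_t computedCoefs[8U * NB_CASCADE];   /* expanded array for the Neon kernel */
static float32_t stateNeon[2U * NB_CASCADE];       /* 2 state variables per stage */

static arm_biquad_cascade_df2T_instance_f32 SNeon;

void setup_filter(void)
{
#if defined(ARM_MATH_NEON)
    /* New order after this patch: expand the coefficients first, then init with the result */
    arm_biquad_cascade_df2T_compute_coefs_f32(NB_CASCADE, coefs, computedCoefs);
    arm_biquad_cascade_df2T_init_f32(&SNeon, NB_CASCADE, computedCoefs, stateNeon);
#else
    /* Scalar and Helium builds keep using the standard 5-per-stage array directly */
    arm_biquad_cascade_df2T_init_f32(&SNeon, NB_CASCADE, coefs, stateNeon);
#endif
}

void filter_block(const float32_t *pIn, float32_t *pOut, uint32_t blockSize)
{
    arm_biquad_cascade_df2T_f32(&SNeon, pIn, pOut, blockSize);
}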
#endif -#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H) +#if (defined(ARM_MATH_MVE_FLOAT16) && defined(ARM_MATH_HELIUM_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H) void arm_biquad_cascade_stereo_df2T_f16( const arm_biquad_cascade_stereo_df2T_instance_f16 * S, const float16_t * pSrc, @@ -194,7 +194,7 @@ void arm_biquad_cascade_stereo_df2T_f16( while (stage > 0U); } #else -LOW_OPTIMIZATION_ENTER + void arm_biquad_cascade_stereo_df2T_f16( const arm_biquad_cascade_stereo_df2T_instance_f16 * S, const float16_t * pSrc, @@ -427,11 +427,12 @@ void arm_biquad_cascade_stereo_df2T_f16( } while (stage > 0U); } -LOW_OPTIMIZATION_EXIT + #endif /* #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */ /** @} end of BiquadCascadeDF2T group */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_f32.c index 5851c91..e0a5d03 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_f32.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_stereo_df2T_f32.c * Description: Processing function for floating-point transposed direct form II Biquad cascade filter. 2 channels * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -47,7 +47,7 @@ @param[in] blockSize number of samples to process @return none */ -#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) +#if (defined(ARM_MATH_MVEF) && defined(ARM_MATH_HELIUM_EXPERIMENTAL)) && !defined(ARM_MATH_AUTOVECTORIZE) #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" void arm_biquad_cascade_stereo_df2T_f32( @@ -181,7 +181,7 @@ void arm_biquad_cascade_stereo_df2T_f32( } #else -LOW_OPTIMIZATION_ENTER + void arm_biquad_cascade_stereo_df2T_f32( const arm_biquad_cascade_stereo_df2T_instance_f32 * S, const float32_t * pSrc, @@ -414,7 +414,7 @@ void arm_biquad_cascade_stereo_df2T_f32( } while (stage > 0U); } -LOW_OPTIMIZATION_EXIT + #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ /** diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_init_f16.c index 83f63ed..3277519 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_init_f16.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_stereo_df2T_init_f16.c * Description: Initialization function for floating-point transposed direct form II Biquad cascade filter * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_init_f32.c index aa4ce89..f7dd819 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_biquad_cascade_stereo_df2T_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_biquad_cascade_stereo_df2T_init_f32.c * Description: Initialization function for floating-point transposed direct form II Biquad cascade filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_f32.c index 9080c75..5e123e4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_f32.c @@ -5,13 +5,13 @@ * Title: arm_conv_f32.c * Description: Convolution of floating-point sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -45,12 +45,14 @@ @par Algorithm Let a[n] and b[n] be sequences of length srcALen and srcBLen samples respectively. Then the convolution -
-     c[n] = a[n] * b[n]
-  
+ \f[ + c[n] = a[n] * b[n] + \f] @par is defined as - \image html ConvolutionEquation.gif + \f[ + c[n] = \sum_{k=0}^{srcALen} a[k] b[n-k] + \f] @par Note that c[n] is of length srcALen + srcBLen - 1 and is defined over the interval n=0, 1, 2, ..., srcALen + srcBLen - 2. pSrcA points to the first input vector of length srcALen and @@ -62,9 +64,9 @@ For each offset \c n, the overlapping portions of a[n] and b[n] are multiplied and summed together. @par Note that convolution is a commutative operation: -
-     a[n] * b[n] = b[n] * a[n].
-  
+ \f[ + a[n] * b[n] = b[n] * a[n]. + \f] @par This means that switching the A and B arguments to the convolution functions has no effect. @@ -80,6 +82,12 @@ @par Opt Versions Opt versions are supported for Q15 and Q7. Design uses internal scratch buffer for getting good optimisation. These versions are optimised in cycles and consumes more memory (Scratch memory) compared to Q15 and Q7 versions + + @par Long versions: + For convolution of long vectors, those functions are + no more adapted and will be very slow. + An implementation based upon FFTs should be used. + */ /** diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_opt_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_opt_q15.c index dda46cf..62b1c95 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_opt_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_opt_q15.c @@ -5,13 +5,13 @@ * Title: arm_conv_fast_opt_q15.c * Description: Fast Q15 Convolution * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_q15.c index a0f4860..d00ad65 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_q15.c @@ -5,13 +5,13 @@ * Title: arm_conv_fast_q15.c * Description: Fast Q15 Convolution * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_q31.c index 70949a0..569e484 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_fast_q31.c @@ -5,13 +5,13 @@ * Title: arm_conv_fast_q31.c * Description: Fast Q31 Convolution * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_opt_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_opt_q15.c index ad7bf76..6230627 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_opt_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_opt_q15.c @@ -5,13 +5,13 @@ * Title: arm_conv_opt_q15.c * Description: Convolution of Q15 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_opt_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_opt_q7.c index a4b251d..1afdb5d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_opt_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_opt_q7.c @@ -5,13 +5,13 @@ * Title: arm_conv_opt_q7.c * Description: Convolution of Q7 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_f32.c index 73c732e..1ce871c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_f32.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_f32.c * Description: Partial convolution of floating-point sequences * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -58,6 +58,12 @@ @par Opt Versions Opt versions are supported for Q15 and Q7. Design uses internal scratch buffer for getting good optimisation. These versions are optimised in cycles and consumes more memory (Scratch memory) compared to Q15 and Q7 versions of partial convolution + + @par Long versions: + For convolution of long vectors, those functions are + no more adapted and will be very slow. + An implementation based upon FFTs should be used. + */ /** @@ -97,7 +103,7 @@ arm_status arm_conv_partial_f32( const float32_t *pSrc1, *pSrc2; /* Intermediate pointers */ float32_t sum; /* Accumulator */ uint32_t j, k, count, blkCnt, check; - uint32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ + int32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ arm_status status; /* Status of Partial convolution */ #if defined (ARM_MATH_LOOPUNROLL) @@ -144,7 +150,7 @@ arm_status arm_conv_partial_f32( blockSize3 = ((int32_t)check > (int32_t)srcALen) ? (int32_t)check - (int32_t)srcALen : 0; blockSize3 = ((int32_t)firstIndex > (int32_t)srcALen - 1) ? blockSize3 - (int32_t)firstIndex + (int32_t)srcALen : blockSize3; blockSize1 = ((int32_t) srcBLen - 1) - (int32_t) firstIndex; - blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : numPoints) : 0; + blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : (int32_t)numPoints) : 0; blockSize2 = ((int32_t) check - blockSize3) - (blockSize1 + (int32_t) firstIndex); blockSize2 = (blockSize2 > 0) ? 
blockSize2 : 0; @@ -189,7 +195,7 @@ arm_status arm_conv_partial_f32( * ----------------------*/ /* The first stage starts here */ - while (blockSize1 > 0U) + while (blockSize1 > 0) { /* Accumulator is made zero for every iteration */ sum = 0.0f; @@ -541,7 +547,14 @@ arm_status arm_conv_partial_f32( count = srcBLen - 1U; /* Working pointer of inputA */ - pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + if (firstIndex > srcALen) + { + pSrc1 = (pIn1 + firstIndex) - (srcBLen - 1U); + } + else + { + pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + } px = pSrc1; /* Working pointer of inputB */ @@ -552,7 +565,7 @@ arm_status arm_conv_partial_f32( * Stage3 process * ------------------*/ - while (blockSize3 > 0U) + while (blockSize3 > 0) { /* Accumulator is made zero for every iteration */ sum = 0.0f; @@ -629,7 +642,6 @@ arm_status arm_conv_partial_f32( float32_t sum; /* Accumulator */ uint32_t i, j; /* Loop counters */ arm_status status; /* Status of Partial convolution */ - /* Check for range of output samples to be calculated */ if ((firstIndex + numPoints) > ((srcALen + (srcBLen - 1U)))) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_opt_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_opt_q15.c index 310d0a7..d181f6e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_opt_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_opt_q15.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_fast_opt_q15.c * Description: Fast Q15 Partial convolution * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_q15.c index 700e553..96cfe1c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_q15.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_fast_q15.c * Description: Fast Q15 Partial convolution * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
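The arm_conv_partial_* hunks above switch blockSize1/2/3 to signed counters and fix the stage-3 input pointer when firstIndex goes past srcALen. From the caller's side, firstIndex and numPoints simply select which samples of the full convolution c[n] = sum_k a[k] * b[n-k] are produced. A small self-check sketch, not part of this patch, with placeholder data; it assumes, as in the stock CMSIS sources, that the requested samples are written at their absolute positions in pDst:

#include <stdio.h>
#include "arm_math.h"                      /* CMSIS-DSP; include path may differ */

int main(void)
{
    /* Placeholder sequences: srcALen = 8, srcBLen = 4 */
    float32_t a[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };
    float32_t b[4] = { 0.5f, 0.25f, 0.125f, 0.0625f };

    /* The full convolution has srcALen + srcBLen - 1 = 11 samples (indices 0..10).
     * Request only the last three: firstIndex = 8, numPoints = 3, which lands in
     * the stage-3 code path touched above (firstIndex > srcALen - 1). */
    float32_t dst[11] = { 0 };
    arm_status st = arm_conv_partial_f32(a, 8, b, 4, dst, 8, 3);

    if (st != ARM_MATH_SUCCESS)
    {
        printf("requested range rejected\n");
        return 1;
    }

    /* Reference evaluation of c[n] = sum_k a[k] * b[n-k] over the valid overlap */
    for (uint32_t n = 8; n < 11; n++)
    {
        float32_t ref = 0.0f;
        for (uint32_t k = 0; k < 8; k++)
        {
            if ((n >= k) && ((n - k) < 4))
            {
                ref += a[k] * b[n - k];
            }
        }
        printf("c[%u] = %f (reference %f)\n", (unsigned)n, dst[n], ref);
    }

    return 0;
}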
* * SPDX-License-Identifier: Apache-2.0 * @@ -576,7 +576,14 @@ arm_status arm_conv_partial_fast_q15( count = srcBLen - 1U; /* Working pointer of inputA */ - pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + if (firstIndex > srcALen) + { + pSrc1 = (pIn1 + firstIndex) - (srcBLen - 1U); + } + else + { + pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + } px = pSrc1; /* Working pointer of inputB */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_q31.c index 2fb96f3..4f7a01a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_fast_q31.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_fast_q31.c * Description: Fast Q31 Partial convolution * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -73,7 +73,7 @@ arm_status arm_conv_partial_fast_q31( const q31_t *pSrc1, *pSrc2; /* Intermediate pointers */ q31_t sum; /* Accumulators */ uint32_t j, k, count, check, blkCnt; - uint32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ + int32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ arm_status status; /* Status of Partial convolution */ #if defined (ARM_MATH_LOOPUNROLL) @@ -120,7 +120,7 @@ arm_status arm_conv_partial_fast_q31( blockSize3 = ((int32_t)check > (int32_t)srcALen) ? (int32_t)check - (int32_t)srcALen : 0; blockSize3 = ((int32_t)firstIndex > (int32_t)srcALen - 1) ? blockSize3 - (int32_t)firstIndex + (int32_t)srcALen : blockSize3; blockSize1 = ((int32_t) srcBLen - 1) - (int32_t) firstIndex; - blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : numPoints) : 0; + blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : (int32_t)numPoints) : 0; blockSize2 = (int32_t) check - ((blockSize3 + blockSize1) + (int32_t) firstIndex); blockSize2 = (blockSize2 > 0) ? 
blockSize2 : 0; @@ -165,7 +165,7 @@ arm_status arm_conv_partial_fast_q31( * ----------------------*/ /* The first stage starts here */ - while (blockSize1 > 0U) + while (blockSize1 > 0) { /* Accumulator is made zero for every iteration */ sum = 0; @@ -528,7 +528,14 @@ arm_status arm_conv_partial_fast_q31( count = srcBLen - 1U; /* Working pointer of inputA */ - pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + if (firstIndex > srcALen) + { + pSrc1 = (pIn1 + firstIndex) - (srcBLen - 1U); + } + else + { + pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + } px = pSrc1; /* Working pointer of inputB */ @@ -539,7 +546,7 @@ arm_status arm_conv_partial_fast_q31( * Stage3 process * ------------------*/ - while (blockSize3 > 0U) + while (blockSize3 > 0) { /* Accumulator is made zero for every iteration */ sum = 0; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_opt_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_opt_q15.c index a2cc22c..1296674 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_opt_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_opt_q15.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_opt_q15.c * Description: Partial convolution of Q15 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_opt_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_opt_q7.c index 2befd5d..1b0527d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_opt_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_opt_q7.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_opt_q7.c * Description: Partial convolution of Q7 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q15.c index 52f253c..41cd5c9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q15.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_q15.c * Description: Partial convolution of Q15 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -78,7 +78,7 @@ arm_status arm_conv_partial_q15( const q15_t *py; /* Intermediate inputB pointer */ const q15_t *pSrc1, *pSrc2; /* Intermediate pointers */ q31_t x0, x1, x2, x3, c0; /* Temporary input variables to hold state and coefficient values */ - uint32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ + int32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ uint32_t j, k, count, blkCnt, check; arm_status status; /* Status of Partial convolution */ @@ -121,7 +121,7 @@ arm_status arm_conv_partial_q15( blockSize3 = ((int32_t)check > (int32_t)srcALen) ? (int32_t)check - (int32_t)srcALen : 0; blockSize3 = ((int32_t)firstIndex > (int32_t)srcALen - 1) ? blockSize3 - (int32_t)firstIndex + (int32_t)srcALen : blockSize3; blockSize1 = ((int32_t) srcBLen - 1) - (int32_t) firstIndex; - blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : numPoints) : 0; + blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : (int32_t)numPoints) : 0; blockSize2 = (int32_t) check - ((blockSize3 + blockSize1) + (int32_t) firstIndex); blockSize2 = (blockSize2 > 0) ? 
blockSize2 : 0; @@ -170,7 +170,7 @@ arm_status arm_conv_partial_q15( /* Second part of this stage computes the MAC operations greater than or equal to 4 */ /* The first part of the stage starts here */ - while ((count < 4U) && (blockSize1 > 0U)) + while ((count < 4U) && (blockSize1 > 0)) { /* Accumulator is made zero for every iteration */ sum = 0; @@ -208,7 +208,7 @@ arm_status arm_conv_partial_q15( * y[srcBLen] and y[srcBLen-1] coefficients, py is decremented by 1 */ py = py - 1; - while (blockSize1 > 0U) + while (blockSize1 > 0) { /* Accumulator is made zero for every iteration */ sum = 0; @@ -582,7 +582,14 @@ arm_status arm_conv_partial_q15( count = srcBLen - 1U; /* Working pointer of inputA */ - pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + if (firstIndex > srcALen) + { + pSrc1 = (pIn1 + firstIndex) - (srcBLen - 1U); + } + else + { + pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + } px = pSrc1; /* Working pointer of inputB */ @@ -601,7 +608,7 @@ arm_status arm_conv_partial_q15( /* The first part of the stage starts here */ j = count >> 2U; - while ((j > 0U) && (blockSize3 > 0U)) + while ((j > 0U) && (blockSize3 > 0)) { /* Accumulator is made zero for every iteration */ sum = 0; @@ -662,7 +669,7 @@ arm_status arm_conv_partial_q15( * so pointer py is updated to read only one sample at a time */ py = py + 1U; - while (blockSize3 > 0U) + while (blockSize3 > 0) { /* Accumulator is made zero for every iteration */ sum = 0; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q31.c index eb360b6..887aa71 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q31.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_q31.c * Description: Partial convolution of Q31 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -76,7 +76,7 @@ arm_status arm_conv_partial_q31( const q31_t *pSrc1, *pSrc2; /* Intermediate pointers */ q63_t sum; /* Accumulator */ uint32_t j, k, count, blkCnt, check; - uint32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ + int32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ arm_status status; /* Status of Partial convolution */ #if defined (ARM_MATH_LOOPUNROLL) @@ -123,7 +123,7 @@ arm_status arm_conv_partial_q31( blockSize3 = ((int32_t)check > (int32_t)srcALen) ? (int32_t)check - (int32_t)srcALen : 0; blockSize3 = ((int32_t)firstIndex > (int32_t)srcALen - 1) ? blockSize3 - (int32_t)firstIndex + (int32_t)srcALen : blockSize3; blockSize1 = ((int32_t) srcBLen - 1) - (int32_t) firstIndex; - blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : numPoints) : 0; + blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? 
blockSize1 : (int32_t)numPoints) : 0; blockSize2 = (int32_t) check - ((blockSize3 + blockSize1) + (int32_t) firstIndex); blockSize2 = (blockSize2 > 0) ? blockSize2 : 0; @@ -168,7 +168,7 @@ arm_status arm_conv_partial_q31( * ----------------------*/ /* The first stage starts here */ - while (blockSize1 > 0U) + while (blockSize1 > 0) { /* Accumulator is made zero for every iteration */ sum = 0; @@ -501,7 +501,14 @@ arm_status arm_conv_partial_q31( count = srcBLen - 1U; /* Working pointer of inputA */ - pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + if (firstIndex > srcALen) + { + pSrc1 = (pIn1 + firstIndex) - (srcBLen - 1U); + } + else + { + pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + } px = pSrc1; /* Working pointer of inputB */ @@ -512,7 +519,7 @@ arm_status arm_conv_partial_q31( * Stage3 process * ------------------*/ - while (blockSize3 > 0U) + while (blockSize3 > 0) { /* Accumulator is made zero for every iteration */ sum = 0; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q7.c index a4f03af..3589f63 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_partial_q7.c @@ -5,13 +5,13 @@ * Title: arm_conv_partial_q7.c * Description: Partial convolution of Q7 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -66,7 +66,7 @@ arm_status arm_conv_partial_q7( uint32_t numPoints) { -#if ARM_MATH_DSP +#if defined(ARM_MATH_DSP) const q7_t *pIn1; /* InputA pointer */ const q7_t *pIn2; /* InputB pointer */ @@ -76,7 +76,7 @@ arm_status arm_conv_partial_q7( const q7_t *pSrc1, *pSrc2; /* Intermediate pointers */ q31_t sum; /* Accumulator */ uint32_t j, k, count, blkCnt, check; /* Loop counters */ - uint32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ + int32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ arm_status status; /* Status of Partial convolution */ #if defined (ARM_MATH_LOOPUNROLL) @@ -125,7 +125,7 @@ arm_status arm_conv_partial_q7( blockSize3 = ((int32_t)check > (int32_t)srcALen) ? (int32_t)check - (int32_t)srcALen : 0; blockSize3 = ((int32_t)firstIndex > (int32_t)srcALen - 1) ? blockSize3 - (int32_t)firstIndex + (int32_t)srcALen : blockSize3; blockSize1 = ((int32_t) srcBLen - 1) - (int32_t) firstIndex; - blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : numPoints) : 0; + blockSize1 = (blockSize1 > 0) ? ((check > (srcBLen - 1U)) ? blockSize1 : (int32_t)numPoints) : 0; blockSize2 = (int32_t) check - ((blockSize3 + blockSize1) + (int32_t) firstIndex); blockSize2 = (blockSize2 > 0) ? 
blockSize2 : 0; @@ -170,7 +170,7 @@ arm_status arm_conv_partial_q7( * ----------------------*/ /* The first stage starts here */ - while (blockSize1 > 0U) + while (blockSize1 > 0) { /* Accumulator is made zero for every iteration */ sum = 0; @@ -604,7 +604,14 @@ arm_status arm_conv_partial_q7( count = srcBLen - 1U; /* Working pointer of inputA */ - pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + if (firstIndex > srcALen) + { + pSrc1 = (pIn1 + firstIndex) - (srcBLen - 1U); + } + else + { + pSrc1 = (pIn1 + srcALen) - (srcBLen - 1U); + } px = pSrc1; /* Working pointer of inputB */ @@ -615,7 +622,7 @@ arm_status arm_conv_partial_q7( * Stage3 process * ------------------*/ - while (blockSize3 > 0U) + while (blockSize3 > 0) { /* Accumulator is made zero for every iteration */ sum = 0; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q15.c index aae3708..38e652c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q15.c @@ -5,13 +5,13 @@ * Title: arm_conv_q15.c * Description: Convolution of Q15 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q31.c index 1e133f0..9d2dd29 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q31.c @@ -5,13 +5,13 @@ * Title: arm_conv_q31.c * Description: Convolution of Q31 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q7.c index 0c521c3..a0f96dd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_conv_q7.c @@ -5,13 +5,13 @@ * Title: arm_conv_q7.c * Description: Convolution of Q7 sequences * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f16.c index d35d92c..d584c25 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f16.c @@ -5,13 +5,13 @@ * Title: arm_correlate_f16.c * Description: Correlation of floating-point sequences * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -35,51 +35,7 @@ @ingroup groupFilters */ -/** - @defgroup Corr Correlation - - Correlation is a mathematical operation that is similar to convolution. - As with convolution, correlation uses two signals to produce a third signal. - The underlying algorithms in correlation and convolution are identical except that one of the inputs is flipped in convolution. - Correlation is commonly used to measure the similarity between two signals. - It has applications in pattern recognition, cryptanalysis, and searching. - The CMSIS library provides correlation functions for Q7, Q15, Q31 and floating-point data types. - Fast versions of the Q15 and Q31 functions are also provided. - - @par Algorithm - Let a[n] and b[n] be sequences of length srcALen and srcBLen samples respectively. - The convolution of the two signals is denoted by -
-      c[n] = a[n] * b[n]
-  </pre>
- In correlation, one of the signals is flipped in time
-  <pre>
-       c[n] = a[n] * b[-n]
-  </pre>
- @par - and this is mathematically defined as - \image html CorrelateEquation.gif - @par - The pSrcA points to the first input vector of length srcALen and pSrcB points to the second input vector of length srcBLen. - The result c[n] is of length 2 * max(srcALen, srcBLen) - 1 and is defined over the interval n=0, 1, 2, ..., (2 * max(srcALen, srcBLen) - 2). - The output result is written to pDst and the calling function must allocate 2 * max(srcALen, srcBLen) - 1 words for the result. - - @note - The pDst should be initialized to all zeros before being used. - - @par Fixed-Point Behavior - Correlation requires summing up a large number of intermediate products. - As such, the Q7, Q15, and Q31 functions run a risk of overflow and saturation. - Refer to the function specific documentation below for further details of the particular algorithm used. - - @par Fast Versions - Fast versions are supported for Q31 and Q15. Cycles for Fast versions are less compared to Q31 and Q15 of correlate and the design requires - the input signals should be scaled down to avoid intermediate overflows. - - @par Opt Versions - Opt versions are supported for Q15 and Q7. Design uses internal scratch buffer for getting good optimisation. - These versions are optimised in cycles and consumes more memory (Scratch memory) compared to Q15 and Q7 versions of correlate - */ + /** @addtogroup Corr @@ -640,16 +596,16 @@ void arm_correlate_f16( while (k > 0U) { /* x[0] * y[srcBLen - 4] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* x[1] * y[srcBLen - 3] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* x[2] * y[srcBLen - 2] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* x[3] * y[srcBLen - 1] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* Decrement loop counter */ k--; @@ -669,7 +625,7 @@ void arm_correlate_f16( { /* Perform the multiply-accumulate */ /* x[0] * y[srcBLen - 1] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* Decrement loop counter */ k--; @@ -752,13 +708,13 @@ void arm_correlate_f16( /* Perform the multiply-accumulate */ /* acc0 += x[0] * y[0] */ - acc0 += x0 * c0; + acc0 += (_Float16)x0 * (_Float16)c0; /* acc1 += x[1] * y[0] */ - acc1 += x1 * c0; + acc1 += (_Float16)x1 * (_Float16)c0; /* acc2 += x[2] * y[0] */ - acc2 += x2 * c0; + acc2 += (_Float16)x2 * (_Float16)c0; /* acc3 += x[3] * y[0] */ - acc3 += x3 * c0; + acc3 += (_Float16)x3 * (_Float16)c0; /* Read y[1] sample */ c0 = *(py++); @@ -767,13 +723,13 @@ void arm_correlate_f16( /* Perform the multiply-accumulate */ /* acc0 += x[1] * y[1] */ - acc0 += x1 * c0; + acc0 += (_Float16)x1 * (_Float16)c0; /* acc1 += x[2] * y[1] */ - acc1 += x2 * c0; + acc1 += (_Float16)x2 * (_Float16)c0; /* acc2 += x[3] * y[1] */ - acc2 += x3 * c0; + acc2 += (_Float16)x3 * (_Float16)c0; /* acc3 += x[4] * y[1] */ - acc3 += x0 * c0; + acc3 += (_Float16)x0 * (_Float16)c0; /* Read y[2] sample */ c0 = *(py++); @@ -782,13 +738,13 @@ void arm_correlate_f16( /* Perform the multiply-accumulate */ /* acc0 += x[2] * y[2] */ - acc0 += x2 * c0; + acc0 += (_Float16)x2 * (_Float16)c0; /* acc1 += x[3] * y[2] */ - acc1 += x3 * c0; + acc1 += (_Float16)x3 * (_Float16)c0; /* acc2 += x[4] * y[2] */ - acc2 += x0 * c0; + acc2 += (_Float16)x0 * (_Float16)c0; /* acc3 += x[5] * y[2] */ - acc3 += x1 * c0; + acc3 += (_Float16)x1 * (_Float16)c0; /* Read y[3] sample */ c0 = *(py++); @@ -797,13 +753,13 @@ void arm_correlate_f16( /* Perform the multiply-accumulate */ /* acc0 += x[3] * 
y[3] */ - acc0 += x3 * c0; + acc0 += (_Float16)x3 * (_Float16)c0; /* acc1 += x[4] * y[3] */ - acc1 += x0 * c0; + acc1 += (_Float16)x0 * (_Float16)c0; /* acc2 += x[5] * y[3] */ - acc2 += x1 * c0; + acc2 += (_Float16)x1 * (_Float16)c0; /* acc3 += x[6] * y[3] */ - acc3 += x2 * c0; + acc3 += (_Float16)x2 * (_Float16)c0; } while (--k); @@ -820,13 +776,13 @@ void arm_correlate_f16( /* Perform the multiply-accumulate */ /* acc0 += x[4] * y[4] */ - acc0 += x0 * c0; + acc0 += (_Float16)x0 * (_Float16)c0; /* acc1 += x[5] * y[4] */ - acc1 += x1 * c0; + acc1 += (_Float16)x1 * (_Float16)c0; /* acc2 += x[6] * y[4] */ - acc2 += x2 * c0; + acc2 += (_Float16)x2 * (_Float16)c0; /* acc3 += x[7] * y[4] */ - acc3 += x3 * c0; + acc3 += (_Float16)x3 * (_Float16)c0; /* Reuse the present samples for the next MAC */ x0 = x1; @@ -888,10 +844,10 @@ void arm_correlate_f16( while (k > 0U) { /* Perform the multiply-accumulate */ - sum += *px++ * *py++; - sum += *px++ * *py++; - sum += *px++ * *py++; - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; + sum += (_Float16)*px++ * (_Float16)*py++; + sum += (_Float16)*px++ * (_Float16)*py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* Decrement loop counter */ k--; @@ -909,7 +865,7 @@ void arm_correlate_f16( while (k > 0U) { /* Perform the multiply-accumulate */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* Decrement the loop counter */ k--; @@ -949,7 +905,7 @@ void arm_correlate_f16( while (k > 0U) { /* Perform the multiply-accumulate */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* Decrement the loop counter */ k--; @@ -1016,16 +972,16 @@ void arm_correlate_f16( { /* Perform the multiply-accumulate */ /* sum += x[srcALen - srcBLen + 4] * y[3] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* sum += x[srcALen - srcBLen + 3] * y[2] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* sum += x[srcALen - srcBLen + 2] * y[1] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* sum += x[srcALen - srcBLen + 1] * y[0] */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* Decrement loop counter */ k--; @@ -1044,7 +1000,7 @@ void arm_correlate_f16( while (k > 0U) { /* Perform the multiply-accumulate */ - sum += *px++ * *py++; + sum += (_Float16)*px++ * (_Float16)*py++; /* Decrement loop counter */ k--; @@ -1138,7 +1094,7 @@ void arm_correlate_f16( if ((((i - j) < srcBLen) && (j < srcALen))) { /* z[i] += x[i-j] * y[j] */ - sum += pIn1[j] * pIn2[-((int32_t) i - j)]; + sum += (_Float16)pIn1[j] * (_Float16)pIn2[-((int32_t) i - (int32_t) j)]; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f32.c index bf1eaf5..7d4880e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f32.c @@ -5,13 +5,13 @@ * Title: arm_correlate_f32.c * Description: Correlation of floating-point sequences * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -48,16 +48,20 @@ @par Algorithm Let a[n] and b[n] be sequences of length srcALen and srcBLen samples respectively. The convolution of the two signals is denoted by -
-      c[n] = a[n] * b[n]
-  </pre>
+ \f[ + c[n] = a[n] * b[n] + \f] +
- In correlation, one of the signals is flipped in time
-  <pre>
-       c[n] = a[n] * b[-n]
+ + \f[ + c[n] = a[n] * b[-n] + \f] @par and this is mathematically defined as - \image html CorrelateEquation.gif + \f[ + c[n] = \sum_{k=0}^{srcALen} a[k] b[k-n] + \f] @par The pSrcA points to the first input vector of length srcALen and pSrcB points to the second input vector of length srcBLen. The result c[n] is of length 2 * max(srcALen, srcBLen) - 1 and is defined over the interval n=0, 1, 2, ..., (2 * max(srcALen, srcBLen) - 2). @@ -78,6 +82,11 @@ @par Opt Versions Opt versions are supported for Q15 and Q7. Design uses internal scratch buffer for getting good optimisation. These versions are optimised in cycles and consumes more memory (Scratch memory) compared to Q15 and Q7 versions of correlate + + @par Long versions: + For convolution of long vectors, those functions are + no more adapted and will be very slow. + An implementation based upon FFTs should be used. */ /** @@ -1076,7 +1085,7 @@ void arm_correlate_f32( if ((((i - j) < srcBLen) && (j < srcALen))) { /* z[i] += x[i-j] * y[j] */ - sum += pIn1[j] * pIn2[-((int32_t) i - j)]; + sum += pIn1[j] * pIn2[-((int32_t) i - (int32_t) j)]; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f64.c new file mode 100644 index 0000000..e0e9ba6 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_f64.c @@ -0,0 +1,369 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_correlate_f64.c + * Description: Correlation of floating-point sequences + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h" + +/** + @ingroup groupFilters + */ + +/** + @addtogroup Corr + @{ + */ + +/** + @brief Correlation of floating-point sequences. + @param[in] pSrcA points to the first input sequence + @param[in] srcALen length of the first input sequence + @param[in] pSrcB points to the second input sequence + @param[in] srcBLen length of the second input sequence + @param[out] pDst points to the location where the output result is written. Length 2 * max(srcALen, srcBLen) - 1. 
+ @return none + */ + +void arm_correlate_f64( + const float64_t * pSrcA, + uint32_t srcALen, + const float64_t * pSrcB, + uint32_t srcBLen, + float64_t * pDst) +{ + const float64_t *pIn1; /* InputA pointer */ + const float64_t *pIn2; /* InputB pointer */ + float64_t *pOut = pDst; /* Output pointer */ + const float64_t *px; /* Intermediate inputA pointer */ + const float64_t *py; /* Intermediate inputB pointer */ + const float64_t *pSrc1; + float64_t sum; + uint32_t blockSize1, blockSize2, blockSize3; /* Loop counters */ + uint32_t j, k, count, blkCnt; /* Loop counters */ + uint32_t outBlockSize; /* Loop counter */ + int32_t inc = 1; /* Destination address modifier */ + + /* The algorithm implementation is based on the lengths of the inputs. */ + /* srcB is always made to slide across srcA. */ + /* So srcBLen is always considered as shorter or equal to srcALen */ + /* But CORR(x, y) is reverse of CORR(y, x) */ + /* So, when srcBLen > srcALen, output pointer is made to point to the end of the output buffer */ + /* and the destination pointer modifier, inc is set to -1 */ + /* If srcALen > srcBLen, zero pad has to be done to srcB to make the two inputs of same length */ + /* But to improve the performance, + * we assume zeroes in the output instead of zero padding either of the the inputs*/ + /* If srcALen > srcBLen, + * (srcALen - srcBLen) zeroes has to included in the starting of the output buffer */ + /* If srcALen < srcBLen, + * (srcALen - srcBLen) zeroes has to included in the ending of the output buffer */ + if (srcALen >= srcBLen) + { + /* Initialization of inputA pointer */ + pIn1 = pSrcA; + + /* Initialization of inputB pointer */ + pIn2 = pSrcB; + + /* Number of output samples is calculated */ + outBlockSize = (2U * srcALen) - 1U; + + /* When srcALen > srcBLen, zero padding has to be done to srcB + * to make their lengths equal. + * Instead, (outBlockSize - (srcALen + srcBLen - 1)) + * number of output samples are made zero */ + j = outBlockSize - (srcALen + (srcBLen - 1U)); + + /* Updating the pointer position to non zero value */ + pOut += j; + } + else + { + /* Initialization of inputA pointer */ + pIn1 = pSrcB; + + /* Initialization of inputB pointer */ + pIn2 = pSrcA; + + /* srcBLen is always considered as shorter or equal to srcALen */ + j = srcBLen; + srcBLen = srcALen; + srcALen = j; + + /* CORR(x, y) = Reverse order(CORR(y, x)) */ + /* Hence set the destination pointer to point to the last output sample */ + pOut = pDst + ((srcALen + srcBLen) - 2U); + + /* Destination address modifier is set to -1 */ + inc = -1; + } + + /* The function is internally + * divided into three stages according to the number of multiplications that has to be + * taken place between inputA samples and inputB samples. In the first stage of the + * algorithm, the multiplications increase by one for every iteration. + * In the second stage of the algorithm, srcBLen number of multiplications are done. + * In the third stage of the algorithm, the multiplications decrease by one + * for every iteration. */ + + /* The algorithm is implemented in three stages. + The loop counters of each stage is initiated here. */ + blockSize1 = srcBLen - 1U; + blockSize2 = srcALen - (srcBLen - 1U); + blockSize3 = blockSize1; + + /* -------------------------- + * Initializations of stage1 + * -------------------------*/ + + /* sum = x[0] * y[srcBlen - 1] + * sum = x[0] * y[srcBlen-2] + x[1] * y[srcBlen - 1] + * .... 
+ * sum = x[0] * y[0] + x[1] * y[1] +...+ x[srcBLen - 1] * y[srcBLen - 1] + */ + + /* In this stage the MAC operations are increased by 1 for every iteration. + The count variable holds the number of MAC operations performed */ + count = 1U; + + /* Working pointer of inputA */ + px = pIn1; + + /* Working pointer of inputB */ + pSrc1 = pIn2 + (srcBLen - 1U); + py = pSrc1; + + /* ------------------------ + * Stage1 process + * ----------------------*/ + + /* The first stage starts here */ + while (blockSize1 > 0U) + { + /* Accumulator is made zero for every iteration */ + sum = 0.; + + /* Initialize k with number of samples */ + k = count; + + while (k > 0U) + { + /* Perform the multiply-accumulate */ + /* x[0] * y[srcBLen - 1] */ + sum += *px++ * *py++; + + /* Decrement loop counter */ + k--; + } + + /* Store the result in the accumulator in the destination buffer. */ + *pOut = sum; + /* Destination pointer is updated according to the address modifier, inc */ + pOut += inc; + + /* Update the inputA and inputB pointers for next MAC calculation */ + py = pSrc1 - count; + px = pIn1; + + /* Increment MAC count */ + count++; + + /* Decrement loop counter */ + blockSize1--; + } + + /* -------------------------- + * Initializations of stage2 + * ------------------------*/ + + /* sum = x[0] * y[0] + x[1] * y[1] +...+ x[srcBLen-1] * y[srcBLen-1] + * sum = x[1] * y[0] + x[2] * y[1] +...+ x[srcBLen] * y[srcBLen-1] + * .... + * sum = x[srcALen-srcBLen-2] * y[0] + x[srcALen-srcBLen-1] * y[1] +...+ x[srcALen-1] * y[srcBLen-1] + */ + + /* Working pointer of inputA */ + px = pIn1; + + /* Working pointer of inputB */ + py = pIn2; + + /* count is index by which the pointer pIn1 to be incremented */ + count = 0U; + + /* ------------------- + * Stage2 process + * ------------------*/ + + /* Stage2 depends on srcBLen as in this stage srcBLen number of MACS are performed. + * So, to loop unroll over blockSize2, + * srcBLen should be greater than or equal to 4 */ + if (srcBLen >= 4U) + { + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize2; + + while (blkCnt > 0U) + { + /* Accumulator is made zero for every iteration */ + sum = 0.; + + /* Initialize blkCnt with number of samples */ + k = srcBLen; + + while (k > 0U) + { + /* Perform the multiply-accumulate */ + sum += *px++ * *py++; + + /* Decrement the loop counter */ + k--; + } + + /* Store the result in the accumulator in the destination buffer. */ + *pOut = sum; + + /* Destination pointer is updated according to the address modifier, inc */ + pOut += inc; + + /* Increment the pointer pIn1 index, count by 1 */ + count++; + + /* Update the inputA and inputB pointers for next MAC calculation */ + px = pIn1 + count; + py = pIn2; + + /* Decrement the loop counter */ + blkCnt--; + } + } + else + { + /* If the srcBLen is not a multiple of 4, + * the blockSize2 loop cannot be unrolled by 4 */ + blkCnt = blockSize2; + + while (blkCnt > 0U) + { + /* Accumulator is made zero for every iteration */ + sum = 0.; + + /* Loop over srcBLen */ + k = srcBLen; + + while (k > 0U) + { + /* Perform the multiply-accumulate */ + sum += *px++ * *py++; + + /* Decrement the loop counter */ + k--; + } + + /* Store the result in the accumulator in the destination buffer. 
*/ + *pOut = sum; + /* Destination pointer is updated according to the address modifier, inc */ + pOut += inc; + + /* Increment the pointer pIn1 index, count by 1 */ + count++; + + /* Update the inputA and inputB pointers for next MAC calculation */ + px = pIn1 + count; + py = pIn2; + + /* Decrement the loop counter */ + blkCnt--; + } + } + + + /* -------------------------- + * Initializations of stage3 + * -------------------------*/ + + /* sum += x[srcALen-srcBLen+1] * y[0] + x[srcALen-srcBLen+2] * y[1] +...+ x[srcALen-1] * y[srcBLen-1] + * sum += x[srcALen-srcBLen+2] * y[0] + x[srcALen-srcBLen+3] * y[1] +...+ x[srcALen-1] * y[srcBLen-1] + * .... + * sum += x[srcALen-2] * y[0] + x[srcALen-1] * y[1] + * sum += x[srcALen-1] * y[0] + */ + + /* In this stage the MAC operations are decreased by 1 for every iteration. + The count variable holds the number of MAC operations performed */ + count = srcBLen - 1U; + + /* Working pointer of inputA */ + pSrc1 = pIn1 + (srcALen - (srcBLen - 1U)); + px = pSrc1; + + /* Working pointer of inputB */ + py = pIn2; + + /* ------------------- + * Stage3 process + * ------------------*/ + + while (blockSize3 > 0U) + { + /* Accumulator is made zero for every iteration */ + sum = 0.; + + /* Initialize blkCnt with number of samples */ + k = count; + + while (k > 0U) + { + /* Perform the multiply-accumulate */ + sum += *px++ * *py++; + + /* Decrement loop counter */ + k--; + } + + /* Store the result in the accumulator in the destination buffer. */ + *pOut = sum; + /* Destination pointer is updated according to the address modifier, inc */ + pOut += inc; + + /* Update the inputA and inputB pointers for next MAC calculation */ + px = ++pSrc1; + py = pIn2; + + /* Decrement MAC count */ + count--; + + /* Decrement the loop counter */ + blockSize3--; + } +} + +/** + @} end of Corr group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_opt_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_opt_q15.c index 71f01a9..2f655d7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_opt_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_opt_q15.c @@ -5,13 +5,13 @@ * Title: arm_correlate_fast_opt_q15.c * Description: Fast Q15 Correlation * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
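As a usage sketch for the arm_correlate_f64 routine added above: the destination buffer must hold 2 * max(srcALen, srcBLen) - 1 samples, and clearing it first keeps the zero-padded lags at zero, per the library's note that pDst should be initialized to all zeros. Buffer names and lengths here are illustrative.

    #include <string.h>
    #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h"

    void correlate_f64_example(void)
    {
        static const float64_t a[8] = { 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0 };
        static const float64_t b[4] = { 0.25, 0.5, 0.75, 1.0 };
        float64_t result[2 * 8 - 1];           /* 2 * max(srcALen, srcBLen) - 1 samples */

        memset(result, 0, sizeof(result));     /* keep the zero-padded lags at zero */
        arm_correlate_f64(a, 8U, b, 4U, result);
    }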
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_q15.c index 970c7aa..ecb26da 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_q15.c @@ -5,13 +5,13 @@ * Title: arm_correlate_fast_q15.c * Description: Fast Q15 Correlation * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_q31.c index a2967d9..5747e13 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_fast_q31.c @@ -5,13 +5,13 @@ * Title: arm_correlate_fast_q31.c * Description: Fast Q31 Correlation * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_opt_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_opt_q15.c index c7d0dd1..5283f24 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_opt_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_opt_q15.c @@ -5,13 +5,13 @@ * Title: arm_correlate_opt_q15.c * Description: Correlation of Q15 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_opt_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_opt_q7.c index db70a77..0cab9f2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_opt_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_opt_q7.c @@ -5,13 +5,13 @@ * Title: arm_correlate_opt_q7.c * Description: Correlation of Q7 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q15.c index b7882cc..aa8bc35 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q15.c @@ -5,13 +5,13 @@ * Title: arm_correlate_q15.c * Description: Correlation of Q15 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -884,7 +884,7 @@ void arm_correlate_q15( if (((i - j) < srcBLen) && (j < srcALen)) { /* z[i] += x[i-j] * y[j] */ - sum += ((q31_t) pIn1[j] * pIn2[-((int32_t) i - j)]); + sum += ((q31_t) pIn1[j] * pIn2[-((int32_t) i - (int32_t) j)]); } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q31.c index 44d2f27..4aa50da 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q31.c @@ -5,13 +5,13 @@ * Title: arm_correlate_q31.c * Description: Correlation of Q31 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -860,7 +860,7 @@ void arm_correlate_q31( if (((i - j) < srcBLen) && (j < srcALen)) { /* z[i] += x[i-j] * y[j] */ - sum += ((q63_t) pIn1[j] * pIn2[-((int32_t) i - j)]); + sum += ((q63_t) pIn1[j] * pIn2[-((int32_t) i - (int32_t) j)]); } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q7.c index 4ff13c4..095ec99 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_correlate_q7.c @@ -5,13 +5,13 @@ * Title: arm_correlate_q7.c * Description: Correlation of Q7 sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -983,7 +983,7 @@ void arm_correlate_q7( if (((i - j) < srcBLen) && (j < srcALen)) { /* z[i] += x[i-j] * y[j] */ - sum += ((q15_t) pIn1[j] * pIn2[-((int32_t) i - j)]); + sum += ((q15_t) pIn1[j] * pIn2[-((int32_t) i - (int32_t) j)]); } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_f32.c index 6bcf66f..cf641ec 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_decimate_f32.c * Description: FIR decimation for floating-point sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -144,7 +144,7 @@ void arm_fir_decimate_f32( uint32_t i, tapCnt, blkCnt, outBlockSize = blockSize / S->M; /* Loop counters */ uint32_t blkCntN4; const float32_t *px0, *px1, *px2, *px3; - f32x4_t accv, acc0v, acc1v, acc2v, acc3v; + f32x4_t accv = { 0 }, acc0v, acc1v, acc2v, acc3v; f32x4_t x0v, x1v, x2v, x3v; f32x4_t c0v; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_fast_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_fast_q15.c index 42fdade..66f0e90 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_fast_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_fast_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_decimate_fast_q15.c * Description: Fast Q15 FIR Decimator * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_fast_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_fast_q31.c index 61c7c27..6aa1a23 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_fast_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_fast_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_decimate_fast_q31.c * Description: Fast Q31 FIR Decimator * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_f32.c index 8e08403..c67b49c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_decimate_init_f32.c * Description: Floating-point FIR Decimator initialization function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_q15.c index 61562f9..9c4913f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_decimate_init_q15.c * Description: Initialization function for the Q15 FIR Decimator * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_q31.c index 04248e7..a4bb036 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_decimate_init_q31.c * Description: Initialization function for Q31 FIR Decimation filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_q15.c index 419c544..cd03e0c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_decimate_q15.c * Description: Q15 FIR Decimator * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_q31.c index 0eb7123..d104b35 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_decimate_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_decimate_q31.c * Description: Q31 FIR Decimator * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f16.c index ff74a44..28a974e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f16.c @@ -5,10 +5,13 @@ * Title: arm_fir_f16.c * Description: Floating-point FIR filter processing function * - * Target Processor: Cortex-M cores + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -58,6 +61,7 @@ vecAcc0 = vfmaq(vecAcc0, vecIn0, c[i]); \ } +#define NB_TAPS 4 __STATIC_INLINE void arm_fir_f16_1_4_mve(const arm_fir_instance_f16 * S, const float16_t * __restrict pSrc, float16_t * __restrict pDst, uint32_t blockSize) @@ -73,7 +77,6 @@ __STATIC_INLINE void arm_fir_f16_1_4_mve(const arm_fir_instance_f16 * S, int32_t blkCnt; float16x8_t vecIn0; float16x8_t vecAcc0; - const int NB_TAPS=4; float16_t c[NB_TAPS]; @@ -146,8 +149,9 @@ __STATIC_INLINE void arm_fir_f16_1_4_mve(const arm_fir_instance_f16 * S, } } +#undef NB_TAPS - +#define NB_TAPS 8 __STATIC_INLINE void arm_fir_f16_5_8_mve(const arm_fir_instance_f16 * S, const float16_t * __restrict pSrc, float16_t * __restrict pDst, uint32_t blockSize) @@ -163,7 +167,6 @@ __STATIC_INLINE void arm_fir_f16_5_8_mve(const arm_fir_instance_f16 * S, int32_t blkCnt; float16x8_t vecIn0; float16x8_t vecAcc0; - const int NB_TAPS=8; float16_t c[NB_TAPS]; @@ -236,7 +239,7 @@ __STATIC_INLINE void arm_fir_f16_5_8_mve(const arm_fir_instance_f16 * S, } } - +#undef NB_TAPS void arm_fir_f16(const arm_fir_instance_f16 * S, const float16_t * pSrc, @@ -871,7 +874,7 @@ void arm_fir_f16( while (i > 0U) { /* acc = b[numTaps-1] * x[n-numTaps-1] + b[numTaps-2] * x[n-numTaps-2] + b[numTaps-3] * x[n-numTaps-3] +...+ b[0] * x[0] */ - acc0 += *px++ * *pb++; + acc0 += (_Float16)*px++ * (_Float16)*pb++; i--; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f32.c index d213bc4..8fcc5ae 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_f32.c * Description: Floating-point FIR filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -99,13 +99,23 @@ where numTaps is the number of filter coefficients in the filter; pState is the address of the state buffer; pCoeffs is the address of the coefficient buffer. @par Initialization of Helium version - For Helium version the array of coefficients must be a multiple of 16 even if less - then 16 coefficients are used. The additional coefficients must be set to 0. - It does not mean that all the coefficients will be used in the filter (numTaps - is still set to its right value in the init function.) It just means that + For Helium version the array of coefficients must be padded with zero to contain + a full number of lanes. + + The array length L must be a multiple of x. L = x * a : + - x is 4 for f32 + - x is 4 for q31 + - x is 4 for f16 (so managed like the f32 version and not like the q15 one) + - x is 8 for q15 + - x is 16 for q7 + + The additional coefficients + (x * a - numTaps) must be set to 0. + numTaps is still set to its right value in the init function. 
It means that the implementation may require to read more coefficients due to the vectorization and to avoid having to manage too many different cases in the code. + @par Helium state buffer The state buffer must contain some additional temporary data used during the computation but which is not the state of the FIR. @@ -152,6 +162,7 @@ } +#define NB_TAPS 4 __STATIC_INLINE void arm_fir_f32_1_4_mve(const arm_fir_instance_f32 * S, const float32_t * __restrict pSrc, float32_t * __restrict pDst, uint32_t blockSize) @@ -168,7 +179,6 @@ __STATIC_INLINE void arm_fir_f32_1_4_mve(const arm_fir_instance_f32 * S, int32_t blkCnt; float32x4_t vecIn0; float32x4_t vecAcc0; - const int NB_TAPS=4; float32_t c[NB_TAPS]; const float32_t *pCoeffsCur = pCoeffs; @@ -235,8 +245,7 @@ __STATIC_INLINE void arm_fir_f32_1_4_mve(const arm_fir_instance_f32 * S, } while (blkCnt > 0); } - - +#undef NB_TAPS __STATIC_INLINE void arm_fir_f32_5_8_mve(const arm_fir_instance_f32 * S, const float32_t * __restrict pSrc, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f64.c new file mode 100644 index 0000000..2aaa4fb --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_f64.c @@ -0,0 +1,133 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_fir_f64.c + * Description: Floating-point FIR filter processing function + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h" + +/** + @ingroup groupFilters + */ + +/** + @addtogroup FIR + @{ + */ + +/** + @brief Processing function for floating-point FIR filter. 
+ @param[in] S points to an instance of the floating-point FIR filter structure + @param[in] pSrc points to the block of input data + @param[out] pDst points to the block of output data + @param[in] blockSize number of samples to process + @return none + */ + +void arm_fir_f64( + const arm_fir_instance_f64 * S, + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize) +{ + float64_t *pState = S->pState; /* State pointer */ + const float64_t *pCoeffs = S->pCoeffs; /* Coefficient pointer */ + float64_t *pStateCurnt; /* Points to the current sample of the state */ + float64_t *px; /* Temporary pointer for state buffer */ + const float64_t *pb; /* Temporary pointer for coefficient buffer */ + float64_t acc0; /* Accumulator */ + uint32_t numTaps = S->numTaps; /* Number of filter coefficients in the filter */ + uint32_t i, tapCnt, blkCnt; /* Loop counters */ + + /* S->pState points to state array which contains previous frame (numTaps - 1) samples */ + /* pStateCurnt points to the location where the new input data should be written */ + pStateCurnt = &(S->pState[(numTaps - 1U)]); + + /* Initialize blkCnt with number of taps */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* Copy one sample at a time into state buffer */ + *pStateCurnt++ = *pSrc++; + + /* Set the accumulator to zero */ + acc0 = 0.; + + /* Initialize state pointer */ + px = pState; + + /* Initialize Coefficient pointer */ + pb = pCoeffs; + + i = numTaps; + + /* Perform the multiply-accumulates */ + while (i > 0U) + { + /* acc = b[numTaps-1] * x[n-numTaps-1] + b[numTaps-2] * x[n-numTaps-2] + b[numTaps-3] * x[n-numTaps-3] +...+ b[0] * x[0] */ + acc0 += *px++ * *pb++; + + i--; + } + + /* Store result in destination buffer. */ + *pDst++ = acc0; + + /* Advance state pointer by 1 for the next sample */ + pState = pState + 1U; + + /* Decrement loop counter */ + blkCnt--; + } + + /* Processing is complete. + Now copy the last numTaps - 1 samples to the start of the state buffer. + This prepares the state buffer for the next function call. */ + + /* Points to the start of the state buffer */ + pStateCurnt = S->pState; + + /* Initialize tapCnt with number of taps */ + tapCnt = (numTaps - 1U); + + /* Copy remaining data */ + while (tapCnt > 0U) + { + *pStateCurnt++ = *pState++; + + /* Decrement loop counter */ + tapCnt--; + } + +} + +/** +* @} end of FIR group +*/ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_fast_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_fast_q15.c index 0603ce3..d33fb86 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_fast_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_fast_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_fast_q15.c * Description: Q15 Fast FIR filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
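The arm_fir_f64 routine added above is the usual direct-form FIR: each output is the dot product of the last numTaps inputs with the coefficients, and pState must provide numTaps + blockSize - 1 samples of history. A hypothetical init-and-process sketch follows; the instance and buffer names are illustrative, and the arm_fir_init_f64 parameter types are assumed to match the other FIR init functions rather than spelled out in this hunk.

    #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h"

    #define FIR_NUM_TAPS   4U
    #define FIR_BLOCK_SIZE 32U

    /* Coefficients are stored in time-reversed order, i.e. {b[numTaps-1], ..., b[0]}. */
    static const float64_t firCoeffs[FIR_NUM_TAPS] = { 0.1, 0.2, 0.3, 0.4 };
    static float64_t firState[FIR_NUM_TAPS + FIR_BLOCK_SIZE - 1U];
    static arm_fir_instance_f64 firInstance;

    void fir_f64_example(const float64_t *input, float64_t *output)
    {
        /* Assumed signature, following the f32/q31 init functions. */
        arm_fir_init_f64(&firInstance, (uint16_t)FIR_NUM_TAPS, firCoeffs,
                         firState, FIR_BLOCK_SIZE);

        arm_fir_f64(&firInstance, input, output, FIR_BLOCK_SIZE);
    }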
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_fast_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_fast_q31.c index 991af2f..d50f463 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_fast_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_fast_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_fast_q31.c * Description: Processing function for the Q31 Fast FIR filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -60,7 +60,6 @@ Use function \ref arm_fir_init_q31() to initialize the filter structure. */ -IAR_ONLY_LOW_OPTIMIZATION_ENTER void arm_fir_fast_q31( const arm_fir_instance_q31 * S, const q31_t * pSrc, @@ -320,7 +319,6 @@ void arm_fir_fast_q31( } } -IAR_ONLY_LOW_OPTIMIZATION_EXIT /** @} end of FIR group */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f16.c index 9e52dc3..2bc43b5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f16.c @@ -5,10 +5,13 @@ * Title: arm_fir_init_f16.c * Description: Floating-point FIR filter initialization function * - * Target Processor: Cortex-M cores + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -56,13 +59,14 @@ pState points to the array of state variables. pState is of length numTaps+blockSize-1 samples (except for Helium - see below), where blockSize is the number of input samples processed by each call to arm_fir_f16(). @par Initialization of Helium version - For Helium version the array of coefficients must be a multiple of 16 even if less - then 16 coefficients are used. The additional coefficients must be set to 0. - It does not mean that all the coefficients will be used in the filter (numTaps - is still set to its right value in the init function.) It just means that + For Helium version the array of coefficients must be a multiple of 4 (4a) even if less + then 4a coefficients are defined in the FIR. The additional coefficients + (4a - numTaps) must be set to 0. + numTaps is still set to its right value in the init function. It means that the implementation may require to read more coefficients due to the vectorization and to avoid having to manage too many different cases in the code. 
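To make the Helium padding rule above concrete: with 6 real taps and f16 or f32 data (both use 4 lanes), the coefficient array is extended to the next multiple of 4, here 8, with trailing zeros, while numTaps passed to the init function stays 6. The sketch below is hypothetical and uses the f32 variant; the state buffer is sized with the generic numTaps + blockSize - 1 rule, and Helium builds need the extra working space described in the state-buffer note.

    #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h"

    #define REAL_NUM_TAPS  6U                       /* taps actually used by the filter */
    #define PADDED_LEN     8U                       /* next multiple of 4 lanes         */
    #define BLOCK_SIZE     16U

    /* The last (PADDED_LEN - REAL_NUM_TAPS) entries are the mandatory zero padding. */
    static const float32_t firCoeffs[PADDED_LEN] = {
        0.05f, 0.10f, 0.20f, 0.30f, 0.20f, 0.15f,   /* real coefficients */
        0.0f,  0.0f                                 /* zero padding      */
    };
    static float32_t firState[REAL_NUM_TAPS + BLOCK_SIZE - 1U]; /* larger on Helium targets */
    static arm_fir_instance_f32 fir;

    void fir_init_padding_example(void)
    {
        /* numTaps keeps its true value (6); only the coefficient storage is padded to 8. */
        arm_fir_init_f32(&fir, (uint16_t)REAL_NUM_TAPS, firCoeffs, firState, BLOCK_SIZE);
    }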
+ @par Helium state buffer The state buffer must contain some additional temporary data used during the computation but which is not the state of the FIR. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f32.c index 4dd4333..cbc3989 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_init_f32.c * Description: Floating-point FIR filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -57,10 +57,10 @@ pState points to the array of state variables and some working memory for the Helium version. pState is of length numTaps+blockSize-1 samples (except for Helium - see below), where blockSize is the number of input samples processed by each call to arm_fir_f32(). @par Initialization of Helium version - For Helium version the array of coefficients must be a multiple of 16 even if less - then 16 coefficients are used. The additional coefficients must be set to 0. - It does not mean that all the coefficients will be used in the filter (numTaps - is still set to its right value in the init function.) It just means that + For Helium version the array of coefficients must be a multiple of 4 (4a) even if less + then 4a coefficients are defined in the FIR. The additional coefficients + (4a - numTaps) must be set to 0. + numTaps is still set to its right value in the init function. It means that the implementation may require to read more coefficients due to the vectorization and to avoid having to manage too many different cases in the code. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f64.c new file mode 100644 index 0000000..16ca036 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_f64.c @@ -0,0 +1,88 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_fir_init_f64.c + * Description: Floating-point FIR filter initialization function + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h" + +/** + @ingroup groupFilters + */ + +/** + @addtogroup FIR + @{ + */ + +/** + @brief Initialization function for the floating-point FIR filter. + @param[in,out] S points to an instance of the floating-point FIR filter structure + @param[in] numTaps number of filter coefficients in the filter + @param[in] pCoeffs points to the filter coefficients buffer + @param[in] pState points to the state buffer + @param[in] blockSize number of samples processed per call + @return none + + @par Details + pCoeffs points to the array of filter coefficients stored in time reversed order: +
+  <pre>
+      {b[numTaps-1], b[numTaps-2], b[N-2], ..., b[1], b[0]}
+  </pre>
+ @par + pState points to the array of state variables. + pState is of length numTaps+blockSize-1 samples, where blockSize is the number of input samples processed by each call to arm_fir_f64(). + + @par + There is no Helium version of the fir F64. + + */ + +void arm_fir_init_f64( + arm_fir_instance_f64 * S, + uint16_t numTaps, + const float64_t * pCoeffs, + float64_t * pState, + uint32_t blockSize) +{ + /* Assign filter taps */ + S->numTaps = numTaps; + + /* Assign coefficient pointer */ + S->pCoeffs = pCoeffs; + + /* Clear state buffer. The size is always (blockSize + numTaps - 1) */ + memset(pState, 0, (numTaps + (blockSize - 1U)) * sizeof(float64_t)); + /* Assign state pointer */ + S->pState = pState; +} + +/** + @} end of FIR group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q15.c index 605aff1..6853f1f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_init_q15.c * Description: Q15 FIR filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -75,6 +75,14 @@ pState points to the array of state variables. pState is of length numTaps+blockSize, when running on Cortex-M4 and Cortex-M3 and is of length numTaps+blockSize-1, when running on Cortex-M0 where blockSize is the number of input samples processed by each call to arm_fir_q15(). + + @par Initialization of Helium version + For Helium version the array of coefficients must be a multiple of 8 (8a) even if less + then 8a coefficients are defined in the FIR. The additional coefficients + (8a - numTaps) must be set to 0. + numTaps is still set to its right value in the init function. It means that + the implementation may require to read more coefficients due to the vectorization and + to avoid having to manage too many different cases in the code. */ arm_status arm_fir_init_q15( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q31.c index df552ae..de44f74 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_init_q31.c * Description: Q31 FIR filter initialization function. * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -57,10 +57,10 @@ pState is of length numTaps+blockSize-1 samples (except for Helium - see below), where blockSize is the number of input samples processed by each call to arm_fir_q31(). @par Initialization of Helium version - For Helium version the array of coefficients must be a multiple of 16 even if less - then 16 coefficients are used. The additional coefficients must be set to 0. - It does not mean that all the coefficients will be used in the filter (numTaps - is still set to its right value in the init function.) It just means that + For Helium version the array of coefficients must be a multiple of 4 (4a) even if less + then 4a coefficients are defined in the FIR. The additional coefficients + (4a - numTaps) must be set to 0. + numTaps is still set to its right value in the init function. It means that the implementation may require to read more coefficients due to the vectorization and to avoid having to manage too many different cases in the code. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q7.c index 5101d72..db14670 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_init_q7.c @@ -5,13 +5,13 @@ * Title: arm_fir_init_q7.c * Description: Q7 FIR filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -56,6 +56,15 @@ @par pState points to the array of state variables. pState is of length numTaps+blockSize-1 samples, where blockSize is the number of input samples processed by each call to arm_fir_q7(). + + @par Initialization of Helium version + For Helium version the array of coefficients must be a multiple of 16 (16a) even if less + then 16a coefficients are defined in the FIR. The additional coefficients + (16a - numTaps) must be set to 0. + numTaps is still set to its right value in the init function. It means that + the implementation may require to read more coefficients due to the vectorization and + to avoid having to manage too many different cases in the code. 
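The fixed-point variants follow the same caller-side pattern as the f16 sketch shown earlier; only the rounding multiple changes (4 for q31, 8 for q15, 16 for q7, per the notes added in this patch). The helper below is purely illustrative and not part of CMSIS-DSP; the q7 wrapper and its parameter names are assumptions made for the example.

#include <string.h>
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h"

/* Illustrative helper (not a CMSIS-DSP API): round a tap count up to the
 * coefficient-array length required by the Helium notes in this patch.
 * 'lanes' must be a power of two (4, 8 or 16). */
static inline uint32_t fir_padded_taps(uint32_t numTaps, uint32_t lanes)
{
    return (numTaps + lanes - 1U) & ~(lanes - 1U);
}

/* Q7 example: zero-pad the coefficient array to a multiple of 16 while still
 * passing the true numTaps to arm_fir_init_q7(). paddedCoeffs must hold at
 * least fir_padded_taps(numTaps, 16) entries. */
void fir_q7_setup(arm_fir_instance_q7 *S, const q7_t *coeffs, uint16_t numTaps,
                  q7_t *paddedCoeffs, q7_t *state, uint32_t blockSize)
{
    memset(paddedCoeffs, 0, fir_padded_taps(numTaps, 16U) * sizeof(q7_t));
    memcpy(paddedCoeffs, coeffs, numTaps * sizeof(q7_t));

    arm_fir_init_q7(S, numTaps, paddedCoeffs, state, blockSize);
}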
+ */ void arm_fir_init_q7( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_f32.c index c5a349b..ddff5c2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_interpolate_f32.c * Description: Floating-point FIR interpolation sequences * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -149,7 +149,7 @@ static void arm_fir_interpolate2_f32_mve( uint32_t blkCnt; /* Loop counters */ uint16_t phaseLen = S->phaseLength; /* Length of each polyphase filter component */ uint32_t strides[4] = { 0, 1 * 2, 2 * 2, 3 * 2 }; - uint32x4_t vec_strides0 = *(uint32x4_t *) strides; + uint32x4_t vec_strides0 = vld1q_u32(strides); uint32x4_t vec_strides1 = vec_strides0 + 1; f32x4_t acc0, acc1; @@ -273,8 +273,8 @@ void arm_fir_interpolate_f32( uint16_t phaseLen = S->phaseLength; /* Length of each polyphase filter component */ uint32_t strides[4] = { 0, 1 * S->L, 2 * S->L, 3 * S->L }; uint32_t stridesM[4] = { 4, 3, 2, 1 }; - uint32x4_t vec_stridesM = *(uint32x4_t *) stridesM; - uint32x4_t vec_strides = *(uint32x4_t *) strides; + uint32x4_t vec_stridesM = vld1q_u32(stridesM); + uint32x4_t vec_strides = vld1q_u32(strides); f32x4_t acc; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_f32.c index b135fa9..cfbf102 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_interpolate_init_f32.c * Description: Floating-point FIR interpolator initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_q15.c index 4cd35cb..f016592 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_interpolate_init_q15.c * Description: Q15 FIR interpolator initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_q31.c index 682ba10..cd40905 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_interpolate_init_q31.c * Description: Q31 FIR interpolator initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_q15.c index de3d48e..21691ee 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_interpolate_q15.c * Description: Q15 FIR interpolation * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_q31.c index 4e737da..edd0c70 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_interpolate_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_interpolate_q31.c * Description: Q31 FIR interpolation * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -73,7 +73,7 @@ void arm_fir_interpolate_q31( uint32_t i, blkCnt; /* Loop counters */ uint16_t phaseLen = S->phaseLength; /* Length of each polyphase filter component */ uint32_t strides[4] = { 0, 1 * S->L, 2 * S->L, 3 * S->L }; - uint32x4_t vec_strides0 = *(uint32x4_t *) strides; + uint32x4_t vec_strides0 = vld1q_u32(strides); uint32x4_t vec_strides1 = vec_strides0 + 1; uint32x4_t vec_strides2 = vec_strides0 + 2; uint32x4_t vec_strides3 = vec_strides0 + 3; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_f32.c index 0f28abe..9655bb0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_lattice_f32.c * Description: Processing function for floating-point FIR Lattice filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,6 +37,9 @@ /** @defgroup FIR_Lattice Finite Impulse Response (FIR) Lattice Filters + @deprecated Those functions are no more tested nor maintained and will be removed in + a future version. + This set of functions implements Finite Impulse Response (FIR) lattice filters for Q15, Q31 and floating-point data types. Lattice filters are used in a variety of adaptive filter applications. 
The filter structure is feedforward and diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_f32.c index 720dd17..2e7b6a4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_lattice_init_f32.c * Description: Floating-point FIR Lattice filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_q15.c index 7743ebd..27fe5ca 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_lattice_init_q15.c * Description: Q15 FIR Lattice filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_q31.c index e85c34a..c2f29d2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_lattice_init_q31.c * Description: Q31 FIR lattice filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_q15.c index ec87561..dbb91c2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_lattice_q15.c * Description: Q15 FIR lattice filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_q31.c index ecf5880..e5de1f7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_lattice_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_lattice_q31.c * Description: Q31 FIR lattice filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q15.c index e1531b6..f197d15 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_q15.c * Description: Q15 FIR filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -88,14 +88,13 @@ uint32_t numTaps = S->numTaps; /* Number of filter coefficients in the filter */\ int32_t blkCnt; \ q15x8_t vecIn0; \ - const int32_t nbVecTaps = (NBTAPS / 8); \ \ /* \ * load coefs \ */ \ - q15x8_t vecCoeffs[nbVecTaps]; \ + q15x8_t vecCoeffs[NBVECTAPS]; \ \ - for (int i = 0; i < nbVecTaps; i++) \ + for (int i = 0; i < NBVECTAPS; i++) \ vecCoeffs[i] = vldrhq_s16(pCoeffs + 8 * i); \ \ /* \ @@ -116,7 +115,7 @@ pStateCur += 4; \ pTempSrc += 4; \ \ - FIR_Q15_CORE(pOutput, 4, nbVecTaps, pSamples, vecCoeffs); \ + FIR_Q15_CORE(pOutput, 4, NBVECTAPS, pSamples, vecCoeffs); \ pSamples += 4; \ \ blkCnt--; \ @@ -128,7 +127,7 @@ for (int i = 0; i < residual; i++) \ *pStateCur++ = *pTempSrc++; \ \ - FIR_Q15_CORE(pOutput, residual, nbVecTaps, pSamples, vecCoeffs); \ + FIR_Q15_CORE(pOutput, residual, NBVECTAPS, pSamples, vecCoeffs); \ \ /* \ * Copy the samples back into the history buffer start \ @@ -158,7 +157,9 @@ static void arm_fir_q15_25_32_mve(const arm_fir_instance_q15 * S, q15_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 32 + #define NBVECTAPS (NBTAPS / 8) FIR_Q15_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -167,7 +168,9 @@ static void arm_fir_q15_17_24_mve(const arm_fir_instance_q15 * S, q15_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 24 + #define NBVECTAPS (NBTAPS / 8) FIR_Q15_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -177,7 +180,9 @@ static void arm_fir_q15_9_16_mve(const arm_fir_instance_q15 * S, q15_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 16 + #define NBVECTAPS (NBTAPS / 8) FIR_Q15_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -186,7 +191,9 @@ static void arm_fir_q15_1_8_mve(const arm_fir_instance_q15 * S, q15_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 8 + #define NBVECTAPS (NBTAPS / 8) FIR_Q15_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -520,7 +527,7 @@ void arm_fir_q15( while (tapCnt > 0U) { /* Read the first two coefficients using SIMD: b[N] and b[N-1] coefficients */ - c0 = read_q15x2_ia ((q15_t **) &pb); + c0 = read_q15x2_ia (&pb); /* acc0 += b[N] * x[n-N] + b[N-1] * x[n-N-1] */ acc0 = __SMLALD(x0, c0, acc0); @@ -552,7 +559,7 @@ void arm_fir_q15( acc3 = __SMLALDX(x1, c0, acc3); /* Read coefficients b[N-2], b[N-3] */ - c0 = read_q15x2_ia ((q15_t **) &pb); + c0 = read_q15x2_ia (&pb); /* acc0 += b[N-2] * x[n-N-2] + b[N-3] * x[n-N-3] */ acc0 = __SMLALD(x2, c0, acc0); @@ -585,7 +592,7 @@ void arm_fir_q15( if ((numTaps & 0x3U) != 0U) { /* Read last two coefficients */ - c0 = read_q15x2_ia ((q15_t **) &pb); + c0 = read_q15x2_ia (&pb); /* Perform the multiply-accumulates */ acc0 = __SMLALD(x0, c0, acc0); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q31.c index 0b02824..16bd7e9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_q31.c * Description: Q31 FIR filter processing function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -119,14 +119,13 @@ q31_t *pTempDest; /* Temporary pointer to the destination buffer */\ uint32_t numTaps = S->numTaps; /* Number of filter coefficients in the filter */\ int32_t blkCnt; \ - const int32_t nbVecTaps = (NBTAPS / 4); \ \ /* \ * load coefs \ */ \ - q31x4_t vecCoeffs[nbVecTaps]; \ + q31x4_t vecCoeffs[NBVECTAPS]; \ \ - for (int i = 0; i < nbVecTaps; i++) \ + for (int i = 0; i < NBVECTAPS; i++) \ vecCoeffs[i] = vld1q(pCoeffs + 4 * i); \ \ /* \ @@ -147,7 +146,7 @@ pStateCur += 4; \ pTempSrc += 4; \ \ - FIR_Q31_CORE(4, nbVecTaps, pSamples, vecCoeffs); \ + FIR_Q31_CORE(4, NBVECTAPS, pSamples, vecCoeffs); \ \ pSamples += 4; \ /* \ @@ -164,7 +163,7 @@ for (int i = 0; i < residual; i++) \ *pStateCur++ = *pTempSrc++; \ \ - FIR_Q31_CORE(3, nbVecTaps, pSamples, vecCoeffs); \ + FIR_Q31_CORE(3, NBVECTAPS, pSamples, vecCoeffs); \ } \ break; \ \ @@ -173,7 +172,7 @@ for (int i = 0; i < residual; i++) \ *pStateCur++ = *pTempSrc++; \ \ - FIR_Q31_CORE(2, nbVecTaps, pSamples, vecCoeffs); \ + FIR_Q31_CORE(2, NBVECTAPS, pSamples, vecCoeffs); \ } \ break; \ \ @@ -182,7 +181,7 @@ for (int i = 0; i < residual; i++) \ *pStateCur++ = *pTempSrc++; \ \ - FIR_Q31_CORE(1, nbVecTaps, pSamples, vecCoeffs); \ + FIR_Q31_CORE(1, NBVECTAPS, pSamples, vecCoeffs); \ } \ break; \ } \ @@ -384,7 +383,9 @@ static void arm_fir_q31_5_8_mve(const arm_fir_instance_q31 * S, q31_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 8 + #define NBVECTAPS (NBTAPS / 4) FIR_Q31_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -394,7 +395,9 @@ static void arm_fir_q31_9_12_mve(const arm_fir_instance_q31 * S, q31_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 12 + #define NBVECTAPS (NBTAPS / 4) FIR_Q31_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -404,7 +407,9 @@ static void arm_fir_q31_13_16_mve(const arm_fir_instance_q31 * S, q31_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 16 + #define NBVECTAPS (NBTAPS / 4) FIR_Q31_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -414,7 +419,9 @@ static void arm_fir_q31_17_20_mve(const arm_fir_instance_q31 * S, q31_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 20 + #define NBVECTAPS (NBTAPS / 4) FIR_Q31_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -424,7 +431,9 @@ static void arm_fir_q31_21_24_mve(const arm_fir_instance_q31 * S, q31_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 24 + #define NBVECTAPS (NBTAPS / 4) FIR_Q31_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -434,7 +443,9 @@ static void arm_fir_q31_25_28_mve(const arm_fir_instance_q31 * S, q31_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 28 + #define NBVECTAPS (NBTAPS / 4) FIR_Q31_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q7.c index 241f896..5966646 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q7.c +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_q7.c @@ -5,13 +5,13 @@ * Title: arm_fir_q7.c * Description: Q7 FIR filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -83,14 +83,13 @@ uint32_t numTaps = S->numTaps; /* Number of filter coefficients in the filter */\ int32_t blkCnt; \ q7x16_t vecIn0; \ - const int32_t nbVecTaps = (NBTAPS / 16); \ \ /* \ * load coefs \ */ \ - q7x16_t vecCoeffs[nbVecTaps]; \ + q7x16_t vecCoeffs[NBVECTAPS]; \ \ - for (int i = 0; i < nbVecTaps; i++) \ + for (int i = 0; i < NBVECTAPS; i++) \ vecCoeffs[i] = vldrbq_s8(pCoeffs + 16 * i); \ \ /* \ @@ -111,7 +110,7 @@ pStateCur += 4; \ pTempSrc += 4; \ \ - FIR_Q7_CORE(pOutput, 4, nbVecTaps, pSamples, vecCoeffs); \ + FIR_Q7_CORE(pOutput, 4, NBVECTAPS, pSamples, vecCoeffs); \ pSamples += 4; \ \ blkCnt--; \ @@ -123,7 +122,7 @@ for (int i = 0; i < residual; i++) \ *pStateCur++ = *pTempSrc++; \ \ - FIR_Q7_CORE(pOutput, residual, nbVecTaps, pSamples, vecCoeffs); \ + FIR_Q7_CORE(pOutput, residual, NBVECTAPS, pSamples, vecCoeffs); \ \ \ /* \ @@ -143,22 +142,50 @@ while (blkCnt > 0); \ } -static void arm_fir_q7_17_32_mve(const arm_fir_instance_q7 * S, + +static void arm_fir_q7_49_64_mve(const arm_fir_instance_q7 * S, + const q7_t * __restrict pSrc, + q7_t * __restrict pDst, uint32_t blockSize) +{ + #define NBTAPS 64 + #define NBVECTAPS (NBTAPS / 16) + FIR_Q7_MAIN_CORE(); + #undef NBVECTAPS + #undef NBTAPS +} + + +void arm_fir_q7_33_48_mve(const arm_fir_instance_q7 * S, + const q7_t * __restrict pSrc, + q7_t * __restrict pDst, uint32_t blockSize) +{ + #define NBTAPS 48 + #define NBVECTAPS (NBTAPS / 16) + FIR_Q7_MAIN_CORE(); + #undef NBVECTAPS + #undef NBTAPS +} + +static void arm_fir_q7_17_32_mve(const arm_fir_instance_q7 * S, const q7_t * __restrict pSrc, q7_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 32 + #define NBVECTAPS (NBTAPS / 16) FIR_Q7_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } -void arm_fir_q7_1_16_mve(const arm_fir_instance_q7 * S, - const q7_t * __restrict pSrc, +void arm_fir_q7_1_16_mve(const arm_fir_instance_q7 * S, + const q7_t * __restrict pSrc, q7_t * __restrict pDst, uint32_t blockSize) { #define NBTAPS 16 + #define NBVECTAPS (NBTAPS / 16) FIR_Q7_MAIN_CORE(); + #undef NBVECTAPS #undef NBTAPS } @@ -198,6 +225,22 @@ void arm_fir_q7( arm_fir_q7_17_32_mve(S, pSrc, pDst, blockSize); return; } + else if (numTaps <= 48) + { + /* + * [33 to 48 taps] specialized routine + */ + arm_fir_q7_33_48_mve(S, pSrc, pDst, blockSize); + return; + } + else if (numTaps <= 64) + { + /* + * [49 to 64 taps] specialized routine + */ + arm_fir_q7_49_64_mve(S, pSrc, pDst, blockSize); + return; + } /* * pState points to state array which contains previous frame (numTaps - 1) samples @@ -609,7 +652,7 @@ void arm_fir_q7( { acc0 += (q15_t) * (px++) * (*(pb++)); i--; - } + } /* The result is in 2.14 format. Convert to 1.7 Then store the output in the destination buffer. 
*/ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_f32.c index ca71b9a..b95ec65 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_sparse_f32.c * Description: Floating-point sparse FIR filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,6 +37,9 @@ /** @defgroup FIR_Sparse Finite Impulse Response (FIR) Sparse Filters + @deprecated Those functions are no more tested nor maintained and will be removed in + a future version. + This group of functions implements sparse FIR filters. Sparse FIR filters are equivalent to standard FIR filters except that most of the coefficients are equal to zero. Sparse filters are used for simulating reflections in communications and audio applications. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_f32.c index c3e134b..963c050 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_fir_sparse_init_f32.c * Description: Floating-point sparse FIR filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q15.c index 688bb0b..72ec65a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_sparse_init_q15.c * Description: Q15 sparse FIR filter initialization function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q31.c index fcb0153..509c85e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_sparse_init_q31.c * Description: Q31 sparse FIR filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q7.c index e2a437c..4f0f793 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_init_q7.c @@ -5,13 +5,13 @@ * Title: arm_fir_sparse_init_q7.c * Description: Q7 sparse FIR filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q15.c index 5b19f77..8784737 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q15.c @@ -5,13 +5,13 @@ * Title: arm_fir_sparse_q15.c * Description: Q15 sparse FIR filter processing function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q31.c index 04cc5ea..6524e26 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q31.c @@ -5,13 +5,13 @@ * Title: arm_fir_sparse_q31.c * Description: Q31 sparse FIR filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q7.c index 193bc2b..85ec295 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_fir_sparse_q7.c @@ -5,13 +5,13 @@ * Title: arm_fir_sparse_q7.c * Description: Q7 sparse FIR filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_f32.c index 5cf0548..4c48c85 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_f32.c @@ -5,13 +5,13 @@ * Title: arm_iir_lattice_f32.c * Description: Floating-point IIR Lattice filter processing function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_f32.c index 94ebb5d..d9922ec 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_iir_lattice_init_f32.c * Description: Floating-point IIR lattice filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_q15.c index 5f2b5e6..1dae546 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_iir_lattice_init_q15.c * Description: Q15 IIR lattice filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_q31.c index a14b217..779d09e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_iir_lattice_init_q31.c * Description: Initialization function for the Q31 IIR lattice filter * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_q15.c index 25ed237..2768ffa 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_q15.c @@ -5,13 +5,13 @@ * Title: arm_iir_lattice_q15.c * Description: Q15 IIR Lattice filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_q31.c index e5e6ee0..430c090 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_iir_lattice_q31.c @@ -5,13 +5,13 @@ * Title: arm_iir_lattice_q31.c * Description: Q31 IIR Lattice filter processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_f16.c new file mode 100644 index 0000000..5129666 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_f16.c @@ -0,0 +1,277 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_levinson_durbin_f16.c + * Description: f16 version of Levinson Durbin algorithm + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions_f16.h" + +/** + @ingroup groupFilters + */ + + + +/** + @addtogroup LD + @{ + */ + +/** + @brief Levinson Durbin + @param[in] phi autocovariance vector starting with lag 0 (length is nbCoefs + 1) + @param[out] a autoregressive coefficients + @param[out] err prediction error (variance) + @param[in] nbCoefs number of autoregressive coefficients + @return none + */ + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && defined(__CMSIS_GCC_H) +#pragma GCC warning "Scalar version of arm_levinson_durbin_f16 built. Helium version has build issues with gcc." 
+#endif + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +#define LANE4567_MASK 0xFF00 + +void arm_levinson_durbin_f16(const float16_t *phi, + float16_t *a, + float16_t *err, + int nbCoefs) +{ + _Float16 e; + static const uint16_t revOffsetArray[8] = {7,6,5,4,3,2,1,0}; + + a[0] = (_Float16)phi[1] / (_Float16)phi[0]; + + e = (_Float16)phi[0] - (_Float16)phi[1] * (_Float16)a[0]; + for(int p=1; p < nbCoefs; p++) + { + _Float16 suma = 0.0f16; + _Float16 sumb = 0.0f16; + f16x8_t vecA,vecRevPhi,vecPhi,vecSumA, vecSumB; + _Float16 k; + uint32_t blkCnt; + const float16_t *pPhi,*pRevPhi,*pA; + uint16x8_t revOffset; + + int nb,j,i; + + revOffset = vld1q(revOffsetArray); + vecSumA = vdupq_n_f16(0.0f16); + vecSumB = vdupq_n_f16(0.0f16); + + pRevPhi = &phi[p-7]; + pPhi = &phi[1]; + pA = a; + + i = 0; + blkCnt = p >> 3; + while(blkCnt > 0) + { + vecA = vld1q(pA); + pA += 8; + + vecPhi = vld1q(pPhi); + pPhi += 8; + + vecRevPhi = vldrhq_gather_shifted_offset_f16(pRevPhi,revOffset); + pRevPhi -= 8; + + vecSumA = vfmaq(vecSumA,vecA,vecRevPhi); + vecSumB = vfmaq(vecSumB,vecA,vecPhi); + + i += 8; + blkCnt--; + + } + + suma = vecAddAcrossF16Mve(vecSumA); + sumb = vecAddAcrossF16Mve(vecSumB); + + blkCnt = p & 7; + while(blkCnt > 0) + { + suma += (_Float16)a[i] * (_Float16)phi[p - i]; + sumb += (_Float16)a[i] * (_Float16)phi[i + 1]; + + i++; + blkCnt--; + } + + k = ((_Float16)phi[p+1] - suma)/((_Float16)phi[0] - sumb); + + f16x8_t vecRevA,tmp; + static int16_t orgOffsetArray[8]={0,1,2,3,-1,-2,-3,-4}; + static const int16_t offsetIncArray[8]={4,4,4,4,-4,-4,-4,-4}; + + uint16x8_t offset,offsetInc,vecTmp; + + + offset = vld1q_u16((uint16_t*)orgOffsetArray); + vecTmp = vdupq_n_u16(p); + + offset = vaddq_m_u16(offset,offset,vecTmp,LANE4567_MASK); + offsetInc = vld1q_u16((uint16_t*)offsetIncArray); + + nb = p >> 3; + j=0; + for(int i = 0; i < nb ; i++) + { + + /* + x0=a[j] - k * a[p-1-j]; + x1=a[j+1] - k * a[p-2-j]; + x3=a[p-1-j] - k * a[j]; + x4=a[p-2-j] - k * a[j+1]; + + a[j] = x0; + a[j+1] = x1; + a[p-1-j] = x2; + a[p-2-j] = x3; + */ + + uint64_t tmpa,tmpb; + vecA = vldrhq_gather_shifted_offset_f16(a,offset); + + + tmpa = vgetq_lane_u64((uint64x2_t)vecA,0); + tmpb = vgetq_lane_u64((uint64x2_t)vecA,1); + vecRevA = (f16x8_t) vsetq_lane_u64(tmpb,(uint64x2_t)vecRevA,0); + vecRevA = (f16x8_t) vsetq_lane_u64(tmpa,(uint64x2_t)vecRevA,1); + + + tmp = vsubq(vecA,vmulq_n_f16(vecRevA,k)); + vstrhq_scatter_shifted_offset_f16(a, offset, tmp); + + offset = vaddq(offset,offsetInc); + + j+=4; + + } + + blkCnt = p & 7; + + if (blkCnt) + { + nb = blkCnt >> 1; + for(int i =0;i < nb ; i++) + { + _Float16 x,y; + + x=(_Float16)a[j] - (_Float16)k * (_Float16)a[p-1-j]; + y=(_Float16)a[p-1-j] - (_Float16)k * (_Float16)a[j]; + + a[j] = x; + a[p-1-j] = y; + + j++; + } + + nb = blkCnt & 1; + if (nb) + { + a[j]=(_Float16)a[j]- (_Float16)k * (_Float16)a[p-1-j]; + } + } + + + a[p] = k; + e = e * (1.0f16 - k*k); + + + } + *err = e; +} + +#else + +#if defined(ARM_FLOAT16_SUPPORTED) + +void arm_levinson_durbin_f16(const float16_t *phi, + float16_t *a, + float16_t *err, + int nbCoefs) +{ + _Float16 e; + + a[0] = (_Float16)phi[1] / (_Float16)phi[0]; + + e = (_Float16)phi[0] - (_Float16)phi[1] * (_Float16)a[0]; + for(int p=1; p < nbCoefs; p++) + { + _Float16 suma=0.0f16; + _Float16 sumb=0.0f16; + _Float16 k; + int nb,j; + + for(int i=0; i < p; i++) + { + suma += (_Float16)a[i] * (_Float16)phi[p - i]; + sumb += (_Float16)a[i] * 
(_Float16)phi[i + 1]; + } + + k = ((_Float16)phi[p+1]-suma)/((_Float16)phi[0] - sumb); + + + nb = p >> 1; + j=0; + for(int i =0;i < nb ; i++) + { + _Float16 x,y; + + x=(_Float16)a[j] - (_Float16)k * (_Float16)a[p-1-j]; + y=(_Float16)a[p-1-j] - (_Float16)k * (_Float16)a[j]; + + a[j] = x; + a[p-1-j] = y; + + j++; + } + + nb = p & 1; + if (nb) + { + a[j]=(_Float16)a[j]- (_Float16)k * (_Float16)a[p-1-j]; + } + + a[p] = k; + e = e * (1.0f16 - k*k); + + + } + *err = e; +} +#endif /* defined(ARM_FLOAT16_SUPPORTED */ +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of LD group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_f32.c new file mode 100644 index 0000000..0c4e650 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_f32.c @@ -0,0 +1,283 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_levinson_durbin_f32.c + * Description: f32 version of Levinson Durbin algorithm + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h" + +/** + @ingroup groupFilters + */ + +/** + @defgroup LD Levinson Durbin Algorithm + + */ + +/** + @addtogroup LD + @{ + */ + +/** + @brief Levinson Durbin + @param[in] phi autocovariance vector starting with lag 0 (length is nbCoefs + 1) + @param[out] a autoregressive coefficients + @param[out] err prediction error (variance) + @param[in] nbCoefs number of autoregressive coefficients + @return none + */ + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && defined(__CMSIS_GCC_H) +#pragma GCC warning "Scalar version of arm_levinson_durbin_f32 built. Helium version has build issues with gcc." 
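For reference, a minimal usage sketch of the scalar API defined in this file, with illustrative values; it assumes the prototype is exposed by the filtering_functions header that this translation unit already includes:

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h"

/* Fit a 2-coefficient AR model from an autocovariance sequence phi[0..2]
 * (lag 0 first, as documented above). Values are illustrative only. */
void levinson_durbin_usage_example(void)
{
    const float32_t phi[3] = { 1.0f, 0.5f, 0.1f };
    float32_t a[2];   /* autoregressive coefficients a[0], a[1] */
    float32_t err;    /* final prediction error (variance) */

    arm_levinson_durbin_f32(phi, a, &err, 2);
}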
+#endif + +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +#define LANE23_MASK 0xFF00 + +void arm_levinson_durbin_f32(const float32_t *phi, + float32_t *a, + float32_t *err, + int nbCoefs) +{ + float32_t e; + static const uint32_t revOffsetArray[4] = {3,2,1,0}; + + a[0] = phi[1] / phi[0]; + + e = phi[0] - phi[1] * a[0]; + for(int p=1; p < nbCoefs; p++) + { + float32_t suma = 0.0f; + float32_t sumb = 0.0f; + f32x4_t vecA,vecRevPhi,vecPhi,vecSumA, vecSumB; + float32_t k; + uint32_t blkCnt; + const float32_t *pPhi,*pRevPhi,*pA; + uint32x4_t revOffset; + + int nb,j,i; + + revOffset = vld1q(revOffsetArray); + vecSumA = vdupq_n_f32(0.0f); + vecSumB = vdupq_n_f32(0.0f); + + pRevPhi = &phi[p-3]; + pPhi = &phi[1]; + pA = a; + + i = 0; + blkCnt = p >> 2; + while(blkCnt > 0) + { + vecA = vld1q(pA); + pA += 4; + + vecPhi = vld1q(pPhi); + pPhi += 4; + + vecRevPhi = vldrwq_gather_shifted_offset_f32(pRevPhi,revOffset); + pRevPhi -= 4; + + vecSumA = vfmaq(vecSumA,vecA,vecRevPhi); + vecSumB = vfmaq(vecSumB,vecA,vecPhi); + + i += 4; + blkCnt--; + + } + + suma = vecAddAcrossF32Mve(vecSumA); + sumb = vecAddAcrossF32Mve(vecSumB); + + blkCnt = p & 3; + while(blkCnt > 0) + { + suma += a[i] * phi[p - i]; + sumb += a[i] * phi[i + 1]; + + i++; + blkCnt--; + } + + k = (phi[p+1] - suma)/(phi[0] - sumb); + + f32x4_t vecRevA,tmp; + static int32_t orgOffsetArray[4]={0,1,-1,-2}; + static const int32_t offsetIncArray[4]={2,2,-2,-2}; + + uint32x4_t offset,offsetInc,vecTmp; + + + offset = vld1q_u32((uint32_t*)orgOffsetArray); + vecTmp = vdupq_n_u32(p); + + offset = vaddq_m_u32(offset,offset,vecTmp,LANE23_MASK); + offsetInc = vld1q_u32((uint32_t*)offsetIncArray); + + nb = p >> 2; + j=0; + for(int i = 0; i < nb ; i++) + { + + /* + x0=a[j] - k * a[p-1-j]; + x1=a[j+1] - k * a[p-2-j]; + x3=a[p-1-j] - k * a[j]; + x4=a[p-2-j] - k * a[j+1]; + + a[j] = x0; + a[j+1] = x1; + a[p-1-j] = x2; + a[p-2-j] = x3; + */ + + uint64_t tmpa,tmpb; + vecA = vldrwq_gather_shifted_offset_f32(a,offset); + + + tmpa = vgetq_lane_u64((uint64x2_t)vecA,0); + tmpb = vgetq_lane_u64((uint64x2_t)vecA,1); + vecRevA = (f32x4_t) vsetq_lane_u64(tmpb,(uint64x2_t)vecRevA,0); + vecRevA = (f32x4_t) vsetq_lane_u64(tmpa,(uint64x2_t)vecRevA,1); + + + tmp = vsubq(vecA,vmulq_n_f32(vecRevA,k)); + vstrwq_scatter_shifted_offset_f32(a, offset, tmp); + + offset = vaddq(offset,offsetInc); + + j+=2; + + } + + switch(p & 3) + { + case 3: + { + float32_t x,y; + x = a[j] - k * a[p-1-j]; + y = a[p-1-j] - k * a[j]; + + a[j] = x; + a[p-1-j] = y; + + a[j+1] = a[j+1] - k * a[p-1-(j+1)]; + } + break; + + case 2: + { + float32_t x,y; + x = a[j] - k * a[p-1-j]; + y = a[p-1-j] - k * a[j]; + + a[j] = x; + a[p-1-j] = y; + } + break; + + case 1: + a[j] = a[j]- k * a[p-1-j]; + break; + } + + a[p] = k; + e = e * (1.0f - k*k); + + + } + *err = e; +} + +#else +void arm_levinson_durbin_f32(const float32_t *phi, + float32_t *a, + float32_t *err, + int nbCoefs) +{ + float32_t e; + int p; + + a[0] = phi[1] / phi[0]; + + e = phi[0] - phi[1] * a[0]; + for(p=1; p < nbCoefs; p++) + { + float32_t suma=0.0f; + float32_t sumb=0.0f; + float32_t k; + int nb,j,i; + + for(i=0; i < p; i++) + { + suma += a[i] * phi[p - i]; + sumb += a[i] * phi[i + 1]; + } + + k = (phi[p+1]-suma)/(phi[0] - sumb); + + + nb = p >> 1; + j=0; + for(i =0; i < nb ; i++) + { + float32_t x,y; + + x=a[j] - k * a[p-1-j]; + y=a[p-1-j] - k * a[j]; + + a[j] = x; + a[p-1-j] = y; + + j++; + } + + nb = p & 1; + if (nb) + { + a[j]=a[j]- k 
* a[p-1-j]; + } + + a[p] = k; + e = e * (1.0f - k*k); + + + } + *err = e; +} +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ + +/** + @} end of LD group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_q31.c new file mode 100644 index 0000000..b38b792 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_levinson_durbin_q31.c @@ -0,0 +1,380 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_levinson_durbin_q31.c + * Description: q31 version of Levinson Durbin algorithm + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/filtering_functions.h" + +#define ONE_Q31 0x7FFFFFFFL +#define TWO_Q30 0x7FFFFFFFL + +#define HALF_Q31 0x00008000L +#define ONE_Q15 0x7FFF +#define HALF_Q15 0x3FFF +#define LOWPART_MASK 0x07FFF + +__STATIC_FORCEINLINE q31_t mul32x16(q31_t a, q15_t b) +{ + q31_t r = ((q63_t)a * (q63_t)b) >> 15; + + return(r); + +} + +__STATIC_FORCEINLINE q31_t mul32x32(q31_t a, q31_t b) +{ + //q31_t r = __SSAT(((q63_t)a * b) >> 31,31); + q31_t r = ((q63_t)a * b) >> 31; + + return(r); + +} + +__STATIC_FORCEINLINE q31_t divide(q31_t n, q31_t d) +{ + arm_status status; + int16_t shift; + q15_t inverse; + q31_t r; + // We are computing: + // n / d = n / (h + l) where h and l are the high end and low end part. + // 1 / (h + l) = 1 / h (1 - l / h) + // Our division algorithm has a shift. So it is returning a scaled value sh. + // So we need a << shift to convert 1/ sh to 1/h. + // In below code, we are organizing the computation differently. Instead of computing: + // 1 / h (1 - l / h) + // we are computing + // 1 / h (2 - (l + h) / h) + // 1 / h (2 - d / h) + // Also, we are not computing 1/h in Q15 but in Q14. + // 2 is expressed in Q30. + // So at the end of all computation we need a << 2 + + // Result is in Q14 because of use of HALF_Q15 instead of ONE_Q15. + status=arm_divide_q15(HALF_Q15,d>>16,&inverse,&shift); + (void)status; + + // d is used instead of l + // So we will need to substract to 2 instead of 1. 
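+    // For intuition, with decimal values rather than the Q31 representation:
+    // take d = 0.6 split as h = 0.5 and l = 0.1. Then 1/h = 2 and
+    //   (1/h) * (2 - d/h) = 2 * (2 - 1.2) = 1.6,
+    // close to the exact reciprocal 1/0.6 = 1.666... This is the usual
+    // Newton-Raphson reciprocal refinement x1 = x0 * (2 - d * x0) seeded with
+    // x0 = 1/h; the fixed-point steps below perform the same computation, with
+    // `shift` carrying the scaling of the Q15 seed returned by arm_divide_q15().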
+ r = mul32x16(d,inverse); + r = TWO_Q30 - (r << shift); + r = mul32x16(r, inverse); + r = mul32x32(r,n) ; + r = r << (shift + 2); + + return(r); + +} + +/** + @ingroup groupFilters + */ + + + +/** + @addtogroup LD + @{ + */ + +/** + @brief Levinson Durbin + @param[in] phi autocovariance vector starting with lag 0 (length is nbCoefs + 1) + @param[out] a autoregressive coefficients + @param[out] err prediction error (variance) + @param[in] nbCoefs number of autoregressive coefficients + @return none + */ + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && defined(__CMSIS_GCC_H) +#pragma GCC warning "Scalar version of arm_levinson_durbin_q31 built. Helium version has build issues with gcc." +#endif + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H) + +#define LANE23_MASK 0xFF00 + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_levinson_durbin_q31(const q31_t *phi, + q31_t *a, + q31_t *err, + int nbCoefs) +{ + q31_t e; + + static const uint32_t revOffsetArray[4] = {3,2,1,0}; + + //a[0] = phi[1] / phi[0]; + a[0] = divide(phi[1], phi[0]); + + + //e = phi[0] - phi[1] * a[0]; + e = phi[0] - mul32x32(phi[1],a[0]); + + for(int p=1; p < nbCoefs; p++) + { + q63_t suma=0; + q63_t sumb=0; + q31x4_t vecA,vecRevPhi,vecPhi; + q31_t k; + uint32_t blkCnt; + const q31_t *pPhi,*pRevPhi,*pA; + uint32x4_t revOffset; + + + int nb,j,i; + + revOffset = vld1q(revOffsetArray); + + pRevPhi = &phi[p-3]; + pPhi = &phi[1]; + pA = a; + + i = 0; + blkCnt = p >> 2; + while(blkCnt > 0) + { + vecA = vld1q(pA); + pA += 4; + + vecPhi = vld1q(pPhi); + pPhi += 4; + + vecRevPhi = vldrwq_gather_shifted_offset_s32(pRevPhi,revOffset); + pRevPhi -= 4; + + suma = vmlaldavaq(suma,vecA,vecRevPhi); + sumb = vmlaldavaq(sumb,vecA,vecPhi); + + i += 4; + blkCnt--; + } + + + blkCnt = p & 3; + while(blkCnt > 0) + { + suma += ((q63_t)a[i] * phi[p - i]); + sumb += ((q63_t)a[i] * phi[i + 1]); + + i++; + blkCnt--; + } + + suma = asrl(suma, 31); + sumb = asrl(sumb, 31); + + + + //k = (phi[p+1]-suma)/(phi[0] - sumb); + k = divide(phi[p+1]-(q31_t)suma,phi[0] - (q31_t)sumb); + + q31x4_t vecRevA,tmp; + static int32_t orgOffsetArray[4]={0,1,-1,-2}; + static const int32_t offsetIncArray[4]={2,2,-2,-2}; + + uint32x4_t offset,offsetInc,vecTmp; + + + offset = vld1q_u32((uint32_t*)orgOffsetArray); + vecTmp = vdupq_n_u32(p); + + offset = vaddq_m_u32(offset,offset,vecTmp,LANE23_MASK); + offsetInc = vld1q_u32((uint32_t*)offsetIncArray); + + + nb = p >> 2; + j=0; + for(int i =0;i < nb ; i++) + { + /* + q31_t x0,x1,x2,x3; + + //x = a[j] - k * a[p-1-j]; + x0 = a[j] - mul32x32(k,a[p-1-j]); + x1 = a[j+1] - mul32x32(k,a[p-2-j]); + + //y = a[p-1-j] - k * a[j]; + x2 = a[p-1-j] - mul32x32(k , a[j]); + x3 = a[p-2-j] - mul32x32(k , a[j+1]); + + a[j] = x0; + a[j+1] = x1; + a[p-1-j] = x2; + a[p-2-j] = x3; + */ + + uint64_t tmpa,tmpb; + vecA = vldrwq_gather_shifted_offset_s32(a,offset); + + + tmpa = vgetq_lane_u64((uint64x2_t)vecA,0); + tmpb = vgetq_lane_u64((uint64x2_t)vecA,1); + vecRevA = (q31x4_t) vsetq_lane_u64(tmpb,(uint64x2_t)vecRevA,0); + vecRevA = (q31x4_t) vsetq_lane_u64(tmpa,(uint64x2_t)vecRevA,1); + + + tmp = vsubq(vecA,vqdmulhq_n_s32(vecRevA,k)); + vstrwq_scatter_shifted_offset_s32(a, offset, tmp); + + offset = vaddq(offset,offsetInc); + + j+=2; + } + + switch(p & 3) + { + case 3: + { + q31_t x,y; + + //x = a[j] - k * a[p-1-j]; + x = a[j] - mul32x32(k,a[p-1-j]); + + //y = a[p-1-j] - k * a[j]; + y = a[p-1-j] - mul32x32(k , a[j]); + + a[j] = x; + a[p-1-j] = y; + + //a[j] = 
a[j]- k * a[p-1-j]; + a[j+1] = a[j+1] - mul32x32(k,a[p-2-j]); + } + break; + + case 2: + { + q31_t x,y; + + //x = a[j] - k * a[p-1-j]; + x = a[j] - mul32x32(k,a[p-1-j]); + + //y = a[p-1-j] - k * a[j]; + y = a[p-1-j] - mul32x32(k , a[j]); + + a[j] = x; + a[p-1-j] = y; + } + break; + + case 1: + //a[j] = a[j]- k * a[p-1-j]; + a[j] = a[j] - mul32x32(k,a[p-1-j]); + break; + } + + a[p] = k; + + // e = e * (1 - k*k); + e = mul32x32(e,ONE_Q31 - mul32x32(k,k)); + + + } + *err = e; +} + +#else + +void arm_levinson_durbin_q31(const q31_t *phi, + q31_t *a, + q31_t *err, + int nbCoefs) +{ + q31_t e; + int p; + + //a[0] = phi[1] / phi[0]; + a[0] = divide(phi[1], phi[0]); + + + //e = phi[0] - phi[1] * a[0]; + e = phi[0] - mul32x32(phi[1],a[0]); + + for(p=1; p < nbCoefs; p++) + { + q63_t suma=0; + q63_t sumb=0; + q31_t k; + int nb,j,i; + + for(i=0; i < p; i++) + { + suma += ((q63_t)a[i] * phi[p - i]); + sumb += ((q63_t)a[i] * phi[i + 1]); + } + + suma = suma >> 31; + sumb = sumb >> 31; + + + + //k = (phi[p+1]-suma)/(phi[0] - sumb); + k = divide(phi[p+1]-(q31_t)suma,phi[0] - (q31_t)sumb); + + + nb = p >> 1; + j=0; + for(i =0;i < nb ; i++) + { + q31_t x,y; + + //x = a[j] - k * a[p-1-j]; + x = a[j] - mul32x32(k,a[p-1-j]); + + //y = a[p-1-j] - k * a[j]; + y = a[p-1-j] - mul32x32(k , a[j]); + + a[j] = x; + a[p-1-j] = y; + + j++; + } + + nb = p & 1; + if (nb) + { + //a[j] = a[j]- k * a[p-1-j]; + a[j] = a[j] - mul32x32(k,a[p-1-j]); + } + + a[p] = k; + + // e = e * (1 - k*k); + e = mul32x32(e,ONE_Q31 - mul32x32(k,k)); + + + } + *err = e; +} +#endif /* defined(ARM_MATH_MVEI) */ + +/** + @} end of LD group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_f32.c index ef2e832..865999f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_f32.c @@ -5,13 +5,13 @@ * Title: arm_lms_f32.c * Description: Processing function for the floating-point LMS filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_f32.c index e4e53a0..8d8e144 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_lms_init_f32.c * Description: Floating-point LMS filter initialization function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_q15.c index 4918436..871caa0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_lms_init_q15.c * Description: Q15 LMS filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_q31.c index 42d76f5..f4482d3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_lms_init_q31.c * Description: Q31 LMS filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_f32.c index e269d7d..0e99319 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_f32.c @@ -5,13 +5,13 @@ * Title: arm_lms_norm_f32.c * Description: Processing function for the floating-point NLMS filter * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_f32.c index 8fc9597..949f6c3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_lms_norm_init_f32.c * Description: Floating-point NLMS filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_q15.c index 0c41794..aa05875 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_lms_norm_init_q15.c * Description: Q15 NLMS filter initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_q31.c index a261a30..28e3c5b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_lms_norm_init_q31.c * Description: Q31 NLMS filter initialization function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_q15.c index 9785a78..2e4befd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_q15.c @@ -5,13 +5,13 @@ * Title: arm_lms_norm_q15.c * Description: Processing function for Q15 normalized LMS filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_q31.c index 37cce57..322219d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_norm_q31.c @@ -5,13 +5,13 @@ * Title: arm_lms_norm_q31.c * Description: Processing function for the Q31 NLMS filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_q15.c index 536409b..b165d7f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_q15.c @@ -5,13 +5,13 @@ * Title: arm_lms_q15.c * Description: Processing function for Q15 LMS filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. 
All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_q31.c index cc63338..fedf570 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/FilteringFunctions/arm_lms_q31.c @@ -5,13 +5,13 @@ * Title: arm_lms_q31.c * Description: Processing function for the Q31 LMS filter * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/InterpolationFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/InterpolationFunctionsF16.c deleted file mode 100644 index 26a9bba..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/InterpolationFunctionsF16.c +++ /dev/null @@ -1,37 +0,0 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: InterpolationFunctions.c - * Description: Combination of all interpolation function source files. - * - * $Date: 22. July 2020 - * $Revision: V1.0.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2020 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "arm_bilinear_interp_f16.c" -#include "arm_linear_interp_f16.c" - - - - -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_f16.c index 1e974b6..d9a7d7c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_f16.c @@ -5,13 +5,13 @@ * Title: arm_bilinear_interp_f16.c * Description: Floating-point bilinear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,57 +37,6 @@ @ingroup groupInterpolation */ -/** - * @defgroup BilinearInterpolate Bilinear Interpolation - * - * Bilinear interpolation is an extension of linear interpolation applied to a two dimensional grid. - * The underlying function f(x, y) is sampled on a regular grid and the interpolation process - * determines values between the grid points. - * Bilinear interpolation is equivalent to two step linear interpolation, first in the x-dimension and then in the y-dimension. - * Bilinear interpolation is often used in image processing to rescale images. - * The CMSIS DSP library provides bilinear interpolation functions for Q7, Q15, Q31, and floating-point data types. - * - * Algorithm - * \par - * The instance structure used by the bilinear interpolation functions describes a two dimensional data table. - * For floating-point, the instance structure is defined as: - *
-   *   typedef struct
-   *   {
-   *     uint16_t numRows;
-   *     uint16_t numCols;
-   *     float16_t *pData;
-   * } arm_bilinear_interp_instance_f16;
-   * 
- * - * \par - * where numRows specifies the number of rows in the table; - * numCols specifies the number of columns in the table; - * and pData points to an array of size numRows*numCols values. - * The data table pTable is organized in row order and the supplied data values fall on integer indexes. - * That is, table element (x,y) is located at pTable[x + y*numCols] where x and y are integers. - * - * \par - * Let (x, y) specify the desired interpolation point. Then define: - *
-   *     XF = floor(x)
-   *     YF = floor(y)
-   * 
- * \par - * The interpolated output point is computed as: - *
-   *  f(x, y) = f(XF, YF) * (1-(x-XF)) * (1-(y-YF))
-   *           + f(XF+1, YF) * (x-XF)*(1-(y-YF))
-   *           + f(XF, YF+1) * (1-(x-XF))*(y-YF)
-   *           + f(XF+1, YF+1) * (x-XF)*(y-YF)
-   * 
- * Note that the coordinates (x, y) contain integer and fractional components. - * The integer components specify which portion of the table to use while the - * fractional components control the interpolation processor. - * - * \par - * if (x,y) are outside of the table boundary, Bilinear interpolation returns zero output. - */ /** @@ -143,18 +92,19 @@ /* Calculation of intermediate values */ b1 = f00; - b2 = f01 - f00; - b3 = f10 - f00; - b4 = f00 - f01 - f10 + f11; + b2 = (_Float16)f01 - (_Float16)f00; + b3 = (_Float16)f10 - (_Float16)f00; + b4 = (_Float16)f00 - (_Float16)f01 - (_Float16)f10 + (_Float16)f11; /* Calculation of fractional part in X */ - xdiff = X - xIndex; + xdiff = (_Float16)X - (_Float16)xIndex; /* Calculation of fractional part in Y */ - ydiff = Y - yIndex; + ydiff = (_Float16)Y - (_Float16)yIndex; /* Calculation of bi-linear interpolated output */ - out = b1 + b2 * xdiff + b3 * ydiff + b4 * xdiff * ydiff; + out = (_Float16)b1 + (_Float16)b2 * (_Float16)xdiff + + (_Float16)b3 * (_Float16)ydiff + (_Float16)b4 * (_Float16)xdiff * (_Float16)ydiff; /* return to application */ return (out); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_f32.c index 41e99a4..3008a7a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_f32.c @@ -5,13 +5,13 @@ * Title: arm_bilinear_interp_f32.c * Description: Floating-point bilinear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q15.c index 484b404..bc92417 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q15.c @@ -5,13 +5,13 @@ * Title: arm_linear_interp_q15.c * Description: Q15 linear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q31.c index 4a5f654..2375763 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q31.c @@ -5,13 +5,13 @@ * Title: arm_linear_interp_q31.c * Description: Q31 linear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q7.c index 31b3a68..0a78876 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_bilinear_interp_q7.c @@ -5,13 +5,13 @@ * Title: arm_linear_interp_q7.c * Description: Q7 linear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_f16.c index f2b0b36..c25a217 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_f16.c @@ -5,13 +5,13 @@ * Title: arm_linear_interp_f16.c * Description: Floating-point linear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,37 +37,6 @@ @ingroup groupInterpolation */ -/** - * @defgroup LinearInterpolate Linear Interpolation - * - * Linear interpolation is a method of curve fitting using linear polynomials. 
- * Linear interpolation works by effectively drawing a straight line between two neighboring samples and returning the appropriate point along that line - * - * \par - * \image html LinearInterp.gif "Linear interpolation" - * - * \par - * A Linear Interpolate function calculates an output value(y), for the input(x) - * using linear interpolation of the input values x0, x1( nearest input values) and the output values y0 and y1(nearest output values) - * - * \par Algorithm: - *
-   *       y = y0 + (x - x0) * ((y1 - y0)/(x1-x0))
-   *       where x0, x1 are nearest values of input x
-   *             y0, y1 are nearest values to output y
-   * 
- * - * \par - * This set of functions implements Linear interpolation process - * for Q7, Q15, Q31, and floating-point data types. The functions operate on a single - * sample of data and each call to the function returns a single processed value. - * S points to an instance of the Linear Interpolate function data structure. - * x is the input sample value. The functions returns the output value. - * - * \par - * if x is outside of the table boundary, Linear interpolation returns first value of the table - * if x is below input range and returns last value of table if x is above range. - */ /** * @addtogroup LinearInterpolate @@ -93,7 +62,7 @@ float16_t *pYData = S->pYData; /* pointer to output table */ /* Calculation of index */ - i = (int32_t) ((x - S->x1) / xSpacing); + i = (int32_t) (((_Float16)x - (_Float16)S->x1) / (_Float16)xSpacing); if (i < 0) { @@ -108,15 +77,16 @@ else { /* Calculation of nearest input values */ - x0 = S->x1 + i * xSpacing; - x1 = S->x1 + (i + 1) * xSpacing; + x0 = (_Float16)S->x1 + (_Float16)i * (_Float16)xSpacing; + x1 = (_Float16)S->x1 + (_Float16)(i + 1) * (_Float16)xSpacing; /* Read of nearest output values */ y0 = pYData[i]; y1 = pYData[i + 1]; /* Calculation of output */ - y = y0 + (x - x0) * ((y1 - y0) / (x1 - x0)); + y = (_Float16)y0 + ((_Float16)x - (_Float16)x0) * + (((_Float16)y1 - (_Float16)y0) / ((_Float16)x1 - (_Float16)x0)); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_f32.c index f8caa74..834d54e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_f32.c @@ -5,13 +5,13 @@ * Title: arm_linear_interp_f32.c * Description: Floating-point linear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q15.c index 690c44a..f2cfc80 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q15.c @@ -5,13 +5,13 @@ * Title: arm_linear_interp_q15.c * Description: Q15 linear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -53,7 +53,7 @@ * */ q15_t arm_linear_interp_q15( - q15_t * pYData, + const q15_t * pYData, q31_t x, uint32_t nValues) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q31.c index 783e125..bdeefb8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q31.c @@ -5,13 +5,13 @@ * Title: arm_linear_interp_q31.c * Description: Q31 linear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -54,7 +54,7 @@ * */ q31_t arm_linear_interp_q31( - q31_t * pYData, + const q31_t * pYData, q31_t x, uint32_t nValues) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q7.c index 0f32e3d..bde5678 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_linear_interp_q7.c @@ -5,13 +5,13 @@ * Title: arm_linear_interp_q7.c * Description: Q7 linear interpolation * - * $Date: 22 July 2020 + * $Date: 23 April 2021 * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -53,7 +53,7 @@ * This function can support maximum of table size 2^12. 
*/ q7_t arm_linear_interp_q7( - q7_t * pYData, + const q7_t * pYData, q31_t x, uint32_t nValues) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_spline_interp_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_spline_interp_f32.c index 822986c..21a5edb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_spline_interp_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_spline_interp_f32.c @@ -5,13 +5,13 @@ * Title: arm_spline_interp_f32.c * Description: Floating-point cubic spline interpolation * - * $Date: 13 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -140,7 +140,7 @@ /** * @brief Processing function for the floating-point cubic spline interpolation. * @param[in] S points to an instance of the floating-point spline structure. - * @param[in] xq points to the x values ot the interpolated data points. + * @param[in] xq points to the x values of the interpolated data points. * @param[out] pDst points to the block of output data. * @param[in] blockSize number of samples of output data. */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_spline_interp_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_spline_interp_init_f32.c index 5e5c442..80e4d76 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_spline_interp_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/InterpolationFunctions/arm_spline_interp_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_spline_interp_init_f32.c * Description: Floating-point cubic spline initialization function * - * $Date: 13 November 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/MatrixFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/MatrixFunctionsF16.c deleted file mode 100644 index 8fff7f9..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/MatrixFunctionsF16.c +++ /dev/null @@ -1,45 +0,0 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: MatrixFunctions.c - * Description: Combination of all matrix function f16 source files. - * - * $Date: 18. March 2020 - * $Revision: V1.0.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2020 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "arm_mat_add_f16.c" -#include "arm_mat_sub_f16.c" -#include "arm_mat_trans_f16.c" -#include "arm_mat_scale_f16.c" -#include "arm_mat_mult_f16.c" -#include "arm_mat_vec_mult_f16.c" -#include "arm_mat_cmplx_trans_f16.c" -#include "arm_mat_cmplx_mult_f16.c" -#include "arm_mat_inverse_f16.c" -#include "arm_mat_init_f16.c" -#include "arm_mat_cholesky_f16.c" -#include "arm_mat_solve_upper_triangular_f16.c" -#include "arm_mat_solve_lower_triangular_f16.c" - -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f16.c new file mode 100644 index 0000000..e9f15bc --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f16.c @@ -0,0 +1,125 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_householder_f16.c + * Description: Half floating-point Householder transform + * + * $Date: 15 June 2022 + * $Revision: V1.11.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h" + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" + + +#include + + + +/** + @ingroup groupMatrix + */ + + +/** + @addtogroup MatrixHouseholder + @{ + */ + +/** + @brief Householder transform of a half floating point vector. + @param[in] pSrc points to the input vector. + @param[in] threshold norm2 threshold. + @param[in] blockSize dimension of the vector space. + @param[out] pOut points to the output vector. + @return beta return the scaling factor beta + */ + + +#if defined(ARM_FLOAT16_SUPPORTED) + + + +float16_t arm_householder_f16( + const float16_t * pSrc, + const float16_t threshold, + uint32_t blockSize, + float16_t * pOut + ) + +{ + uint32_t i; + float16_t epsilon; + float16_t x1norm2,alpha; + float16_t beta,tau,r; + + epsilon = threshold; + + alpha = pSrc[0]; + + for(i=1; i < blockSize; i++) + { + pOut[i] = pSrc[i]; + } + pOut[0] = 1.0f16; + + arm_dot_prod_f16(pSrc+1,pSrc+1,blockSize-1,&x1norm2); + + if ((_Float16)x1norm2<=(_Float16)epsilon) + { + tau = 0.0f16; + memset(pOut,0,blockSize * sizeof(float16_t)); + } + else + { + beta = (_Float16)alpha * (_Float16)alpha + (_Float16)x1norm2; + (void)arm_sqrt_f16(beta,&beta); + + if ((_Float16)alpha > 0.0f16) + { + beta = -(_Float16)beta; + } + + r = 1.0f16 / ((_Float16)alpha -(_Float16)beta); + arm_scale_f16(pOut,r,pOut,blockSize); + pOut[0] = 1.0f16; + + + tau = ((_Float16)beta - (_Float16)alpha) / (_Float16)beta; + + } + + return(tau); + +} + + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ +/** + @} end of MatrixHouseholder group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f32.c new file mode 100644 index 0000000..c1510bc --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f32.c @@ -0,0 +1,196 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_householder_f32.c + * Description: Floating-point Householder transform + * + * $Date: 15 June 2022 + * $Revision: V1.11.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" + +#include + + + +/** + @ingroup groupMatrix + */ + +/** + @defgroup MatrixHouseholder Householder transform of a vector + + Computes the Householder transform of a vector x. + + The Householder transform of x is a vector v with + + \f[ + v_0 = 1 + \f] + + and a scalar \f$\beta\f$ such that: + + \f[ + P = I - \beta v v^T + \f] + + is an orthogonal matrix and + + \f[ + P x = ||x||_2 e_1 + \f] + + So P is an hyperplane reflection such that the image of x + is proportional to \f$e_1\f$. + + \f$e_1\f$ is the vector of coordinates: + + \f[ + \begin{pmatrix} + 1 \\ + 0 \\ + \vdots \\ + \end{pmatrix} + \f] + + If x is already proportional to \f$e_1\f$ then + the matrix P should be the identity. + + Thus, \f$\beta\f$ should be 0 and in this case the vector v + can also be null. + + But how do we detect that x is already proportional to + \f$e_1\f$. + + If x + \f[ + x = + \begin{pmatrix} + x_0 \\ + xr \\ + \end{pmatrix} + \f] + + where \f$xr\f$ is a vector. + + The algorithm is computing the norm squared of this vector: + + \f[ + ||xr||^2 + \f] + + and this value is compared to a `threshold`. If the value + is smaller than the `threshold`, the algorithm is + returning 0 for \f$\beta\f$ and the householder vector. + + This `threshold` is an argument of the function. + + Default values are provided in the header + `dsp/matrix_functions.h` like for instance + `DEFAULT_HOUSEHOLDER_THRESHOLD_F32` + + + + */ + +/** + @addtogroup MatrixHouseholder + @{ + */ + +/** + @brief Householder transform of a floating point vector. + @param[in] pSrc points to the input vector. + @param[in] threshold norm2 threshold. + @param[in] blockSize dimension of the vector space. + @param[out] pOut points to the output vector. 
+ @return beta return the scaling factor beta + */ + + + + +float32_t arm_householder_f32( + const float32_t * pSrc, + const float32_t threshold, + uint32_t blockSize, + float32_t * pOut + ) + +{ + uint32_t i; + float32_t epsilon; + float32_t x1norm2,alpha; + float32_t beta,tau,r; + + epsilon = threshold; + + alpha = pSrc[0]; + + for(i=1; i < blockSize; i++) + { + pOut[i] = pSrc[i]; + } + pOut[0] = 1.0f; + + arm_dot_prod_f32(pSrc+1,pSrc+1,blockSize-1,&x1norm2); + + if (x1norm2<=epsilon) + { + tau = 0.0f; + memset(pOut,0,blockSize * sizeof(float32_t)); + } + else + { + beta = alpha * alpha + x1norm2; + (void)arm_sqrt_f32(beta,&beta); + + if (alpha > 0.0f) + { + beta = -beta; + } + + r = 1.0f / (alpha -beta); + arm_scale_f32(pOut,r,pOut,blockSize); + pOut[0] = 1.0f; + + + tau = (beta - alpha) / beta; + + } + + return(tau); + +} + + +/** + @} end of MatrixHouseholder group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f64.c new file mode 100644 index 0000000..3f41011 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_householder_f64.c @@ -0,0 +1,121 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_householder_f64.c + * Description: Double floating-point Householder transform + * + * $Date: 15 June 2022 + * $Revision: V1.11.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" + +#include + + + +/** + @ingroup groupMatrix + */ + + +/** + @addtogroup MatrixHouseholder + @{ + */ + +/** + @brief Householder transform of a double floating point vector. + @param[in] pSrc points to the input vector. + @param[in] threshold norm2 threshold. + @param[in] blockSize dimension of the vector space. + @param[out] pOut points to the output vector. 
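As an illustrative aside (not part of the patch): a minimal usage sketch of arm_householder_f32, assuming the CMSIS-DSP headers included above and the DEFAULT_HOUSEHOLDER_THRESHOLD_F32 constant mentioned in the documentation; the demo function name and input values are hypothetical.

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h"
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h"

/* Hypothetical demo: compute the Householder vector v and scaling factor tau
   for x = (3, 4), then apply P = I - tau * v * v^T to x by hand. */
static void householder_demo(void)
{
    float32_t x[2] = { 3.0f, 4.0f };
    float32_t v[2];                      /* Householder vector, v[0] == 1 */
    float32_t px[2];                     /* P x */
    float32_t vtx;                       /* v^T x */

    /* Expected here: v = { 1.0, 0.5 }, tau = 1.6 */
    float32_t tau = arm_householder_f32(x, DEFAULT_HOUSEHOLDER_THRESHOLD_F32, 2, v);

    /* P x = x - tau * v * (v^T x); expected px = { -5, 0 },
       i.e. proportional to e1 with magnitude ||x||_2 = 5 */
    arm_dot_prod_f32(v, x, 2, &vtx);
    for (uint32_t i = 0; i < 2; i++)
    {
        px[i] = x[i] - tau * v[i] * vtx;
    }
    (void)px;
}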
+ @return beta return the scaling factor beta + */ + + + + +float64_t arm_householder_f64( + const float64_t * pSrc, + const float64_t threshold, + uint32_t blockSize, + float64_t * pOut + ) + +{ + uint32_t i; + float64_t epsilon; + float64_t x1norm2,alpha; + float64_t beta,tau,r; + + epsilon = threshold; + + alpha = pSrc[0]; + + for(i=1; i < blockSize; i++) + { + pOut[i] = pSrc[i]; + } + pOut[0] = 1.0; + + arm_dot_prod_f64(pSrc+1,pSrc+1,blockSize-1,&x1norm2); + + if (x1norm2<=epsilon) + { + tau = 0.0; + memset(pOut,0,blockSize * sizeof(float64_t)); + } + else + { + beta = alpha * alpha + x1norm2; + beta=sqrt(beta); + + if (alpha > 0.0) + { + beta = -beta; + } + + r = 1.0 / (alpha -beta); + arm_scale_f64(pOut,r,pOut,blockSize); + pOut[0] = 1.0; + + + tau = (beta - alpha) / beta; + + } + + return(tau); + +} + + +/** + @} end of MatrixHouseholder group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_f16.c index b598b4a..77b4fab 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_add_f16.c * Description: Floating-point matrix addition * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -64,7 +64,7 @@ arm_status arm_mat_add_f16( arm_status status; uint32_t numSamples; /* total number of elements in the matrix */ float16_t *pDataA, *pDataB, *pDataDst; - f16x8_t vecA, vecB, vecDst; + f16x8_t vecA, vecB, vecDst = { 0 }; float16_t const *pSrcAVec; float16_t const *pSrcBVec; uint32_t blkCnt; /* loop counters */ @@ -169,13 +169,13 @@ arm_status arm_mat_add_f16( /* C(m,n) = A(m,n) + B(m,n) */ /* Add and store result in destination buffer. */ - *pOut++ = *pInA++ + *pInB++; + *pOut++ = (_Float16)*pInA++ + (_Float16)*pInB++; - *pOut++ = *pInA++ + *pInB++; + *pOut++ = (_Float16)*pInA++ + (_Float16)*pInB++; - *pOut++ = *pInA++ + *pInB++; + *pOut++ = (_Float16)*pInA++ + (_Float16)*pInB++; - *pOut++ = *pInA++ + *pInB++; + *pOut++ = (_Float16)*pInA++ + (_Float16)*pInB++; /* Decrement loop counter */ blkCnt--; @@ -196,7 +196,7 @@ arm_status arm_mat_add_f16( /* C(m,n) = A(m,n) + B(m,n) */ /* Add and store result in destination buffer. 
*/ - *pOut++ = *pInA++ + *pInB++; + *pOut++ = (_Float16)*pInA++ + (_Float16)*pInB++; /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_f32.c index 705d680..b777249 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_add_f32.c * Description: Floating-point matrix addition * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -38,7 +38,27 @@ @defgroup MatrixAdd Matrix Addition Adds two matrices. - \image html MatrixAddition.gif "Addition of two 3 x 3 matrices" + @par Addition of two 3 x 3 matrices + + \f[ + \begin{pmatrix} + a_{1,1} & a_{1,2} & a_{1,3} \\ + a_{2,1} & a_{2,2} & a_{2,3} \\ + a_{3,1} & a_{3,2} & a_{3,3} \\ + \end{pmatrix} + + + \begin{pmatrix} + b_{1,1} & b_{1,2} & b_{1,3} \\ + b_{2,1} & b_{2,2} & b_{2,3} \\ + b_{3,1} & b_{3,2} & b_{3,3} \\ + \end{pmatrix} + = + \begin{pmatrix} + a_{1,1}+b_{1,1} & a_{1,2}+b_{1,2} & a_{1,3}+b_{1,3} \\ + a_{2,1}+b_{2,1} & a_{2,2}+b_{2,2} & a_{2,3}+b_{2,3} \\ + a_{3,1}+b_{3,1} & a_{3,2}+b_{3,2} & a_{3,3}+b_{3,3} \\ + \end{pmatrix} + \f] The functions check to make sure that pSrcA, pSrcB, and pDst have the same @@ -70,7 +90,7 @@ arm_status arm_mat_add_f32( arm_status status; uint32_t numSamples; /* total number of elements in the matrix */ float32_t *pDataA, *pDataB, *pDataDst; - f32x4_t vecA, vecB, vecDst; + f32x4_t vecA, vecB, vecDst = { 0 }; float32_t const *pSrcAVec; float32_t const *pSrcBVec; uint32_t blkCnt; /* loop counters */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_q15.c index 0e7d5fa..9a1dfa0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_q15.c @@ -5,13 +5,13 @@ * Title: arm_mat_add_q15.c * Description: Q15 matrix addition * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
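For reference, a minimal sketch of calling arm_mat_add_f32 on a 2 x 2 case (illustrative only, not part of the patch; the demo name and values are hypothetical, and the size check described in the documentation above is only active when ARM_MATH_MATRIX_CHECK is defined, as in the other matrix functions in this patch):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h"

static void mat_add_demo(void)
{
    float32_t aData[4] = { 1.0f, 2.0f,
                           3.0f, 4.0f };
    float32_t bData[4] = { 5.0f, 6.0f,
                           7.0f, 8.0f };
    float32_t cData[4];
    arm_matrix_instance_f32 A, B, C;

    arm_mat_init_f32(&A, 2, 2, aData);
    arm_mat_init_f32(&B, 2, 2, bData);
    arm_mat_init_f32(&C, 2, 2, cData);

    /* C(m,n) = A(m,n) + B(m,n); expected cData = { 6, 8, 10, 12 } */
    arm_status status = arm_mat_add_f32(&A, &B, &C);
    (void)status;
}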
* * SPDX-License-Identifier: Apache-2.0 * @@ -61,7 +61,7 @@ arm_status arm_mat_add_q15( { uint32_t numSamples; /* total number of elements in the matrix */ q15_t *pDataA, *pDataB, *pDataDst; - q15x8_t vecA, vecB, vecDst; + q15x8_t vecA, vecB, vecDst = { 0 }; q15_t const *pSrcAVec; q15_t const *pSrcBVec; uint32_t blkCnt; /* loop counters */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_q31.c index b5c98e4..7b08dbf 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_add_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_add_q31.c * Description: Q31 matrix addition * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -61,7 +61,7 @@ arm_status arm_mat_add_q31( arm_status status; /* status of matrix addition */ uint32_t numSamples; /* total number of elements in the matrix */ q31_t *pDataA, *pDataB, *pDataDst; - q31x4_t vecA, vecB, vecDst; + q31x4_t vecA, vecB, vecDst = { 0 }; q31_t const *pSrcAVec; q31_t const *pSrcBVec; uint32_t blkCnt; /* loop counters */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f16.c index f6429f6..31b245e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f16.c @@ -5,11 +5,13 @@ * Title: arm_mat_cholesky_f16.c * Description: Floating-point Cholesky decomposition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,6 +29,7 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" #if defined(ARM_FLOAT16_SUPPORTED) @@ -50,7 +53,7 @@ - \ref ARM_MATH_DECOMPOSITION_FAILURE : Input matrix cannot be decomposed * @par * If the matrix is ill conditioned or only semi-definite, then it is better using the LDL^t decomposition. 
- * The decomposition of A is returning a lower triangular matrix U such that A = U U^t + * The decomposition of A is returning a lower triangular matrix U such that A = L L^t */ #if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) @@ -158,16 +161,13 @@ arm_status arm_mat_cholesky_f16( pG[j * n + i] = vecAddAcrossF16Mve(acc); } - if (pG[i * n + i] <= 0.0f16) + if ((_Float16)pG[i * n + i] <= 0.0f16) { return(ARM_MATH_DECOMPOSITION_FAILURE); } - invSqrtVj = (_Float16)1.0f/sqrtf(pG[i * n + i]); - for(j=i; j < n ; j++) - { - pG[j * n + i] = (_Float16)pG[j * n + i] * invSqrtVj ; - } + invSqrtVj = 1.0f16/(_Float16)sqrtf((float32_t)pG[i * n + i]); + SCALE_COL_F16(pDst,i,invSqrtVj,i); } status = ARM_MATH_SUCCESS; @@ -220,20 +220,21 @@ arm_status arm_mat_cholesky_f16( for(k=0; k < i ; k++) { - pG[j * n + i] = pG[j * n + i] - pG[i * n + k] * pG[j * n + k]; + pG[j * n + i] = (_Float16)pG[j * n + i] - (_Float16)pG[i * n + k] * (_Float16)pG[j * n + k]; } } - if (pG[i * n + i] <= 0.0f) + if ((_Float16)pG[i * n + i] <= 0.0f16) { return(ARM_MATH_DECOMPOSITION_FAILURE); } - invSqrtVj = 1.0f/sqrtf(pG[i * n + i]); - for(j=i ; j < n ; j++) - { - pG[j * n + i] = pG[j * n + i] * invSqrtVj ; - } + /* The division is done in float32 for accuracy reason and + because doing it in f16 would not have any impact on the performances. + */ + invSqrtVj = 1.0f/sqrtf((float32_t)pG[i * n + i]); + SCALE_COL_F16(pDst,i,invSqrtVj,i); + } status = ARM_MATH_SUCCESS; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f32.c index 89f5ae9..77890a4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f32.c @@ -5,11 +5,13 @@ * Title: arm_mat_cholesky_f32.c * Description: Floating-point Cholesky decomposition * + * $Date: 05 October 2021 + * $Revision: V1.9.1 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,6 +29,7 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" /** @ingroup groupMatrix @@ -35,7 +38,7 @@ /** @defgroup MatrixChol Cholesky and LDLT decompositions - Computes the Cholesky or LDL^t decomposition of a matrix. + Computes the Cholesky or LL^t decomposition of a matrix. If the input matrix does not have a decomposition, then the @@ -58,7 +61,7 @@ - \ref ARM_MATH_DECOMPOSITION_FAILURE : Input matrix cannot be decomposed * @par * If the matrix is ill conditioned or only semi-definite, then it is better using the LDL^t decomposition. 
- * The decomposition of A is returning a lower triangular matrix U such that A = U U^t + * The decomposition of A is returning a lower triangular matrix L such that A = L L^t */ #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) @@ -170,10 +173,7 @@ arm_status arm_mat_cholesky_f32( } invSqrtVj = 1.0f/sqrtf(pG[i * n + i]); - for(j=i; j < n ; j++) - { - pG[j * n + i] = pG[j * n + i] * invSqrtVj ; - } + SCALE_COL_F32(pDst,i,invSqrtVj,i); } status = ARM_MATH_SUCCESS; @@ -221,7 +221,9 @@ arm_status arm_mat_cholesky_f32( f32x4_t acc, acc0, acc1, acc2, acc3; f32x4_t vecGi; f32x4_t vecGj,vecGj0,vecGj1,vecGj2,vecGj3; - f32x2_t tmp = vdup_n_f32(0); +#if !defined(__aarch64__) + f32x2_t tmp = vdup_n_f32(0); +#endif float32_t sum=0.0f; float32_t sum0=0.0f,sum1=0.0f,sum2=0.0f,sum3=0.0f; @@ -264,7 +266,7 @@ arm_status arm_mat_cholesky_f32( k+=4; } -#if __aarch64__ +#if defined(__aarch64__) sum0 = vpadds_f32(vpadd_f32(vget_low_f32(acc0), vget_high_f32(acc0))); sum1 = vpadds_f32(vpadd_f32(vget_low_f32(acc1), vget_high_f32(acc1))); sum2 = vpadds_f32(vpadd_f32(vget_low_f32(acc2), vget_high_f32(acc2))); @@ -322,7 +324,7 @@ arm_status arm_mat_cholesky_f32( k+=4; } -#if __aarch64__ +#if defined(__aarch64__) sum = vpadds_f32(vpadd_f32(vget_low_f32(acc), vget_high_f32(acc))); #else tmp = vpadd_f32(vget_low_f32(acc), vget_high_f32(acc)); @@ -348,10 +350,7 @@ arm_status arm_mat_cholesky_f32( } invSqrtVj = 1.0f/sqrtf(pG[i * n + i]); - for(j=i; j < n ; j++) - { - pG[j * n + i] = pG[j * n + i] * invSqrtVj ; - } + SCALE_COL_F32(pDst,i,invSqrtVj,i); } status = ARM_MATH_SUCCESS; @@ -414,10 +413,8 @@ arm_status arm_mat_cholesky_f32( } invSqrtVj = 1.0f/sqrtf(pG[i * n + i]); - for(j=i ; j < n ; j++) - { - pG[j * n + i] = pG[j * n + i] * invSqrtVj ; - } + SCALE_COL_F32(pDst,i,invSqrtVj,i); + } status = ARM_MATH_SUCCESS; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f64.c index d73edfe..b42f296 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cholesky_f64.c @@ -5,11 +5,13 @@ * Title: arm_mat_cholesky_f64.c * Description: Floating-point Cholesky decomposition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -27,6 +29,7 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" /** @ingroup groupMatrix @@ -48,7 +51,7 @@ - \ref ARM_MATH_DECOMPOSITION_FAILURE : Input matrix cannot be decomposed * @par * If the matrix is ill conditioned or only semi-definite, then it is better using the LDL^t decomposition. 
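A brief usage sketch of arm_mat_cholesky_f32 (illustrative only, not part of the patch; the demo name and the numeric values are hypothetical):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h"

static void cholesky_demo(void)
{
    /* A symmetric positive-definite 3 x 3 matrix */
    float32_t aData[9] = { 4.0f, 2.0f, 2.0f,
                           2.0f, 5.0f, 3.0f,
                           2.0f, 3.0f, 6.0f };
    float32_t lData[9];
    arm_matrix_instance_f32 A, L;

    arm_mat_init_f32(&A, 3, 3, aData);
    arm_mat_init_f32(&L, 3, 3, lData);

    /* Returns ARM_MATH_DECOMPOSITION_FAILURE if A is not positive definite.
       On success the lower triangle of L satisfies A = L * L^T; for this A
       that lower triangle is { 2,  1, 2,  1, 1, 2 } (row by row). */
    arm_status status = arm_mat_cholesky_f32(&A, &L);
    (void)status;
}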
- * The decomposition of A is returning a lower triangular matrix U such that A = U U^t + * The decomposition of A is returning a lower triangular matrix L such that A = L L^t */ @@ -96,16 +99,14 @@ arm_status arm_mat_cholesky_f64( } } - if (pG[i * n + i] <= 0.0f) + if (pG[i * n + i] <= 0.0) { return(ARM_MATH_DECOMPOSITION_FAILURE); } invSqrtVj = 1.0/sqrt(pG[i * n + i]); - for(j=i ; j < n ; j++) - { - pG[j * n + i] = pG[j * n + i] * invSqrtVj ; - } + SCALE_COL_F64(pDst,i,invSqrtVj,i); + } status = ARM_MATH_SUCCESS; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_f16.c index 398e5eb..3822659 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_cmplx_mult_f16.c * Description: Floating-point matrix multiplication * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -69,7 +69,7 @@ __STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_2x2_mve( const arm_matrix_instance_f16 * pSrcB, arm_matrix_instance_f16 * pDst) { - const uint16_t MATRIX_DIM = 2; +#define MATRIX_DIM 2 float16_t const *pInB = pSrcB->pData; /* input data matrix pointer B */ float16_t *pInA = pSrcA->pData; /* input data matrix pointer A */ float16_t *pOut = pDst->pData; /* output data matrix pointer */ @@ -133,6 +133,7 @@ __STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_2x2_mve( * Return to application */ return (ARM_MATH_SUCCESS); +#undef MATRIX_DIM } @@ -142,7 +143,7 @@ __STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_3x3_mve( const arm_matrix_instance_f16 * pSrcB, arm_matrix_instance_f16 * pDst) { - const uint16_t MATRIX_DIM = 3; +#define MATRIX_DIM 3 float16_t const *pInB = pSrcB->pData; /* input data matrix pointer B */ float16_t *pInA = pSrcA->pData; /* input data matrix pointer A */ float16_t *pOut = pDst->pData; /* output data matrix pointer */ @@ -228,6 +229,7 @@ __STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_3x3_mve( * Return to application */ return (ARM_MATH_SUCCESS); +#undef MATRIX_DIM } @@ -238,7 +240,7 @@ __STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_4x4_mve( const arm_matrix_instance_f16 * pSrcB, arm_matrix_instance_f16 * pDst) { - const uint16_t MATRIX_DIM = 4; +#define MATRIX_DIM 4 float16_t const *pInB = pSrcB->pData; /* input data matrix pointer B */ float16_t *pInA = pSrcA->pData; /* input data matrix pointer A */ float16_t *pOut = pDst->pData; /* output data matrix pointer */ @@ -373,6 +375,7 @@ __STATIC_FORCEINLINE arm_status arm_mat_cmplx_mult_f16_4x4_mve( * Return to application */ return (ARM_MATH_SUCCESS); +#undef MATRIX_DIM } @@ -417,8 +420,8 @@ if ((pSrcA->numCols != pSrcB->numRows) || { if (numRowsA == 1) { - pOut[0] = pInA[0] * pInB[0] - pInA[1] * pInB[1]; - pOut[1] = pInA[0] * pInB[1] + pInA[1] * pInB[0]; + pOut[0] = 
(_Float16)pInA[0] * (_Float16)pInB[0] - (_Float16)pInA[1] * (_Float16)pInB[1]; + pOut[1] = (_Float16)pInA[0] * (_Float16)pInB[1] + (_Float16)pInA[1] * (_Float16)pInB[0]; return (ARM_MATH_SUCCESS); } else if (numRowsA == 2) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_f32.c index 1619ccd..941849d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_cmplx_mult_f32.c * Description: Floating-point matrix multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -534,7 +534,7 @@ arm_status arm_mat_cmplx_mult_f32( uint16_t numRowsA = pSrcA->numRows; /* number of rows of input matrix A */ uint16_t numColsB = pSrcB->numCols; /* number of columns of input matrix B */ uint16_t numColsA = pSrcA->numCols; /* number of columns of input matrix A */ - uint16_t col, i = 0U, row = numRowsA, colCnt; /* loop counters */ + uint16_t col, i = 0U, row = numRowsA; /* loop counters */ arm_status status; /* status of matrix multiplication */ uint32x4_t vecOffs, vecColBOffs; uint32_t blkCnt, rowCnt; /* loop counters */ @@ -613,7 +613,6 @@ arm_status arm_mat_cmplx_mult_f32( /* * Matrix A columns number of MAC operations are to be performed */ - colCnt = numColsA; float32_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec; float32_t const *pInA0 = pInA; @@ -754,7 +753,6 @@ arm_status arm_mat_cmplx_mult_f32( /* * Matrix A columns number of MAC operations are to be performed */ - colCnt = numColsA; float32_t const *pSrcA0Vec; float32_t const *pInA0 = pInA; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_q15.c index 120c925..09f457f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_q15.c @@ -5,13 +5,13 @@ * Title: arm_cmplx_mat_mult_q15.c * Description: Q15 complex matrix multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -463,8 +463,8 @@ arm_status arm_mat_cmplx_mult_q15( #if defined (ARM_MATH_DSP) /* read real and imag values from pSrcA and pSrcB buffer */ - pSourceA = read_q15x2_ia ((q15_t **) &pInA); - pSourceB = read_q15x2_ia ((q15_t **) &pInB); + pSourceA = read_q15x2_ia (&pInA); + pSourceB = read_q15x2_ia (&pInB); /* Multiply and Accumlates */ #ifdef ARM_MATH_BIG_ENDIAN @@ -477,8 +477,8 @@ arm_status arm_mat_cmplx_mult_q15( sumImag += (q63_t) prod2; /* read real and imag values from pSrcA and pSrcB buffer */ - pSourceA = read_q15x2_ia ((q15_t **) &pInA); - pSourceB = read_q15x2_ia ((q15_t **) &pInB); + pSourceA = read_q15x2_ia (&pInA); + pSourceB = read_q15x2_ia (&pInB); /* Multiply and Accumlates */ #ifdef ARM_MATH_BIG_ENDIAN @@ -536,8 +536,8 @@ arm_status arm_mat_cmplx_mult_q15( #if defined (ARM_MATH_DSP) /* read real and imag values from pSrcA and pSrcB buffer */ - pSourceA = read_q15x2_ia ((q15_t **) &pInA); - pSourceB = read_q15x2_ia ((q15_t **) &pInB); + pSourceA = read_q15x2_ia (&pInA); + pSourceB = read_q15x2_ia (&pInB); /* Multiply and Accumlates */ #ifdef ARM_MATH_BIG_ENDIAN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_q31.c index 960f4a6..9933865 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_mult_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_cmplx_mult_q31.c * Description: Floating-point matrix multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
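A minimal sketch of the complex multiply entry point touched above, arm_mat_cmplx_mult_f32 (illustrative only, not part of the patch; it assumes the usual CMSIS-DSP convention that complex data is stored as interleaved (real, imaginary) pairs and that numRows/numCols count complex elements):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h"

static void cmplx_mult_demo(void)
{
    /* A = complex 2 x 2 identity, stored as (re, im) pairs */
    float32_t aData[8] = { 1.0f, 0.0f,   0.0f, 0.0f,
                           0.0f, 0.0f,   1.0f, 0.0f };
    float32_t bData[8] = { 1.0f, 2.0f,   3.0f, 4.0f,
                           5.0f, 6.0f,   7.0f, 8.0f };
    float32_t cData[8];
    arm_matrix_instance_f32 A, B, C;

    arm_mat_init_f32(&A, 2, 2, aData);
    arm_mat_init_f32(&B, 2, 2, bData);
    arm_mat_init_f32(&C, 2, 2, cData);

    /* C = A * B; with A the identity, cData should equal bData */
    arm_status status = arm_mat_cmplx_mult_f32(&A, &B, &C);
    (void)status;
}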
* * SPDX-License-Identifier: Apache-2.0 * @@ -528,7 +528,7 @@ arm_status arm_mat_cmplx_mult_q31( uint16_t numRowsA = pSrcA->numRows; /* number of rows of input matrix A */ uint16_t numColsB = pSrcB->numCols; /* number of columns of input matrix B */ uint16_t numColsA = pSrcA->numCols; /* number of columns of input matrix A */ - uint16_t col, i = 0U, row = numRowsA, colCnt; /* loop counters */ + uint16_t col, i = 0U, row = numRowsA; /* loop counters */ arm_status status; /* status of matrix multiplication */ uint32x4_t vecOffs, vecColBOffs; uint32_t blkCnt, rowCnt; /* loop counters */ @@ -613,7 +613,6 @@ arm_status arm_mat_cmplx_mult_q31( /* * Matrix A columns number of MAC operations are to be performed */ - colCnt = numColsA; q31_t const *pSrcA0Vec, *pSrcA1Vec; q31_t const *pInA0 = pInA; @@ -744,7 +743,6 @@ arm_status arm_mat_cmplx_mult_q31( /* * Matrix A columns number of MAC operations are to be performed */ - colCnt = numColsA; q31_t const *pSrcA0Vec; q31_t const *pInA0 = pInA; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_f16.c index bbcbe04..3e96414 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_cmplx_trans_f16.c * Description: Floating-point complex matrix transpose * - * $Date: 08. July 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_f32.c index 38e77c3..e551d07 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_cmplx_trans_f32.c * Description: Floating-point complex matrix transpose * - * $Date: 08. July 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -40,7 +40,23 @@ Tranposes a complex matrix. Transposing an M x N matrix flips it around the center diagonal and results in an N x M matrix. 
- \image html MatrixTranspose.gif "Transpose of a 3 x 3 matrix" + + @par Transpose of a 3 x 3 matrix + + \f[ + \begin{pmatrix} + a_{1,1} & a_{1,2} & a_{1,3} \\ + a_{2,1} & a_{2,2} & a_{2,3} \\ + a_{3,1} & a_{3,2} & a_{3,3} \\ + \end{pmatrix}^T + = + \begin{pmatrix} + a_{1,1} & a_{2,1} & a_{3,1} \\ + a_{1,2} & a_{2,2} & a_{3,2} \\ + a_{1,3} & a_{2,3} & a_{3,3} \\ + \end{pmatrix} + \f] + */ /** diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_q15.c index 37d1724..1f80f12 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_q15.c @@ -5,13 +5,13 @@ * Title: arm_mat_cmplx_trans_q31.c * Description: Q15 complex matrix transpose * - * $Date: 08. July 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_q31.c index 1ad551a..ba38341 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_cmplx_trans_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_cmplx_trans_q31.c * Description: Q31 complex matrix transpose * - * $Date: 08. July 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_f16.c index ddf9390..e5a7aa2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_init_f16.c * Description: Floating-point matrix initialization * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. 
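A short sketch of arm_mat_cmplx_trans_f32, matching the transpose formula added above but on a 2 x 3 input (illustrative only, not part of the patch; the demo name and values are hypothetical):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h"

static void cmplx_trans_demo(void)
{
    /* 2 x 3 complex matrix, elements stored as interleaved (re, im) pairs */
    float32_t srcData[12] = { 1.0f, 1.0f,  2.0f, 2.0f,  3.0f, 3.0f,
                              4.0f, 4.0f,  5.0f, 5.0f,  6.0f, 6.0f };
    float32_t dstData[12];
    arm_matrix_instance_f32 S, D;

    arm_mat_init_f32(&S, 2, 3, srcData);
    arm_mat_init_f32(&D, 3, 2, dstData);

    /* D = S^T: complex element (i, j) of S, real and imaginary parts together,
       ends up at position (j, i) of D */
    arm_status status = arm_mat_cmplx_trans_f32(&S, &D);
    (void)status;
}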
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_f32.c index 0122f3d..c9348fd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_init_f32.c * Description: Floating-point matrix initialization * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_q15.c index 281b165..bd2a7c2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_mat_init_q15.c * Description: Q15 matrix initialization * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_q31.c index 64f2e7e..a9bcb52 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_init_q31.c * Description: Q31 matrix initialization * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -34,10 +34,7 @@ @ingroup groupMatrix */ -/** - @defgroup MatrixInit Matrix Initialization - - */ + /** @addtogroup MatrixInit diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f16.c index 4565796..27ad218 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_inverse_f16.c * Description: Floating-point matrix inverse * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -29,6 +29,7 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" #if defined(ARM_FLOAT16_SUPPORTED) @@ -52,526 +53,20 @@ - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed - \ref ARM_MATH_SINGULAR : Input matrix is found to be singular (non-invertible) */ -#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) - -arm_status arm_mat_inverse_f16( - const arm_matrix_instance_f16 * pSrc, - arm_matrix_instance_f16 * pDst) -{ - float16_t *pIn = pSrc->pData; /* input data matrix pointer */ - float16_t *pOut = pDst->pData; /* output data matrix pointer */ - float16_t *pInT1, *pInT2; /* Temporary input data matrix pointer */ - float16_t *pOutT1, *pOutT2; /* Temporary output data matrix pointer */ - float16_t *pPivotRowIn, *pPRT_in, *pPivotRowDst, *pPRT_pDst; /* Temporary input and output data matrix pointer */ - - uint32_t numRows = pSrc->numRows; /* Number of rows in the matrix */ - uint32_t numCols = pSrc->numCols; /* Number of Cols in the matrix */ - float16_t *pTmpA, *pTmpB; - - _Float16 in = 0.0f16; /* Temporary input values */ - uint32_t i, rowCnt, flag = 0U, j, loopCnt, k, l; /* loop counters */ - arm_status status; /* status of matrix inverse */ - uint32_t blkCnt; - -#ifdef ARM_MATH_MATRIX_CHECK - /* Check for matrix mismatch condition */ - if ((pSrc->numRows != pSrc->numCols) || (pDst->numRows != pDst->numCols) - || (pSrc->numRows != pDst->numRows)) - { - /* Set status as ARM_MATH_SIZE_MISMATCH */ - status = ARM_MATH_SIZE_MISMATCH; - } - else -#endif /* #ifdef ARM_MATH_MATRIX_CHECK */ - { - - /*-------------------------------------------------------------------------------------------------------------- - * Matrix Inverse can be solved using elementary row operations. - * - * Gauss-Jordan Method: - * - * 1. First combine the identity matrix and the input matrix separated by a bar to form an - * augmented matrix as follows: - * _ _ _ _ _ _ _ _ - * | | a11 a12 | | | 1 0 | | | X11 X12 | - * | | | | | | | = | | - * |_ |_ a21 a22 _| | |_0 1 _| _| |_ X21 X21 _| - * - * 2. In our implementation, pDst Matrix is used as identity matrix. - * - * 3. Begin with the first row. Let i = 1. - * - * 4. Check to see if the pivot for row i is zero. 
- * The pivot is the element of the main diagonal that is on the current row. - * For instance, if working with row i, then the pivot element is aii. - * If the pivot is zero, exchange that row with a row below it that does not - * contain a zero in column i. If this is not possible, then an inverse - * to that matrix does not exist. - * - * 5. Divide every element of row i by the pivot. - * - * 6. For every row below and row i, replace that row with the sum of that row and - * a multiple of row i so that each new element in column i below row i is zero. - * - * 7. Move to the next row and column and repeat steps 2 through 5 until you have zeros - * for every element below and above the main diagonal. - * - * 8. Now an identical matrix is formed to the left of the bar(input matrix, src). - * Therefore, the matrix to the right of the bar is our solution(dst matrix, dst). - *----------------------------------------------------------------------------------------------------------------*/ - - /* - * Working pointer for destination matrix - */ - pOutT1 = pOut; - /* - * Loop over the number of rows - */ - rowCnt = numRows; - /* - * Making the destination matrix as identity matrix - */ - while (rowCnt > 0U) - { - /* - * Writing all zeroes in lower triangle of the destination matrix - */ - j = numRows - rowCnt; - while (j > 0U) - { - *pOutT1++ = 0.0f16; - j--; - } - /* - * Writing all ones in the diagonal of the destination matrix - */ - *pOutT1++ = 1.0f16; - /* - * Writing all zeroes in upper triangle of the destination matrix - */ - j = rowCnt - 1U; - while (j > 0U) - { - *pOutT1++ = 0.0f16; - j--; - } - /* - * Decrement the loop counter - */ - rowCnt--; - } - - /* - * Loop over the number of columns of the input matrix. - * All the elements in each column are processed by the row operations - */ - loopCnt = numCols; - /* - * Index modifier to navigate through the columns - */ - l = 0U; - while (loopCnt > 0U) - { - /* - * Check if the pivot element is zero.. - * If it is zero then interchange the row with non zero row below. - * If there is no non zero element to replace in the rows below, - * then the matrix is Singular. 
- */ - - /* - * Working pointer for the input matrix that points - * * to the pivot element of the particular row - */ - pInT1 = pIn + (l * numCols); - /* - * Working pointer for the destination matrix that points - * * to the pivot element of the particular row - */ - pOutT1 = pOut + (l * numCols); - /* - * Temporary variable to hold the pivot value - */ - in = *pInT1; - /* - * Destination pointer modifier - */ - k = 1U; - - /* - * Check if the pivot element is zero - */ - if (*pInT1 == 0.0f16) - { - /* - * Loop over the number rows present below - */ - for (i = (l + 1U); i < numRows; i++) - { - /* - * Update the input and destination pointers - */ - pInT2 = pInT1 + (numCols * i); - pOutT2 = pOutT1 + (numCols * k); - /* - * Check if there is a non zero pivot element to - * * replace in the rows below - */ - if (*pInT2 != 0.0f16) - { - f16x8_t vecA, vecB; - /* - * Loop over number of columns - * * to the right of the pilot element - */ - pTmpA = pInT1; - pTmpB = pInT2; - blkCnt = (numCols - l) >> 3; - while (blkCnt > 0U) - { - - vecA = vldrhq_f16(pTmpA); - vecB = vldrhq_f16(pTmpB); - vstrhq_f16(pTmpB, vecA); - vstrhq_f16(pTmpA, vecB); - - pTmpA += 8; - pTmpB += 8; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = (numCols - l) & 7; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp16q(blkCnt); - - vecA = vldrhq_f16(pTmpA); - vecB = vldrhq_f16(pTmpB); - vstrhq_p_f16(pTmpB, vecA, p0); - vstrhq_p_f16(pTmpA, vecB, p0); - } - - pInT1 += numCols - l; - pInT2 += numCols - l; - pTmpA = pOutT1; - pTmpB = pOutT2; - blkCnt = numCols >> 3; - while (blkCnt > 0U) - { - - vecA = vldrhq_f16(pTmpA); - vecB = vldrhq_f16(pTmpB); - vstrhq_f16(pTmpB, vecA); - vstrhq_f16(pTmpA, vecB); - pTmpA += 8; - pTmpB += 8; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - */ - blkCnt = numCols & 7; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp16q(blkCnt); - - vecA = vldrhq_f16(pTmpA); - vecB = vldrhq_f16(pTmpB); - vstrhq_p_f16(pTmpB, vecA, p0); - vstrhq_p_f16(pTmpA, vecB, p0); - } - - pOutT1 += numCols; - pOutT2 += numCols; - /* - * Flag to indicate whether exchange is done or not - */ - flag = 1U; - - /* - * Break after exchange is done - */ - break; - } - /* - * Update the destination pointer modifier - */ - k++; - } - } - - /* - * Update the status if the matrix is singular - */ - if ((flag != 1U) && (in == 0.0f16)) - { - return ARM_MATH_SINGULAR; - } - - /* - * Points to the pivot row of input and destination matrices - */ - pPivotRowIn = pIn + (l * numCols); - pPivotRowDst = pOut + (l * numCols); - - /* - * Temporary pointers to the pivot row pointers - */ - pInT1 = pPivotRowIn; - pOutT1 = pPivotRowDst; - - /* - * Pivot element of the row - */ - in = *(pIn + (l * numCols)); - - pTmpA = pInT1; - - f16x8_t invIn = vdupq_n_f16(1.0f16 / in); - - blkCnt = (numCols - l) >> 3; - f16x8_t vecA; - while (blkCnt > 0U) - { - *(f16x8_t *) pTmpA = *(f16x8_t *) pTmpA * invIn; - pTmpA += 8; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - */ - blkCnt = (numCols - l) & 7; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp16q(blkCnt); - - - vecA = vldrhq_f16(pTmpA); - vecA = vecA * invIn; - vstrhq_p_f16(pTmpA, vecA, p0); - } - - pInT1 += numCols - l; - /* - * Loop over number of columns - * * to the right of the pilot element - */ - - pTmpA = pOutT1; - blkCnt = numCols >> 3; - while (blkCnt > 0U) - { - *(f16x8_t *) pTmpA = *(f16x8_t *) pTmpA *invIn; - pTmpA += 8; - /* - * Decrement 
the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = numCols & 7; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp16q(blkCnt); - - vecA = vldrhq_f16(pTmpA); - vecA = vecA * invIn; - vstrhq_p_f16(pTmpA, vecA, p0); - } - - pOutT1 += numCols; - - /* - * Replace the rows with the sum of that row and a multiple of row i - * * so that each new element in column i above row i is zero. - */ - - /* - * Temporary pointers for input and destination matrices - */ - pInT1 = pIn; - pOutT1 = pOut; - - for (i = 0U; i < numRows; i++) - { - /* - * Check for the pivot element - */ - if (i == l) - { - /* - * If the processing element is the pivot element, - * only the columns to the right are to be processed - */ - pInT1 += numCols - l; - pOutT1 += numCols; - } - else - { - /* - * Element of the reference row - */ - - /* - * Working pointers for input and destination pivot rows - */ - pPRT_in = pPivotRowIn; - pPRT_pDst = pPivotRowDst; - /* - * Loop over the number of columns to the right of the pivot element, - * to replace the elements in the input matrix - */ - - in = *pInT1; - f16x8_t tmpV = vdupq_n_f16(in); - - blkCnt = (numCols - l) >> 3; - while (blkCnt > 0U) - { - f16x8_t vec1, vec2; - /* - * Replace the element by the sum of that row - * and a multiple of the reference row - */ - vec1 = vldrhq_f16(pInT1); - vec2 = vldrhq_f16(pPRT_in); - vec1 = vfmsq_f16(vec1, tmpV, vec2); - vstrhq_f16(pInT1, vec1); - pPRT_in += 8; - pInT1 += 8; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = (numCols - l) & 7; - if (blkCnt > 0U) - { - f16x8_t vec1, vec2; - mve_pred16_t p0 = vctp16q(blkCnt); - - vec1 = vldrhq_f16(pInT1); - vec2 = vldrhq_f16(pPRT_in); - vec1 = vfmsq_f16(vec1, tmpV, vec2); - vstrhq_p_f16(pInT1, vec1, p0); - pInT1 += blkCnt; - } - - blkCnt = numCols >> 3; - while (blkCnt > 0U) - { - f16x8_t vec1, vec2; - - /* - * Replace the element by the sum of that row - * and a multiple of the reference row - */ - vec1 = vldrhq_f16(pOutT1); - vec2 = vldrhq_f16(pPRT_pDst); - vec1 = vfmsq_f16(vec1, tmpV, vec2); - vstrhq_f16(pOutT1, vec1); - pPRT_pDst += 8; - pOutT1 += 8; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = numCols & 7; - if (blkCnt > 0U) - { - f16x8_t vec1, vec2; - mve_pred16_t p0 = vctp16q(blkCnt); - - vec1 = vldrhq_f16(pOutT1); - vec2 = vldrhq_f16(pPRT_pDst); - vec1 = vfmsq_f16(vec1, tmpV, vec2); - vstrhq_p_f16(pOutT1, vec1, p0); - - pInT2 += blkCnt; - pOutT1 += blkCnt; - } - } - /* - * Increment the temporary input pointer - */ - pInT1 = pInT1 + l; - } - /* - * Increment the input pointer - */ - pIn++; - /* - * Decrement the loop counter - */ - loopCnt--; - /* - * Increment the index modifier - */ - l++; - } - - /* - * Set status as ARM_MATH_SUCCESS - */ - status = ARM_MATH_SUCCESS; - - if ((flag != 1U) && (in == 0.0f16)) - { - pIn = pSrc->pData; - for (i = 0; i < numRows * numCols; i++) - { - if (pIn[i] != 0.0f16) - break; - } - - if (i == numRows * numCols) - status = ARM_MATH_SINGULAR; - } - } - /* Return to application */ - return (status); -} - -#else - arm_status arm_mat_inverse_f16( const arm_matrix_instance_f16 * pSrc, arm_matrix_instance_f16 * pDst) { float16_t *pIn = pSrc->pData; /* input data matrix pointer */ float16_t *pOut = pDst->pData; /* output data matrix pointer */ - float16_t *pInT1, *pInT2; /* Temporary input data matrix pointer */ - 
float16_t *pOutT1, *pOutT2; /* Temporary output data matrix pointer */ - float16_t *pPivotRowIn, *pPRT_in, *pPivotRowDst, *pPRT_pDst; /* Temporary input and output data matrix pointer */ + + float16_t *pTmp; uint32_t numRows = pSrc->numRows; /* Number of rows in the matrix */ uint32_t numCols = pSrc->numCols; /* Number of Cols in the matrix */ - _Float16 Xchg, in = 0.0f16, in1; /* Temporary input values */ - uint32_t i, rowCnt, flag = 0U, j, loopCnt, k, l; /* loop counters */ + + float16_t pivot = 0.0f16, newPivot=0.0f16; /* Temporary input values */ + uint32_t selectedRow,pivotRow,i, rowNb, rowCnt, flag = 0U, j,column; /* loop counters */ arm_status status; /* status of matrix inverse */ #ifdef ARM_MATH_MATRIX_CHECK @@ -589,7 +84,6 @@ arm_status arm_mat_inverse_f16( #endif /* #ifdef ARM_MATH_MATRIX_CHECK */ { - /*-------------------------------------------------------------------------------------------------------------- * Matrix Inverse can be solved using elementary row operations. * @@ -626,7 +120,7 @@ arm_status arm_mat_inverse_f16( *----------------------------------------------------------------------------------------------------------------*/ /* Working pointer for destination matrix */ - pOutT1 = pOut; + pTmp = pOut; /* Loop over the number of rows */ rowCnt = numRows; @@ -638,18 +132,18 @@ arm_status arm_mat_inverse_f16( j = numRows - rowCnt; while (j > 0U) { - *pOutT1++ = 0.0f16; + *pTmp++ = 0.0f16; j--; } /* Writing all ones in the diagonal of the destination matrix */ - *pOutT1++ = 1.0f16; + *pTmp++ = 1.0f16; /* Writing all zeroes in upper triangle of the destination matrix */ j = rowCnt - 1U; while (j > 0U) { - *pOutT1++ = 0.0f16; + *pTmp++ = 0.0f16; j--; } @@ -659,232 +153,105 @@ arm_status arm_mat_inverse_f16( /* Loop over the number of columns of the input matrix. All the elements in each column are processed by the row operations */ - loopCnt = numCols; /* Index modifier to navigate through the columns */ - l = 0U; - - while (loopCnt > 0U) + for(column = 0U; column < numCols; column++) { /* Check if the pivot element is zero.. * If it is zero then interchange the row with non zero row below. * If there is no non zero element to replace in the rows below, * then the matrix is Singular. 
*/ - /* Working pointer for the input matrix that points - * to the pivot element of the particular row */ - pInT1 = pIn + (l * numCols); - - /* Working pointer for the destination matrix that points - * to the pivot element of the particular row */ - pOutT1 = pOut + (l * numCols); + pivotRow = column; /* Temporary variable to hold the pivot value */ - in = *pInT1; - - - /* Destination pointer modifier */ - k = 1U; + pTmp = ELEM(pSrc,column,column) ; + pivot = *pTmp; + selectedRow = column; - /* Check if the pivot element is zero */ - if (*pInT1 == 0.0f16) - { + /* Loop over the number rows present below */ - for (i = (l + 1U); i < numRows; i++) - { + for (rowNb = column+1; rowNb < numRows; rowNb++) + { /* Update the input and destination pointers */ - pInT2 = pInT1 + (numCols * i); - pOutT2 = pOutT1 + (numCols * k); + pTmp = ELEM(pSrc,rowNb,column); + newPivot = *pTmp; + if (fabsf((float32_t)newPivot) > fabsf((float32_t)pivot)) + { + selectedRow = rowNb; + pivot = newPivot; + } + + } /* Check if there is a non zero pivot element to * replace in the rows below */ - if (*pInT2 != 0.0f16) - { + if (((_Float16)pivot != 0.0f16) && (selectedRow != column)) + { /* Loop over number of columns * to the right of the pilot element */ - j = numCols - l; - - while (j > 0U) - { - /* Exchange the row elements of the input matrix */ - Xchg = *pInT2; - *pInT2++ = *pInT1; - *pInT1++ = Xchg; - /* Decrement the loop counter */ - j--; - } - - /* Loop over number of columns of the destination matrix */ - j = numCols; - - while (j > 0U) - { - /* Exchange the row elements of the destination matrix */ - Xchg = *pOutT2; - *pOutT2++ = *pOutT1; - *pOutT1++ = Xchg; - - /* Decrement loop counter */ - j--; - } + SWAP_ROWS_F16(pSrc,column, pivotRow,selectedRow); + SWAP_ROWS_F16(pDst,0, pivotRow,selectedRow); + /* Flag to indicate whether exchange is done or not */ flag = 1U; - /* Break after exchange is done */ - break; - } - - /* Update the destination pointer modifier */ - k++; - - /* Decrement loop counter */ - } } + /* Update the status if the matrix is singular */ - if ((flag != 1U) && (in == 0.0f16)) + if ((flag != 1U) && ((_Float16)pivot == 0.0f16)) { return ARM_MATH_SINGULAR; } - /* Points to the pivot row of input and destination matrices */ - pPivotRowIn = pIn + (l * numCols); - pPivotRowDst = pOut + (l * numCols); - - /* Temporary pointers to the pivot row pointers */ - pInT1 = pPivotRowIn; - pInT2 = pPivotRowDst; - + /* Pivot element of the row */ - in = *pPivotRowIn; + pivot = 1.0f16 / (_Float16)pivot; - /* Loop over number of columns - * to the right of the pilot element */ - j = (numCols - l); - - while (j > 0U) - { - /* Divide each element of the row of the input matrix - * by the pivot element */ - in1 = *pInT1; - *pInT1++ = in1 / in; - - /* Decrement the loop counter */ - j--; - } - - /* Loop over number of columns of the destination matrix */ - j = numCols; - - while (j > 0U) - { - /* Divide each element of the row of the destination matrix - * by the pivot element */ - in1 = *pInT2; - *pInT2++ = in1 / in; - - /* Decrement the loop counter */ - j--; - } + SCALE_ROW_F16(pSrc,column,pivot,pivotRow); + SCALE_ROW_F16(pDst,0,pivot,pivotRow); + /* Replace the rows with the sum of that row and a multiple of row i * so that each new element in column i above row i is zero.*/ - /* Temporary pointers for input and destination matrices */ - pInT1 = pIn; - pInT2 = pOut; - - /* index used to check for pivot element */ - i = 0U; - - /* Loop over number of rows */ - /* to be replaced by the sum of that row and a 
multiple of row i */ - k = numRows; - - while (k > 0U) + rowNb = 0; + for (;rowNb < pivotRow; rowNb++) { - /* Check for the pivot element */ - if (i == l) - { - /* If the processing element is the pivot element, - only the columns to the right are to be processed */ - pInT1 += numCols - l; - - pInT2 += numCols; - } - else - { - /* Element of the reference row */ - in = *pInT1; - - /* Working pointers for input and destination pivot rows */ - pPRT_in = pPivotRowIn; - pPRT_pDst = pPivotRowDst; - - /* Loop over the number of columns to the right of the pivot element, - to replace the elements in the input matrix */ - j = (numCols - l); - - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - in1 = *pInT1; - *pInT1++ = in1 - (in * *pPRT_in++); - - /* Decrement the loop counter */ - j--; - } - - /* Loop over the number of columns to - replace the elements in the destination matrix */ - j = numCols; + pTmp = ELEM(pSrc,rowNb,column) ; + pivot = *pTmp; - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - in1 = *pInT2; - *pInT2++ = in1 - (in * *pPRT_pDst++); + MAS_ROW_F16(column,pSrc,rowNb,pivot,pSrc,pivotRow); + MAS_ROW_F16(0 ,pDst,rowNb,pivot,pDst,pivotRow); - /* Decrement loop counter */ - j--; - } - } + } - /* Increment temporary input pointer */ - pInT1 = pInT1 + l; + for (rowNb = pivotRow + 1; rowNb < numRows; rowNb++) + { + pTmp = ELEM(pSrc,rowNb,column) ; + pivot = *pTmp; - /* Decrement loop counter */ - k--; + MAS_ROW_F16(column,pSrc,rowNb,pivot,pSrc,pivotRow); + MAS_ROW_F16(0 ,pDst,rowNb,pivot,pDst,pivotRow); - /* Increment pivot index */ - i++; } - /* Increment the input pointer */ - pIn++; - - /* Decrement the loop counter */ - loopCnt--; - - /* Increment the index modifier */ - l++; } /* Set status as ARM_MATH_SUCCESS */ status = ARM_MATH_SUCCESS; - if ((flag != 1U) && (in == 0.0f16)) + if ((flag != 1U) && ((_Float16)pivot == 0.0f16)) { pIn = pSrc->pData; for (i = 0; i < numRows * numCols; i++) { - if (pIn[i] != 0.0f16) + if ((_Float16)pIn[i] != 0.0f16) break; } @@ -896,8 +263,6 @@ arm_status arm_mat_inverse_f16( /* Return to application */ return (status); } -#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ - /** @} end of MatrixInv group */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f32.c index f4c753b..83e8577 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_inverse_f32.c * Description: Floating-point matrix inverse * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
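To round off the Gauss-Jordan discussion above, a minimal sketch of arm_mat_inverse_f32 (illustrative only, not part of the patch; the demo name and values are hypothetical). Since the row operations shown above for the f16 variant work directly on pSrc->pData, the source matrix is presumably used as scratch space and modified, so keep a copy if the original values are still needed:

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h"

static void inverse_demo(void)
{
    float32_t aData[4] = { 4.0f, 7.0f,
                           2.0f, 6.0f };   /* det = 10, so invertible */
    float32_t invData[4];
    arm_matrix_instance_f32 A, Ainv;

    arm_mat_init_f32(&A, 2, 2, aData);
    arm_mat_init_f32(&Ainv, 2, 2, invData);

    /* Expected invData = { 0.6, -0.7, -0.2, 0.4 };
       a singular input returns ARM_MATH_SINGULAR */
    arm_status status = arm_mat_inverse_f32(&A, &Ainv);
    (void)status;
}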
* * SPDX-License-Identifier: Apache-2.0 * @@ -29,6 +29,8 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" + /** @ingroup groupMatrix @@ -52,7 +54,22 @@ of elementary row-operations to an identity matrix yields the inverse matrix. If the input matrix is singular, then the algorithm terminates and returns error status ARM_MATH_SINGULAR. - \image html MatrixInverse.gif "Matrix Inverse of a 3 x 3 matrix using Gauss-Jordan Method" + + @par Matrix Inverse of a 3 x 3 matrix using Gauss-Jordan Method + + \f[ + \begin{pmatrix} + a_{1,1} & a_{1,2} & a_{1,3} & | & 1 & 0 & 0\\ + a_{2,1} & a_{2,2} & a_{2,3} & | & 0 & 1 & 0\\ + a_{3,1} & a_{3,2} & a_{3,3} & | & 0 & 0 & 1\\ + \end{pmatrix} + \rightarrow + \begin{pmatrix} + 1 & 0 & 0 & | & x_{1,1} & x_{2,1} & x_{3,1} \\ + 0 & 1 & 0 & | & x_{1,2} & x_{2,2} & x_{3,2} \\ + 0 & 0 & 1 & | & x_{1,3} & x_{2,3} & x_{3,3} \\ + \end{pmatrix} + \f] */ /** @@ -69,937 +86,20 @@ - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed - \ref ARM_MATH_SINGULAR : Input matrix is found to be singular (non-invertible) */ -#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) - -arm_status arm_mat_inverse_f32( - const arm_matrix_instance_f32 * pSrc, - arm_matrix_instance_f32 * pDst) -{ - float32_t *pIn = pSrc->pData; /* input data matrix pointer */ - float32_t *pOut = pDst->pData; /* output data matrix pointer */ - float32_t *pInT1, *pInT2; /* Temporary input data matrix pointer */ - float32_t *pOutT1, *pOutT2; /* Temporary output data matrix pointer */ - float32_t *pPivotRowIn, *pPRT_in, *pPivotRowDst, *pPRT_pDst; /* Temporary input and output data matrix pointer */ - - uint32_t numRows = pSrc->numRows; /* Number of rows in the matrix */ - uint32_t numCols = pSrc->numCols; /* Number of Cols in the matrix */ - float32_t *pTmpA, *pTmpB; - - float32_t in = 0.0f; /* Temporary input values */ - uint32_t i, rowCnt, flag = 0U, j, loopCnt, k, l; /* loop counters */ - arm_status status; /* status of matrix inverse */ - uint32_t blkCnt; - -#ifdef ARM_MATH_MATRIX_CHECK - /* Check for matrix mismatch condition */ - if ((pSrc->numRows != pSrc->numCols) || (pDst->numRows != pDst->numCols) - || (pSrc->numRows != pDst->numRows)) - { - /* Set status as ARM_MATH_SIZE_MISMATCH */ - status = ARM_MATH_SIZE_MISMATCH; - } - else -#endif /* #ifdef ARM_MATH_MATRIX_CHECK */ - { - - /*-------------------------------------------------------------------------------------------------------------- - * Matrix Inverse can be solved using elementary row operations. - * - * Gauss-Jordan Method: - * - * 1. First combine the identity matrix and the input matrix separated by a bar to form an - * augmented matrix as follows: - * _ _ _ _ _ _ _ _ - * | | a11 a12 | | | 1 0 | | | X11 X12 | - * | | | | | | | = | | - * |_ |_ a21 a22 _| | |_0 1 _| _| |_ X21 X21 _| - * - * 2. In our implementation, pDst Matrix is used as identity matrix. - * - * 3. Begin with the first row. Let i = 1. - * - * 4. Check to see if the pivot for row i is zero. - * The pivot is the element of the main diagonal that is on the current row. - * For instance, if working with row i, then the pivot element is aii. - * If the pivot is zero, exchange that row with a row below it that does not - * contain a zero in column i. If this is not possible, then an inverse - * to that matrix does not exist. - * - * 5. Divide every element of row i by the pivot. - * - * 6. 
For every row below and row i, replace that row with the sum of that row and - * a multiple of row i so that each new element in column i below row i is zero. - * - * 7. Move to the next row and column and repeat steps 2 through 5 until you have zeros - * for every element below and above the main diagonal. - * - * 8. Now an identical matrix is formed to the left of the bar(input matrix, src). - * Therefore, the matrix to the right of the bar is our solution(dst matrix, dst). - *----------------------------------------------------------------------------------------------------------------*/ - - /* - * Working pointer for destination matrix - */ - pOutT1 = pOut; - /* - * Loop over the number of rows - */ - rowCnt = numRows; - /* - * Making the destination matrix as identity matrix - */ - while (rowCnt > 0U) - { - /* - * Writing all zeroes in lower triangle of the destination matrix - */ - j = numRows - rowCnt; - while (j > 0U) - { - *pOutT1++ = 0.0f; - j--; - } - /* - * Writing all ones in the diagonal of the destination matrix - */ - *pOutT1++ = 1.0f; - /* - * Writing all zeroes in upper triangle of the destination matrix - */ - j = rowCnt - 1U; - while (j > 0U) - { - *pOutT1++ = 0.0f; - j--; - } - /* - * Decrement the loop counter - */ - rowCnt--; - } - - /* - * Loop over the number of columns of the input matrix. - * All the elements in each column are processed by the row operations - */ - loopCnt = numCols; - /* - * Index modifier to navigate through the columns - */ - l = 0U; - while (loopCnt > 0U) - { - /* - * Check if the pivot element is zero.. - * If it is zero then interchange the row with non zero row below. - * If there is no non zero element to replace in the rows below, - * then the matrix is Singular. - */ - - /* - * Working pointer for the input matrix that points - * * to the pivot element of the particular row - */ - pInT1 = pIn + (l * numCols); - /* - * Working pointer for the destination matrix that points - * * to the pivot element of the particular row - */ - pOutT1 = pOut + (l * numCols); - /* - * Temporary variable to hold the pivot value - */ - in = *pInT1; - /* - * Destination pointer modifier - */ - k = 1U; - - /* - * Check if the pivot element is zero - */ - if (*pInT1 == 0.0f) - { - /* - * Loop over the number rows present below - */ - for (i = (l + 1U); i < numRows; i++) - { - /* - * Update the input and destination pointers - */ - pInT2 = pInT1 + (numCols * i); - pOutT2 = pOutT1 + (numCols * k); - /* - * Check if there is a non zero pivot element to - * * replace in the rows below - */ - if (*pInT2 != 0.0f) - { - f32x4_t vecA, vecB; - /* - * Loop over number of columns - * * to the right of the pilot element - */ - pTmpA = pInT1; - pTmpB = pInT2; - blkCnt = (numCols - l) >> 2; - while (blkCnt > 0U) - { - - vecA = vldrwq_f32(pTmpA); - vecB = vldrwq_f32(pTmpB); - vstrwq_f32(pTmpB, vecA); - vstrwq_f32(pTmpA, vecB); - - pTmpA += 4; - pTmpB += 4; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = (numCols - l) & 3; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp32q(blkCnt); - - vecA = vldrwq_f32(pTmpA); - vecB = vldrwq_f32(pTmpB); - vstrwq_p_f32(pTmpB, vecA, p0); - vstrwq_p_f32(pTmpA, vecB, p0); - } - - pInT1 += numCols - l; - pInT2 += numCols - l; - pTmpA = pOutT1; - pTmpB = pOutT2; - blkCnt = numCols >> 2; - while (blkCnt > 0U) - { - - vecA = vldrwq_f32(pTmpA); - vecB = vldrwq_f32(pTmpB); - vstrwq_f32(pTmpB, vecA); - vstrwq_f32(pTmpA, vecB); - pTmpA += 4; - pTmpB += 4; - /* 
- * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - */ - blkCnt = numCols & 3; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp32q(blkCnt); - - vecA = vldrwq_f32(pTmpA); - vecB = vldrwq_f32(pTmpB); - vstrwq_p_f32(pTmpB, vecA, p0); - vstrwq_p_f32(pTmpA, vecB, p0); - } - - pOutT1 += numCols; - pOutT2 += numCols; - /* - * Flag to indicate whether exchange is done or not - */ - flag = 1U; - - /* - * Break after exchange is done - */ - break; - } - /* - * Update the destination pointer modifier - */ - k++; - } - } - - /* - * Update the status if the matrix is singular - */ - if ((flag != 1U) && (in == 0.0f)) - { - return ARM_MATH_SINGULAR; - } - - /* - * Points to the pivot row of input and destination matrices - */ - pPivotRowIn = pIn + (l * numCols); - pPivotRowDst = pOut + (l * numCols); - - /* - * Temporary pointers to the pivot row pointers - */ - pInT1 = pPivotRowIn; - pOutT1 = pPivotRowDst; - - /* - * Pivot element of the row - */ - in = *(pIn + (l * numCols)); - - pTmpA = pInT1; - - f32x4_t invIn = vdupq_n_f32(1.0f / in); - - blkCnt = (numCols - l) >> 2; - f32x4_t vecA; - while (blkCnt > 0U) - { - *(f32x4_t *) pTmpA = *(f32x4_t *) pTmpA * invIn; - pTmpA += 4; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - */ - blkCnt = (numCols - l) & 3; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp32q(blkCnt); - - - vecA = vldrwq_f32(pTmpA); - vecA = vecA * invIn; - vstrwq_p_f32(pTmpA, vecA, p0); - } - - pInT1 += numCols - l; - /* - * Loop over number of columns - * * to the right of the pilot element - */ - - pTmpA = pOutT1; - blkCnt = numCols >> 2; - while (blkCnt > 0U) - { - *(f32x4_t *) pTmpA = *(f32x4_t *) pTmpA *invIn; - pTmpA += 4; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = numCols & 3; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp32q(blkCnt); - - vecA = vldrwq_f32(pTmpA); - vecA = vecA * invIn; - vstrwq_p_f32(pTmpA, vecA, p0); - } - - pOutT1 += numCols; - - /* - * Replace the rows with the sum of that row and a multiple of row i - * * so that each new element in column i above row i is zero. 
- */ - - /* - * Temporary pointers for input and destination matrices - */ - pInT1 = pIn; - pOutT1 = pOut; - - for (i = 0U; i < numRows; i++) - { - /* - * Check for the pivot element - */ - if (i == l) - { - /* - * If the processing element is the pivot element, - * only the columns to the right are to be processed - */ - pInT1 += numCols - l; - pOutT1 += numCols; - } - else - { - /* - * Element of the reference row - */ - - /* - * Working pointers for input and destination pivot rows - */ - pPRT_in = pPivotRowIn; - pPRT_pDst = pPivotRowDst; - /* - * Loop over the number of columns to the right of the pivot element, - * to replace the elements in the input matrix - */ - - in = *pInT1; - f32x4_t tmpV = vdupq_n_f32(in); - - blkCnt = (numCols - l) >> 2; - while (blkCnt > 0U) - { - f32x4_t vec1, vec2; - /* - * Replace the element by the sum of that row - * and a multiple of the reference row - */ - vec1 = vldrwq_f32(pInT1); - vec2 = vldrwq_f32(pPRT_in); - vec1 = vfmsq_f32(vec1, tmpV, vec2); - vstrwq_f32(pInT1, vec1); - pPRT_in += 4; - pInT1 += 4; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = (numCols - l) & 3; - if (blkCnt > 0U) - { - f32x4_t vec1, vec2; - mve_pred16_t p0 = vctp32q(blkCnt); - - vec1 = vldrwq_f32(pInT1); - vec2 = vldrwq_f32(pPRT_in); - vec1 = vfmsq_f32(vec1, tmpV, vec2); - vstrwq_p_f32(pInT1, vec1, p0); - pInT1 += blkCnt; - } - - blkCnt = numCols >> 2; - while (blkCnt > 0U) - { - f32x4_t vec1, vec2; - - /* - * Replace the element by the sum of that row - * and a multiple of the reference row - */ - vec1 = vldrwq_f32(pOutT1); - vec2 = vldrwq_f32(pPRT_pDst); - vec1 = vfmsq_f32(vec1, tmpV, vec2); - vstrwq_f32(pOutT1, vec1); - pPRT_pDst += 4; - pOutT1 += 4; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = numCols & 3; - if (blkCnt > 0U) - { - f32x4_t vec1, vec2; - mve_pred16_t p0 = vctp32q(blkCnt); - - vec1 = vldrwq_f32(pOutT1); - vec2 = vldrwq_f32(pPRT_pDst); - vec1 = vfmsq_f32(vec1, tmpV, vec2); - vstrwq_p_f32(pOutT1, vec1, p0); - - pInT2 += blkCnt; - pOutT1 += blkCnt; - } - } - /* - * Increment the temporary input pointer - */ - pInT1 = pInT1 + l; - } - /* - * Increment the input pointer - */ - pIn++; - /* - * Decrement the loop counter - */ - loopCnt--; - /* - * Increment the index modifier - */ - l++; - } - - /* - * Set status as ARM_MATH_SUCCESS - */ - status = ARM_MATH_SUCCESS; - - if ((flag != 1U) && (in == 0.0f)) - { - pIn = pSrc->pData; - for (i = 0; i < numRows * numCols; i++) - { - if (pIn[i] != 0.0f) - break; - } - - if (i == numRows * numCols) - status = ARM_MATH_SINGULAR; - } - } - /* Return to application */ - return (status); -} - -#else -#if defined(ARM_MATH_NEON) -arm_status arm_mat_inverse_f32( - const arm_matrix_instance_f32 * pSrc, - arm_matrix_instance_f32 * pDst) -{ - float32_t *pIn = pSrc->pData; /* input data matrix pointer */ - float32_t *pOut = pDst->pData; /* output data matrix pointer */ - float32_t *pInT1, *pInT2; /* Temporary input data matrix pointer */ - float32_t *pOutT1, *pOutT2; /* Temporary output data matrix pointer */ - float32_t *pPivotRowIn, *pPRT_in, *pPivotRowDst, *pPRT_pDst; /* Temporary input and output data matrix pointer */ - uint32_t numRows = pSrc->numRows; /* Number of rows in the matrix */ - uint32_t numCols = pSrc->numCols; /* Number of Cols in the matrix */ - - - float32_t Xchg, in = 0.0f, in1; /* Temporary input values */ - uint32_t i, rowCnt, 
flag = 0U, j, loopCnt, k, l; /* loop counters */ - arm_status status; /* status of matrix inverse */ - float32x4_t vec1; - float32x4_t vec2; - float32x4_t tmpV; - -#ifdef ARM_MATH_MATRIX_CHECK - - /* Check for matrix mismatch condition */ - if ((pSrc->numRows != pSrc->numCols) || (pDst->numRows != pDst->numCols) - || (pSrc->numRows != pDst->numRows)) - { - /* Set status as ARM_MATH_SIZE_MISMATCH */ - status = ARM_MATH_SIZE_MISMATCH; - } - else -#endif /* #ifdef ARM_MATH_MATRIX_CHECK */ - - { - /*-------------------------------------------------------------------------------------------------------------- - * Matrix Inverse can be solved using elementary row operations. - * - * Gauss-Jordan Method: - * - * 1. First combine the identity matrix and the input matrix separated by a bar to form an - * augmented matrix as follows: - * _ _ _ _ - * | a11 a12 | 1 0 | | X11 X12 | - * | | | = | | - * |_ a21 a22 | 0 1 _| |_ X21 X21 _| - * - * 2. In our implementation, pDst Matrix is used as identity matrix. - * - * 3. Begin with the first row. Let i = 1. - * - * 4. Check to see if the pivot for row i is zero. - * The pivot is the element of the main diagonal that is on the current row. - * For instance, if working with row i, then the pivot element is aii. - * If the pivot is zero, exchange that row with a row below it that does not - * contain a zero in column i. If this is not possible, then an inverse - * to that matrix does not exist. - * - * 5. Divide every element of row i by the pivot. - * - * 6. For every row below and row i, replace that row with the sum of that row and - * a multiple of row i so that each new element in column i below row i is zero. - * - * 7. Move to the next row and column and repeat steps 2 through 5 until you have zeros - * for every element below and above the main diagonal. - * - * 8. Now an identical matrix is formed to the left of the bar(input matrix, pSrc). - * Therefore, the matrix to the right of the bar is our solution(pDst matrix, pDst). - *----------------------------------------------------------------------------------------------------------------*/ - - /* Working pointer for destination matrix */ - pOutT1 = pOut; - - /* Loop over the number of rows */ - rowCnt = numRows; - - /* Making the destination matrix as identity matrix */ - while (rowCnt > 0U) - { - /* Writing all zeroes in lower triangle of the destination matrix */ - j = numRows - rowCnt; - while (j > 0U) - { - *pOutT1++ = 0.0f; - j--; - } - - /* Writing all ones in the diagonal of the destination matrix */ - *pOutT1++ = 1.0f; - - /* Writing all zeroes in upper triangle of the destination matrix */ - j = rowCnt - 1U; - - while (j > 0U) - { - *pOutT1++ = 0.0f; - j--; - } - - /* Decrement the loop counter */ - rowCnt--; - } - - /* Loop over the number of columns of the input matrix. - All the elements in each column are processed by the row operations */ - loopCnt = numCols; - - /* Index modifier to navigate through the columns */ - l = 0U; - - while (loopCnt > 0U) - { - /* Check if the pivot element is zero.. - * If it is zero then interchange the row with non zero row below. - * If there is no non zero element to replace in the rows below, - * then the matrix is Singular. 
*/ - - /* Working pointer for the input matrix that points - * to the pivot element of the particular row */ - pInT1 = pIn + (l * numCols); - - /* Working pointer for the destination matrix that points - * to the pivot element of the particular row */ - pOutT1 = pOut + (l * numCols); - - /* Temporary variable to hold the pivot value */ - in = *pInT1; - - - /* Destination pointer modifier */ - k = 1U; - - /* Check if the pivot element is zero */ - if (*pInT1 == 0.0f) - { - /* Loop over the number rows present below */ - for (i = (l + 1U); i < numRows; i++) - { - /* Update the input and destination pointers */ - pInT2 = pInT1 + (numCols * i); - pOutT2 = pOutT1 + (numCols * k); - - /* Check if there is a non zero pivot element to - * replace in the rows below */ - if (*pInT2 != 0.0f) - { - /* Loop over number of columns - * to the right of the pilot element */ - j = numCols - l; - - while (j > 0U) - { - /* Exchange the row elements of the input matrix */ - Xchg = *pInT2; - *pInT2++ = *pInT1; - *pInT1++ = Xchg; - - /* Decrement the loop counter */ - j--; - } - - /* Loop over number of columns of the destination matrix */ - j = numCols; - - while (j > 0U) - { - /* Exchange the row elements of the destination matrix */ - Xchg = *pOutT2; - *pOutT2++ = *pOutT1; - *pOutT1++ = Xchg; - - /* Decrement the loop counter */ - j--; - } - - /* Flag to indicate whether exchange is done or not */ - flag = 1U; - - /* Break after exchange is done */ - break; - } - - /* Update the destination pointer modifier */ - k++; - } - } - - /* Update the status if the matrix is singular */ - if ((flag != 1U) && (in == 0.0f)) - { - return ARM_MATH_SINGULAR; - } - - /* Points to the pivot row of input and destination matrices */ - pPivotRowIn = pIn + (l * numCols); - pPivotRowDst = pOut + (l * numCols); - - /* Temporary pointers to the pivot row pointers */ - pInT1 = pPivotRowIn; - pInT2 = pPivotRowDst; - - /* Pivot element of the row */ - in = *pPivotRowIn; - tmpV = vdupq_n_f32(1.0f/in); - - /* Loop over number of columns - * to the right of the pilot element */ - j = (numCols - l) >> 2; - - while (j > 0U) - { - /* Divide each element of the row of the input matrix - * by the pivot element */ - vec1 = vld1q_f32(pInT1); - - vec1 = vmulq_f32(vec1, tmpV); - vst1q_f32(pInT1, vec1); - pInT1 += 4; - - /* Decrement the loop counter */ - j--; - } - - /* Tail */ - j = (numCols - l) & 3; - - while (j > 0U) - { - /* Divide each element of the row of the input matrix - * by the pivot element */ - in1 = *pInT1; - *pInT1++ = in1 / in; - - /* Decrement the loop counter */ - j--; - } - - /* Loop over number of columns of the destination matrix */ - j = numCols >> 2; - - while (j > 0U) - { - /* Divide each element of the row of the destination matrix - * by the pivot element */ - vec1 = vld1q_f32(pInT2); - - vec1 = vmulq_f32(vec1, tmpV); - vst1q_f32(pInT2, vec1); - pInT2 += 4; - - /* Decrement the loop counter */ - j--; - } - - /* Tail */ - j = numCols & 3; - - while (j > 0U) - { - /* Divide each element of the row of the destination matrix - * by the pivot element */ - in1 = *pInT2; - *pInT2++ = in1 / in; - - /* Decrement the loop counter */ - j--; - } - - /* Replace the rows with the sum of that row and a multiple of row i - * so that each new element in column i above row i is zero.*/ - - /* Temporary pointers for input and destination matrices */ - pInT1 = pIn; - pInT2 = pOut; - - /* index used to check for pivot element */ - i = 0U; - - /* Loop over number of rows */ - /* to be replaced by the sum of that row and a multiple of row i 
*/ - k = numRows; - - while (k > 0U) - { - /* Check for the pivot element */ - if (i == l) - { - /* If the processing element is the pivot element, - only the columns to the right are to be processed */ - pInT1 += numCols - l; - - pInT2 += numCols; - } - else - { - /* Element of the reference row */ - in = *pInT1; - tmpV = vdupq_n_f32(in); - - /* Working pointers for input and destination pivot rows */ - pPRT_in = pPivotRowIn; - pPRT_pDst = pPivotRowDst; - - /* Loop over the number of columns to the right of the pivot element, - to replace the elements in the input matrix */ - j = (numCols - l) >> 2; - - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - vec1 = vld1q_f32(pInT1); - vec2 = vld1q_f32(pPRT_in); - vec1 = vmlsq_f32(vec1, tmpV, vec2); - vst1q_f32(pInT1, vec1); - pPRT_in += 4; - pInT1 += 4; - - /* Decrement the loop counter */ - j--; - } - - /* Tail */ - j = (numCols - l) & 3; - - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - in1 = *pInT1; - *pInT1++ = in1 - (in * *pPRT_in++); - - /* Decrement the loop counter */ - j--; - } - - /* Loop over the number of columns to - replace the elements in the destination matrix */ - j = numCols >> 2; - - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - vec1 = vld1q_f32(pInT2); - vec2 = vld1q_f32(pPRT_pDst); - vec1 = vmlsq_f32(vec1, tmpV, vec2); - vst1q_f32(pInT2, vec1); - pPRT_pDst += 4; - pInT2 += 4; - - /* Decrement the loop counter */ - j--; - } - - /* Tail */ - j = numCols & 3; - - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - in1 = *pInT2; - *pInT2++ = in1 - (in * *pPRT_pDst++); - - /* Decrement the loop counter */ - j--; - } - - } - - /* Increment the temporary input pointer */ - pInT1 = pInT1 + l; - - /* Decrement the loop counter */ - k--; - - /* Increment the pivot index */ - i++; - } - - /* Increment the input pointer */ - pIn++; - - /* Decrement the loop counter */ - loopCnt--; - - /* Increment the index modifier */ - l++; - } - - /* Set status as ARM_MATH_SUCCESS */ - status = ARM_MATH_SUCCESS; - - if ((flag != 1U) && (in == 0.0f)) - { - pIn = pSrc->pData; - for (i = 0; i < numRows * numCols; i++) - { - if (pIn[i] != 0.0f) - break; - } - - if (i == numRows * numCols) - status = ARM_MATH_SINGULAR; - } - } - /* Return to application */ - return (status); -} -#else arm_status arm_mat_inverse_f32( const arm_matrix_instance_f32 * pSrc, arm_matrix_instance_f32 * pDst) { float32_t *pIn = pSrc->pData; /* input data matrix pointer */ float32_t *pOut = pDst->pData; /* output data matrix pointer */ - float32_t *pInT1, *pInT2; /* Temporary input data matrix pointer */ - float32_t *pOutT1, *pOutT2; /* Temporary output data matrix pointer */ - float32_t *pPivotRowIn, *pPRT_in, *pPivotRowDst, *pPRT_pDst; /* Temporary input and output data matrix pointer */ + + float32_t *pTmp; uint32_t numRows = pSrc->numRows; /* Number of rows in the matrix */ uint32_t numCols = pSrc->numCols; /* Number of Cols in the matrix */ -#if defined (ARM_MATH_DSP) - float32_t Xchg, in = 0.0f, in1; /* Temporary input values */ - uint32_t i, rowCnt, flag = 0U, j, loopCnt, k, l; /* loop counters */ + float32_t pivot = 0.0f, newPivot=0.0f; /* Temporary input values */ + uint32_t selectedRow,pivotRow,i, rowNb, rowCnt, flag = 0U, j,column; /* loop counters */ arm_status status; /* status of matrix inverse */ #ifdef 
ARM_MATH_MATRIX_CHECK @@ -1017,7 +117,6 @@ arm_status arm_mat_inverse_f32( #endif /* #ifdef ARM_MATH_MATRIX_CHECK */ { - /*-------------------------------------------------------------------------------------------------------------- * Matrix Inverse can be solved using elementary row operations. * @@ -1054,7 +153,7 @@ arm_status arm_mat_inverse_f32( *----------------------------------------------------------------------------------------------------------------*/ /* Working pointer for destination matrix */ - pOutT1 = pOut; + pTmp = pOut; /* Loop over the number of rows */ rowCnt = numRows; @@ -1066,18 +165,18 @@ arm_status arm_mat_inverse_f32( j = numRows - rowCnt; while (j > 0U) { - *pOutT1++ = 0.0f; + *pTmp++ = 0.0f; j--; } /* Writing all ones in the diagonal of the destination matrix */ - *pOutT1++ = 1.0f; + *pTmp++ = 1.0f; /* Writing all zeroes in upper triangle of the destination matrix */ j = rowCnt - 1U; while (j > 0U) { - *pOutT1++ = 0.0f; + *pTmp++ = 0.0f; j--; } @@ -1087,486 +186,100 @@ arm_status arm_mat_inverse_f32( /* Loop over the number of columns of the input matrix. All the elements in each column are processed by the row operations */ - loopCnt = numCols; /* Index modifier to navigate through the columns */ - l = 0U; - - while (loopCnt > 0U) + for(column = 0U; column < numCols; column++) { /* Check if the pivot element is zero.. * If it is zero then interchange the row with non zero row below. * If there is no non zero element to replace in the rows below, * then the matrix is Singular. */ - /* Working pointer for the input matrix that points - * to the pivot element of the particular row */ - pInT1 = pIn + (l * numCols); - - /* Working pointer for the destination matrix that points - * to the pivot element of the particular row */ - pOutT1 = pOut + (l * numCols); + pivotRow = column; /* Temporary variable to hold the pivot value */ - in = *pInT1; + pTmp = ELEM(pSrc,column,column) ; + pivot = *pTmp; + selectedRow = column; + /* Find maximum pivot in column */ - /* Destination pointer modifier */ - k = 1U; - - /* Check if the pivot element is zero */ - if (*pInT1 == 0.0f) - { /* Loop over the number rows present below */ - for (i = (l + 1U); i < numRows; i++) - { - /* Update the input and destination pointers */ - pInT2 = pInT1 + (numCols * i); - pOutT2 = pOutT1 + (numCols * k); - - /* Check if there is a non zero pivot element to - * replace in the rows below */ - if (*pInT2 != 0.0f) - { - /* Loop over number of columns - * to the right of the pilot element */ - j = numCols - l; - - while (j > 0U) - { - /* Exchange the row elements of the input matrix */ - Xchg = *pInT2; - *pInT2++ = *pInT1; - *pInT1++ = Xchg; - - /* Decrement the loop counter */ - j--; - } - - /* Loop over number of columns of the destination matrix */ - j = numCols; - - while (j > 0U) - { - /* Exchange the row elements of the destination matrix */ - Xchg = *pOutT2; - *pOutT2++ = *pOutT1; - *pOutT1++ = Xchg; - - /* Decrement loop counter */ - j--; - } - - /* Flag to indicate whether exchange is done or not */ - flag = 1U; - - /* Break after exchange is done */ - break; - } - - /* Update the destination pointer modifier */ - k++; - - /* Decrement loop counter */ - } - } - - /* Update the status if the matrix is singular */ - if ((flag != 1U) && (in == 0.0f)) - { - return ARM_MATH_SINGULAR; - } - - /* Points to the pivot row of input and destination matrices */ - pPivotRowIn = pIn + (l * numCols); - pPivotRowDst = pOut + (l * numCols); - - /* Temporary pointers to the pivot row pointers */ - pInT1 = 
pPivotRowIn; - pInT2 = pPivotRowDst; - - /* Pivot element of the row */ - in = *pPivotRowIn; - - /* Loop over number of columns - * to the right of the pilot element */ - j = (numCols - l); - - while (j > 0U) - { - /* Divide each element of the row of the input matrix - * by the pivot element */ - in1 = *pInT1; - *pInT1++ = in1 / in; - - /* Decrement the loop counter */ - j--; - } - - /* Loop over number of columns of the destination matrix */ - j = numCols; - - while (j > 0U) - { - /* Divide each element of the row of the destination matrix - * by the pivot element */ - in1 = *pInT2; - *pInT2++ = in1 / in; - - /* Decrement the loop counter */ - j--; - } - - /* Replace the rows with the sum of that row and a multiple of row i - * so that each new element in column i above row i is zero.*/ - - /* Temporary pointers for input and destination matrices */ - pInT1 = pIn; - pInT2 = pOut; - - /* index used to check for pivot element */ - i = 0U; - - /* Loop over number of rows */ - /* to be replaced by the sum of that row and a multiple of row i */ - k = numRows; - - while (k > 0U) + for (rowNb = column+1; rowNb < numRows; rowNb++) { - /* Check for the pivot element */ - if (i == l) - { - /* If the processing element is the pivot element, - only the columns to the right are to be processed */ - pInT1 += numCols - l; - - pInT2 += numCols; - } - else - { - /* Element of the reference row */ - in = *pInT1; - - /* Working pointers for input and destination pivot rows */ - pPRT_in = pPivotRowIn; - pPRT_pDst = pPivotRowDst; - - /* Loop over the number of columns to the right of the pivot element, - to replace the elements in the input matrix */ - j = (numCols - l); - - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - in1 = *pInT1; - *pInT1++ = in1 - (in * *pPRT_in++); - - /* Decrement the loop counter */ - j--; - } - - /* Loop over the number of columns to - replace the elements in the destination matrix */ - j = numCols; - - while (j > 0U) + /* Update the input and destination pointers */ + pTmp = ELEM(pSrc,rowNb,column); + newPivot = *pTmp; + if (fabsf(newPivot) > fabsf(pivot)) { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - in1 = *pInT2; - *pInT2++ = in1 - (in * *pPRT_pDst++); - - /* Decrement loop counter */ - j--; + selectedRow = rowNb; + pivot = newPivot; } - - } - - /* Increment temporary input pointer */ - pInT1 = pInT1 + l; - - /* Decrement loop counter */ - k--; - - /* Increment pivot index */ - i++; - } - - /* Increment the input pointer */ - pIn++; - - /* Decrement the loop counter */ - loopCnt--; - - /* Increment the index modifier */ - l++; - } - - -#else - - float32_t Xchg, in = 0.0f; /* Temporary input values */ - uint32_t i, rowCnt, flag = 0U, j, loopCnt, k, l; /* loop counters */ - arm_status status; /* status of matrix inverse */ - -#ifdef ARM_MATH_MATRIX_CHECK - - /* Check for matrix mismatch condition */ - if ((pSrc->numRows != pSrc->numCols) || - (pDst->numRows != pDst->numCols) || - (pSrc->numRows != pDst->numRows) ) - { - /* Set status as ARM_MATH_SIZE_MISMATCH */ - status = ARM_MATH_SIZE_MISMATCH; - } - else - -#endif /* #ifdef ARM_MATH_MATRIX_CHECK */ - - { - - /*-------------------------------------------------------------------------------------------------------------- - * Matrix Inverse can be solved using elementary row operations. - * - * Gauss-Jordan Method: - * - * 1. 
First combine the identity matrix and the input matrix separated by a bar to form an - * augmented matrix as follows: - * _ _ _ _ _ _ _ _ - * | | a11 a12 | | | 1 0 | | | X11 X12 | - * | | | | | | | = | | - * |_ |_ a21 a22 _| | |_0 1 _| _| |_ X21 X21 _| - * - * 2. In our implementation, pDst Matrix is used as identity matrix. - * - * 3. Begin with the first row. Let i = 1. - * - * 4. Check to see if the pivot for row i is zero. - * The pivot is the element of the main diagonal that is on the current row. - * For instance, if working with row i, then the pivot element is aii. - * If the pivot is zero, exchange that row with a row below it that does not - * contain a zero in column i. If this is not possible, then an inverse - * to that matrix does not exist. - * - * 5. Divide every element of row i by the pivot. - * - * 6. For every row below and row i, replace that row with the sum of that row and - * a multiple of row i so that each new element in column i below row i is zero. - * - * 7. Move to the next row and column and repeat steps 2 through 5 until you have zeros - * for every element below and above the main diagonal. - * - * 8. Now an identical matrix is formed to the left of the bar(input matrix, src). - * Therefore, the matrix to the right of the bar is our solution(dst matrix, dst). - *----------------------------------------------------------------------------------------------------------------*/ - - /* Working pointer for destination matrix */ - pOutT1 = pOut; - - /* Loop over the number of rows */ - rowCnt = numRows; - - /* Making the destination matrix as identity matrix */ - while (rowCnt > 0U) - { - /* Writing all zeroes in lower triangle of the destination matrix */ - j = numRows - rowCnt; - while (j > 0U) - { - *pOutT1++ = 0.0f; - j--; } - - /* Writing all ones in the diagonal of the destination matrix */ - *pOutT1++ = 1.0f; - - /* Writing all zeroes in upper triangle of the destination matrix */ - j = rowCnt - 1U; - while (j > 0U) + + /* Check if there is a non zero pivot element to + * replace in the rows below */ + if ((pivot != 0.0f) && (selectedRow != column)) { - *pOutT1++ = 0.0f; - j--; - } - - /* Decrement loop counter */ - rowCnt--; - } - - /* Loop over the number of columns of the input matrix. - All the elements in each column are processed by the row operations */ - loopCnt = numCols; - - /* Index modifier to navigate through the columns */ - l = 0U; - - while (loopCnt > 0U) - { - /* Check if the pivot element is zero.. - * If it is zero then interchange the row with non zero row below. - * If there is no non zero element to replace in the rows below, - * then the matrix is Singular. 
*/ - - /* Working pointer for the input matrix that points - * to the pivot element of the particular row */ - pInT1 = pIn + (l * numCols); - - /* Working pointer for the destination matrix that points - * to the pivot element of the particular row */ - pOutT1 = pOut + (l * numCols); - - /* Temporary variable to hold the pivot value */ - in = *pInT1; - - /* Destination pointer modifier */ - k = 1U; - - /* Check if the pivot element is zero */ - if (*pInT1 == 0.0f) - { - /* Loop over the number rows present below */ - for (i = (l + 1U); i < numRows; i++) - { - /* Update the input and destination pointers */ - pInT2 = pInT1 + (numCols * i); - pOutT2 = pOutT1 + (numCols * k); - - /* Check if there is a non zero pivot element to - * replace in the rows below */ - if (*pInT2 != 0.0f) - { - /* Loop over number of columns - * to the right of the pilot element */ - for (j = 0U; j < (numCols - l); j++) - { - /* Exchange the row elements of the input matrix */ - Xchg = *pInT2; - *pInT2++ = *pInT1; - *pInT1++ = Xchg; - } - - for (j = 0U; j < numCols; j++) - { - Xchg = *pOutT2; - *pOutT2++ = *pOutT1; - *pOutT1++ = Xchg; - } + + SWAP_ROWS_F32(pSrc,column, pivotRow,selectedRow); + SWAP_ROWS_F32(pDst,0, pivotRow,selectedRow); + /* Flag to indicate whether exchange is done or not */ flag = 1U; + } - /* Break after exchange is done */ - break; - } - /* Update the destination pointer modifier */ - k++; - } - } + + /* Update the status if the matrix is singular */ - if ((flag != 1U) && (in == 0.0f)) + if ((flag != 1U) && (pivot == 0.0f)) { return ARM_MATH_SINGULAR; } - /* Points to the pivot row of input and destination matrices */ - pPivotRowIn = pIn + (l * numCols); - pPivotRowDst = pOut + (l * numCols); - - /* Temporary pointers to the pivot row pointers */ - pInT1 = pPivotRowIn; - pOutT1 = pPivotRowDst; - + /* Pivot element of the row */ - in = *(pIn + (l * numCols)); + pivot = 1.0f / pivot; - /* Loop over number of columns - * to the right of the pilot element */ - for (j = 0U; j < (numCols - l); j++) - { - /* Divide each element of the row of the input matrix - * by the pivot element */ - *pInT1 = *pInT1 / in; - pInT1++; - } - for (j = 0U; j < numCols; j++) - { - /* Divide each element of the row of the destination matrix - * by the pivot element */ - *pOutT1 = *pOutT1 / in; - pOutT1++; - } + SCALE_ROW_F32(pSrc,column,pivot,pivotRow); + SCALE_ROW_F32(pDst,0,pivot,pivotRow); + /* Replace the rows with the sum of that row and a multiple of row i * so that each new element in column i above row i is zero.*/ - /* Temporary pointers for input and destination matrices */ - pInT1 = pIn; - pOutT1 = pOut; - - for (i = 0U; i < numRows; i++) + rowNb = 0; + for (;rowNb < pivotRow; rowNb++) { - /* Check for the pivot element */ - if (i == l) - { - /* If the processing element is the pivot element, - only the columns to the right are to be processed */ - pInT1 += numCols - l; - pOutT1 += numCols; - } - else - { - /* Element of the reference row */ - in = *pInT1; - - /* Working pointers for input and destination pivot rows */ - pPRT_in = pPivotRowIn; - pPRT_pDst = pPivotRowDst; - - /* Loop over the number of columns to the right of the pivot element, - to replace the elements in the input matrix */ - for (j = 0U; j < (numCols - l); j++) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - *pInT1 = *pInT1 - (in * *pPRT_in++); - pInT1++; - } + pTmp = ELEM(pSrc,rowNb,column) ; + pivot = *pTmp; - /* Loop over the number of columns to - replace the elements in the destination 
matrix */ - for (j = 0U; j < numCols; j++) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - *pOutT1 = *pOutT1 - (in * *pPRT_pDst++); - pOutT1++; - } + MAS_ROW_F32(column,pSrc,rowNb,pivot,pSrc,pivotRow); + MAS_ROW_F32(0 ,pDst,rowNb,pivot,pDst,pivotRow); - } - /* Increment temporary input pointer */ - pInT1 = pInT1 + l; } - /* Increment the input pointer */ - pIn++; + for (rowNb = pivotRow + 1; rowNb < numRows; rowNb++) + { + pTmp = ELEM(pSrc,rowNb,column) ; + pivot = *pTmp; - /* Decrement the loop counter */ - loopCnt--; + MAS_ROW_F32(column,pSrc,rowNb,pivot,pSrc,pivotRow); + MAS_ROW_F32(0 ,pDst,rowNb,pivot,pDst,pivotRow); - /* Increment the index modifier */ - l++; - } + } -#endif /* #if defined (ARM_MATH_DSP) */ + } /* Set status as ARM_MATH_SUCCESS */ status = ARM_MATH_SUCCESS; - if ((flag != 1U) && (in == 0.0f)) + if ((flag != 1U) && (pivot == 0.0f)) { pIn = pSrc->pData; for (i = 0; i < numRows * numCols; i++) @@ -1583,9 +296,6 @@ arm_status arm_mat_inverse_f32( /* Return to application */ return (status); } -#endif /* #if defined(ARM_MATH_NEON) */ -#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ - /** @} end of MatrixInv group */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f64.c index 73bbbee..9b13e3b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_inverse_f64.c @@ -5,13 +5,13 @@ * Title: arm_mat_inverse_f64.c * Description: Floating-point matrix inverse * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
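The rewritten f32 kernel above (like its f16 and f64 siblings) replaces the pointer-juggling version with a partial-pivoting Gauss-Jordan scheme expressed through the ELEM / SWAP_ROWS / SCALE_ROW / MAS_ROW helpers from matrix_utils.h: pick the row with the largest magnitude in the current column, swap it into place in both the source and destination matrices, scale the pivot row by the reciprocal pivot, then eliminate that column from every other row. A compact plain-C sketch of the same scheme, with the helper macros spelled out as explicit loops, is shown below; gauss_jordan_inverse is a hypothetical name for illustration, not the SDK entry point.

#include <math.h>
#include <string.h>

/* Illustrative only: invert an n x n row-major matrix with Gauss-Jordan
 * elimination and partial pivoting. A is destroyed; I receives the inverse.
 * Returns 0 on success, -1 if a zero pivot is found (singular matrix). */
static int gauss_jordan_inverse(float *A, float *I, int n)
{
    /* Start the output as the identity matrix. */
    memset(I, 0, (size_t)n * n * sizeof(float));
    for (int i = 0; i < n; i++)
        I[i * n + i] = 1.0f;

    for (int col = 0; col < n; col++)
    {
        /* Partial pivoting: largest magnitude in this column, at or below 'col'. */
        int sel = col;
        float pivot = A[col * n + col];
        for (int r = col + 1; r < n; r++)
        {
            if (fabsf(A[r * n + col]) > fabsf(pivot))
            {
                sel = r;
                pivot = A[r * n + col];
            }
        }
        if (pivot == 0.0f)
            return -1;                       /* singular */

        /* Swap the selected row into the pivot position in both matrices. */
        if (sel != col)
        {
            for (int c = 0; c < n; c++)
            {
                float t = A[col * n + c]; A[col * n + c] = A[sel * n + c]; A[sel * n + c] = t;
                t       = I[col * n + c]; I[col * n + c] = I[sel * n + c]; I[sel * n + c] = t;
            }
        }

        /* Scale the pivot row so the pivot becomes 1. */
        float inv = 1.0f / pivot;
        for (int c = 0; c < n; c++)
        {
            A[col * n + c] *= inv;
            I[col * n + c] *= inv;
        }

        /* Eliminate this column from every other row. */
        for (int r = 0; r < n; r++)
        {
            if (r == col)
                continue;
            float f = A[r * n + col];
            for (int c = 0; c < n; c++)
            {
                A[r * n + c] -= f * A[col * n + c];
                I[r * n + c] -= f * I[col * n + c];
            }
        }
    }
    return 0;
}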
* * SPDX-License-Identifier: Apache-2.0 * @@ -29,6 +29,7 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" /** @ingroup groupMatrix @@ -56,16 +57,14 @@ arm_status arm_mat_inverse_f64( { float64_t *pIn = pSrc->pData; /* input data matrix pointer */ float64_t *pOut = pDst->pData; /* output data matrix pointer */ - float64_t *pInT1, *pInT2; /* Temporary input data matrix pointer */ - float64_t *pOutT1, *pOutT2; /* Temporary output data matrix pointer */ - float64_t *pPivotRowIn, *pPRT_in, *pPivotRowDst, *pPRT_pDst; /* Temporary input and output data matrix pointer */ + + float64_t *pTmp; uint32_t numRows = pSrc->numRows; /* Number of rows in the matrix */ uint32_t numCols = pSrc->numCols; /* Number of Cols in the matrix */ -#if defined (ARM_MATH_DSP) - float64_t Xchg, in = 0.0, in1; /* Temporary input values */ - uint32_t i, rowCnt, flag = 0U, j, loopCnt, k, l; /* loop counters */ + float64_t pivot = 0.0, newPivot=0.0; /* Temporary input values */ + uint32_t selectedRow,pivotRow,i, rowNb, rowCnt, flag = 0U, j,column; /* loop counters */ arm_status status; /* status of matrix inverse */ #ifdef ARM_MATH_MATRIX_CHECK @@ -83,7 +82,6 @@ arm_status arm_mat_inverse_f64( #endif /* #ifdef ARM_MATH_MATRIX_CHECK */ { - /*-------------------------------------------------------------------------------------------------------------- * Matrix Inverse can be solved using elementary row operations. * @@ -120,7 +118,7 @@ arm_status arm_mat_inverse_f64( *----------------------------------------------------------------------------------------------------------------*/ /* Working pointer for destination matrix */ - pOutT1 = pOut; + pTmp = pOut; /* Loop over the number of rows */ rowCnt = numRows; @@ -132,18 +130,18 @@ arm_status arm_mat_inverse_f64( j = numRows - rowCnt; while (j > 0U) { - *pOutT1++ = 0.0; + *pTmp++ = 0.0; j--; } /* Writing all ones in the diagonal of the destination matrix */ - *pOutT1++ = 1.0; + *pTmp++ = 1.0; /* Writing all zeroes in upper triangle of the destination matrix */ j = rowCnt - 1U; while (j > 0U) { - *pOutT1++ = 0.0; + *pTmp++ = 0.0; j--; } @@ -153,485 +151,99 @@ arm_status arm_mat_inverse_f64( /* Loop over the number of columns of the input matrix. All the elements in each column are processed by the row operations */ - loopCnt = numCols; /* Index modifier to navigate through the columns */ - l = 0U; - - while (loopCnt > 0U) + for(column = 0U; column < numCols; column++) { /* Check if the pivot element is zero.. * If it is zero then interchange the row with non zero row below. * If there is no non zero element to replace in the rows below, * then the matrix is Singular. 
*/ - /* Working pointer for the input matrix that points - * to the pivot element of the particular row */ - pInT1 = pIn + (l * numCols); - - /* Working pointer for the destination matrix that points - * to the pivot element of the particular row */ - pOutT1 = pOut + (l * numCols); + pivotRow = column; /* Temporary variable to hold the pivot value */ - in = *pInT1; + pTmp = ELEM(pSrc,column,column) ; + pivot = *pTmp; + selectedRow = column; - /* Destination pointer modifier */ - k = 1U; + + /* Loop over the number rows present below */ - /* Check if the pivot element is zero */ - if (*pInT1 == 0.0) + for (rowNb = column+1; rowNb < numRows; rowNb++) { - /* Loop over the number rows present below */ - for (i = (l + 1U); i < numRows; i++) - { /* Update the input and destination pointers */ - pInT2 = pInT1 + (numCols * i); - pOutT2 = pOutT1 + (numCols * k); + pTmp = ELEM(pSrc,rowNb,column); + newPivot = *pTmp; + if (fabs(newPivot) > fabs(pivot)) + { + selectedRow = rowNb; + pivot = newPivot; + } + } /* Check if there is a non zero pivot element to * replace in the rows below */ - if (*pInT2 != 0.0) - { + if ((pivot != 0.0) && (selectedRow != column)) + { /* Loop over number of columns * to the right of the pilot element */ - j = numCols - l; - - while (j > 0U) - { - /* Exchange the row elements of the input matrix */ - Xchg = *pInT2; - *pInT2++ = *pInT1; - *pInT1++ = Xchg; - - /* Decrement the loop counter */ - j--; - } - /* Loop over number of columns of the destination matrix */ - j = numCols; - - while (j > 0U) - { - /* Exchange the row elements of the destination matrix */ - Xchg = *pOutT2; - *pOutT2++ = *pOutT1; - *pOutT1++ = Xchg; - - /* Decrement loop counter */ - j--; - } + SWAP_ROWS_F64(pSrc,column, pivotRow,selectedRow); + SWAP_ROWS_F64(pDst,0, pivotRow,selectedRow); + /* Flag to indicate whether exchange is done or not */ flag = 1U; - /* Break after exchange is done */ - break; - } - - /* Update the destination pointer modifier */ - k++; - - /* Decrement loop counter */ - i--; - } } + /* Update the status if the matrix is singular */ - if ((flag != 1U) && (in == 0.0)) + if ((flag != 1U) && (pivot == 0.0)) { return ARM_MATH_SINGULAR; } - /* Points to the pivot row of input and destination matrices */ - pPivotRowIn = pIn + (l * numCols); - pPivotRowDst = pOut + (l * numCols); - - /* Temporary pointers to the pivot row pointers */ - pInT1 = pPivotRowIn; - pInT2 = pPivotRowDst; - + /* Pivot element of the row */ - in = *pPivotRowIn; + pivot = 1.0 / pivot; - /* Loop over number of columns - * to the right of the pilot element */ - j = (numCols - l); - - while (j > 0U) - { - /* Divide each element of the row of the input matrix - * by the pivot element */ - in1 = *pInT1; - *pInT1++ = in1 / in; - - /* Decrement the loop counter */ - j--; - } - - /* Loop over number of columns of the destination matrix */ - j = numCols; - - while (j > 0U) - { - /* Divide each element of the row of the destination matrix - * by the pivot element */ - in1 = *pInT2; - *pInT2++ = in1 / in; - - /* Decrement the loop counter */ - j--; - } + SCALE_ROW_F64(pSrc,column,pivot,pivotRow); + SCALE_ROW_F64(pDst,0,pivot,pivotRow); + /* Replace the rows with the sum of that row and a multiple of row i * so that each new element in column i above row i is zero.*/ - /* Temporary pointers for input and destination matrices */ - pInT1 = pIn; - pInT2 = pOut; - - /* index used to check for pivot element */ - i = 0U; - - /* Loop over number of rows */ - /* to be replaced by the sum of that row and a multiple of row i */ - k = 
numRows; - - while (k > 0U) - { - /* Check for the pivot element */ - if (i == l) - { - /* If the processing element is the pivot element, - only the columns to the right are to be processed */ - pInT1 += numCols - l; - - pInT2 += numCols; - } - else - { - /* Element of the reference row */ - in = *pInT1; - - /* Working pointers for input and destination pivot rows */ - pPRT_in = pPivotRowIn; - pPRT_pDst = pPivotRowDst; - - /* Loop over the number of columns to the right of the pivot element, - to replace the elements in the input matrix */ - j = (numCols - l); - - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - in1 = *pInT1; - *pInT1++ = in1 - (in * *pPRT_in++); - - /* Decrement the loop counter */ - j--; - } - - /* Loop over the number of columns to - replace the elements in the destination matrix */ - j = numCols; - - while (j > 0U) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - in1 = *pInT2; - *pInT2++ = in1 - (in * *pPRT_pDst++); - - /* Decrement loop counter */ - j--; - } - - } - - /* Increment temporary input pointer */ - pInT1 = pInT1 + l; - - /* Decrement loop counter */ - k--; - - /* Increment pivot index */ - i++; - } - - /* Increment the input pointer */ - pIn++; - - /* Decrement the loop counter */ - loopCnt--; - - /* Increment the index modifier */ - l++; - } - - -#else - - float64_t Xchg, in = 0.0; /* Temporary input values */ - uint32_t i, rowCnt, flag = 0U, j, loopCnt, k, l; /* loop counters */ - arm_status status; /* status of matrix inverse */ - -#ifdef ARM_MATH_MATRIX_CHECK - - /* Check for matrix mismatch condition */ - if ((pSrc->numRows != pSrc->numCols) || - (pDst->numRows != pDst->numCols) || - (pSrc->numRows != pDst->numRows) ) - { - /* Set status as ARM_MATH_SIZE_MISMATCH */ - status = ARM_MATH_SIZE_MISMATCH; - } - else - -#endif /* #ifdef ARM_MATH_MATRIX_CHECK */ - - { - - /*-------------------------------------------------------------------------------------------------------------- - * Matrix Inverse can be solved using elementary row operations. - * - * Gauss-Jordan Method: - * - * 1. First combine the identity matrix and the input matrix separated by a bar to form an - * augmented matrix as follows: - * _ _ _ _ _ _ _ _ - * | | a11 a12 | | | 1 0 | | | X11 X12 | - * | | | | | | | = | | - * |_ |_ a21 a22 _| | |_0 1 _| _| |_ X21 X21 _| - * - * 2. In our implementation, pDst Matrix is used as identity matrix. - * - * 3. Begin with the first row. Let i = 1. - * - * 4. Check to see if the pivot for row i is zero. - * The pivot is the element of the main diagonal that is on the current row. - * For instance, if working with row i, then the pivot element is aii. - * If the pivot is zero, exchange that row with a row below it that does not - * contain a zero in column i. If this is not possible, then an inverse - * to that matrix does not exist. - * - * 5. Divide every element of row i by the pivot. - * - * 6. For every row below and row i, replace that row with the sum of that row and - * a multiple of row i so that each new element in column i below row i is zero. - * - * 7. Move to the next row and column and repeat steps 2 through 5 until you have zeros - * for every element below and above the main diagonal. - * - * 8. Now an identical matrix is formed to the left of the bar(input matrix, src). - * Therefore, the matrix to the right of the bar is our solution(dst matrix, dst). 
- *----------------------------------------------------------------------------------------------------------------*/ - - /* Working pointer for destination matrix */ - pOutT1 = pOut; - - /* Loop over the number of rows */ - rowCnt = numRows; - - /* Making the destination matrix as identity matrix */ - while (rowCnt > 0U) - { - /* Writing all zeroes in lower triangle of the destination matrix */ - j = numRows - rowCnt; - while (j > 0U) - { - *pOutT1++ = 0.0; - j--; - } - - /* Writing all ones in the diagonal of the destination matrix */ - *pOutT1++ = 1.0; - - /* Writing all zeroes in upper triangle of the destination matrix */ - j = rowCnt - 1U; - while (j > 0U) - { - *pOutT1++ = 0.0; - j--; - } - - /* Decrement loop counter */ - rowCnt--; - } - - /* Loop over the number of columns of the input matrix. - All the elements in each column are processed by the row operations */ - loopCnt = numCols; - - /* Index modifier to navigate through the columns */ - l = 0U; - - while (loopCnt > 0U) - { - /* Check if the pivot element is zero.. - * If it is zero then interchange the row with non zero row below. - * If there is no non zero element to replace in the rows below, - * then the matrix is Singular. */ - - /* Working pointer for the input matrix that points - * to the pivot element of the particular row */ - pInT1 = pIn + (l * numCols); - - /* Working pointer for the destination matrix that points - * to the pivot element of the particular row */ - pOutT1 = pOut + (l * numCols); - - /* Temporary variable to hold the pivot value */ - in = *pInT1; - - /* Destination pointer modifier */ - k = 1U; - - /* Check if the pivot element is zero */ - if (*pInT1 == 0.0) + rowNb = 0; + for (;rowNb < pivotRow; rowNb++) { - /* Loop over the number rows present below */ - for (i = (l + 1U); i < numRows; i++) - { - /* Update the input and destination pointers */ - pInT2 = pInT1 + (numCols * i); - pOutT2 = pOutT1 + (numCols * k); - - /* Check if there is a non zero pivot element to - * replace in the rows below */ - if (*pInT2 != 0.0) - { - /* Loop over number of columns - * to the right of the pilot element */ - for (j = 0U; j < (numCols - l); j++) - { - /* Exchange the row elements of the input matrix */ - Xchg = *pInT2; - *pInT2++ = *pInT1; - *pInT1++ = Xchg; - } - - for (j = 0U; j < numCols; j++) - { - Xchg = *pOutT2; - *pOutT2++ = *pOutT1; - *pOutT1++ = Xchg; - } + pTmp = ELEM(pSrc,rowNb,column) ; + pivot = *pTmp; - /* Flag to indicate whether exchange is done or not */ - flag = 1U; + MAS_ROW_F64(column,pSrc,rowNb,pivot,pSrc,pivotRow); + MAS_ROW_F64(0 ,pDst,rowNb,pivot,pDst,pivotRow); - /* Break after exchange is done */ - break; - } - /* Update the destination pointer modifier */ - k++; - } } - /* Update the status if the matrix is singular */ - if ((flag != 1U) && (in == 0.0)) + for (rowNb = pivotRow + 1; rowNb < numRows; rowNb++) { - return ARM_MATH_SINGULAR; - } - - /* Points to the pivot row of input and destination matrices */ - pPivotRowIn = pIn + (l * numCols); - pPivotRowDst = pOut + (l * numCols); + pTmp = ELEM(pSrc,rowNb,column) ; + pivot = *pTmp; - /* Temporary pointers to the pivot row pointers */ - pInT1 = pPivotRowIn; - pOutT1 = pPivotRowDst; + MAS_ROW_F64(column,pSrc,rowNb,pivot,pSrc,pivotRow); + MAS_ROW_F64(0 ,pDst,rowNb,pivot,pDst,pivotRow); - /* Pivot element of the row */ - in = *(pIn + (l * numCols)); - - /* Loop over number of columns - * to the right of the pilot element */ - for (j = 0U; j < (numCols - l); j++) - { - /* Divide each element of the row of the input matrix - * by the 
pivot element */ - *pInT1 = *pInT1 / in; - pInT1++; } - for (j = 0U; j < numCols; j++) - { - /* Divide each element of the row of the destination matrix - * by the pivot element */ - *pOutT1 = *pOutT1 / in; - pOutT1++; - } - - /* Replace the rows with the sum of that row and a multiple of row i - * so that each new element in column i above row i is zero.*/ - - /* Temporary pointers for input and destination matrices */ - pInT1 = pIn; - pOutT1 = pOut; - for (i = 0U; i < numRows; i++) - { - /* Check for the pivot element */ - if (i == l) - { - /* If the processing element is the pivot element, - only the columns to the right are to be processed */ - pInT1 += numCols - l; - pOutT1 += numCols; - } - else - { - /* Element of the reference row */ - in = *pInT1; - - /* Working pointers for input and destination pivot rows */ - pPRT_in = pPivotRowIn; - pPRT_pDst = pPivotRowDst; - - /* Loop over the number of columns to the right of the pivot element, - to replace the elements in the input matrix */ - for (j = 0U; j < (numCols - l); j++) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - *pInT1 = *pInT1 - (in * *pPRT_in++); - pInT1++; - } - - /* Loop over the number of columns to - replace the elements in the destination matrix */ - for (j = 0U; j < numCols; j++) - { - /* Replace the element by the sum of that row - and a multiple of the reference row */ - *pOutT1 = *pOutT1 - (in * *pPRT_pDst++); - pOutT1++; - } - - } - - /* Increment temporary input pointer */ - pInT1 = pInT1 + l; - } - - /* Increment the input pointer */ - pIn++; - - /* Decrement the loop counter */ - loopCnt--; - - /* Increment the index modifier */ - l++; } -#endif /* #if defined (ARM_MATH_DSP) */ - /* Set status as ARM_MATH_SUCCESS */ status = ARM_MATH_SUCCESS; - if ((flag != 1U) && (in == 0.0)) + if ((flag != 1U) && (pivot == 0.0)) { pIn = pSrc->pData; for (i = 0; i < numRows * numCols; i++) @@ -648,7 +260,6 @@ arm_status arm_mat_inverse_f64( /* Return to application */ return (status); } - /** @} end of MatrixInv group */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_ldlt_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_ldlt_f32.c index b974031..3fa0b43 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_ldlt_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_ldlt_f32.c @@ -5,11 +5,13 @@ * Title: arm_mat_ldl_f32.c * Description: Floating-point LDL decomposition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -27,44 +29,12 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" - - +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) - -/// @private -#define SWAP_ROWS(A,i,j) \ - { \ - int cnt = n; \ - \ - for(int w=0;w < n; w+=4) \ - { \ - f32x4_t tmpa,tmpb; \ - mve_pred16_t p0 = vctp32q(cnt); \ - \ - tmpa=vldrwq_z_f32(&A[i*n + w],p0);\ - tmpb=vldrwq_z_f32(&A[j*n + w],p0);\ - \ - vstrwq_p(&A[i*n + w], tmpb, p0); \ - vstrwq_p(&A[j*n + w], tmpa, p0); \ - \ - cnt -= 4; \ - } \ - } - -/// @private -#define SWAP_COLS(A,i,j) \ - for(int w=0;w < n; w++) \ - { \ - float32_t tmp; \ - tmp = A[w*n + i]; \ - A[w*n + i] = A[w*n + j];\ - A[w*n + j] = tmp; \ - } - /** @ingroup groupMatrix */ @@ -96,7 +66,7 @@ arm_status arm_mat_ldlt_f32( { arm_status status; /* status of matrix inverse */ - + #ifdef ARM_MATH_MATRIX_CHECK @@ -104,8 +74,7 @@ arm_status arm_mat_ldlt_f32( if ((pSrc->numRows != pSrc->numCols) || (pl->numRows != pl->numCols) || (pd->numRows != pd->numCols) || - (pp->numRows != pp->numCols) || - (pl->numRows != pl->numRows) ) + (pl->numRows != pd->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -120,6 +89,7 @@ arm_status arm_mat_ldlt_f32( int fullRank = 1, diag,k; float32_t *pA; + memset(pd->pData,0,sizeof(float32_t)*n*n); memcpy(pl->pData,pSrc->pData,n*n*sizeof(float32_t)); pA = pl->pData; @@ -143,7 +113,7 @@ arm_status arm_mat_ldlt_f32( { /* Find pivot */ float32_t m=F32_MIN,a; - int j=k; + int j=k; for(int r=k;rpData[row*n+col], zero, p0); + + vstrwq_p(&pl->pData[row*n+col], zero, p0); cnt -= 4; } @@ -293,15 +263,15 @@ arm_status arm_mat_ldlt_f32( for(int row=0; row < n;row++) { - mve_pred16_t p0; + mve_pred16_t p0; int cnt= n-row-1; f32x4_t zero=vdupq_n_f32(0.0f); - + for(int col=row+1; col < n;col+=4) { p0 = vctp32q(cnt); - - vstrwq_p(&pl->pData[row*n+col], zero, p0); + + vstrwq_p(&pl->pData[row*n+col], zero, p0); cnt -= 4; } @@ -312,36 +282,17 @@ arm_status arm_mat_ldlt_f32( pd->pData[d*n+d] = pl->pData[d*n+d]; pl->pData[d*n+d] = 1.0; } - + status = ARM_MATH_SUCCESS; } - + /* Return to application */ return (status); } #else -/// @private -#define SWAP_ROWS(A,i,j) \ - for(int w=0;w < n; w++) \ - { \ - float32_t tmp; \ - tmp = A[i*n + w]; \ - A[i*n + w] = A[j*n + w];\ - A[j*n + w] = tmp; \ - } - -/// @private -#define SWAP_COLS(A,i,j) \ - for(int w=0;w < n; w++) \ - { \ - float32_t tmp; \ - tmp = A[w*n + i]; \ - A[w*n + i] = A[w*n + j];\ - A[w*n + j] = tmp; \ - } /** @ingroup groupMatrix @@ -351,7 +302,7 @@ arm_status arm_mat_ldlt_f32( @addtogroup MatrixChol @{ */ - + /** * @brief Floating-point LDL^t decomposition of positive semi-definite matrix. * @param[in] pSrc points to the instance of the input floating-point matrix structure. 
@@ -374,7 +325,7 @@ arm_status arm_mat_ldlt_f32( { arm_status status; /* status of matrix inverse */ - + #ifdef ARM_MATH_MATRIX_CHECK @@ -382,8 +333,7 @@ arm_status arm_mat_ldlt_f32( if ((pSrc->numRows != pSrc->numCols) || (pl->numRows != pl->numCols) || (pd->numRows != pd->numCols) || - (pp->numRows != pp->numCols) || - (pl->numRows != pl->numRows) ) + (pl->numRows != pd->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -397,11 +347,13 @@ arm_status arm_mat_ldlt_f32( const int n=pSrc->numRows; int fullRank = 1, diag,k; float32_t *pA; + int row,d; + memset(pd->pData,0,sizeof(float32_t)*n*n); memcpy(pl->pData,pSrc->pData,n*n*sizeof(float32_t)); pA = pl->pData; - for(int k=0;k < n; k++) + for(k=0;k < n; k++) { pp[k] = k; } @@ -411,10 +363,13 @@ arm_status arm_mat_ldlt_f32( { /* Find pivot */ float32_t m=F32_MIN,a; - int j=k; + int j=k; - for(int r=k;r m) { @@ -425,8 +380,8 @@ arm_status arm_mat_ldlt_f32( if(j != k) { - SWAP_ROWS(pA,k,j); - SWAP_COLS(pA,k,j); + SWAP_ROWS_F32(pl,0,k,j); + SWAP_COLS_F32(pl,0,k,j); } @@ -434,27 +389,28 @@ arm_status arm_mat_ldlt_f32( a = pA[k*n+k]; - if (fabs(a) < 1.0e-8) + if (fabsf(a) < 1.0e-8f) { fullRank = 0; break; } - for(int w=k+1;wpData[row*n+col]=0.0; } } } - for(int row=0; row < n;row++) + for(row=0; row < n;row++) { - for(int col=row+1; col < n;col++) + int col; + for(col=row+1; col < n;col++) { pl->pData[row*n+col] = 0.0; } } - for(int d=0; d < diag;d++) + for(d=0; d < diag;d++) { pd->pData[d*n+d] = pl->pData[d*n+d]; pl->pData[d*n+d] = 1.0; } - + status = ARM_MATH_SUCCESS; } - + /* Return to application */ return (status); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_ldlt_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_ldlt_f64.c index 64e4d1a..55b131a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_ldlt_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_ldlt_f64.c @@ -5,11 +5,13 @@ * Title: arm_mat_ldl_f64.c * Description: Floating-point LDL decomposition * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
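Both LDL^T hunks above factor a positive semi-definite matrix as P*A*P^T = L*D*L^T, choosing the largest remaining diagonal entry as the pivot and swapping the matching row and column, now through the shared SWAP_ROWS_F32/SWAP_COLS_F32 helpers pulled in from matrix_utils.h instead of the local macros. A reduced scalar sketch of the same factorization (illustrative only; the library's rank-deficient handling is more involved):

#include <math.h>
#include <string.h>

/* Pivoted LDL^T factorization of a positive semi-definite n x n row-major
 * matrix A: P*A*P^T = L*D*L^T. l receives a unit lower-triangular matrix,
 * d the diagonal of D, perm the permutation. Illustrative helper only. */
static void ldlt_pivoted_f32(const float *A, float *l, float *d, int *perm, int n)
{
    memcpy(l, A, (size_t)n * n * sizeof(float));
    for (int k = 0; k < n; k++) perm[k] = k;

    int rank = n;
    for (int k = 0; k < n; k++)
    {
        /* pivot: largest remaining diagonal entry */
        int j = k;
        for (int r = k; r < n; r++)
            if (l[r * n + r] > l[j * n + j]) j = r;

        if (j != k)    /* symmetric swap of row/column k and j, recorded in perm */
        {
            for (int w = 0; w < n; w++) { float t = l[k*n+w]; l[k*n+w] = l[j*n+w]; l[j*n+w] = t; }
            for (int w = 0; w < n; w++) { float t = l[w*n+k]; l[w*n+k] = l[w*n+j]; l[w*n+j] = t; }
            int t = perm[k]; perm[k] = perm[j]; perm[j] = t;
        }

        float a = l[k * n + k];
        if (fabsf(a) < 1.0e-8f) { rank = k; break; }    /* vanishing pivot: stop */

        for (int w = k + 1; w < n; w++)                 /* column of L below the pivot */
            l[w * n + k] /= a;
        for (int w = k + 1; w < n; w++)                 /* Schur-complement update */
            for (int c = k + 1; c < n; c++)
                l[w * n + c] -= l[w * n + k] * a * l[c * n + k];
    }

    /* split out D and make L unit lower-triangular (full-rank path; the
     * library additionally zeroes the trailing block when a pivot vanishes) */
    for (int i = 0; i < n; i++)
    {
        d[i] = (i < rank) ? l[i * n + i] : 0.0f;
        l[i * n + i] = 1.0f;
        for (int c = i + 1; c < n; c++) l[i * n + c] = 0.0f;
    }
}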
* * SPDX-License-Identifier: Apache-2.0 * @@ -27,29 +29,11 @@ */ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" -#include +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" +#include -/// @private -#define SWAP_ROWS(A,i,j) \ - for(int w=0;w < n; w++) \ - { \ - float64_t tmp; \ - tmp = A[i*n + w]; \ - A[i*n + w] = A[j*n + w];\ - A[j*n + w] = tmp; \ - } -/// @private -#define SWAP_COLS(A,i,j) \ - for(int w=0;w < n; w++) \ - { \ - float64_t tmp; \ - tmp = A[w*n + i]; \ - A[w*n + i] = A[w*n + j];\ - A[w*n + j] = tmp; \ - } - /** @ingroup groupMatrix */ @@ -90,8 +74,7 @@ arm_status arm_mat_ldlt_f64( if ((pSrc->numRows != pSrc->numCols) || (pl->numRows != pl->numCols) || (pd->numRows != pd->numCols) || - (pp->numRows != pp->numCols) || - (pl->numRows != pl->numRows) ) + (pl->numRows != pd->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -106,10 +89,12 @@ arm_status arm_mat_ldlt_f64( int fullRank = 1, diag,k; float64_t *pA; + memset(pd->pData,0,sizeof(float64_t)*n*n); + memcpy(pl->pData,pSrc->pData,n*n*sizeof(float64_t)); pA = pl->pData; - for(int k=0;k < n; k++) + for(k=0;k < n; k++) { pp[k] = k; } @@ -119,10 +104,10 @@ arm_status arm_mat_ldlt_f64( { /* Find pivot */ float64_t m=F64_MIN,a; - int j=k; + int w,r,j=k; - for(int r=k;r m) { @@ -133,8 +118,8 @@ arm_status arm_mat_ldlt_f64( if(j != k) { - SWAP_ROWS(pA,k,j); - SWAP_COLS(pA,k,j); + SWAP_ROWS_F64(pl,0,k,j); + SWAP_COLS_F64(pl,0,k,j); } @@ -149,15 +134,16 @@ arm_status arm_mat_ldlt_f64( break; } - for(int w=k+1;wpData[row*n+col]=0.0; + int col; + for(col=k; col < n;col++) + { + pl->pData[row*n+col]=0.0; + } } } } - for(int row=0; row < n;row++) { - for(int col=row+1; col < n;col++) - { - pl->pData[row*n+col] = 0.0; - } + int row; + for(row=0; row < n;row++) + { + int col; + for(col=row+1; col < n;col++) + { + pl->pData[row*n+col] = 0.0; + } + } } - for(int d=0; d < diag;d++) { - pd->pData[d*n+d] = pl->pData[d*n+d]; - pl->pData[d*n+d] = 1.0; + int d; + for(d=0; d < diag;d++) + { + pd->pData[d*n+d] = pl->pData[d*n+d]; + pl->pData[d*n+d] = 1.0; + } } status = ARM_MATH_SUCCESS; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f16.c index 571da6f..45c6570 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_mult_f16.c * Description: Floating-point matrix multiplication * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -404,7 +404,7 @@ arm_status arm_mat_mult_f16( (pSrcB->numCols != pDst->numCols) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ - status = ARM_MATH_SIZE_MISMATCH; + return(ARM_MATH_SIZE_MISMATCH); } else @@ -689,16 +689,16 @@ arm_status arm_mat_mult_f16( /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... 
+ a(m,p) * b(p,n) */ /* Perform the multiply-accumulates */ - sum += *pIn1++ * *pIn2; + sum += (_Float16)*pIn1++ * (_Float16)*pIn2; pIn2 += numColsB; - sum += *pIn1++ * *pIn2; + sum += (_Float16)*pIn1++ * (_Float16)*pIn2; pIn2 += numColsB; - sum += *pIn1++ * *pIn2; + sum += (_Float16)*pIn1++ * (_Float16)*pIn2; pIn2 += numColsB; - sum += *pIn1++ * *pIn2; + sum += (_Float16)*pIn1++ * (_Float16)*pIn2; pIn2 += numColsB; /* Decrement loop counter */ @@ -720,7 +720,7 @@ arm_status arm_mat_mult_f16( /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... + a(m,p) * b(p,n) */ /* Perform the multiply-accumulates */ - sum += *pIn1++ * *pIn2; + sum += (_Float16)*pIn1++ * (_Float16)*pIn2; pIn2 += numColsB; /* Decrement loop counter */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f32.c index 26eaec6..ed1fe6e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_mult_f32.c * Description: Floating-point matrix multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -30,6 +30,10 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#if defined(ARM_MATH_NEON) +#define GROUPOFROWS 8 +#endif + /** * @ingroup groupMatrix */ @@ -39,7 +43,27 @@ * * Multiplies two matrices. * - * \image html MatrixMultiplication.gif "Multiplication of two 3 x 3 matrices" + * @par Multiplication of two 3x3 matrices: + * + * \f[ + * \begin{pmatrix} + * a_{1,1} & a_{1,2} & a_{1,3} \\ + * a_{2,1} & a_{2,2} & a_{2,3} \\ + * a_{3,1} & a_{3,2} & a_{3,3} \\ + * \end{pmatrix} + * + * \begin{pmatrix} + * b_{1,1} & b_{1,2} & b_{1,3} \\ + * b_{2,1} & b_{2,2} & b_{2,3} \\ + * b_{3,1} & b_{3,2} & b_{3,3} \\ + * \end{pmatrix} + * = + * \begin{pmatrix} + * a_{1,1} b_{1,1}+a_{1,2} b_{2,1}+a_{1,3} b_{3,1} & a_{1,1} b_{1,2}+a_{1,2} b_{2,2}+a_{1,3} b_{3,2} & a_{1,1} b_{1,3}+a_{1,2} b_{2,3}+a_{1,3} b_{3,3} \\ + * a_{2,1} b_{1,1}+a_{2,2} b_{2,1}+a_{2,3} b_{3,1} & a_{2,1} b_{1,2}+a_{2,2} b_{2,2}+a_{2,3} b_{3,2} & a_{2,1} b_{1,3}+a_{2,2} b_{2,3}+a_{2,3} b_{3,3} \\ + * a_{3,1} b_{1,1}+a_{3,2} b_{2,1}+a_{3,3} b_{3,1} & a_{3,1} b_{1,2}+a_{3,2} b_{2,2}+a_{3,3} b_{3,2} & a_{3,1} b_{1,3}+a_{3,2} b_{2,3}+a_{3,3} b_{3,3} \\ + * \end{pmatrix} + * \f] * Matrix multiplication is only defined if the number of columns of the * first matrix equals the number of rows of the second matrix. @@ -56,14 +80,7 @@ * @{ */ -/** - * @brief Floating-point matrix multiplication. - * @param[in] *pSrcA points to the first input matrix structure - * @param[in] *pSrcB points to the second input matrix structure - * @param[out] *pDst points to output matrix structure - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- */ + #if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) @@ -260,6 +277,14 @@ __STATIC_INLINE arm_status arm_mat_mult_f32_4x4_mve( } +/** + * @brief Floating-point matrix multiplication. + * @param[in] *pSrcA points to the first input matrix structure + * @param[in] *pSrcB points to the second input matrix structure + * @param[out] *pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ arm_status arm_mat_mult_f32( const arm_matrix_instance_f32 * pSrcA, const arm_matrix_instance_f32 * pSrcB, @@ -514,9 +539,14 @@ arm_status arm_mat_mult_f32( #else #if defined(ARM_MATH_NEON) - -#define GROUPOFROWS 8 - +/** + * @brief Floating-point matrix multiplication. + * @param[in] *pSrcA points to the first input matrix structure + * @param[in] *pSrcB points to the second input matrix structure + * @param[out] *pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ arm_status arm_mat_mult_f32( const arm_matrix_instance_f32 * pSrcA, const arm_matrix_instance_f32 * pSrcB, @@ -845,6 +875,14 @@ arm_status arm_mat_mult_f32( return (status); } #else +/** + * @brief Floating-point matrix multiplication. + * @param[in] *pSrcA points to the first input matrix structure + * @param[in] *pSrcB points to the second input matrix structure + * @param[out] *pDst points to output matrix structure + * @return The function returns either + * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. + */ arm_status arm_mat_mult_f32( const arm_matrix_instance_f32 * pSrcA, const arm_matrix_instance_f32 * pSrcB, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f64.c index 29e3a3e..08571c7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_f64.c @@ -5,13 +5,13 @@ * Title: arm_mat_mult_f64.c * Description: Floating-point matrix multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,23 +34,6 @@ * @ingroup groupMatrix */ -/** - * @defgroup MatrixMult Matrix Multiplication - * - * Multiplies two matrices. - * - * \image html MatrixMultiplication.gif "Multiplication of two 3 x 3 matrices" - - * Matrix multiplication is only defined if the number of columns of the - * first matrix equals the number of rows of the second matrix. - * Multiplying an M x N matrix with an N x P matrix results - * in an M x P matrix. - * When matrix size checking is enabled, the functions check: (1) that the inner dimensions of - * pSrcA and pSrcB are equal; and (2) that the size of the output - * matrix equals the outer dimensions of pSrcA and pSrcB. 
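The multiplication documentation kept in the f32 source above (and dropped from the duplicate f64 copy here) states the rule all of these kernels enforce: an M x N matrix times an N x P matrix yields an M x P matrix, and with ARM_MATH_MATRIX_CHECK enabled a shape mismatch comes back as ARM_MATH_SIZE_MISMATCH rather than an out-of-bounds write. A minimal caller-side sketch against the public f32 API (the include path differs inside the Edge Impulse tree; buffer names are illustrative):

#include "arm_math.h"   /* CMSIS-DSP public header */

/* Multiply a 2x3 matrix by a 3x2 matrix: the inner dimensions match (3 == 3),
 * so the destination must be 2x2. */
void matmul_demo(void)
{
    float32_t a_data[2 * 3] = { 1, 2, 3,
                                4, 5, 6 };
    float32_t b_data[3 * 2] = { 7,  8,
                                9, 10,
                               11, 12 };
    float32_t c_data[2 * 2];

    arm_matrix_instance_f32 A, B, C;
    arm_mat_init_f32(&A, 2, 3, a_data);
    arm_mat_init_f32(&B, 3, 2, b_data);
    arm_mat_init_f32(&C, 2, 2, c_data);

    arm_status st = arm_mat_mult_f32(&A, &B, &C);
    /* st == ARM_MATH_SUCCESS; c_data = { 58, 64, 139, 154 } */
    (void)st;
}

The expected destination shape is always rows-of-A by columns-of-B, which is why the example allocates a 2x2 output.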
- */ - - /** * @addtogroup MatrixMult * @{ @@ -116,7 +99,7 @@ arm_status arm_mat_mult_f64( do { /* Set the variable sum, that acts as accumulator, to zero */ - sum = 0.0f; + sum = 0.0; /* Initialize pointer pIn1 to point to starting address of column being processed */ pIn1 = pInA; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_fast_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_fast_q15.c index 46981ff..57eda5b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_fast_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_fast_q15.c @@ -5,13 +5,13 @@ * Title: arm_mat_mult_fast_q15.c * Description: Q15 matrix multiplication (fast variant) * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -80,7 +80,7 @@ arm_status arm_mat_mult_fast_q15( uint16_t numRowsA = pSrcA->numRows; /* Number of rows of input matrix A */ uint16_t numColsB = pSrcB->numCols; /* Number of columns of input matrix B */ uint16_t numColsA = pSrcA->numCols; /* Number of columns of input matrix A */ - uint16_t numRowsB = pSrcB->numRows; /* Number of rows of input matrix A */ + uint16_t numRowsB = pSrcB->numRows; /* Number of rows of input matrix B */ uint32_t col, i = 0U, row = numRowsB, colCnt; /* Loop counters */ arm_status status; /* Status of matrix multiplication */ @@ -127,7 +127,7 @@ arm_status arm_mat_mult_fast_q15( #if defined (ARM_MATH_DSP) /* Read two elements from row */ - in = read_q15x2_ia ((q15_t **) &pInB); + in = read_q15x2_ia (&pInB); /* Unpack and store one element in destination */ #ifndef ARM_MATH_BIG_ENDIAN @@ -149,7 +149,7 @@ arm_status arm_mat_mult_fast_q15( /* Update pointer px to point to next row of transposed matrix */ px += numRowsB; - in = read_q15x2_ia ((q15_t **) &pInB); + in = read_q15x2_ia (&pInB); #ifndef ARM_MATH_BIG_ENDIAN *px = (q15_t) in; #else @@ -260,7 +260,7 @@ arm_status arm_mat_mult_fast_q15( pInA2 = pInA + numColsA; pInB2 = pInB + numRowsB; - /* Read in two elements at once - alows dual MAC instruction */ + /* Read in two elements at once - allows dual MAC instruction */ colCnt = numColsA >> 1U; #else colCnt = numColsA >> 2U; @@ -273,13 +273,13 @@ arm_status arm_mat_mult_fast_q15( #if defined (ARM_MATH_DSP) /* read real and imag values from pSrcA and pSrcB buffer */ - inA1 = read_q15x2_ia ((q15_t **) &pInA); - inB1 = read_q15x2_ia ((q15_t **) &pInB); + inA1 = read_q15x2_ia (&pInA); + inB1 = read_q15x2_ia (&pInB); - inA2 = read_q15x2_ia ((q15_t **) &pInA2); - inB2 = read_q15x2_ia ((q15_t **) &pInB2); + inA2 = read_q15x2_ia (&pInA2); + inB2 = read_q15x2_ia (&pInB2); - /* Multiply and Accumlates */ + /* Multiply and Accumulates */ sum = __SMLAD(inA1, inB1, sum); sum2 = __SMLAD(inA1, inB2, sum2); sum3 = __SMLAD(inA2, inB1, sum3); @@ -288,7 +288,7 @@ arm_status arm_mat_mult_fast_q15( /* read real and imag values from pSrcA and pSrcB buffer */ inA1 = *pInA++; inB1 = *pInB++; - /* Multiply and Accumlates */ + /* 
Multiply and Accumulates */ sum += inA1 * inB1; inA2 = *pInA++; @@ -391,10 +391,10 @@ arm_status arm_mat_mult_fast_q15( /* matrix multiplication */ while (colCnt > 0U) { - inA1 = read_q15x2_ia ((q15_t **) &pInA); - inA2 = read_q15x2_ia ((q15_t **) &pInA); - inB1 = read_q15x2_ia ((q15_t **) &pInB); - inB2 = read_q15x2_ia ((q15_t **) &pInB); + inA1 = read_q15x2_ia (&pInA); + inA2 = read_q15x2_ia (&pInA); + inB1 = read_q15x2_ia (&pInB); + inB2 = read_q15x2_ia (&pInB); sum = __SMLAD(inA1, inB1, sum); sum = __SMLAD(inA2, inB2, sum); @@ -443,10 +443,10 @@ arm_status arm_mat_mult_fast_q15( /* matrix multiplication */ while (colCnt > 0U) { - inA1 = read_q15x2_ia ((q15_t **) &pInA); - inA2 = read_q15x2_ia ((q15_t **) &pInA); - inB1 = read_q15x2_ia ((q15_t **) &pInB); - inB2 = read_q15x2_ia ((q15_t **) &pInB); + inA1 = read_q15x2_ia (&pInA); + inA2 = read_q15x2_ia (&pInA); + inB1 = read_q15x2_ia (&pInB); + inB2 = read_q15x2_ia (&pInB); sum = __SMLAD(inA1, inB1, sum); sum = __SMLAD(inA2, inB2, sum); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_fast_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_fast_q31.c index f4214af..1107562 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_fast_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_fast_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_mult_fast_q31.c * Description: Q31 matrix multiplication (fast variant) * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_opt_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_opt_q31.c new file mode 100644 index 0000000..69e0142 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_opt_q31.c @@ -0,0 +1,788 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mat_mult_opt_q31.c + * Description: Q31 matrix multiplication + * + * $Date: 3 Nov 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
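The fast q15 kernel patched above loads two q15 operands per 32-bit read (the read_q15x2_ia calls whose casts are dropped here) and hands each pair to __SMLAD, so one instruction performs two multiply-accumulates. A portable reference model of that packed dual MAC and of the resulting dot product (illustrative; the real kernel also transposes B and unrolls further):

#include <stdint.h>

typedef int16_t q15_t;
typedef int32_t q31_t;

/* Reference model of the packed dual MAC: treats each 32-bit word as two q15
 * halves and accumulates both products. On Cortex-M cores with the DSP
 * extension this is what the __SMLAD intrinsic computes. */
static q31_t smlad_ref(uint32_t x, uint32_t y, q31_t acc)
{
    q15_t xl = (q15_t)(x & 0xFFFFu), xh = (q15_t)(x >> 16);
    q15_t yl = (q15_t)(y & 0xFFFFu), yh = (q15_t)(y >> 16);
    return acc + (q31_t)xl * yl + (q31_t)xh * yh;
}

/* Dot product of two q15 vectors, two lanes at a time, accumulated in q31
 * without intermediate saturation (the behaviour of the "fast" variants). */
static q31_t dot_q15_packed(const q15_t *a, const q15_t *b, uint32_t n)
{
    q31_t acc = 0;
    uint32_t pairs = n >> 1;
    while (pairs-- > 0U)
    {
        uint32_t wa = (uint32_t)(uint16_t)a[0] | ((uint32_t)(uint16_t)a[1] << 16);
        uint32_t wb = (uint32_t)(uint16_t)b[0] | ((uint32_t)(uint16_t)b[1] << 16);
        acc = smlad_ref(wa, wb, acc);
        a += 2; b += 2;
    }
    if (n & 1U)                              /* odd-length tail */
        acc += (q31_t)(*a) * (*b);
    return acc;
}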
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" + +/** + @ingroup groupMatrix + */ + +/** + @addtogroup MatrixMult + @{ + */ + +/** + @brief Q31 matrix multiplication. + @param[in] pSrcA points to the first input matrix structure + @param[in] pSrcB points to the second input matrix structure + @param[out] pDst points to output matrix structure + @param[in] pState points to the array for storing intermediate results + @return execution status + - \ref ARM_MATH_SUCCESS : Operation successful + - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed + + @par Scaling and Overflow Behavior + The function is implemented using an internal 64-bit accumulator. + The accumulator has a 2.62 format and maintains full precision of the intermediate + multiplication results but provides only a single guard bit. There is no saturation + on intermediate additions. Thus, if the accumulator overflows it wraps around and + distorts the result. The input signals should be scaled down to avoid intermediate + overflows. The input is thus scaled down by log2(numColsA) bits + to avoid overflows, as a total of numColsA additions are performed internally. + The 2.62 accumulator is right shifted by 31 bits and saturated to 1.31 format to yield the final result. + @remark + Refer to \ref arm_mat_mult_fast_q31() for a faster but less precise implementation of this function. + @remark + This function is a faster implementation of arm_mat_mult_q31 for MVE but it is requiring + additional storage for intermediate results. 
+ */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#define MATRIX_DIM2 2 +#define MATRIX_DIM3 3 +#define MATRIX_DIM4 4 + +__STATIC_INLINE arm_status arm_mat_mult_opt_q31_2x2_mve( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst) +{ + q31_t *pInB = pSrcB->pData; /* input data matrix pointer B */ + q31_t *pInA = pSrcA->pData; /* input data matrix pointer A */ + q31_t *pOut = pDst->pData; /* output data matrix pointer */ + uint32x4_t vecColBOffs; + q31_t *pInA0 = pInA; + q31_t *pInA1 = pInA0 + MATRIX_DIM2; + q63_t acc0, acc1; + q31x4_t vecB, vecA0, vecA1; + /* enable predication to disable half of vector elements */ + mve_pred16_t p0 = vctp32q(MATRIX_DIM2); + + vecColBOffs = vidupq_u32((uint32_t)0, 1); + vecColBOffs = vecColBOffs * MATRIX_DIM2; + + pInB = pSrcB->pData; + + /* load 1st B column (partial load) */ + vecB = vldrwq_gather_shifted_offset_z_s32(pInB, vecColBOffs, p0); + + /* load A rows */ + vecA0 = vldrwq_s32(pInA0); + vecA1 = vldrwq_s32(pInA1); + + acc0 = vrmlaldavhq(vecA0, vecB); + acc1 = vrmlaldavhq(vecA1, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + + pOut[0 * MATRIX_DIM2] = (q31_t) acc0; + pOut[1 * MATRIX_DIM2] = (q31_t) acc1; + pOut++; + + /* move to next B column */ + pInB = pInB + 1; + + vecB = vldrwq_gather_shifted_offset_z_s32(pInB, vecColBOffs, p0); + + acc0 = vrmlaldavhq(vecA0, vecB); + acc1 = vrmlaldavhq(vecA1, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + + pOut[0 * MATRIX_DIM2] = (q31_t) acc0; + pOut[1 * MATRIX_DIM2] = (q31_t) acc1; + /* + * Return to application + */ + return (ARM_MATH_SUCCESS); +} + + + +__STATIC_INLINE arm_status arm_mat_mult_opt_q31_3x3_mve( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst) +{ + q31_t *pInB = pSrcB->pData; /* input data matrix pointer B */ + q31_t *pInA = pSrcA->pData; /* input data matrix pointer A */ + q31_t *pOut = pDst->pData; /* output data matrix pointer */ + uint32x4_t vecColBOffs; + q31_t *pInA0 = pInA; + q31_t *pInA1 = pInA0 + MATRIX_DIM3; + q31_t *pInA2 = pInA1 + MATRIX_DIM3; + q63_t acc0, acc1, acc2; + q31x4_t vecB, vecA; + /* enable predication to disable last (4th) vector element */ + mve_pred16_t p0 = vctp32q(MATRIX_DIM3); + + vecColBOffs = vidupq_u32((uint32_t)0, 1); + vecColBOffs = vecColBOffs * MATRIX_DIM3; + + pInB = pSrcB->pData; + + vecB = vldrwq_gather_shifted_offset_z_s32(pInB, vecColBOffs, p0); + + vecA = vldrwq_s32(pInA0); + acc0 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA1); + acc1 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA2); + acc2 = vrmlaldavhq(vecA, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + acc2 = asrl(acc2, 23); + + pOut[0 * MATRIX_DIM3] = (q31_t) acc0; + pOut[1 * MATRIX_DIM3] = (q31_t) acc1; + pOut[2 * MATRIX_DIM3] = (q31_t) acc2; + pOut++; + + /* move to next B column */ + pInB = pInB + 1; + + vecB = vldrwq_gather_shifted_offset_z_s32(pInB, vecColBOffs, p0); + + vecA = vldrwq_s32(pInA0); + acc0 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA1); + acc1 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA2); + acc2 = vrmlaldavhq(vecA, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + acc2 = asrl(acc2, 23); + + pOut[0 * MATRIX_DIM3] = (q31_t) acc0; + pOut[1 * MATRIX_DIM3] = (q31_t) acc1; + pOut[2 * MATRIX_DIM3] = (q31_t) acc2; + pOut++; + + /* move to next B column */ + pInB = pInB + 1; + + vecB = vldrwq_gather_shifted_offset_z_s32(pInB, vecColBOffs, p0); + + vecA = 
vldrwq_s32(pInA0); + acc0 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA1); + acc1 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA2); + acc2 = vrmlaldavhq(vecA, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + acc2 = asrl(acc2, 23); + + pOut[0 * MATRIX_DIM3] = (q31_t) acc0; + pOut[1 * MATRIX_DIM3] = (q31_t) acc1; + pOut[2 * MATRIX_DIM3] = (q31_t) acc2; + /* + * Return to application + */ + return (ARM_MATH_SUCCESS); +} + +__STATIC_INLINE arm_status arm_mat_mult_opt_q31_4x4_mve( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst) +{ + q31_t *pInB = pSrcB->pData; /* input data matrix pointer B */ + q31_t *pInA = pSrcA->pData; /* input data matrix pointer A */ + q31_t *pOut = pDst->pData; /* output data matrix pointer */ + uint32x4_t vecColBOffs; + q31_t *pInA0 = pInA; + q31_t *pInA1 = pInA0 + MATRIX_DIM4; + q31_t *pInA2 = pInA1 + MATRIX_DIM4; + q31_t *pInA3 = pInA2 + MATRIX_DIM4; + q63_t acc0, acc1, acc2, acc3; + q31x4_t vecB, vecA; + + vecColBOffs = vidupq_u32((uint32_t)0, 4); + + pInB = pSrcB->pData; + + vecB = vldrwq_gather_shifted_offset_s32(pInB, vecColBOffs); + + vecA = vldrwq_s32(pInA0); + acc0 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA1); + acc1 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA2); + acc2 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA3); + acc3 = vrmlaldavhq(vecA, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + acc2 = asrl(acc2, 23); + acc3 = asrl(acc3, 23); + + pOut[0 * MATRIX_DIM4] = (q31_t) acc0; + pOut[1 * MATRIX_DIM4] = (q31_t) acc1; + pOut[2 * MATRIX_DIM4] = (q31_t) acc2; + pOut[3 * MATRIX_DIM4] = (q31_t) acc3; + pOut++; + + /* move to next B column */ + pInB = pInB + 1; + + vecB = vldrwq_gather_shifted_offset_s32(pInB, vecColBOffs); + + vecA = vldrwq_s32(pInA0); + acc0 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA1); + acc1 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA2); + acc2 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA3); + acc3 = vrmlaldavhq(vecA, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + acc2 = asrl(acc2, 23); + acc3 = asrl(acc3, 23); + + pOut[0 * MATRIX_DIM4] = (q31_t) acc0; + pOut[1 * MATRIX_DIM4] = (q31_t) acc1; + pOut[2 * MATRIX_DIM4] = (q31_t) acc2; + pOut[3 * MATRIX_DIM4] = (q31_t) acc3; + + pOut++; + + /* move to next B column */ + pInB = pInB + 1; + + vecB = vldrwq_gather_shifted_offset_s32(pInB, vecColBOffs); + + vecA = vldrwq_s32(pInA0); + acc0 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA1); + acc1 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA2); + acc2 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA3); + acc3 = vrmlaldavhq(vecA, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + acc2 = asrl(acc2, 23); + acc3 = asrl(acc3, 23); + + pOut[0 * MATRIX_DIM4] = (q31_t) acc0; + pOut[1 * MATRIX_DIM4] = (q31_t) acc1; + pOut[2 * MATRIX_DIM4] = (q31_t) acc2; + pOut[3 * MATRIX_DIM4] = (q31_t) acc3; + + pOut++; + + /* move to next B column */ + pInB = pInB + 1; + + vecB = vldrwq_gather_shifted_offset_s32(pInB, vecColBOffs); + + vecA = vldrwq_s32(pInA0); + acc0 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA1); + acc1 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA2); + acc2 = vrmlaldavhq(vecA, vecB); + vecA = vldrwq_s32(pInA3); + acc3 = vrmlaldavhq(vecA, vecB); + + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + acc2 = asrl(acc2, 23); + acc3 = asrl(acc3, 23); + + pOut[0 * MATRIX_DIM4] = (q31_t) acc0; + pOut[1 * MATRIX_DIM4] = (q31_t) acc1; + pOut[2 * MATRIX_DIM4] = (q31_t) 
acc2; + pOut[3 * MATRIX_DIM4] = (q31_t) acc3; + /* + * Return to application + */ + return (ARM_MATH_SUCCESS); +} + + +arm_status arm_mat_mult_opt_q31( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst, + q31_t *pState) +{ + q31_t *pInA = pSrcA->pData; /* input data matrix pointer A */ + q31_t *pInB = pSrcB->pData; /* input data matrix pointer B */ + q31_t *pInA2; + q31_t *pInB2; + q31_t *px; /* Temporary output data matrix pointer */ + q31_t *px2; /* Temporary output data matrix pointer */ + uint32_t numRowsA = pSrcA->numRows; /* number of rows of input matrix A */ + uint32_t numColsB = pSrcB->numCols; /* number of columns of input matrix B */ + uint32_t numColsA = pSrcA->numCols; /* number of columns of input matrix A */ + uint32_t numRowsB = pSrcB->numRows; /* number of rows of input matrix A */ + uint32_t col, i = 0u, j, row = numRowsB; /* loop counters */ + q31_t *pSrcBT = pState; /* input data matrix pointer for transpose */ + uint32_t blkCnt; /* loop counters */ + arm_status status; /* Status of matrix multiplication */ + arm_matrix_instance_q31 BT; +#ifdef ARM_MATH_MATRIX_CHECK + + /* Check for matrix mismatch condition */ + if ((pSrcA->numCols != pSrcB->numRows) || + (pSrcA->numRows != pDst->numRows) || (pSrcB->numCols != pDst->numCols)) { + /* Set status as ARM_MATH_SIZE_MISMATCH */ + status = ARM_MATH_SIZE_MISMATCH; + } else +#endif /* #ifdef ARM_MATH_MATRIX_CHECK */ + { + + /* small squared matrix specialized routines */ + if(numRowsA == numColsB && numColsB == numColsA) { + if (numRowsA == 1) + { + q63_t sum = (q63_t) *pInA * *pInB; + pDst->pData[0] = (q31_t)(sum >> 31); + return (ARM_MATH_SUCCESS); + } + else if(numRowsA == 2) + return arm_mat_mult_opt_q31_2x2_mve(pSrcA, pSrcB, pDst); + else if(numRowsA == 3) + return arm_mat_mult_opt_q31_3x3_mve(pSrcA, pSrcB, pDst); + else if (numRowsA == 4) + return arm_mat_mult_opt_q31_4x4_mve(pSrcA, pSrcB, pDst); + } + + + /* + * Matrix transpose + */ + BT.numRows = numColsB; + BT.numCols = numRowsB; + BT.pData = pSrcBT; + + arm_mat_trans_q31(pSrcB, &BT); + + + /* + * Reset the variables for the usage in the following multiplication process + */ + i = 0; + row = numRowsA >> 1; + px = pDst->pData; + px2 = px + numColsB; + + /* + * main loop + * compute 2 x 2 output blocks + * with dot products (Matrix A rows * Transposed MAtrix B rows) + */ + while (row > 0u) { + /* + * For every row wise process, the column loop counter is to be initiated + * Compute 2 columns and 2 rows in parrallel + */ + col = numColsB >> 1; + j = 0; + + /* + * column pair loop + */ + while (col > 0u) { + q31_t const *pSrcAVec, *pSrcBVec, *pSrcA2Vec, *pSrcB2Vec; + q31x4_t vecA, vecA2, vecB, vecB2; + q63_t acc0, acc1, acc2, acc3; + + /* + * Initiate the pointers + * - 2 x consecutive Matrix A rows (i increment is 2 x numColsA) + * - 2 x consecutive Matrix B' rows (j increment is 2 x numRowsB) + */ + pInA = pSrcA->pData + i; + pInA2 = pInA + numColsA; + pInB = pSrcBT + j; + pInB2 = pInB + numRowsB; + + + pSrcAVec = (q31_t const *) pInA; + pSrcA2Vec = (q31_t const *) pInA2; + pSrcBVec = (q31_t const *) pInB; + pSrcB2Vec = (q31_t const *) pInB2; + + acc0 = 0LL; + acc1 = 0LL; + acc2 = 0LL; + acc3 = 0LL; + + /* load scheduling */ + vecA = vld1q(pSrcAVec); + pSrcAVec += 4; + + blkCnt = (numColsA / 4); + while (blkCnt > 0U) { + vecB = vld1q(pSrcBVec); + pSrcBVec += 4; + acc0 = vrmlaldavhaq(acc0, vecA, vecB); + vecA2 = vld1q(pSrcA2Vec); + pSrcA2Vec += 4; + acc1 = vrmlaldavhaq(acc1, vecA2, vecB); + vecB2 = 
vld1q(pSrcB2Vec); + pSrcB2Vec += 4; + acc2 = vrmlaldavhaq(acc2, vecA, vecB2); + vecA = vld1q(pSrcAVec); + pSrcAVec += 4; + acc3 = vrmlaldavhaq(acc3, vecA2, vecB2); + + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = (numColsA & 3); + if (blkCnt > 0U) { + mve_pred16_t p0 = vctp32q(blkCnt); + vecB = vld1q(pSrcBVec); + acc0 = vrmlaldavhaq_p(acc0, vecA, vecB, p0); + vecA2 = vld1q(pSrcA2Vec); + acc1 = vrmlaldavhaq_p(acc1, vecA2, vecB, p0); + vecB2 = vld1q(pSrcB2Vec); + acc2 = vrmlaldavhaq_p(acc2, vecA, vecB2, p0); + vecA = vld1q(pSrcAVec); + acc3 = vrmlaldavhaq_p(acc3, vecA2, vecB2, p0); + } + + /* Convert to 1.31 */ + acc0 = asrl(acc0, 23); + acc1 = asrl(acc1, 23); + acc2 = asrl(acc2, 23); + acc3 = asrl(acc3, 23); + + /* Store the results (2 x 2 block) in the destination buffer */ + *px++ = (q31_t) acc0; + *px++ = (q31_t) acc2; + *px2++ = (q31_t) acc1; + *px2++ = (q31_t) acc3; + + j += numRowsB * 2; + /* + * Decrement the column pair loop counter + */ + col--; + + } + + i = i + numColsA * 2; + px = px2 + (numColsB & 1u); + px2 = px + numColsB; + /* + * Decrement the row pair loop counter + */ + row--; + } + + /* + * Compute remaining row and/or column below + */ + if (numColsB & 1u) { + row = numRowsA & (~0x1); //avoid redundant computation + px = pDst->pData + numColsB - 1; + i = 0; + + /* + * row loop + */ + while (row > 0) { + q31_t const *pSrcAVec, *pSrcBVec; + q31x4_t vecA, vecB; + q63_t acc0; + + /* + * point to last column in matrix B + */ + pInB = pSrcBT + numRowsB * (numColsB - 1); + pInA = pSrcA->pData + i; + + pSrcAVec = (q31_t const *) pInA; + pSrcBVec = (q31_t const *) pInB; + + /* single dot-product */ + acc0 = 0LL; + blkCnt = (numColsA / 4); + while (blkCnt > 0U) { + vecA = vld1q(pSrcAVec); + pSrcAVec += 4; + vecB = vld1q(pSrcBVec); + pSrcBVec += 4; + acc0 = vrmlaldavhaq(acc0, vecA, vecB); + + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = (numColsA & 3); + if (blkCnt > 0U) { + mve_pred16_t p0 = vctp32q(blkCnt); + vecA = vld1q(pSrcAVec); + vecB = vld1q(pSrcBVec); + acc0 = vrmlaldavhaq_p(acc0, vecA, vecB, p0); + } + + acc0 = asrl(acc0, 23); + *px = (q31_t) acc0; + + px += numColsB; + + i += numColsA; + /* + * Decrement the row loop counter + */ + row--; + } + } + + if (numRowsA & 1u) { + col = numColsB; + i = 0u; + /* + * point to last row in output matrix + */ + px = pDst->pData + (numColsB) * (numRowsA - 1); + /* + * col loop + */ + while (col > 0) { + q31_t const *pSrcAVec, *pSrcBVec; + q31x4_t vecA, vecB; + q63_t acc0; + + /* + * point to last row in matrix A + */ + pInA = pSrcA->pData + (numRowsA - 1) * numColsA; + pInB = pSrcBT + i; + + /* + * Set the variable sum, that acts as accumulator, to zero + */ + pSrcAVec = (q31_t const *) pInA; + pSrcBVec = (q31_t const *) pInB; + acc0 = 0LL; + + blkCnt = (numColsA / 4); + while (blkCnt > 0U) { + vecA = vld1q(pSrcAVec); + pSrcAVec += 4; + vecB = vld1q(pSrcBVec); + pSrcBVec += 4; + acc0 = vrmlaldavhaq(acc0, vecA, vecB); + + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = (numColsA & 3); + if (blkCnt > 0U) { + mve_pred16_t p0 = vctp32q(blkCnt); + vecA = vld1q(pSrcAVec); + vecB = vld1q(pSrcBVec); + acc0 = vrmlaldavhaq_p(acc0, vecA, vecB, p0); + } + + acc0 = asrl(acc0, 23); + *px++ = (q31_t) acc0; + + i += numColsA; + /* + * Decrement the col loop counter + */ + col--; + } + } + /* Set status as ARM_MATH_SUCCESS */ + status = ARM_MATH_SUCCESS; + } + /* + * Return to application + */ + return (status); +} + +#else 
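The MVE specialization of the new opt_q31 kernel ends at the #else above; the scalar fallback that follows accumulates q31 x q31 products in a 64-bit accumulator in 2.62 format and converts back to 1.31 with a 31-bit shift, relying on the caller to scale the inputs down by log2(numColsA) bits as the header comment explains. A reduced sketch of one output element under that scheme (illustrative helper, not the library routine):

#include <stdint.h>

typedef int32_t q31_t;
typedef int64_t q63_t;

/* One output element of a Q31 matrix product: dot product of a row of A
 * (length n) with a strided column of B. The q63 accumulator holds a 2.62
 * value with a single guard bit, so inputs should be pre-scaled by log2(n)
 * bits to avoid wrap-around; the final >> 31 returns to 1.31 format. */
static q31_t q31_dot_row_col(const q31_t *row, const q31_t *col,
                             uint32_t n, uint32_t colStride)
{
    q63_t sum = 0;
    while (n-- > 0U)
    {
        sum += (q63_t)(*row++) * (*col);     /* 1.31 * 1.31 -> 2.62 */
        col += colStride;                    /* step down the column of B */
    }
    return (q31_t)(sum >> 31);               /* 2.62 -> 1.31 */
}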
+arm_status arm_mat_mult_opt_q31( + const arm_matrix_instance_q31 * pSrcA, + const arm_matrix_instance_q31 * pSrcB, + arm_matrix_instance_q31 * pDst, + q31_t *pState) +{ + q31_t *pIn1 = pSrcA->pData; /* Input data matrix pointer A */ + q31_t *pIn2 = pSrcB->pData; /* Input data matrix pointer B */ + q31_t *pInA = pSrcA->pData; /* Input data matrix pointer A */ + q31_t *pInB = pSrcB->pData; /* Input data matrix pointer B */ + q31_t *pOut = pDst->pData; /* Output data matrix pointer */ + q31_t *px; /* Temporary output data matrix pointer */ + q63_t sum; /* Accumulator */ + uint16_t numRowsA = pSrcA->numRows; /* Number of rows of input matrix A */ + uint16_t numColsB = pSrcB->numCols; /* Number of columns of input matrix B */ + uint16_t numColsA = pSrcA->numCols; /* Number of columns of input matrix A */ + uint32_t col, i = 0U, row = numRowsA, colCnt; /* Loop counters */ + arm_status status; /* Status of matrix multiplication */ + (void)pState; +#ifdef ARM_MATH_MATRIX_CHECK + + /* Check for matrix mismatch condition */ + if ((pSrcA->numCols != pSrcB->numRows) || + (pSrcA->numRows != pDst->numRows) || + (pSrcB->numCols != pDst->numCols) ) + { + /* Set status as ARM_MATH_SIZE_MISMATCH */ + status = ARM_MATH_SIZE_MISMATCH; + } + else + +#endif /* #ifdef ARM_MATH_MATRIX_CHECK */ + + { + /* The following loop performs the dot-product of each row in pSrcA with each column in pSrcB */ + /* row loop */ + do + { + /* Output pointer is set to starting address of row being processed */ + px = pOut + i; + + /* For every row wise process, column loop counter is to be initiated */ + col = numColsB; + + /* For every row wise process, pIn2 pointer is set to starting address of pSrcB data */ + pIn2 = pSrcB->pData; + + /* column loop */ + do + { + /* Set the variable sum, that acts as accumulator, to zero */ + sum = 0; + + /* Initialize pointer pIn1 to point to starting address of column being processed */ + pIn1 = pInA; + +#if defined (ARM_MATH_LOOPUNROLL) + + /* Loop unrolling: Compute 4 MACs at a time. */ + colCnt = numColsA >> 2U; + + /* matrix multiplication */ + while (colCnt > 0U) + { + /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... + a(m,p) * b(p,n) */ + + /* Perform the multiply-accumulates */ + sum += (q63_t) *pIn1++ * *pIn2; + pIn2 += numColsB; + + sum += (q63_t) *pIn1++ * *pIn2; + pIn2 += numColsB; + + sum += (q63_t) *pIn1++ * *pIn2; + pIn2 += numColsB; + + sum += (q63_t) *pIn1++ * *pIn2; + pIn2 += numColsB; + + /* Decrement loop counter */ + colCnt--; + } + + /* Loop unrolling: Compute remaining MACs */ + colCnt = numColsA % 0x4U; + +#else + + /* Initialize cntCnt with number of columns */ + colCnt = numColsA; + +#endif /* #if defined (ARM_MATH_LOOPUNROLL) */ + + while (colCnt > 0U) + { + /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... 
+ a(m,p) * b(p,n) */ + + /* Perform the multiply-accumulates */ + sum += (q63_t) *pIn1++ * *pIn2; + pIn2 += numColsB; + + /* Decrement loop counter */ + colCnt--; + } + + /* Convert result from 2.62 to 1.31 format and store in destination buffer */ + *px++ = (q31_t) (sum >> 31); + + /* Decrement column loop counter */ + col--; + + /* Update pointer pIn2 to point to starting address of next column */ + pIn2 = pInB + (numColsB - col); + + } while (col > 0U); + + /* Update pointer pInA to point to starting address of next row */ + i = i + numColsB; + pInA = pInA + numColsA; + + /* Decrement row loop counter */ + row--; + + } while (row > 0U); + + /* Set status as ARM_MATH_SUCCESS */ + status = ARM_MATH_SUCCESS; + } + + /* Return to application */ + return (status); +} +#endif /* defined(ARM_MATH_MVEI) */ + +/** + @} end of MatrixMult group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q15.c index 612ad92..026a993 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q15.c @@ -5,13 +5,13 @@ * Title: arm_mat_mult_q15.c * Description: Q15 matrix multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 3 Nov 2021 + * $Revision: V1.10.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
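Since arm_mat_mult_opt_q31 is new in this drop of the SDK, a short usage sketch may help: unlike arm_mat_mult_q31 it takes a pState scratch buffer that must be able to hold the transpose of B (numRows x numCols of B elements). This assumes the updated matrix_functions.h declares the kernel and that arm_math.h is reachable on the include path; matrix contents are omitted:

#include "arm_math.h"

/* Multiply two 3x3 Q31 matrices with the new "opt" kernel. */
void opt_q31_demo(void)
{
    static q31_t a[9], b[9], c[9];           /* operand/result storage, contents omitted */
    static q31_t scratch[9];                 /* holds B transposed inside the kernel */

    arm_matrix_instance_q31 A = { 3, 3, a };
    arm_matrix_instance_q31 B = { 3, 3, b };
    arm_matrix_instance_q31 C = { 3, 3, c };

    arm_status st = arm_mat_mult_opt_q31(&A, &B, &C, scratch);
    (void)st;                                /* ARM_MATH_SUCCESS when the sizes match */
}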
* * SPDX-License-Identifier: Apache-2.0 * @@ -44,7 +44,7 @@ @param[in] pSrcA points to the first input matrix structure @param[in] pSrcB points to the second input matrix structure @param[out] pDst points to output matrix structure - @param[in] pState points to the array for storing intermediate results (Unused) + @param[in] pState points to the array for storing intermediate results @return execution status - \ref ARM_MATH_SUCCESS : Operation successful - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed @@ -317,282 +317,309 @@ __STATIC_INLINE arm_status arm_mat_mult_q15_4x4_mve( return (ARM_MATH_SUCCESS); } + arm_status arm_mat_mult_q15( - const arm_matrix_instance_q15 * pSrcA, - const arm_matrix_instance_q15 * pSrcB, - arm_matrix_instance_q15 * pDst, - q15_t * pState) + const arm_matrix_instance_q15 * pSrcA, + const arm_matrix_instance_q15 * pSrcB, + arm_matrix_instance_q15 * pDst, + q15_t * pState) { - q15_t *pInB = pSrcB->pData; /* input data matrix pointer B */ - q15_t *pInA = pSrcA->pData; /* input data matrix pointer A */ - q15_t *pOut = pDst->pData; /* output data matrix pointer */ - q15_t *px; /* Temporary output data matrix pointer */ - uint16_t numRowsA = pSrcA->numRows; /* number of rows of input matrix A */ - uint16_t numColsB = pSrcB->numCols; /* number of columns of input matrix B */ - uint16_t numColsA = pSrcA->numCols; /* number of columns of input matrix A */ - uint16_t col, i = 0U, row = numRowsA, colCnt; /* loop counters */ - uint16x8_t vecOffs, vecColBOffs; - uint32_t blkCnt,rowCnt; /* loop counters */ - arm_status status; /* Status of matrix multiplication */ - (void)pState; + q15_t *pInA = pSrcA->pData; /* input data matrix pointer A */ + q15_t *pInB = pSrcB->pData; /* input data matrix pointer B */ + q15_t *pInA2; + q15_t *pInB2; + q15_t *px; /* Temporary output data matrix pointer */ + q15_t *px2; /* Temporary output data matrix pointer */ + uint32_t numRowsA = pSrcA->numRows; /* number of rows of input matrix A */ + uint32_t numColsB = pSrcB->numCols; /* number of columns of input matrix B */ + uint32_t numColsA = pSrcA->numCols; /* number of columns of input matrix A */ + uint32_t numRowsB = pSrcB->numRows; /* number of rows of input matrix A */ + uint32_t col, i = 0u, j, row = numRowsB; /* loop counters */ + q15_t *pSrcBT = pState; /* input data matrix pointer for transpose */ + uint32_t blkCnt; /* loop counters */ + arm_status status; /* Status of matrix multiplication */ + arm_matrix_instance_q15 BT; #ifdef ARM_MATH_MATRIX_CHECK - /* Check for matrix mismatch condition */ - if ((pSrcA->numCols != pSrcB->numRows) || + /* Check for matrix mismatch condition */ + if ((pSrcA->numCols != pSrcB->numRows) || (pSrcA->numRows != pDst->numRows) || (pSrcB->numCols != pDst->numCols) ) - { - /* Set status as ARM_MATH_SIZE_MISMATCH */ - status = ARM_MATH_SIZE_MISMATCH; - } - else -#endif - { - /* small squared matrix specialized routines */ - if(numRowsA == numColsB && numColsB == numColsA) { - - if (numRowsA == 1) - { - q63_t sum; - sum = pInA[0] * pInB[0]; - pOut[0] = (q15_t) __SSAT((sum >> 15), 16); - return (ARM_MATH_SUCCESS); - } - else if(numRowsA == 2) - return arm_mat_mult_q15_2x2_mve(pSrcA, pSrcB, pDst); - else if(numRowsA == 3) - return arm_mat_mult_q15_3x3_mve(pSrcA, pSrcB, pDst); - else if (numRowsA == 4) - return arm_mat_mult_q15_4x4_mve(pSrcA, pSrcB, pDst); + { + /* Set status as ARM_MATH_SIZE_MISMATCH */ + status = ARM_MATH_SIZE_MISMATCH; } - - vecColBOffs = vidupq_u16((uint32_t)0, 1); - vecColBOffs = vecColBOffs * (uint16_t) (numColsB); - - /* - * The 
following loop performs the dot-product of each row in pSrcA with each column in pSrcB - */ - - /* - * row loop - */ - rowCnt = row >> 2; - while (rowCnt > 0U) + else +#endif { + /* small squared matrix specialized routines */ + if (numRowsA == numColsB && numColsB == numColsA) { + + if (numRowsA == 1) { + q63_t sum; + sum = pInA[0] * pInB[0]; + pDst->pData[0] = (q15_t) __SSAT((sum >> 15), 16); + return (ARM_MATH_SUCCESS); + } else if (numRowsA == 2) + return arm_mat_mult_q15_2x2_mve(pSrcA, pSrcB, pDst); + else if (numRowsA == 3) + return arm_mat_mult_q15_3x3_mve(pSrcA, pSrcB, pDst); + else if (numRowsA == 4) + return arm_mat_mult_q15_4x4_mve(pSrcA, pSrcB, pDst); + } + /* - * Output pointer is set to starting address of the row being processed + * Matrix transpose */ - px = pOut + i; - i = i + 4 * numColsB; + + BT.numRows = numColsB; + BT.numCols = numRowsB; + BT.pData = pSrcBT; + + arm_mat_trans_q15(pSrcB, &BT); + + /* - * For every row wise process, the column loop counter is to be initiated + * Reset the variables for the usage in the following multiplication process */ - col = numColsB; + i = 0; + row = numRowsA >> 1; + px = pDst->pData; + px2 = px + numColsB; + /* - * For every row wise process, the pInB pointer is set - * to the starting address of the pSrcB data + * The following loop performs the dot-product of each row in pSrcA with each column in pSrcB */ - pInB = pSrcB->pData; + /* - * column loop + * row loop */ - while (col > 0U) - { + while (row > 0u) { /* - * generate 4 columns elements + * For every row wise process, the column loop counter is to be initiated */ + col = numColsB >> 1; /* - * Matrix A columns number of MAC operations are to be performed + * For every row wise process, the pIn2 pointer is set + * to the starting address of the transposed pSrcB data */ - colCnt = numColsA; - - q15_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec; - q15_t *pInA0 = pInA; - q15_t *pInA1 = pInA0 + numColsA; - q15_t *pInA2 = pInA1 + numColsA; - q15_t *pInA3 = pInA2 + numColsA; - q63_t acc0, acc1, acc2, acc3; - - acc0 = 0LL; - acc1 = 0LL; - acc2 = 0LL; - acc3 = 0LL; - - pSrcA0Vec = (q15_t const *) pInA0; - pSrcA1Vec = (q15_t const *) pInA1; - pSrcA2Vec = (q15_t const *) pInA2; - pSrcA3Vec = (q15_t const *) pInA3; - - vecOffs = vecColBOffs; - - blkCnt = (numColsA) >> 3; - while (blkCnt > 0U) - { - q15x8_t vecB, vecA; - - vecB = vldrhq_gather_shifted_offset((int16_t const *)pInB, vecOffs); - vecOffs = vecOffs + (uint16_t) (numColsB * 8); - - vecA = vld1q(pSrcA0Vec); pSrcA0Vec += 8; - acc0 = vmlaldavaq(acc0, vecA, vecB); - vecA = vld1q(pSrcA1Vec); pSrcA1Vec += 8; - acc1 = vmlaldavaq(acc1, vecA, vecB); - vecA = vld1q(pSrcA2Vec); pSrcA2Vec += 8; - acc2 = vmlaldavaq(acc2, vecA, vecB); - vecA = vld1q(pSrcA3Vec); pSrcA3Vec += 8; - acc3 = vmlaldavaq(acc3, vecA, vecB); - blkCnt--; + pInB = pSrcBT; + pInB2 = pInB + numRowsB; + j = 0; - } /* - * tail + * column loop */ - blkCnt = numColsA & 7; - if (blkCnt > 0U) - { - mve_pred16_t p0 = vctp16q(blkCnt); - q15x8_t vecB, vecA; - - vecB = vldrhq_gather_shifted_offset((int16_t const *)pInB, vecOffs); - vecOffs = vecOffs + (uint16_t) (numColsB * 8); - - vecA = vld1q(pSrcA0Vec); - acc0 = vmlaldavaq_p(acc0, vecA, vecB, p0); - vecA = vld1q(pSrcA1Vec); - acc1 = vmlaldavaq_p(acc1, vecA, vecB, p0); - vecA = vld1q(pSrcA2Vec); - acc2 = vmlaldavaq_p(acc2, vecA, vecB, p0); - vecA = vld1q(pSrcA3Vec); - acc3 = vmlaldavaq_p(acc3, vecA, vecB, p0); + while (col > 0u) { + q15_t const *pSrcAVec, *pSrcBVec, *pSrcA2Vec, *pSrcB2Vec; + q15x8_t vecA, vecA2, vecB, 
vecB2; + q63_t acc0, acc1, acc2, acc3; + + /* + * Initiate the pointer pIn1 to point to the starting address of the column being processed + */ + pInA = pSrcA->pData + i; + pInA2 = pInA + numColsA; + pInB = pSrcBT + j; + pInB2 = pInB + numRowsB; + + + pSrcAVec = (q15_t const *) pInA; + pSrcA2Vec = (q15_t const *) pInA2; + pSrcBVec = (q15_t const *) pInB; + pSrcB2Vec = (q15_t const *) pInB2; + + acc0 = 0LL; + acc1 = 0LL; + acc2 = 0LL; + acc3 = 0LL; + + vecA = vld1q(pSrcAVec); + pSrcAVec += 8; + + blkCnt = numColsA / 8; + while (blkCnt > 0U) { + vecB = vld1q(pSrcBVec); + pSrcBVec += 8; + acc0 = vmlaldavaq(acc0, vecA, vecB); + vecA2 = vld1q(pSrcA2Vec); + pSrcA2Vec += 8; + acc1 = vmlaldavaq(acc1, vecA2, vecB); + vecB2 = vld1q(pSrcB2Vec); + pSrcB2Vec += 8; + acc2 = vmlaldavaq(acc2, vecA, vecB2); + vecA = vld1q(pSrcAVec); + pSrcAVec += 8; + acc3 = vmlaldavaq(acc3, vecA2, vecB2); + + blkCnt--; + } + /* + * tail + */ + blkCnt = numColsA & 7; + if (blkCnt > 0U) { + mve_pred16_t p0 = vctp16q(blkCnt); + vecB = vld1q(pSrcBVec); + acc0 = vmlaldavaq_p(acc0, vecA, vecB, p0); + vecA2 = vld1q(pSrcA2Vec); + acc1 = vmlaldavaq_p(acc1, vecA2, vecB, p0); + vecB2 = vld1q(pSrcB2Vec); + acc2 = vmlaldavaq_p(acc2, vecA, vecB2, p0); + vecA = vld1q(pSrcAVec); + acc3 = vmlaldavaq_p(acc3, vecA2, vecB2, p0); + } + + *px++ = (q15_t) MVE_ASRL_SAT16(acc0, 15); + *px++ = (q15_t) MVE_ASRL_SAT16(acc2, 15); + *px2++ = (q15_t) MVE_ASRL_SAT16(acc1, 15); + *px2++ = (q15_t) MVE_ASRL_SAT16(acc3, 15); + j += numRowsB * 2; + /* + * Decrement the column loop counter + */ + col--; + } - px[0] = (q15_t)MVE_ASRL_SAT16(acc0, 15); - px[1 * numColsB] = (q15_t)MVE_ASRL_SAT16(acc1, 15); - px[2 * numColsB] = (q15_t)MVE_ASRL_SAT16(acc2, 15); - px[3 * numColsB] = (q15_t)MVE_ASRL_SAT16(acc3, 15); - px++; - /* - * Decrement the column loop counter - */ - col--; + i = i + numColsA * 2; + px = px2 + (numColsB & 1u); + px2 = px + numColsB; /* - * Update the pointer pInB to point to the starting address of the next column + * Decrement the row loop counter */ - pInB = pSrcB->pData + (numColsB - col); + row--; } /* - * Update the pointer pInA to point to the starting address of the next row - */ - pInA += (numColsA * 4); - /* - * Decrement the row loop counter - */ - rowCnt --; - - } - - rowCnt = row & 3; - while (rowCnt > 0U) - { - /* - * Output pointer is set to starting address of the row being processed - */ - px = pOut + i; - i = i + numColsB; - /* - * For every row wise process, the column loop counter is to be initiated + * Compute remaining row and/or column below */ - col = numColsB; - /* - * For every row wise process, the pInB pointer is set - * to the starting address of the pSrcB data - */ - pInB = pSrcB->pData; - /* - * column loop - */ - while (col > 0U) - { - /* - * generate 4 columns elements - */ - /* - * Matrix A columns number of MAC operations are to be performed - */ - colCnt = numColsA; - - q15_t const *pSrcA0Vec; - q15_t *pInA0 = pInA; - q63_t acc0; - - acc0 = 0LL; - - pSrcA0Vec = (q15_t const *) pInA0; - - vecOffs = vecColBOffs; - - blkCnt = (numColsA) >> 3; - while (blkCnt > 0U) - { - q15x8_t vecB, vecA; - vecB = vldrhq_gather_shifted_offset((int16_t const *)pInB, vecOffs); - vecOffs = vecOffs + (uint16_t) (numColsB * 8); + if (numColsB & 1u) { + row = numRowsA & (~0x1); //avoid redundant computation + px = pDst->pData + numColsB - 1; + i = 0; - vecA = vld1q(pSrcA0Vec); - pSrcA0Vec += 8; - acc0 = vmlaldavaq(acc0, vecA, vecB); - - blkCnt--; - - } /* - * tail + * row loop */ - blkCnt = numColsA & 7; - if (blkCnt > 0U) - { - 
mve_pred16_t p0 = vctp16q(blkCnt); - q15x8_t vecB, vecA; - - vecB = vldrhq_gather_shifted_offset((int16_t const *)pInB, vecOffs); - vecOffs = vecOffs + (uint16_t) (numColsB * 8); - - vecA = vld1q(pSrcA0Vec); - acc0 = vmlaldavaq_p(acc0, vecA, vecB, p0); - + while (row > 0) { + q15_t const *pSrcAVec, *pSrcBVec; + q15x8_t vecA, vecB; + q63_t acc0; + + /* + * point to last column in matrix B + */ + pInB = pSrcBT + numRowsB * (numColsB - 1); + pInA = pSrcA->pData + i; + + pSrcAVec = (q15_t const *) pInA; + pSrcBVec = (q15_t const *) pInB; + + acc0 = 0LL; + blkCnt = (numColsA) / 8; + while (blkCnt > 0U) { + vecA = vld1q(pSrcAVec); + pSrcAVec += 8; + vecB = vld1q(pSrcBVec); + pSrcBVec += 8; + acc0 = vmlaldavaq(acc0, vecA, vecB); + + blkCnt--; + } + /* + * tail + */ + blkCnt = (numColsA & 7); + if (blkCnt > 0U) { + mve_pred16_t p0 = vctp16q(blkCnt); + vecA = vld1q(pSrcAVec); + vecB = vld1q(pSrcBVec); + acc0 = vmlaldavaq_p(acc0, vecA, vecB, p0); + } + + *px = (q15_t) MVE_ASRL_SAT16(acc0, 15); + + px += numColsB; + + i += numColsA; + /* + * Decrement the row loop counter + */ + row--; } + } - px[0] = (q15_t)MVE_ASRL_SAT16(acc0, 15); - - px++; + if (numRowsA & 1u) { + col = numColsB; + i = 0u; /* - * Decrement the column loop counter + * point to last row in output matrix */ - col--; + px = pDst->pData + (numColsB) * (numRowsA - 1); /* - * Update the pointer pInB to point to the starting address of the next column + * col loop */ - pInB = pSrcB->pData + (numColsB - col); + while (col > 0) { + q15_t const *pSrcAVec, *pSrcBVec; + q15x8_t vecA, vecB; + q63_t acc0; + + /* + * point to last row in matrix A + */ + pInA = pSrcA->pData + (numRowsA - 1) * numColsA; + pInB = pSrcBT + i; + + /* + * Set the variable sum, that acts as accumulator, to zero + */ + pSrcAVec = (q15_t const *) pInA; + pSrcBVec = (q15_t const *) pInB; + acc0 = 0LL; + + blkCnt = ((numColsA) / 8); + while (blkCnt > 0U) { + vecA = vld1q(pSrcAVec); + pSrcAVec += 8; + vecB = vld1q(pSrcBVec); + pSrcBVec += 8; + acc0 = vmlaldavaq(acc0, vecA, vecB); + + blkCnt--; + } + /* + * tail + */ + blkCnt = (numColsA & 7); + if (blkCnt > 0U) { + mve_pred16_t p0 = vctp16q(blkCnt); + vecA = vld1q(pSrcAVec); + vecB = vld1q(pSrcBVec); + acc0 = vmlaldavaq_p(acc0, vecA, vecB, p0); + } + + *px++ = (q15_t) MVE_ASRL_SAT16(acc0, 15); + + i += numColsA; + + /* + * Decrement the col loop counter + */ + col--; + } } - /* - * Update the pointer pInA to point to the starting address of the next row - */ - pInA += (numColsA ); - rowCnt--; + /* Set status as ARM_MATH_SUCCESS */ + status = ARM_MATH_SUCCESS; } - /* Set status as ARM_MATH_SUCCESS */ - status = ARM_MATH_SUCCESS; - } - - /* Return to application */ - return (status); - + /* Return to application */ + return (status); } -#else + +#else arm_status arm_mat_mult_q15( const arm_matrix_instance_q15 * pSrcA, const arm_matrix_instance_q15 * pSrcB, @@ -610,12 +637,12 @@ arm_status arm_mat_mult_q15( uint16_t numRowsA = pSrcA->numRows; /* Number of rows of input matrix A */ uint16_t numColsB = pSrcB->numCols; /* Number of columns of input matrix B */ uint16_t numColsA = pSrcA->numCols; /* Number of columns of input matrix A */ - uint16_t numRowsB = pSrcB->numRows; /* Number of rows of input matrix A */ + uint16_t numRowsB = pSrcB->numRows; /* Number of rows of input matrix B */ uint32_t col, i = 0U, row = numRowsB, colCnt; /* Loop counters */ arm_status status; /* Status of matrix multiplication */ - - q31_t in; /* Temporary variable to hold the input value */ + q31_t inA1, inB1, inA2, inB2; + arm_matrix_instance_q15 
BT; #ifdef ARM_MATH_MATRIX_CHECK @@ -630,89 +657,13 @@ arm_status arm_mat_mult_q15( else #endif /* #ifdef ARM_MATH_MATRIX_CHECK */ - { - /* Matrix transpose */ - do - { - /* The pointer px is set to starting address of column being processed */ - px = pSrcBT + i; - - /* Apply loop unrolling and exchange columns with row elements */ - col = numColsB >> 2U; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. - ** a second loop below computes the remaining 1 to 3 samples. */ - while (col > 0U) - { - /* Read two elements from row */ - in = read_q15x2_ia ((q15_t **) &pInB); - - /* Unpack and store one element in destination */ -#ifndef ARM_MATH_BIG_ENDIAN - *px = (q15_t) in; -#else - *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16); -#endif /* #ifndef ARM_MATH_BIG_ENDIAN */ - - /* Update pointer px to point to next row of transposed matrix */ - px += numRowsB; - - /* Unpack and store second element in destination */ -#ifndef ARM_MATH_BIG_ENDIAN - *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16); -#else - *px = (q15_t) in; -#endif /* #ifndef ARM_MATH_BIG_ENDIAN */ - - /* Update pointer px to point to next row of transposed matrix */ - px += numRowsB; - - /* Read two elements from row */ - in = read_q15x2_ia ((q15_t **) &pInB); - - /* Unpack and store one element in destination */ -#ifndef ARM_MATH_BIG_ENDIAN - *px = (q15_t) in; -#else - *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16); -#endif /* #ifndef ARM_MATH_BIG_ENDIAN */ - px += numRowsB; - -#ifndef ARM_MATH_BIG_ENDIAN - *px = (q15_t) ((in & (q31_t) 0xffff0000) >> 16); -#else - *px = (q15_t) in; -#endif /* #ifndef ARM_MATH_BIG_ENDIAN */ - px += numRowsB; - - /* Decrement column loop counter */ - col--; - } - /* If the columns of pSrcB is not a multiple of 4, compute any remaining output samples here. - ** No loop unrolling is used. */ - col = numColsB % 0x4U; - - while (col > 0U) - { - /* Read and store input element in destination */ - *px = *pInB++; - - /* Update pointer px to point to next row of transposed matrix */ - px += numRowsB; - - /* Decrement column loop counter */ - col--; - } - - i++; - - /* Decrement row loop counter */ - row--; - - } while (row > 0U); + BT.numRows = numColsB; + BT.numCols = numRowsB; + BT.pData = pSrcBT; + arm_mat_trans_q15(pSrcB,&BT); /* Reset variables for usage in following multiplication process */ row = numRowsA; i = 0U; @@ -746,13 +697,13 @@ arm_status arm_mat_mult_q15( /* c(m,n) = a(1,1) * b(1,1) + a(1,2) * b(2,1) + .... 
+ a(m,p) * b(p,n) */ /* read real and imag values from pSrcA and pSrcB buffer */ - inA1 = read_q15x2_ia ((q15_t **) &pInA); - inB1 = read_q15x2_ia ((q15_t **) &pInB); + inA1 = read_q15x2_ia (&pInA); + inB1 = read_q15x2_ia (&pInB); - inA2 = read_q15x2_ia ((q15_t **) &pInA); - inB2 = read_q15x2_ia ((q15_t **) &pInB); + inA2 = read_q15x2_ia (&pInA); + inB2 = read_q15x2_ia (&pInB); - /* Multiply and Accumlates */ + /* Multiply and Accumulates */ sum = __SMLALD(inA1, inB1, sum); sum = __SMLALD(inA2, inB2, sum); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q31.c index 54f1c09..252eebf 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_mult_q31.c * Description: Q31 matrix multiplication * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -346,7 +346,7 @@ arm_status arm_mat_mult_q31( uint16_t numRowsA = pSrcA->numRows; /* number of rows of input matrix A */ uint16_t numColsB = pSrcB->numCols; /* number of columns of input matrix B */ uint16_t numColsA = pSrcA->numCols; /* number of columns of input matrix A */ - uint16_t col, i = 0U, row = numRowsA, colCnt; /* loop counters */ + uint16_t col, i = 0U, row = numRowsA; /* loop counters */ arm_status status; /* status of matrix multiplication */ uint32x4_t vecOffs, vecColBOffs; uint32_t blkCnt, rowCnt; /* loop counters */ @@ -420,7 +420,6 @@ arm_status arm_mat_mult_q31( /* * Matrix A columns number of MAC operations are to be performed */ - colCnt = numColsA; q31_t const *pSrcA0Vec, *pSrcA1Vec, *pSrcA2Vec, *pSrcA3Vec; q31_t const *pInA0 = pInA; @@ -543,7 +542,6 @@ arm_status arm_mat_mult_q31( /* * Matrix A columns number of MAC operations are to be performed */ - colCnt = numColsA; q31_t const *pSrcA0Vec; q31_t const *pInA0 = pInA; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q7.c index 79334e9..e9541fa 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_mult_q7.c @@ -5,12 +5,14 @@ * Title: arm_mat_mult_q7.c * Description: Q15 matrix multiplication * - * $Date: 06. July 2020 + * $Date: 23 April 2021 * - * Target Processor: Cortex-M cores + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
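The rewritten Q15 multiplication above drops the hand-rolled column unpacking: it transposes B into the caller-supplied scratch buffer via arm_mat_trans_q15 and then accumulates row-against-row dot products (vmlaldavaq with tail predication on Helium, __SMLALD on the DSP-extension path). A minimal calling sketch follows; the sizes, buffer names, function name and include path are illustrative and not taken from the patch:

    /* Illustrative sketch only: multiply a 4x8 by an 8x4 Q15 matrix. */
    #include "arm_math.h"   /* include path may differ inside the Edge Impulse SDK tree */

    #define ROWS_A 4
    #define COLS_A 8   /* equals the number of rows of B */
    #define COLS_B 4

    static q15_t a_data[ROWS_A * COLS_A];
    static q15_t b_data[COLS_A * COLS_B];
    static q15_t c_data[ROWS_A * COLS_B];
    static q15_t scratch[COLS_A * COLS_B];  /* receives the transposed B matrix */

    arm_status multiply_q15_example(void)
    {
        arm_matrix_instance_q15 A, B, C;

        arm_mat_init_q15(&A, ROWS_A, COLS_A, a_data);
        arm_mat_init_q15(&B, COLS_A, COLS_B, b_data);
        arm_mat_init_q15(&C, ROWS_A, COLS_B, c_data);

        /* Returns ARM_MATH_SIZE_MISMATCH when ARM_MATH_MATRIX_CHECK is
         * enabled and the inner dimensions disagree. */
        return arm_mat_mult_q15(&A, &B, &C, scratch);
    }

The scratch argument must provide numRowsB * numColsB elements, because it receives the complete transposed copy of B.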
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f16.c new file mode 100644 index 0000000..1e04295 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f16.c @@ -0,0 +1,784 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mat_qr_f16.c + * Description: Half floating-point matrix QR decomposition. + * + * $Date: 15 June 2022 + * $Revision: V1.11.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" + + +#if !defined(ARM_MATH_AUTOVECTORIZE) +#if defined(ARM_MATH_MVE_FLOAT16) +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +#endif +#endif + +/** + @ingroup groupMatrix + */ + + +/** + @addtogroup MatrixQR + @{ + */ + +/** + @brief QR decomposition of a m x n half floating point matrix with m >= n. + @param[in] pSrc points to input matrix structure. The source matrix is modified by the function. + @param[in] threshold norm2 threshold. + @param[out] pOutR points to output R matrix structure of dimension m x n + @param[out] pOutQ points to output Q matrix structure of dimension m x m (can be NULL) + @param[out] pOutTau points to Householder scaling factors of dimension n + @param[inout] pTmpA points to a temporary vector of dimension m. + @param[inout] pTmpB points to a temporary vector of dimension m. + @return execution status + - \ref ARM_MATH_SUCCESS : Operation successful + - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed + + @par pOutQ is optional: + pOutQ can be a NULL pointer. + In this case, the argument will be ignored + and the output Q matrix won't be computed. + + @par f16 implementation + The f16 implementation is not very accurate. 
+ + @par Norm2 threshold + For the meaning of this argument please + refer to the \ref MatrixHouseholder documentation + + */ + +#if !defined(ARM_MATH_AUTOVECTORIZE) +#if defined(ARM_MATH_MVE_FLOAT16) + +arm_status arm_mat_qr_f16( + const arm_matrix_instance_f16 * pSrc, + const float16_t threshold, + arm_matrix_instance_f16 * pOutR, + arm_matrix_instance_f16 * pOutQ, + float16_t * pOutTau, + float16_t *pTmpA, + float16_t *pTmpB + ) + +{ + int32_t col=0; + int32_t nb,pos; + float16_t *pa,*pc; + float16_t beta; + float16_t *pv; + float16_t *pdst; + float16_t *p; + + if (pSrc->numRows < pSrc->numCols) + { + return(ARM_MATH_SIZE_MISMATCH); + } + + memcpy(pOutR->pData,pSrc->pData,pSrc->numCols * pSrc->numRows*sizeof(float16_t)); + pOutR->numCols = pSrc->numCols; + pOutR->numRows = pSrc->numRows; + + p = pOutR->pData; + + pc = pOutTau; + for(col=0 ; col < pSrc->numCols; col++) + { + int32_t j,k,blkCnt,blkCnt2; + float16_t *pa0,*pa1,*pa2,*pa3,*ptemp; + float16_t temp; + float16x8_t v1,v2,vtemp; + + COPY_COL_F16(pOutR,col,col,pTmpA); + + beta = arm_householder_f16(pTmpA,threshold,pSrc->numRows - col,pTmpA); + *pc++ = beta; + + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + pv = pTmpA; + pa = p; + + temp = *pv; + blkCnt = (pSrc->numCols-col) >> 3; + while (blkCnt > 0) + { + v1 = vld1q_f16(pa); + v2 = vmulq_n_f16(v1,temp); + vst1q_f16(pdst,v2); + + pa += 8; + pdst += 8; + blkCnt--; + } + blkCnt = (pSrc->numCols-col) & 7; + if (blkCnt > 0) + { + mve_pred16_t p0 = vctp16q(blkCnt); + v1 = vld1q_f16(pa); + v2 = vmulq_n_f16(v1,temp); + vst1q_p_f16(pdst,v2,p0); + + pa += blkCnt; + } + + pa += col; + pv++; + pdst = pTmpB; + + pa0 = pa; + pa1 = pa0 + pSrc->numCols; + pa2 = pa1 + pSrc->numCols; + pa3 = pa2 + pSrc->numCols; + + /* Unrolled loop */ + blkCnt = (pSrc->numRows-col - 1) >> 2; + k=1; + while(blkCnt > 0) + { + vtemp=vld1q_f16(pv); + + blkCnt2 = (pSrc->numCols-col) >> 3; + while (blkCnt2 > 0) + { + v1 = vld1q_f16(pdst); + + v2 = vld1q_f16(pa0); + v1 = vfmaq_n_f16(v1,v2,vgetq_lane(vtemp,0)); + + v2 = vld1q_f16(pa1); + v1 = vfmaq_n_f16(v1,v2,vgetq_lane(vtemp,1)); + + v2 = vld1q_f16(pa2); + v1 = vfmaq_n_f16(v1,v2,vgetq_lane(vtemp,2)); + + v2 = vld1q_f16(pa3); + v1 = vfmaq_n_f16(v1,v2,vgetq_lane(vtemp,3)); + + vst1q_f16(pdst,v1); + + pdst += 8; + pa0 += 8; + pa1 += 8; + pa2 += 8; + pa3 += 8; + blkCnt2--; + } + blkCnt2 = (pSrc->numCols-col) & 7; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp16q(blkCnt2); + + v1 = vld1q_f16(pdst); + + v2 = vld1q_f16(pa0); + v1 = vfmaq_n_f16(v1,v2,vgetq_lane(vtemp,0)); + + v2 = vld1q_f16(pa1); + v1 = vfmaq_n_f16(v1,v2,vgetq_lane(vtemp,1)); + + v2 = vld1q_f16(pa2); + v1 = vfmaq_n_f16(v1,v2,vgetq_lane(vtemp,2)); + + v2 = vld1q_f16(pa3); + v1 = vfmaq_n_f16(v1,v2,vgetq_lane(vtemp,3)); + + vst1q_p_f16(pdst,v1,p0); + + pa0 += blkCnt2; + pa1 += blkCnt2; + pa2 += blkCnt2; + pa3 += blkCnt2; + } + + pa0 += col + 3*pSrc->numCols; + pa1 += col + 3*pSrc->numCols; + pa2 += col + 3*pSrc->numCols; + pa3 += col + 3*pSrc->numCols; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-col; k++) + { + temp = *pv; + blkCnt2 = (pSrc->numCols-col) >> 3; + while (blkCnt2 > 0) + { + v1 = vld1q_f16(pa); + v2 = vld1q_f16(pdst); + v2 = vfmaq_n_f16(v2,v1,temp); + vst1q_f16(pdst,v2); + + pa += 8; + pdst += 8; + blkCnt2--; + } + blkCnt2 = (pSrc->numCols-col) & 7; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp16q(blkCnt2); + v1 = vld1q_f16(pa); + v2 = vld1q_f16(pdst); + v2 = vfmaq_n_f16(v2,v1,temp); + vst1q_p_f16(pdst,v2,p0); + + pa += blkCnt2; + } + + pa += col; + pv++; 
+ pdst = pTmpB; + } + + /* A(col:,col:) - beta v tmpb */ + pa = p; + for(j=0;jnumRows-col; j++) + { + float16_t f = -(_Float16)beta * (_Float16)pTmpA[j]; + ptemp = pTmpB; + + blkCnt2 = (pSrc->numCols-col) >> 3; + while (blkCnt2 > 0) + { + v1 = vld1q_f16(pa); + v2 = vld1q_f16(ptemp); + v1 = vfmaq_n_f16(v1,v2,f); + vst1q_f16(pa,v1); + + pa += 8; + ptemp += 8; + + blkCnt2--; + } + blkCnt2 = (pSrc->numCols-col) & 7; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp16q(blkCnt2); + + v1 = vld1q_f16(pa); + v2 = vld1q_f16(ptemp); + v1 = vfmaq_n_f16(v1,v2,f); + vst1q_p_f16(pa,v1,p0); + + pa += blkCnt2; + } + + pa += col; + } + + /* Copy Householder reflectors into R matrix */ + pa = p + pOutR->numCols; + for(k=0;knumRows-col-1; k++) + { + *pa = pTmpA[k+1]; + pa += pOutR->numCols; + } + + p += 1 + pOutR->numCols; + } + + /* Generate Q if requested by user matrix */ + + if (pOutQ != NULL) + { + /* Initialize Q matrix to identity */ + memset(pOutQ->pData,0,sizeof(float16_t)*pOutQ->numRows*pOutQ->numRows); + + pa = pOutQ->pData; + for(col=0 ; col < pOutQ->numCols; col++) + { + *pa = 1.0f16; + pa += pOutQ->numCols+1; + } + + nb = pOutQ->numRows - pOutQ->numCols + 1; + + pc = pOutTau + pOutQ->numCols - 1; + for(col=0 ; col < pOutQ->numCols; col++) + { + int32_t j,k, blkCnt, blkCnt2; + float16_t *pa0,*pa1,*pa2,*pa3,*ptemp; + float16_t temp; + float16x8_t v1,v2,vtemp; + + pos = pSrc->numRows - nb; + p = pOutQ->pData + pos + pOutQ->numCols*pos ; + + + COPY_COL_F16(pOutR,pos,pos,pTmpA); + pTmpA[0] = 1.0f16; + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + + pv = pTmpA; + pa = p; + + temp = *pv; + blkCnt2 = (pOutQ->numRows-pos) >> 3; + while (blkCnt2 > 0) + { + v1 = vld1q_f16(pa); + v1 = vmulq_n_f16(v1, temp); + vst1q_f16(pdst,v1); + + pa += 8; + pdst += 8; + + blkCnt2--; + } + blkCnt2 = (pOutQ->numRows-pos) & 7; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp16q(blkCnt2); + + v1 = vld1q_f16(pa); + v1 = vmulq_n_f16(v1, temp); + vst1q_p_f16(pdst,v1,p0); + + pa += blkCnt2; + } + + pa += pos; + pv++; + pdst = pTmpB; + pa0 = pa; + pa1 = pa0 + pOutQ->numRows; + pa2 = pa1 + pOutQ->numRows; + pa3 = pa2 + pOutQ->numRows; + + /* Unrolled loop */ + blkCnt = (pOutQ->numRows-pos - 1) >> 2; + k=1; + while(blkCnt > 0) + { + + vtemp = vld1q_f16(pv); + blkCnt2 = (pOutQ->numRows-pos) >> 3; + while (blkCnt2 > 0) + { + v1 = vld1q_f16(pdst); + + v2 = vld1q_f16(pa0); + v1 = vfmaq_n_f16(v1, v2, vgetq_lane(vtemp,0)); + + v2 = vld1q_f16(pa1); + v1 = vfmaq_n_f16(v1, v2, vgetq_lane(vtemp,1)); + + v2 = vld1q_f16(pa2); + v1 = vfmaq_n_f16(v1, v2, vgetq_lane(vtemp,2)); + + v2 = vld1q_f16(pa3); + v1 = vfmaq_n_f16(v1, v2, vgetq_lane(vtemp,3)); + + vst1q_f16(pdst,v1); + + pa0 += 8; + pa1 += 8; + pa2 += 8; + pa3 += 8; + pdst += 8; + + blkCnt2--; + } + blkCnt2 = (pOutQ->numRows-pos) & 7; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp16q(blkCnt2); + + v1 = vld1q_f16(pdst); + + v2 = vld1q_f16(pa0); + v1 = vfmaq_n_f16(v1, v2, vgetq_lane(vtemp,0)); + + v2 = vld1q_f16(pa1); + v1 = vfmaq_n_f16(v1, v2, vgetq_lane(vtemp,1)); + + v2 = vld1q_f16(pa2); + v1 = vfmaq_n_f16(v1, v2, vgetq_lane(vtemp,2)); + + v2 = vld1q_f16(pa3); + v1 = vfmaq_n_f16(v1, v2, vgetq_lane(vtemp,3)); + + vst1q_p_f16(pdst,v1,p0); + + pa0 += blkCnt2; + pa1 += blkCnt2; + pa2 += blkCnt2; + pa3 += blkCnt2; + + } + + pa0 += pos + 3*pOutQ->numRows; + pa1 += pos + 3*pOutQ->numRows; + pa2 += pos + 3*pOutQ->numRows; + pa3 += pos + 3*pOutQ->numRows; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-pos; k++) + { + temp = *pv; + blkCnt2 = 
(pOutQ->numRows-pos) >> 3; + while (blkCnt2 > 0) + { + v1 = vld1q_f16(pdst); + v2 = vld1q_f16(pa); + v1 = vfmaq_n_f16(v1, v2, temp); + vst1q_f16(pdst,v1); + + pdst += 8; + pa += 8; + + blkCnt2--; + } + blkCnt2 = (pOutQ->numRows-pos) & 7; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp16q(blkCnt2); + v1 = vld1q_f16(pdst); + v2 = vld1q_f16(pa); + v1 = vfmaq_n_f16(v1, v2, temp); + vst1q_p_f16(pdst,v1,p0); + + pa += blkCnt2; + } + + pa += pos; + pv++; + pdst = pTmpB; + } + + pa = p; + beta = *pc--; + for(j=0;jnumRows-pos; j++) + { + float16_t f = -(_Float16)beta * (_Float16)pTmpA[j]; + ptemp = pTmpB; + + blkCnt2 = (pOutQ->numCols-pos) >> 3; + while (blkCnt2 > 0) + { + v1 = vld1q_f16(pa); + v2 = vld1q_f16(ptemp); + v1 = vfmaq_n_f16(v1,v2,f); + vst1q_f16(pa,v1); + + pa += 8; + ptemp += 8; + + blkCnt2--; + } + blkCnt2 = (pOutQ->numCols-pos) & 7; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp16q(blkCnt2); + + v1 = vld1q_f16(pa); + v2 = vld1q_f16(ptemp); + v1 = vfmaq_n_f16(v1,v2,f); + vst1q_p_f16(pa,v1,p0); + + pa += blkCnt2; + } + + pa += pos; + } + + + nb++; + } + } + + arm_status status = ARM_MATH_SUCCESS; + /* Return to application */ + return (status); +} + +#endif /*#if !defined(ARM_MATH_MVEF)*/ + + +#endif /*#if !defined(ARM_MATH_AUTOVECTORIZE)*/ + + +#if defined(ARM_FLOAT16_SUPPORTED) + +#if (!defined(ARM_MATH_MVE_FLOAT16)) || defined(ARM_MATH_AUTOVECTORIZE) + + +arm_status arm_mat_qr_f16( + const arm_matrix_instance_f16 * pSrc, + const float16_t threshold, + arm_matrix_instance_f16 * pOutR, + arm_matrix_instance_f16 * pOutQ, + float16_t * pOutTau, + float16_t *pTmpA, + float16_t *pTmpB + ) + +{ + int32_t col=0; + int32_t nb,pos; + float16_t *pa,*pc; + float16_t beta; + float16_t *pv; + float16_t *pdst; + float16_t *p; + + if (pSrc->numRows < pSrc->numCols) + { + return(ARM_MATH_SIZE_MISMATCH); + } + + memcpy(pOutR->pData,pSrc->pData,pSrc->numCols * pSrc->numRows*sizeof(float16_t)); + pOutR->numCols = pSrc->numCols; + pOutR->numRows = pSrc->numRows; + + p = pOutR->pData; + + pc = pOutTau; + for(col=0 ; col < pSrc->numCols; col++) + { + int32_t i,j,k,blkCnt; + float16_t *pa0,*pa1,*pa2,*pa3; + COPY_COL_F16(pOutR,col,col,pTmpA); + + beta = arm_householder_f16(pTmpA,threshold,pSrc->numRows - col,pTmpA); + *pc++ = beta; + + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + pv = pTmpA; + pa = p; + for(j=0;jnumCols-col; j++) + { + *pdst++ = (_Float16)*pv * (_Float16)*pa++; + } + pa += col; + pv++; + pdst = pTmpB; + + pa0 = pa; + pa1 = pa0 + pSrc->numCols; + pa2 = pa1 + pSrc->numCols; + pa3 = pa2 + pSrc->numCols; + + /* Unrolled loop */ + blkCnt = (pSrc->numRows-col - 1) >> 2; + k=1; + while(blkCnt > 0) + { + float16_t sum; + + for(j=0;jnumCols-col; j++) + { + sum = *pdst; + + sum += (_Float16)pv[0] * (_Float16)*pa0++; + sum += (_Float16)pv[1] * (_Float16)*pa1++; + sum += (_Float16)pv[2] * (_Float16)*pa2++; + sum += (_Float16)pv[3] * (_Float16)*pa3++; + + *pdst++ = sum; + } + pa0 += col + 3*pSrc->numCols; + pa1 += col + 3*pSrc->numCols; + pa2 += col + 3*pSrc->numCols; + pa3 += col + 3*pSrc->numCols; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-col; k++) + { + for(j=0;jnumCols-col; j++) + { + *pdst++ += (_Float16)*pv * (_Float16)*pa++; + } + pa += col; + pv++; + pdst = pTmpB; + } + + /* A(col:,col:) - beta v tmpb */ + pa = p; + for(j=0;jnumRows-col; j++) + { + float16_t f = (_Float16)beta * (_Float16)pTmpA[j]; + + for(i=0;inumCols-col; i++) + { + *pa = (_Float16)*pa - (_Float16)f * (_Float16)pTmpB[i] ; + pa++; + } + pa += col; + } + + /* Copy Householder 
reflectors into R matrix */ + pa = p + pOutR->numCols; + for(k=0;knumRows-col-1; k++) + { + *pa = pTmpA[k+1]; + pa += pOutR->numCols; + } + + p += 1 + pOutR->numCols; + } + + /* Generate Q if requested by user matrix */ + + if (pOutQ != NULL) + { + /* Initialize Q matrix to identity */ + memset(pOutQ->pData,0,sizeof(float16_t)*pOutQ->numRows*pOutQ->numRows); + + pa = pOutQ->pData; + for(col=0 ; col < pOutQ->numCols; col++) + { + *pa = 1.0f16; + pa += pOutQ->numCols+1; + } + + nb = pOutQ->numRows - pOutQ->numCols + 1; + + pc = pOutTau + pOutQ->numCols - 1; + for(col=0 ; col < pOutQ->numCols; col++) + { + int32_t i,j,k, blkCnt; + float16_t *pa0,*pa1,*pa2,*pa3; + pos = pSrc->numRows - nb; + p = pOutQ->pData + pos + pOutQ->numCols*pos ; + + + COPY_COL_F16(pOutR,pos,pos,pTmpA); + pTmpA[0] = 1.0f16; + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + + pv = pTmpA; + pa = p; + for(j=0;jnumRows-pos; j++) + { + *pdst++ = (_Float16)*pv * (_Float16)*pa++; + } + pa += pos; + pv++; + pdst = pTmpB; + pa0 = pa; + pa1 = pa0 + pOutQ->numRows; + pa2 = pa1 + pOutQ->numRows; + pa3 = pa2 + pOutQ->numRows; + + /* Unrolled loop */ + blkCnt = (pOutQ->numRows-pos - 1) >> 2; + k=1; + while(blkCnt > 0) + { + float16_t sum; + + for(j=0;jnumRows-pos; j++) + { + sum = *pdst; + + sum += (_Float16)pv[0] * (_Float16)*pa0++; + sum += (_Float16)pv[1] * (_Float16)*pa1++; + sum += (_Float16)pv[2] * (_Float16)*pa2++; + sum += (_Float16)pv[3] * (_Float16)*pa3++; + + *pdst++ = sum; + } + pa0 += pos + 3*pOutQ->numRows; + pa1 += pos + 3*pOutQ->numRows; + pa2 += pos + 3*pOutQ->numRows; + pa3 += pos + 3*pOutQ->numRows; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-pos; k++) + { + for(j=0;jnumRows-pos; j++) + { + *pdst++ += (_Float16)*pv * (_Float16)*pa++; + } + pa += pos; + pv++; + pdst = pTmpB; + } + + pa = p; + beta = *pc--; + for(j=0;jnumRows-pos; j++) + { + float16_t f = (_Float16)beta * (_Float16)pTmpA[j]; + + for(i=0;inumCols-pos; i++) + { + *pa = (_Float16)*pa - (_Float16)f * (_Float16)pTmpB[i] ; + pa++; + } + pa += pos; + } + + + nb++; + } + } + + arm_status status = ARM_MATH_SUCCESS; + /* Return to application */ + return (status); +} + +#endif /* end of test for Helium or Neon availability */ + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ +/** + @} end of MatrixQR group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f32.c new file mode 100644 index 0000000..3e3027e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f32.c @@ -0,0 +1,854 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mat_qr_f32.c + * Description: Floating-point matrix QR decomposition. + * + * $Date: 15 June 2022 + * $Revision: V1.11.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" + + +#if !defined(ARM_MATH_AUTOVECTORIZE) +#if defined(ARM_MATH_MVEF) +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +#endif +#endif + +/** + @ingroup groupMatrix + */ + +/** + @defgroup MatrixQR QR decomposition of a Matrix + + Computes the QR decomposition of a matrix M using Householder algorithm. + + \f[ + M = Q R + \f] + + where Q is an orthogonal matrix and R is upper triangular. + No pivoting strategy is used. + + The returned value for R is using a format a bit similar + to LAPACK : it is not just containing the matrix R but + also the Householder reflectors. + + The function is also returning a vector \f$\tau\f$ + that is containing the scaling factor for the reflectors. + + Returned value R has the structure: + + \f[ + \begin{pmatrix} + r_{11} & r_{12} & \dots & r_{1n} \\ + v_{12} & r_{22} & \dots & r_{2n} \\ + v_{13} & v_{22} & \dots & r_{3n} \\ + \vdots & \vdots & \ddots & \vdots \\ + v_{1m} & v_{2(m-1)} & \dots & r_{mn} \\ + \end{pmatrix} + \f] + + where + + \f[ + v_1 = + \begin{pmatrix} + 1 \\ + v_{12} \\ + \vdots \\ + v_{1m} \\ + \end{pmatrix} + \f] + + is the first householder reflector. + + The Householder Matrix is given by \f$H_1\f$ + + \f[ + H_1 = I - \tau_1 v_1 v_1^T + \f] + + The Matrix Q is the product of the Householder matrices: + + \f[ + Q = H_1 H_2 \dots H_n + \f] + + The computation of the matrix Q by this function is + optional. + + And the matrix R, would be the returned value R without the + householder reflectors: + + \f[ + \begin{pmatrix} + r_{11} & r_{12} & \dots & r_{1n} \\ + 0 & r_{22} & \dots & r_{2n} \\ + 0 & 0 & \dots & r_{3n} \\ + \vdots & \vdots & \ddots & \vdots \\ + 0 & 0 & \dots & r_{mn} \\ + \end{pmatrix} + \f] + + + */ + +/** + @addtogroup MatrixQR + @{ + */ + +/** + @brief QR decomposition of a m x n floating point matrix with m >= n. + @param[in] pSrc points to input matrix structure. The source matrix is modified by the function. + @param[in] threshold norm2 threshold. + @param[out] pOutR points to output R matrix structure of dimension m x n + @param[out] pOutQ points to output Q matrix structure of dimension m x m (can be NULL) + @param[out] pOutTau points to Householder scaling factors of dimension n + @param[inout] pTmpA points to a temporary vector of dimension m. + @param[inout] pTmpB points to a temporary vector of dimension m. + @return execution status + - \ref ARM_MATH_SUCCESS : Operation successful + - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed + + @par pOutQ is optional: + pOutQ can be a NULL pointer. + In this case, the argument will be ignored + and the output Q matrix won't be computed. 
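Given the parameter list above, a minimal sketch of driving the new f32 QR kernel could look as follows; the matrix sizes, buffer names, function name and threshold value are illustrative, not taken from the library sources:

    /* Illustrative sketch only: QR-decompose a 4x3 matrix (m >= n).
     * Buffer dimensions follow the doc block above: R is m x n, Q is m x m,
     * tau has n entries, both temporaries have m entries. */
    #include "arm_math.h"

    #define M 4
    #define N 3

    static float32_t a_data[M * N];   /* input; modified by the function */
    static float32_t r_data[M * N];   /* R plus the packed Householder reflectors */
    static float32_t q_data[M * M];   /* optional Q */
    static float32_t tau[N];
    static float32_t tmp_a[M], tmp_b[M];

    arm_status qr_example(void)
    {
        arm_matrix_instance_f32 A, R, Q;

        arm_mat_init_f32(&A, M, N, a_data);
        arm_mat_init_f32(&R, M, N, r_data);
        arm_mat_init_f32(&Q, M, M, q_data);

        /* The threshold is the norm2 threshold described above; the literal
         * here is only an example value. */
        return arm_mat_qr_f32(&A, 1.0e-12f, &R, &Q, tau, tmp_a, tmp_b);
    }

Passing NULL for the Q argument skips the second, more expensive phase and leaves only the packed R matrix (upper triangle plus Householder reflectors) and the tau vector.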
+ + + @par Norm2 threshold + For the meaning of this argument please + refer to the \ref MatrixHouseholder documentation + + */ + +#if !defined(ARM_MATH_AUTOVECTORIZE) +#if defined(ARM_MATH_MVEF) + +arm_status arm_mat_qr_f32( + const arm_matrix_instance_f32 * pSrc, + const float32_t threshold, + arm_matrix_instance_f32 * pOutR, + arm_matrix_instance_f32 * pOutQ, + float32_t * pOutTau, + float32_t *pTmpA, + float32_t *pTmpB + ) + +{ + int32_t col=0; + int32_t nb,pos; + float32_t *pa,*pc; + float32_t beta; + float32_t *pv; + float32_t *pdst; + float32_t *p; + + if (pSrc->numRows < pSrc->numCols) + { + return(ARM_MATH_SIZE_MISMATCH); + } + + memcpy(pOutR->pData,pSrc->pData,pSrc->numCols * pSrc->numRows*sizeof(float32_t)); + pOutR->numCols = pSrc->numCols; + pOutR->numRows = pSrc->numRows; + + p = pOutR->pData; + + pc = pOutTau; + for(col=0 ; col < pSrc->numCols; col++) + { + int32_t j,k,blkCnt,blkCnt2; + float32_t *pa0,*pa1,*pa2,*pa3,*ptemp; + float32_t temp; + float32x4_t v1,v2,vtemp; + + COPY_COL_F32(pOutR,col,col,pTmpA); + + beta = arm_householder_f32(pTmpA,threshold,pSrc->numRows - col,pTmpA); + *pc++ = beta; + + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + pv = pTmpA; + pa = p; + + temp = *pv; + blkCnt = (pSrc->numCols-col) >> 2; + while (blkCnt > 0) + { + v1 = vld1q_f32(pa); + v2 = vmulq_n_f32(v1,temp); + vst1q_f32(pdst,v2); + + pa += 4; + pdst += 4; + blkCnt--; + } + blkCnt = (pSrc->numCols-col) & 3; + if (blkCnt > 0) + { + mve_pred16_t p0 = vctp32q(blkCnt); + v1 = vld1q_f32(pa); + v2 = vmulq_n_f32(v1,temp); + vst1q_p_f32(pdst,v2,p0); + + pa += blkCnt; + } + + pa += col; + pv++; + pdst = pTmpB; + + pa0 = pa; + pa1 = pa0 + pSrc->numCols; + pa2 = pa1 + pSrc->numCols; + pa3 = pa2 + pSrc->numCols; + + /* Unrolled loop */ + blkCnt = (pSrc->numRows-col - 1) >> 2; + k=1; + while(blkCnt > 0) + { + vtemp=vld1q_f32(pv); + + blkCnt2 = (pSrc->numCols-col) >> 2; + while (blkCnt2 > 0) + { + v1 = vld1q_f32(pdst); + + v2 = vld1q_f32(pa0); + v1 = vfmaq_n_f32(v1,v2,vgetq_lane(vtemp,0)); + + v2 = vld1q_f32(pa1); + v1 = vfmaq_n_f32(v1,v2,vgetq_lane(vtemp,1)); + + v2 = vld1q_f32(pa2); + v1 = vfmaq_n_f32(v1,v2,vgetq_lane(vtemp,2)); + + v2 = vld1q_f32(pa3); + v1 = vfmaq_n_f32(v1,v2,vgetq_lane(vtemp,3)); + + vst1q_f32(pdst,v1); + + pdst += 4; + pa0 += 4; + pa1 += 4; + pa2 += 4; + pa3 += 4; + blkCnt2--; + } + blkCnt2 = (pSrc->numCols-col) & 3; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp32q(blkCnt2); + + v1 = vld1q_f32(pdst); + + v2 = vld1q_f32(pa0); + v1 = vfmaq_n_f32(v1,v2,vgetq_lane(vtemp,0)); + + v2 = vld1q_f32(pa1); + v1 = vfmaq_n_f32(v1,v2,vgetq_lane(vtemp,1)); + + v2 = vld1q_f32(pa2); + v1 = vfmaq_n_f32(v1,v2,vgetq_lane(vtemp,2)); + + v2 = vld1q_f32(pa3); + v1 = vfmaq_n_f32(v1,v2,vgetq_lane(vtemp,3)); + + vst1q_p_f32(pdst,v1,p0); + + pa0 += blkCnt2; + pa1 += blkCnt2; + pa2 += blkCnt2; + pa3 += blkCnt2; + } + + pa0 += col + 3*pSrc->numCols; + pa1 += col + 3*pSrc->numCols; + pa2 += col + 3*pSrc->numCols; + pa3 += col + 3*pSrc->numCols; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-col; k++) + { + temp = *pv; + blkCnt2 = (pSrc->numCols-col) >> 2; + while (blkCnt2 > 0) + { + v1 = vld1q_f32(pa); + v2 = vld1q_f32(pdst); + v2 = vfmaq_n_f32(v2,v1,temp); + vst1q_f32(pdst,v2); + + pa += 4; + pdst += 4; + blkCnt2--; + } + blkCnt2 = (pSrc->numCols-col) & 3; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp32q(blkCnt2); + v1 = vld1q_f32(pa); + v2 = vld1q_f32(pdst); + v2 = vfmaq_n_f32(v2,v1,temp); + vst1q_p_f32(pdst,v2,p0); + + pa += blkCnt2; + } + + pa += col; + pv++; + 
pdst = pTmpB; + } + + /* A(col:,col:) - beta v tmpb */ + pa = p; + for(j=0;jnumRows-col; j++) + { + float32_t f = -beta * pTmpA[j]; + ptemp = pTmpB; + + blkCnt2 = (pSrc->numCols-col) >> 2; + while (blkCnt2 > 0) + { + v1 = vld1q_f32(pa); + v2 = vld1q_f32(ptemp); + v1 = vfmaq_n_f32(v1,v2,f); + vst1q_f32(pa,v1); + + pa += 4; + ptemp += 4; + + blkCnt2--; + } + blkCnt2 = (pSrc->numCols-col) & 3; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp32q(blkCnt2); + + v1 = vld1q_f32(pa); + v2 = vld1q_f32(ptemp); + v1 = vfmaq_n_f32(v1,v2,f); + vst1q_p_f32(pa,v1,p0); + + pa += blkCnt2; + } + + pa += col; + } + + /* Copy Householder reflectors into R matrix */ + pa = p + pOutR->numCols; + for(k=0;knumRows-col-1; k++) + { + *pa = pTmpA[k+1]; + pa += pOutR->numCols; + } + + p += 1 + pOutR->numCols; + } + + /* Generate Q if requested by user matrix */ + + if (pOutQ != NULL) + { + /* Initialize Q matrix to identity */ + memset(pOutQ->pData,0,sizeof(float32_t)*pOutQ->numRows*pOutQ->numRows); + + pa = pOutQ->pData; + for(col=0 ; col < pOutQ->numCols; col++) + { + *pa = 1.0f; + pa += pOutQ->numCols+1; + } + + nb = pOutQ->numRows - pOutQ->numCols + 1; + + pc = pOutTau + pOutQ->numCols - 1; + for(col=0 ; col < pOutQ->numCols; col++) + { + int32_t j,k, blkCnt, blkCnt2; + float32_t *pa0,*pa1,*pa2,*pa3,*ptemp; + float32_t temp; + float32x4_t v1,v2,vtemp; + + pos = pSrc->numRows - nb; + p = pOutQ->pData + pos + pOutQ->numCols*pos ; + + + COPY_COL_F32(pOutR,pos,pos,pTmpA); + pTmpA[0] = 1.0f; + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + + pv = pTmpA; + pa = p; + + temp = *pv; + blkCnt2 = (pOutQ->numRows-pos) >> 2; + while (blkCnt2 > 0) + { + v1 = vld1q_f32(pa); + v1 = vmulq_n_f32(v1, temp); + vst1q_f32(pdst,v1); + + pa += 4; + pdst += 4; + + blkCnt2--; + } + blkCnt2 = (pOutQ->numRows-pos) & 3; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp32q(blkCnt2); + + v1 = vld1q_f32(pa); + v1 = vmulq_n_f32(v1, temp); + vst1q_p_f32(pdst,v1,p0); + + pa += blkCnt2; + } + + pa += pos; + pv++; + pdst = pTmpB; + pa0 = pa; + pa1 = pa0 + pOutQ->numRows; + pa2 = pa1 + pOutQ->numRows; + pa3 = pa2 + pOutQ->numRows; + + /* Unrolled loop */ + blkCnt = (pOutQ->numRows-pos - 1) >> 2; + k=1; + while(blkCnt > 0) + { + + vtemp = vld1q_f32(pv); + blkCnt2 = (pOutQ->numRows-pos) >> 2; + while (blkCnt2 > 0) + { + v1 = vld1q_f32(pdst); + + v2 = vld1q_f32(pa0); + v1 = vfmaq_n_f32(v1, v2, vgetq_lane(vtemp,0)); + + v2 = vld1q_f32(pa1); + v1 = vfmaq_n_f32(v1, v2, vgetq_lane(vtemp,1)); + + v2 = vld1q_f32(pa2); + v1 = vfmaq_n_f32(v1, v2, vgetq_lane(vtemp,2)); + + v2 = vld1q_f32(pa3); + v1 = vfmaq_n_f32(v1, v2, vgetq_lane(vtemp,3)); + + vst1q_f32(pdst,v1); + + pa0 += 4; + pa1 += 4; + pa2 += 4; + pa3 += 4; + pdst += 4; + + blkCnt2--; + } + blkCnt2 = (pOutQ->numRows-pos) & 3; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp32q(blkCnt2); + + v1 = vld1q_f32(pdst); + + v2 = vld1q_f32(pa0); + v1 = vfmaq_n_f32(v1, v2, vgetq_lane(vtemp,0)); + + v2 = vld1q_f32(pa1); + v1 = vfmaq_n_f32(v1, v2, vgetq_lane(vtemp,1)); + + v2 = vld1q_f32(pa2); + v1 = vfmaq_n_f32(v1, v2, vgetq_lane(vtemp,2)); + + v2 = vld1q_f32(pa3); + v1 = vfmaq_n_f32(v1, v2, vgetq_lane(vtemp,3)); + + vst1q_p_f32(pdst,v1,p0); + + pa0 += blkCnt2; + pa1 += blkCnt2; + pa2 += blkCnt2; + pa3 += blkCnt2; + + } + + pa0 += pos + 3*pOutQ->numRows; + pa1 += pos + 3*pOutQ->numRows; + pa2 += pos + 3*pOutQ->numRows; + pa3 += pos + 3*pOutQ->numRows; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-pos; k++) + { + temp = *pv; + blkCnt2 = (pOutQ->numRows-pos) >> 2; + while (blkCnt2 > 
0) + { + v1 = vld1q_f32(pdst); + v2 = vld1q_f32(pa); + v1 = vfmaq_n_f32(v1, v2, temp); + vst1q_f32(pdst,v1); + + pdst += 4; + pa += 4; + + blkCnt2--; + } + blkCnt2 = (pOutQ->numRows-pos) & 3; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp32q(blkCnt2); + v1 = vld1q_f32(pdst); + v2 = vld1q_f32(pa); + v1 = vfmaq_n_f32(v1, v2, temp); + vst1q_p_f32(pdst,v1,p0); + + pa += blkCnt2; + } + + pa += pos; + pv++; + pdst = pTmpB; + } + + pa = p; + beta = *pc--; + for(j=0;jnumRows-pos; j++) + { + float32_t f = -beta * pTmpA[j]; + ptemp = pTmpB; + + blkCnt2 = (pOutQ->numCols-pos) >> 2; + while (blkCnt2 > 0) + { + v1 = vld1q_f32(pa); + v2 = vld1q_f32(ptemp); + v1 = vfmaq_n_f32(v1,v2,f); + vst1q_f32(pa,v1); + + pa += 4; + ptemp += 4; + + blkCnt2--; + } + blkCnt2 = (pOutQ->numCols-pos) & 3; + if (blkCnt2 > 0) + { + mve_pred16_t p0 = vctp32q(blkCnt2); + + v1 = vld1q_f32(pa); + v2 = vld1q_f32(ptemp); + v1 = vfmaq_n_f32(v1,v2,f); + vst1q_p_f32(pa,v1,p0); + + pa += blkCnt2; + } + + pa += pos; + } + + + nb++; + } + } + + arm_status status = ARM_MATH_SUCCESS; + /* Return to application */ + return (status); +} + +#endif /*#if !defined(ARM_MATH_MVEF)*/ + + +#endif /*#if !defined(ARM_MATH_AUTOVECTORIZE)*/ + + + +#if (!defined(ARM_MATH_MVEF)) || defined(ARM_MATH_AUTOVECTORIZE) + +arm_status arm_mat_qr_f32( + const arm_matrix_instance_f32 * pSrc, + const float32_t threshold, + arm_matrix_instance_f32 * pOutR, + arm_matrix_instance_f32 * pOutQ, + float32_t * pOutTau, + float32_t *pTmpA, + float32_t *pTmpB + ) + +{ + int32_t col=0; + int32_t nb,pos; + float32_t *pa,*pc; + float32_t beta; + float32_t *pv; + float32_t *pdst; + float32_t *p; + + if (pSrc->numRows < pSrc->numCols) + { + return(ARM_MATH_SIZE_MISMATCH); + } + + memcpy(pOutR->pData,pSrc->pData,pSrc->numCols * pSrc->numRows*sizeof(float32_t)); + pOutR->numCols = pSrc->numCols; + pOutR->numRows = pSrc->numRows; + + p = pOutR->pData; + + pc = pOutTau; + for(col=0 ; col < pSrc->numCols; col++) + { + int32_t i,j,k,blkCnt; + float32_t *pa0,*pa1,*pa2,*pa3; + COPY_COL_F32(pOutR,col,col,pTmpA); + + beta = arm_householder_f32(pTmpA,threshold,pSrc->numRows - col,pTmpA); + *pc++ = beta; + + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + pv = pTmpA; + pa = p; + for(j=0;jnumCols-col; j++) + { + *pdst++ = *pv * *pa++; + } + pa += col; + pv++; + pdst = pTmpB; + + pa0 = pa; + pa1 = pa0 + pSrc->numCols; + pa2 = pa1 + pSrc->numCols; + pa3 = pa2 + pSrc->numCols; + + /* Unrolled loop */ + blkCnt = (pSrc->numRows-col - 1) >> 2; + k=1; + while(blkCnt > 0) + { + float32_t sum; + + for(j=0;jnumCols-col; j++) + { + sum = *pdst; + + sum += pv[0] * *pa0++; + sum += pv[1] * *pa1++; + sum += pv[2] * *pa2++; + sum += pv[3] * *pa3++; + + *pdst++ = sum; + } + pa0 += col + 3*pSrc->numCols; + pa1 += col + 3*pSrc->numCols; + pa2 += col + 3*pSrc->numCols; + pa3 += col + 3*pSrc->numCols; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-col; k++) + { + for(j=0;jnumCols-col; j++) + { + *pdst++ += *pv * *pa++; + } + pa += col; + pv++; + pdst = pTmpB; + } + + /* A(col:,col:) - beta v tmpb */ + pa = p; + for(j=0;jnumRows-col; j++) + { + float32_t f = beta * pTmpA[j]; + + for(i=0;inumCols-col; i++) + { + *pa = *pa - f * pTmpB[i] ; + pa++; + } + pa += col; + } + + /* Copy Householder reflectors into R matrix */ + pa = p + pOutR->numCols; + for(k=0;knumRows-col-1; k++) + { + *pa = pTmpA[k+1]; + pa += pOutR->numCols; + } + + p += 1 + pOutR->numCols; + } + + /* Generate Q if requested by user matrix */ + + if (pOutQ != NULL) + { + /* Initialize Q matrix to identity */ 
+ memset(pOutQ->pData,0,sizeof(float32_t)*pOutQ->numRows*pOutQ->numRows); + + pa = pOutQ->pData; + for(col=0 ; col < pOutQ->numCols; col++) + { + *pa = 1.0f; + pa += pOutQ->numCols+1; + } + + nb = pOutQ->numRows - pOutQ->numCols + 1; + + pc = pOutTau + pOutQ->numCols - 1; + for(col=0 ; col < pOutQ->numCols; col++) + { + int32_t i,j,k, blkCnt; + float32_t *pa0,*pa1,*pa2,*pa3; + pos = pSrc->numRows - nb; + p = pOutQ->pData + pos + pOutQ->numCols*pos ; + + + COPY_COL_F32(pOutR,pos,pos,pTmpA); + pTmpA[0] = 1.0f; + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + + pv = pTmpA; + pa = p; + for(j=0;jnumRows-pos; j++) + { + *pdst++ = *pv * *pa++; + } + pa += pos; + pv++; + pdst = pTmpB; + pa0 = pa; + pa1 = pa0 + pOutQ->numRows; + pa2 = pa1 + pOutQ->numRows; + pa3 = pa2 + pOutQ->numRows; + + /* Unrolled loop */ + blkCnt = (pOutQ->numRows-pos - 1) >> 2; + k=1; + while(blkCnt > 0) + { + float32_t sum; + + for(j=0;jnumRows-pos; j++) + { + sum = *pdst; + + sum += pv[0] * *pa0++; + sum += pv[1] * *pa1++; + sum += pv[2] * *pa2++; + sum += pv[3] * *pa3++; + + *pdst++ = sum; + } + pa0 += pos + 3*pOutQ->numRows; + pa1 += pos + 3*pOutQ->numRows; + pa2 += pos + 3*pOutQ->numRows; + pa3 += pos + 3*pOutQ->numRows; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-pos; k++) + { + for(j=0;jnumRows-pos; j++) + { + *pdst++ += *pv * *pa++; + } + pa += pos; + pv++; + pdst = pTmpB; + } + + pa = p; + beta = *pc--; + for(j=0;jnumRows-pos; j++) + { + float32_t f = beta * pTmpA[j]; + + for(i=0;inumCols-pos; i++) + { + *pa = *pa - f * pTmpB[i] ; + pa++; + } + pa += pos; + } + + + nb++; + } + } + + arm_status status = ARM_MATH_SUCCESS; + /* Return to application */ + return (status); +} + +#endif /* end of test for Helium or Neon availability */ + +/** + @} end of MatrixQR group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f64.c new file mode 100644 index 0000000..84ce3fb --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_qr_f64.c @@ -0,0 +1,311 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mat_qr_f64.c + * Description: Double floating-point matrix QR decomposition. + * + * $Date: 15 June 2022 + * $Revision: V1.11.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_utils.h" + + + +/** + @ingroup groupMatrix + */ + + +/** + @addtogroup MatrixQR + @{ + */ + +/** + @brief QR decomposition of a m x n double floating point matrix with m >= n. + @param[in] pSrc points to input matrix structure. The source matrix is modified by the function. + @param[in] threshold norm2 threshold. + @param[out] pOutR points to output R matrix structure of dimension m x n + @param[out] pOutQ points to output Q matrix structure of dimension m x m (can be NULL) + @param[out] pOutTau points to Householder scaling factors of dimension n + @param[inout] pTmpA points to a temporary vector of dimension m. + @param[inout] pTmpB points to a temporary vector of dimension m. + @return execution status + - \ref ARM_MATH_SUCCESS : Operation successful + - \ref ARM_MATH_SIZE_MISMATCH : Matrix size check failed + + @par pOutQ is optional: + pOutQ can be a NULL pointer. + In this case, the argument will be ignored + and the output Q matrix won't be computed. + + + @par Norm2 threshold + For the meaning of this argument please + refer to the \ref MatrixHouseholder documentation + + */ + + + + +arm_status arm_mat_qr_f64( + const arm_matrix_instance_f64 * pSrc, + const float64_t threshold, + arm_matrix_instance_f64 * pOutR, + arm_matrix_instance_f64 * pOutQ, + float64_t * pOutTau, + float64_t *pTmpA, + float64_t *pTmpB + ) + +{ + int32_t col=0; + int32_t nb,pos; + float64_t *pa,*pc; + float64_t beta; + float64_t *pv; + float64_t *pdst; + float64_t *p; + + if (pSrc->numRows < pSrc->numCols) + { + return(ARM_MATH_SIZE_MISMATCH); + } + + memcpy(pOutR->pData,pSrc->pData,pSrc->numCols * pSrc->numRows*sizeof(float64_t)); + pOutR->numCols = pSrc->numCols; + pOutR->numRows = pSrc->numRows; + + p = pOutR->pData; + + pc = pOutTau; + for(col=0 ; col < pSrc->numCols; col++) + { + int32_t i,j,k,blkCnt; + float64_t *pa0,*pa1,*pa2,*pa3; + COPY_COL_F64(pOutR,col,col,pTmpA); + + beta = arm_householder_f64(pTmpA,threshold,pSrc->numRows - col,pTmpA); + *pc++ = beta; + + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + pv = pTmpA; + pa = p; + for(j=0;jnumCols-col; j++) + { + *pdst++ = *pv * *pa++; + } + pa += col; + pv++; + pdst = pTmpB; + + pa0 = pa; + pa1 = pa0 + pSrc->numCols; + pa2 = pa1 + pSrc->numCols; + pa3 = pa2 + pSrc->numCols; + + /* Unrolled loop */ + blkCnt = (pSrc->numRows-col - 1) >> 2; + k=1; + while(blkCnt > 0) + { + float64_t sum; + + for(j=0;jnumCols-col; j++) + { + sum = *pdst; + + sum += pv[0] * *pa0++; + sum += pv[1] * *pa1++; + sum += pv[2] * *pa2++; + sum += pv[3] * *pa3++; + + *pdst++ = sum; + } + pa0 += col + 3*pSrc->numCols; + pa1 += col + 3*pSrc->numCols; + pa2 += col + 3*pSrc->numCols; + pa3 += col + 3*pSrc->numCols; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-col; k++) + { + for(j=0;jnumCols-col; j++) + { + *pdst++ += *pv * *pa++; + } + pa += col; + pv++; + pdst = pTmpB; + } + + /* A(col:,col:) - beta v tmpb */ + pa = p; + for(j=0;jnumRows-col; j++) + { + float64_t f = beta * pTmpA[j]; + + for(i=0;inumCols-col; i++) + { + *pa = *pa - f * pTmpB[i] ; + pa++; + } + pa += col; + } + + /* Copy Householder reflectors into R matrix */ + pa = p + pOutR->numCols; + for(k=0;knumRows-col-1; k++) + { + *pa = pTmpA[k+1]; + pa += pOutR->numCols; + } + + p += 1 + pOutR->numCols; + } + + /* Generate Q if requested by user matrix */ + + if (pOutQ != NULL) + { + /* Initialize Q matrix to identity */ + 
memset(pOutQ->pData,0,sizeof(float64_t)*pOutQ->numRows*pOutQ->numRows); + + pa = pOutQ->pData; + for(col=0 ; col < pOutQ->numCols; col++) + { + *pa = 1.0; + pa += pOutQ->numCols+1; + } + + nb = pOutQ->numRows - pOutQ->numCols + 1; + + pc = pOutTau + pOutQ->numCols - 1; + for(col=0 ; col < pOutQ->numCols; col++) + { + int32_t i,j,k, blkCnt; + float64_t *pa0,*pa1,*pa2,*pa3; + pos = pSrc->numRows - nb; + p = pOutQ->pData + pos + pOutQ->numCols*pos ; + + + COPY_COL_F64(pOutR,pos,pos,pTmpA); + pTmpA[0] = 1.0; + pdst = pTmpB; + + /* v.T A(col:,col:) -> tmpb */ + + pv = pTmpA; + pa = p; + for(j=0;jnumRows-pos; j++) + { + *pdst++ = *pv * *pa++; + } + pa += pos; + pv++; + pdst = pTmpB; + pa0 = pa; + pa1 = pa0 + pOutQ->numRows; + pa2 = pa1 + pOutQ->numRows; + pa3 = pa2 + pOutQ->numRows; + + /* Unrolled loop */ + blkCnt = (pOutQ->numRows-pos - 1) >> 2; + k=1; + while(blkCnt > 0) + { + float64_t sum; + + for(j=0;jnumRows-pos; j++) + { + sum = *pdst; + + sum += pv[0] * *pa0++; + sum += pv[1] * *pa1++; + sum += pv[2] * *pa2++; + sum += pv[3] * *pa3++; + + *pdst++ = sum; + } + pa0 += pos + 3*pOutQ->numRows; + pa1 += pos + 3*pOutQ->numRows; + pa2 += pos + 3*pOutQ->numRows; + pa3 += pos + 3*pOutQ->numRows; + pv += 4; + pdst = pTmpB; + k += 4; + blkCnt--; + } + + pa = pa0; + for(;knumRows-pos; k++) + { + for(j=0;jnumRows-pos; j++) + { + *pdst++ += *pv * *pa++; + } + pa += pos; + pv++; + pdst = pTmpB; + } + + pa = p; + beta = *pc--; + for(j=0;jnumRows-pos; j++) + { + float64_t f = beta * pTmpA[j]; + + for(i=0;inumCols-pos; i++) + { + *pa = *pa - f * pTmpB[i] ; + pa++; + } + pa += pos; + } + + + nb++; + } + } + + arm_status status = ARM_MATH_SUCCESS; + /* Return to application */ + return (status); +} + + +/** + @} end of MatrixQR group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_f16.c index 4c8d4eb..3b14b51 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_scale_f16.c * Description: Multiplies a floating-point matrix by a scalar * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -163,10 +163,10 @@ arm_status arm_mat_scale_f16( /* C(m,n) = A(m,n) * scale */ /* Scale and store result in destination buffer. */ - *pOut++ = (*pIn++) * scale; - *pOut++ = (*pIn++) * scale; - *pOut++ = (*pIn++) * scale; - *pOut++ = (*pIn++) * scale; + *pOut++ = (_Float16)(*pIn++) * (_Float16)scale; + *pOut++ = (_Float16)(*pIn++) * (_Float16)scale; + *pOut++ = (_Float16)(*pIn++) * (_Float16)scale; + *pOut++ = (_Float16)(*pIn++) * (_Float16)scale; /* Decrement loop counter */ blkCnt--; @@ -187,7 +187,7 @@ arm_status arm_mat_scale_f16( /* C(m,n) = A(m,n) * scale */ /* Scale and store result in destination buffer. 
*/ - *pOut++ = (*pIn++) * scale; + *pOut++ = (_Float16)(*pIn++) * (_Float16)scale; /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_f32.c index 5d1dfd5..63aad92 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_scale_f32.c * Description: Multiplies a floating-point matrix by a scalar * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -39,7 +39,22 @@ Multiplies a matrix by a scalar. This is accomplished by multiplying each element in the matrix by the scalar. For example: - \image html MatrixScale.gif "Matrix Scaling of a 3 x 3 matrix" + + @par Matrix Scaling of a 3 x 3 matrix + + \f[ + \begin{pmatrix} + a_{1,1} & a_{1,2} & a_{1,3} \\ + a_{2,1} & a_{2,2} & a_{2,3} \\ + a_{3,1} & a_{3,2} & a_{3,3} \\ + \end{pmatrix} + * K = + \begin{pmatrix} + K a_{1,1} & K a_{1,2} & K a_{1,3} \\ + K a_{2,1} & K a_{2,2} & K a_{2,3} \\ + K a_{3,1} & K a_{3,2} & K a_{3,3} \\ + \end{pmatrix} + \f] The function checks to make sure that the input and output matrices are of the same size. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_q15.c index 800ca46..5d1ea8b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_q15.c @@ -5,13 +5,13 @@ * Title: arm_mat_scale_q15.c * Description: Multiplies a Q15 matrix by a scalar * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
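A minimal sketch of the f32 scaling call described by the formula above; the 3 x 3 size, data values and function name are illustrative only:

    /* Illustrative sketch only: out = K * in, element by element. */
    #include "arm_math.h"

    static float32_t in_data[9]  = { 1.0f, 2.0f, 3.0f,
                                     4.0f, 5.0f, 6.0f,
                                     7.0f, 8.0f, 9.0f };
    static float32_t out_data[9];

    arm_status scale_example(void)
    {
        arm_matrix_instance_f32 in, out;

        arm_mat_init_f32(&in,  3, 3, in_data);
        arm_mat_init_f32(&out, 3, 3, out_data);

        /* Scale every element by K = 2.5. */
        return arm_mat_scale_f32(&in, 2.5f, &out);
    }

The fixed-point variants (Q15/Q31) express the factor as a fractional scale value plus a left-shift amount instead of a single float.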
* * SPDX-License-Identifier: Apache-2.0 * @@ -179,8 +179,8 @@ arm_status arm_mat_scale_q15( #if defined (ARM_MATH_DSP) /* read 2 times 2 samples at a time from source */ - inA1 = read_q15x2_ia ((q15_t **) &pIn); - inA2 = read_q15x2_ia ((q15_t **) &pIn); + inA1 = read_q15x2_ia (&pIn); + inA2 = read_q15x2_ia (&pIn); /* Scale inputs and store result in temporary variables * in single cycle by packing the outputs */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_q31.c index 1292c65..f4e87e6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_scale_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_scale_q31.c * Description: Multiplies a Q31 matrix by a scalar * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f16.c index 8833566..6b3de66 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f16.c @@ -5,11 +5,13 @@ * Title: arm_mat_solve_lower_triangular_f16.c * Description: Solve linear system LT X = A with LT lower triangular matrix * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -63,9 +65,8 @@ #ifdef ARM_MATH_MATRIX_CHECK /* Check for matrix mismatch condition */ - if ((ut->numRows != lt->numCols) || - (a->numRows != a->numCols) || - (ut->numRows != a->numRows) ) + if ((lt->numRows != lt->numCols) || + (lt->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -83,9 +84,10 @@ x2 = (a2 - c2 x3) / b2 */ - int i,j,k,n; + int i,j,k,n,cols; n = dst->numRows; + cols = dst->numCols; float16_t *pX = dst->pData; float16_t *pLT = lt->pData; @@ -102,45 +104,45 @@ for(i=0; i < n ; i++) { - for(j=0; j+7 < n; j += 8) + for(j=0; j+7 < cols; j += 8) { - vecA = vld1q_f16(&pA[i * n + j]); + vecA = vld1q_f16(&pA[i * cols + j]); for(k=0; k < i; k++) { - vecX = vld1q_f16(&pX[n*k+j]); + vecX = vld1q_f16(&pX[cols*k+j]); vecA = vfmsq(vecA,vdupq_n_f16(pLT[n*i + k]),vecX); } - if (pLT[n*i + i]==0.0f16) + if ((_Float16)pLT[n*i + i]==0.0f16) { return(ARM_MATH_SINGULAR); } invLT = 1.0f16 / (_Float16)pLT[n*i + i]; vecA = vmulq(vecA,vdupq_n_f16(invLT)); - vst1q(&pX[i*n+j],vecA); + vst1q(&pX[i*cols+j],vecA); } - for(; j < n; j ++) + for(; j < cols; j ++) { a_col = &pA[j]; lt_row = &pLT[n*i]; - _Float16 tmp=a_col[i * n]; + _Float16 tmp=a_col[i * cols]; for(k=0; k < i; k++) { - tmp -= (_Float16)lt_row[k] * (_Float16)pX[n*k+j]; + tmp -= (_Float16)lt_row[k] * (_Float16)pX[cols*k+j]; } - if (lt_row[i]==0.0f16) + if ((_Float16)lt_row[i]==0.0f16) { return(ARM_MATH_SINGULAR); } tmp = tmp / (_Float16)lt_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } @@ -164,9 +166,8 @@ #ifdef ARM_MATH_MATRIX_CHECK /* Check for matrix mismatch condition */ - if ((ut->numRows != lt->numCols) || - (a->numRows != a->numCols) || - (ut->numRows != a->numRows) ) + if ((lt->numRows != lt->numCols) || + (lt->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -184,9 +185,10 @@ x2 = (a2 - c2 x3) / b2 */ - int i,j,k,n; + int i,j,k,n,cols; n = dst->numRows; + cols = dst->numCols; float16_t *pX = dst->pData; float16_t *pLT = lt->pData; @@ -195,7 +197,7 @@ float16_t *lt_row; float16_t *a_col; - for(j=0; j < n; j ++) + for(j=0; j < cols; j ++) { a_col = &pA[j]; @@ -203,19 +205,19 @@ { lt_row = &pLT[n*i]; - float16_t tmp=a_col[i * n]; + float16_t tmp=a_col[i * cols]; for(k=0; k < i; k++) { - tmp -= lt_row[k] * pX[n*k+j]; + tmp -= (_Float16)lt_row[k] * (_Float16)pX[cols*k+j]; } - if (lt_row[i]==0.0f) + if ((_Float16)lt_row[i]==0.0f16) { return(ARM_MATH_SINGULAR); } - tmp = tmp / lt_row[i]; - pX[i*n+j] = tmp; + tmp = (_Float16)tmp / (_Float16)lt_row[i]; + pX[i*cols+j] = tmp; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f32.c index dcd529c..3ffd076 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f32.c @@ -5,11 +5,13 @@ * Title: arm_mat_solve_lower_triangular_f32.c * Description: Solve linear system LT X = A with LT lower triangular matrix * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * 
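The lower-triangular kernels above perform forward substitution, and the relaxed size check now only requires L to be square with as many rows as A, so A (and therefore X) may have any number of columns; that is why the inner loops were rewritten in terms of cols rather than n. A minimal sketch of the f32 entry point, with illustrative sizes, values and names:

    /* Illustrative sketch only: solve L * X = A, L lower triangular 3x3,
     * A and X of size 3x2 (rectangular right-hand side now allowed). */
    #include "arm_math.h"

    static float32_t l_data[9] = {
        2.0f, 0.0f, 0.0f,
        1.0f, 3.0f, 0.0f,
        4.0f, 5.0f, 6.0f
    };
    static float32_t a_data[6] = {
        2.0f,  4.0f,
        5.0f,  7.0f,
        32.0f, 41.0f
    };
    static float32_t x_data[6];

    arm_status lower_solve_example(void)
    {
        arm_matrix_instance_f32 L, A, X;

        arm_mat_init_f32(&L, 3, 3, l_data);
        arm_mat_init_f32(&A, 3, 2, a_data);
        arm_mat_init_f32(&X, 3, 2, x_data);

        return arm_mat_solve_lower_triangular_f32(&L, &A, &X);
    }

A zero on the diagonal of L makes the system unsolvable, which the routine reports as ARM_MATH_SINGULAR.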
-------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -62,9 +64,8 @@ #ifdef ARM_MATH_MATRIX_CHECK /* Check for matrix mismatch condition */ - if ((ut->numRows != lt->numCols) || - (a->numRows != a->numCols) || - (ut->numRows != a->numRows) ) + if ((lt->numRows != lt->numCols) || + (lt->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -82,9 +83,10 @@ x2 = (a2 - c2 x3) / b2 */ - int i,j,k,n; + int i,j,k,n,cols; n = dst->numRows; + cols = dst->numCols; float32_t *pX = dst->pData; float32_t *pLT = lt->pData; @@ -101,13 +103,13 @@ for(i=0; i < n ; i++) { - for(j=0; j+3 < n; j += 4) + for(j=0; j+3 < cols; j += 4) { - vecA = vld1q_f32(&pA[i * n + j]); + vecA = vld1q_f32(&pA[i * cols + j]); for(k=0; k < i; k++) { - vecX = vld1q_f32(&pX[n*k+j]); + vecX = vld1q_f32(&pX[cols*k+j]); vecA = vfmsq(vecA,vdupq_n_f32(pLT[n*i + k]),vecX); } @@ -118,20 +120,20 @@ invLT = 1.0f / pLT[n*i + i]; vecA = vmulq(vecA,vdupq_n_f32(invLT)); - vst1q(&pX[i*n+j],vecA); + vst1q(&pX[i*cols+j],vecA); } - for(; j < n; j ++) + for(; j < cols; j ++) { a_col = &pA[j]; lt_row = &pLT[n*i]; - float32_t tmp=a_col[i * n]; + float32_t tmp=a_col[i * cols]; for(k=0; k < i; k++) { - tmp -= lt_row[k] * pX[n*k+j]; + tmp -= lt_row[k] * pX[cols*k+j]; } if (lt_row[i]==0.0f) @@ -139,7 +141,7 @@ return(ARM_MATH_SINGULAR); } tmp = tmp / lt_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } @@ -163,9 +165,8 @@ #ifdef ARM_MATH_MATRIX_CHECK /* Check for matrix mismatch condition */ - if ((ut->numRows != lt->numCols) || - (a->numRows != a->numCols) || - (ut->numRows != a->numRows) ) + if ((lt->numRows != lt->numCols) || + (lt->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -183,9 +184,10 @@ x2 = (a2 - c2 x3) / b2 */ - int i,j,k,n; + int i,j,k,n,cols; n = dst->numRows; + cols = dst->numCols; float32_t *pX = dst->pData; float32_t *pLT = lt->pData; @@ -202,13 +204,13 @@ for(i=0; i < n ; i++) { - for(j=0; j+3 < n; j += 4) + for(j=0; j+3 < cols; j += 4) { - vecA = vld1q_f32(&pA[i * n + j]); + vecA = vld1q_f32(&pA[i * cols + j]); for(k=0; k < i; k++) { - vecX = vld1q_f32(&pX[n*k+j]); + vecX = vld1q_f32(&pX[cols*k+j]); vecA = vfmsq_f32(vecA,vdupq_n_f32(pLT[n*i + k]),vecX); } @@ -219,20 +221,20 @@ invLT = 1.0f / pLT[n*i + i]; vecA = vmulq_f32(vecA,vdupq_n_f32(invLT)); - vst1q_f32(&pX[i*n+j],vecA); + vst1q_f32(&pX[i*cols+j],vecA); } - for(; j < n; j ++) + for(; j < cols; j ++) { a_col = &pA[j]; lt_row = &pLT[n*i]; - float32_t tmp=a_col[i * n]; + float32_t tmp=a_col[i * cols]; for(k=0; k < i; k++) { - tmp -= lt_row[k] * pX[n*k+j]; + tmp -= lt_row[k] * pX[cols*k+j]; } if (lt_row[i]==0.0f) @@ -240,7 +242,7 @@ return(ARM_MATH_SINGULAR); } tmp = tmp / lt_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } @@ -261,11 +263,9 @@ #ifdef ARM_MATH_MATRIX_CHECK - /* Check for matrix mismatch condition */ - if ((ut->numRows != lt->numCols) || - (a->numRows != a->numCols) || - (ut->numRows != a->numRows) ) + if ((lt->numRows != lt->numCols) || + (lt->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -283,9 +283,7 @@ x2 = (a2 - c2 x3) / b2 */ - int i,j,k,n; - - n = dst->numRows; + int i,j,k,n,cols; float32_t *pX = dst->pData; float32_t *pLT = lt->pData; @@ -294,19 +292,23 @@ 
float32_t *lt_row; float32_t *a_col; - for(j=0; j < n; j ++) + n = dst->numRows; + cols = dst -> numCols; + + + for(j=0; j < cols; j ++) { a_col = &pA[j]; for(i=0; i < n ; i++) { - lt_row = &pLT[n*i]; + float32_t tmp=a_col[i * cols]; - float32_t tmp=a_col[i * n]; + lt_row = &pLT[n*i]; for(k=0; k < i; k++) { - tmp -= lt_row[k] * pX[n*k+j]; + tmp -= lt_row[k] * pX[cols*k+j]; } if (lt_row[i]==0.0f) @@ -314,7 +316,7 @@ return(ARM_MATH_SINGULAR); } tmp = tmp / lt_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f64.c index 67dc3be..cc73005 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_lower_triangular_f64.c @@ -5,11 +5,13 @@ * Title: arm_mat_solve_lower_triangular_f64.c * Description: Solve linear system LT X = A with LT lower triangular matrix * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -57,9 +59,8 @@ #ifdef ARM_MATH_MATRIX_CHECK /* Check for matrix mismatch condition */ - if ((ut->numRows != lt->numCols) || - (a->numRows != a->numCols) || - (ut->numRows != a->numRows) ) + if ((lt->numRows != lt->numCols) || + (lt->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; @@ -77,9 +78,7 @@ x2 = (a2 - c2 x3) / b2 */ - int i,j,k,n; - - n = dst->numRows; + int i,j,k,n,cols; float64_t *pX = dst->pData; float64_t *pLT = lt->pData; @@ -88,27 +87,30 @@ float64_t *lt_row; float64_t *a_col; - for(j=0; j < n; j ++) + n = dst->numRows; + cols = dst->numCols; + + for(j=0; j < cols; j ++) { a_col = &pA[j]; for(i=0; i < n ; i++) { + float64_t tmp=a_col[i * cols]; + lt_row = &pLT[n*i]; - float64_t tmp=a_col[i * n]; - for(k=0; k < i; k++) { - tmp -= lt_row[k] * pX[n*k+j]; + tmp -= lt_row[k] * pX[cols*k+j]; } - if (lt_row[i]==0.0f) + if (lt_row[i]==0.0) { return(ARM_MATH_SINGULAR); } tmp = tmp / lt_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f16.c index 427317c..0f03eaa 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f16.c @@ -5,11 +5,13 @@ * Title: arm_mat_solve_upper_triangular_f16.c * Description: Solve linear system UT X = A with UT upper triangular matrix * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * 
-------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -66,7 +68,6 @@ arm_status status; /* status of matrix inverse */ /* Check for matrix mismatch condition */ if ((ut->numRows != ut->numCols) || - (a->numRows != a->numCols) || (ut->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ @@ -78,9 +79,10 @@ arm_status status; /* status of matrix inverse */ { - int i,j,k,n; + int i,j,k,n,cols; n = dst->numRows; + cols = dst->numCols; float16_t *pX = dst->pData; float16_t *pUT = ut->pData; @@ -96,17 +98,17 @@ arm_status status; /* status of matrix inverse */ for(i=n-1; i >= 0 ; i--) { - for(j=0; j+7 < n; j +=8) + for(j=0; j+7 < cols; j +=8) { - vecA = vld1q_f16(&pA[i * n + j]); + vecA = vld1q_f16(&pA[i * cols + j]); for(k=n-1; k > i; k--) { - vecX = vld1q_f16(&pX[n*k+j]); + vecX = vld1q_f16(&pX[cols*k+j]); vecA = vfmsq(vecA,vdupq_n_f16(pUT[n*i + k]),vecX); } - if (pUT[n*i + i]==0.0f16) + if ((_Float16)pUT[n*i + i]==0.0f16) { return(ARM_MATH_SINGULAR); } @@ -115,28 +117,28 @@ arm_status status; /* status of matrix inverse */ vecA = vmulq(vecA,vdupq_n_f16(invUT)); - vst1q(&pX[i*n+j],vecA); + vst1q(&pX[i*cols+j],vecA); } - for(; j < n; j ++) + for(; j < cols; j ++) { a_col = &pA[j]; ut_row = &pUT[n*i]; - _Float16 tmp=a_col[i * n]; + _Float16 tmp=a_col[i * cols]; for(k=n-1; k > i; k--) { - tmp -= (_Float16)ut_row[k] * (_Float16)pX[n*k+j]; + tmp -= (_Float16)ut_row[k] * (_Float16)pX[cols*k+j]; } - if (ut_row[i]==0.0f16) + if ((_Float16)ut_row[i]==0.0f16) { return(ARM_MATH_SINGULAR); } tmp = tmp / (_Float16)ut_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } @@ -162,7 +164,6 @@ arm_status status; /* status of matrix inverse */ /* Check for matrix mismatch condition */ if ((ut->numRows != ut->numCols) || - (a->numRows != a->numCols) || (ut->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ @@ -174,9 +175,10 @@ arm_status status; /* status of matrix inverse */ { - int i,j,k,n; + int i,j,k,n,cols; n = dst->numRows; + cols = dst->numCols; float16_t *pX = dst->pData; float16_t *pUT = ut->pData; @@ -185,7 +187,7 @@ arm_status status; /* status of matrix inverse */ float16_t *ut_row; float16_t *a_col; - for(j=0; j < n; j ++) + for(j=0; j < cols; j ++) { a_col = &pA[j]; @@ -193,19 +195,19 @@ arm_status status; /* status of matrix inverse */ { ut_row = &pUT[n*i]; - float16_t tmp=a_col[i * n]; + float16_t tmp=a_col[i * cols]; for(k=n-1; k > i; k--) { - tmp -= ut_row[k] * pX[n*k+j]; + tmp -= (_Float16)ut_row[k] * (_Float16)pX[cols*k+j]; } - if (ut_row[i]==0.0f) + if ((_Float16)ut_row[i]==0.0f16) { return(ARM_MATH_SINGULAR); } - tmp = tmp / ut_row[i]; - pX[i*n+j] = tmp; + tmp = (_Float16)tmp / (_Float16)ut_row[i]; + pX[i*cols+j] = tmp; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f32.c index 074901d..4b3ef86 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f32.c @@ -5,11 +5,13 @@ * Title: 
arm_mat_solve_upper_triangular_f32.c * Description: Solve linear system UT X = A with UT upper triangular matrix * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -63,7 +65,6 @@ arm_status status; /* status of matrix inverse */ /* Check for matrix mismatch condition */ if ((ut->numRows != ut->numCols) || - (a->numRows != a->numCols) || (ut->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ @@ -75,9 +76,10 @@ arm_status status; /* status of matrix inverse */ { - int i,j,k,n; + int i,j,k,n,cols; n = dst->numRows; + cols = dst->numCols; float32_t *pX = dst->pData; float32_t *pUT = ut->pData; @@ -93,13 +95,13 @@ arm_status status; /* status of matrix inverse */ for(i=n-1; i >= 0 ; i--) { - for(j=0; j+3 < n; j +=4) + for(j=0; j+3 < cols; j +=4) { - vecA = vld1q_f32(&pA[i * n + j]); + vecA = vld1q_f32(&pA[i * cols + j]); for(k=n-1; k > i; k--) { - vecX = vld1q_f32(&pX[n*k+j]); + vecX = vld1q_f32(&pX[cols*k+j]); vecA = vfmsq(vecA,vdupq_n_f32(pUT[n*i + k]),vecX); } @@ -112,20 +114,20 @@ arm_status status; /* status of matrix inverse */ vecA = vmulq(vecA,vdupq_n_f32(invUT)); - vst1q(&pX[i*n+j],vecA); + vst1q(&pX[i*cols+j],vecA); } - for(; j < n; j ++) + for(; j < cols; j ++) { a_col = &pA[j]; ut_row = &pUT[n*i]; - float32_t tmp=a_col[i * n]; + float32_t tmp=a_col[i * cols]; for(k=n-1; k > i; k--) { - tmp -= ut_row[k] * pX[n*k+j]; + tmp -= ut_row[k] * pX[cols*k+j]; } if (ut_row[i]==0.0f) @@ -133,7 +135,7 @@ arm_status status; /* status of matrix inverse */ return(ARM_MATH_SINGULAR); } tmp = tmp / ut_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } @@ -160,7 +162,6 @@ arm_status status; /* status of matrix inverse */ /* Check for matrix mismatch condition */ if ((ut->numRows != ut->numCols) || - (a->numRows != a->numCols) || (ut->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ @@ -172,9 +173,10 @@ arm_status status; /* status of matrix inverse */ { - int i,j,k,n; + int i,j,k,n,cols; n = dst->numRows; + cols = dst->numCols; float32_t *pX = dst->pData; float32_t *pUT = ut->pData; @@ -190,13 +192,13 @@ arm_status status; /* status of matrix inverse */ for(i=n-1; i >= 0 ; i--) { - for(j=0; j+3 < n; j +=4) + for(j=0; j+3 < cols; j +=4) { - vecA = vld1q_f32(&pA[i * n + j]); + vecA = vld1q_f32(&pA[i * cols + j]); for(k=n-1; k > i; k--) { - vecX = vld1q_f32(&pX[n*k+j]); + vecX = vld1q_f32(&pX[cols*k+j]); vecA = vfmsq_f32(vecA,vdupq_n_f32(pUT[n*i + k]),vecX); } @@ -209,20 +211,20 @@ arm_status status; /* status of matrix inverse */ vecA = vmulq_f32(vecA,vdupq_n_f32(invUT)); - vst1q_f32(&pX[i*n+j],vecA); + vst1q_f32(&pX[i*cols+j],vecA); } - for(; j < n; j ++) + for(; j < cols; j ++) { a_col = &pA[j]; ut_row = &pUT[n*i]; - float32_t tmp=a_col[i * n]; + float32_t tmp=a_col[i * cols]; for(k=n-1; k > i; k--) { - tmp -= ut_row[k] * pX[n*k+j]; + tmp -= ut_row[k] * pX[cols*k+j]; } if (ut_row[i]==0.0f) @@ -230,7 +232,7 @@ arm_status status; /* status of matrix inverse */ return(ARM_MATH_SINGULAR); } tmp = tmp / ut_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } @@ -256,7 +258,6 @@ arm_status status; /* status of matrix inverse */ /* Check for matrix mismatch condition */ if ((ut->numRows != ut->numCols) 
|| - (a->numRows != a->numCols) || (ut->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ @@ -268,9 +269,7 @@ arm_status status; /* status of matrix inverse */ { - int i,j,k,n; - - n = dst->numRows; + int i,j,k,n,cols; float32_t *pX = dst->pData; float32_t *pUT = ut->pData; @@ -279,19 +278,22 @@ arm_status status; /* status of matrix inverse */ float32_t *ut_row; float32_t *a_col; - for(j=0; j < n; j ++) + n = dst->numRows; + cols = dst->numCols; + + for(j=0; j < cols; j ++) { a_col = &pA[j]; for(i=n-1; i >= 0 ; i--) { + float32_t tmp=a_col[i * cols]; + ut_row = &pUT[n*i]; - float32_t tmp=a_col[i * n]; - for(k=n-1; k > i; k--) { - tmp -= ut_row[k] * pX[n*k+j]; + tmp -= ut_row[k] * pX[cols*k+j]; } if (ut_row[i]==0.0f) @@ -299,7 +301,7 @@ arm_status status; /* status of matrix inverse */ return(ARM_MATH_SINGULAR); } tmp = tmp / ut_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f64.c index d10eae2..ce6153b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_solve_upper_triangular_f64.c @@ -5,11 +5,13 @@ * Title: arm_mat_solve_upper_triangular_f64.c * Description: Solve linear system UT X = A with UT upper triangular matrix * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
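/*
 * Minimal plain-C sketch of the back-substitution pattern used above
 * (illustrative only, not the CMSIS kernel): U is an n x n upper-triangular
 * matrix stored row-major, while A and X are n x cols, so each of the cols
 * right-hand-side columns is solved independently. All names here are
 * hypothetical.
 */
static int solve_upper_triangular_ref(const float *U, const float *A,
                                      float *X, int n, int cols)
{
    for (int j = 0; j < cols; j++)            /* one column of A / X at a time */
    {
        for (int i = n - 1; i >= 0; i--)      /* back substitution, bottom row up */
        {
            float tmp = A[i * cols + j];

            for (int k = n - 1; k > i; k--)
            {
                tmp -= U[n * i + k] * X[cols * k + j];
            }

            if (U[n * i + i] == 0.0f)
            {
                return -1;                    /* singular matrix */
            }

            X[i * cols + j] = tmp / U[n * i + i];
        }
    }
    return 0;
}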
* * SPDX-License-Identifier: Apache-2.0 * @@ -58,7 +60,6 @@ arm_status status; /* status of matrix inverse */ /* Check for matrix mismatch condition */ if ((ut->numRows != ut->numCols) || - (a->numRows != a->numCols) || (ut->numRows != a->numRows) ) { /* Set status as ARM_MATH_SIZE_MISMATCH */ @@ -70,9 +71,7 @@ arm_status status; /* status of matrix inverse */ { - int i,j,k,n; - - n = dst->numRows; + int i,j,k,n,cols; float64_t *pX = dst->pData; float64_t *pUT = ut->pData; @@ -81,27 +80,30 @@ arm_status status; /* status of matrix inverse */ float64_t *ut_row; float64_t *a_col; - for(j=0; j < n; j ++) + n = dst->numRows; + cols = dst->numCols; + + for(j=0; j < cols; j ++) { a_col = &pA[j]; for(i=n-1; i >= 0 ; i--) { + float64_t tmp=a_col[i * cols]; + ut_row = &pUT[n*i]; - float64_t tmp=a_col[i * n]; - for(k=n-1; k > i; k--) { - tmp -= ut_row[k] * pX[n*k+j]; + tmp -= ut_row[k] * pX[cols*k+j]; } - if (ut_row[i]==0.0f) + if (ut_row[i]==0.0) { return(ARM_MATH_SINGULAR); } tmp = tmp / ut_row[i]; - pX[i*n+j] = tmp; + pX[i*cols+j] = tmp; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f16.c index fb0f7b7..2e07194 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_sub_f16.c * Description: Floating-point matrix subtraction * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -62,7 +62,7 @@ arm_status arm_mat_sub_f16( arm_status status; /* status of matrix subtraction */ uint32_t numSamples; /* total number of elements in the matrix */ float16_t *pDataA, *pDataB, *pDataDst; - f16x8_t vecA, vecB, vecDst; + f16x8_t vecA, vecB, vecDst = { 0 }; float16_t const *pSrcAVec; float16_t const *pSrcBVec; uint32_t blkCnt; /* loop counters */ @@ -170,10 +170,10 @@ arm_status arm_mat_sub_f16( /* C(m,n) = A(m,n) - B(m,n) */ /* Subtract and store result in destination buffer. */ - *pOut++ = (*pInA++) - (*pInB++); - *pOut++ = (*pInA++) - (*pInB++); - *pOut++ = (*pInA++) - (*pInB++); - *pOut++ = (*pInA++) - (*pInB++); + *pOut++ = (_Float16)(*pInA++) - (_Float16)(*pInB++); + *pOut++ = (_Float16)(*pInA++) - (_Float16)(*pInB++); + *pOut++ = (_Float16)(*pInA++) - (_Float16)(*pInB++); + *pOut++ = (_Float16)(*pInA++) - (_Float16)(*pInB++); /* Decrement loop counter */ blkCnt--; @@ -194,7 +194,7 @@ arm_status arm_mat_sub_f16( /* C(m,n) = A(m,n) - B(m,n) */ /* Subtract and store result in destination buffer. 
*/ - *pOut++ = (*pInA++) - (*pInB++); + *pOut++ = (_Float16)(*pInA++) - (_Float16)(*pInB++); /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f32.c index 0748e08..df58b98 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_sub_f32.c * Description: Floating-point matrix subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -38,8 +38,27 @@ @defgroup MatrixSub Matrix Subtraction Subtract two matrices. - \image html MatrixSubtraction.gif "Subraction of two 3 x 3 matrices" - + @par Subraction of two 3 x 3 matrices + + \f[ + \begin{pmatrix} + a_{1,1} & a_{1,2} & a_{1,3} \\ + a_{2,1} & a_{2,2} & a_{2,3} \\ + a_{3,1} & a_{3,2} & a_{3,3} \\ + \end{pmatrix} + - + \begin{pmatrix} + b_{1,1} & b_{1,2} & b_{1,3} \\ + b_{2,1} & b_{2,2} & b_{2,3} \\ + b_{3,1} & b_{3,2} & b_{3,3} \\ + \end{pmatrix} + = + \begin{pmatrix} + a_{1,1}-b_{1,1} & a_{1,2}-b_{1,2} & a_{1,3}-b_{1,3} \\ + a_{2,1}-b_{2,1} & a_{2,2}-b_{2,2} & a_{2,3}-b_{2,3} \\ + a_{3,1}-b_{3,1} & a_{3,2}-b_{3,2} & a_{3,3}-b_{3,3} \\ + \end{pmatrix} + \f] The functions check to make sure that pSrcA, pSrcB, and pDst have the same number of rows and columns. @@ -68,7 +87,7 @@ arm_status arm_mat_sub_f32( arm_status status; /* status of matrix subtraction */ uint32_t numSamples; /* total number of elements in the matrix */ float32_t *pDataA, *pDataB, *pDataDst; - f32x4_t vecA, vecB, vecDst; + f32x4_t vecA, vecB, vecDst = { 0 }; float32_t const *pSrcAVec; float32_t const *pSrcBVec; uint32_t blkCnt; /* loop counters */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f64.c index e41c7dc..3f405d6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_f64.c @@ -5,13 +5,13 @@ * Title: arm_mat_sub_f64.c * Description: Floating-point matrix subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,16 +34,6 @@ @ingroup groupMatrix */ -/** - @defgroup MatrixSub Matrix Subtraction - - Subtract two matrices. 
- \image html MatrixSubtraction.gif "Subraction of two 3 x 3 matrices" - - The functions check to make sure that - pSrcA, pSrcB, and pDst have the same - number of rows and columns. - */ /** @addtogroup MatrixSub diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_q15.c index dff3aa1..e611663 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_q15.c @@ -5,13 +5,13 @@ * Title: arm_mat_sub_q15.c * Description: Q15 Matrix subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -61,7 +61,7 @@ arm_status arm_mat_sub_q15( { uint32_t numSamples; /* total number of elements in the matrix */ q15_t *pDataA, *pDataB, *pDataDst; - q15x8_t vecA, vecB, vecDst; + q15x8_t vecA, vecB, vecDst = { 0 }; q15_t const *pSrcAVec; q15_t const *pSrcBVec; uint32_t blkCnt; /* loop counters */ @@ -169,8 +169,8 @@ arm_status arm_mat_sub_q15( /* Subtract, Saturate and store result in destination buffer. */ #if defined (ARM_MATH_DSP) - write_q15x2_ia (&pOut, __QSUB16(read_q15x2_ia ((q15_t **) &pInA), read_q15x2_ia ((q15_t **) &pInB))); - write_q15x2_ia (&pOut, __QSUB16(read_q15x2_ia ((q15_t **) &pInA), read_q15x2_ia ((q15_t **) &pInB))); + write_q15x2_ia (&pOut, __QSUB16(read_q15x2_ia (&pInA), read_q15x2_ia (&pInB))); + write_q15x2_ia (&pOut, __QSUB16(read_q15x2_ia (&pInA), read_q15x2_ia (&pInB))); #else *pOut++ = (q15_t) __SSAT(((q31_t) * pInA++ - *pInB++), 16); *pOut++ = (q15_t) __SSAT(((q31_t) * pInA++ - *pInB++), 16); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_q31.c index b81ca7c..9643bdc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_sub_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_sub_q31.c * Description: Q31 matrix subtraction * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
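/*
 * Plain-C sketch of the saturating Q15 subtraction performed above by
 * __QSUB16 / __SSAT (an illustrative reference, not the CMSIS kernel): each
 * result is clamped to the signed 16-bit range [-32768, 32767].
 */
#include <stdint.h>

static int16_t q15_sub_sat_ref(int16_t a, int16_t b)
{
    int32_t diff = (int32_t)a - (int32_t)b;   /* widen before subtracting */

    if (diff >  32767) diff =  32767;         /* saturate high */
    if (diff < -32768) diff = -32768;         /* saturate low  */

    return (int16_t)diff;
}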
* * SPDX-License-Identifier: Apache-2.0 * @@ -60,7 +60,7 @@ arm_status arm_mat_sub_q31( { uint32_t numSamples; /* total number of elements in the matrix */ q31_t *pDataA, *pDataB, *pDataDst; - q31x4_t vecA, vecB, vecDst; + q31x4_t vecA, vecB, vecDst = { 0 }; q31_t const *pSrcAVec; q31_t const *pSrcBVec; uint32_t blkCnt; /* loop counters */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f16.c index b63e988..b162f2c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f16.c @@ -5,13 +5,13 @@ * Title: arm_mat_trans_f16.c * Description: Floating-point matrix transpose * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f32.c index 906d755..b2baa63 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f32.c @@ -5,13 +5,13 @@ * Title: arm_mat_trans_f32.c * Description: Floating-point matrix transpose * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -40,7 +40,23 @@ Tranposes a matrix. Transposing an M x N matrix flips it around the center diagonal and results in an N x M matrix. 
- \image html MatrixTranspose.gif "Transpose of a 3 x 3 matrix" + + @par Transpose of a 3 x 3 matrix + + \f[ + \begin{pmatrix} + a_{1,1} & a_{1,2} & a_{1,3} \\ + a_{2,1} & a_{2,2} & a_{2,3} \\ + a_{3,1} & a_{3,2} & a_{3,3} \\ + \end{pmatrix}^T + = + \begin{pmatrix} + a_{1,1} & a_{2,1} & a_{3,1} \\ + a_{1,2} & a_{2,2} & a_{3,2} \\ + a_{1,3} & a_{2,3} & a_{3,3} \\ + \end{pmatrix} + \f] + */ /** diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f64.c index d4d94a4..d01ce3b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_f64.c @@ -5,13 +5,13 @@ * Title: arm_mat_trans_f64.c * Description: Floating-point matrix transpose * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2017 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -34,14 +34,7 @@ @ingroup groupMatrix */ -/** - @defgroup MatrixTrans Matrix Transpose - - Tranposes a matrix. - Transposing an M x N matrix flips it around the center diagonal and results in an N x M matrix. - \image html MatrixTranspose.gif "Transpose of a 3 x 3 matrix" - */ /** @addtogroup MatrixTrans diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q15.c index 9a39c08..de00d1b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q15.c @@ -5,13 +5,13 @@ * Title: arm_mat_trans_q15.c * Description: Q15 matrix transpose * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -142,7 +142,7 @@ arm_status arm_mat_trans_q15( while (col > 0U) /* column loop */ { /* Read two elements from row */ - in = read_q15x2_ia ((q15_t **) &pIn); + in = read_q15x2_ia (&pIn); /* Unpack and store one element in destination */ #ifndef ARM_MATH_BIG_ENDIAN @@ -165,7 +165,7 @@ arm_status arm_mat_trans_q15( pOut += nRows; /* Read two elements from row */ - in = read_q15x2_ia ((q15_t **) &pIn); + in = read_q15x2_ia (&pIn); /* Unpack and store one element in destination */ #ifndef ARM_MATH_BIG_ENDIAN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q31.c index be2a306..4f77a28 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q31.c @@ -5,13 +5,13 @@ * Title: arm_mat_trans_q31.c * Description: Q31 matrix transpose * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q7.c index cd40f97..666cdfa 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_trans_q7.c @@ -5,13 +5,13 @@ * Title: arm_mat_trans_q7.c * Description: Q7 matrix transpose * - * $Date: 06. July 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -63,10 +63,10 @@ arm_status arm_mat_trans_q7(const arm_matrix_instance_q7 *pSrc, arm_matrix_insta #ifdef ARM_MATH_MATRIX_CHECK /* Check for matrix mismatch condition */ - if ((pSrc->numRows != pDst->dstCols) || (pSrc->srcCols != pDst->numCols)) + if ((pSrc->numRows != pDst->numCols) || (pSrc->numCols != pDst->numRows)) { /* Set status as ARM_MATH_SIZE_MISMATCH */ - return = ARM_MATH_SIZE_MISMATCH; + return ARM_MATH_SIZE_MISMATCH; } #endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_f16.c index 7944086..fb7e53c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_f16.c @@ -5,12 +5,14 @@ * Title: arm_mat_vec_mult_f16.c * Description: Floating-point matrix and vector multiplication * - * $Date: 07. July 202 + * $Date: 23 April 2021 * - * Target Processor: Cortex-M cores + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -310,10 +312,10 @@ void arm_mat_vec_mult_f16(const arm_matrix_instance_f16 *pSrcMat, const float16_ pInVec = pVec; /* Initialize accumulators */ - float16_t sum1 = 0.0f; - float16_t sum2 = 0.0f; - float16_t sum3 = 0.0f; - float16_t sum4 = 0.0f; + float16_t sum1 = 0.0f16; + float16_t sum2 = 0.0f16; + float16_t sum3 = 0.0f16; + float16_t sum4 = 0.0f16; /* Loop unrolling: process 2 columns per iteration */ colCnt = numCols; @@ -331,13 +333,13 @@ void arm_mat_vec_mult_f16(const arm_matrix_instance_f16 *pSrcMat, const float16_ vecData = *(pInVec)++; // Read 8 values from the matrix - 2 values from each of 4 rows, and do multiply accumulate matData = *(pInA1)++; - sum1 += matData * vecData; + sum1 += (_Float16)matData * (_Float16)vecData; matData = *(pInA2)++; - sum2 += matData * vecData; + sum2 += (_Float16)matData * (_Float16)vecData; matData = *(pInA3)++; - sum3 += matData * vecData; + sum3 += (_Float16)matData * (_Float16)vecData; matData = *(pInA4)++; - sum4 += matData * vecData; + sum4 += (_Float16)matData * (_Float16)vecData; // Decrement the loop counter colCnt--; @@ -359,7 +361,7 @@ void arm_mat_vec_mult_f16(const arm_matrix_instance_f16 *pSrcMat, const float16_ row = numRows & 3u; while (row > 0) { - float16_t sum = 0.0f; + float16_t sum = 0.0f16; pInVec = pVec; pInA1 = pSrcA + i; @@ -370,14 +372,14 @@ void arm_mat_vec_mult_f16(const arm_matrix_instance_f16 *pSrcMat, const float16_ vecData2 = *(pInVec)++; matData = *(pInA1)++; matData2 = *(pInA1)++; - sum += matData * vecData; - sum += matData2 * vecData2; + sum += (_Float16)matData * (_Float16)vecData; + sum += (_Float16)matData2 * (_Float16)vecData2; colCnt--; } // process remainder of row colCnt = numCols & 1u; while (colCnt > 0) { - sum += *pInA1++ * *pInVec++; + sum += (_Float16)*pInA1++ * (_Float16)*pInVec++; colCnt--; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_f32.c 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_f32.c index 67b390a..145ec15 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_f32.c @@ -5,12 +5,14 @@ * Title: arm_mat_vec_mult_f32.c * Description: Floating-point matrix and vector multiplication * - * $Date: 07. July 202 + * $Date: 23 April 2021 * - * Target Processor: Cortex-M cores + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -165,7 +167,7 @@ void arm_mat_vec_mult_f32( } /* - * compute 2 rows in parrallel + * compute 2 rows in parallel */ if (row >= 2) { @@ -310,16 +312,16 @@ void arm_mat_vec_mult_f32(const arm_matrix_instance_f32 *pSrcMat, const float32_ /* The following loop performs the dot-product of each row in pSrcA with the vector */ /* row loop */ while (row > 0) { - /* For every row wise process, the pInVec pointer is set - ** to the starting address of the vector */ - pInVec = pVec; - /* Initialize accumulators */ float32_t sum1 = 0.0f; float32_t sum2 = 0.0f; float32_t sum3 = 0.0f; float32_t sum4 = 0.0f; + /* For every row wise process, the pInVec pointer is set + ** to the starting address of the vector */ + pInVec = pVec; + /* Loop unrolling: process 2 columns per iteration */ colCnt = numCols; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q15.c index 177de77..9d9b1b4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q15.c @@ -5,12 +5,14 @@ * Title: arm_mat_vec_mult_q15.c * Description: Q15 matrix and vector multiplication * - * $Date: 07. July 202 + * $Date: 23 April 2021 * - * Target Processor: Cortex-M cores + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
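/*
 * Straightforward reference for the matrix-vector product that the unrolled
 * loops above compute (a sketch, not the CMSIS implementation): each output
 * element is the dot product of one matrix row with the input vector.
 */
static void mat_vec_mult_ref(const float *mat, const float *vec, float *dst,
                             int numRows, int numCols)
{
    for (int i = 0; i < numRows; i++)
    {
        float sum = 0.0f;

        for (int j = 0; j < numCols; j++)
        {
            sum += mat[i * numCols + j] * vec[j];
        }

        dst[i] = sum;
    }
}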
* * SPDX-License-Identifier: Apache-2.0 * @@ -289,16 +291,16 @@ void arm_mat_vec_mult_q15(const arm_matrix_instance_q15 *pSrcMat, const q15_t *p /* The following loop performs the dot-product of each row in pSrcA with the vector */ /* row loop */ while (row > 0) { - /* For every row wise process, the pInVec pointer is set - ** to the starting address of the vector */ - pInVec = pVec; - /* Initialize accumulators */ q63_t sum1 = 0; q63_t sum2 = 0; q63_t sum3 = 0; q63_t sum4 = 0; + /* For every row wise process, the pInVec pointer is set + ** to the starting address of the vector */ + pInVec = pVec; + /* Loop unrolling: process 2 columns per iteration */ colCnt = numCols >> 1; @@ -311,16 +313,16 @@ void arm_mat_vec_mult_q15(const arm_matrix_instance_q15 *pSrcMat, const q15_t *p // Main loop: matrix-vector multiplication while (colCnt > 0u) { // Read 2 values from vector - vecData = read_q15x2_ia ((q15_t **) &pInVec); + vecData = read_q15x2_ia (&pInVec); // Read 8 values from the matrix - 2 values from each of 4 rows, and do multiply accumulate - matData = read_q15x2_ia ((q15_t **) &pInA1); + matData = read_q15x2_ia (&pInA1); sum1 = __SMLALD(matData, vecData, sum1); - matData = read_q15x2_ia ((q15_t **) &pInA2); + matData = read_q15x2_ia (&pInA2); sum2 = __SMLALD(matData, vecData, sum2); - matData = read_q15x2_ia ((q15_t **) &pInA3); + matData = read_q15x2_ia (&pInA3); sum3 = __SMLALD(matData, vecData, sum3); - matData = read_q15x2_ia ((q15_t **) &pInA4); + matData = read_q15x2_ia (&pInA4); sum4 = __SMLALD(matData, vecData, sum4); // Decrement the loop counter @@ -361,10 +363,10 @@ void arm_mat_vec_mult_q15(const arm_matrix_instance_q15 *pSrcMat, const q15_t *p colCnt = numCols >> 2; while (colCnt > 0) { - vecData = read_q15x2_ia ((q15_t **) &pInVec); - vecData2 = read_q15x2_ia ((q15_t **) &pInVec); - matData = read_q15x2_ia ((q15_t **) &pInA1); - matData2 = read_q15x2_ia ((q15_t **) &pInA1); + vecData = read_q15x2_ia (&pInVec); + vecData2 = read_q15x2_ia (&pInVec); + matData = read_q15x2_ia (&pInA1); + matData2 = read_q15x2_ia (&pInA1); sum = __SMLALD(matData, vecData, sum); sum = __SMLALD(matData2, vecData2, sum); colCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q31.c index f9ab581..6e0b855 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q31.c @@ -5,12 +5,14 @@ * Title: arm_mat_vec_mult_q31.c * Description: Q31 matrix and vector multiplication * - * $Date: 07. July 202 + * $Date: 23 April 2021 * - * Target Processor: Cortex-M cores + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -286,16 +288,16 @@ void arm_mat_vec_mult_q31(const arm_matrix_instance_q31 *pSrcMat, const q31_t *p /* The following loop performs the dot-product of each row in pSrcA with the vector */ /* row loop */ while (row > 0) { - /* For every row wise process, the pInVec pointer is set - ** to the starting address of the vector */ - pInVec = pVec; - /* Initialize accumulators */ q63_t sum1 = 0; q63_t sum2 = 0; q63_t sum3 = 0; q63_t sum4 = 0; + /* For every row wise process, the pInVec pointer is set + ** to the starting address of the vector */ + pInVec = pVec; + /* Loop unrolling: process 2 columns per iteration */ colCnt = numCols; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q7.c index d4e4d21..5262ce3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/MatrixFunctions/arm_mat_vec_mult_q7.c @@ -5,12 +5,14 @@ * Title: arm_mat_vec_mult_q7.c * Description: Q7 matrix and vector multiplication * - * $Date: 07. July 202 + * $Date: 23 April 2021 * - * Target Processor: Cortex-M cores + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -301,16 +303,16 @@ void arm_mat_vec_mult_q7(const arm_matrix_instance_q7 *pSrcMat, const q7_t *pVec /* The following loop performs the dot-product of each row in pSrcA with the vector */ while (row > 0) { - /* For every row wise process, the pInVec pointer is set - ** to the starting address of the vector */ - pInVec = pVec; - /* Initialize accumulators */ q31_t sum1 = 0; q31_t sum2 = 0; q31_t sum3 = 0; q31_t sum4 = 0; + /* For every row wise process, the pInVec pointer is set + ** to the starting address of the vector */ + pInVec = pVec; + /* Loop unrolling: process 4 columns per iteration */ colCnt = numCols >> 2; @@ -325,26 +327,26 @@ void arm_mat_vec_mult_q7(const arm_matrix_instance_q7 *pSrcMat, const q7_t *pVec while (colCnt > 0u) { // Read 4 values from vector - vecData = read_q7x4_ia ((q7_t **) &pInVec); + vecData = read_q7x4_ia (&pInVec); vecData2 = __SXTB16(__ROR(vecData, 8)); vecData = __SXTB16(vecData); // Read 16 values from the matrix - 4 values from each of 4 rows, and do multiply accumulate - matData = read_q7x4_ia ((q7_t **) &pInA1); + matData = read_q7x4_ia (&pInA1); matData2 = __SXTB16(__ROR(matData, 8)); matData = __SXTB16(matData); sum1 = __SMLAD(matData, vecData, sum1); sum1 = __SMLAD(matData2, vecData2, sum1); - matData = read_q7x4_ia ((q7_t **) &pInA2); + matData = read_q7x4_ia (&pInA2); matData2 = __SXTB16(__ROR(matData, 8)); matData = __SXTB16(matData); sum2 = __SMLAD(matData, vecData, sum2); sum2 = __SMLAD(matData2, vecData2, sum2); - matData = read_q7x4_ia ((q7_t **) &pInA3); + matData = read_q7x4_ia (&pInA3); matData2 = __SXTB16(__ROR(matData, 8)); matData = __SXTB16(matData); sum3 = __SMLAD(matData, vecData, sum3); sum3 = __SMLAD(matData2, vecData2, sum3); - matData = read_q7x4_ia ((q7_t **) &pInA4); + matData = read_q7x4_ia (&pInA4); matData2 = 
__SXTB16(__ROR(matData, 8)); matData = __SXTB16(matData); sum4 = __SMLAD(matData, vecData, sum4); @@ -391,10 +393,10 @@ void arm_mat_vec_mult_q7(const arm_matrix_instance_q7 *pSrcMat, const q7_t *pVec colCnt = numCols >> 2; while (colCnt > 0) { - vecData = read_q7x4_ia ((q7_t **) &pInVec); + vecData = read_q7x4_ia (&pInVec); vecData2 = __SXTB16(__ROR(vecData, 8)); vecData = __SXTB16(vecData); - matData = read_q7x4_ia ((q7_t **) &pInA1); + matData = read_q7x4_ia (&pInA1); matData2 = __SXTB16(__ROR(matData, 8)); matData = __SXTB16(matData); sum = __SMLAD(matData, vecData, sum); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion2rotation_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion2rotation_f32.c index 25ff0de..6d1ee09 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion2rotation_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion2rotation_f32.c @@ -5,8 +5,10 @@ * Title: arm_quaternion2rotation_f32.c * Description: Floating-point quaternion 2 rotation conversion * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. @@ -145,7 +147,8 @@ void arm_quaternion2rotation_f32(const float32_t *pInputQuaternions, float32_t *pOutputRotations, uint32_t nbQuaternions) { - for(uint32_t nb=0; nb < nbQuaternions; nb++) + uint32_t nb; + for(nb=0; nb < nbQuaternions; nb++) { float32_t q00 = SQ(pInputQuaternions[0 + nb * 4]); float32_t q11 = SQ(pInputQuaternions[1 + nb * 4]); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_conjugate_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_conjugate_f32.c index c6d6df1..c3d80f9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_conjugate_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_conjugate_f32.c @@ -5,8 +5,10 @@ * Title: arm_quaternion_conjugate_f32.c * Description: Floating-point quaternion conjugate * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
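/*
 * What one __SXTB16 / __ROR / __SMLAD step above boils down to, written as
 * plain C (illustrative sketch): four signed 8-bit products accumulated into
 * a 32-bit sum. The intrinsics perform the same arithmetic on packed 32-bit
 * words.
 */
#include <stdint.h>

static int32_t q7_dot4_ref(const int8_t *matRow, const int8_t *vec, int32_t acc)
{
    for (int k = 0; k < 4; k++)
    {
        acc += (int32_t)matRow[k] * (int32_t)vec[k];   /* widen, multiply, accumulate */
    }
    return acc;
}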
@@ -81,7 +83,8 @@ void arm_quaternion_conjugate_f32(const float32_t *pInputQuaternions, float32_t *pConjugateQuaternions, uint32_t nbQuaternions) { - for(uint32_t i=0; i < nbQuaternions; i++) + uint32_t i; + for(i=0; i < nbQuaternions; i++) { pConjugateQuaternions[4 * i + 0] = pInputQuaternions[4 * i + 0]; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_inverse_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_inverse_f32.c index df24db7..d4227eb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_inverse_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_inverse_f32.c @@ -5,8 +5,10 @@ * Title: arm_quaternion_inverse_f32.c * Description: Floating-point quaternion inverse * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. @@ -92,7 +94,8 @@ void arm_quaternion_inverse_f32(const float32_t *pInputQuaternions, { float32_t temp; - for(uint32_t i=0; i < nbQuaternions; i++) + uint32_t i; + for(i=0; i < nbQuaternions; i++) { temp = SQ(pInputQuaternions[4 * i + 0]) + diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_norm_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_norm_f32.c index a793d01..e5a6130 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_norm_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_norm_f32.c @@ -5,8 +5,10 @@ * Title: arm_quaternion_norm_f32.c * Description: Floating-point quaternion Norm * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
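/*
 * Scalar sketch of the quaternion inverse computed nearby (not the CMSIS
 * kernel): q^-1 = conjugate(q) / ||q||^2, with quaternions stored as
 * {w, x, y, z}, matching the [4 * i + 0 .. 3] indexing used in these files.
 */
static void quaternion_inverse_ref(const float q[4], float out[4])
{
    float n2 = q[0] * q[0] + q[1] * q[1] + q[2] * q[2] + q[3] * q[3];

    out[0] =  q[0] / n2;    /* real part keeps its sign      */
    out[1] = -q[1] / n2;    /* vector part is negated ...    */
    out[2] = -q[2] / n2;
    out[3] = -q[3] / n2;    /* ... and scaled by 1 / ||q||^2 */
}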
@@ -84,8 +86,9 @@ void arm_quaternion_norm_f32(const float32_t *pInputQuaternions, uint32_t nbQuaternions) { float32_t temp; + uint32_t i; - for(uint32_t i=0; i < nbQuaternions; i++) + for(i=0; i < nbQuaternions; i++) { temp = SQ(pInputQuaternions[4 * i + 0]) + SQ(pInputQuaternions[4 * i + 1]) + diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_normalize_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_normalize_f32.c index 70ec340..1380f6b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_normalize_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_normalize_f32.c @@ -5,8 +5,10 @@ * Title: arm_quaternion_normalize_f32.c * Description: Floating-point quaternion normalization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. @@ -85,7 +87,8 @@ void arm_quaternion_normalize_f32(const float32_t *pInputQuaternions, { float32_t temp; - for(uint32_t i=0; i < nbQuaternions; i++) + uint32_t i; + for(i=0; i < nbQuaternions; i++) { temp = SQ(pInputQuaternions[4 * i + 0]) + SQ(pInputQuaternions[4 * i + 1]) + diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_product_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_product_f32.c index bfb996d..fef8388 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_product_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_product_f32.c @@ -5,8 +5,10 @@ * Title: arm_quaternion_product_f32.c * Description: Floating-point quaternion product * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
@@ -132,7 +134,8 @@ void arm_quaternion_product_f32(const float32_t *qa, float32_t *qr, uint32_t nbQuaternions) { - for(uint32_t i=0; i < nbQuaternions; i++) + uint32_t i; + for(i=0; i < nbQuaternions; i++) { arm_quaternion_product_single_f32(qa, qb, qr); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_product_single_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_product_single_f32.c index 54f56e8..e8149fd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_product_single_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_quaternion_product_single_f32.c @@ -5,8 +5,10 @@ * Title: arm_quaternion_product_single_f32.c * Description: Floating-point quaternion product * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_rotation2quaternion_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_rotation2quaternion_f32.c index 0632ce7..54d56a1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_rotation2quaternion_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/QuaternionMathFunctions/arm_rotation2quaternion_f32.c @@ -5,8 +5,10 @@ * Title: arm_rotation2quaternion_f32.c * Description: Floating-point rotation to quaternion conversion * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
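/*
 * Plain-C sketch of the Hamilton product that arm_quaternion_product_single_f32
 * is assumed to implement (layout {w, x, y, z}); shown only to make the
 * per-quaternion loop above easier to follow.
 */
static void quaternion_product_ref(const float a[4], const float b[4], float r[4])
{
    r[0] = a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3];
    r[1] = a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2];
    r[2] = a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1];
    r[3] = a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0];
}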
@@ -100,51 +102,51 @@ void arm_rotation2quaternion_f32(const float32_t *pInputRotations, if (trace > 0) { - (void)arm_sqrt_f32(trace + 1.0, &doubler) ; // invs=4*qw - doubler = 2*doubler; - s = 1.0 / doubler; + (void)arm_sqrt_f32(trace + 1.0f, &doubler) ; // invs=4*qw + doubler = 2.0f*doubler; + s = 1.0f / doubler; q1 = vmulq_n_f32(q1,s); q2 = vmulq_n_f32(q2,s); - q[0] = 0.25 * doubler; + q[0] = 0.25f * doubler; q[1] = R21 - R12; q[2] = R02 - R20; q[3] = R10 - R01; } else if ((R00 > R11) && (R00 > R22) ) { - (void)arm_sqrt_f32(1.0 + R00 - R11 - R22,&doubler); // invs=4*qx - doubler = 2*doubler; - s = 1.0 / doubler; + (void)arm_sqrt_f32(1.0f + R00 - R11 - R22,&doubler); // invs=4*qx + doubler = 2.0f*doubler; + s = 1.0f / doubler; q1 = vmulq_n_f32(q1,s); q2 = vmulq_n_f32(q2,s); q[0] = R21 - R12; - q[1] = 0.25 * doubler; + q[1] = 0.25f * doubler; q[2] = R01 + R10; q[3] = R02 + R20; } else if (R11 > R22) { - (void)arm_sqrt_f32(1.0 + R11 - R00 - R22,&doubler); // invs=4*qy - doubler = 2*doubler; - s = 1.0 / doubler; + (void)arm_sqrt_f32(1.0f + R11 - R00 - R22,&doubler); // invs=4*qy + doubler = 2.0f*doubler; + s = 1.0f / doubler; q1 = vmulq_n_f32(q1,s); q2 = vmulq_n_f32(q2,s); q[0] = R02 - R20; q[1] = R01 + R10; - q[2] = 0.25 * doubler; + q[2] = 0.25f * doubler; q[3] = R12 + R21; } else { - (void)arm_sqrt_f32(1.0 + R22 - R00 - R11,&doubler); // invs=4*qz - doubler = 2*doubler; - s = 1.0 / doubler; + (void)arm_sqrt_f32(1.0f + R22 - R00 - R11,&doubler); // invs=4*qz + doubler = 2.0f*doubler; + s = 1.0f / doubler; q1 = vmulq_n_f32(q1,s); q2 = vmulq_n_f32(q2,s); @@ -152,7 +154,7 @@ void arm_rotation2quaternion_f32(const float32_t *pInputRotations, q[0] = R10 - R01; q[1] = R02 + R20; q[2] = R12 + R21; - q[3] = 0.25 * doubler; + q[3] = 0.25f * doubler; } vst1q(pOutputQuaternions, q); @@ -166,7 +168,8 @@ void arm_rotation2quaternion_f32(const float32_t *pInputRotations, float32_t *pOutputQuaternions, uint32_t nbQuaternions) { - for(uint32_t nb=0; nb < nbQuaternions; nb++) + uint32_t nb; + for(nb=0; nb < nbQuaternions; nb++) { const float32_t *r=&pInputRotations[nb*9]; float32_t *q=&pOutputQuaternions[nb*4]; @@ -178,41 +181,41 @@ void arm_rotation2quaternion_f32(const float32_t *pInputRotations, - if (trace > 0) + if (trace > 0.0f) { - doubler = sqrtf(trace + 1.0) * 2; // invs=4*qw - s = 1.0 / doubler; - q[0] = 0.25 * doubler; + doubler = sqrtf(trace + 1.0f) * 2.0f; // invs=4*qw + s = 1.0f / doubler; + q[0] = 0.25f * doubler; q[1] = (RI(2,1) - RI(1,2)) * s; q[2] = (RI(0,2) - RI(2,0)) * s; q[3] = (RI(1,0) - RI(0,1)) * s; } else if ((RI(0,0) > RI(1,1)) && (RI(0,0) > RI(2,2)) ) { - doubler = sqrtf(1.0 + RI(0,0) - RI(1,1) - RI(2,2)) * 2; // invs=4*qx - s = 1.0 / doubler; + doubler = sqrtf(1.0f + RI(0,0) - RI(1,1) - RI(2,2)) * 2.0f; // invs=4*qx + s = 1.0f / doubler; q[0] = (RI(2,1) - RI(1,2)) * s; - q[1] = 0.25 * doubler; + q[1] = 0.25f * doubler; q[2] = (RI(0,1) + RI(1,0)) * s; q[3] = (RI(0,2) + RI(2,0)) * s; } else if (RI(1,1) > RI(2,2)) { - doubler = sqrtf(1.0 + RI(1,1) - RI(0,0) - RI(2,2)) * 2; // invs=4*qy - s = 1.0 / doubler; + doubler = sqrtf(1.0f + RI(1,1) - RI(0,0) - RI(2,2)) * 2.0f; // invs=4*qy + s = 1.0f / doubler; q[0] = (RI(0,2) - RI(2,0)) * s; q[1] = (RI(0,1) + RI(1,0)) * s; - q[2] = 0.25 * doubler; + q[2] = 0.25f * doubler; q[3] = (RI(1,2) + RI(2,1)) * s; } else { - doubler = sqrtf(1.0 + RI(2,2) - RI(0,0) - RI(1,1)) * 2; // invs=4*qz - s = 1.0 / doubler; + doubler = sqrtf(1.0f + RI(2,2) - RI(0,0) - RI(1,1)) * 2.0f; // invs=4*qz + s = 1.0f / doubler; q[0] = (RI(1,0) - RI(0,1)) * s; q[1] = 
(RI(0,2) + RI(2,0)) * s; q[2] = (RI(1,2) + RI(2,1)) * s; - q[3] = 0.25 * doubler; + q[3] = 0.25f * doubler; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/SVMFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/SVMFunctionsF16.c deleted file mode 100644 index 0463afe..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/SVMFunctionsF16.c +++ /dev/null @@ -1,40 +0,0 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: BayesFunctions.c - * Description: Combination of all SVM function source files. - * - * $Date: 16. March 2020 - * $Revision: V1.0.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2020 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -#include "arm_svm_linear_init_f16.c" -#include "arm_svm_linear_predict_f16.c" -#include "arm_svm_polynomial_init_f16.c" -#include "arm_svm_polynomial_predict_f16.c" -#include "arm_svm_rbf_init_f16.c" -#include "arm_svm_rbf_predict_f16.c" -#include "arm_svm_sigmoid_init_f16.c" -#include "arm_svm_sigmoid_predict_f16.c" - -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_init_f16.c index 1190975..71bb9cb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_init_f16.c @@ -5,11 +5,13 @@ * Title: arm_svm_linear_init_f16.c * Description: SVM Linear Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -33,20 +35,11 @@ #include #include -/** - * @defgroup groupSVM SVM Functions - * - */ /** @ingroup groupSVM */ -/** - @defgroup linearsvm Linear SVM - - Linear SVM classifier - */ /** * @addtogroup linearsvm diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_init_f32.c index 989bf76..4c92653 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_init_f32.c @@ -5,11 +5,13 @@ * Title: arm_svm_linear_init_f32.c * Description: SVM Linear Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_predict_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_predict_f16.c index 2f9ca3c..8e5a55c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_predict_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_predict_f16.c @@ -5,11 +5,13 @@ * Title: arm_svm_linear_predict_f16.c * Description: SVM Linear Classifier * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -298,9 +300,9 @@ void arm_svm_linear_predict_f16( dot=0; for(j=0; j < S->vectorDimension; j++) { - dot = dot + in[j]* *pSupport++; + dot = (_Float16)dot + (_Float16)in[j]* (_Float16)*pSupport++; } - sum += S->dualCoefficients[i] * dot; + sum += (_Float16)S->dualCoefficients[i] * (_Float16)dot; } *pResult=S->classes[STEP(sum)]; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_predict_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_predict_f32.c index b6d1dfe..8cf9678 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_predict_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_linear_predict_f32.c @@ -5,11 +5,13 @@ * Title: arm_svm_linear_predict_f32.c * Description: SVM Linear Classifier * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. 
All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_init_f16.c index 9dfe908..a2ed980 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_init_f16.c @@ -5,11 +5,13 @@ * Title: arm_svm_polynomial_init_f16.c * Description: SVM Polynomial Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,11 +39,6 @@ @ingroup groupSVM */ -/** - @defgroup polysvm Polynomial SVM - - Polynomial SVM classifier - */ /** * @addtogroup polysvm diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_init_f32.c index cef8d12..082399b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_init_f32.c @@ -5,11 +5,13 @@ * Title: arm_svm_polynomial_init_f32.c * Description: SVM Polynomial Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_predict_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_predict_f16.c index 3e8a127..3cd6912 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_predict_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_predict_f16.c @@ -5,11 +5,13 @@ * Title: arm_svm_polynomial_predict_f16.c * Description: SVM Polynomial Classifier * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -33,6 +35,28 @@ #include #include +#if !defined(ARM_MATH_MVE_FLOAT16) || defined(ARM_MATH_AUTOVECTORIZE) + +/* + +_Float16 is not supported in g++ so we avoid putting _Float16 definitions +in the public headers. 
+ +This function should at some point be moved in FastMath. + +*/ +__STATIC_INLINE float16_t arm_exponent_f16(float16_t x, int32_t nb) +{ + float16_t r = x; + nb --; + while(nb > 0) + { + r = (_Float16)r * (_Float16)x; + nb--; + } + return(r); +} +#endif /** * @addtogroup polysvm @@ -40,6 +64,13 @@ */ + + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h" + /** * @brief SVM polynomial prediction * @param[in] S Pointer to an instance of the polynomial SVM structure. @@ -48,12 +79,6 @@ * @return none. * */ - -#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) - -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_math_f16.h" - void arm_svm_polynomial_predict_f16( const arm_svm_polynomial_instance_f16 *S, const float16_t * in, @@ -303,6 +328,16 @@ void arm_svm_polynomial_predict_f16( } #else + + +/** + * @brief SVM polynomial prediction + * @param[in] S Pointer to an instance of the polynomial SVM structure. + * @param[in] in Pointer to input vector + * @param[out] pResult Decision value + * @return none. + * + */ void arm_svm_polynomial_predict_f16( const arm_svm_polynomial_instance_f16 *S, const float16_t * in, @@ -318,9 +353,9 @@ void arm_svm_polynomial_predict_f16( dot=0; for(j=0; j < S->vectorDimension; j++) { - dot = dot + (_Float16)in[j]* (_Float16)*pSupport++; + dot = (_Float16)dot + (_Float16)in[j]* (_Float16)*pSupport++; } - sum += S->dualCoefficients[i] * (_Float16)arm_exponent_f16(S->gamma * dot + S->coef0, S->degree); + sum += (_Float16)S->dualCoefficients[i] * (_Float16)arm_exponent_f16((_Float16)S->gamma * (_Float16)dot + (_Float16)S->coef0, S->degree); } *pResult=S->classes[STEP(sum)]; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_predict_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_predict_f32.c index 31fc471..2d97e2b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_predict_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_polynomial_predict_f32.c @@ -5,11 +5,13 @@ * Title: arm_svm_polynomial_predict_f32.c * Description: SVM Polynomial Classifier * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_init_f16.c index 1f0bcf5..5b2492f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_init_f16.c @@ -5,11 +5,13 @@ * Title: arm_svm_rbf_init_f16.c * Description: SVM Radial Basis Function Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,11 +39,6 @@ @ingroup groupSVM */ -/** - @defgroup rbfsvm RBF SVM - - RBF SVM classifier - */ /** diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_init_f32.c index cd2c620..9fddb02 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_init_f32.c @@ -5,11 +5,13 @@ * Title: arm_svm_rbf_init_f32.c * Description: SVM Radial Basis Function Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_predict_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_predict_f16.c index 056562f..15dd7e6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_predict_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_predict_f16.c @@ -5,11 +5,13 @@ * Title: arm_svm_rbf_predict_f16.c * Description: SVM Radial Basis Function Classifier * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -70,7 +72,7 @@ void arm_svm_rbf_predict_f16( uint32_t blkCnt; /* loop counters */ const float16_t *pDualCoef = S->dualCoefficients; _Float16 sum = S->intercept; - f16x8_t vSum = vdupq_n_f16(0); + f16x8_t vSum = vdupq_n_f16(0.0f16); row = numRows; @@ -97,10 +99,10 @@ void arm_svm_rbf_predict_f16( /* * reset accumulators */ - acc0 = vdupq_n_f16(0.0f); - acc1 = vdupq_n_f16(0.0f); - acc2 = vdupq_n_f16(0.0f); - acc3 = vdupq_n_f16(0.0f); + acc0 = vdupq_n_f16(0.0f16); + acc1 = vdupq_n_f16(0.0f16); + acc2 = vdupq_n_f16(0.0f16); + acc3 = vdupq_n_f16(0.0f16); pSrcA0Vec = pInA0; pSrcA1Vec = pInA1; @@ -170,7 +172,7 @@ void arm_svm_rbf_predict_f16( vSum = vfmaq_m_f16(vSum, vld1q(pDualCoef), - vexpq_f16(vmulq_n_f16(vtmp, -S->gamma)),vctp16q(4)); + vexpq_f16(vmulq_n_f16(vtmp, -(_Float16)S->gamma)),vctp16q(4)); pDualCoef += 4; pSrcA += numCols * 4; /* @@ -199,8 +201,8 @@ void arm_svm_rbf_predict_f16( /* * reset accumulators */ - acc0 = vdupq_n_f16(0.0f); - acc1 = vdupq_n_f16(0.0f); + acc0 = vdupq_n_f16(0.0f16); + acc1 = vdupq_n_f16(0.0f16); pSrcA0Vec = pInA0; pSrcA1Vec = pInA1; @@ -248,7 +250,7 @@ void arm_svm_rbf_predict_f16( vSum = vfmaq_m_f16(vSum, vld1q(pDualCoef), - vexpq_f16(vmulq_n_f16(vtmp, -S->gamma)), vctp16q(2)); + vexpq_f16(vmulq_n_f16(vtmp, -(_Float16)S->gamma)), vctp16q(2)); pDualCoef += 2; pSrcA += numCols * 2; @@ -309,12 +311,12 @@ void arm_svm_rbf_predict_f16( vSum = vfmaq_m_f16(vSum, vld1q(pDualCoef), - vexpq_f16(vmulq_n_f16(vtmp, -S->gamma)), vctp16q(1)); + vexpq_f16(vmulq_n_f16(vtmp, -(_Float16)S->gamma)), vctp16q(1)); } - sum += vecAddAcrossF16Mve(vSum); + sum += (_Float16)vecAddAcrossF16Mve(vSum); *pResult = S->classes[STEP(sum)]; } @@ -337,7 +339,7 @@ void arm_svm_rbf_predict_f16( dot = dot + SQ((_Float16)in[j] - (_Float16) *pSupport); pSupport++; } - sum += (_Float16)S->dualCoefficients[i] * (_Float16)expf(-(_Float16)S->gamma * dot); + sum += (_Float16)S->dualCoefficients[i] * (_Float16)expf((float32_t)(-(_Float16)S->gamma * (_Float16)dot)); } *pResult=S->classes[STEP(sum)]; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_predict_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_predict_f32.c index 52ab0d5..87d71e3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_predict_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_rbf_predict_f32.c @@ -5,11 +5,13 @@ * Title: arm_svm_rbf_predict_f32.c * Description: SVM Radial Basis Function Classifier * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_init_f16.c index 60f33af..33aaf42 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_init_f16.c @@ -5,11 +5,13 @@ * Title: arm_svm_sigmoid_predict_f16.c * Description: SVM Sigmoid Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,11 +39,6 @@ @ingroup groupSVM */ -/** - @defgroup sigmoidsvm Sigmoid SVM - - Sigmoid SVM classifier - */ /** * @addtogroup sigmoidsvm diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_init_f32.c index a483345..2274e72 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_init_f32.c @@ -5,11 +5,13 @@ * Title: arm_svm_sigmoid_predict_f32.c * Description: SVM Sigmoid Instance Initialization * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_predict_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_predict_f16.c index dcce835..572bc83 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_predict_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_predict_f16.c @@ -5,11 +5,13 @@ * Title: arm_svm_sigmoid_predict_f16.c * Description: SVM Sigmoid Classifier * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -295,7 +297,7 @@ void arm_svm_sigmoid_predict_f16( vtanhq_f16(vaddq_n_f16(vmulq_n_f16(vtmp, S->gamma), S->coef0)), vctp16q(1)); } - sum += vecAddAcrossF16Mve(vSum); + sum += (_Float16)vecAddAcrossF16Mve(vSum); *pResult = S->classes[STEP(sum)]; } @@ -316,9 +318,9 @@ void arm_svm_sigmoid_predict_f16( dot=0.0f16; for(j=0; j < S->vectorDimension; j++) { - dot = dot + (_Float16)in[j] * (_Float16)*pSupport++; + dot = (_Float16)dot + (_Float16)in[j] * (_Float16)*pSupport++; } - sum += (_Float16)S->dualCoefficients[i] * (_Float16)tanhf((_Float16)S->gamma * dot + (_Float16)S->coef0); + sum += (_Float16)S->dualCoefficients[i] * (_Float16)tanhf((float32_t)((_Float16)S->gamma * (_Float16)dot + (_Float16)S->coef0)); } *pResult=S->classes[STEP(sum)]; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_predict_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_predict_f32.c index 94b2a50..b607820 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_predict_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SVMFunctions/arm_svm_sigmoid_predict_f32.c @@ -5,11 +5,13 @@ * Title: arm_svm_sigmoid_predict_f32.c * Description: SVM Sigmoid Classifier * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/StatisticsFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/StatisticsFunctionsF16.c deleted file mode 100644 index 96a08a5..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/StatisticsFunctionsF16.c +++ /dev/null @@ -1,44 +0,0 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: StatisticsFunctions.c - * Description: Combination of all statistics function source files. - * - * $Date: 16. March 2020 - * $Revision: V1.1.0 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ -/* - * Copyright (C) 2019-2020 ARM Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -#include "arm_max_f16.c" -#include "arm_min_f16.c" -#include "arm_mean_f16.c" -#include "arm_power_f16.c" -#include "arm_rms_f16.c" -#include "arm_std_f16.c" -#include "arm_var_f16.c" -#include "arm_entropy_f16.c" -#include "arm_kullback_leibler_f16.c" -#include "arm_logsumexp_dot_prod_f16.c" -#include "arm_logsumexp_f16.c" -#include "arm_max_no_idx_f16.c" - -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f16.c new file mode 100644 index 0000000..2c50961 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f16.c @@ -0,0 +1,278 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_f16.c + * Description: Maximum value of a absolute values of a floating-point vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a floating-point vector. 
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @param[out] pIndex index of maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_absmax_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult, + uint32_t * pIndex) +{ + uint16_t blkCnt; /* loop counters */ + f16x8_t vecSrc; + float16_t const *pSrcVec; + f16x8_t curExtremValVec = vdupq_n_f16(F16_ABSMIN); + float16_t maxValue = F16_ABSMIN; + uint16_t idx = blockSize; + uint16x8_t indexVec; + uint16x8_t curExtremIdxVec; + mve_pred16_t p0; + + + indexVec = vidupq_u16((uint32_t)0, 1); + curExtremIdxVec = vdupq_n_u16(0); + + pSrcVec = (float16_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0U) + { + vecSrc = vldrhq_f16(pSrcVec); + pSrcVec += 8; + vecSrc = vabsq(vecSrc); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpgeq(vecSrc, curExtremValVec); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + + indexVec = indexVec + 8; + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0U) + { + vecSrc = vldrhq_f16(pSrcVec); + pSrcVec += 8; + vecSrc = vabsq(vecSrc); + + p0 = vctp16q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpgeq_m(vecSrc, curExtremValVec, p0); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxnmvq(maxValue, curExtremValVec); + /* + * set index for lower values to max possible index + */ + p0 = vcmpgeq(curExtremValVec, maxValue); + indexVec = vpselq(curExtremIdxVec, vdupq_n_u16(blockSize), p0); + /* + * Get min index which is thus for a max value + */ + idx = vminvq(idx, indexVec); + /* + * Save result + */ + *pIndex = idx; + *pResult = maxValue; +} +#else +#if defined(ARM_MATH_LOOPUNROLL) +void arm_absmax_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult, + uint32_t * pIndex) +{ + float16_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = ((_Float16)out > 0.0f16) ? out : -(_Float16)out; \ + /* Initialize index of extrema value. */ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + /* compare for the extrema value */ \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? 
cur_absmax : -(_Float16)cur_absmax; \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + out = cur_absmax; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + out = cur_absmax; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + out = cur_absmax; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + out = cur_absmax; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmax_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult, + uint32_t * pIndex) +{ + float16_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. */ + outIndex = 0U; + + /* Load first input value that act as reference value for comparision */ + out = (_Float16)fabsf((float32_t)*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = (_Float16)fabsf((float32_t)*pSrc++); + + /* compare for the maximum value */ + if ((_Float16)out < (_Float16)maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_LOOPUNROLL) */ +#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f32.c new file mode 100644 index 0000000..7ddc9ae --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f32.c @@ -0,0 +1,264 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_f32.c + * Description: Maximum value of absolute values of a floating-point vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + +/** + @ingroup groupStats + */ + +/** + @defgroup AbsMax Absolute Maximum + + Computes the maximum value of absolute values of an array of data. + The function returns both the maximum value and its position within the array. + There are separate functions for floating-point, Q31, Q15, and Q7 data types. + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @param[out] pIndex index of maximum value returned here + @return none + */ +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_absmax_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex) +{ + int32_t blkSize = blockSize; + f32x4_t vecSrc; + f32x4_t curExtremValVec = vdupq_n_f32(F32_ABSMIN); + float32_t maxValue = F32_ABSMIN; + uint32_t idx = blockSize; + uint32x4_t indexVec; + uint32x4_t curExtremIdxVec; + uint32_t curIdx = 0; + mve_pred16_t p0; + + + indexVec = vidupq_wb_u32(&curIdx, 1); + curExtremIdxVec = vdupq_n_u32(0); + + do { + mve_pred16_t p = vctp32q(blkSize); + + vecSrc = vldrwq_z_f32((float32_t const *) pSrc, p); + vecSrc = vabsq_m(vuninitializedq_f32(), vecSrc, p); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpgeq_m(vecSrc, curExtremValVec, p); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + + /* Does TP detection works here ?? */ + indexVec = vidupq_wb_u32(&curIdx, 1); + + blkSize -= 4; + pSrc += 4; + } + while (blkSize > 0); + + /* + * Get max value across the vector + */ + maxValue = vmaxnmvq(maxValue, curExtremValVec); + /* + * set index for lower values to max possible index + */ + p0 = vcmpgeq(curExtremValVec, maxValue); + indexVec = vpselq(curExtremIdxVec, vdupq_n_u32(blockSize), p0); + /* + * Get min index which is thus for a max value + */ + idx = vminvq(idx, indexVec); + /* + * Save result + */ + *pIndex = idx; + *pResult = maxValue; +} + + +#else +#if defined(ARM_MATH_LOOPUNROLL) +void arm_absmax_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex) +{ + float32_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0.0f) ? 
out : -out; \ + /* Initialize index of extrema value. */ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + /* compare for the extrema value */ \ + if (cur_absmax > out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmax_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex) +{ + float32_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + + + /* Initialise index value to zero. 
*/ + outIndex = 0U; + + /* Load first input value that act as reference value for comparision */ + out = fabsf(*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = fabsf(*pSrc++); + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_LOOPUNROLL) */ +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f64.c new file mode 100644 index 0000000..23a4e4e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_f64.c @@ -0,0 +1,96 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_f64.c + * Description: Maximum value of absolute values of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @param[out] pIndex index of maximum value returned here + @return none + */ +void arm_absmax_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult, + uint32_t * pIndex) +{ + float64_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + + + /* Initialise index value to zero. 
*/ + outIndex = 0U; + + /* Load first input value that act as reference value for comparision */ + out = fabs(*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = fabs(*pSrc++); + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} + +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f16.c new file mode 100644 index 0000000..d1c225c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f16.c @@ -0,0 +1,232 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_no_idx_f16.c + * Description: Maximum value of a absolute values of a floating-point vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_absmax_no_idx_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult) +{ + uint16_t blkCnt; /* loop counters */ + f16x8_t vecSrc; + float16_t const *pSrcVec; + f16x8_t curExtremValVec = vdupq_n_f16(F16_ABSMIN); + float16_t maxValue = F16_ABSMIN; + mve_pred16_t p0; + + + pSrcVec = (float16_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0) + { + vecSrc = vldrhq_f16(pSrcVec); + pSrcVec += 8; + /* + * update per-lane max. 
+ */ + curExtremValVec = vmaxnmaq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0U) + { + vecSrc = vldrhq_f16(pSrcVec); + pSrcVec += 8; + p0 = vctp16q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + curExtremValVec = vmaxnmaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxnmavq(maxValue, curExtremValVec); + *pResult = maxValue; +} +#else +#if defined(ARM_MATH_LOOPUNROLL) +void arm_absmax_no_idx_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult) +{ + float16_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = ((_Float16)out > 0.0f16) ? out : -(_Float16)out; \ + \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + /* compare for the extrema value */ \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + out = cur_absmax; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = ((_Float16)cur_absmax > 0.0f16) ? cur_absmax : -(_Float16)cur_absmax; \ + if ((_Float16)cur_absmax > (_Float16)out) \ + { \ + out = cur_absmax; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmax_no_idx_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult) +{ + float16_t maxVal, out; /* Temporary variables to store the output value. 
*/ + uint32_t blkCnt; /* Loop counter */ + + + + /* Load first input value that act as reference value for comparision */ + out = (_Float16)fabsf((float32_t)*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = (_Float16)fabsf((float32_t)*pSrc++); + + /* compare for the maximum value */ + if ((_Float16)out < (_Float16)maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_LOOPUNROLL) */ +#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f32.c new file mode 100644 index 0000000..485ccd5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f32.c @@ -0,0 +1,229 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_no_idx_f32.c + * Description: Maximum value of absolute values of a floating-point vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a floating-point vector. 
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_absmax_no_idx_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + f32x4_t vecSrc; + float32_t const *pSrcVec; + f32x4_t curExtremValVec = vdupq_n_f32(F32_ABSMIN); + float32_t maxValue = F32_ABSMIN; + mve_pred16_t p0; + + + pSrcVec = (float32_t const *) pSrc; + blkCnt = blockSize >> 2; + while (blkCnt > 0) + { + vecSrc = vldrwq_f32(pSrcVec); + pSrcVec += 4; + /* + * update per-lane max. + */ + curExtremValVec = vmaxnmaq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 3; + if (blkCnt > 0) + { + vecSrc = vldrwq_f32(pSrcVec); + pSrcVec += 4; + p0 = vctp32q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + curExtremValVec = vmaxnmaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxnmavq(maxValue, curExtremValVec); + *pResult = maxValue; +} + + +#else +#if defined(ARM_MATH_LOOPUNROLL) +void arm_absmax_no_idx_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + float32_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0.0f) ? out : -out; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + /* compare for the extrema value */ \ + if (cur_absmax > out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0.0f) ? cur_absmax : -cur_absmax; \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmax_no_idx_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + float32_t maxVal, out; /* Temporary variables to store the output value. 
*/ + uint32_t blkCnt; /* Loop counter */ + + + + + + /* Load first input value that act as reference value for comparision */ + out = fabsf(*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = fabsf(*pSrc++); + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_LOOPUNROLL) */ +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f64.c new file mode 100644 index 0000000..017c588 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_f64.c @@ -0,0 +1,91 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_no_idx_f64.c + * Description: Maximum value of absolute values of a floating-point vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ +void arm_absmax_no_idx_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult) +{ + float64_t maxVal, out; /* Temporary variables to store the output value. 
*/ + uint32_t blkCnt; /* Loop counter */ + + + + + + /* Load first input value that act as reference value for comparision */ + out = fabs(*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = fabs(*pSrc++); + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; +} + +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q15.c new file mode 100644 index 0000000..9c3a86a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q15.c @@ -0,0 +1,224 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_no_idx_q15.c + * Description: Maximum value of absolute values of a Q15 vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a Q15 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_absmax_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + uint16_t blkCnt; /* loop counters */ + q15x8_t vecSrc; + q15_t const *pSrcVec; + uint16x8_t curExtremValVec = vdupq_n_s16(Q15_ABSMIN); + q15_t maxValue = Q15_ABSMIN; + mve_pred16_t p0; + + + pSrcVec = (q15_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0U) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + /* + * update per-lane max. 
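+     * (each of the 8 lanes of curExtremValVec keeps the largest |sample| seen so far in that lane)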
+ */ + curExtremValVec = vmaxaq(curExtremValVec, vecSrc); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0U) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + p0 = vctp16q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + curExtremValVec = vmaxaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxavq(maxValue, (q15x8_t)curExtremValVec); + *pResult = maxValue; +} + +#else +#if defined(ARM_MATH_DSP) +void arm_absmax_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + q15_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q15_t)__QSUB16(0, out); \ + \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + /* compare for the extrema value */ \ + if (cur_absmax > out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmax_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + q15_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* Loop counter */ + + + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == (q15_t) 0x8000) ? 0x7fff : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = (*pSrc > 0) ? *pSrc : ((*pSrc == (q15_t) 0x8000) ? 
0x7fff : -*pSrc); + pSrc++; + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q31.c new file mode 100644 index 0000000..5610a8a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q31.c @@ -0,0 +1,224 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_no_idx_q31.c + * Description: Maximum value of absolute values of a Q31 vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a Q31 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_absmax_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q31x4_t vecSrc; + q31_t const *pSrcVec; + uint32x4_t curExtremValVec = vdupq_n_s32(Q31_ABSMIN); + q31_t maxValue = Q31_ABSMIN; + mve_pred16_t p0; + + + pSrcVec = (q31_t const *) pSrc; + blkCnt = blockSize >> 2; + while (blkCnt > 0) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + /* + * update per-lane max. 
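+     * (each of the 4 lanes of curExtremValVec keeps the largest |sample| seen so far in that lane)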
+ */ + curExtremValVec = vmaxaq(curExtremValVec, vecSrc); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 3; + if (blkCnt > 0) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + p0 = vctp32q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + curExtremValVec = vmaxaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxavq(maxValue, (q31x4_t)curExtremValVec); + *pResult = maxValue; +} +#else +#if defined(ARM_MATH_DSP) +void arm_absmax_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + q31_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q31_t)__QSUB(0, out); \ + \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + /* compare for the extrema value */ \ + if (cur_absmax > out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmax_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + q31_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* Loop counter */ + + + + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == INT32_MIN) ? INT32_MAX : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = (*pSrc > 0) ? *pSrc : ((*pSrc == INT32_MIN) ? 
INT32_MAX : -*pSrc); + pSrc++; + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q7.c new file mode 100644 index 0000000..26e1813 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_no_idx_q7.c @@ -0,0 +1,228 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_no_idx_q7.c + * Description: Maximum value of absolute values of a Q7 vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a Q7 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + + + +void arm_absmax_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q7x16_t vecSrc; + q7_t const *pSrcVec; + uint8x16_t curExtremValVec = vdupq_n_s8(Q7_ABSMIN); + q7_t maxValue = Q7_ABSMIN; + mve_pred16_t p0; + + + pSrcVec = (q7_t const *) pSrc; + blkCnt = blockSize >> 4; + while (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + /* + * update per-lane max. 
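+     * (each of the 16 lanes of curExtremValVec keeps the largest |sample| seen so far in that lane)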
+ */ + curExtremValVec = vmaxaq(curExtremValVec, vecSrc); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 0xF; + if (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + p0 = vctp8q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + curExtremValVec = vmaxaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxavq(maxValue, (q7x16_t)curExtremValVec); + *pResult = maxValue; +} +#else +#if defined(ARM_MATH_DSP) +void arm_absmax_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + q7_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q7_t)__QSUB8(0, out); \ + \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + /* compare for the extrema value */ \ + if (cur_absmax > out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmax_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + q7_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* Loop counter */ + + + + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == (q7_t) 0x80) ? (q7_t) 0x7f : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = (*pSrc > 0) ? *pSrc : ((*pSrc == (q7_t) 0x80) ? 
(q7_t) 0x7f : -*pSrc); + pSrc++; + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q15.c new file mode 100644 index 0000000..656fcf9 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q15.c @@ -0,0 +1,240 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_q15.c + * Description: Maximum value of absolute values of a Q15 vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a Q15 vector. 
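+           The function returns both the maximum absolute value and its position within the vector.
+           Minimal usage sketch (buffer name and length are hypothetical):
+             q15_t samples[64];                              // input data, filled elsewhere
+             q15_t peak;                                     // receives the largest |sample|
+             uint32_t peakIdx;                               // receives the index of that sample
+             arm_absmax_q15(samples, 64, &peak, &peakIdx);   // blockSize = number of samples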
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @param[out] pIndex index of maximum value returned here + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_absmax_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex) +{ + int32_t blkCnt; /* loop counters */ + q15x8_t extremValVec = vdupq_n_s16(Q15_ABSMIN); + q15_t maxValue = Q15_ABSMIN; + uint16x8_t indexVec; + uint16x8_t extremIdxVec; + mve_pred16_t p0; + uint16_t extremIdxArr[8]; + + indexVec = vidupq_u16(0U, 1); + + blkCnt = blockSize; + do { + mve_pred16_t p = vctp16q(blkCnt); + q15x8_t extremIdxVal = vld1q_z_s16(pSrc, p); + + extremIdxVal = vqabsq(extremIdxVal); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpgeq_m(extremIdxVal, extremValVec, p); + + extremValVec = vorrq_m(extremValVec, extremIdxVal, extremIdxVal, p0); + /* store per-lane extrema indexes */ + vst1q_p_u16(extremIdxArr, indexVec, p0); + + indexVec += 8; + pSrc += 8; + blkCnt -= 8; + } + while (blkCnt > 0); + + + /* Get max value across the vector */ + maxValue = vmaxvq(maxValue, extremValVec); + + /* set index for lower values to max possible index */ + p0 = vcmpgeq(extremValVec, maxValue); + extremIdxVec = vld1q_u16(extremIdxArr); + + indexVec = vpselq(extremIdxVec, vdupq_n_u16(blockSize - 1), p0); + *pIndex = vminvq(blockSize - 1, indexVec); + *pResult = maxValue; +} + +#else +#if defined(ARM_MATH_DSP) +void arm_absmax_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex) +{ + q15_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q15_t)__QSUB16(0, out); \ + /* Initialize index of extrema value. */ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + /* compare for the extrema value */ \ + if (cur_absmax > out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? 
cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q15_t)__QSUB16(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmax_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex) +{ + q15_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. */ + outIndex = 0U; + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == (q15_t) 0x8000) ? 0x7fff : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = (*pSrc > 0) ? *pSrc : ((*pSrc == (q15_t) 0x8000) ? 0x7fff : -*pSrc); + pSrc++; + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q31.c new file mode 100644 index 0000000..d3cfa3c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q31.c @@ -0,0 +1,240 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_q31.c + * Description: Maximum value of absolute values of a Q31 vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a Q31 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @param[out] pIndex index of maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_absmax_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex) +{ + int32_t blkCnt; /* loop counters */ + q31x4_t extremValVec = vdupq_n_s32(Q31_ABSMIN); + q31_t maxValue = Q31_ABSMIN; + uint32x4_t indexVec; + uint32x4_t extremIdxVec; + mve_pred16_t p0; + uint32_t extremIdxArr[4]; + + indexVec = vidupq_u32(0U, 1); + + blkCnt = blockSize; + do { + mve_pred16_t p = vctp32q(blkCnt); + q31x4_t extremIdxVal = vld1q_z_s32(pSrc, p); + + extremIdxVal = vqabsq(extremIdxVal); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpgeq_m(extremIdxVal, extremValVec, p); + + extremValVec = vorrq_m(extremValVec, extremIdxVal, extremIdxVal, p0); + /* store per-lane extrema indexes */ + vst1q_p_u32(extremIdxArr, indexVec, p0); + + indexVec += 4; + pSrc += 4; + blkCnt -= 4; + } + while (blkCnt > 0); + + + /* Get max value across the vector */ + maxValue = vmaxvq(maxValue, extremValVec); + + /* set index for lower values to max possible index */ + p0 = vcmpgeq(extremValVec, maxValue); + extremIdxVec = vld1q_u32(extremIdxArr); + + indexVec = vpselq(extremIdxVec, vdupq_n_u32(blockSize - 1), p0); + *pIndex = vminvq(blockSize - 1, indexVec); + *pResult = maxValue; +} +#else +#if defined(ARM_MATH_DSP) +void arm_absmax_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex) +{ + q31_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q31_t)__QSUB(0, out); \ + /* Initialize index of extrema value. */ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + /* compare for the extrema value */ \ + if (cur_absmax > out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? 
cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q31_t)__QSUB(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmax_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex) +{ + q31_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + + /* Initialise index value to zero. */ + outIndex = 0U; + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == INT32_MIN) ? INT32_MAX : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = (*pSrc > 0) ? *pSrc : ((*pSrc == INT32_MIN) ? INT32_MAX : -*pSrc); + pSrc++; + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q7.c new file mode 100644 index 0000000..30595de --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmax_q7.c @@ -0,0 +1,298 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmax_q7.c + * Description: Maximum value of absolute values of a Q7 vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMax + @{ + */ + +/** + @brief Maximum value of absolute values of a Q7 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @param[out] pIndex index of maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +#define MAX_BLKSZ_S8 (UINT8_MAX+1) + +static void arm_small_blk_absmax_q7( + const q7_t * pSrc, + uint16_t blockSize, + q7_t * pResult, + uint32_t * pIndex) +{ + int32_t blkCnt; /* loop counters */ + q7x16_t extremValVec = vdupq_n_s8(Q7_ABSMIN); + q7_t maxValue = Q7_ABSMIN; + uint8x16_t indexVec; + uint8x16_t extremIdxVec; + mve_pred16_t p0; + uint8_t extremIdxArr[16]; + + indexVec = vidupq_u8(0U, 1); + + blkCnt = blockSize; + do { + mve_pred16_t p = vctp8q(blkCnt); + q7x16_t extremIdxVal = vld1q_z_s8(pSrc, p); + + extremIdxVal = vqabsq(extremIdxVal); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpgeq_m(extremIdxVal, extremValVec, p); + + extremValVec = vorrq_m(extremValVec, extremIdxVal, extremIdxVal, p0); + /* store per-lane extrema indexes */ + vst1q_p_u8(extremIdxArr, indexVec, p0); + + indexVec += 16; + pSrc += 16; + blkCnt -= 16; + } + while (blkCnt > 0); + + + /* Get max value across the vector */ + maxValue = vmaxvq(maxValue, extremValVec); + + /* set index for lower values to max possible index */ + p0 = vcmpgeq(extremValVec, maxValue); + extremIdxVec = vld1q_u8(extremIdxArr); + + indexVec = vpselq(extremIdxVec, vdupq_n_u8(blockSize - 1), p0); + *pIndex = vminvq_u8(blockSize - 1, indexVec); + *pResult = maxValue; +} + +void arm_absmax_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult, + uint32_t * pIndex) +{ + int32_t totalSize = blockSize; + + if (totalSize <= MAX_BLKSZ_S8) + { + arm_small_blk_absmax_q7(pSrc, blockSize, pResult, pIndex); + } + else + { + uint32_t curIdx = 0; + q7_t curBlkExtr = Q7_MIN; + uint32_t curBlkPos = 0; + uint32_t curBlkIdx = 0; + /* + * process blocks of 255 elts + */ + while (totalSize >= MAX_BLKSZ_S8) + { + const q7_t *curSrc = pSrc; + + arm_small_blk_absmax_q7(curSrc, MAX_BLKSZ_S8, pResult, pIndex); + if (*pResult > curBlkExtr) + { + /* + * update partial extrema + */ + curBlkExtr = *pResult; + curBlkPos = *pIndex; + curBlkIdx = curIdx; + } + curIdx++; + pSrc += MAX_BLKSZ_S8; + totalSize -= MAX_BLKSZ_S8; + } + /* + * remainder + */ + arm_small_blk_absmax_q7(pSrc, totalSize, pResult, pIndex); + if (*pResult > curBlkExtr) + { + curBlkExtr = *pResult; + curBlkPos = *pIndex; + curBlkIdx = curIdx; + } + *pIndex = curBlkIdx * MAX_BLKSZ_S8 + curBlkPos; + *pResult = curBlkExtr; + } +} +#else +#if defined(ARM_MATH_DSP) +void arm_absmax_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult, + uint32_t * pIndex) +{ + q7_t cur_absmax, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q7_t)__QSUB8(0, out); \ + /* Initialize index of extrema value. 
*/ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmax to next consecutive values one by one */ \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + /* compare for the extrema value */ \ + if (cur_absmax > out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmax; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmax = *pSrc++; \ + cur_absmax = (cur_absmax > 0) ? cur_absmax : (q7_t)__QSUB8(0, cur_absmax); \ + if (cur_absmax > out) \ + { \ + out = cur_absmax; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmax_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult, + uint32_t * pIndex) +{ + q7_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + + /* Initialise index value to zero. */ + outIndex = 0U; + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == (q7_t) 0x80) ? (q7_t) 0x7f : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = (*pSrc > 0) ? *pSrc : ((*pSrc == (q7_t) 0x80) ? 
(q7_t) 0x7f : -*pSrc); + pSrc++; + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMax group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f16.c new file mode 100644 index 0000000..335f502 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f16.c @@ -0,0 +1,280 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_f16.c + * Description: Minimum value of absolute values of a floating-point vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + + +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a floating-point vector. 
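+           The function returns both the minimum absolute value and its position within the vector.
+           (This f16 variant is only compiled when ARM_FLOAT16_SUPPORTED is defined.)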
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @param[out] pIndex index of minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_absmin_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult, + uint32_t * pIndex) +{ + uint16_t blkCnt; /* loop counters */ + f16x8_t vecSrc; + float16_t const *pSrcVec; + f16x8_t curExtremValVec = vdupq_n_f16(F16_ABSMAX); + float16_t minValue = F16_ABSMAX; + uint16_t idx = blockSize; + uint16x8_t indexVec; + uint16x8_t curExtremIdxVec; + mve_pred16_t p0; + + + indexVec = vidupq_u16((uint32_t)0, 1); + curExtremIdxVec = vdupq_n_u16(0); + + pSrcVec = (float16_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0U) + { + vecSrc = vldrhq_f16(pSrcVec); + pSrcVec += 8; + vecSrc = vabsq(vecSrc); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpleq(vecSrc, curExtremValVec); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + + indexVec = indexVec + 8; + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0U) + { + p0 = vctp16q(blkCnt); + + vecSrc = vldrhq_f16(pSrcVec); + pSrcVec += 8; + vecSrc = vabsq(vecSrc); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpleq_m(vecSrc, curExtremValVec, p0); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + } + /* + * Get min value across the vector + */ + minValue = vminnmvq(minValue, curExtremValVec); + /* + * set index for lower values to max possible index + */ + p0 = vcmpleq(curExtremValVec, minValue); + indexVec = vpselq(curExtremIdxVec, vdupq_n_u16(blockSize), p0); + /* + * Get min index which is thus for a max value + */ + idx = vminvq(idx, indexVec); + /* + * Save result + */ + *pIndex = idx; + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_LOOPUNROLL) +void arm_absmin_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult, + uint32_t * pIndex) +{ + float16_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = ((_Float16)out > 0.0f16) ? out : -(_Float16)out; \ + /* Initialize index of extrema value. */ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + /* compare for the extrema value */ \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? 
cur_absmin : -(_Float16)cur_absmin; \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + out = cur_absmin; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + out = cur_absmin; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + out = cur_absmin; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + out = cur_absmin; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmin_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult, + uint32_t * pIndex) +{ + float16_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. */ + outIndex = 0U; + + /* Load first input value that act as reference value for comparision */ + out = (_Float16)fabsf((float32_t)*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = (_Float16)fabsf((float32_t)*pSrc++); + + /* compare for the minimum value */ + if ((_Float16)out > (_Float16)minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_LOOPUNROLL) */ +#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f32.c new file mode 100644 index 0000000..521093a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f32.c @@ -0,0 +1,283 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_f32.c + * Description: Minimum value of absolute values of a floating-point vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + + +/** + @ingroup groupStats + */ + +/** + @defgroup AbsMin Absolute Minimum + + Computes the minimum value of absolute values of an array of data. + The function returns both the minimum value and its position within the array. + There are separate functions for floating-point, Q31, Q15, and Q7 data types. + */ + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @param[out] pIndex index of minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_absmin_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex) +{ + int32_t blkCnt; /* loop counters */ + f32x4_t vecSrc; + float32_t const *pSrcVec; + f32x4_t curExtremValVec = vdupq_n_f32(F32_ABSMAX); + float32_t minValue = F32_ABSMAX; + uint32_t idx = blockSize; + uint32x4_t indexVec; + uint32x4_t curExtremIdxVec; + mve_pred16_t p0; + + + indexVec = vidupq_u32((uint32_t)0, 1); + curExtremIdxVec = vdupq_n_u32(0); + + pSrcVec = (float32_t const *) pSrc; + blkCnt = blockSize >> 2; + while (blkCnt > 0) + { + vecSrc = vldrwq_f32(pSrcVec); + pSrcVec += 4; + vecSrc = vabsq(vecSrc); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpleq(vecSrc, curExtremValVec); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + + indexVec = indexVec + 4; + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 3; + if (blkCnt > 0) + { + p0 = vctp32q(blkCnt); + + vecSrc = vldrwq_f32(pSrcVec); + pSrcVec += 4; + vecSrc = vabsq(vecSrc); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + p0 = vcmpleq_m(vecSrc, curExtremValVec, p0); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + } + /* + * Get min value across the vector + */ + minValue = vminnmvq(minValue, curExtremValVec); + /* + * set index for lower values to max possible index + */ + p0 = vcmpleq(curExtremValVec, minValue); + indexVec = vpselq(curExtremIdxVec, vdupq_n_u32(blockSize), p0); + /* + * Get min index which is thus for a max value + */ + idx = vminvq(idx, indexVec); + /* + * Save result + */ + *pIndex = idx; + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_LOOPUNROLL) +void arm_absmin_f32( + const float32_t * 
pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex) +{ + float32_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0.0f) ? out : -out; \ + /* Initialize index of extrema value. */ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + /* compare for the extrema value */ \ + if (cur_absmin < out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmin_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult, + uint32_t * pIndex) +{ + float32_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. 
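+                                      (outIndex will track the position of the smallest absolute value found so far.)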
*/ + outIndex = 0U; + + /* Load first input value that act as reference value for comparision */ + out = fabsf(*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = fabsf(*pSrc++); + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} + +#endif /* defined(ARM_MATH_LOOPUNROLL) */ +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f64.c new file mode 100644 index 0000000..518651a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_f64.c @@ -0,0 +1,94 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_f64.c + * Description: Minimum value of absolute values of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @param[out] pIndex index of minimum value returned here + @return none + */ +void arm_absmin_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult, + uint32_t * pIndex) +{ + float64_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. 
*/ + outIndex = 0U; + + /* Load first input value that act as reference value for comparision */ + out = fabs(*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = fabs(*pSrc++); + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} + +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f16.c new file mode 100644 index 0000000..1e90c91 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f16.c @@ -0,0 +1,234 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_no_idx_f16.c + * Description: Minimum value of absolute values of a floating-point vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + + +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_absmin_no_idx_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + f16x8_t vecSrc; + float16_t const *pSrcVec; + f16x8_t curExtremValVec = vdupq_n_f16(F16_ABSMAX); + float16_t minValue = F16_ABSMAX; + mve_pred16_t p0; + + + pSrcVec = (float16_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + /* + * update per-lane min. 
+ */ + curExtremValVec = vminnmaq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + p0 = vctp16q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + curExtremValVec = vminnmaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get min value across the vector + */ + minValue = vminnmavq(minValue, curExtremValVec); + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_LOOPUNROLL) +void arm_absmin_no_idx_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult) +{ + float16_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = ((_Float16)out > 0.0f16) ? out : -(_Float16)out; \ + \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + /* compare for the extrema value */ \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + out = cur_absmin; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = ((_Float16)cur_absmin > 0.0f16) ? cur_absmin : -(_Float16)cur_absmin; \ + if ((_Float16)cur_absmin < (_Float16)out) \ + { \ + out = cur_absmin; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmin_no_idx_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult) +{ + float16_t minVal, out; /* Temporary variables to store the output value. 
*/ + uint32_t blkCnt; /* Loop counter */ + + + + /* Load first input value that act as reference value for comparision */ + out = (_Float16)fabsf((float32_t)*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = (_Float16)fabsf((float32_t)*pSrc++); + + /* compare for the minimum value */ + if ((_Float16)out > (_Float16)minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_LOOPUNROLL) */ +#endif /* defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f32.c new file mode 100644 index 0000000..20aca41 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f32.c @@ -0,0 +1,230 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_no_idx_f32.c + * Description: Minimum value of absolute values of a floating-point vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a floating-point vector. 
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_absmin_no_idx_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + f32x4_t vecSrc; + float32_t const *pSrcVec; + f32x4_t curExtremValVec = vdupq_n_f32(F32_ABSMAX); + float32_t minValue = F32_ABSMAX; + mve_pred16_t p0; + + + pSrcVec = (float32_t const *) pSrc; + blkCnt = blockSize >> 2; + while (blkCnt > 0) + { + vecSrc = vldrwq_f32(pSrcVec); + pSrcVec += 4; + /* + * update per-lane min. + */ + curExtremValVec = vminnmaq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 3; + if (blkCnt > 0) + { + vecSrc = vldrwq_f32(pSrcVec); + pSrcVec += 4; + p0 = vctp32q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + curExtremValVec = vminnmaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get min value across the vector + */ + minValue = vminnmavq(minValue, curExtremValVec); + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_LOOPUNROLL) +void arm_absmin_no_idx_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + float32_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0.0f) ? out : -out; \ + \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + /* compare for the extrema value */ \ + if (cur_absmin < out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0.0f) ? cur_absmin : -cur_absmin; \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmin_no_idx_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + float32_t minVal, out; /* Temporary variables to store the output value. 
*/ + uint32_t blkCnt; /* Loop counter */ + + + + /* Load first input value that act as reference value for comparision */ + out = fabsf(*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = fabsf(*pSrc++); + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; +} + +#endif /* defined(ARM_MATH_LOOPUNROLL) */ +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f64.c new file mode 100644 index 0000000..143271d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_f64.c @@ -0,0 +1,88 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_no_idx_f64.c + * Description: Minimum value of absolute values of a floating-point vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ +void arm_absmin_no_idx_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult) +{ + float64_t minVal, out; /* Temporary variables to store the output value. 
*/ + uint32_t blkCnt; /* Loop counter */ + + + /* Load first input value that act as reference value for comparision */ + out = fabs(*pSrc++); + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = fabs(*pSrc++); + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; +} + +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q15.c new file mode 100644 index 0000000..c6dd15e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q15.c @@ -0,0 +1,226 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_no_idx_q15.c + * Description: Minimum value of absolute values of a Q15 vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a Q15 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_absmin_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + uint16_t blkCnt; /* loop counters */ + q15x8_t vecSrc; + q15_t const *pSrcVec; + uint16x8_t curExtremValVec = vdupq_n_s16(Q15_ABSMAX); + q15_t minValue = Q15_ABSMAX; + mve_pred16_t p0; + + + pSrcVec = (q15_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + /* + * update per-lane min. 
+ */ + curExtremValVec = vminaq(curExtremValVec, vecSrc); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + p0 = vctp16q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + curExtremValVec = vminaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get min value across the vector + */ + minValue = vminavq(minValue, (q15x8_t)curExtremValVec); + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_DSP) +void arm_absmin_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + q15_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q15_t)__QSUB16(0, out); \ + \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + /* compare for the extrema value */ \ + if (cur_absmin < out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmin_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + q15_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* Loop counter */ + + + + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == (q15_t) 0x8000) ? 0x7fff : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = (*pSrc > 0) ? *pSrc : ((*pSrc == (q15_t) 0x8000) ? 
0x7fff : -*pSrc); + pSrc++; + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q31.c new file mode 100644 index 0000000..90281a4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q31.c @@ -0,0 +1,225 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_no_idx_q31.c + * Description: Minimum value of absolute values of a Q31 vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a Q31 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_absmin_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q31x4_t vecSrc; + q31_t const *pSrcVec; + uint32x4_t curExtremValVec = vdupq_n_s32(Q31_ABSMAX); + q31_t minValue = Q31_ABSMAX; + mve_pred16_t p0; + + + pSrcVec = (q31_t const *) pSrc; + blkCnt = blockSize >> 2; + while (blkCnt > 0) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + /* + * update per-lane min. 
+ */ + curExtremValVec = vminaq(curExtremValVec, vecSrc); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 3; + if (blkCnt > 0) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + p0 = vctp32q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + curExtremValVec = vminaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get min value across the vector + */ + minValue = vminavq(minValue, (q31x4_t)curExtremValVec); + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_DSP) +void arm_absmin_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + q31_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q31_t)__QSUB(0, out); \ + \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + /* compare for the extrema value */ \ + if (cur_absmin < out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmin_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + q31_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* Loop counter */ + + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == INT32_MIN) ? INT32_MAX : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = (*pSrc > 0) ? *pSrc : ((*pSrc == INT32_MIN) ? 
INT32_MAX : -*pSrc); + pSrc++; + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q7.c new file mode 100644 index 0000000..e0f712b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_no_idx_q7.c @@ -0,0 +1,227 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_no_idx_q7.c + * Description: Minimum value of absolute values of a Q7 vector + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a Q7 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + + + +void arm_absmin_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q7x16_t vecSrc; + q7_t const *pSrcVec; + uint8x16_t curExtremValVec = vdupq_n_s8(Q7_ABSMAX); + q7_t minValue = Q7_ABSMAX; + mve_pred16_t p0; + + + pSrcVec = (q7_t const *) pSrc; + blkCnt = blockSize >> 4; + while (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + /* + * update per-lane min. 
+ */ + curExtremValVec = vminaq(curExtremValVec, vecSrc); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 0xF; + if (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + p0 = vctp8q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + curExtremValVec = vminaq_m(curExtremValVec, vecSrc, p0); + } + /* + * Get min value across the vector + */ + minValue = vminavq(minValue, (q7x16_t)curExtremValVec); + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_DSP) +void arm_absmin_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + q7_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt; /* Loop counter */ \ + \ + \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q7_t)__QSUB8(0, out); \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + /* compare for the extrema value */ \ + if (cur_absmin < out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ +} +#else +void arm_absmin_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + q7_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* Loop counter */ + + + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == (q7_t) 0x80) ? (q7_t) 0x7f : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = (*pSrc > 0) ? *pSrc : ((*pSrc == (q7_t) 0x80) ? 
(q7_t) 0x7f : -*pSrc); + pSrc++; + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q15.c new file mode 100644 index 0000000..ef389ba --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q15.c @@ -0,0 +1,273 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_q15.c + * Description: Minimum value of absolute values of a Q15 vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a Q15 vector. 
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @param[out] pIndex index of minimum value returned here + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_absmin_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex) +{ + uint16_t blkCnt; /* loop counters */ + q15x8_t vecSrc; + q15_t const *pSrcVec; + q15x8_t curExtremValVec = vdupq_n_s16(Q15_ABSMAX); + q15_t minValue = Q15_ABSMAX; + uint16_t idx = blockSize; + uint16x8_t indexVec; + uint16x8_t curExtremIdxVec; + uint32_t startIdx = 0; + mve_pred16_t p0; + + + indexVec = vidupq_wb_u16(&startIdx, 1); + curExtremIdxVec = vdupq_n_u16(0); + + pSrcVec = (q15_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0U) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + vecSrc = vabsq(vecSrc); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + p0 = vcmpleq(vecSrc, curExtremValVec); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + + indexVec = vidupq_wb_u16(&startIdx, 1); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0U) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + vecSrc = vabsq(vecSrc); + + p0 = vctp16q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + p0 = vcmpleq_m(vecSrc, curExtremValVec, p0); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + } + /* + * Get min value across the vector + */ + minValue = vminvq(minValue, curExtremValVec); + /* + * set index for lower values to min possible index + */ + p0 = vcmpleq(curExtremValVec, minValue); + indexVec = vpselq(curExtremIdxVec, vdupq_n_u16(blockSize), p0); + /* + * Get min index which is thus for a min value + */ + idx = vminvq(idx, indexVec); + /* + * Save result + */ + *pIndex = idx; + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_DSP) +void arm_absmin_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex) +{ + q15_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q15_t)__QSUB16(0, out); \ + /* Initialize index of extrema value. */ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + /* compare for the extrema value */ \ + if (cur_absmin < out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? 
cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q15_t)__QSUB16(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmin_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult, + uint32_t * pIndex) +{ + q15_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + + /* Initialise index value to zero. */ + outIndex = 0U; + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == (q15_t) 0x8000) ? 0x7fff : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = (*pSrc > 0) ? *pSrc : ((*pSrc == (q15_t) 0x8000) ? 0x7fff : -*pSrc); + pSrc++; + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q31.c new file mode 100644 index 0000000..0f28026 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q31.c @@ -0,0 +1,273 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_q31.c + * Description: Minimum value of absolute values of a Q31 vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a Q31 vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @param[out] pIndex index of minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_absmin_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex) +{ + uint16_t blkCnt; /* loop counters */ + q31x4_t vecSrc; + q31_t const *pSrcVec; + q31x4_t curExtremValVec = vdupq_n_s32(Q31_ABSMAX); + q31_t minValue = Q31_ABSMAX; + uint16_t idx = blockSize; + uint32x4_t indexVec; + uint32x4_t curExtremIdxVec; + uint32_t startIdx = 0; + mve_pred16_t p0; + + + indexVec = vidupq_wb_u32(&startIdx, 1); + curExtremIdxVec = vdupq_n_u32(0); + + pSrcVec = (q31_t const *) pSrc; + blkCnt = blockSize >> 2; + while (blkCnt > 0U) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + vecSrc = vabsq(vecSrc); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + p0 = vcmpleq(vecSrc, curExtremValVec); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + + indexVec = vidupq_wb_u32(&startIdx, 1); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 3; + if (blkCnt > 0U) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + vecSrc = vabsq(vecSrc); + + p0 = vctp32q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + p0 = vcmpleq_m(vecSrc, curExtremValVec, p0); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + } + /* + * Get min value across the vector + */ + minValue = vminvq(minValue, curExtremValVec); + /* + * set index for lower values to min possible index + */ + p0 = vcmpleq(curExtremValVec, minValue); + indexVec = vpselq(curExtremIdxVec, vdupq_n_u32(blockSize), p0); + /* + * Get min index which is thus for a min value + */ + idx = vminvq(idx, indexVec); + /* + * Save result + */ + *pIndex = idx; + *pResult = minValue; +} + +#else +#if defined(ARM_MATH_DSP) +void arm_absmin_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex) +{ + q31_t cur_absmin, out; /* Temporary variables to store the output value. */\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q31_t)__QSUB(0, out); \ + /* Initialize index of extrema value. 
*/ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + /* compare for the extrema value */ \ + if (cur_absmin < out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q31_t)__QSUB(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmin_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex) +{ + q31_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. */ + outIndex = 0U; + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == INT32_MIN) ? INT32_MAX : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = (*pSrc > 0) ? *pSrc : ((*pSrc == INT32_MIN) ? 
INT32_MAX : -*pSrc); + pSrc++; + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q7.c new file mode 100644 index 0000000..99bb473 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_absmin_q7.c @@ -0,0 +1,326 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_absmin_q7.c + * Description: Minimum value of absolute values of a Q7 vector + * + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup AbsMin + @{ + */ + +/** + @brief Minimum value of absolute values of a Q7 vector. 
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @param[out] pIndex index of minimum value returned here + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +#define MAX_BLKSZ_S8 (UINT8_MAX+1) + +static void arm_small_blk_absmin_q7( + const q7_t *pSrc, + uint32_t blockSize, + q7_t *pResult, + uint32_t *pIndex) +{ + uint16_t blkCnt; /* loop counters */ + q7x16_t vecSrc; + q7_t const *pSrcVec; + q7x16_t curExtremValVec = vdupq_n_s8(Q7_ABSMAX); + q7_t minValue = Q7_ABSMAX; + uint16_t idx = blockSize - 1; + uint8x16_t indexVec; + uint8x16_t curExtremIdxVec; + uint32_t startIdx = 0; + mve_pred16_t p0; + + + indexVec = vidupq_wb_u8(&startIdx, 1); + curExtremIdxVec = vdupq_n_u8(0); + + pSrcVec = (q7_t const *) pSrc; + blkCnt = blockSize >> 4; + while (blkCnt > 0U) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + vecSrc = vabsq(vecSrc); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + p0 = vcmpleq(vecSrc, curExtremValVec); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + + indexVec = vidupq_wb_u8(&startIdx, 1); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 0xF; + if (blkCnt > 0U) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + vecSrc = vabsq(vecSrc); + + p0 = vctp8q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + p0 = vcmpleq_m(vecSrc, curExtremValVec, p0); + curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); + curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + } + /* + * Get min value across the vector + */ + minValue = vminvq(minValue, curExtremValVec); + /* + * set index for lower values to min possible index + */ + p0 = vcmpleq(curExtremValVec, minValue); + idx = vminvq_p_u8(idx, curExtremIdxVec, p0); + /* + * Save result + */ + *pIndex = idx; + *pResult = minValue; +} + + +void arm_absmin_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult, + uint32_t * pIndex) +{ + int32_t totalSize = blockSize; + + if (totalSize <= MAX_BLKSZ_S8) + { + arm_small_blk_absmin_q7(pSrc, blockSize, pResult, pIndex); + } + else + { + uint32_t curIdx = 0; + q7_t curBlkExtr = Q7_MAX; + uint32_t curBlkPos = 0; + uint32_t curBlkIdx = 0; + /* + * process blocks of 255 elts + */ + while (totalSize >= MAX_BLKSZ_S8) + { + const q7_t *curSrc = pSrc; + + arm_small_blk_absmin_q7(curSrc, MAX_BLKSZ_S8, pResult, pIndex); + if (*pResult < curBlkExtr) + { + /* + * update partial extrema + */ + curBlkExtr = *pResult; + curBlkPos = *pIndex; + curBlkIdx = curIdx; + } + curIdx++; + pSrc += MAX_BLKSZ_S8; + totalSize -= MAX_BLKSZ_S8; + } + /* + * remainder + */ + arm_small_blk_absmin_q7(pSrc, totalSize, pResult, pIndex); + if (*pResult < curBlkExtr) + { + curBlkExtr = *pResult; + curBlkPos = *pIndex; + curBlkIdx = curIdx; + } + *pIndex = curBlkIdx * MAX_BLKSZ_S8 + curBlkPos; + *pResult = curBlkExtr; + } +} + +#else +#if defined(ARM_MATH_DSP) +void arm_absmin_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult, + uint32_t * pIndex) +{ + q7_t cur_absmin, out; /* Temporary variables to store the output value. 
*/\ + uint32_t blkCnt, outIndex; /* Loop counter */ \ + uint32_t index; /* index of maximum value */ \ + \ + /* Initialize index value to zero. */ \ + outIndex = 0U; \ + /* Load first input value that act as reference value for comparision */ \ + out = *pSrc++; \ + out = (out > 0) ? out : (q7_t)__QSUB8(0, out); \ + /* Initialize index of extrema value. */ \ + index = 0U; \ + \ + /* Loop unrolling: Compute 4 outputs at a time */ \ + blkCnt = (blockSize - 1U) >> 2U; \ + \ + while (blkCnt > 0U) \ + { \ + /* Initialize cur_absmin to next consecutive values one by one */ \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + /* compare for the extrema value */ \ + if (cur_absmin < out) \ + { \ + /* Update the extrema value and it's index */ \ + out = cur_absmin; \ + outIndex = index + 1U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 2U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 3U; \ + } \ + \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = index + 4U; \ + } \ + \ + index += 4U; \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Loop unrolling: Compute remaining outputs */ \ + blkCnt = (blockSize - 1U) % 4U; \ + \ + \ + while (blkCnt > 0U) \ + { \ + cur_absmin = *pSrc++; \ + cur_absmin = (cur_absmin > 0) ? cur_absmin : (q7_t)__QSUB8(0, cur_absmin); \ + if (cur_absmin < out) \ + { \ + out = cur_absmin; \ + outIndex = blockSize - blkCnt; \ + } \ + \ + /* Decrement loop counter */ \ + blkCnt--; \ + } \ + \ + /* Store the extrema value and it's index into destination pointers */ \ + *pResult = out; \ + *pIndex = outIndex; +} +#else +void arm_absmin_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult, + uint32_t * pIndex) +{ + q7_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. */ + outIndex = 0U; + /* Load first input value that act as reference value for comparision */ + out = (*pSrc > 0) ? *pSrc : ((*pSrc == (q7_t) 0x80) ? (q7_t) 0x7f : -*pSrc); + pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = (*pSrc > 0) ? *pSrc : ((*pSrc == (q7_t) 0x80) ? 
(q7_t) 0x7f : -*pSrc); + pSrc++; + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} +#endif /* defined(ARM_MATH_DSP) */ +#endif /* defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of AbsMin group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f16.c new file mode 100644 index 0000000..71be5f1 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f16.c @@ -0,0 +1,125 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_accumulate_f16.c + * Description: accumulation value of a floating-point vector + * + * $Date: 14 July 2022 + * $Revision: V1.0.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + + +/** + @ingroup groupStats + */ + +/** + @defgroup Accumulation Accumulation functions + + Calculates the accumulation of the input vector. Sum is defined as the addition of the elements in the vector. + The underlying algorithm is used: + +
+  <pre>
+      Result = (pSrc[0] + pSrc[1] + pSrc[2] + ... + pSrc[blockSize-1]);
+  </pre>
+ 
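+  For example (an illustrative sketch only, not taken from the upstream
+  CMSIS-DSP sources; it assumes the f32 variant added in
+  arm_accumulate_f32.c further below):
+ 
+      float32_t samples[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
+      float32_t sum;
+      arm_accumulate_f32(samples, 4, &sum);
+ 
+  after which sum holds 10.0f.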
+ + There are separate functions for floating-point, Q31, Q15, and Q7 data types. + */ + +/** + @addtogroup Accumulation + @{ + */ + +/** + @brief accumulate value of a floating-point vector. + @param[in] pSrc points to the input vector. + @param[in] blockSize number of samples in input vector. + @param[out] pResult sum of values in input vector. + @return none + */ + +void arm_accumulate_f16( + const float16_t * pSrc, + uint32_t blockSize, + float16_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + float16_t sum = 0.0f16; /* Temporary result storage */ + +#if defined (ARM_MATH_LOOPUNROLL) && !defined(ARM_MATH_AUTOVECTORIZE) + + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = blockSize >> 2U; + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + sum += (_Float16)*pSrc++; + + sum += (_Float16)*pSrc++; + + sum += (_Float16)*pSrc++; + + sum += (_Float16)*pSrc++; + + /* Decrement the loop counter */ + blkCnt--; + } + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = blockSize % 0x4U; + +#else + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + +#endif /* #if defined (ARM_MATH_LOOPUNROLL) */ + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + sum += (_Float16)*pSrc++; + + /* Decrement loop counter */ + blkCnt--; + } + + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + /* Store result to destination */ + *pResult = sum ; +} +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ + +/** + @} end of Accumulation group + */ + + + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f32.c new file mode 100644 index 0000000..353ab17 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f32.c @@ -0,0 +1,213 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_accumulate_f32.c + * Description: Sum value of a floating-point vector + * + * $Date: 14 July 2022 + * $Revision: V1.0.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup Accumulation + @{ + */ + +/** + @brief Accumulation value of a floating-point vector. + @param[in] pSrc points to the input vector. + @param[in] blockSize number of samples in input vector. 
+ @param[out] pResult sum of values in input vector. + @return none + */ + +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_accumulate_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + f32x4_t vecA; + f32x4_t vecSum; + uint32_t blkCnt; + float32_t sum = 0.0f; + vecSum = vdupq_n_f32(0.0f); + + /* Compute 4 outputs at a time */ + blkCnt = blockSize >> 2U; + while (blkCnt > 0U) + { + /* + * C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] + * Calculate dot product and then store the result in a temporary buffer. + * and advance vector source and destination pointers + */ + vecA = vld1q_f32(pSrc); + pSrc += 4; + + vecSum = vaddq_f32(vecSum, vecA); + /* + * Decrement the blockSize loop counter + */ + blkCnt --; + } + + + blkCnt = blockSize & 3; + if (blkCnt > 0U) + { + /* C = A[0]* B[0] + A[1]* B[1] + A[2]* B[2] + .....+ A[blockSize-1]* B[blockSize-1] */ + + mve_pred16_t p0 = vctp32q(blkCnt); + vecA = vld1q(pSrc); + vecSum = vaddq_m(vecSum,vecSum, vecA, p0); + } + + sum = vecAddAcrossF32Mve(vecSum); + + /* Store result in destination buffer */ + *pResult = sum; +} + +#else + +#if defined(ARM_MATH_NEON) && !defined(ARM_MATH_AUTOVECTORIZE) +void arm_accumulate_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + float32_t sum = 0.0f; /* Temporary result storage */ + float32x4_t sumV = vdupq_n_f32(0.0f); /* Temporary result storage */ + float32x2_t sumV2; + + uint32_t blkCnt; /* Loop counter */ + + float32x4_t inV; + + blkCnt = blockSize >> 2U; + + /* Compute 4 outputs at a time. + ** a second loop below computes the remaining 1 to 3 samples. */ + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + inV = vld1q_f32(pSrc); + sumV = vaddq_f32(sumV, inV); + + pSrc += 4; + /* Decrement the loop counter */ + blkCnt--; + } + + sumV2 = vpadd_f32(vget_low_f32(sumV),vget_high_f32(sumV)); + sum = vget_lane_f32(sumV2, 0) + vget_lane_f32(sumV2, 1); + + /* If the blockSize is not a multiple of 4, compute any remaining output samples here. + ** No loop unrolling is used. */ + blkCnt = blockSize & 3; + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + sum += *pSrc++; + + /* Decrement the loop counter */ + blkCnt--; + } + + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + /* Store the result to the destination */ + *pResult = sum; +} + +#else +void arm_accumulate_f32( + const float32_t * pSrc, + uint32_t blockSize, + float32_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + float32_t sum = 0.0f; /* Temporary result storage */ + +#if defined (ARM_MATH_LOOPUNROLL) && !defined(ARM_MATH_AUTOVECTORIZE) + + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = blockSize >> 2U; + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + sum += *pSrc++; + + sum += *pSrc++; + + sum += *pSrc++; + + sum += *pSrc++; + + /* Decrement the loop counter */ + blkCnt--; + } + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = blockSize % 0x4U; + +#else + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + +#endif /* #if defined (ARM_MATH_LOOPUNROLL) */ + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + sum += *pSrc++; + + /* Decrement loop counter */ + blkCnt--; + } + + /* C = (A[0] + A[1] + A[2] + ... 
+ A[blockSize-1]) */ + /* Store result to destination */ + *pResult = sum ; +} +#endif /* #if defined(ARM_MATH_NEON) */ + +#endif /* #if defined(ARM_MATH_MVEF) */ +/** + @} end of Accumulation group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f64.c new file mode 100644 index 0000000..25420ac --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_accumulate_f64.c @@ -0,0 +1,131 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_accumulate_f64.c + * Description: Accumulation value of a floating-point vector + * + * $Date: 14 July 2022 + * $Revision: V1.0.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup Accumulation + @{ + */ + +/** + @brief Accumulation value of a floating-point vector. + @param[in] pSrc points to the input vector. + @param[in] blockSize number of samples in input vector. + @param[out] pResult sum of values in input vector. + @return none + */ +#if defined(ARM_MATH_NEON) && defined(__aarch64__) +void arm_accumulate_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + + /*Neon buffers*/ + float64x2_t vSum = vdupq_n_f64(0.0); + float64x2_t afterLoad ; + + float64_t sum = 0.; /* Temporary result storage */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize >> 1U; + + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + + afterLoad = vld1q_f64(pSrc); + vSum = vaddq_f64(vSum, afterLoad); + + /* Decrement loop counter */ + blkCnt--; + + pSrc += 2; + } + sum = vaddvq_f64(vSum); + + /* Tail */ + blkCnt = blockSize & 1 ; + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + sum += *pSrc++; + + /* Decrement loop counter */ + blkCnt--; + } + + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + /* Store result to destination */ + *pResult = sum; +} +#else +void arm_accumulate_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + float64_t sum = 0.; /* Temporary result storage */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... 
+ A[blockSize-1]) */ + sum += *pSrc++; + + /* Decrement loop counter */ + blkCnt--; + } + + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + /* Store result to destination */ + *pResult = sum; +} + +#endif + + +/** + @} end of Accumulation group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f16.c index 9a5bf6a..4e223c7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f16.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f16.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -97,7 +99,7 @@ float16_t arm_entropy_f16(const float16_t * pSrcA,uint32_t blockSize) while(blkCnt > 0) { p = *pSrcA++; - accum += p * logf(p); + accum += p * (_Float16)logf((float32_t)p); blkCnt--; @@ -122,7 +124,7 @@ float16_t arm_entropy_f16(const float16_t * pSrcA,uint32_t blockSize) while(blkCnt > 0) { p = *pIn++; - accum += p * logf(p); + accum += p * (_Float16)logf((float32_t)p); blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f32.c index 163f8be..290e5c1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f32.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f32.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f64.c index 5be9be9..5cb2ef5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_entropy_f64.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f64.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. 
All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -53,7 +55,7 @@ float64_t arm_entropy_f64(const float64_t * pSrcA, uint32_t blockSize) pIn = pSrcA; blkCnt = blockSize; - accum = 0.0f; + accum = 0.0; while(blkCnt > 0) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f16.c index 10e1528..6c291fe 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f16.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f16.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -81,7 +83,7 @@ float16_t arm_kullback_leibler_f16(const float16_t * pSrcA,const float16_t * pSr accum = 0.0f16; - f16x8_t vSum = vdupq_n_f16(0.0f); + f16x8_t vSum = vdupq_n_f16(0.0f16); blkCnt = blockSize >> 3; while(blkCnt > 0) { @@ -108,7 +110,7 @@ float16_t arm_kullback_leibler_f16(const float16_t * pSrcA,const float16_t * pSr { pA = *pSrcA++; pB = *pSrcB++; - accum += pA * logf(pB / pA); + accum += pA * (_Float16)logf((float32_t)pB / (float32_t)pA); blkCnt--; @@ -134,7 +136,7 @@ float16_t arm_kullback_leibler_f16(const float16_t * pSrcA,const float16_t * pSr { pA = *pInA++; pB = *pInB++; - accum += pA * logf(pB / pA); + accum += pA * (_Float16)logf((float32_t)pB / (float32_t)pA); blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f32.c index 7193b4e..993e102 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f32.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f32.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f64.c index 1eede11..8bde9c2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_kullback_leibler_f64.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f64.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -55,7 +57,7 @@ float64_t arm_kullback_leibler_f64(const float64_t * pSrcA, const float64_t * pS pInB = pSrcB; blkCnt = blockSize; - accum = 0.0f; + accum = 0.0; while(blkCnt > 0) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_dot_prod_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_dot_prod_f16.c index 28cb1df..08fb197 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_dot_prod_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_dot_prod_f16.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f16.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_dot_prod_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_dot_prod_f32.c index 95ae872..bb5d90f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_dot_prod_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_dot_prod_f32.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f32.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_f16.c index 1b809f3..dc151f7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_f16.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f16.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -111,12 +113,12 @@ float16_t arm_logsumexp_f16(const float16_t *in, uint32_t blockSize) while(blkCnt > 0) { tmp = *pIn++; - accum += expf(tmp - maxVal); + accum += (_Float16)expf((float32_t)((_Float16)tmp - (_Float16)maxVal)); blkCnt--; } - accum = maxVal + logf(accum); + accum = (_Float16)maxVal + (_Float16)logf((float32_t)accum); return (accum); } @@ -154,11 +156,11 @@ float16_t arm_logsumexp_f16(const float16_t *in, uint32_t blockSize) while(blkCnt > 0) { tmp = *pIn++; - accum += expf(tmp - maxVal); + accum += (_Float16)expf((float32_t)((_Float16)tmp - (_Float16)maxVal)); blkCnt--; } - accum = maxVal + logf(accum); + accum = (_Float16)maxVal + (_Float16)logf((float32_t)accum); return(accum); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_f32.c index 6156a1a..8f0cc74 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_logsumexp_f32.c @@ -5,11 +5,13 @@ * Title: arm_logsumexp_f32.c * Description: LogSumExp * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -113,7 +115,7 @@ float32_t arm_logsumexp_f32(const float32_t *in, uint32_t blockSize) } - accum = maxVal + log(accum); + accum = maxVal + logf(accum); return (accum); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f16.c index c405ae2..3fb0512 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f16.c @@ -5,13 +5,13 @@ * Title: arm_max_f16.c * Description: Maximum value of a floating-point vector * - * $Date: 18. 
March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -122,7 +122,7 @@ void arm_max_f16( tmp = *pSrc++; /* compare for the maximum value */ - if (maxValue < tmp) + if ((_Float16)maxValue < (_Float16)tmp) { /* Update the maximum value and it's index */ maxValue = tmp; @@ -173,7 +173,7 @@ void arm_max_f16( maxVal = *pSrc++; /* compare for the maximum value */ - if (out < maxVal) + if ((_Float16)out < (_Float16)maxVal) { /* Update the maximum value and it's index */ out = maxVal; @@ -181,21 +181,21 @@ void arm_max_f16( } maxVal = *pSrc++; - if (out < maxVal) + if ((_Float16)out < (_Float16)maxVal) { out = maxVal; outIndex = index + 2U; } maxVal = *pSrc++; - if (out < maxVal) + if ((_Float16)out < (_Float16)maxVal) { out = maxVal; outIndex = index + 3U; } maxVal = *pSrc++; - if (out < maxVal) + if ((_Float16)out < (_Float16)maxVal) { out = maxVal; outIndex = index + 4U; @@ -223,7 +223,7 @@ void arm_max_f16( maxVal = *pSrc++; /* compare for the maximum value */ - if (out < maxVal) + if ((_Float16)out < (_Float16)maxVal) { /* Update the maximum value and it's index */ out = maxVal; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f32.c index 3ee95bb..4856c46 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f32.c @@ -5,13 +5,13 @@ * Title: arm_max_f32.c * Description: Maximum value of a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -167,7 +167,7 @@ void arm_max_f32( uint32x4_t countV; uint32x2_t countV2; - maxIdx = vdupq_n_u32(ULONG_MAX); + maxIdx = vdupq_n_u32(UINT_MAX); delta = vdupq_n_u32(4); index = vld1q_u32(indexInit); countV = vld1q_u32(countVInit); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f64.c new file mode 100644 index 0000000..66cfd34 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_f64.c @@ -0,0 +1,94 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_max_f64.c + * Description: Maximum value of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup Max + @{ + */ + +/** + @brief Maximum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @param[out] pIndex index of maximum value returned here + @return none + */ +void arm_max_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult, + uint32_t * pIndex) +{ + float64_t maxVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. 
*/ + outIndex = 0U; + + /* Load first input value that act as reference value for comparision */ + out = *pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal = *pSrc++; + + /* compare for the maximum value */ + if (out < maxVal) + { + /* Update the maximum value and it's index */ + out = maxVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the maximum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} + +/** + @} end of Max group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f16.c index 5a7b514..a7232da 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f16.c @@ -5,13 +5,13 @@ * Title: arm_max_no_idx_f16.c * Description: Maximum value of a floating-point vector without returning the index * - * $Date: 16. October 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -97,7 +97,7 @@ void arm_max_no_idx_f16( newVal = *pSrc++; /* compare for the maximum value */ - if (maxValue < newVal) + if ((_Float16)maxValue < (_Float16)newVal) { /* Update the maximum value and it's index */ maxValue = newVal; @@ -124,7 +124,7 @@ void arm_max_no_idx_f16( newVal = *pSrc++; /* compare for the maximum value */ - if (maxValue < newVal) + if ((_Float16)maxValue < (_Float16)newVal) { /* Update the maximum value and it's index */ maxValue = newVal; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f32.c index 3961416..c578e6b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f32.c @@ -5,13 +5,13 @@ * Title: arm_max_no_idx_f32.c * Description: Maximum value of a floating-point vector without returning the index * - * $Date: 16. October 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f64.c new file mode 100644 index 0000000..dcb7afb --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_f64.c @@ -0,0 +1,79 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_max_no_idx_f64.c + * Description: Maximum value of a floating-point vector without returning the index + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup Max + @{ + */ + +/** + @brief Maximum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ +void arm_max_no_idx_f64( + const float64_t *pSrc, + uint32_t blockSize, + float64_t *pResult) +{ + float64_t maxValue = F64_MIN; + float64_t newVal; + + while (blockSize > 0U) + { + newVal = *pSrc++; + + /* compare for the maximum value */ + if (maxValue < newVal) + { + /* Update the maximum value and it's index */ + maxValue = newVal; + } + + blockSize --; + } + + *pResult = maxValue; +} + +/** + @} end of Max group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q15.c new file mode 100644 index 0000000..063a5e3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q15.c @@ -0,0 +1,146 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_max_no_idx_q15.c + * Description: Maximum value of a q15 vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + + +/** + @ingroup groupStats + */ + +/** + @addtogroup Max + @{ + */ + +/** + @brief Maximum value of a q15 vector without index. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_max_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q15x8_t vecSrc; + q15_t const *pSrcVec; + q15x8_t curExtremValVec = vdupq_n_s16(Q15_MIN); + q15_t maxValue = Q15_MIN; + mve_pred16_t p0; + + + pSrcVec = (q15_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + /* + * update per-lane max. + */ + curExtremValVec = vmaxq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + p0 = vctp16q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + curExtremValVec = vmaxq_m(curExtremValVec, vecSrc, curExtremValVec, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxvq(maxValue, curExtremValVec); + *pResult = maxValue; +} + +#else +void arm_max_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + q15_t maxVal1, out; /* Temporary variables to store the output value. 
*/ + uint32_t blkCnt; /* loop counter */ + + /* Load first input value that act as reference value for comparision */ + out = *pSrc++; + + blkCnt = (blockSize - 1U); + + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal1 = *pSrc++; + + /* compare for the maximum value */ + if (out < maxVal1) + { + /* Update the maximum value */ + out = maxVal1; + } + + /* Decrement the loop counter */ + blkCnt--; + } + + /* Store the maximum value into destination pointer */ + *pResult = out; +} + +#endif /* #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of Max group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q31.c new file mode 100644 index 0000000..cffdd13 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q31.c @@ -0,0 +1,146 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_max_no_idx_q31.c + * Description: Maximum value of a q31 vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + + +/** + @ingroup groupStats + */ + +/** + @addtogroup Max + @{ + */ + +/** + @brief Maximum value of a q31 vector without index. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_max_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q31x4_t vecSrc; + q31_t const *pSrcVec; + q31x4_t curExtremValVec = vdupq_n_s32(Q31_MIN); + q31_t maxValue = Q31_MIN; + mve_pred16_t p0; + + + pSrcVec = (q31_t const *) pSrc; + blkCnt = blockSize >> 2; + while (blkCnt > 0) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + /* + * update per-lane max. 
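+     * (each lane of curExtremValVec keeps the larger of the incoming element and the running maximum)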
+ */ + curExtremValVec = vmaxq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 3; + if (blkCnt > 0) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + p0 = vctp32q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + curExtremValVec = vmaxq_m(curExtremValVec, vecSrc, curExtremValVec, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxvq(maxValue, curExtremValVec); + *pResult = maxValue; +} + +#else +void arm_max_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + q31_t maxVal1, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* loop counter */ + + /* Load first input value that act as reference value for comparision */ + out = *pSrc++; + + blkCnt = (blockSize - 1U); + + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal1 = *pSrc++; + + /* compare for the maximum value */ + if (out < maxVal1) + { + /* Update the maximum value */ + out = maxVal1; + } + + /* Decrement the loop counter */ + blkCnt--; + } + + /* Store the maximum value into destination pointer */ + *pResult = out; +} + +#endif /* #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of Max group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q7.c new file mode 100644 index 0000000..059acf5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_no_idx_q7.c @@ -0,0 +1,147 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_max_no_idx_q7.c + * Description: Maximum value of a q7 vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + + +/** + @ingroup groupStats + */ + +/** + @addtogroup Max + @{ + */ + +/** + @brief Maximum value of a q7 vector without index. 
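+ Only the value of the maximum is returned; arm_max_q7() additionally reports its position.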
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult maximum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_max_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q7x16_t vecSrc; + q7_t const *pSrcVec; + q7x16_t curExtremValVec = vdupq_n_s8(Q7_MIN); + q7_t maxValue = Q7_MIN; + mve_pred16_t p0; + + + pSrcVec = (q7_t const *) pSrc; + blkCnt = blockSize >> 4; + while (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + /* + * update per-lane max. + */ + curExtremValVec = vmaxq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 0xF; + if (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + p0 = vctp8q(blkCnt); + /* + * Get current max per lane and current index per lane + * when a max is selected + */ + curExtremValVec = vmaxq_m(curExtremValVec, vecSrc, curExtremValVec, p0); + } + /* + * Get max value across the vector + */ + maxValue = vmaxvq(maxValue, curExtremValVec); + *pResult = maxValue; +} + +#else + +void arm_max_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + q7_t maxVal1, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* loop counter */ + + /* Load first input value that act as reference value for comparision */ + out = *pSrc++; + + blkCnt = (blockSize - 1U); + + + while (blkCnt > 0U) + { + /* Initialize maxVal to the next consecutive values one by one */ + maxVal1 = *pSrc++; + + /* compare for the maximum value */ + if (out < maxVal1) + { + /* Update the maximum value */ + out = maxVal1; + } + + /* Decrement the loop counter */ + blkCnt--; + } + + /* Store the maximum value into destination pointer */ + *pResult = out; +} + +#endif /* #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of Max group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q15.c index 32663e3..9f30ece 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q15.c @@ -5,13 +5,13 @@ * Title: arm_max_q15.c * Description: Maximum value of a Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -57,78 +57,49 @@ void arm_max_q15( q15_t * pResult, uint32_t * pIndex) { - uint32_t blkCnt; /* loop counters */ - q15x8_t vecSrc; - q15x8_t curExtremValVec = vdupq_n_s16(Q15_MIN); - q15_t maxValue = Q15_MIN, temp; - uint32_t idx = blockSize; - uint16x8_t indexVec; - uint16x8_t curExtremIdxVec; - mve_pred16_t p0; - - - indexVec = vidupq_u16((uint32_t)0, 1); - curExtremIdxVec = vdupq_n_u16(0); - - blkCnt = blockSize >> 3; - while (blkCnt > 0U) - { - vecSrc = vldrhq_s16(pSrc); - pSrc += 8; + int32_t blkCnt; /* loop counters */ + q15x8_t extremValVec = vdupq_n_s16(Q15_MIN); + q15_t maxValue = Q15_MIN; + uint16x8_t indexVec; + uint16x8_t extremIdxVec; + mve_pred16_t p0; + uint16_t extremIdxArr[8]; + + indexVec = vidupq_u16(0U, 1); + + blkCnt = blockSize; + do { + mve_pred16_t p = vctp16q(blkCnt); + q15x8_t extremIdxVal = vld1q_z_s16(pSrc, p); /* * Get current max per lane and current index per lane * when a max is selected */ - p0 = vcmpgeq(vecSrc, curExtremValVec); - curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); - curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + p0 = vcmpgeq_m(extremIdxVal, extremValVec, p); - indexVec = indexVec + 8; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - - /* - * Get max value across the vector - */ - maxValue = vmaxvq(maxValue, curExtremValVec); - /* - * set index for lower values to max possible index - */ - p0 = vcmpgeq(curExtremValVec, maxValue); - indexVec = vpselq(curExtremIdxVec, vdupq_n_u16(blockSize), p0); - /* - * Get min index which is thus for a max value - */ - idx = vminvq(idx, indexVec); - - /* Tail */ - blkCnt = blockSize & 0x7; - while (blkCnt > 0U) - { - /* Initialize temp to the next consecutive values one by one */ - temp = *pSrc++; - - /* compare for the maximum value */ - if (maxValue < temp) - { - /* Update the maximum value and it's index */ - maxValue = temp; - idx = blockSize - blkCnt; - } - - /* Decrement loop counter */ - blkCnt--; + extremValVec = vorrq_m(extremValVec, extremIdxVal, extremIdxVal, p0); + /* store per-lane extrema indexes */ + vst1q_p_u16(extremIdxArr, indexVec, p0); + + indexVec += 8; + pSrc += 8; + blkCnt -= 8; } + while (blkCnt > 0); + - /* - * Save result - */ - *pIndex = idx; + /* Get max value across the vector */ + maxValue = vmaxvq(maxValue, extremValVec); + + /* set index for lower values to max possible index */ + p0 = vcmpgeq(extremValVec, maxValue); + extremIdxVec = vld1q_u16(extremIdxArr); + + indexVec = vpselq(extremIdxVec, vdupq_n_u16(blockSize - 1), p0); + *pIndex = vminvq(blockSize - 1, indexVec); *pResult = maxValue; } + #else void arm_max_q15( const q15_t * pSrc, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q31.c index 2b3288c..d0665a4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q31.c @@ -5,13 +5,13 @@ * Title: arm_max_q31.c * Description: Maximum value of a Q31 vector * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -52,86 +52,54 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" void arm_max_q31( - const q31_t * pSrc, - uint32_t blockSize, - q31_t * pResult, - uint32_t * pIndex) + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult, + uint32_t * pIndex) { - uint32_t blkCnt; /* loop counters */ - q31x4_t vecSrc; - q31x4_t curExtremValVec = vdupq_n_s32( Q31_MIN); - q31_t maxValue = Q31_MIN; - q31_t temp; - uint32_t idx = blockSize; - uint32x4_t indexVec; - uint32x4_t curExtremIdxVec; - mve_pred16_t p0; - - - indexVec = vidupq_u32((uint32_t)0, 1); - curExtremIdxVec = vdupq_n_u32(0); - - /* Compute 4 outputs at a time */ - blkCnt = blockSize >> 2U; - while (blkCnt > 0U) - { - vecSrc = vldrwq_s32(pSrc); - pSrc += 4; + int32_t blkCnt; /* loop counters */ + q31x4_t extremValVec = vdupq_n_s32(Q31_MIN); + q31_t maxValue = Q31_MIN; + uint32x4_t indexVec; + uint32x4_t extremIdxVec; + mve_pred16_t p0; + uint32_t extremIdxArr[4]; + + indexVec = vidupq_u32(0U, 1); + + blkCnt = blockSize; + do { + mve_pred16_t p = vctp32q(blkCnt); + q31x4_t extremIdxVal = vld1q_z_s32(pSrc, p); /* * Get current max per lane and current index per lane * when a max is selected */ - p0 = vcmpgeq(vecSrc, curExtremValVec); - curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); - curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + p0 = vcmpgeq_m(extremIdxVal, extremValVec, p); - indexVec = indexVec + 4; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - - /* - * Get max value across the vector - */ - maxValue = vmaxvq(maxValue, curExtremValVec); - /* - * set index for lower values to max possible index - */ - p0 = vcmpgeq(curExtremValVec, maxValue); - indexVec = vpselq(curExtremIdxVec, vdupq_n_u32(blockSize), p0); - /* - * Get min index which is thus for a max value - */ - idx = vminvq(idx, indexVec); - - /* Tail */ - blkCnt = blockSize & 0x3; - - while (blkCnt > 0U) - { - /* Initialize maxVal to the next consecutive values one by one */ - temp = *pSrc++; - - /* compare for the maximum value */ - if (maxValue < temp) - { - /* Update the maximum value and it's index */ - maxValue = temp; - idx = blockSize - blkCnt; - } - - /* Decrement loop counter */ - blkCnt--; + extremValVec = vorrq_m(extremValVec, extremIdxVal, extremIdxVal, p0); + /* store per-lane extrema indexes */ + vst1q_p_u32(extremIdxArr, indexVec, p0); + + indexVec += 4; + pSrc += 4; + blkCnt -= 4; } + while (blkCnt > 0); - /* - * Save result - */ - *pIndex = idx; + + /* Get max value across the vector */ + maxValue = vmaxvq(maxValue, extremValVec); + + /* set index for lower values to max possible index */ + p0 = vcmpgeq(extremValVec, maxValue); + extremIdxVec = vld1q_u32(extremIdxArr); + + indexVec = vpselq(extremIdxVec, vdupq_n_u32(blockSize - 1), p0); + *pIndex = vminvq(blockSize - 1, indexVec); *pResult = maxValue; } + #else void arm_max_q31( const q31_t * pSrc, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q7.c 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q7.c index 72fdf31..377db4a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_max_q7.c @@ -5,13 +5,13 @@ * Title: arm_max_q7.c * Description: Maximum value of a Q7 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -53,83 +53,50 @@ static void arm_small_blk_max_q7( const q7_t * pSrc, - uint8_t blockSize, + uint16_t blockSize, q7_t * pResult, uint32_t * pIndex) { - uint32_t blkCnt; /* loop counters */ - q7x16_t vecSrc; - q7x16_t curExtremValVec = vdupq_n_s8( Q7_MIN); - q7_t maxValue = Q7_MIN, temp; - uint32_t idx = blockSize; - uint8x16_t indexVec; - uint8x16_t curExtremIdxVec; - mve_pred16_t p0; - - - indexVec = vidupq_u8((uint32_t)0, 1); - curExtremIdxVec = vdupq_n_u8(0); - - blkCnt = blockSize >> 4; - while (blkCnt > 0U) - { - vecSrc = vldrbq_s8(pSrc); - pSrc += 16; + int32_t blkCnt; /* loop counters */ + q7x16_t extremValVec = vdupq_n_s8(Q7_MIN); + q7_t maxValue = Q7_MIN; + uint8x16_t indexVec; + uint8x16_t extremIdxVec; + mve_pred16_t p0; + uint8_t extremIdxArr[16]; + + indexVec = vidupq_u8(0U, 1); + + blkCnt = blockSize; + do { + mve_pred16_t p = vctp8q(blkCnt); + q7x16_t extremIdxVal = vld1q_z_s8(pSrc, p); /* * Get current max per lane and current index per lane * when a max is selected */ - p0 = vcmpgeq(vecSrc, curExtremValVec); - curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); - curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + p0 = vcmpgeq_m(extremIdxVal, extremValVec, p); - indexVec = indexVec + 16; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - - - /* - * Get max value across the vector - */ - maxValue = vmaxvq(maxValue, curExtremValVec); - /* - * set index for lower values to max possible index - */ - p0 = vcmpgeq(curExtremValVec, maxValue); - indexVec = vpselq(curExtremIdxVec, vdupq_n_u8(blockSize), p0); - /* - * Get min index which is thus for a max value - */ - idx = vminvq(idx, indexVec); - - /* - * tail - */ - blkCnt = blockSize & 0xF; - - while (blkCnt > 0U) - { - /* Initialize temp to the next consecutive values one by one */ - temp = *pSrc++; - - /* compare for the maximum value */ - if (maxValue < temp) - { - /* Update the maximum value and it's index */ - maxValue = temp; - idx = blockSize - blkCnt; - } - - /* Decrement loop counter */ - blkCnt--; + extremValVec = vorrq_m(extremValVec, extremIdxVal, extremIdxVal, p0); + /* store per-lane extrema indexes */ + vst1q_p_u8(extremIdxArr, indexVec, p0); + + indexVec += 16; + pSrc += 16; + blkCnt -= 16; } - /* - * Save result - */ - *pIndex = idx; + while (blkCnt > 0); + + + /* Get max value across the vector */ + maxValue = vmaxvq(maxValue, extremValVec); + + /* set index for lower values to max possible index */ + p0 = vcmpgeq(extremValVec, maxValue); + extremIdxVec = vld1q_u8(extremIdxArr); + + indexVec = vpselq(extremIdxVec, vdupq_n_u8(blockSize - 1), p0); + *pIndex = 
vminvq_u8(blockSize - 1, indexVec); *pResult = maxValue; } @@ -140,8 +107,9 @@ void arm_max_q7( uint32_t * pIndex) { int32_t totalSize = blockSize; + const uint16_t sub_blk_sz = UINT8_MAX + 1; - if (totalSize <= UINT8_MAX) + if (totalSize <= sub_blk_sz) { arm_small_blk_max_q7(pSrc, blockSize, pResult, pIndex); } @@ -154,11 +122,11 @@ void arm_max_q7( /* * process blocks of 255 elts */ - while (totalSize >= UINT8_MAX) + while (totalSize >= sub_blk_sz) { const q7_t *curSrc = pSrc; - arm_small_blk_max_q7(curSrc, UINT8_MAX, pResult, pIndex); + arm_small_blk_max_q7(curSrc, sub_blk_sz, pResult, pIndex); if (*pResult > curBlkExtr) { /* @@ -169,8 +137,8 @@ void arm_max_q7( curBlkIdx = curIdx; } curIdx++; - pSrc += UINT8_MAX; - totalSize -= UINT8_MAX; + pSrc += sub_blk_sz; + totalSize -= sub_blk_sz; } /* * remainder @@ -182,7 +150,7 @@ void arm_max_q7( curBlkPos = *pIndex; curBlkIdx = curIdx; } - *pIndex = curBlkIdx * UINT8_MAX + curBlkPos; + *pIndex = curBlkIdx * sub_blk_sz + curBlkPos; *pResult = curBlkExtr; } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f16.c index a2739ac..19ded7d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f16.c @@ -5,13 +5,13 @@ * Title: arm_mean_f16.c * Description: Mean value of a floating-point vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -87,7 +87,7 @@ void arm_mean_f16( } while (blkCnt > 0); - *pResult = vecAddAcrossF16Mve(sumVec) / (float16_t) blockSize; + *pResult = (_Float16)vecAddAcrossF16Mve(sumVec) / (_Float16) blockSize; } @@ -109,13 +109,13 @@ void arm_mean_f16( while (blkCnt > 0U) { /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ - sum += *pSrc++; + sum += (_Float16)*pSrc++; - sum += *pSrc++; + sum += (_Float16)*pSrc++; - sum += *pSrc++; + sum += (_Float16)*pSrc++; - sum += *pSrc++; + sum += (_Float16)*pSrc++; /* Decrement the loop counter */ blkCnt--; @@ -134,7 +134,7 @@ void arm_mean_f16( while (blkCnt > 0U) { /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ - sum += *pSrc++; + sum += (_Float16)*pSrc++; /* Decrement loop counter */ blkCnt--; @@ -142,7 +142,7 @@ void arm_mean_f16( /* C = (A[0] + A[1] + A[2] + ... 
+ A[blockSize-1]) / blockSize */ /* Store result to destination */ - *pResult = (sum / (float16_t)blockSize); + *pResult = ((_Float16)sum / (_Float16)blockSize); } #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f32.c index 79bf476..99c6dbe 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f32.c @@ -5,13 +5,13 @@ * Title: arm_mean_f32.c * Description: Mean value of a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f64.c new file mode 100644 index 0000000..cb91116 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_f64.c @@ -0,0 +1,79 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mean_f64.c + * Description: Mean value of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup mean + @{ + */ + +/** + @brief Mean value of a floating-point vector. + @param[in] pSrc points to the input vector. + @param[in] blockSize number of samples in input vector. + @param[out] pResult mean value returned here. 
+ @return none + */ +void arm_mean_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + float64_t sum = 0.; /* Temporary result storage */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + sum += *pSrc++; + + /* Decrement loop counter */ + blkCnt--; + } + + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) / blockSize */ + /* Store result to destination */ + *pResult = (sum / blockSize); +} + +/** + @} end of mean group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q15.c index de20f9a..0eefbdb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q15.c @@ -5,13 +5,13 @@ * Title: arm_mean_q15.c * Description: Mean value of a Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -116,11 +116,11 @@ void arm_mean_q15( while (blkCnt > 0U) { /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); sum += ((in << 16U) >> 16U); sum += (in >> 16U); - in = read_q15x2_ia ((q15_t **) &pSrc); + in = read_q15x2_ia (&pSrc); sum += ((in << 16U) >> 16U); sum += (in >> 16U); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q31.c index 03e2327..1b95ce5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q31.c @@ -5,13 +5,13 @@ * Title: arm_mean_q31.c * Description: Mean value of a Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -90,7 +90,7 @@ void arm_mean_q31( blkCnt --; } - *pResult = arm_div_q63_to_q31(sum, blockSize); + *pResult = arm_div_int64_to_int32(sum, blockSize); } #else void arm_mean_q31( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q7.c index 44ca51d..5ac4517 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mean_q7.c @@ -5,13 +5,13 @@ * Title: arm_mean_q7.c * Description: Mean value of a Q7 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -115,7 +115,7 @@ void arm_mean_q7( while (blkCnt > 0U) { /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ - in = read_q7x4_ia ((q7_t **) &pSrc); + in = read_q7x4_ia (&pSrc); sum += ((in << 24U) >> 24U); sum += ((in << 16U) >> 24U); sum += ((in << 8U) >> 24U); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f16.c index 46ddb94..4e08799 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f16.c @@ -5,13 +5,13 @@ * Title: arm_min_f16.c * Description: Minimum value of a floating-point vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -167,7 +167,7 @@ void arm_min_f16( minVal = *pSrc++; /* compare for the minimum value */ - if (out > minVal) + if ((_Float16)out > (_Float16)minVal) { /* Update the minimum value and it's index */ out = minVal; @@ -175,21 +175,21 @@ void arm_min_f16( } minVal = *pSrc++; - if (out > minVal) + if ((_Float16)out > (_Float16)minVal) { out = minVal; outIndex = index + 2U; } minVal = *pSrc++; - if (out > minVal) + if ((_Float16)out > (_Float16)minVal) { out = minVal; outIndex = index + 3U; } minVal = *pSrc++; - if (out > minVal) + if ((_Float16)out > (_Float16)minVal) { out = minVal; outIndex = index + 4U; @@ -217,7 +217,7 @@ void arm_min_f16( minVal = *pSrc++; /* compare for the minimum value */ - if (out > minVal) + if ((_Float16)out > (_Float16)minVal) { /* Update the minimum value and it's index */ out = minVal; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f32.c index 6c49822..b581473 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f32.c @@ -5,13 +5,13 @@ * Title: arm_min_f32.c * Description: Minimum value of a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -168,7 +168,7 @@ void arm_min_f32( uint32x4_t countV; uint32x2_t countV2; - maxIdx = vdupq_n_u32(ULONG_MAX); + maxIdx = vdupq_n_u32(UINT_MAX); delta = vdupq_n_u32(4); index = vld1q_u32(indexInit); countV = vld1q_u32(countVInit); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f64.c new file mode 100644 index 0000000..525470f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_f64.c @@ -0,0 +1,94 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_min_f64.c + * Description: Minimum value of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup Min + @{ + */ + +/** + @brief Minimum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @param[out] pIndex index of minimum value returned here + @return none + */ +void arm_min_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult, + uint32_t * pIndex) +{ + float64_t minVal, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt, outIndex; /* Loop counter */ + + /* Initialise index value to zero. */ + outIndex = 0U; + + /* Load first input value that act as reference value for comparision */ + out = *pSrc++; + + /* Initialize blkCnt with number of samples */ + blkCnt = (blockSize - 1U); + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal = *pSrc++; + + /* compare for the minimum value */ + if (out > minVal) + { + /* Update the minimum value and it's index */ + out = minVal; + outIndex = blockSize - blkCnt; + } + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store the minimum value and it's index into destination pointers */ + *pResult = out; + *pIndex = outIndex; +} + +/** + @} end of Min group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f16.c new file mode 100644 index 0000000..a2a64db --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f16.c @@ -0,0 +1,148 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_min_no_idx_f16.c + * Description: Minimum value of a floating-point vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + +/** + @ingroup groupStats + */ + + +/** + @addtogroup Min + @{ + */ + +/** + @brief Minimum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) + +void arm_min_no_idx_f16( + const float16_t *pSrc, + uint32_t blockSize, + float16_t *pResult) +{ + f16x8_t vecSrc; + f16x8_t curExtremValVec = vdupq_n_f16(F16_MAX); + float16_t minValue = F16_MAX; + float16_t newVal; + uint32_t blkCnt; + + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = blockSize >> 3U; + + while (blkCnt > 0U) + { + + vecSrc = vldrhq_f16(pSrc); + /* + * update per-lane min. + */ + curExtremValVec = vminnmq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + * Advance vector source and destination pointers + */ + pSrc += 8; + blkCnt --; + } + /* + * Get min value across the vector + */ + minValue = vminnmvq(minValue, curExtremValVec); + + blkCnt = blockSize & 7; + + while (blkCnt > 0U) + { + newVal = *pSrc++; + + /* compare for the minimum value */ + if ((_Float16)minValue > (_Float16)newVal) + { + /* Update the minimum value and it's index */ + minValue = newVal; + } + + blkCnt --; + } + + *pResult = minValue; +} + +#else + +void arm_min_no_idx_f16( + const float16_t *pSrc, + uint32_t blockSize, + float16_t *pResult) +{ + float16_t minValue = F16_MAX; + float16_t newVal; + + while (blockSize > 0U) + { + newVal = *pSrc++; + + /* compare for the minimum value */ + if ((_Float16)minValue > (_Float16)newVal) + { + /* Update the minimum value and it's index */ + minValue = newVal; + } + + blockSize --; + } + + *pResult = minValue; +} + +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ + +/** + @} end of Min group + */ + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f32.c new file mode 100644 index 0000000..eafae73 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f32.c @@ -0,0 +1,142 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_min_no_idx_f32.c + * Description: Minimum value of a floating-point vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" +#if (defined(ARM_MATH_NEON) || defined(ARM_MATH_MVEF)) && !defined(ARM_MATH_AUTOVECTORIZE) +#include +#endif + +/** + @ingroup groupStats + */ + + +/** + @addtogroup Min + @{ + */ + +/** + @brief Minimum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) + +void arm_min_no_idx_f32( + const float32_t *pSrc, + uint32_t blockSize, + float32_t *pResult) +{ + f32x4_t vecSrc; + f32x4_t curExtremValVec = vdupq_n_f32(F32_MAX); + float32_t minValue = F32_MAX; + float32_t newVal; + uint32_t blkCnt; + + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = blockSize >> 2U; + + while (blkCnt > 0U) + { + + vecSrc = vldrwq_f32(pSrc); + /* + * update per-lane min. + */ + curExtremValVec = vminnmq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + * Advance vector source and destination pointers + */ + pSrc += 4; + blkCnt --; + } + /* + * Get min value across the vector + */ + minValue = vminnmvq(minValue, curExtremValVec); + + blkCnt = blockSize & 3; + + while (blkCnt > 0U) + { + newVal = *pSrc++; + + /* compare for the minimum value */ + if (minValue > newVal) + { + /* Update the minimum value and it's index */ + minValue = newVal; + } + + blkCnt --; + } + + *pResult = minValue; +} + +#else + +void arm_min_no_idx_f32( + const float32_t *pSrc, + uint32_t blockSize, + float32_t *pResult) +{ + float32_t minValue = F32_MAX; + float32_t newVal; + + while (blockSize > 0U) + { + newVal = *pSrc++; + + /* compare for the minimum value */ + if (minValue > newVal) + { + /* Update the minimum value and it's index */ + minValue = newVal; + } + + blockSize --; + } + + *pResult = minValue; +} + +#endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ + +/** + @} end of Min group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f64.c new file mode 100644 index 0000000..5e3317e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_f64.c @@ -0,0 +1,79 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_min_no_idx_f64.c + * Description: Maximum value of a floating-point vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup Min + @{ + */ + +/** + @brief Maximum value of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ +void arm_min_no_idx_f64( + const float64_t *pSrc, + uint32_t blockSize, + float64_t *pResult) +{ + float64_t minValue = F64_MAX; + float64_t newVal; + + while (blockSize > 0U) + { + newVal = *pSrc++; + + /* compare for the minimum value */ + if (minValue > newVal) + { + /* Update the minimum value and it's index */ + minValue = newVal; + } + + blockSize --; + } + + *pResult = minValue; +} + +/** + @} end of Min group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q15.c new file mode 100644 index 0000000..f588e70 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q15.c @@ -0,0 +1,146 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_min_no_idx_q15.c + * Description: Minimum value of a q15 vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + + +/** + @ingroup groupStats + */ + +/** + @addtogroup Min + @{ + */ + +/** + @brief Minimum value of a q15 vector without index. 
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_min_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q15x8_t vecSrc; + q15_t const *pSrcVec; + q15x8_t curExtremValVec = vdupq_n_s16(Q15_MAX); + q15_t minValue = Q15_MAX; + mve_pred16_t p0; + + + pSrcVec = (q15_t const *) pSrc; + blkCnt = blockSize >> 3; + while (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + /* + * update per-lane min. + */ + curExtremValVec = vminq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 7; + if (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 8; + p0 = vctp16q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + curExtremValVec = vminq_m(curExtremValVec, vecSrc, curExtremValVec, p0); + } + /* + * Get min value across the vector + */ + minValue = vminvq(minValue, curExtremValVec); + *pResult = minValue; +} + +#else +void arm_min_no_idx_q15( + const q15_t * pSrc, + uint32_t blockSize, + q15_t * pResult) +{ + q15_t minVal1, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* loop counter */ + + /* Load first input value that act as reference value for comparision */ + out = *pSrc++; + + blkCnt = (blockSize - 1U); + + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal1 = *pSrc++; + + /* compare for the minimum value */ + if (out > minVal1) + { + /* Update the minimum value */ + out = minVal1; + } + + /* Decrement the loop counter */ + blkCnt--; + } + + /* Store the minimum value into destination pointer */ + *pResult = out; +} + +#endif /* #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of Min group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q31.c new file mode 100644 index 0000000..b00a5ba --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q31.c @@ -0,0 +1,145 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_min_no_idx_q31.c + * Description: Minimum value of a q31 vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + + +/** + @ingroup groupStats + */ + +/** + @addtogroup Min + @{ + */ + +/** + @brief Minimum value of a q31 vector without index. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ + +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" +void arm_min_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q31x4_t vecSrc; + q31_t const *pSrcVec; + q31x4_t curExtremValVec = vdupq_n_s32(Q31_MAX); + q31_t minValue = Q31_MAX; + mve_pred16_t p0; + + + pSrcVec = (q31_t const *) pSrc; + blkCnt = blockSize >> 2; + while (blkCnt > 0) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + /* + * update per-lane min. + */ + curExtremValVec = vminq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 3; + if (blkCnt > 0) + { + vecSrc = vldrwq_s32(pSrcVec); + pSrcVec += 4; + p0 = vctp32q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + curExtremValVec = vminq_m(curExtremValVec, vecSrc, curExtremValVec, p0); + } + /* + * Get min value across the vector + */ + minValue = vminvq(minValue, curExtremValVec); + *pResult = minValue; +} + +#else +void arm_min_no_idx_q31( + const q31_t * pSrc, + uint32_t blockSize, + q31_t * pResult) +{ + q31_t minVal1, out; /* Temporary variables to store the output value. 
*/ + uint32_t blkCnt; /* loop counter */ + + /* Load first input value that act as reference value for comparision */ + out = *pSrc++; + + blkCnt = (blockSize - 1U); + + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal1 = *pSrc++; + + /* compare for the minimum value */ + if (out > minVal1) + { + /* Update the minimum value */ + out = minVal1; + } + + /* Decrement the loop counter */ + blkCnt--; + } + + /* Store the minimum value into destination pointer */ + *pResult = out; +} + +#endif /* #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of Min group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q7.c new file mode 100644 index 0000000..e0a8396 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_no_idx_q7.c @@ -0,0 +1,145 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_min_no_idx_q7.c + * Description: Minimum value of a q7 vector without returning the index + * + * $Date: 16 November 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + + +/** + @ingroup groupStats + */ + +/** + @addtogroup Min + @{ + */ + +/** + @brief Minimum value of a q7 vector without index. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult minimum value returned here + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_min_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + int32_t blkCnt; /* loop counters */ + q7x16_t vecSrc; + q7_t const *pSrcVec; + q7x16_t curExtremValVec = vdupq_n_s8(Q7_MAX); + q7_t minValue = Q7_MAX; + mve_pred16_t p0; + + + pSrcVec = (q7_t const *) pSrc; + blkCnt = blockSize >> 4; + while (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + /* + * update per-lane min. 
+ */ + curExtremValVec = vminq(vecSrc, curExtremValVec); + /* + * Decrement the blockSize loop counter + */ + blkCnt--; + } + /* + * tail + * (will be merged thru tail predication) + */ + blkCnt = blockSize & 0xF; + if (blkCnt > 0) + { + vecSrc = vld1q(pSrcVec); + pSrcVec += 16; + p0 = vctp8q(blkCnt); + /* + * Get current min per lane and current index per lane + * when a min is selected + */ + curExtremValVec = vminq_m(curExtremValVec, vecSrc, curExtremValVec, p0); + } + /* + * Get min value across the vector + */ + minValue = vminvq(minValue, curExtremValVec); + *pResult = minValue; +} + +#else +void arm_min_no_idx_q7( + const q7_t * pSrc, + uint32_t blockSize, + q7_t * pResult) +{ + q7_t minVal1, out; /* Temporary variables to store the output value. */ + uint32_t blkCnt; /* loop counter */ + + /* Load first input value that act as reference value for comparision */ + out = *pSrc++; + + blkCnt = (blockSize - 1U); + + + while (blkCnt > 0U) + { + /* Initialize minVal to the next consecutive values one by one */ + minVal1 = *pSrc++; + + /* compare for the minimum value */ + if (out > minVal1) + { + /* Update the minimum value */ + out = minVal1; + } + + /* Decrement the loop counter */ + blkCnt--; + } + + /* Store the minimum value into destination pointer */ + *pResult = out; +} + +#endif /* #if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) */ +/** + @} end of Min group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q15.c index 3f4a59f..3a4d99e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q15.c @@ -5,13 +5,13 @@ * Title: arm_min_q15.c * Description: Minimum value of a Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -58,79 +58,48 @@ void arm_min_q15( q15_t * pResult, uint32_t * pIndex) { - uint32_t blkCnt; /* loop counters */ - q15x8_t vecSrc; - q15x8_t curExtremValVec = vdupq_n_s16(Q15_MAX); - q15_t minValue = Q15_MAX,temp; - uint32_t idx = blockSize; - uint16x8_t indexVec; - uint16x8_t curExtremIdxVec; - mve_pred16_t p0; + int32_t blkCnt; /* loop counters */ + q15x8_t extremValVec = vdupq_n_s16(Q15_MAX); + q15_t minValue = Q15_MAX; + uint16x8_t indexVec; + uint16x8_t extremIdxVec; + mve_pred16_t p0; + uint16_t extremIdxArr[8]; - indexVec = vidupq_u16((uint32_t)0, 1); - curExtremIdxVec = vdupq_n_u16(0); + indexVec = vidupq_u16(0U, 1); - blkCnt = blockSize >> 3; - while (blkCnt > 0U) - { - vecSrc = vldrhq_s16(pSrc); - pSrc += 8; + blkCnt = blockSize; + do { + mve_pred16_t p = vctp16q(blkCnt); + q15x8_t extremIdxVal = vld1q_z_s16(pSrc, p); /* * Get current min per lane and current index per lane * when a min is selected */ - p0 = vcmpleq(vecSrc, curExtremValVec); - curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); - curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + p0 = vcmpleq_m(extremIdxVal, extremValVec, p); - indexVec = indexVec + 8; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - - /* - * Get min value across the vector - */ - minValue = vminvq(minValue, curExtremValVec); - /* - * set index for lower values to min possible index - */ - p0 = vcmpleq(curExtremValVec, minValue); - indexVec = vpselq(curExtremIdxVec, vdupq_n_u16(blockSize), p0); - /* - * Get min index which is thus for a min value - */ - idx = vminvq(idx, indexVec); - - /* - * tail - */ - blkCnt = blockSize & 7; - while (blkCnt > 0U) - { - /* Initialize minVal to the next consecutive values one by one */ - temp = *pSrc++; - - /* compare for the minimum value */ - if (minValue > temp) - { - /* Update the minimum value and it's index */ - minValue = temp; - idx = blockSize - blkCnt; - } - - /* Decrement loop counter */ - blkCnt--; + extremValVec = vorrq_m(extremValVec, extremIdxVal, extremIdxVal, p0); + /* store per-lane extrema indexes */ + vst1q_p_u16(extremIdxArr, indexVec, p0); + + indexVec += 8; + pSrc += 8; + blkCnt -= 8; } + while (blkCnt > 0); + + /* Get min value across the vector */ + minValue = vminvq(minValue, extremValVec); + + /* set index for lower values to min possible index */ + p0 = vcmpleq(extremValVec, minValue); + extremIdxVec = vld1q_u16(extremIdxArr); - /* - * Save result - */ - *pIndex = idx; + indexVec = vpselq(extremIdxVec, vdupq_n_u16(blockSize - 1), p0); + *pIndex = vminvq(blockSize - 1, indexVec); *pResult = minValue; + } #else void arm_min_q15( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q31.c index df96c95..7c889e5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q31.c @@ -5,13 +5,13 @@ * Title: arm_min_q31.c * Description: Minimum value of a Q31 vector * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -58,79 +58,49 @@ void arm_min_q31( q31_t * pResult, uint32_t * pIndex) { - uint32_t blkCnt; /* loop counters */ - q31x4_t vecSrc; - q31x4_t curExtremValVec = vdupq_n_s32(Q31_MAX); - q31_t minValue = Q31_MAX, temp; - uint32_t idx = blockSize; - uint32x4_t indexVec; - uint32x4_t curExtremIdxVec; - mve_pred16_t p0; - - - indexVec = vidupq_u32((uint32_t)0, 1); - curExtremIdxVec = vdupq_n_u32(0); - - /* Compute 4 outputs at a time */ - blkCnt = blockSize >> 2U; - while (blkCnt > 0U) - { - vecSrc = vldrwq_s32(pSrc); - pSrc += 4; + int32_t blkCnt; /* loop counters */ + q31x4_t extremValVec = vdupq_n_s32(Q31_MAX); + q31_t minValue = Q31_MAX; + uint32x4_t indexVec; + uint32x4_t extremIdxVec; + mve_pred16_t p0; + uint32_t extremIdxArr[4]; + + indexVec = vidupq_u32(0U, 1); + + blkCnt = blockSize; + do { + mve_pred16_t p = vctp32q(blkCnt); + q31x4_t extremIdxVal = vld1q_z_s32(pSrc, p); /* * Get current min per lane and current index per lane * when a min is selected */ - p0 = vcmpleq(vecSrc, curExtremValVec); - curExtremValVec = vpselq(vecSrc, curExtremValVec, p0); - curExtremIdxVec = vpselq(indexVec, curExtremIdxVec, p0); + p0 = vcmpleq_m(extremIdxVal, extremValVec, p); - indexVec = indexVec + 4; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - - /* - * Get min value across the vector - */ - minValue = vminvq(minValue, curExtremValVec); - /* - * set index for lower values to min possible index - */ - p0 = vcmpleq(curExtremValVec, minValue); - indexVec = vpselq(curExtremIdxVec, vdupq_n_u32(blockSize), p0); - /* - * Get min index which is thus for a min value - */ - idx = vminvq(idx, indexVec); - - - /* Tail */ - blkCnt = blockSize & 0x3; - while (blkCnt > 0U) - { - /* Initialize temp to the next consecutive values one by one */ - temp = *pSrc++; - - /* compare for the minimum value */ - if (minValue > temp) - { - /* Update the minimum value and it's index */ - minValue = temp; - idx = blockSize - blkCnt; - } - - /* Decrement loop counter */ - blkCnt--; + extremValVec = vorrq_m(extremValVec, extremIdxVal, extremIdxVal, p0); + /* store per-lane extrema indexes */ + vst1q_p_u32(extremIdxArr, indexVec, p0); + + indexVec += 4; + pSrc += 4; + blkCnt -= 4; } - /* - * Save result - */ - *pIndex = idx; + while (blkCnt > 0); + + + /* Get min value across the vector */ + minValue = vminvq(minValue, extremValVec); + + /* set index for lower values to min possible index */ + p0 = vcmpleq(extremValVec, minValue); + extremIdxVec = vld1q_u32(extremIdxArr); + + indexVec = vpselq(extremIdxVec, vdupq_n_u32(blockSize - 1), p0); + *pIndex = vminvq(blockSize - 1, indexVec); *pResult = minValue; } + #else void arm_min_q31( const q31_t * pSrc, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q7.c index 25e607f..6d8451b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q7.c +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_min_q7.c @@ -5,13 +5,13 @@ * Title: arm_min_q7.c * Description: Minimum value of a Q7 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f16.c new file mode 100644 index 0000000..20c8083 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f16.c @@ -0,0 +1,207 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mse_f16.c + * Description: Half floating point mean square error + * + * $Date: 05 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup MSE + @{ + */ + +/** + @brief Mean square error between two half floating point vectors. 
+ @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] result mean square error + @return none + */ + +#if !defined(ARM_MATH_AUTOVECTORIZE) + +#if defined(ARM_MATH_MVE_FLOAT16) +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_mse_f16( + const float16_t * pSrcA, + const float16_t * pSrcB, + uint32_t blockSize, + float16_t * result) + +{ + float16x8_t vecA, vecB; + float16x8_t vecSum; + uint32_t blkCnt; + _Float16 sum = 0.0f16; + vecSum = vdupq_n_f16(0.0f16); + + blkCnt = (blockSize) >> 3; + while (blkCnt > 0U) + { + vecA = vld1q(pSrcA); + pSrcA += 8; + + vecB = vld1q(pSrcB); + pSrcB += 8; + + vecA = vsubq(vecA, vecB); + + vecSum = vfmaq(vecSum, vecA, vecA); + /* + * Decrement the blockSize loop counter + */ + blkCnt --; + } + + + blkCnt = (blockSize) & 7; + if (blkCnt > 0U) + { + mve_pred16_t p0 = vctp16q(blkCnt); + vecA = vld1q(pSrcA); + vecB = vld1q(pSrcB); + + vecA = vsubq(vecA, vecB); + vecSum = vfmaq_m(vecSum, vecA, vecA, p0); + } + + sum = vecAddAcrossF16Mve(vecSum); + + /* Store result in destination buffer */ + *result = (_Float16)sum / (_Float16)blockSize; + +} + +#endif + + +#endif /*#if !defined(ARM_MATH_AUTOVECTORIZE)*/ + + +#if defined(ARM_FLOAT16_SUPPORTED) + +#if (!defined(ARM_MATH_MVE_FLOAT16)) || defined(ARM_MATH_AUTOVECTORIZE) + + + +void arm_mse_f16( + const float16_t * pSrcA, + const float16_t * pSrcB, + uint32_t blockSize, + float16_t * result) + +{ + uint32_t blkCnt; /* Loop counter */ + _Float16 inA, inB; + _Float16 sum = 0.0f16; /* Temporary return variable */ +#if defined (ARM_MATH_LOOPUNROLL) + blkCnt = (blockSize) >> 3; + + + while (blkCnt > 0U) + { + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + /* Decrement loop counter */ + blkCnt--; + } + + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = (blockSize) & 7; +#else + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; +#endif + while (blkCnt > 0U) + { + inA = *pSrcA++; + inB = *pSrcB++; + inA = (_Float16)inA - (_Float16)inB; + sum += (_Float16)inA * (_Float16)inA; + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result in destination buffer */ + *result = (_Float16)sum / (_Float16)blockSize; +} + +#endif /* end of test for vector instruction availability */ + +#endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ +/** + @} end of MSE group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f32.c 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f32.c new file mode 100644 index 0000000..622abb5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f32.c @@ -0,0 +1,251 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mse_f32.c + * Description: Floating point mean square error + * + * $Date: 05 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup MSE + @{ + */ + +/** + @brief Mean square error between two floating point vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] result mean square error + @return none + */ + +#if !defined(ARM_MATH_AUTOVECTORIZE) + +#if defined(ARM_MATH_MVEF) +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" + +void arm_mse_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + uint32_t blockSize, + float32_t * result) + +{ + float32x4_t vecA, vecB; + float32x4_t vecSum; + uint32_t blkCnt; + float32_t sum = 0.0f; + vecSum = vdupq_n_f32(0.0f); + + /* Compute 4 outputs at a time */ + blkCnt = (blockSize) >> 2; + while (blkCnt > 0U) + { + vecA = vld1q(pSrcA); + pSrcA += 4; + + vecB = vld1q(pSrcB); + pSrcB += 4; + + vecA = vsubq(vecA, vecB); + + vecSum = vfmaq(vecSum, vecA, vecA); + /* + * Decrement the blockSize loop counter + */ + blkCnt --; + } + + + blkCnt = (blockSize) & 3; + if (blkCnt > 0U) + { + mve_pred16_t p0 = vctp32q(blkCnt); + vecA = vld1q(pSrcA); + vecB = vld1q(pSrcB); + + vecA = vsubq(vecA, vecB); + vecSum = vfmaq_m(vecSum, vecA, vecA, p0); + } + + sum = vecAddAcrossF32Mve(vecSum); + + /* Store result in destination buffer */ + *result = sum / blockSize; + +} + +#endif + +#if defined(ARM_MATH_NEON) +void arm_mse_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + uint32_t blockSize, + float32_t * result) + +{ + float32x4_t vecA, vecB; + float32x4_t vecSum; + uint32_t blkCnt; + float32_t inA, inB; + float32_t sum = 0.0f; + vecSum = vdupq_n_f32(0.0f); +#if !defined(__aarch64__) + f32x2_t tmp = vdup_n_f32(0.0f); +#endif + + /* Compute 4 outputs at a time */ + blkCnt = (blockSize) >> 2; + while (blkCnt > 0U) + { + vecA = vld1q_f32(pSrcA); + pSrcA += 4; + + vecB = vld1q_f32(pSrcB); + pSrcB += 4; + + vecA = vsubq_f32(vecA, vecB); + + vecSum = vfmaq_f32(vecSum, vecA, vecA); + /* + * Decrement the 
blockSize loop counter + */ + blkCnt --; + } + +#if defined(__aarch64__) + sum = vpadds_f32(vpadd_f32(vget_low_f32(vecSum), vget_high_f32(vecSum))); +#else + tmp = vpadd_f32(vget_low_f32(vecSum), vget_high_f32(vecSum)); + sum = vget_lane_f32(tmp, 0) + vget_lane_f32(tmp, 1); + +#endif + + blkCnt = (blockSize) & 3; + while (blkCnt > 0U) + { + /* Calculate dot product and store result in a temporary buffer. */ + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result in destination buffer */ + *result = sum / blockSize; + +} +#endif + +#endif /*#if !defined(ARM_MATH_AUTOVECTORIZE)*/ + + + +#if (!defined(ARM_MATH_MVEF) && !defined(ARM_MATH_NEON)) || defined(ARM_MATH_AUTOVECTORIZE) + + +void arm_mse_f32( + const float32_t * pSrcA, + const float32_t * pSrcB, + uint32_t blockSize, + float32_t * result) + +{ + uint32_t blkCnt; /* Loop counter */ + float32_t inA, inB; + float32_t sum = 0.0f; /* Temporary return variable */ +#if defined (ARM_MATH_LOOPUNROLL) + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = (blockSize) >> 2; + + /* First part of the processing with loop unrolling. Compute 4 outputs at a time. + ** a second loop below computes the remaining 1 to 3 samples. */ + while (blkCnt > 0U) + { + + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + /* Decrement loop counter */ + blkCnt--; + } + + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = (blockSize) & 3; +#else + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; +#endif + while (blkCnt > 0U) + { + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result in destination buffer */ + *result = sum / blockSize; +} + +#endif /* end of test for vector instruction availability */ + +/** + @} end of MSE group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f64.c new file mode 100644 index 0000000..d63674b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_f64.c @@ -0,0 +1,114 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mse_f64.c + * Description: Double floating point mean square error + * + * $Date: 05 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup MSE + @{ + */ + +/** + @brief Mean square error between two double floating point vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] result mean square error + @return none + */ + + + + + +void arm_mse_f64( + const float64_t * pSrcA, + const float64_t * pSrcB, + uint32_t blockSize, + float64_t * result) + +{ + uint32_t blkCnt; /* Loop counter */ + float64_t inA, inB; + float64_t sum = 0.0; /* Temporary return variable */ +#if defined (ARM_MATH_LOOPUNROLL) + blkCnt = (blockSize) >> 1; + + + while (blkCnt > 0U) + { + + + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + /* Decrement loop counter */ + blkCnt--; + } + + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = (blockSize) & 1; +#else + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; +#endif + while (blkCnt > 0U) + { + inA = *pSrcA++; + inB = *pSrcB++; + inA = inA - inB; + sum += inA * inA; + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result in destination buffer */ + *result = sum / blockSize; +} + + +/** + @} end of MSE group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q15.c new file mode 100644 index 0000000..3412a4f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q15.c @@ -0,0 +1,179 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mse_q15.c + * Description: Mean square error between two Q15 vectors + * + * $Date: 04 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
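Usage sketch for the floating-point MSE kernels added above (arm_mse_f32 and arm_mse_f64): both compute sum((A[i] - B[i])^2) / blockSize. The snippet below is a minimal caller, not part of the CMSIS sources themselves; the include path assumes the vendored Edge Impulse SDK layout used throughout these files.

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h"

/* Sketch: mean square error of two 4-sample float32 buffers. */
static float32_t mse_f32_example(void)
{
    const float32_t a[4] = { 1.0f, 2.0f, 3.0f, 4.0f };
    const float32_t b[4] = { 1.5f, 2.0f, 2.5f, 4.0f };
    float32_t mse;

    /* (0.25 + 0.0 + 0.25 + 0.0) / 4 = 0.125 */
    arm_mse_f32(a, b, 4, &mse);
    return mse;
}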
+ */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup MSE + @{ + */ + +/** + @brief Mean square error between two Q15 vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) +void arm_mse_q15( + const q15_t * pSrcA, + const q15_t * pSrcB, + uint32_t blockSize, + q15_t * pResult) +{ + uint32_t blkCnt; /* loop counters */ + q15x8_t vecSrcA,vecSrcB; + q63_t sum = 0LL; + + blkCnt = blockSize >> 3U; + while (blkCnt > 0U) + { + vecSrcA = vld1q(pSrcA); + vecSrcB = vld1q(pSrcB); + + vecSrcA = vshrq(vecSrcA,1); + vecSrcB = vshrq(vecSrcB,1); + + vecSrcA = vqsubq(vecSrcA,vecSrcB); + /* + * sum lanes + */ + sum = vmlaldavaq(sum, vecSrcA, vecSrcA); + + blkCnt--; + pSrcA += 8; + pSrcB += 8; + } + + /* + * tail + */ + blkCnt = blockSize & 7; + if (blkCnt > 0U) + { + mve_pred16_t p0 = vctp16q(blkCnt); + vecSrcA = vld1q(pSrcA); + vecSrcB = vld1q(pSrcB); + + vecSrcA = vshrq(vecSrcA,1); + vecSrcB = vshrq(vecSrcB,1); + + vecSrcA = vqsubq(vecSrcA,vecSrcB); + + sum = vmlaldavaq_p(sum, vecSrcA, vecSrcA, p0); + } + + + + *pResult = (q15_t) __SSAT((q31_t) (sum / blockSize)>>13, 16); +} +#else +void arm_mse_q15( + const q15_t * pSrcA, + const q15_t * pSrcB, + uint32_t blockSize, + q15_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + q63_t sum = 0; /* Temporary result storage */ + q15_t inA,inB; /* Temporary variable to store input value */ + + +#if defined (ARM_MATH_LOOPUNROLL) + + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = blockSize >> 2U; + + while (blkCnt > 0U) + { + + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q15_t) __SSAT(((q31_t) inA - (q31_t)inB), 16); + sum += (q63_t)((q31_t) inA * inA); + + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q15_t) __SSAT(((q31_t) inA - (q31_t)inB), 16); + sum += (q63_t)((q31_t) inA * inA); + + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q15_t) __SSAT(((q31_t) inA - (q31_t)inB), 16); + sum += (q63_t)((q31_t) inA * inA); + + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q15_t) __SSAT(((q31_t) inA - (q31_t)inB), 16); + sum += (q63_t)((q31_t) inA * inA); + + /* Decrement loop counter */ + blkCnt--; + } + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = blockSize % 0x4U; + +#else + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + +#endif /* #if defined (ARM_MATH_LOOPUNROLL) */ + + while (blkCnt > 0U) + { + + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q15_t) __SSAT(((q31_t) inA - (q31_t)inB), 16); + sum += (q63_t)((q31_t) inA * inA); + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result in q15 format */ + *pResult = (q15_t) __SSAT((q31_t) (sum / blockSize)>>13, 16); +} +#endif /* defined(ARM_MATH_MVEI) */ + +/** + @} end of MSE group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q31.c new file mode 100644 index 0000000..f89a768 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q31.c @@ -0,0 +1,180 @@ +#include "edge-impulse-sdk/dsp/config.hpp" 
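For the fixed-point variant added above, arm_mse_q15 halves both inputs internally (one bit of headroom before the saturating subtract-and-square) and rescales the accumulator back to Q15 before storing it, so the caller just passes plain Q15 buffers. A sketch of such a caller (not taken from the CMSIS sources; include path as above):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h"

/* Sketch: MSE of two Q15 buffers (0.5 and 0.25 in Q15 are 16384 and 8192). */
static q15_t mse_q15_example(void)
{
    const q15_t a[4] = { 16384, 16384, 8192, 8192 };
    const q15_t b[4] = {  8192,  8192, 8192, 8192 };
    q15_t mse;

    /* Differences of 0.25 on two of the four samples give an MSE of 0.03125,
       i.e. about 1024 in Q15. */
    arm_mse_q15(a, b, 4, &mse);
    return mse;
}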
+#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mse_q31.c + * Description: Mean square error between two Q31 vectors + * + * $Date: 04 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + + +/** + @addtogroup MSE + @{ + */ + +/** + @brief Mean square error between two Q31 vectors. + @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) +void arm_mse_q31( + const q31_t * pSrcA, + const q31_t * pSrcB, + uint32_t blockSize, + q31_t * pResult) +{ + uint32_t blkCnt; /* loop counters */ + q31x4_t vecSrcA,vecSrcB; + q63_t sum = 0LL; + + /* Compute 4 outputs at a time */ + blkCnt = blockSize >> 2U; + while (blkCnt > 0U) + { + vecSrcA = vld1q(pSrcA); + vecSrcB = vld1q(pSrcB); + + vecSrcA = vshrq(vecSrcA,1); + vecSrcB = vshrq(vecSrcB,1); + + + vecSrcA = vqsubq(vecSrcA,vecSrcB); + /* + * sum lanes + */ + sum = vrmlaldavhaq(sum, vecSrcA, vecSrcA); + + blkCnt--; + pSrcA += 4; + pSrcB += 4; + } + + /* + * tail + */ + blkCnt = blockSize & 3; + if (blkCnt > 0U) + { + mve_pred16_t p0 = vctp32q(blkCnt); + vecSrcA = vld1q(pSrcA); + vecSrcB = vld1q(pSrcB); + + vecSrcA = vshrq(vecSrcA,1); + vecSrcB = vshrq(vecSrcB,1); + + vecSrcA = vqsubq(vecSrcA,vecSrcB); + + sum = vrmlaldavhaq_p(sum, vecSrcA, vecSrcA, p0); + } + + + *pResult = (q31_t) ((sum / blockSize)>>21); + +} +#else +void arm_mse_q31( + const q31_t * pSrcA, + const q31_t * pSrcB, + uint32_t blockSize, + q31_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + q63_t sum = 0; /* Temporary result storage */ + + q31_t inA32,inB32; /* Temporary variable to store packed input value */ + +#if defined (ARM_MATH_LOOPUNROLL) + + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = blockSize >> 2U; + + while (blkCnt > 0U) + { + inA32 = *pSrcA++ >> 1; + inB32 = *pSrcB++ >> 1; + inA32 = __QSUB(inA32, inB32); + sum += ((q63_t) inA32 * inA32) >> 14U; + + inA32 = *pSrcA++ >> 1; + inB32 = *pSrcB++ >> 1; + inA32 = __QSUB(inA32, inB32); + sum += ((q63_t) inA32 * inA32) >> 14U; + + inA32 = *pSrcA++ >> 1; + inB32 = *pSrcB++ >> 1; + inA32 = __QSUB(inA32, inB32); + sum += ((q63_t) inA32 * inA32) >> 14U; + + inA32 = *pSrcA++ >> 1; + inB32 = *pSrcB++ >> 1; + inA32 = __QSUB(inA32, inB32); + sum += ((q63_t) inA32 * inA32) >> 14U; + + + /* Decrement loop counter */ + blkCnt--; + } + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = 
blockSize % 0x4U; + +#else + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + +#endif /* #if defined (ARM_MATH_LOOPUNROLL) */ + + while (blkCnt > 0U) + { + inA32 = *pSrcA++ >> 1; + inB32 = *pSrcB++ >> 1; + inA32 = __QSUB(inA32, inB32); + sum += ((q63_t) inA32 * inA32) >> 14U; + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result in q31 format */ + *pResult = (q31_t) ((sum / blockSize)>>15); +} +#endif /* defined(ARM_MATH_MVEI) */ + +/** + @} end of MSE group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q7.c new file mode 100644 index 0000000..fb28d90 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_mse_q7.c @@ -0,0 +1,183 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mse_q7.c + * Description: Mean square error between two Q7 vectors + * + * $Date: 04 April 2022 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2022 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @defgroup MSE Mean Square Error + + Calculates the mean square error between two vectors. + + */ + +/** + @addtogroup MSE + @{ + */ + +/** + @brief Mean square error between two Q7 vectors. 
+ @param[in] pSrcA points to the first input vector + @param[in] pSrcB points to the second input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult mean square error + @return none + */ +#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) +void arm_mse_q7( + const q7_t * pSrcA, + const q7_t * pSrcB, + uint32_t blockSize, + q7_t * pResult) +{ + uint32_t blkCnt; /* loop counters */ + q7x16_t vecSrcA,vecSrcB; + q31_t sum = 0LL; + + /* Compute 16 outputs at a time */ + blkCnt = blockSize >> 4U; + while (blkCnt > 0U) + { + vecSrcA = vld1q(pSrcA); + vecSrcB = vld1q(pSrcB); + + vecSrcA = vshrq(vecSrcA,1); + vecSrcB = vshrq(vecSrcB,1); + + vecSrcA = vqsubq(vecSrcA,vecSrcB); + /* + * sum lanes + */ + sum = vmladavaq(sum, vecSrcA, vecSrcA); + + blkCnt--; + pSrcA += 16; + pSrcB += 16; + } + + /* + * tail + */ + blkCnt = blockSize & 0xF; + if (blkCnt > 0U) + { + mve_pred16_t p0 = vctp8q(blkCnt); + vecSrcA = vld1q(pSrcA); + vecSrcB = vld1q(pSrcB); + + vecSrcA = vshrq(vecSrcA,1); + vecSrcB = vshrq(vecSrcB,1); + + vecSrcA = vqsubq(vecSrcA,vecSrcB); + + sum = vmladavaq_p(sum, vecSrcA, vecSrcA, p0); + } + + *pResult = (q7_t) __SSAT((q15_t) (sum / blockSize)>>5, 8); +} +#else +void arm_mse_q7( + const q7_t * pSrcA, + const q7_t * pSrcB, + uint32_t blockSize, + q7_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + q31_t sum = 0; /* Temporary result storage */ + q7_t inA,inB; /* Temporary variable to store input value */ + + +#if defined (ARM_MATH_LOOPUNROLL) + + /* Loop unrolling: Compute 4 outputs at a time */ + blkCnt = blockSize >> 2U; + + while (blkCnt > 0U) + { + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q7_t) __SSAT((q15_t) inA - (q15_t)inB, 8); + sum += ((q15_t) inA * inA); + + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q7_t) __SSAT((q15_t) inA - (q15_t)inB, 8); + sum += ((q15_t) inA * inA); + + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q7_t) __SSAT((q15_t) inA - (q15_t)inB, 8); + sum += ((q15_t) inA * inA); + + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + inA = (q7_t) __SSAT((q15_t) inA - (q15_t)inB, 8); + sum += ((q15_t) inA * inA); + + /* Decrement loop counter */ + blkCnt--; + } + + /* Loop unrolling: Compute remaining outputs */ + blkCnt = blockSize % 0x4U; + +#else + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + +#endif /* #if defined (ARM_MATH_LOOPUNROLL) */ + + while (blkCnt > 0U) + { + inA = *pSrcA++ >> 1; + inB = *pSrcB++ >> 1; + + inA = (q7_t) __SSAT((q15_t) inA - (q15_t)inB, 8); + sum += ((q15_t) inA * inA); + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result in q7 format */ + *pResult = (q7_t) __SSAT((q15_t) (sum / blockSize)>>5, 8);; +} +#endif /* defined(ARM_MATH_MVEI) */ + +/** + @} end of MSE group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f16.c index 1a2d5b1..f9833c0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f16.c @@ -5,13 +5,13 @@ * Title: arm_power_f16.c * Description: Sum of the squares of the elements of a floating-point vector * - * $Date: 18. 
March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f32.c index 80ed5d9..ec07058 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f32.c @@ -5,13 +5,13 @@ * Title: arm_power_f32.c * Description: Sum of the squares of the elements of a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f64.c new file mode 100644 index 0000000..d2e1e03 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_f64.c @@ -0,0 +1,81 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_power_f64.c + * Description: Sum of the squares of the elements of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup power + @{ + */ + +/** + @brief Sum of the squares of the elements of a floating-point vector. 
+ @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult sum of the squares value returned here + @return none + */ +void arm_power_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + float64_t sum = 0.; /* Temporary result storage */ + float64_t in; /* Temporary variable to store input value */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = A[0] * A[0] + A[1] * A[1] + ... + A[blockSize-1] * A[blockSize-1] */ + + /* Compute Power and store result in a temporary variable, sum. */ + in = *pSrc++; + sum += in * in; + + /* Decrement loop counter */ + blkCnt--; + } + + /* Store result to destination */ + *pResult = sum; +} + +/** + @} end of power group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q15.c index 22c3afd..1cb3845 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q15.c @@ -5,13 +5,13 @@ * Title: arm_power_q15.c * Description: Sum of the squares of the elements of a Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -124,10 +124,10 @@ void arm_power_q15( /* Compute Power and store result in a temporary variable, sum. */ #if defined (ARM_MATH_DSP) - in32 = read_q15x2_ia ((q15_t **) &pSrc); + in32 = read_q15x2_ia (&pSrc); sum = __SMLALD(in32, in32, sum); - in32 = read_q15x2_ia ((q15_t **) &pSrc); + in32 = read_q15x2_ia (&pSrc); sum = __SMLALD(in32, in32, sum); #else in = *pSrc++; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q31.c index 71ce6b5..db83d3b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q31.c @@ -5,13 +5,13 @@ * Title: arm_power_q31.c * Description: Sum of the squares of the elements of a Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
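arm_power_f64, added above, returns the raw sum of squares rather than a mean, matching the other arm_power_* kernels; callers that want average power divide by blockSize themselves. A minimal sketch (not taken from the CMSIS sources):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h"

/* Sketch: sum of squares of a small float64 buffer. */
static float64_t power_f64_example(void)
{
    const float64_t x[3] = { 1.0, 2.0, 3.0 };
    float64_t sum_sq;

    arm_power_f64(x, 3, &sum_sq);    /* 1 + 4 + 9 = 14 */
    return sum_sq;                   /* divide by 3 for mean power if needed */
}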
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q7.c index bdbc041..7f74aa2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_power_q7.c @@ -5,13 +5,13 @@ * Title: arm_power_q7.c * Description: Sum of the squares of the elements of a Q7 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -124,7 +124,7 @@ void arm_power_q7( /* Compute Power and store result in a temporary variable, sum. */ #if defined (ARM_MATH_DSP) - in32 = read_q7x4_ia ((q7_t **) &pSrc); + in32 = read_q7x4_ia (&pSrc); in1 = __SXTB16(__ROR(in32, 8)); in2 = __SXTB16(in32); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_f16.c index 3a98ffc..e2c878c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_f16.c @@ -5,13 +5,13 @@ * Title: arm_rms_f16.c * Description: Root mean square value of the elements of a floating-point vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -37,18 +37,7 @@ @ingroup groupStats */ -/** - @defgroup RMS Root mean square (RMS) - - Calculates the Root Mean Square of the elements in the input vector. - The underlying algorithm is used: -
-      Result = sqrt(((pSrc[0] * pSrc[0] + pSrc[1] * pSrc[1] + ... + pSrc[blockSize-1] * pSrc[blockSize-1]) / blockSize));
-  
- - There are separate functions for floating point, Q31, and Q15 data types. - */ /** @addtogroup RMS @@ -75,7 +64,7 @@ void arm_rms_f16( arm_power_f16(pSrc, blockSize, &pow); /* Compute Rms and store the result in the destination */ - arm_sqrt_f16(pow / (float16_t) blockSize, pResult); + arm_sqrt_f16((_Float16)pow / (_Float16) blockSize, pResult); } #else @@ -137,7 +126,7 @@ void arm_rms_f16( } /* Compute Rms and store result in destination */ - arm_sqrt_f16(sum / (float16_t) blockSize, pResult); + arm_sqrt_f16((_Float16)sum / (_Float16) blockSize, pResult); } #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_f32.c index e86b7f2..7dadc34 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_f32.c @@ -5,13 +5,13 @@ * Title: arm_rms_f32.c * Description: Root mean square value of the elements of a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_q15.c index 8e3dc55..2ed47f6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_q15.c @@ -5,13 +5,13 @@ * Title: arm_rms_q15.c * Description: Root Mean Square of the elements of a Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -95,10 +95,10 @@ void arm_rms_q15( /* Compute sum of squares and store result in a temporary variable. 
*/ #if defined (ARM_MATH_DSP) - in32 = read_q15x2_ia ((q15_t **) &pSrc); + in32 = read_q15x2_ia (&pSrc); sum = __SMLALD(in32, in32, sum); - in32 = read_q15x2_ia ((q15_t **) &pSrc); + in32 = read_q15x2_ia (&pSrc); sum = __SMLALD(in32, in32, sum); #else in = *pSrc++; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_q31.c index 93303a8..f334db8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_rms_q31.c @@ -5,13 +5,13 @@ * Title: arm_rms_q31.c * Description: Root Mean Square of the elements of a Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f16.c index dc8f4a1..b941f24 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f16.c @@ -5,13 +5,13 @@ * Title: arm_std_f16.c * Description: Standard deviation of the elements of a floating-point vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f32.c index 245f27d..ea60d3c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f32.c @@ -5,13 +5,13 @@ * Title: arm_std_f32.c * Description: Standard deviation of the elements of a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f64.c new file mode 100644 index 0000000..a193f57 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_f64.c @@ -0,0 +1,63 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_std_f64.c + * Description: Standard deviation of the elements of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup STD + @{ + */ + +/** + @brief Standard deviation of the elements of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult standard deviation value returned here + @return none + */ +void arm_std_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult) +{ + float64_t var; + arm_var_f64(pSrc,blockSize,&var); + *pResult = sqrt(var); +} + +/** + @} end of STD group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_q15.c index 08d205b..4e15a85 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_q15.c @@ -5,13 +5,13 @@ * Title: arm_std_q15.c * Description: Standard deviation of an array of Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -102,12 +102,12 @@ void arm_std_q15( /* Compute sum of squares and store result in a temporary variable, sumOfSquares. */ /* Compute sum and store result in a temporary variable, sum. 
*/ #if defined (ARM_MATH_DSP) - in32 = read_q15x2_ia ((q15_t **) &pSrc); + in32 = read_q15x2_ia (&pSrc); sumOfSquares = __SMLALD(in32, in32, sumOfSquares); sum += ((in32 << 16U) >> 16U); sum += (in32 >> 16U); - in32 = read_q15x2_ia ((q15_t **) &pSrc); + in32 = read_q15x2_ia (&pSrc); sumOfSquares = __SMLALD(in32, in32, sumOfSquares); sum += ((in32 << 16U) >> 16U); sum += (in32 >> 16U); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_q31.c index 2248665..3036a2b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_std_q31.c @@ -5,13 +5,13 @@ * Title: arm_std_q31.c * Description: Standard deviation of the elements of a Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f16.c index e2ffd47..8700428 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f16.c @@ -5,13 +5,13 @@ * Title: arm_var_f16.c * Description: Variance of the elements of a floating-point vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -63,7 +63,7 @@ void arm_var_f16( { int32_t blkCnt; /* loop counters */ f16x8_t vecSrc; - f16x8_t sumVec = vdupq_n_f16((float16_t) 0.0); + f16x8_t sumVec = vdupq_n_f16(0.0f16); float16_t fMean; if (blockSize <= 1U) { @@ -74,15 +74,6 @@ void arm_var_f16( arm_mean_f16(pSrc, blockSize, &fMean); -/* 6.14 bug */ -#if defined (__ARMCC_VERSION) && (__ARMCC_VERSION >= 6100100) && (__ARMCC_VERSION < 6150001) - __asm volatile( - " vmov.i32 %[acc], #0 \n" - : [acc] "+t"(sumVec) - : - : ); -#endif - blkCnt = blockSize; do { mve_pred16_t p = vctp16q(blkCnt); @@ -100,7 +91,7 @@ void arm_var_f16( while (blkCnt > 0); /* Variance */ - *pResult = vecAddAcrossF16Mve(sumVec) / (float16_t) (blockSize - 1.0f); + *pResult = (_Float16)vecAddAcrossF16Mve(sumVec) / (_Float16) (blockSize - 1.0f16); } #else @@ -130,10 +121,10 @@ void arm_var_f16( { /* C = (A[0] + A[1] + A[2] + ... 
+ A[blockSize-1]) */ - sum += *pInput++; - sum += *pInput++; - sum += *pInput++; - sum += *pInput++; + sum += (_Float16)*pInput++; + sum += (_Float16)*pInput++; + sum += (_Float16)*pInput++; + sum += (_Float16)*pInput++; /* Decrement loop counter */ @@ -154,14 +145,14 @@ void arm_var_f16( { /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ - sum += *pInput++; + sum += (_Float16)*pInput++; /* Decrement loop counter */ blkCnt--; } /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) / blockSize */ - fMean = sum / (float16_t) blockSize; + fMean = (_Float16)sum / (_Float16) blockSize; pInput = pSrc; @@ -172,17 +163,17 @@ void arm_var_f16( while (blkCnt > 0U) { - fValue = *pInput++ - fMean; - fSum += fValue * fValue; + fValue = (_Float16)*pInput++ - (_Float16)fMean; + fSum += (_Float16)fValue * (_Float16)fValue; - fValue = *pInput++ - fMean; - fSum += fValue * fValue; + fValue = (_Float16)*pInput++ - (_Float16)fMean; + fSum += (_Float16)fValue * (_Float16)fValue; - fValue = *pInput++ - fMean; - fSum += fValue * fValue; + fValue = (_Float16)*pInput++ - (_Float16)fMean; + fSum += (_Float16)fValue * (_Float16)fValue; - fValue = *pInput++ - fMean; - fSum += fValue * fValue; + fValue = (_Float16)*pInput++ - (_Float16)fMean; + fSum += (_Float16)fValue * (_Float16)fValue; /* Decrement loop counter */ blkCnt--; @@ -200,15 +191,15 @@ void arm_var_f16( while (blkCnt > 0U) { - fValue = *pInput++ - fMean; - fSum += fValue * fValue; + fValue = (_Float16)*pInput++ - (_Float16)fMean; + fSum += (_Float16)fValue * (_Float16)fValue; /* Decrement loop counter */ blkCnt--; } /* Variance */ - *pResult = fSum / (float16_t)(blockSize - 1.0f); + *pResult = (_Float16)fSum / ((_Float16)blockSize - 1.0f16); } #endif /* defined(ARM_MATH_MVEF) && !defined(ARM_MATH_AUTOVECTORIZE) */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f32.c index b00b1ad..69ab060 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f32.c @@ -5,13 +5,13 @@ * Title: arm_var_f32.c * Description: Variance of the elements of a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
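The variance kernels in this patch use the unbiased divisor blockSize - 1 (visible in the f16 body above and the f64 body below), and the matching arm_std_* functions simply take the square root of that variance. A small sketch of the float64 pair added in this patch (not taken from the CMSIS sources):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h"

/* Sketch: sample variance and standard deviation of {2, 4, 4, 6}. */
static void var_std_example(float64_t *var, float64_t *std)
{
    const float64_t x[4] = { 2.0, 4.0, 4.0, 6.0 };

    arm_var_f64(x, 4, var);   /* mean = 4, sum of squared deviations = 8, var = 8/3 */
    arm_std_f64(x, 4, std);   /* std = sqrt(8/3), roughly 1.633 */
}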
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f64.c new file mode 100644 index 0000000..229db63 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_f64.c @@ -0,0 +1,104 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_var_f64.c + * Description: Variance of the elements of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + +/** + @ingroup groupStats + */ + +/** + @addtogroup variance + @{ + */ + +/** + @brief Variance of the elements of a floating-point vector. + @param[in] pSrc points to the input vector + @param[in] blockSize number of samples in input vector + @param[out] pResult variance value returned here + @return none + */ +void arm_var_f64( + const float64_t * pSrc, + uint32_t blockSize, + float64_t * pResult) +{ + uint32_t blkCnt; /* Loop counter */ + float64_t sum = 0.; /* Temporary result storage */ + float64_t fSum = 0.; + float64_t fMean, fValue; + const float64_t * pInput = pSrc; + + if (blockSize <= 1U) + { + *pResult = 0; + return; + } + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = (A[0] + A[1] + A[2] + ... + A[blockSize-1]) */ + + sum += *pInput++; + + /* Decrement loop counter */ + blkCnt--; + } + + /* C = (A[0] + A[1] + A[2] + ... 
+ A[blockSize-1]) / blockSize */ + fMean = sum / (float64_t) blockSize; + + pInput = pSrc; + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + fValue = *pInput++ - fMean; + fSum += fValue * fValue; + + /* Decrement loop counter */ + blkCnt--; + } + + /* Variance */ + *pResult = fSum / (float64_t)(blockSize - 1.); +} + +/** + @} end of variance group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_q15.c index a6be746..9c78d34 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_q15.c @@ -5,13 +5,13 @@ * Title: arm_var_q15.c * Description: Variance of an array of Q15 type * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -113,10 +113,10 @@ void arm_var_q15( /* Compute Mean of squares of the input samples * and then store the result in a temporary variable, meanOfSquares. */ - meanOfSquares = arm_div_q63_to_q31(sumOfSquares, (blockSize - 1U)); + meanOfSquares = arm_div_int64_to_int32(sumOfSquares, (blockSize - 1U)); /* Compute square of mean */ - squareOfMean = arm_div_q63_to_q31((q63_t)sum * sum, (q31_t)(blockSize * (blockSize - 1U))); + squareOfMean = arm_div_int64_to_int32((q63_t)sum * sum, (q31_t)(blockSize * (blockSize - 1U))); /* mean of the squares minus the square of the mean. */ *pResult = (meanOfSquares - squareOfMean) >> 15; @@ -156,12 +156,12 @@ void arm_var_q15( /* Compute sum of squares and store result in a temporary variable, sumOfSquares. */ /* Compute sum and store result in a temporary variable, sum. */ #if defined (ARM_MATH_DSP) - in32 = read_q15x2_ia ((q15_t **) &pSrc); + in32 = read_q15x2_ia (&pSrc); sumOfSquares = __SMLALD(in32, in32, sumOfSquares); sum += ((in32 << 16U) >> 16U); sum += (in32 >> 16U); - in32 = read_q15x2_ia ((q15_t **) &pSrc); + in32 = read_q15x2_ia (&pSrc); sumOfSquares = __SMLALD(in32, in32, sumOfSquares); sum += ((in32 << 16U) >> 16U); sum += (in32 >> 16U); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_q31.c index 0da41b1..025cc56 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/StatisticsFunctions/arm_var_q31.c @@ -5,13 +5,13 @@ * Title: arm_var_q31.c * Description: Variance of an array of Q31 type * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_barycenter_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_barycenter_f16.c index 161b6ec..9a1aa6a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_barycenter_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_barycenter_f16.c @@ -5,11 +5,13 @@ * Title: arm_barycenter_f16.c * Description: Barycenter * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -105,7 +107,7 @@ void arm_barycenter_f16(const float16_t *in, w2 = *pW++; w3 = *pW++; w4 = *pW++; - accum += w1 + w2 + w3 + w4; + accum += (_Float16)w1 + (_Float16)w2 + (_Float16)w3 + (_Float16)w4; blkCntSample = vecDim >> 3; while (blkCntSample > 0) { @@ -131,10 +133,10 @@ void arm_barycenter_f16(const float16_t *in, blkCntSample = vecDim & 7; while (blkCntSample > 0) { - *pOut = *pOut + *pIn1++ * w1; - *pOut = *pOut + *pIn2++ * w2; - *pOut = *pOut + *pIn3++ * w3; - *pOut = *pOut + *pIn4++ * w4; + *pOut = (_Float16)*pOut + (_Float16)*pIn1++ * (_Float16)w1; + *pOut = (_Float16)*pOut + (_Float16)*pIn2++ * (_Float16)w2; + *pOut = (_Float16)*pOut + (_Float16)*pIn3++ * (_Float16)w3; + *pOut = (_Float16)*pOut + (_Float16)*pIn4++ * (_Float16)w4; pOut++; blkCntSample--; } @@ -156,7 +158,7 @@ void arm_barycenter_f16(const float16_t *in, pOut = out; w = *pW++; - accum += w; + accum += (_Float16)w; blkCntSample = vecDim >> 3; while (blkCntSample > 0) @@ -174,7 +176,7 @@ void arm_barycenter_f16(const float16_t *in, blkCntSample = vecDim & 7; while (blkCntSample > 0) { - *pOut = *pOut + *pIn++ * w; + *pOut = (_Float16)*pOut + (_Float16)*pIn++ * (_Float16)w; pOut++; blkCntSample--; } @@ -184,7 +186,7 @@ void arm_barycenter_f16(const float16_t *in, /* Normalize */ pOut = out; - accum = 1.0f / accum; + accum = 1.0f16 / (_Float16)accum; blkCntSample = vecDim >> 3; while (blkCntSample > 0) @@ -201,7 +203,7 @@ void arm_barycenter_f16(const float16_t *in, blkCntSample = vecDim & 7; while (blkCntSample > 0) { - *pOut = *pOut * accum; + *pOut = (_Float16)*pOut * (_Float16)accum; pOut++; blkCntSample--; } @@ -218,7 +220,7 @@ void arm_barycenter_f16(const float16_t *in, const float16_t *weights, float16_t blkCntVector = nbVectors; blkCntSample = vecDim; - accum = 0.0f; + accum = 0.0f16; pW = weights; pIn = in; @@ -229,7 +231,7 @@ void arm_barycenter_f16(const float16_t *in, const float16_t *weights, float16_t while(blkCntSample > 0) { - *pOut = 0.0f; + *pOut = 0.0f16; pOut++; blkCntSample--; } @@ -239,12 +241,12 @@ void arm_barycenter_f16(const float16_t *in, const float16_t *weights, float16_t { pOut = out; w = 
*pW++; - accum += w; + accum += (_Float16)w; blkCntSample = vecDim; while(blkCntSample > 0) { - *pOut = *pOut + *pIn++ * w; + *pOut = (_Float16)*pOut + (_Float16)*pIn++ * (_Float16)w; pOut++; blkCntSample--; } @@ -258,7 +260,7 @@ void arm_barycenter_f16(const float16_t *in, const float16_t *weights, float16_t while(blkCntSample > 0) { - *pOut = *pOut / accum; + *pOut = (_Float16)*pOut / (_Float16)accum; pOut++; blkCntSample--; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_barycenter_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_barycenter_f32.c index 9f41c07..e941cbd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_barycenter_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_barycenter_f32.c @@ -5,11 +5,13 @@ * Title: arm_barycenter_f32.c * Description: Barycenter * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -33,6 +35,7 @@ /** @ingroup barycenter + @{ */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_bitonic_sort_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_bitonic_sort_f32.c index 131a5da..05edb29 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_bitonic_sort_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_bitonic_sort_f32.c @@ -5,13 +5,13 @@ * Title: arm_bitonic_sort_f32.c * Description: Floating point bitonic sort * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_bubble_sort_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_bubble_sort_f32.c index f84c057..4044ed0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_bubble_sort_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_bubble_sort_f32.c @@ -5,13 +5,13 @@ * Title: arm_bubble_sort_f32.c * Description: Floating point bubble sort * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
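The barycenter hunks above accumulate a weighted sum of the input vectors together with the sum of the weights, then scale by the reciprocal of the accumulated weight. As a usage sketch for the f32 variant (the signature is assumed to mirror the f16 one visible above: samples, weights, output, number of vectors, vector dimension; not taken from the CMSIS sources):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h"

/* Sketch: weighted barycenter of three 2-D points. */
static void barycenter_example(float32_t out[2])
{
    const float32_t pts[3 * 2] = { 0.0f, 0.0f,   2.0f, 0.0f,   0.0f, 2.0f };
    const float32_t w[3]       = { 1.0f, 1.0f, 2.0f };

    /* ((0,0)*1 + (2,0)*1 + (0,2)*2) / 4 = (0.5, 1.0) */
    arm_barycenter_f32(pts, w, out, 3, 2);
}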
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f16.c index a77f4e9..6d0003b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f16.c @@ -5,13 +5,13 @@ * Title: arm_copy_f16.c * Description: Copies the elements of a floating-point vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f32.c index 51c0b19..f6f2a33 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f32.c @@ -5,13 +5,13 @@ * Title: arm_copy_f32.c * Description: Copies the elements of a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f64.c new file mode 100644 index 0000000..05c21e1 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_f64.c @@ -0,0 +1,75 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_copy_f64.c + * Description: Copies the elements of a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h" + +/** + @ingroup groupSupport + */ + +/** + @addtogroup copy + @{ + */ + +/** + @brief Copies the elements of a floating-point vector. + @param[in] pSrc points to input vector + @param[out] pDst points to output vector + @param[in] blockSize number of samples in each vector + @return none + */ +void arm_copy_f64( + const float64_t * pSrc, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = A */ + + /* Copy and store result in destination buffer */ + *pDst++ = *pSrc++; + + /* Decrement loop counter */ + blkCnt--; + } +} + +/** + @} end of BasicCopy group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q15.c index 765b038..fcb7d22 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q15.c @@ -5,13 +5,13 @@ * Title: arm_copy_q15.c * Description: Copies the elements of a Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -97,8 +97,8 @@ void arm_copy_q15( /* C = A */ /* read 2 times 2 samples at a time */ - write_q15x2_ia (&pDst, read_q15x2_ia ((q15_t **) &pSrc)); - write_q15x2_ia (&pDst, read_q15x2_ia ((q15_t **) &pSrc)); + write_q15x2_ia (&pDst, read_q15x2_ia (&pSrc)); + write_q15x2_ia (&pDst, read_q15x2_ia (&pSrc)); /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q31.c index 07b9b22..1249380 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q31.c @@ -5,13 +5,13 @@ * Title: arm_copy_q31.c * Description: Copies the elements of a Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. 
All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q7.c index 1eaa857..70f7b7f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_copy_q7.c @@ -5,13 +5,13 @@ * Title: arm_copy_q7.c * Description: Copies the elements of a Q7 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -100,7 +100,7 @@ void arm_copy_q7( /* C = A */ /* read 4 samples at a time */ - write_q7x4_ia (&pDst, read_q7x4_ia ((q7_t **) &pSrc)); + write_q7x4_ia (&pDst, read_q7x4_ia (&pSrc)); /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_f16_to_float.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_f16_to_float.c index 87b1e46..cf4451f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_f16_to_float.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_f16_to_float.c @@ -5,13 +5,13 @@ * Title: arm_float_to_q15.c * Description: Converts the elements of the floating-point vector to Q15 vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -55,7 +55,11 @@ */ -#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && defined(__CMSIS_GCC_H) +#pragma GCC warning "Scalar version of arm_f16_to_float built. Helium version has build issues with gcc." 
+#endif + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H) void arm_f16_to_float( const float16_t * pSrc, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_f16_to_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_f16_to_q15.c index a454881..cdd714e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_f16_to_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_f16_to_q15.c @@ -5,13 +5,13 @@ * Title: arm_float_to_q15.c * Description: Converts the elements of the floating-point vector to Q15 vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -138,7 +138,7 @@ void arm_f16_to_q15( /* * convert from float to Q31 and then store the results in the destination buffer */ - *pDst++ = clip_q31_to_q15((q31_t) (*pIn++ * 32768.0)); + *pDst++ = clip_q31_to_q15((q31_t) ((_Float16)*pIn++ * 32768.0f16)); #endif /* #ifdef ARM_MATH_ROUNDING */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f16.c index 6f46139..f52f505 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f16.c @@ -5,13 +5,13 @@ * Title: arm_fill_f16.c * Description: Fills a constant value into a floating-point vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f32.c index 14c851b..b06ceb3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f32.c @@ -5,13 +5,13 @@ * Title: arm_fill_f32.c * Description: Fills a constant value into a floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. 
All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f64.c new file mode 100644 index 0000000..152cb4c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_f64.c @@ -0,0 +1,75 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_fill_f64.c + * Description: Fills a constant value into a floating-point vector + * + * $Date: 13 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/support_functions.h" + +/** + @ingroup groupSupport + */ + +/** + @addtogroup Fill + @{ + */ + +/** + @brief Fills a constant value into a floating-point vector. + @param[in] value input value to be filled + @param[out] pDst points to output vector + @param[in] blockSize number of samples in each vector + @return none + */ +void arm_fill_f64( + float64_t value, + float64_t * pDst, + uint32_t blockSize) +{ + uint32_t blkCnt; /* Loop counter */ + + /* Initialize blkCnt with number of samples */ + blkCnt = blockSize; + + while (blkCnt > 0U) + { + /* C = value */ + + /* Fill value in destination buffer */ + *pDst++ = value; + + /* Decrement loop counter */ + blkCnt--; + } +} + +/** + @} end of Fill group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q15.c index c9b46c4..a45aae4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q15.c @@ -5,13 +5,13 @@ * Title: arm_fill_q15.c * Description: Fills a constant value into a Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
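/* --- Editor's note (annotation, not part of the upstream patch) ----------------
 * arm_copy_f64.c and arm_fill_f64.c added above are double-precision counterparts
 * of the existing f32 kernels and use the same calling convention, as their
 * signatures in the new files show. Hypothetical usage sketch (buffer names and
 * sizes are illustrative):
 */
void example_f64_support_kernels(void)
{
    float64_t buf[8];
    float64_t copy[8];

    arm_fill_f64(0.5, buf, 8);    /* fill buf with the constant 0.5               */
    arm_copy_f64(buf, copy, 8);   /* copy buf into copy, element by element       */
}
/* ------------------------------------------------------------------------------- */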
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q31.c index 4a9a6c1..9a8b129 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q31.c @@ -5,13 +5,13 @@ * Title: arm_fill_q31.c * Description: Fills a constant value into a Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q7.c index 2050480..1211436 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_fill_q7.c @@ -5,13 +5,13 @@ * Title: arm_fill_q7.c * Description: Fills a constant value into a Q7 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_f16.c index 3a1e9a5..c726153 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_f16.c @@ -5,13 +5,13 @@ * Title: arm_float_to_q15.c * Description: Converts the elements of the floating-point vector to Q15 vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -51,7 +51,11 @@ */ -#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && defined(__CMSIS_GCC_H) +#pragma GCC warning "Scalar version of arm_float_to_f16 built. 
Helium version has build issues with gcc." +#endif + +#if defined(ARM_MATH_MVE_FLOAT16) && !defined(ARM_MATH_AUTOVECTORIZE) && !defined(__CMSIS_GCC_H) void arm_float_to_f16( const float32_t * pSrc, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q15.c index c6bd214..dd5bab1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q15.c @@ -5,13 +5,13 @@ * Title: arm_float_to_q15.c * Description: Converts the elements of the floating-point vector to Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -70,7 +70,7 @@ void arm_float_to_q15( uint32_t blkCnt; float32_t maxQ = (float32_t) Q15_MAX; f32x4x2_t tmp; - q15x8_t vecDst; + q15x8_t vecDst = { 0 }; #ifdef ARM_MATH_ROUNDING float32_t in; #endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q31.c index 3cd44ad..76cd238 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q31.c @@ -5,13 +5,13 @@ * Title: arm_float_to_q31.c * Description: Converts the elements of the floating-point vector to Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q7.c index 613976c..f64e6d2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_float_to_q7.c @@ -5,13 +5,13 @@ * Title: arm_float_to_q7.c * Description: Converts the elements of the floating-point vector to Q7 vector * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -69,8 +69,8 @@ void arm_float_to_q7( uint32_t blkCnt; /* loop counters */ float32_t maxQ = powf(2.0, 7); f32x4x4_t tmp; - q15x8_t evVec, oddVec; - q7x16_t vecDst; + q15x8_t evVec = { 0 }, oddVec = { 0 }; + q7x16_t vecDst = { 0 }; float32_t const *pSrcVec; #ifdef ARM_MATH_ROUNDING float32_t in; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_heap_sort_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_heap_sort_f32.c index 78985b7..aa52173 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_heap_sort_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_heap_sort_f32.c @@ -5,13 +5,13 @@ * Title: arm_heap_sort_f32.c * Description: Floating point heap sort * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_insertion_sort_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_insertion_sort_f32.c index 440b26e..386c5ce 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_insertion_sort_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_insertion_sort_f32.c @@ -5,13 +5,13 @@ * Title: arm_insertion_sort_f32.c * Description: Floating point insertion sort * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_merge_sort_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_merge_sort_f32.c index 13c7a33..a74a961 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_merge_sort_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_merge_sort_f32.c @@ -5,13 +5,13 @@ * Title: arm_merge_sort_f32.c * Description: Floating point merge sort * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_merge_sort_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_merge_sort_init_f32.c index 901554a..73f916a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_merge_sort_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_merge_sort_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_merge_sort_init_f32.c * Description: Floating point merge sort initialization function * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_f16.c index 8b95b12..a250b0b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_f16.c @@ -5,13 +5,13 @@ * Title: arm_q15_to_float.c * Description: Converts the elements of the Q15 vector to floating-point vector * - * $Date: 18. March 2020 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -38,7 +38,7 @@ */ /** - * @defgroup q15_to_x Convert 16-bit Integer value + * @defgroup q15_to_x Convert 16-bit fixed point value */ /** @@ -116,10 +116,10 @@ void arm_q15_to_f16( /* C = (float16_t) A / 32768 */ /* Convert from q15 to float and store result in destination buffer */ - *pDst++ = ((float16_t) * pIn++ / 32768.0f); - *pDst++ = ((float16_t) * pIn++ / 32768.0f); - *pDst++ = ((float16_t) * pIn++ / 32768.0f); - *pDst++ = ((float16_t) * pIn++ / 32768.0f); + *pDst++ = ((_Float16) * pIn++ / 32768.0f16); + *pDst++ = ((_Float16) * pIn++ / 32768.0f16); + *pDst++ = ((_Float16) * pIn++ / 32768.0f16); + *pDst++ = ((_Float16) * pIn++ / 32768.0f16); /* Decrement loop counter */ blkCnt--; @@ -140,7 +140,7 @@ void arm_q15_to_f16( /* C = (float16_t) A / 32768 */ /* Convert from q15 to float and store result in destination buffer */ - *pDst++ = ((float16_t) *pIn++ / 32768.0f); + *pDst++ = ((_Float16) *pIn++ / 32768.0f16); /* Decrement loop counter */ blkCnt--; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_float.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_float.c index 9f8dc33..1bc9729 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_float.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_float.c @@ -5,13 +5,13 @@ * Title: arm_q15_to_float.c * Description: Converts the elements of the Q15 vector to floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
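/* --- Editor's note (annotation, not part of the upstream patch) ----------------
 * The q15_to_x hunks above rename the Doxygen group to "16-bit fixed point" and
 * keep the same scaling: a q15_t stores a value in [-1, 1) with 15 fractional
 * bits, so the conversion factor is 2^15 = 32768. A scalar sketch of the round
 * trip (illustrative helper names; saturation is only needed going to q15):
 */
static inline float q15_to_float_scalar(q15_t x)
{
    return (float)x / 32768.0f;                  /* mirrors arm_q15_to_float        */
}

static inline q15_t float_to_q15_scalar(float x)
{
    float v = x * 32768.0f;                      /* mirrors arm_float_to_q15        */
    if (v >  32767.0f) v =  32767.0f;            /* saturate to the Q15 range       */
    if (v < -32768.0f) v = -32768.0f;
    return (q15_t)v;
}
/* ------------------------------------------------------------------------------- */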
* * SPDX-License-Identifier: Apache-2.0 * @@ -35,7 +35,7 @@ */ /** - * @defgroup q15_to_x Convert 16-bit Integer value + * @defgroup q15_to_x Convert 16-bit fixed point value */ /** @@ -67,16 +67,16 @@ void arm_q15_to_float( q15x8_t vecDst; q15_t const *pSrcVec; - + pSrcVec = (q15_t const *) pSrc; blkCnt = blockSize >> 2; while (blkCnt > 0U) { /* C = (float32_t) A / 32768 */ /* convert from q15 to float and then store the results in the destination buffer */ - vecDst = vldrhq_s32(pSrcVec); + vecDst = vldrhq_s32(pSrcVec); pSrcVec += 4; - vstrwq(pDst, vcvtq_n_f32_s32(vecDst, 15)); + vstrwq(pDst, vcvtq_n_f32_s32((int32x4_t)vecDst, 15)); pDst += 4; /* * Decrement the blockSize loop counter @@ -131,7 +131,7 @@ void arm_q15_to_float( outV = vcvtq_n_f32_s32(inV1,15); vst1q_f32(pDst, outV); pDst += 4; - + /* Decrement the loop counter */ blkCnt--; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_q31.c index 3e59523..2a56392 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_q31.c @@ -5,13 +5,13 @@ * Title: arm_q15_to_q31.c * Description: Converts the elements of the Q15 vector to Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -119,8 +119,8 @@ void arm_q15_to_q31( /* C = (q31_t)A << 16 */ /* Convert from q15 to q31 and store result in destination buffer */ - in1 = read_q15x2_ia ((q15_t **) &pIn); - in2 = read_q15x2_ia ((q15_t **) &pIn); + in1 = read_q15x2_ia (&pIn); + in2 = read_q15x2_ia (&pIn); #ifndef ARM_MATH_BIG_ENDIAN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_q7.c index abb68b5..8a33729 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q15_to_q7.c @@ -5,13 +5,13 @@ * Title: arm_q15_to_q7.c * Description: Converts the elements of the Q15 vector to Q7 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -62,7 +62,7 @@ void arm_q15_to_q7( uint32_t blkCnt; /* loop counters */ q15x8x2_t tmp; q15_t const *pSrcVec; - q7x16_t vecDst; + q7x16_t vecDst = { 0 }; pSrcVec = (q15_t const *) pSrc; @@ -121,8 +121,8 @@ void arm_q15_to_q7( /* Convert from q15 to q7 and store result in destination buffer */ #if defined (ARM_MATH_DSP) - in1 = read_q15x2_ia ((q15_t **) &pIn); - in2 = read_q15x2_ia ((q15_t **) &pIn); + in1 = read_q15x2_ia (&pIn); + in2 = read_q15x2_ia (&pIn); #ifndef ARM_MATH_BIG_ENDIAN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_float.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_float.c index fc4b280..a478044 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_float.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_float.c @@ -5,13 +5,13 @@ * Title: arm_q31_to_float.c * Description: Converts the elements of the Q31 vector to floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -35,7 +35,7 @@ */ /** - * @defgroup q31_to_x Convert 32-bit Integer value + * @defgroup q31_to_x Convert 32-bit fixed point value */ /** diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_q15.c index 27e04b5..2d0c58a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_q15.c @@ -5,13 +5,13 @@ * Title: arm_q31_to_q15.c * Description: Converts the elements of the Q31 vector to Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -60,7 +60,7 @@ void arm_q31_to_q15( { uint32_t blkCnt; /* loop counters */ q31x4x2_t tmp; - q15x8_t vecDst; + q15x8_t vecDst = { 0 }; q31_t const *pSrcVec; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_q7.c index 14f25bc..27d1423 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q31_to_q7.c @@ -5,13 +5,13 @@ * Title: arm_q31_to_q7.c * Description: Converts the elements of the Q31 vector to Q7 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -60,8 +60,8 @@ void arm_q31_to_q7( { uint32_t blkCnt; /* loop counters */ q31x4x4_t tmp; - q15x8_t evVec, oddVec; - q7x16_t vecDst; + q15x8_t evVec = { 0 }, oddVec = { 0 }; + q7x16_t vecDst = { 0 }; q31_t const *pSrcVec; pSrcVec = (q31_t const *) pSrc; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_float.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_float.c index 6535dd0..f70206d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_float.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_float.c @@ -5,13 +5,13 @@ * Title: arm_q7_to_float.c * Description: Converts the elements of the Q7 vector to floating-point vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -35,7 +35,7 @@ */ /** - * @defgroup q7_to_x Convert 8-bit Integer value + * @defgroup q7_to_x Convert 8-bit fixed point value */ /** @@ -74,7 +74,7 @@ void arm_q7_to_float( /* convert from q7 to float and then store the results in the destination buffer */ vecDst = vldrbq_s32(pSrcVec); pSrcVec += 4; - vstrwq(pDst, vcvtq_n_f32_s32(vecDst, 7)); + vstrwq(pDst, vcvtq_n_f32_s32((int32x4_t)vecDst, 7)); pDst += 4; /* * Decrement the blockSize loop counter diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_q15.c index 75bb856..b169fba 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_q15.c @@ -5,13 +5,13 @@ * Title: arm_q7_to_q15.c * Description: Converts the elements of the Q7 vector to Q15 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -123,7 +123,7 @@ void arm_q7_to_q15( /* Convert from q7 to q15 and store result in destination buffer */ #if defined (ARM_MATH_DSP) - in = read_q7x4_ia ((q7_t **) &pIn); + in = read_q7x4_ia (&pIn); /* rotatate in by 8 and extend two q7_t values to q15_t values */ in1 = __SXTB16(__ROR(in, 8)); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_q31.c index a01894a..7867a08 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_q7_to_q31.c @@ -5,13 +5,13 @@ * Title: arm_q7_to_q31.c * Description: Converts the elements of the Q7 vector to Q31 vector * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -115,7 +115,7 @@ void arm_q7_to_q31( /* C = (q31_t) A << 24 */ /* Convert from q7 to q31 and store result in destination buffer */ - in = read_q7x4_ia ((q7_t **) &pIn); + in = read_q7x4_ia (&pIn); #ifndef ARM_MATH_BIG_ENDIAN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_quick_sort_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_quick_sort_f32.c index 4723d13..6c0e638 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_quick_sort_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_quick_sort_f32.c @@ -5,13 +5,13 @@ * Title: arm_quick_sort_f32.c * Description: Floating point quick sort * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_selection_sort_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_selection_sort_f32.c index 7100f04..7fa49ae 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_selection_sort_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_selection_sort_f32.c @@ -5,13 +5,13 @@ * Title: arm_selection_sort_f32.c * Description: Floating point selection sort * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_sort_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_sort_f32.c index 3d3ecd8..931fc2d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_sort_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_sort_f32.c @@ -5,13 +5,13 @@ * Title: arm_sort_f32.c * Description: Floating point sort * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_sort_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_sort_init_f32.c index 723db0b..7220b4c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_sort_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_sort_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_sort_init_f32.c * Description: Floating point sort initialization function * - * $Date: 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_weighted_sum_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_weighted_sum_f16.c index d0b6f99..2c80545 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_weighted_sum_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_weighted_sum_f16.c @@ -5,11 +5,13 @@ * Title: arm_weighted_sum_f16.c * Description: Weighted Sum * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_weighted_sum_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_weighted_sum_f32.c index 7f28207..243378d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_weighted_sum_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/SupportFunctions/arm_weighted_sum_f32.c @@ -5,11 +5,13 @@ * Title: arm_weighted_sum_f32.c * Description: Weighted Sum * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal.c index a1504f1..cc8e8b0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal.c @@ -5,13 +5,13 @@ * Title: arm_bitreversal.c * Description: Bitreversal functions * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal2.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal2.c index a22e8cb..e093aec 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal2.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal2.c @@ -5,10 +5,10 @@ * Title: arm_bitreversal2.c * Description: Bitreversal functions * - * $Date: 18. March 2019 - * $Revision: V1.0.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2019 ARM Limited or its affiliates. All rights reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal_f16.c index 7809ea9..bd13013 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal_f16.c @@ -5,8 +5,10 @@ * Title: arm_bitreversal_f16.c * Description: Bitreversal functions * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
@@ -100,4 +102,5 @@ const uint16_t * pBitRevTab) } } #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_f16.c index 2d6d436..ee4f926 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_f16.c @@ -5,13 +5,13 @@ * Title: arm_cfft_f32.c * Description: Combined Radix Decimation in Frequency CFFT Floating point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -42,111 +42,51 @@ static float16_t arm_inverse_fft_length_f16(uint16_t fftLen) { float16_t retValue=1.0; - - switch (fftLen) - { - - case 4096U: - retValue = (float16_t)0.000244140625f; - break; - - case 2048U: - retValue = (float16_t)0.00048828125f; - break; - - case 1024U: - retValue = (float16_t)0.0009765625f; - break; - - case 512U: - retValue = (float16_t)0.001953125f; - break; - - case 256U: - retValue = (float16_t)0.00390625f; - break; - - case 128U: - retValue = (float16_t)0.0078125f; - break; - - case 64U: - retValue = (float16_t)0.015625f; - break; - - case 32U: - retValue = (float16_t)0.03125f; - break; - - case 16U: - retValue = (float16_t)0.0625f; - break; - - - default: - break; - } - return(retValue); -} - - -static void arm_bitreversal_f16_inpl_mve( - uint16_t *pSrc, - const uint16_t bitRevLen, - const uint16_t *pBitRevTab) - -{ - uint32_t *src = (uint32_t *)pSrc; - uint32_t blkCnt; /* loop counters */ - uint32x4_t bitRevTabOff; - uint16x8_t one = vdupq_n_u16(1); - - blkCnt = (bitRevLen / 2) / 4; - while (blkCnt > 0U) { - bitRevTabOff = vldrhq_u16(pBitRevTab); - pBitRevTab += 8; - uint32x4_t bitRevOff1 = vmullbq_int_u16(bitRevTabOff, one); - uint32x4_t bitRevOff2 = vmulltq_int_u16(bitRevTabOff, one); + switch (fftLen) + { - bitRevOff1 = bitRevOff1 >> 3; - bitRevOff2 = bitRevOff2 >> 3; + case 4096U: + retValue = (float16_t)0.000244140625f; + break; - uint32x4_t in1 = vldrwq_gather_shifted_offset_u32(src, bitRevOff1); - uint32x4_t in2 = vldrwq_gather_shifted_offset_u32(src, bitRevOff2); + case 2048U: + retValue = (float16_t)0.00048828125f; + break; - vstrwq_scatter_shifted_offset_u32(src, bitRevOff1, in2); - vstrwq_scatter_shifted_offset_u32(src, bitRevOff2, in1); + case 1024U: + retValue = (float16_t)0.0009765625f; + break; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } + case 512U: + retValue = (float16_t)0.001953125f; + break; + case 256U: + retValue = (float16_t)0.00390625f; + break; - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = bitRevLen & 7; - if (blkCnt > 0U) { - mve_pred16_t p0 = vctp16q(blkCnt); + case 128U: + retValue = (float16_t)0.0078125f; + break; - bitRevTabOff = vldrhq_z_u16(pBitRevTab, p0); + case 64U: + retValue = (float16_t)0.015625f; + break; - uint32x4_t bitRevOff1 = 
vmullbq_int_u16(bitRevTabOff, one); - uint32x4_t bitRevOff2 = vmulltq_int_u16(bitRevTabOff, one); + case 32U: + retValue = (float16_t)0.03125f; + break; - bitRevOff1 = bitRevOff1 >> 3; - bitRevOff2 = bitRevOff2 >> 3; + case 16U: + retValue = (float16_t)0.0625f; + break; - uint32x4_t in1 = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff1, p0); - uint32x4_t in2 = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff2, p0); - vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff1, in2, p0); - vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff2, in1, p0); - } + default: + break; + } + return(retValue); } @@ -159,39 +99,37 @@ static void _arm_radix4_butterfly_f16_mve(const arm_cfft_instance_f16 * S,float1 uint32_t n1, n2; uint32_t stage = 0; int32_t iter = 1; - static const uint32_t strides[4] = - {(0 - 16) * sizeof(float16_t *) - , (4 - 16) * sizeof(float16_t *) - , (8 - 16) * sizeof(float16_t *) - , (12 - 16) * sizeof(float16_t *)}; + static const int32_t strides[4] = + { ( 0 - 16) * (int32_t)sizeof(float16_t *) + , ( 4 - 16) * (int32_t)sizeof(float16_t *) + , ( 8 - 16) * (int32_t)sizeof(float16_t *) + , (12 - 16) * (int32_t)sizeof(float16_t *)}; n2 = fftLen; n1 = n2; n2 >>= 2u; for (int k = fftLen / 4u; k > 1; k >>= 2) { + float16_t const *p_rearranged_twiddle_tab_stride1 = + &S->rearranged_twiddle_stride1[ + S->rearranged_twiddle_tab_stride1_arr[stage]]; + float16_t const *p_rearranged_twiddle_tab_stride2 = + &S->rearranged_twiddle_stride2[ + S->rearranged_twiddle_tab_stride2_arr[stage]]; + float16_t const *p_rearranged_twiddle_tab_stride3 = + &S->rearranged_twiddle_stride3[ + S->rearranged_twiddle_tab_stride3_arr[stage]]; + float16_t * pBase = pSrc; for (int i = 0; i < iter; i++) { - float16_t const *p_rearranged_twiddle_tab_stride1 = - &S->rearranged_twiddle_stride1[ - S->rearranged_twiddle_tab_stride1_arr[stage]]; - float16_t const *p_rearranged_twiddle_tab_stride2 = - &S->rearranged_twiddle_stride2[ - S->rearranged_twiddle_tab_stride2_arr[stage]]; - float16_t const *p_rearranged_twiddle_tab_stride3 = - &S->rearranged_twiddle_stride3[ - S->rearranged_twiddle_tab_stride3_arr[stage]]; - float16_t const *pW1, *pW2, *pW3; - float16_t *inA = pSrc + CMPLX_DIM * i * n1; - float16_t *inB = inA + n2 * CMPLX_DIM; - float16_t *inC = inB + n2 * CMPLX_DIM; - float16_t *inD = inC + n2 * CMPLX_DIM; - f16x8_t vecW; - - - pW1 = p_rearranged_twiddle_tab_stride1; - pW2 = p_rearranged_twiddle_tab_stride2; - pW3 = p_rearranged_twiddle_tab_stride3; + float16_t *inA = pBase; + float16_t *inB = inA + n2 * CMPLX_DIM; + float16_t *inC = inB + n2 * CMPLX_DIM; + float16_t *inD = inC + n2 * CMPLX_DIM; + float16_t const *pW1 = p_rearranged_twiddle_tab_stride1; + float16_t const *pW2 = p_rearranged_twiddle_tab_stride2; + float16_t const *pW3 = p_rearranged_twiddle_tab_stride3; + f16x8_t vecW; blkCnt = n2 / 4; /* @@ -260,6 +198,7 @@ static void _arm_radix4_butterfly_f16_mve(const arm_cfft_instance_f16 * S,float1 blkCnt--; } + pBase += CMPLX_DIM * n1; } n1 = n2; n2 >>= 2u; @@ -270,7 +209,7 @@ static void _arm_radix4_butterfly_f16_mve(const arm_cfft_instance_f16 * S,float1 /* * start of Last stage process */ - uint32x4_t vecScGathAddr = *(uint32x4_t *) strides; + uint32x4_t vecScGathAddr = vld1q_u32((uint32_t*)strides); vecScGathAddr = vecScGathAddr + (uint32_t) pSrc; /* load scheduling */ @@ -362,16 +301,15 @@ static void _arm_radix4_butterfly_inverse_f16_mve(const arm_cfft_instance_f16 * f16x8_t vecTmp0, vecTmp1; f16x8_t vecSum0, vecDiff0, vecSum1, vecDiff1; f16x8_t vecA, vecB, vecC, vecD; - f16x8_t vecW; uint32_t blkCnt; 
uint32_t n1, n2; uint32_t stage = 0; int32_t iter = 1; - static const uint32_t strides[4] = { - (0 - 16) * sizeof(q31_t *), - (4 - 16) * sizeof(q31_t *), - (8 - 16) * sizeof(q31_t *), - (12 - 16) * sizeof(q31_t *) + static const int32_t strides[4] = { + ( 0 - 16) * (int32_t)sizeof(q31_t *), + ( 4 - 16) * (int32_t)sizeof(q31_t *), + ( 8 - 16) * (int32_t)sizeof(q31_t *), + (12 - 16) * (int32_t)sizeof(q31_t *) }; n2 = fftLen; @@ -379,26 +317,27 @@ static void _arm_radix4_butterfly_inverse_f16_mve(const arm_cfft_instance_f16 * n2 >>= 2u; for (int k = fftLen / 4; k > 1; k >>= 2) { + float16_t const *p_rearranged_twiddle_tab_stride1 = + &S->rearranged_twiddle_stride1[ + S->rearranged_twiddle_tab_stride1_arr[stage]]; + float16_t const *p_rearranged_twiddle_tab_stride2 = + &S->rearranged_twiddle_stride2[ + S->rearranged_twiddle_tab_stride2_arr[stage]]; + float16_t const *p_rearranged_twiddle_tab_stride3 = + &S->rearranged_twiddle_stride3[ + S->rearranged_twiddle_tab_stride3_arr[stage]]; + + float16_t * pBase = pSrc; for (int i = 0; i < iter; i++) { - float16_t const *p_rearranged_twiddle_tab_stride1 = - &S->rearranged_twiddle_stride1[ - S->rearranged_twiddle_tab_stride1_arr[stage]]; - float16_t const *p_rearranged_twiddle_tab_stride2 = - &S->rearranged_twiddle_stride2[ - S->rearranged_twiddle_tab_stride2_arr[stage]]; - float16_t const *p_rearranged_twiddle_tab_stride3 = - &S->rearranged_twiddle_stride3[ - S->rearranged_twiddle_tab_stride3_arr[stage]]; - float16_t const *pW1, *pW2, *pW3; - float16_t *inA = pSrc + CMPLX_DIM * i * n1; - float16_t *inB = inA + n2 * CMPLX_DIM; - float16_t *inC = inB + n2 * CMPLX_DIM; - float16_t *inD = inC + n2 * CMPLX_DIM; - - pW1 = p_rearranged_twiddle_tab_stride1; - pW2 = p_rearranged_twiddle_tab_stride2; - pW3 = p_rearranged_twiddle_tab_stride3; + float16_t *inA = pBase; + float16_t *inB = inA + n2 * CMPLX_DIM; + float16_t *inC = inB + n2 * CMPLX_DIM; + float16_t *inD = inC + n2 * CMPLX_DIM; + float16_t const *pW1 = p_rearranged_twiddle_tab_stride1; + float16_t const *pW2 = p_rearranged_twiddle_tab_stride2; + float16_t const *pW3 = p_rearranged_twiddle_tab_stride3; + f16x8_t vecW; blkCnt = n2 / 4; /* @@ -466,6 +405,7 @@ static void _arm_radix4_butterfly_inverse_f16_mve(const arm_cfft_instance_f16 * blkCnt--; } + pBase += CMPLX_DIM * n1; } n1 = n2; n2 >>= 2u; @@ -476,7 +416,7 @@ static void _arm_radix4_butterfly_inverse_f16_mve(const arm_cfft_instance_f16 * /* * start of Last stage process */ - uint32x4_t vecScGathAddr = *(uint32x4_t *) strides; + uint32x4_t vecScGathAddr = vld1q_u32((uint32_t*)strides); vecScGathAddr = vecScGathAddr + (uint32_t) pSrc; /* @@ -592,53 +532,53 @@ void arm_cfft_f16( float16_t * pSrc, uint8_t ifftFlag, uint8_t bitReverseFlag) -{ - uint32_t fftLen = S->fftLen; - - if (ifftFlag == 1U) { - - switch (fftLen) { - case 16: - case 64: - case 256: - case 1024: - case 4096: - _arm_radix4_butterfly_inverse_f16_mve(S, pSrc, fftLen, arm_inverse_fft_length_f16(S->fftLen)); - break; - - case 32: - case 128: - case 512: - case 2048: - arm_cfft_radix4by2_inverse_f16_mve(S, pSrc, fftLen); - break; - } - } else { - switch (fftLen) { - case 16: - case 64: - case 256: - case 1024: - case 4096: - _arm_radix4_butterfly_f16_mve(S, pSrc, fftLen); - break; - - case 32: - case 128: - case 512: - case 2048: - arm_cfft_radix4by2_f16_mve(S, pSrc, fftLen); - break; - } - } - - - if (bitReverseFlag) - { - - arm_bitreversal_f16_inpl_mve((uint16_t*)pSrc, S->bitRevLength, S->pBitRevTable); - - } +{ + uint32_t fftLen = S->fftLen; + + if (ifftFlag == 1U) { + + switch 
(fftLen) { + case 16: + case 64: + case 256: + case 1024: + case 4096: + _arm_radix4_butterfly_inverse_f16_mve(S, pSrc, fftLen, arm_inverse_fft_length_f16(S->fftLen)); + break; + + case 32: + case 128: + case 512: + case 2048: + arm_cfft_radix4by2_inverse_f16_mve(S, pSrc, fftLen); + break; + } + } else { + switch (fftLen) { + case 16: + case 64: + case 256: + case 1024: + case 4096: + _arm_radix4_butterfly_f16_mve(S, pSrc, fftLen); + break; + + case 32: + case 128: + case 512: + case 2048: + arm_cfft_radix4by2_f16_mve(S, pSrc, fftLen); + break; + } + } + + + if (bitReverseFlag) + { + + arm_bitreversal_16_inpl_mve((uint16_t*)pSrc, S->bitRevLength, S->pBitRevTable); + + } } #else @@ -666,162 +606,6 @@ extern void arm_radix4_butterfly_f16( @ingroup groupTransforms */ -/** - @defgroup ComplexFFT Complex FFT Functions - - @par - The Fast Fourier Transform (FFT) is an efficient algorithm for computing the - Discrete Fourier Transform (DFT). The FFT can be orders of magnitude faster - than the DFT, especially for long lengths. - The algorithms described in this section - operate on complex data. A separate set of functions is devoted to handling - of real sequences. - @par - There are separate algorithms for handling floating-point, Q15, and Q31 data - types. The algorithms available for each data type are described next. - @par - The FFT functions operate in-place. That is, the array holding the input data - will also be used to hold the corresponding result. The input data is complex - and contains 2*fftLen interleaved values as shown below. -
{real[0], imag[0], real[1], imag[1], ...} 
- The FFT result will be contained in the same array and the frequency domain - values will have the same interleaving. - - @par Floating-point - The floating-point complex FFT uses a mixed-radix algorithm. Multiple radix-8 - stages are performed along with a single radix-2 or radix-4 stage, as needed. - The algorithm supports lengths of [16, 32, 64, ..., 4096] and each length uses - a different twiddle factor table. - @par - The function uses the standard FFT definition and output values may grow by a - factor of fftLen when computing the forward transform. The - inverse transform includes a scale of 1/fftLen as part of the - calculation and this matches the textbook definition of the inverse FFT. - @par - For the MVE version, the new arm_cfft_init_f32 initialization function is - mandatory. Compilation flags are available to include only the required tables for the - needed FFTs. Other FFT versions can continue to be initialized as - explained below. - @par - For not MVE versions, pre-initialized data structures containing twiddle factors - and bit reversal tables are provided and defined in arm_const_structs.h. Include - this header in your function and then pass one of the constant structures as - an argument to arm_cfft_f32. For example: - @par - arm_cfft_f32(arm_cfft_sR_f32_len64, pSrc, 1, 1) - @par - computes a 64-point inverse complex FFT including bit reversal. - The data structures are treated as constant data and not modified during the - calculation. The same data structure can be reused for multiple transforms - including mixing forward and inverse transforms. - @par - Earlier releases of the library provided separate radix-2 and radix-4 - algorithms that operated on floating-point data. These functions are still - provided but are deprecated. The older functions are slower and less general - than the new functions. - @par - An example of initialization of the constants for the arm_cfft_f32 function follows: - @code - const static arm_cfft_instance_f32 *S; - ... - switch (length) { - case 16: - S = &arm_cfft_sR_f32_len16; - break; - case 32: - S = &arm_cfft_sR_f32_len32; - break; - case 64: - S = &arm_cfft_sR_f32_len64; - break; - case 128: - S = &arm_cfft_sR_f32_len128; - break; - case 256: - S = &arm_cfft_sR_f32_len256; - break; - case 512: - S = &arm_cfft_sR_f32_len512; - break; - case 1024: - S = &arm_cfft_sR_f32_len1024; - break; - case 2048: - S = &arm_cfft_sR_f32_len2048; - break; - case 4096: - S = &arm_cfft_sR_f32_len4096; - break; - } - @endcode - @par - The new arm_cfft_init_f32 can also be used. - @par Q15 and Q31 - The floating-point complex FFT uses a mixed-radix algorithm. Multiple radix-4 - stages are performed along with a single radix-2 stage, as needed. - The algorithm supports lengths of [16, 32, 64, ..., 4096] and each length uses - a different twiddle factor table. - @par - The function uses the standard FFT definition and output values may grow by a - factor of fftLen when computing the forward transform. The - inverse transform includes a scale of 1/fftLen as part of the - calculation and this matches the textbook definition of the inverse FFT. - @par - Pre-initialized data structures containing twiddle factors and bit reversal - tables are provided and defined in arm_const_structs.h. Include - this header in your function and then pass one of the constant structures as - an argument to arm_cfft_q31. For example: - @par - arm_cfft_q31(arm_cfft_sR_q31_len64, pSrc, 1, 1) - @par - computes a 64-point inverse complex FFT including bit reversal. 
- The data structures are treated as constant data and not modified during the - calculation. The same data structure can be reused for multiple transforms - including mixing forward and inverse transforms. - @par - Earlier releases of the library provided separate radix-2 and radix-4 - algorithms that operated on floating-point data. These functions are still - provided but are deprecated. The older functions are slower and less general - than the new functions. - @par - An example of initialization of the constants for the arm_cfft_q31 function follows: - @code - const static arm_cfft_instance_q31 *S; - ... - switch (length) { - case 16: - S = &arm_cfft_sR_q31_len16; - break; - case 32: - S = &arm_cfft_sR_q31_len32; - break; - case 64: - S = &arm_cfft_sR_q31_len64; - break; - case 128: - S = &arm_cfft_sR_q31_len128; - break; - case 256: - S = &arm_cfft_sR_q31_len256; - break; - case 512: - S = &arm_cfft_sR_q31_len512; - break; - case 1024: - S = &arm_cfft_sR_q31_len1024; - break; - case 2048: - S = &arm_cfft_sR_q31_len2048; - break; - case 4096: - S = &arm_cfft_sR_q31_len4096; - break; - } - @endcode - - */ - - /** @addtogroup ComplexFFT @{ @@ -855,7 +639,7 @@ void arm_cfft_f16( pSrc = p1 + 1; for(l=0; lpTwiddle); break; - + } if ( bitReverseFlag ) @@ -885,13 +669,13 @@ void arm_cfft_f16( if (ifftFlag == 1U) { - invL = 1.0f/(float16_t)L; + invL = 1.0f16/(_Float16)L; /* Conjugate and scale output data */ pSrc = p1; for(l=0; l 0U) { - bitRevTabOff = vldrhq_u32(pBitRevTab); - pBitRevTab += 4; + case 1024U: + retValue = 0.0009765625f; + break; - uint64x2_t bitRevOff1 = vmullbq_int_u32(bitRevTabOff, one); - uint64x2_t bitRevOff2 = vmulltq_int_u32(bitRevTabOff, one); + case 512U: + retValue = 0.001953125; + break; - uint64x2_t in1 = vldrdq_gather_offset_u64(src, bitRevOff1); - uint64x2_t in2 = vldrdq_gather_offset_u64(src, bitRevOff2); + case 256U: + retValue = 0.00390625f; + break; - vstrdq_scatter_offset_u64(src, bitRevOff1, in2); - vstrdq_scatter_offset_u64(src, bitRevOff2, in1); + case 128U: + retValue = 0.0078125; + break; - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } + case 64U: + retValue = 0.015625f; + break; + + case 32U: + retValue = 0.03125; + break; + + case 16U: + retValue = 0.0625f; + break; + + + default: + break; + } + return(retValue); } + + static void _arm_radix4_butterfly_f32_mve(const arm_cfft_instance_f32 * S,float32_t * pSrc, uint32_t fftLen) { - f32x4_t vecTmp0, vecTmp1; - f32x4_t vecSum0, vecDiff0, vecSum1, vecDiff1; - f32x4_t vecA, vecB, vecC, vecD; - uint32_t blkCnt; - uint32_t n1, n2; - uint32_t stage = 0; - int32_t iter = 1; - static const uint32_t strides[4] = { - (0 - 16) * sizeof(q31_t *), - (1 - 16) * sizeof(q31_t *), - (8 - 16) * sizeof(q31_t *), - (9 - 16) * sizeof(q31_t *) + f32x4_t vecTmp0, vecTmp1; + f32x4_t vecSum0, vecDiff0, vecSum1, vecDiff1; + f32x4_t vecA, vecB, vecC, vecD; + uint32_t blkCnt; + uint32_t n1, n2; + uint32_t stage = 0; + int32_t iter = 1; + static const int32_t strides[4] = { + (0 - 16) * (int32_t)sizeof(q31_t *), + (1 - 16) * (int32_t)sizeof(q31_t *), + (8 - 16) * (int32_t)sizeof(q31_t *), + (9 - 16) * (int32_t)sizeof(q31_t *) }; n2 = fftLen; @@ -143,29 +112,28 @@ static void _arm_radix4_butterfly_f32_mve(const arm_cfft_instance_f32 * S,float3 n2 >>= 2u; for (int k = fftLen / 4u; k > 1; k >>= 2) { + float32_t const *p_rearranged_twiddle_tab_stride1 = + &S->rearranged_twiddle_stride1[ + S->rearranged_twiddle_tab_stride1_arr[stage]]; + float32_t const *p_rearranged_twiddle_tab_stride2 = + 
&S->rearranged_twiddle_stride2[ + S->rearranged_twiddle_tab_stride2_arr[stage]]; + float32_t const *p_rearranged_twiddle_tab_stride3 = + &S->rearranged_twiddle_stride3[ + S->rearranged_twiddle_tab_stride3_arr[stage]]; + + float32_t * pBase = pSrc; for (int i = 0; i < iter; i++) { - float32_t const *p_rearranged_twiddle_tab_stride1 = - &S->rearranged_twiddle_stride1[ - S->rearranged_twiddle_tab_stride1_arr[stage]]; - float32_t const *p_rearranged_twiddle_tab_stride2 = - &S->rearranged_twiddle_stride2[ - S->rearranged_twiddle_tab_stride2_arr[stage]]; - float32_t const *p_rearranged_twiddle_tab_stride3 = - &S->rearranged_twiddle_stride3[ - S->rearranged_twiddle_tab_stride3_arr[stage]]; - float32_t const *pW1, *pW2, *pW3; - float32_t *inA = pSrc + CMPLX_DIM * i * n1; - float32_t *inB = inA + n2 * CMPLX_DIM; - float32_t *inC = inB + n2 * CMPLX_DIM; - float32_t *inD = inC + n2 * CMPLX_DIM; + float32_t *inA = pBase; + float32_t *inB = inA + n2 * CMPLX_DIM; + float32_t *inC = inB + n2 * CMPLX_DIM; + float32_t *inD = inC + n2 * CMPLX_DIM; + float32_t const *pW1 = p_rearranged_twiddle_tab_stride1; + float32_t const *pW2 = p_rearranged_twiddle_tab_stride2; + float32_t const *pW3 = p_rearranged_twiddle_tab_stride3; f32x4_t vecW; - - pW1 = p_rearranged_twiddle_tab_stride1; - pW2 = p_rearranged_twiddle_tab_stride2; - pW3 = p_rearranged_twiddle_tab_stride3; - blkCnt = n2 / 2; /* * load 2 f32 complex pair @@ -233,6 +201,7 @@ static void _arm_radix4_butterfly_f32_mve(const arm_cfft_instance_f32 * S,float3 blkCnt--; } + pBase += CMPLX_DIM * n1; } n1 = n2; n2 >>= 2u; @@ -243,7 +212,7 @@ static void _arm_radix4_butterfly_f32_mve(const arm_cfft_instance_f32 * S,float3 /* * start of Last stage process */ - uint32x4_t vecScGathAddr = *(uint32x4_t *) strides; + uint32x4_t vecScGathAddr = vld1q_u32((uint32_t*)strides); vecScGathAddr = vecScGathAddr + (uint32_t) pSrc; /* load scheduling */ @@ -335,16 +304,15 @@ static void _arm_radix4_butterfly_inverse_f32_mve(const arm_cfft_instance_f32 * f32x4_t vecTmp0, vecTmp1; f32x4_t vecSum0, vecDiff0, vecSum1, vecDiff1; f32x4_t vecA, vecB, vecC, vecD; - f32x4_t vecW; uint32_t blkCnt; uint32_t n1, n2; uint32_t stage = 0; int32_t iter = 1; - static const uint32_t strides[4] = { - (0 - 16) * sizeof(q31_t *), - (1 - 16) * sizeof(q31_t *), - (8 - 16) * sizeof(q31_t *), - (9 - 16) * sizeof(q31_t *) + static const int32_t strides[4] = { + (0 - 16) * (int32_t)sizeof(q31_t *), + (1 - 16) * (int32_t)sizeof(q31_t *), + (8 - 16) * (int32_t)sizeof(q31_t *), + (9 - 16) * (int32_t)sizeof(q31_t *) }; n2 = fftLen; @@ -352,26 +320,27 @@ static void _arm_radix4_butterfly_inverse_f32_mve(const arm_cfft_instance_f32 * n2 >>= 2u; for (int k = fftLen / 4; k > 1; k >>= 2) { + float32_t const *p_rearranged_twiddle_tab_stride1 = + &S->rearranged_twiddle_stride1[ + S->rearranged_twiddle_tab_stride1_arr[stage]]; + float32_t const *p_rearranged_twiddle_tab_stride2 = + &S->rearranged_twiddle_stride2[ + S->rearranged_twiddle_tab_stride2_arr[stage]]; + float32_t const *p_rearranged_twiddle_tab_stride3 = + &S->rearranged_twiddle_stride3[ + S->rearranged_twiddle_tab_stride3_arr[stage]]; + + float32_t * pBase = pSrc; for (int i = 0; i < iter; i++) { - float32_t const *p_rearranged_twiddle_tab_stride1 = - &S->rearranged_twiddle_stride1[ - S->rearranged_twiddle_tab_stride1_arr[stage]]; - float32_t const *p_rearranged_twiddle_tab_stride2 = - &S->rearranged_twiddle_stride2[ - S->rearranged_twiddle_tab_stride2_arr[stage]]; - float32_t const *p_rearranged_twiddle_tab_stride3 = - &S->rearranged_twiddle_stride3[ - 
S->rearranged_twiddle_tab_stride3_arr[stage]]; - float32_t const *pW1, *pW2, *pW3; - float32_t *inA = pSrc + CMPLX_DIM * i * n1; - float32_t *inB = inA + n2 * CMPLX_DIM; - float32_t *inC = inB + n2 * CMPLX_DIM; - float32_t *inD = inC + n2 * CMPLX_DIM; - - pW1 = p_rearranged_twiddle_tab_stride1; - pW2 = p_rearranged_twiddle_tab_stride2; - pW3 = p_rearranged_twiddle_tab_stride3; + float32_t *inA = pBase; + float32_t *inB = inA + n2 * CMPLX_DIM; + float32_t *inC = inB + n2 * CMPLX_DIM; + float32_t *inD = inC + n2 * CMPLX_DIM; + float32_t const *pW1 = p_rearranged_twiddle_tab_stride1; + float32_t const *pW2 = p_rearranged_twiddle_tab_stride2; + float32_t const *pW3 = p_rearranged_twiddle_tab_stride3; + f32x4_t vecW; blkCnt = n2 / 2; /* @@ -439,6 +408,7 @@ static void _arm_radix4_butterfly_inverse_f32_mve(const arm_cfft_instance_f32 * blkCnt--; } + pBase += CMPLX_DIM * n1; } n1 = n2; n2 >>= 2u; @@ -449,7 +419,7 @@ static void _arm_radix4_butterfly_inverse_f32_mve(const arm_cfft_instance_f32 * /* * start of Last stage process */ - uint32x4_t vecScGathAddr = *(uint32x4_t *) strides; + uint32x4_t vecScGathAddr = vld1q_u32 ((uint32_t*)strides); vecScGathAddr = vecScGathAddr + (uint32_t) pSrc; /* @@ -565,53 +535,53 @@ void arm_cfft_f32( float32_t * pSrc, uint8_t ifftFlag, uint8_t bitReverseFlag) -{ - uint32_t fftLen = S->fftLen; - - if (ifftFlag == 1U) { - - switch (fftLen) { - case 16: - case 64: - case 256: - case 1024: - case 4096: - _arm_radix4_butterfly_inverse_f32_mve(S, pSrc, fftLen, arm_inverse_fft_length_f32(S->fftLen)); - break; - - case 32: - case 128: - case 512: - case 2048: - arm_cfft_radix4by2_inverse_f32_mve(S, pSrc, fftLen); - break; - } - } else { - switch (fftLen) { - case 16: - case 64: - case 256: - case 1024: - case 4096: - _arm_radix4_butterfly_f32_mve(S, pSrc, fftLen); - break; - - case 32: - case 128: - case 512: - case 2048: - arm_cfft_radix4by2_f32_mve(S, pSrc, fftLen); - break; - } - } - - - if (bitReverseFlag) - { - - arm_bitreversal_f32_inpl_mve((uint32_t*)pSrc, S->bitRevLength, S->pBitRevTable); - - } +{ + uint32_t fftLen = S->fftLen; + + if (ifftFlag == 1U) { + + switch (fftLen) { + case 16: + case 64: + case 256: + case 1024: + case 4096: + _arm_radix4_butterfly_inverse_f32_mve(S, pSrc, fftLen, arm_inverse_fft_length_f32(S->fftLen)); + break; + + case 32: + case 128: + case 512: + case 2048: + arm_cfft_radix4by2_inverse_f32_mve(S, pSrc, fftLen); + break; + } + } else { + switch (fftLen) { + case 16: + case 64: + case 256: + case 1024: + case 4096: + _arm_radix4_butterfly_f32_mve(S, pSrc, fftLen); + break; + + case 32: + case 128: + case 512: + case 2048: + arm_cfft_radix4by2_f32_mve(S, pSrc, fftLen); + break; + } + } + + + if (bitReverseFlag) + { + + arm_bitreversal_32_inpl_mve((uint32_t*)pSrc, S->bitRevLength, S->pBitRevTable); + + } } @@ -633,7 +603,7 @@ extern void arm_bitreversal_32( /** @defgroup ComplexFFT Complex FFT Functions - + @par The Fast Fourier Transform (FFT) is an efficient algorithm for computing the Discrete Fourier Transform (DFT). The FFT can be orders of magnitude faster @@ -651,7 +621,7 @@ extern void arm_bitreversal_32(
{real[0], imag[0], real[1], imag[1], ...} 
The FFT result will be contained in the same array and the frequency domain values will have the same interleaving. - + @par Floating-point The floating-point complex FFT uses a mixed-radix algorithm. Multiple radix-8 stages are performed along with a single radix-2 or radix-4 stage, as needed. @@ -663,12 +633,12 @@ extern void arm_bitreversal_32( inverse transform includes a scale of 1/fftLen as part of the calculation and this matches the textbook definition of the inverse FFT. @par - For the MVE version, the new arm_cfft_init_f32 initialization function is + For the MVE version, the new arm_cfft_init_f32 initialization function is mandatory. Compilation flags are available to include only the required tables for the - needed FFTs. Other FFT versions can continue to be initialized as + needed FFTs.
Other FFT versions can continue to be initialized as explained below. @par - For not MVE versions, pre-initialized data structures containing twiddle factors + For not MVE versions, pre-initialized data structures containing twiddle factors and bit reversal tables are provided and defined in arm_const_structs.h. Include this header in your function and then pass one of the constant structures as an argument to arm_cfft_f32. For example: @@ -689,36 +659,37 @@ extern void arm_bitreversal_32( @code const static arm_cfft_instance_f32 *S; ... - switch (length) { - case 16: - S = &arm_cfft_sR_f32_len16; - break; - case 32: - S = &arm_cfft_sR_f32_len32; - break; - case 64: - S = &arm_cfft_sR_f32_len64; - break; - case 128: - S = &arm_cfft_sR_f32_len128; - break; - case 256: - S = &arm_cfft_sR_f32_len256; - break; - case 512: - S = &arm_cfft_sR_f32_len512; - break; - case 1024: - S = &arm_cfft_sR_f32_len1024; - break; - case 2048: - S = &arm_cfft_sR_f32_len2048; - break; - case 4096: - S = &arm_cfft_sR_f32_len4096; - break; - } + switch (length) { + case 16: + S = &arm_cfft_sR_f32_len16; + break; + case 32: + S = &arm_cfft_sR_f32_len32; + break; + case 64: + S = &arm_cfft_sR_f32_len64; + break; + case 128: + S = &arm_cfft_sR_f32_len128; + break; + case 256: + S = &arm_cfft_sR_f32_len256; + break; + case 512: + S = &arm_cfft_sR_f32_len512; + break; + case 1024: + S = &arm_cfft_sR_f32_len1024; + break; + case 2048: + S = &arm_cfft_sR_f32_len2048; + break; + case 4096: + S = &arm_cfft_sR_f32_len4096; + break; + } @endcode + @par The new arm_cfft_init_f32 can also be used. @par Q15 and Q31 @@ -783,7 +754,7 @@ extern void arm_bitreversal_32( break; } @endcode - + */ void arm_cfft_radix8by2_f32 (arm_cfft_instance_f32 * S, float32_t * p1) diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_f64.c index 3f5a91b..83b2cd3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_f64.c @@ -5,13 +5,13 @@ * Title: arm_cfft_f64.c * Description: Combined Radix Decimation in Frequency CFFT Double Precision Floating point processing function * - * $Date: 29. November 2019 - * $Revision: V1.0.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
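
The ComplexFFT documentation kept in arm_cfft_f32.c above describes two ways to obtain an instance: arm_cfft_init_f32() (mandatory for MVE builds) or one of the pre-initialized constants from arm_const_structs.h. A minimal usage sketch assuming the standard CMSIS-DSP prototypes and the include layout bundled in this SDK:

    #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h"

    #define FFT_LEN 64U

    /* buf holds 2*FFT_LEN interleaved values: {re[0], im[0], re[1], im[1], ...} */
    void run_cfft64(float32_t buf[2 * FFT_LEN])
    {
        arm_cfft_instance_f32 S;

        /* Selects the twiddle and bit-reversal tables for this length;
           required on MVE targets, also valid on plain Cortex-M. */
        if (arm_cfft_init_f32(&S, FFT_LEN) != ARM_MATH_SUCCESS)
            return;

        /* Forward transform, in place, with bit reversal. */
        arm_cfft_f32(&S, buf, 0, 1);

        /* Inverse transform (includes the 1/fftLen scale), still in place.
           Non-MVE builds could instead pass &arm_cfft_sR_f32_len64 from
           arm_const_structs.h without calling the init function. */
        arm_cfft_f32(&S, buf, 1, 1);
    }
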
* * SPDX-License-Identifier: Apache-2.0 * @@ -43,10 +43,6 @@ extern void arm_bitreversal_64( const uint16_t bitRevLen, const uint16_t * pBitRevTable); -/** -* @} end of ComplexFFT group -*/ - /* ---------------------------------------------------------------------- * Internal helper function used by the FFTs * ---------------------------------------------------------------------- */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f16.c index 7dfaf62..ac8260a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f16.c @@ -5,13 +5,13 @@ * Title: arm_cfft_init_f16.c * Description: Initialization function for cfft f16 instance * - * $Date: 07. January 2020 - * $Revision: V1.7.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f32.c index 98db754..b82f5ce 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_cfft_init_f32.c * Description: Initialization function for cfft f32 instance * - * $Date: 07. January 2020 - * $Revision: V1.7.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f64.c index 05e691a..cb2dae8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_f64.c @@ -5,13 +5,13 @@ * Title: arm_cfft_init_f64.c * Description: Initialization function for cfft f64 instance * - * $Date: 23. January 2020 - * $Revision: V1.7.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. 
All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -72,7 +72,7 @@ arm_status arm_cfft_init_f64( /* Initializations of Instance structure depending on the FFT length */ switch (S->fftLen) { -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_4096) && defined(ARM_TABLE_BITREVIDX_FLT_4096)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F64_4096) && defined(ARM_TABLE_BITREVIDX_FLT_4096)) /* Initializations of structure parameters for 4096 point FFT */ case 4096U: /* Initialise the bit reversal table modifier */ @@ -80,7 +80,7 @@ arm_status arm_cfft_init_f64( break; #endif -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_2048) && defined(ARM_TABLE_BITREVIDX_FLT_2048)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F64_2048) && defined(ARM_TABLE_BITREVIDX_FLT_2048)) /* Initializations of structure parameters for 2048 point FFT */ case 2048U: /* Initialise the bit reversal table modifier */ @@ -89,7 +89,7 @@ arm_status arm_cfft_init_f64( break; #endif -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_1024) && defined(ARM_TABLE_BITREVIDX_FLT_1024)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F64_1024) && defined(ARM_TABLE_BITREVIDX_FLT_1024)) /* Initializations of structure parameters for 1024 point FFT */ case 1024U: /* Initialise the bit reversal table modifier */ @@ -98,7 +98,7 @@ arm_status arm_cfft_init_f64( break; #endif -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_512) && defined(ARM_TABLE_BITREVIDX_FLT_512)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F64_512) && defined(ARM_TABLE_BITREVIDX_FLT_512)) /* Initializations of structure parameters for 512 point FFT */ case 512U: /* Initialise the bit reversal table modifier */ @@ -106,31 +106,31 @@ arm_status arm_cfft_init_f64( break; #endif -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_256) && defined(ARM_TABLE_BITREVIDX_FLT_256)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F64_256) && defined(ARM_TABLE_BITREVIDX_FLT_256)) case 256U: FFTINIT(f64,256); break; #endif -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_128) && defined(ARM_TABLE_BITREVIDX_FLT_128)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F64_128) && defined(ARM_TABLE_BITREVIDX_FLT_128)) case 128U: FFTINIT(f64,128); break; #endif -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_64) && defined(ARM_TABLE_BITREVIDX_FLT_64)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F64_64) && defined(ARM_TABLE_BITREVIDX_FLT_64)) case 64U: FFTINIT(f64,64); break; #endif -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_32) && defined(ARM_TABLE_BITREVIDX_FLT_32)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || 
(defined(ARM_TABLE_TWIDDLECOEF_F64_32) && defined(ARM_TABLE_BITREVIDX_FLT_32)) case 32U: FFTINIT(f64,32); break; #endif -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_f64_16) && defined(ARM_TABLE_BITREVIDX_FLT_16)) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || (defined(ARM_TABLE_TWIDDLECOEF_F64_16) && defined(ARM_TABLE_BITREVIDX_FLT_16)) case 16U: /* Initializations of structure parameters for 16 point FFT */ FFTINIT(f64,16); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_q15.c index d08b97e..a0f6356 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_cfft_init_q15.c * Description: Initialization function for cfft q15 instance * - * $Date: 07. January 2020 - * $Revision: V1.7.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_q31.c index 8b9c970..0877d2c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_cfft_init_q31.c * Description: Initialization function for cfft q31 instance * - * $Date: 07. January 2020 - * $Revision: V1.7.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_q15.c index 1bebc2b..83ca024 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_q15.c @@ -5,13 +5,13 @@ * Title: arm_cfft_q15.c * Description: Combined Radix Decimation in Q15 Frequency CFFT processing function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -35,65 +35,6 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_fft.h" -static void arm_bitreversal_16_inpl_mve( - uint16_t *pSrc, - const uint16_t bitRevLen, - const uint16_t *pBitRevTab) - -{ - uint32_t *src = (uint32_t *)pSrc; - uint32_t blkCnt; /* loop counters */ - uint32x4_t bitRevTabOff; - uint16x8_t one = vdupq_n_u16(1); - - blkCnt = (bitRevLen / 2) / 4; - while (blkCnt > 0U) { - bitRevTabOff = vldrhq_u16(pBitRevTab); - pBitRevTab += 8; - - uint32x4_t bitRevOff1 = vmullbq_int_u16(bitRevTabOff, one); - uint32x4_t bitRevOff2 = vmulltq_int_u16(bitRevTabOff, one); - - bitRevOff1 = bitRevOff1 >> 3; - bitRevOff2 = bitRevOff2 >> 3; - - uint32x4_t in1 = vldrwq_gather_shifted_offset_u32(src, bitRevOff1); - uint32x4_t in2 = vldrwq_gather_shifted_offset_u32(src, bitRevOff2); - - vstrwq_scatter_shifted_offset_u32(src, bitRevOff1, in2); - vstrwq_scatter_shifted_offset_u32(src, bitRevOff2, in1); - - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } - - - /* - * tail - * (will be merged thru tail predication) - */ - blkCnt = bitRevLen & 7; - if (blkCnt > 0U) { - mve_pred16_t p0 = vctp16q(blkCnt); - - bitRevTabOff = vldrhq_z_u16(pBitRevTab, p0); - - uint32x4_t bitRevOff1 = vmullbq_int_u16(bitRevTabOff, one); - uint32x4_t bitRevOff2 = vmulltq_int_u16(bitRevTabOff, one); - - bitRevOff1 = bitRevOff1 >> 3; - bitRevOff2 = bitRevOff2 >> 3; - - uint32x4_t in1 = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff1, p0); - uint32x4_t in2 = vldrwq_gather_shifted_offset_z_u32(src, bitRevOff2, p0); - - vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff1, in2, p0); - vstrwq_scatter_shifted_offset_p_u32(src, bitRevOff2, in1, p0); - } -} - static void _arm_radix4_butterfly_q15_mve( const arm_cfft_instance_q15 * S, q15_t *pSrc, @@ -102,14 +43,13 @@ static void _arm_radix4_butterfly_q15_mve( q15x8_t vecTmp0, vecTmp1; q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1; q15x8_t vecA, vecB, vecC, vecD; - q15x8_t vecW; uint32_t blkCnt; uint32_t n1, n2; uint32_t stage = 0; int32_t iter = 1; - static const uint32_t strides[4] = { - (0 - 16) * sizeof(q15_t *), (4 - 16) * sizeof(q15_t *), - (8 - 16) * sizeof(q15_t *), (12 - 16) * sizeof(q15_t *) + static const int32_t strides[4] = { + (0 - 16) * (int32_t)sizeof(q15_t *), (4 - 16) * (int32_t)sizeof(q15_t *), + (8 - 16) * (int32_t)sizeof(q15_t *), (12 - 16) * (int32_t)sizeof(q15_t *) }; /* @@ -122,25 +62,26 @@ static void _arm_radix4_butterfly_q15_mve( for (int k = fftLen / 4u; k > 1; k >>= 2u) { + q15_t const *p_rearranged_twiddle_tab_stride2 = + &S->rearranged_twiddle_stride2[ + S->rearranged_twiddle_tab_stride2_arr[stage]]; + q15_t const *p_rearranged_twiddle_tab_stride3 = &S->rearranged_twiddle_stride3[ + S->rearranged_twiddle_tab_stride3_arr[stage]]; + q15_t const *p_rearranged_twiddle_tab_stride1 = + &S->rearranged_twiddle_stride1[ + S->rearranged_twiddle_tab_stride1_arr[stage]]; + + q15_t * pBase = pSrc; for (int i = 0; i < iter; i++) { - q15_t const *p_rearranged_twiddle_tab_stride2 = - &S->rearranged_twiddle_stride2[ - S->rearranged_twiddle_tab_stride2_arr[stage]]; - q15_t const 
*p_rearranged_twiddle_tab_stride3 = &S->rearranged_twiddle_stride3[ - S->rearranged_twiddle_tab_stride3_arr[stage]]; - q15_t const *p_rearranged_twiddle_tab_stride1 = - &S->rearranged_twiddle_stride1[ - S->rearranged_twiddle_tab_stride1_arr[stage]]; - q15_t const *pW1, *pW2, *pW3; - q15_t *inA = pSrc + CMPLX_DIM * i * n1; + q15_t *inA = pBase; q15_t *inB = inA + n2 * CMPLX_DIM; q15_t *inC = inB + n2 * CMPLX_DIM; q15_t *inD = inC + n2 * CMPLX_DIM; - - pW1 = p_rearranged_twiddle_tab_stride1; - pW2 = p_rearranged_twiddle_tab_stride2; - pW3 = p_rearranged_twiddle_tab_stride3; + q15_t const *pW1 = p_rearranged_twiddle_tab_stride1; + q15_t const *pW2 = p_rearranged_twiddle_tab_stride2; + q15_t const *pW3 = p_rearranged_twiddle_tab_stride3; + q15x8_t vecW; blkCnt = n2 / 4; /* @@ -173,7 +114,7 @@ static void _arm_radix4_butterfly_q15_mve( */ vecW = vld1q(pW2); pW2 += 8; - vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0); + vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t); vst1q(inB, vecTmp1); inB += 8; @@ -186,7 +127,7 @@ static void _arm_radix4_butterfly_q15_mve( */ vecW = vld1q(pW1); pW1 += 8; - vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0); + vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t); vst1q(inC, vecTmp1); inC += 8; @@ -199,7 +140,7 @@ static void _arm_radix4_butterfly_q15_mve( */ vecW = vld1q(pW3); pW3 += 8; - vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0); + vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q15x8_t); vst1q(inD, vecTmp1); inD += 8; @@ -208,6 +149,7 @@ static void _arm_radix4_butterfly_q15_mve( blkCnt--; } + pBase += CMPLX_DIM * n1; } n1 = n2; n2 >>= 2u; @@ -218,7 +160,7 @@ static void _arm_radix4_butterfly_q15_mve( /* * start of Last stage process */ - uint32x4_t vecScGathAddr = *(uint32x4_t *) strides; + uint32x4_t vecScGathAddr = vld1q_u32 ((uint32_t*)strides); vecScGathAddr = vecScGathAddr + (uint32_t) pSrc; /* @@ -245,16 +187,16 @@ static void _arm_radix4_butterfly_q15_mve( vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8); vecTmp0 = vhaddq(vecSum0, vecSum1); - vstrwq_scatter_base_s32(vecScGathAddr, -64, (q15x8_t) vecTmp0); + vstrwq_scatter_base_s32(vecScGathAddr, -64, (int32x4_t) vecTmp0); vecTmp0 = vhsubq(vecSum0, vecSum1); - vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (q15x8_t) vecTmp0); + vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (int32x4_t) vecTmp0); vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1); - vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (q15x8_t) vecTmp0); + vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (int32x4_t) vecTmp0); vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, vecDiff1); - vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (q15x8_t) vecTmp0); + vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (int32x4_t) vecTmp0); blkCnt--; } @@ -295,7 +237,7 @@ static void arm_cfft_radix4by2_q15_mve(const arm_cfft_instance_q15 *S, q15_t *pS pCoefVec += 8; vecDiff = vhsubq(vecIn0, vecIn1); - vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw); + vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw, q15x8_t); vst1q(pIn1, vecCmplxTmp); pIn1 += 8; @@ -337,14 +279,13 @@ static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S q15x8_t vecTmp0, vecTmp1; q15x8_t vecSum0, vecDiff0, vecSum1, vecDiff1; q15x8_t vecA, vecB, vecC, vecD; - q15x8_t vecW; uint32_t blkCnt; uint32_t n1, n2; uint32_t stage = 0; int32_t iter = 1; - static const uint32_t strides[4] = { - (0 - 16) * sizeof(q15_t *), (4 - 16) * sizeof(q15_t *), - (8 - 16) * sizeof(q15_t *), (12 - 16) * sizeof(q15_t *) + static const int32_t strides[4] = { 
+ (0 - 16) * (int32_t)sizeof(q15_t *), (4 - 16) * (int32_t)sizeof(q15_t *), + (8 - 16) * (int32_t)sizeof(q15_t *), (12 - 16) * (int32_t)sizeof(q15_t *) }; @@ -358,25 +299,27 @@ static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S for (int k = fftLen / 4u; k > 1; k >>= 2u) { + q15_t const *p_rearranged_twiddle_tab_stride2 = + &S->rearranged_twiddle_stride2[ + S->rearranged_twiddle_tab_stride2_arr[stage]]; + q15_t const *p_rearranged_twiddle_tab_stride3 = &S->rearranged_twiddle_stride3[ + S->rearranged_twiddle_tab_stride3_arr[stage]]; + q15_t const *p_rearranged_twiddle_tab_stride1 = + &S->rearranged_twiddle_stride1[ + S->rearranged_twiddle_tab_stride1_arr[stage]]; + + q15_t * pBase = pSrc; for (int i = 0; i < iter; i++) { - q15_t const *p_rearranged_twiddle_tab_stride2 = - &S->rearranged_twiddle_stride2[ - S->rearranged_twiddle_tab_stride2_arr[stage]]; - q15_t const *p_rearranged_twiddle_tab_stride3 = &S->rearranged_twiddle_stride3[ - S->rearranged_twiddle_tab_stride3_arr[stage]]; - q15_t const *p_rearranged_twiddle_tab_stride1 = - &S->rearranged_twiddle_stride1[ - S->rearranged_twiddle_tab_stride1_arr[stage]]; - q15_t const *pW1, *pW2, *pW3; - q15_t *inA = pSrc + CMPLX_DIM * i * n1; + q15_t *inA = pBase; q15_t *inB = inA + n2 * CMPLX_DIM; q15_t *inC = inB + n2 * CMPLX_DIM; q15_t *inD = inC + n2 * CMPLX_DIM; + q15_t const *pW1 = p_rearranged_twiddle_tab_stride1; + q15_t const *pW2 = p_rearranged_twiddle_tab_stride2; + q15_t const *pW3 = p_rearranged_twiddle_tab_stride3; + q15x8_t vecW; - pW1 = p_rearranged_twiddle_tab_stride1; - pW2 = p_rearranged_twiddle_tab_stride2; - pW3 = p_rearranged_twiddle_tab_stride3; blkCnt = n2 / 4; /* @@ -409,7 +352,7 @@ static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S */ vecW = vld1q(pW2); pW2 += 8; - vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW); + vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q15x8_t); vst1q(inB, vecTmp1); inB += 8; @@ -422,7 +365,7 @@ static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S */ vecW = vld1q(pW1); pW1 += 8; - vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW); + vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q15x8_t); vst1q(inC, vecTmp1); inC += 8; /* @@ -434,7 +377,7 @@ static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S */ vecW = vld1q(pW3); pW3 += 8; - vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW); + vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q15x8_t); vst1q(inD, vecTmp1); inD += 8; @@ -443,6 +386,7 @@ static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S blkCnt--; } + pBase += CMPLX_DIM * n1; } n1 = n2; n2 >>= 2u; @@ -453,7 +397,7 @@ static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S /* * start of Last stage process */ - uint32x4_t vecScGathAddr = *(uint32x4_t *) strides; + uint32x4_t vecScGathAddr = vld1q_u32((uint32_t*)strides); vecScGathAddr = vecScGathAddr + (uint32_t) pSrc; /* @@ -480,16 +424,16 @@ static void _arm_radix4_butterfly_inverse_q15_mve(const arm_cfft_instance_q15 *S vecC = (q15x8_t) vldrwq_gather_base_s32(vecScGathAddr, 8); vecTmp0 = vhaddq(vecSum0, vecSum1); - vstrwq_scatter_base_s32(vecScGathAddr, -64, (q15x8_t) vecTmp0); + vstrwq_scatter_base_s32(vecScGathAddr, -64, (int32x4_t) vecTmp0); vecTmp0 = vhsubq(vecSum0, vecSum1); - vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (q15x8_t) vecTmp0); + vstrwq_scatter_base_s32(vecScGathAddr, -64 + 4, (int32x4_t) vecTmp0); vecTmp0 = MVE_CMPLX_ADD_FX_A_ixB(vecDiff0, 
vecDiff1); - vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (q15x8_t) vecTmp0); + vstrwq_scatter_base_s32(vecScGathAddr, -64 + 8, (int32x4_t) vecTmp0); vecTmp0 = MVE_CMPLX_SUB_FX_A_ixB(vecDiff0, vecDiff1); - vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (q15x8_t) vecTmp0); + vstrwq_scatter_base_s32(vecScGathAddr, -64 + 12, (int32x4_t) vecTmp0); blkCnt--; } @@ -594,53 +538,53 @@ void arm_cfft_q15( q15_t * pSrc, uint8_t ifftFlag, uint8_t bitReverseFlag) -{ - uint32_t fftLen = S->fftLen; - - if (ifftFlag == 1U) { - - switch (fftLen) { - case 16: - case 64: - case 256: - case 1024: - case 4096: - _arm_radix4_butterfly_inverse_q15_mve(S, pSrc, fftLen); - break; - - case 32: - case 128: - case 512: - case 2048: - arm_cfft_radix4by2_inverse_q15_mve(S, pSrc, fftLen); - break; - } - } else { - switch (fftLen) { - case 16: - case 64: - case 256: - case 1024: - case 4096: - _arm_radix4_butterfly_q15_mve(S, pSrc, fftLen); - break; - - case 32: - case 128: - case 512: - case 2048: - arm_cfft_radix4by2_q15_mve(S, pSrc, fftLen); - break; - } - } - - - if (bitReverseFlag) - { - +{ + uint32_t fftLen = S->fftLen; + + if (ifftFlag == 1U) { + + switch (fftLen) { + case 16: + case 64: + case 256: + case 1024: + case 4096: + _arm_radix4_butterfly_inverse_q15_mve(S, pSrc, fftLen); + break; + + case 32: + case 128: + case 512: + case 2048: + arm_cfft_radix4by2_inverse_q15_mve(S, pSrc, fftLen); + break; + } + } else { + switch (fftLen) { + case 16: + case 64: + case 256: + case 1024: + case 4096: + _arm_radix4_butterfly_q15_mve(S, pSrc, fftLen); + break; + + case 32: + case 128: + case 512: + case 2048: + arm_cfft_radix4by2_q15_mve(S, pSrc, fftLen); + break; + } + } + + + if (bitReverseFlag) + { + arm_bitreversal_16_inpl_mve((uint16_t*)pSrc, S->bitRevLength, S->pBitRevTable); - - } + + } } #else @@ -776,7 +720,7 @@ void arm_cfft_radix4by2_q15( for (i = n2; i > 0; i--) { - coeff = read_q15x2_ia ((q15_t **) &pC); + coeff = read_q15x2_ia (&pC); T = read_q15x2 (pSi); T = __SHADD16(T, 0); /* this is just a SIMD arithmetic shift right by 1 */ @@ -875,7 +819,7 @@ void arm_cfft_radix4by2_inverse_q15( for (i = n2; i > 0; i--) { - coeff = read_q15x2_ia ((q15_t **) &pC); + coeff = read_q15x2_ia (&pC); T = read_q15x2 (pSi); T = __SHADD16(T, 0); /* this is just a SIMD arithmetic shift right by 1 */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_q31.c index d0fb253..373e8a7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_q31.c @@ -5,13 +5,13 @@ * Title: arm_cfft_q31.c * Description: Combined Radix Decimation in Frequency CFFT fixed point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
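
The radix-4-by-2 Q15 paths above drop the (q15_t **) cast when calling read_q15x2_ia(), so the helper now receives the coefficient pointer with its own type. For reference, a scalar equivalent of what that helper does (hypothetical name; the real inline lives in the CMSIS-DSP memory headers):

    #include <stdint.h>
    #include <string.h>

    typedef int16_t q15_t;
    typedef int32_t q31_t;

    /* Read two consecutive q15 coefficients as one packed 32-bit word
       (low half = c[0], high half = c[1] on a little-endian target) and
       post-increment the pointer, which is all read_q15x2_ia() does. */
    static inline q31_t read_q15x2_ia_ref(const q15_t **pQ15)
    {
        q31_t packed;
        memcpy(&packed, *pQ15, sizeof(packed));
        *pQ15 += 2;
        return packed;
    }
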
* * SPDX-License-Identifier: Apache-2.0 * @@ -36,37 +36,6 @@ #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_vec_fft.h" -static void arm_bitreversal_32_inpl_mve( - uint32_t *pSrc, - const uint16_t bitRevLen, - const uint16_t *pBitRevTab) - -{ - uint64_t *src = (uint64_t *) pSrc; - uint32_t blkCnt; /* loop counters */ - uint32x4_t bitRevTabOff; - uint32x4_t one = vdupq_n_u32(1); - - blkCnt = (bitRevLen / 2) / 2; - while (blkCnt > 0U) { - bitRevTabOff = vldrhq_u32(pBitRevTab); - pBitRevTab += 4; - - uint64x2_t bitRevOff1 = vmullbq_int_u32(bitRevTabOff, one); - uint64x2_t bitRevOff2 = vmulltq_int_u32(bitRevTabOff, one); - - uint64x2_t in1 = vldrdq_gather_offset_u64(src, bitRevOff1); - uint64x2_t in2 = vldrdq_gather_offset_u64(src, bitRevOff2); - - vstrdq_scatter_offset_u64(src, bitRevOff1, in2); - vstrdq_scatter_offset_u64(src, bitRevOff2, in1); - - /* - * Decrement the blockSize loop counter - */ - blkCnt--; - } -} static void _arm_radix4_butterfly_q31_mve( const arm_cfft_instance_q31 * S, @@ -76,14 +45,13 @@ static void _arm_radix4_butterfly_q31_mve( q31x4_t vecTmp0, vecTmp1; q31x4_t vecSum0, vecDiff0, vecSum1, vecDiff1; q31x4_t vecA, vecB, vecC, vecD; - q31x4_t vecW; uint32_t blkCnt; uint32_t n1, n2; uint32_t stage = 0; int32_t iter = 1; - static const uint32_t strides[4] = { - (0 - 16) * sizeof(q31_t *), (1 - 16) * sizeof(q31_t *), - (8 - 16) * sizeof(q31_t *), (9 - 16) * sizeof(q31_t *) + static const int32_t strides[4] = { + (0 - 16) * (int32_t)sizeof(q31_t *), (1 - 16) * (int32_t)sizeof(q31_t *), + (8 - 16) * (int32_t)sizeof(q31_t *), (9 - 16) * (int32_t)sizeof(q31_t *) }; @@ -97,25 +65,27 @@ static void _arm_radix4_butterfly_q31_mve( for (int k = fftLen / 4u; k > 1; k >>= 2u) { + q31_t const *p_rearranged_twiddle_tab_stride2 = + &S->rearranged_twiddle_stride2[ + S->rearranged_twiddle_tab_stride2_arr[stage]]; + q31_t const *p_rearranged_twiddle_tab_stride3 = &S->rearranged_twiddle_stride3[ + S->rearranged_twiddle_tab_stride3_arr[stage]]; + q31_t const *p_rearranged_twiddle_tab_stride1 = + &S->rearranged_twiddle_stride1[ + S->rearranged_twiddle_tab_stride1_arr[stage]]; + + q31_t * pBase = pSrc; for (int i = 0; i < iter; i++) { - q31_t const *p_rearranged_twiddle_tab_stride2 = - &S->rearranged_twiddle_stride2[ - S->rearranged_twiddle_tab_stride2_arr[stage]]; - q31_t const *p_rearranged_twiddle_tab_stride3 = &S->rearranged_twiddle_stride3[ - S->rearranged_twiddle_tab_stride3_arr[stage]]; - q31_t const *p_rearranged_twiddle_tab_stride1 = - &S->rearranged_twiddle_stride1[ - S->rearranged_twiddle_tab_stride1_arr[stage]]; - q31_t const *pW1, *pW2, *pW3; - q31_t *inA = pSrc + CMPLX_DIM * i * n1; + q31_t *inA = pBase; q31_t *inB = inA + n2 * CMPLX_DIM; q31_t *inC = inB + n2 * CMPLX_DIM; q31_t *inD = inC + n2 * CMPLX_DIM; + q31_t const *pW1 = p_rearranged_twiddle_tab_stride1; + q31_t const *pW2 = p_rearranged_twiddle_tab_stride2; + q31_t const *pW3 = p_rearranged_twiddle_tab_stride3; + q31x4_t vecW; - pW1 = p_rearranged_twiddle_tab_stride1; - pW2 = p_rearranged_twiddle_tab_stride2; - pW3 = p_rearranged_twiddle_tab_stride3; blkCnt = n2 / 2; /* @@ -148,7 +118,7 @@ static void _arm_radix4_butterfly_q31_mve( */ vecW = vld1q(pW2); pW2 += 4; - vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0); + vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t); vst1q(inB, vecTmp1); inB += 4; @@ -161,7 +131,7 @@ static void _arm_radix4_butterfly_q31_mve( */ vecW = vld1q(pW1); pW1 += 4; - vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0); + vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t); vst1q(inC, vecTmp1); 
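
Throughout these butterflies the per-iteration address computation pSrc + CMPLX_DIM * i * n1 is replaced by a running pBase pointer that advances by CMPLX_DIM * n1 at the end of each block, and the twiddle-table pointers are hoisted out of the inner loop because they depend only on the stage. A small sketch checking that the two addressing forms visit the same blocks:

    #include <assert.h>
    #include <stddef.h>

    #define CMPLX_DIM 2

    static void check_block_addressing(float *pSrc, int iter, size_t n1)
    {
        float *pBase = pSrc;
        for (int i = 0; i < iter; i++) {
            float *inA_old = pSrc + CMPLX_DIM * (size_t)i * n1;  /* original form */
            float *inA_new = pBase;                              /* rewritten form */
            assert(inA_old == inA_new);

            /* ... butterfly over this block ... */

            pBase += CMPLX_DIM * n1;  /* matches the "pBase += CMPLX_DIM * n1;" added above */
        }
    }
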
inC += 4; /* @@ -173,7 +143,7 @@ static void _arm_radix4_butterfly_q31_mve( */ vecW = vld1q(pW3); pW3 += 4; - vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0); + vecTmp1 = MVE_CMPLX_MULT_FX_AxB(vecW, vecTmp0, q31x4_t); vst1q(inD, vecTmp1); inD += 4; @@ -182,6 +152,7 @@ static void _arm_radix4_butterfly_q31_mve( blkCnt--; } + pBase += CMPLX_DIM * n1; } n1 = n2; n2 >>= 2u; @@ -200,7 +171,7 @@ static void _arm_radix4_butterfly_q31_mve( /* * start of Last stage process */ - uint32x4_t vecScGathAddr = *(uint32x4_t *) strides; + uint32x4_t vecScGathAddr = vld1q_u32((uint32_t*)strides); vecScGathAddr = vecScGathAddr + (uint32_t) pSrc; /* @@ -281,7 +252,7 @@ static void arm_cfft_radix4by2_q31_mve(const arm_cfft_instance_q31 *S, q31_t *pS pCoef += 4; vecDiff = vhsubq(vecIn0, vecIn1); - vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw); + vecCmplxTmp = MVE_CMPLX_MULT_FX_AxConjB(vecDiff, vecTw, q31x4_t); vst1q(pIn1, vecCmplxTmp); pIn1 += 4; @@ -326,14 +297,13 @@ static void _arm_radix4_butterfly_inverse_q31_mve( q31x4_t vecTmp0, vecTmp1; q31x4_t vecSum0, vecDiff0, vecSum1, vecDiff1; q31x4_t vecA, vecB, vecC, vecD; - q31x4_t vecW; uint32_t blkCnt; uint32_t n1, n2; uint32_t stage = 0; int32_t iter = 1; - static const uint32_t strides[4] = { - (0 - 16) * sizeof(q31_t *), (1 - 16) * sizeof(q31_t *), - (8 - 16) * sizeof(q31_t *), (9 - 16) * sizeof(q31_t *) + static const int32_t strides[4] = { + (0 - 16) * (int32_t)sizeof(q31_t *), (1 - 16) * (int32_t)sizeof(q31_t *), + (8 - 16) * (int32_t)sizeof(q31_t *), (9 - 16) * (int32_t)sizeof(q31_t *) }; /* @@ -346,26 +316,26 @@ static void _arm_radix4_butterfly_inverse_q31_mve( for (int k = fftLen / 4u; k > 1; k >>= 2u) { + q31_t const *p_rearranged_twiddle_tab_stride2 = + &S->rearranged_twiddle_stride2[ + S->rearranged_twiddle_tab_stride2_arr[stage]]; + q31_t const *p_rearranged_twiddle_tab_stride3 = &S->rearranged_twiddle_stride3[ + S->rearranged_twiddle_tab_stride3_arr[stage]]; + q31_t const *p_rearranged_twiddle_tab_stride1 = + &S->rearranged_twiddle_stride1[ + S->rearranged_twiddle_tab_stride1_arr[stage]]; + + q31_t * pBase = pSrc; for (int i = 0; i < iter; i++) { - q31_t const *p_rearranged_twiddle_tab_stride2 = - &S->rearranged_twiddle_stride2[ - S->rearranged_twiddle_tab_stride2_arr[stage]]; - q31_t const *p_rearranged_twiddle_tab_stride3 = &S->rearranged_twiddle_stride3[ - S->rearranged_twiddle_tab_stride3_arr[stage]]; - q31_t const *p_rearranged_twiddle_tab_stride1 = - &S->rearranged_twiddle_stride1[ - S->rearranged_twiddle_tab_stride1_arr[stage]]; - - q31_t const *pW1, *pW2, *pW3; - q31_t *inA = pSrc + CMPLX_DIM * i * n1; + q31_t *inA = pBase; q31_t *inB = inA + n2 * CMPLX_DIM; q31_t *inC = inB + n2 * CMPLX_DIM; q31_t *inD = inC + n2 * CMPLX_DIM; - - pW1 = p_rearranged_twiddle_tab_stride1; - pW2 = p_rearranged_twiddle_tab_stride2; - pW3 = p_rearranged_twiddle_tab_stride3; + q31_t const *pW1 = p_rearranged_twiddle_tab_stride1; + q31_t const *pW2 = p_rearranged_twiddle_tab_stride2; + q31_t const *pW3 = p_rearranged_twiddle_tab_stride3; + q31x4_t vecW; blkCnt = n2 / 2; /* @@ -398,7 +368,7 @@ static void _arm_radix4_butterfly_inverse_q31_mve( */ vecW = vld1q(pW2); pW2 += 4; - vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW); + vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q31x4_t); vst1q(inB, vecTmp1); inB += 4; @@ -411,7 +381,7 @@ static void _arm_radix4_butterfly_inverse_q31_mve( */ vecW = vld1q(pW1); pW1 += 4; - vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW); + vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q31x4_t); vst1q(inC, 
vecTmp1); inC += 4; /* @@ -423,7 +393,7 @@ static void _arm_radix4_butterfly_inverse_q31_mve( */ vecW = vld1q(pW3); pW3 += 4; - vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW); + vecTmp1 = MVE_CMPLX_MULT_FX_AxConjB(vecTmp0, vecW, q31x4_t); vst1q(inD, vecTmp1); inD += 4; @@ -432,6 +402,7 @@ static void _arm_radix4_butterfly_inverse_q31_mve( blkCnt--; } + pBase += CMPLX_DIM * n1; } n1 = n2; n2 >>= 2u; @@ -450,7 +421,7 @@ static void _arm_radix4_butterfly_inverse_q31_mve( /* * start of Last stage process */ - uint32x4_t vecScGathAddr = *(uint32x4_t *) strides; + uint32x4_t vecScGathAddr = vld1q_u32((uint32_t*)strides); vecScGathAddr = vecScGathAddr + (uint32_t) pSrc; /* @@ -536,7 +507,7 @@ static void arm_cfft_radix4by2_inverse_q31_mve(const arm_cfft_instance_q31 *S, q pCoef += 4; vecDiff = vhsubq(vecIn0, vecIn1); - vecCmplxTmp = MVE_CMPLX_MULT_FX_AxB(vecDiff, vecTw); + vecCmplxTmp = MVE_CMPLX_MULT_FX_AxB(vecDiff, vecTw, q31x4_t); vst1q(pIn1, vecCmplxTmp); pIn1 += 4; @@ -600,55 +571,55 @@ void arm_cfft_q31( q31_t * pSrc, uint8_t ifftFlag, uint8_t bitReverseFlag) -{ - uint32_t fftLen = S->fftLen; - - if (ifftFlag == 1U) { - - switch (fftLen) { - case 16: - case 64: - case 256: - case 1024: - case 4096: - _arm_radix4_butterfly_inverse_q31_mve(S, pSrc, fftLen); - break; - - case 32: - case 128: - case 512: - case 2048: - arm_cfft_radix4by2_inverse_q31_mve(S, pSrc, fftLen); - break; - } - } else { - switch (fftLen) { - case 16: - case 64: - case 256: - case 1024: - case 4096: - _arm_radix4_butterfly_q31_mve(S, pSrc, fftLen); - break; - - case 32: - case 128: - case 512: - case 2048: - arm_cfft_radix4by2_q31_mve(S, pSrc, fftLen); - break; - } - } - - - if (bitReverseFlag) - { - +{ + uint32_t fftLen = S->fftLen; + + if (ifftFlag == 1U) { + + switch (fftLen) { + case 16: + case 64: + case 256: + case 1024: + case 4096: + _arm_radix4_butterfly_inverse_q31_mve(S, pSrc, fftLen); + break; + + case 32: + case 128: + case 512: + case 2048: + arm_cfft_radix4by2_inverse_q31_mve(S, pSrc, fftLen); + break; + } + } else { + switch (fftLen) { + case 16: + case 64: + case 256: + case 1024: + case 4096: + _arm_radix4_butterfly_q31_mve(S, pSrc, fftLen); + break; + + case 32: + case 128: + case 512: + case 2048: + arm_cfft_radix4by2_q31_mve(S, pSrc, fftLen); + break; + } + } + + + if (bitReverseFlag) + { + arm_bitreversal_32_inpl_mve((uint32_t*)pSrc, S->bitRevLength, S->pBitRevTable); - - } + + } } -#else +#else extern void arm_radix4_butterfly_q31( q31_t * pSrc, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_f16.c index d45ec07..c95a01f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_f16.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix2_f16.c * Description: Radix-2 Decimation in Frequency CFFT & CIFFT Floating point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. 
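
The dispatchers above finish by calling the in-place MVE bit-reversal helpers (arm_bitreversal_16_inpl_mve / arm_bitreversal_32_inpl_mve). Their gather/scatter code is easier to follow next to the scalar pattern it vectorizes; roughly the following, modelled on the non-MVE arm_bitreversal_32 in this SDK and shown here only as a sketch, not the exact helper:

    #include <stdint.h>

    /* The table stores pairs of byte offsets; each pair of complex words
       (real, imag) at those positions is swapped, permuting the spectrum
       into natural order in place. */
    static void bitreversal_32_scalar(uint32_t *pSrc, uint16_t bitRevLen,
                                      const uint16_t *pBitRevTab)
    {
        for (uint16_t i = 0; i < bitRevLen; i += 2) {
            uint32_t a = pBitRevTab[i]     >> 2;   /* 32-bit word index */
            uint32_t b = pBitRevTab[i + 1] >> 2;

            uint32_t tmp = pSrc[a];                /* swap real parts */
            pSrc[a] = pSrc[b];
            pSrc[b] = tmp;

            tmp = pSrc[a + 1];                     /* swap imaginary parts */
            pSrc[a + 1] = pSrc[b + 1];
            pSrc[b + 1] = tmp;
        }
    }
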
+ * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -148,22 +148,22 @@ uint16_t twidCoefModifier) l = i + n2; /* Butterfly implementation */ - a0 = pSrc[2 * i] + pSrc[2 * l]; - xt = pSrc[2 * i] - pSrc[2 * l]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * l]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * l]; - yt = pSrc[2 * i + 1] - pSrc[2 * l + 1]; - a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * l + 1]; + a1 = (_Float16)pSrc[2 * l + 1] + (_Float16)pSrc[2 * i + 1]; - p0 = xt * cosVal; - p1 = yt * sinVal; - p2 = yt * cosVal; - p3 = xt * sinVal; + p0 = (_Float16)xt * (_Float16)cosVal; + p1 = (_Float16)yt * (_Float16)sinVal; + p2 = (_Float16)yt * (_Float16)cosVal; + p3 = (_Float16)xt * (_Float16)sinVal; pSrc[2 * i] = a0; pSrc[2 * i + 1] = a1; - pSrc[2 * l] = p0 + p1; - pSrc[2 * l + 1] = p2 - p3; + pSrc[2 * l] = (_Float16)p0 + (_Float16)p1; + pSrc[2 * l + 1] = (_Float16)p2 - (_Float16)p3; i++; } // groups loop end @@ -190,22 +190,22 @@ uint16_t twidCoefModifier) do { l = i + n2; - a0 = pSrc[2 * i] + pSrc[2 * l]; - xt = pSrc[2 * i] - pSrc[2 * l]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * l]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * l]; - yt = pSrc[2 * i + 1] - pSrc[2 * l + 1]; - a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * l + 1]; + a1 = (_Float16)pSrc[2 * l + 1] + (_Float16)pSrc[2 * i + 1]; - p0 = xt * cosVal; - p1 = yt * sinVal; - p2 = yt * cosVal; - p3 = xt * sinVal; + p0 = (_Float16)xt * (_Float16)cosVal; + p1 = (_Float16)yt * (_Float16)sinVal; + p2 = (_Float16)yt * (_Float16)cosVal; + p3 = (_Float16)xt * (_Float16)sinVal; pSrc[2 * i] = a0; pSrc[2 * i + 1] = a1; - pSrc[2 * l] = p0 + p1; - pSrc[2 * l + 1] = p2 - p3; + pSrc[2 * l] = (_Float16)p0 + (_Float16)p1; + pSrc[2 * l + 1] = (_Float16)p2 - (_Float16)p3; i += n1; } while ( i < fftLen ); // butterfly loop end @@ -217,11 +217,11 @@ uint16_t twidCoefModifier) // loop for butterfly for (i = 0; i < fftLen; i += 2) { - a0 = pSrc[2 * i] + pSrc[2 * i + 2]; - xt = pSrc[2 * i] - pSrc[2 * i + 2]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * i + 2]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * i + 2]; - yt = pSrc[2 * i + 1] - pSrc[2 * i + 3]; - a1 = pSrc[2 * i + 3] + pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * i + 3]; + a1 = (_Float16)pSrc[2 * i + 3] + (_Float16)pSrc[2 * i + 1]; pSrc[2 * i] = a0; pSrc[2 * i + 1] = a1; @@ -253,22 +253,22 @@ uint16_t twidCoefModifier) do { l = i + n2; - a0 = pSrc[2 * i] + pSrc[2 * l]; - xt = pSrc[2 * i] - pSrc[2 * l]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * l]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * l]; - yt = pSrc[2 * i + 1] - pSrc[2 * l + 1]; - a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * l + 1]; + a1 = (_Float16)pSrc[2 * l + 1] + (_Float16)pSrc[2 * i + 1]; - p0 = xt * cosVal; - p1 = yt * sinVal; - p2 = yt * cosVal; - p3 = xt * sinVal; + p0 = (_Float16)xt * (_Float16)cosVal; + p1 = (_Float16)yt * (_Float16)sinVal; + p2 = (_Float16)yt * (_Float16)cosVal; + p3 = (_Float16)xt * (_Float16)sinVal; pSrc[2 * i] = a0; pSrc[2 * i + 1] = a1; - pSrc[2 * l] = p0 + p1; - pSrc[2 * l + 1] = p2 - p3; + pSrc[2 * l] = (_Float16)p0 + (_Float16)p1; + pSrc[2 * l + 1] = (_Float16)p2 - (_Float16)p3; i += n1; } while (i < fftLen); @@ -309,22 +309,22 @@ float16_t onebyfftLen) ia += twidCoefModifier; l = i + n2; - a0 = pSrc[2 * i] + pSrc[2 * l]; 
- xt = pSrc[2 * i] - pSrc[2 * l]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * l]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * l]; - yt = pSrc[2 * i + 1] - pSrc[2 * l + 1]; - a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * l + 1]; + a1 = (_Float16)pSrc[2 * l + 1] + (_Float16)pSrc[2 * i + 1]; - p0 = xt * cosVal; - p1 = yt * sinVal; - p2 = yt * cosVal; - p3 = xt * sinVal; + p0 = (_Float16)xt * (_Float16)cosVal; + p1 = (_Float16)yt * (_Float16)sinVal; + p2 = (_Float16)yt * (_Float16)cosVal; + p3 = (_Float16)xt * (_Float16)sinVal; pSrc[2 * i] = a0; pSrc[2 * i + 1] = a1; - pSrc[2 * l] = p0 - p1; - pSrc[2 * l + 1] = p2 + p3; + pSrc[2 * l] = (_Float16)p0 - (_Float16)p1; + pSrc[2 * l + 1] = (_Float16)p2 + (_Float16)p3; } // groups loop end twidCoefModifier <<= 1U; @@ -349,22 +349,22 @@ float16_t onebyfftLen) do { l = i + n2; - a0 = pSrc[2 * i] + pSrc[2 * l]; - xt = pSrc[2 * i] - pSrc[2 * l]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * l]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * l]; - yt = pSrc[2 * i + 1] - pSrc[2 * l + 1]; - a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * l + 1]; + a1 = (_Float16)pSrc[2 * l + 1] + (_Float16)pSrc[2 * i + 1]; - p0 = xt * cosVal; - p1 = yt * sinVal; - p2 = yt * cosVal; - p3 = xt * sinVal; + p0 = (_Float16)xt * (_Float16)cosVal; + p1 = (_Float16)yt * (_Float16)sinVal; + p2 = (_Float16)yt * (_Float16)cosVal; + p3 = (_Float16)xt * (_Float16)sinVal; pSrc[2 * i] = a0; pSrc[2 * i + 1] = a1; - pSrc[2 * l] = p0 - p1; - pSrc[2 * l + 1] = p2 + p3; + pSrc[2 * l] = (_Float16)p0 - (_Float16)p1; + pSrc[2 * l + 1] = (_Float16)p2 + (_Float16)p3; i += n1; } while ( i < fftLen ); // butterfly loop end @@ -377,16 +377,16 @@ float16_t onebyfftLen) // loop for butterfly for (i = 0; i < fftLen; i += 2) { - a0 = pSrc[2 * i] + pSrc[2 * i + 2]; - xt = pSrc[2 * i] - pSrc[2 * i + 2]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * i + 2]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * i + 2]; - a1 = pSrc[2 * i + 3] + pSrc[2 * i + 1]; - yt = pSrc[2 * i + 1] - pSrc[2 * i + 3]; + a1 = (_Float16)pSrc[2 * i + 3] + (_Float16)pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * i + 3]; - p0 = a0 * onebyfftLen; - p2 = xt * onebyfftLen; - p1 = a1 * onebyfftLen; - p3 = yt * onebyfftLen; + p0 = (_Float16)a0 * (_Float16)onebyfftLen; + p2 = (_Float16)xt * (_Float16)onebyfftLen; + p1 = (_Float16)a1 * (_Float16)onebyfftLen; + p3 = (_Float16)yt * (_Float16)onebyfftLen; pSrc[2 * i] = p0; pSrc[2 * i + 1] = p1; @@ -418,22 +418,22 @@ float16_t onebyfftLen) do { l = i + n2; - a0 = pSrc[2 * i] + pSrc[2 * l]; - xt = pSrc[2 * i] - pSrc[2 * l]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * l]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * l]; - yt = pSrc[2 * i + 1] - pSrc[2 * l + 1]; - a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * l + 1]; + a1 = (_Float16)pSrc[2 * l + 1] + (_Float16)pSrc[2 * i + 1]; - p0 = xt * cosVal; - p1 = yt * sinVal; - p2 = yt * cosVal; - p3 = xt * sinVal; + p0 = (_Float16)xt * (_Float16)cosVal; + p1 = (_Float16)yt * (_Float16)sinVal; + p2 = (_Float16)yt * (_Float16)cosVal; + p3 = (_Float16)xt * (_Float16)sinVal; pSrc[2 * i] = a0; pSrc[2 * i + 1] = a1; - pSrc[2 * l] = p0 - p1; - pSrc[2 * l + 1] = p2 + p3; + pSrc[2 * l] = (_Float16)p0 - (_Float16)p1; + pSrc[2 * l + 1] = (_Float16)p2 + (_Float16)p3; i += n1; } while ( i < fftLen ); // butterfly loop end @@ -451,16 +451,16 @@ float16_t onebyfftLen) { l = 
i + n2; - a0 = pSrc[2 * i] + pSrc[2 * l]; - xt = pSrc[2 * i] - pSrc[2 * l]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * l]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * l]; - a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1]; - yt = pSrc[2 * i + 1] - pSrc[2 * l + 1]; + a1 = (_Float16)pSrc[2 * l + 1] + (_Float16)pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * l + 1]; - p0 = a0 * onebyfftLen; - p2 = xt * onebyfftLen; - p1 = a1 * onebyfftLen; - p3 = yt * onebyfftLen; + p0 = (_Float16)a0 * (_Float16)onebyfftLen; + p2 = (_Float16)xt * (_Float16)onebyfftLen; + p1 = (_Float16)a1 * (_Float16)onebyfftLen; + p3 = (_Float16)yt * (_Float16)onebyfftLen; pSrc[2 * i] = p0; pSrc[2U * l] = p2; @@ -475,4 +475,5 @@ float16_t onebyfftLen) #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_f32.c index bdad034..dba45f4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_f32.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix2_f32.c * Description: Radix-2 Decimation in Frequency CFFT & CIFFT Floating point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_f16.c index 4671765..17e7c80 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_f16.c @@ -5,10 +5,13 @@ * Title: arm_cfft_radix2_init_f16.c * Description: Radix-2 Decimation in Frequency Floating-point CFFT & CIFFT Initialization function * - * Target Processor: Cortex-M cores + * $Date: 23 April 2021 + * $Revision: V1.9.0 + * + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_f32.c index db63a37..71fba78 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix2_init_f32.c * Description: Radix-2 Decimation in Frequency Floating-point CFFT & CIFFT Initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_q15.c index 934cd54..f07cad4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix2_init_q15.c * Description: Radix-2 Decimation in Frequency Q15 FFT & IFFT initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -94,7 +94,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initialise the Flag for calculation Bit reversal or not */ S->bitReverseFlag = bitReverseFlag; -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_4096) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREV_1024) /* Initializations of structure parameters depending on the FFT length */ switch (S->fftLen) @@ -107,7 +107,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 1U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) armBitRevIndexTable_fixed_4096; + S->pBitRevTable = (uint16_t *) armBitRevTable; break; @@ -119,7 +119,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 2U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[1]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[1]; break; @@ -127,7 +127,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initializations of structure parameters for 1024 point FFT */ S->twidCoefModifier = 4U; S->bitRevFactor = 4U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[3]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[3]; break; @@ -135,7 +135,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initializations of structure parameters for 512 point FFT */ S->twidCoefModifier = 8U; S->bitRevFactor = 8U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[7]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[7]; break; @@ -143,7 +143,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initializations of structure parameters for 256 point FFT */ S->twidCoefModifier = 16U; S->bitRevFactor = 16U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[15]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[15]; break; @@ -151,7 +151,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initializations of structure parameters for 128 point FFT */ S->twidCoefModifier = 32U; S->bitRevFactor = 32U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[31]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[31]; break; @@ -159,7 +159,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initializations of structure parameters for 64 point FFT */ S->twidCoefModifier = 64U; S->bitRevFactor = 64U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[63]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[63]; break; @@ -167,7 +167,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initializations of structure parameters for 32 point FFT */ S->twidCoefModifier = 128U; S->bitRevFactor = 128U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[127]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[127]; break; @@ -175,7 +175,7 @@ arm_status arm_cfft_radix2_init_q15( /* Initializations of structure parameters for 16 point FFT */ S->twidCoefModifier = 256U; S->bitRevFactor = 256U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[255]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[255]; break; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_q31.c index 8f171f7..5823559 100644 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix2_init_q31.c * Description: Radix-2 Decimation in Frequency Fixed-point CFFT & CIFFT Initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -95,7 +95,7 @@ arm_status arm_cfft_radix2_init_q31( /* Initialise the Flag for calculation Bit reversal or not */ S->bitReverseFlag = bitReverseFlag; -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_4096) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREV_1024) /* Initializations of Instance structure depending on the FFT length */ switch (S->fftLen) @@ -107,7 +107,7 @@ arm_status arm_cfft_radix2_init_q31( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 1U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) armBitRevIndexTable_fixed_4096; + S->pBitRevTable = (uint16_t *) armBitRevTable; break; /* Initializations of structure parameters for 2048 point FFT */ @@ -117,7 +117,7 @@ arm_status arm_cfft_radix2_init_q31( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 2U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[1]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[1]; break; /* Initializations of structure parameters for 1024 point FFT */ @@ -127,7 +127,7 @@ arm_status arm_cfft_radix2_init_q31( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 4U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[3]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[3]; break; /* Initializations of structure parameters for 512 point FFT */ @@ -137,42 +137,42 @@ arm_status arm_cfft_radix2_init_q31( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 8U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[7]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[7]; break; case 256U: /* Initializations of structure parameters for 256 point FFT */ S->twidCoefModifier = 16U; S->bitRevFactor = 16U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[15]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[15]; break; case 128U: /* Initializations of structure parameters for 128 point FFT */ S->twidCoefModifier = 32U; S->bitRevFactor = 32U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[31]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[31]; break; case 64U: /* Initializations of structure parameters for 64 point FFT */ S->twidCoefModifier = 64U; S->bitRevFactor = 64U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[63]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[63]; break; case 32U: /* Initializations of structure 
parameters for 32 point FFT */ S->twidCoefModifier = 128U; S->bitRevFactor = 128U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[127]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[127]; break; case 16U: /* Initializations of structure parameters for 16 point FFT */ S->twidCoefModifier = 256U; S->bitRevFactor = 256U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[255]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[255]; break; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_q15.c index 8d3347b..49f6d9d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_q15.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix2_q15.c * Description: Radix-2 Decimation in Frequency CFFT & CIFFT Fixed point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_q31.c index d647396..6f36181 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix2_q31.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix2_q31.c * Description: Radix-2 Decimation in Frequency CFFT & CIFFT Fixed point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_f16.c index cbc0552..4c46bc1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_f16.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix4_f16.c * Description: Radix-4 Decimation in Frequency CFFT & CIFFT Floating point processing function * - * $Date: 18. 
March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -97,22 +97,22 @@ void arm_cfft_radix4by2_f16( l = i + n2; /* Butterfly implementation */ - a0 = pSrc[2 * i] + pSrc[2 * l]; - xt = pSrc[2 * i] - pSrc[2 * l]; + a0 = (_Float16)pSrc[2 * i] + (_Float16)pSrc[2 * l]; + xt = (_Float16)pSrc[2 * i] - (_Float16)pSrc[2 * l]; - yt = pSrc[2 * i + 1] - pSrc[2 * l + 1]; - a1 = pSrc[2 * l + 1] + pSrc[2 * i + 1]; + yt = (_Float16)pSrc[2 * i + 1] - (_Float16)pSrc[2 * l + 1]; + a1 = (_Float16)pSrc[2 * l + 1] + (_Float16)pSrc[2 * i + 1]; - p0 = xt * cosVal; - p1 = yt * sinVal; - p2 = yt * cosVal; - p3 = xt * sinVal; + p0 = (_Float16)xt * (_Float16)cosVal; + p1 = (_Float16)yt * (_Float16)sinVal; + p2 = (_Float16)yt * (_Float16)cosVal; + p3 = (_Float16)xt * (_Float16)sinVal; pSrc[2 * i] = a0; pSrc[2 * i + 1] = a1; - pSrc[2 * l] = p0 + p1; - pSrc[2 * l + 1] = p2 - p3; + pSrc[2 * l] = (_Float16)p0 + (_Float16)p1; + pSrc[2 * l + 1] = (_Float16)p2 - (_Float16)p3; } @@ -230,13 +230,13 @@ uint16_t twidCoefModifier) ydIn = pSrc[(2U * i3) + 1U]; /* xa + xc */ - Xaplusc = xaIn + xcIn; + Xaplusc = (_Float16)xaIn + (_Float16)xcIn; /* xb + xd */ - Xbplusd = xbIn + xdIn; + Xbplusd = (_Float16)xbIn + (_Float16)xdIn; /* ya + yc */ - Yaplusc = yaIn + ycIn; + Yaplusc = (_Float16)yaIn + (_Float16)ycIn; /* yb + yd */ - Ybplusd = ybIn + ydIn; + Ybplusd = (_Float16)ybIn + (_Float16)ydIn; /* index calculation for the coefficients */ ia2 = ia1 + ia1; @@ -244,31 +244,31 @@ uint16_t twidCoefModifier) si2 = pCoef[(ia2 * 2U) + 1U]; /* xa - xc */ - Xaminusc = xaIn - xcIn; + Xaminusc = (_Float16)xaIn - (_Float16)xcIn; /* xb - xd */ - Xbminusd = xbIn - xdIn; + Xbminusd = (_Float16)xbIn - (_Float16)xdIn; /* ya - yc */ - Yaminusc = yaIn - ycIn; + Yaminusc = (_Float16)yaIn - (_Float16)ycIn; /* yb - yd */ - Ybminusd = ybIn - ydIn; + Ybminusd = (_Float16)ybIn - (_Float16)ydIn; /* xa' = xa + xb + xc + xd */ - pSrc[(2U * i0)] = Xaplusc + Xbplusd; + pSrc[(2U * i0)] = (_Float16)Xaplusc + (_Float16)Xbplusd; /* ya' = ya + yb + yc + yd */ - pSrc[(2U * i0) + 1U] = Yaplusc + Ybplusd; + pSrc[(2U * i0) + 1U] = (_Float16)Yaplusc + (_Float16)Ybplusd; /* (xa - xc) + (yb - yd) */ - Xb12C_out = (Xaminusc + Ybminusd); + Xb12C_out = ((_Float16)Xaminusc + (_Float16)Ybminusd); /* (ya - yc) + (xb - xd) */ - Yb12C_out = (Yaminusc - Xbminusd); + Yb12C_out = ((_Float16)Yaminusc - (_Float16)Xbminusd); /* (xa + xc) - (xb + xd) */ - Xc12C_out = (Xaplusc - Xbplusd); + Xc12C_out = ((_Float16)Xaplusc - (_Float16)Xbplusd); /* (ya + yc) - (yb + yd) */ - Yc12C_out = (Yaplusc - Ybplusd); + Yc12C_out = ((_Float16)Yaplusc - (_Float16)Ybplusd); /* (xa - xc) - (yb - yd) */ - Xd12C_out = (Xaminusc - Ybminusd); + Xd12C_out = ((_Float16)Xaminusc - (_Float16)Ybminusd); /* (ya - yc) + (xb - xd) */ - Yd12C_out = (Xbminusd + Yaminusc); + Yd12C_out = ((_Float16)Xbminusd + (_Float16)Yaminusc); co1 = pCoef[ia1 * 2U]; si1 = pCoef[(ia1 * 2U) + 1U]; @@ -278,38 +278,38 @@ uint16_t twidCoefModifier) co3 = pCoef[ia3 * 2U]; si3 = pCoef[(ia3 * 2U) + 1U]; - Xb12_out = Xb12C_out * co1; - Yb12_out = Yb12C_out * co1; - Xc12_out = Xc12C_out * co2; - Yc12_out = Yc12C_out * co2; - Xd12_out = Xd12C_out * co3; - 
Yd12_out = Yd12C_out * co3; + Xb12_out = (_Float16)Xb12C_out * (_Float16)co1; + Yb12_out = (_Float16)Yb12C_out * (_Float16)co1; + Xc12_out = (_Float16)Xc12C_out * (_Float16)co2; + Yc12_out = (_Float16)Yc12C_out * (_Float16)co2; + Xd12_out = (_Float16)Xd12C_out * (_Float16)co3; + Yd12_out = (_Float16)Yd12C_out * (_Float16)co3; /* xb' = (xa+yb-xc-yd)co1 - (ya-xb-yc+xd)(si1) */ //Xb12_out -= Yb12C_out * si1; - p0 = Yb12C_out * si1; + p0 = (_Float16)Yb12C_out * (_Float16)si1; /* yb' = (ya-xb-yc+xd)co1 + (xa+yb-xc-yd)(si1) */ //Yb12_out += Xb12C_out * si1; - p1 = Xb12C_out * si1; + p1 = (_Float16)Xb12C_out * (_Float16)si1; /* xc' = (xa-xb+xc-xd)co2 - (ya-yb+yc-yd)(si2) */ //Xc12_out -= Yc12C_out * si2; - p2 = Yc12C_out * si2; + p2 = (_Float16)Yc12C_out * (_Float16)si2; /* yc' = (ya-yb+yc-yd)co2 + (xa-xb+xc-xd)(si2) */ //Yc12_out += Xc12C_out * si2; - p3 = Xc12C_out * si2; + p3 = (_Float16)Xc12C_out * (_Float16)si2; /* xd' = (xa-yb-xc+yd)co3 - (ya+xb-yc-xd)(si3) */ //Xd12_out -= Yd12C_out * si3; - p4 = Yd12C_out * si3; + p4 = (_Float16)Yd12C_out * (_Float16)si3; /* yd' = (ya+xb-yc-xd)co3 + (xa-yb-xc+yd)(si3) */ //Yd12_out += Xd12C_out * si3; - p5 = Xd12C_out * si3; + p5 = (_Float16)Xd12C_out * (_Float16)si3; - Xb12_out += p0; - Yb12_out -= p1; - Xc12_out += p2; - Yc12_out -= p3; - Xd12_out += p4; - Yd12_out -= p5; + Xb12_out += (_Float16)p0; + Yb12_out -= (_Float16)p1; + Xc12_out += (_Float16)p2; + Yc12_out -= (_Float16)p3; + Xd12_out += (_Float16)p4; + Yd12_out -= (_Float16)p5; /* xc' = (xa-xb+xc-xd)co2 + (ya-yb+yc-yd)(si2) */ pSrc[2U * i1] = Xc12_out; @@ -387,71 +387,71 @@ uint16_t twidCoefModifier) ydIn = pSrc[(2U * i3) + 1U]; /* xa - xc */ - Xaminusc = xaIn - xcIn; + Xaminusc = (_Float16)xaIn - (_Float16)xcIn; /* (xb - xd) */ - Xbminusd = xbIn - xdIn; + Xbminusd = (_Float16)xbIn - (_Float16)xdIn; /* ya - yc */ - Yaminusc = yaIn - ycIn; + Yaminusc = (_Float16)yaIn - (_Float16)ycIn; /* (yb - yd) */ - Ybminusd = ybIn - ydIn; + Ybminusd = (_Float16)ybIn - (_Float16)ydIn; /* xa + xc */ - Xaplusc = xaIn + xcIn; + Xaplusc = (_Float16)xaIn + (_Float16)xcIn; /* xb + xd */ - Xbplusd = xbIn + xdIn; + Xbplusd = (_Float16)xbIn + (_Float16)xdIn; /* ya + yc */ - Yaplusc = yaIn + ycIn; + Yaplusc = (_Float16)yaIn + (_Float16)ycIn; /* yb + yd */ - Ybplusd = ybIn + ydIn; + Ybplusd = (_Float16)ybIn + (_Float16)ydIn; /* (xa - xc) + (yb - yd) */ - Xb12C_out = (Xaminusc + Ybminusd); + Xb12C_out = ((_Float16)Xaminusc + (_Float16)Ybminusd); /* (ya - yc) - (xb - xd) */ - Yb12C_out = (Yaminusc - Xbminusd); + Yb12C_out = ((_Float16)Yaminusc - (_Float16)Xbminusd); /* xa + xc -(xb + xd) */ - Xc12C_out = (Xaplusc - Xbplusd); + Xc12C_out = ((_Float16)Xaplusc - (_Float16)Xbplusd); /* (ya + yc) - (yb + yd) */ - Yc12C_out = (Yaplusc - Ybplusd); + Yc12C_out = ((_Float16)Yaplusc - (_Float16)Ybplusd); /* (xa - xc) - (yb - yd) */ - Xd12C_out = (Xaminusc - Ybminusd); + Xd12C_out = ((_Float16)Xaminusc - (_Float16)Ybminusd); /* (ya - yc) + (xb - xd) */ - Yd12C_out = (Xbminusd + Yaminusc); + Yd12C_out = ((_Float16)Xbminusd + (_Float16)Yaminusc); - pSrc[(2U * i0)] = Xaplusc + Xbplusd; - pSrc[(2U * i0) + 1U] = Yaplusc + Ybplusd; + pSrc[(2U * i0)] = (_Float16)Xaplusc + (_Float16)Xbplusd; + pSrc[(2U * i0) + 1U] = (_Float16)Yaplusc + (_Float16)Ybplusd; - Xb12_out = Xb12C_out * co1; - Yb12_out = Yb12C_out * co1; - Xc12_out = Xc12C_out * co2; - Yc12_out = Yc12C_out * co2; - Xd12_out = Xd12C_out * co3; - Yd12_out = Yd12C_out * co3; + Xb12_out = (_Float16)Xb12C_out * (_Float16)co1; + Yb12_out = (_Float16)Yb12C_out * (_Float16)co1; + Xc12_out 
= (_Float16)Xc12C_out * (_Float16)co2; + Yc12_out = (_Float16)Yc12C_out * (_Float16)co2; + Xd12_out = (_Float16)Xd12C_out * (_Float16)co3; + Yd12_out = (_Float16)Yd12C_out * (_Float16)co3; /* xb' = (xa+yb-xc-yd)co1 - (ya-xb-yc+xd)(si1) */ //Xb12_out -= Yb12C_out * si1; - p0 = Yb12C_out * si1; + p0 = (_Float16)Yb12C_out * (_Float16)si1; /* yb' = (ya-xb-yc+xd)co1 + (xa+yb-xc-yd)(si1) */ //Yb12_out += Xb12C_out * si1; - p1 = Xb12C_out * si1; + p1 = (_Float16)Xb12C_out * (_Float16)si1; /* xc' = (xa-xb+xc-xd)co2 - (ya-yb+yc-yd)(si2) */ //Xc12_out -= Yc12C_out * si2; - p2 = Yc12C_out * si2; + p2 = (_Float16)Yc12C_out * (_Float16)si2; /* yc' = (ya-yb+yc-yd)co2 + (xa-xb+xc-xd)(si2) */ //Yc12_out += Xc12C_out * si2; - p3 = Xc12C_out * si2; + p3 = (_Float16)Xc12C_out * (_Float16)si2; /* xd' = (xa-yb-xc+yd)co3 - (ya+xb-yc-xd)(si3) */ //Xd12_out -= Yd12C_out * si3; - p4 = Yd12C_out * si3; + p4 = (_Float16)Yd12C_out * (_Float16)si3; /* yd' = (ya+xb-yc-xd)co3 + (xa-yb-xc+yd)(si3) */ //Yd12_out += Xd12C_out * si3; - p5 = Xd12C_out * si3; + p5 = (_Float16)Xd12C_out * (_Float16)si3; - Xb12_out += p0; - Yb12_out -= p1; - Xc12_out += p2; - Yc12_out -= p3; - Xd12_out += p4; - Yd12_out -= p5; + Xb12_out += (_Float16)p0; + Yb12_out -= (_Float16)p1; + Xc12_out += (_Float16)p2; + Yc12_out -= (_Float16)p3; + Xd12_out += (_Float16)p4; + Yd12_out -= (_Float16)p5; /* xc' = (xa-xb+xc-xd)co2 + (ya-yb+yc-yd)(si2) */ pSrc[2U * i1] = Xc12_out; @@ -494,45 +494,45 @@ uint16_t twidCoefModifier) ydIn = ptr1[7]; /* xa + xc */ - Xaplusc = xaIn + xcIn; + Xaplusc = (_Float16)xaIn + (_Float16)xcIn; /* xa - xc */ - Xaminusc = xaIn - xcIn; + Xaminusc = (_Float16)xaIn - (_Float16)xcIn; /* ya + yc */ - Yaplusc = yaIn + ycIn; + Yaplusc = (_Float16)yaIn + (_Float16)ycIn; /* ya - yc */ - Yaminusc = yaIn - ycIn; + Yaminusc = (_Float16)yaIn - (_Float16)ycIn; /* xb + xd */ - Xbplusd = xbIn + xdIn; + Xbplusd = (_Float16)xbIn + (_Float16)xdIn; /* yb + yd */ - Ybplusd = ybIn + ydIn; + Ybplusd = (_Float16)ybIn + (_Float16)ydIn; /* (xb-xd) */ - Xbminusd = xbIn - xdIn; + Xbminusd = (_Float16)xbIn - (_Float16)xdIn; /* (yb-yd) */ - Ybminusd = ybIn - ydIn; + Ybminusd = (_Float16)ybIn - (_Float16)ydIn; /* xa' = xa + xb + xc + xd */ - a0 = (Xaplusc + Xbplusd); + a0 = ((_Float16)Xaplusc + (_Float16)Xbplusd); /* ya' = ya + yb + yc + yd */ - a1 = (Yaplusc + Ybplusd); + a1 = ((_Float16)Yaplusc + (_Float16)Ybplusd); /* xc' = (xa-xb+xc-xd) */ - a2 = (Xaplusc - Xbplusd); + a2 = ((_Float16)Xaplusc - (_Float16)Xbplusd); /* yc' = (ya-yb+yc-yd) */ - a3 = (Yaplusc - Ybplusd); + a3 = ((_Float16)Yaplusc - (_Float16)Ybplusd); /* xb' = (xa+yb-xc-yd) */ - a4 = (Xaminusc + Ybminusd); + a4 = ((_Float16)Xaminusc + (_Float16)Ybminusd); /* yb' = (ya-xb-yc+xd) */ - a5 = (Yaminusc - Xbminusd); + a5 = ((_Float16)Yaminusc - (_Float16)Xbminusd); /* xd' = (xa-yb-xc+yd)) */ - a6 = (Xaminusc - Ybminusd); + a6 = ((_Float16)Xaminusc - (_Float16)Ybminusd); /* yd' = (ya+xb-yc-xd) */ - a7 = (Xbminusd + Yaminusc); + a7 = ((_Float16)Xbminusd + (_Float16)Yaminusc); ptr1[0] = a0; ptr1[1] = a1; @@ -590,70 +590,70 @@ uint16_t twidCoefModifier) i3 = i2 + n2; /* xa + xc */ - r1 = pSrc[(2U * i0)] + pSrc[(2U * i2)]; + r1 = (_Float16)pSrc[(2U * i0)] + (_Float16)pSrc[(2U * i2)]; /* xa - xc */ - r2 = pSrc[(2U * i0)] - pSrc[(2U * i2)]; + r2 = (_Float16)pSrc[(2U * i0)] - (_Float16)pSrc[(2U * i2)]; /* ya + yc */ - s1 = pSrc[(2U * i0) + 1U] + pSrc[(2U * i2) + 1U]; + s1 = (_Float16)pSrc[(2U * i0) + 1U] + (_Float16)pSrc[(2U * i2) + 1U]; /* ya - yc */ - s2 = pSrc[(2U * i0) + 1U] - pSrc[(2U * i2) + 1U]; + 
s2 = (_Float16)pSrc[(2U * i0) + 1U] - (_Float16)pSrc[(2U * i2) + 1U]; /* xb + xd */ - t1 = pSrc[2U * i1] + pSrc[2U * i3]; + t1 = (_Float16)pSrc[2U * i1] + (_Float16)pSrc[2U * i3]; /* xa' = xa + xb + xc + xd */ - pSrc[2U * i0] = r1 + t1; + pSrc[2U * i0] = (_Float16)r1 + (_Float16)t1; /* xa + xc -(xb + xd) */ - r1 = r1 - t1; + r1 = (_Float16)r1 - (_Float16)t1; /* yb + yd */ - t2 = pSrc[(2U * i1) + 1U] + pSrc[(2U * i3) + 1U]; + t2 = (_Float16)pSrc[(2U * i1) + 1U] + (_Float16)pSrc[(2U * i3) + 1U]; /* ya' = ya + yb + yc + yd */ - pSrc[(2U * i0) + 1U] = s1 + t2; + pSrc[(2U * i0) + 1U] = (_Float16)s1 + (_Float16)t2; /* (ya + yc) - (yb + yd) */ - s1 = s1 - t2; + s1 = (_Float16)s1 - (_Float16)t2; /* (yb - yd) */ - t1 = pSrc[(2U * i1) + 1U] - pSrc[(2U * i3) + 1U]; + t1 = (_Float16)pSrc[(2U * i1) + 1U] - (_Float16)pSrc[(2U * i3) + 1U]; /* (xb - xd) */ - t2 = pSrc[2U * i1] - pSrc[2U * i3]; + t2 = (_Float16)pSrc[2U * i1] - (_Float16)pSrc[2U * i3]; /* xc' = (xa-xb+xc-xd)co2 + (ya-yb+yc-yd)(si2) */ - pSrc[2U * i1] = (r1 * co2) + (s1 * si2); + pSrc[2U * i1] = ((_Float16)r1 * (_Float16)co2) + ((_Float16)s1 * (_Float16)si2); /* yc' = (ya-yb+yc-yd)co2 - (xa-xb+xc-xd)(si2) */ - pSrc[(2U * i1) + 1U] = (s1 * co2) - (r1 * si2); + pSrc[(2U * i1) + 1U] = ((_Float16)s1 * (_Float16)co2) - ((_Float16)r1 * (_Float16)si2); /* (xa - xc) + (yb - yd) */ - r1 = r2 + t1; + r1 = (_Float16)r2 + (_Float16)t1; /* (xa - xc) - (yb - yd) */ - r2 = r2 - t1; + r2 = (_Float16)r2 - (_Float16)t1; /* (ya - yc) - (xb - xd) */ - s1 = s2 - t2; + s1 = (_Float16)s2 - (_Float16)t2; /* (ya - yc) + (xb - xd) */ - s2 = s2 + t2; + s2 = (_Float16)s2 + (_Float16)t2; /* xb' = (xa+yb-xc-yd)co1 + (ya-xb-yc+xd)(si1) */ - pSrc[2U * i2] = (r1 * co1) + (s1 * si1); + pSrc[2U * i2] = ((_Float16)r1 * (_Float16)co1) + ((_Float16)s1 * (_Float16)si1); /* yb' = (ya-xb-yc+xd)co1 - (xa+yb-xc-yd)(si1) */ - pSrc[(2U * i2) + 1U] = (s1 * co1) - (r1 * si1); + pSrc[(2U * i2) + 1U] = ((_Float16)s1 * (_Float16)co1) - ((_Float16)r1 * (_Float16)si1); /* xd' = (xa-yb-xc+yd)co3 + (ya+xb-yc-xd)(si3) */ - pSrc[2U * i3] = (r2 * co3) + (s2 * si3); + pSrc[2U * i3] = ((_Float16)r2 * (_Float16)co3) + ((_Float16)s2 * (_Float16)si3); /* yd' = (ya+xb-yc-xd)co3 - (xa-yb-xc+yd)(si3) */ - pSrc[(2U * i3) + 1U] = (s2 * co3) - (r2 * si3); + pSrc[(2U * i3) + 1U] = ((_Float16)s2 * (_Float16)co3) - ((_Float16)r2 * (_Float16)si3); i0 += n1; } while ( i0 < fftLen); @@ -734,13 +734,13 @@ float16_t onebyfftLen) ydIn = pSrc[(2U * i3) + 1U]; /* xa + xc */ - Xaplusc = xaIn + xcIn; + Xaplusc = (_Float16)xaIn + (_Float16)xcIn; /* xb + xd */ - Xbplusd = xbIn + xdIn; + Xbplusd = (_Float16)xbIn + (_Float16)xdIn; /* ya + yc */ - Yaplusc = yaIn + ycIn; + Yaplusc = (_Float16)yaIn + (_Float16)ycIn; /* yb + yd */ - Ybplusd = ybIn + ydIn; + Ybplusd = (_Float16)ybIn + (_Float16)ydIn; /* index calculation for the coefficients */ ia2 = ia1 + ia1; @@ -748,32 +748,32 @@ float16_t onebyfftLen) si2 = pCoef[(ia2 * 2U) + 1U]; /* xa - xc */ - Xaminusc = xaIn - xcIn; + Xaminusc = (_Float16)xaIn - (_Float16)xcIn; /* xb - xd */ - Xbminusd = xbIn - xdIn; + Xbminusd = (_Float16)xbIn - (_Float16)xdIn; /* ya - yc */ - Yaminusc = yaIn - ycIn; + Yaminusc = (_Float16)yaIn - (_Float16)ycIn; /* yb - yd */ - Ybminusd = ybIn - ydIn; + Ybminusd = (_Float16)ybIn - (_Float16)ydIn; /* xa' = xa + xb + xc + xd */ - pSrc[(2U * i0)] = Xaplusc + Xbplusd; + pSrc[(2U * i0)] = (_Float16)Xaplusc + (_Float16)Xbplusd; /* ya' = ya + yb + yc + yd */ - pSrc[(2U * i0) + 1U] = Yaplusc + Ybplusd; + pSrc[(2U * i0) + 1U] = (_Float16)Yaplusc + 
(_Float16)Ybplusd; /* (xa - xc) - (yb - yd) */ - Xb12C_out = (Xaminusc - Ybminusd); + Xb12C_out = ((_Float16)Xaminusc - (_Float16)Ybminusd); /* (ya - yc) + (xb - xd) */ - Yb12C_out = (Yaminusc + Xbminusd); + Yb12C_out = ((_Float16)Yaminusc + (_Float16)Xbminusd); /* (xa + xc) - (xb + xd) */ - Xc12C_out = (Xaplusc - Xbplusd); + Xc12C_out = ((_Float16)Xaplusc - (_Float16)Xbplusd); /* (ya + yc) - (yb + yd) */ - Yc12C_out = (Yaplusc - Ybplusd); + Yc12C_out = ((_Float16)Yaplusc - (_Float16)Ybplusd); /* (xa - xc) + (yb - yd) */ - Xd12C_out = (Xaminusc + Ybminusd); + Xd12C_out = ((_Float16)Xaminusc + (_Float16)Ybminusd); /* (ya - yc) - (xb - xd) */ - Yd12C_out = (Yaminusc - Xbminusd); + Yd12C_out = ((_Float16)Yaminusc - (_Float16)Xbminusd); co1 = pCoef[ia1 * 2U]; si1 = pCoef[(ia1 * 2U) + 1U]; @@ -783,38 +783,38 @@ float16_t onebyfftLen) co3 = pCoef[ia3 * 2U]; si3 = pCoef[(ia3 * 2U) + 1U]; - Xb12_out = Xb12C_out * co1; - Yb12_out = Yb12C_out * co1; - Xc12_out = Xc12C_out * co2; - Yc12_out = Yc12C_out * co2; - Xd12_out = Xd12C_out * co3; - Yd12_out = Yd12C_out * co3; + Xb12_out = (_Float16)Xb12C_out * (_Float16)co1; + Yb12_out = (_Float16)Yb12C_out * (_Float16)co1; + Xc12_out = (_Float16)Xc12C_out * (_Float16)co2; + Yc12_out = (_Float16)Yc12C_out * (_Float16)co2; + Xd12_out = (_Float16)Xd12C_out * (_Float16)co3; + Yd12_out = (_Float16)Yd12C_out * (_Float16)co3; /* xb' = (xa+yb-xc-yd)co1 - (ya-xb-yc+xd)(si1) */ //Xb12_out -= Yb12C_out * si1; - p0 = Yb12C_out * si1; + p0 = (_Float16)Yb12C_out * (_Float16)si1; /* yb' = (ya-xb-yc+xd)co1 + (xa+yb-xc-yd)(si1) */ //Yb12_out += Xb12C_out * si1; - p1 = Xb12C_out * si1; + p1 = (_Float16)Xb12C_out * (_Float16)si1; /* xc' = (xa-xb+xc-xd)co2 - (ya-yb+yc-yd)(si2) */ //Xc12_out -= Yc12C_out * si2; - p2 = Yc12C_out * si2; + p2 = (_Float16)Yc12C_out * (_Float16)si2; /* yc' = (ya-yb+yc-yd)co2 + (xa-xb+xc-xd)(si2) */ //Yc12_out += Xc12C_out * si2; - p3 = Xc12C_out * si2; + p3 = (_Float16)Xc12C_out * (_Float16)si2; /* xd' = (xa-yb-xc+yd)co3 - (ya+xb-yc-xd)(si3) */ //Xd12_out -= Yd12C_out * si3; - p4 = Yd12C_out * si3; + p4 = (_Float16)Yd12C_out * (_Float16)si3; /* yd' = (ya+xb-yc-xd)co3 + (xa-yb-xc+yd)(si3) */ //Yd12_out += Xd12C_out * si3; - p5 = Xd12C_out * si3; + p5 =(_Float16) Xd12C_out * (_Float16)si3; - Xb12_out -= p0; - Yb12_out += p1; - Xc12_out -= p2; - Yc12_out += p3; - Xd12_out -= p4; - Yd12_out += p5; + Xb12_out -= (_Float16)p0; + Yb12_out += (_Float16)p1; + Xc12_out -= (_Float16)p2; + Yc12_out += (_Float16)p3; + Xd12_out -= (_Float16)p4; + Yd12_out += (_Float16)p5; /* xc' = (xa-xb+xc-xd)co2 - (ya-yb+yc-yd)(si2) */ pSrc[2U * i1] = Xc12_out; @@ -891,71 +891,71 @@ float16_t onebyfftLen) ydIn = pSrc[(2U * i3) + 1U]; /* xa - xc */ - Xaminusc = xaIn - xcIn; + Xaminusc = (_Float16)xaIn - (_Float16)xcIn; /* (xb - xd) */ - Xbminusd = xbIn - xdIn; + Xbminusd = (_Float16)xbIn - (_Float16)xdIn; /* ya - yc */ - Yaminusc = yaIn - ycIn; + Yaminusc = (_Float16)yaIn - (_Float16)ycIn; /* (yb - yd) */ - Ybminusd = ybIn - ydIn; + Ybminusd = (_Float16)ybIn - (_Float16)ydIn; /* xa + xc */ - Xaplusc = xaIn + xcIn; + Xaplusc = (_Float16)xaIn + (_Float16)xcIn; /* xb + xd */ - Xbplusd = xbIn + xdIn; + Xbplusd = (_Float16)xbIn + (_Float16)xdIn; /* ya + yc */ - Yaplusc = yaIn + ycIn; + Yaplusc = (_Float16)yaIn + (_Float16)ycIn; /* yb + yd */ - Ybplusd = ybIn + ydIn; + Ybplusd = (_Float16)ybIn + (_Float16)ydIn; /* (xa - xc) - (yb - yd) */ - Xb12C_out = (Xaminusc - Ybminusd); + Xb12C_out = ((_Float16)Xaminusc - (_Float16)Ybminusd); /* (ya - yc) + (xb - xd) */ - Yb12C_out = (Yaminusc + 
Xbminusd); + Yb12C_out = ((_Float16)Yaminusc + (_Float16)Xbminusd); /* xa + xc -(xb + xd) */ - Xc12C_out = (Xaplusc - Xbplusd); + Xc12C_out = ((_Float16)Xaplusc - (_Float16)Xbplusd); /* (ya + yc) - (yb + yd) */ - Yc12C_out = (Yaplusc - Ybplusd); + Yc12C_out = ((_Float16)Yaplusc - (_Float16)Ybplusd); /* (xa - xc) + (yb - yd) */ - Xd12C_out = (Xaminusc + Ybminusd); + Xd12C_out = ((_Float16)Xaminusc + (_Float16)Ybminusd); /* (ya - yc) - (xb - xd) */ - Yd12C_out = (Yaminusc - Xbminusd); + Yd12C_out = ((_Float16)Yaminusc - (_Float16)Xbminusd); - pSrc[(2U * i0)] = Xaplusc + Xbplusd; - pSrc[(2U * i0) + 1U] = Yaplusc + Ybplusd; + pSrc[(2U * i0)] = (_Float16)Xaplusc + (_Float16)Xbplusd; + pSrc[(2U * i0) + 1U] = (_Float16)Yaplusc + (_Float16)Ybplusd; - Xb12_out = Xb12C_out * co1; - Yb12_out = Yb12C_out * co1; - Xc12_out = Xc12C_out * co2; - Yc12_out = Yc12C_out * co2; - Xd12_out = Xd12C_out * co3; - Yd12_out = Yd12C_out * co3; + Xb12_out = (_Float16)Xb12C_out * (_Float16)co1; + Yb12_out = (_Float16)Yb12C_out * (_Float16)co1; + Xc12_out = (_Float16)Xc12C_out * (_Float16)co2; + Yc12_out = (_Float16)Yc12C_out * (_Float16)co2; + Xd12_out = (_Float16)Xd12C_out * (_Float16)co3; + Yd12_out = (_Float16)Yd12C_out * (_Float16)co3; /* xb' = (xa+yb-xc-yd)co1 - (ya-xb-yc+xd)(si1) */ //Xb12_out -= Yb12C_out * si1; - p0 = Yb12C_out * si1; + p0 = (_Float16)Yb12C_out * (_Float16)si1; /* yb' = (ya-xb-yc+xd)co1 + (xa+yb-xc-yd)(si1) */ //Yb12_out += Xb12C_out * si1; - p1 = Xb12C_out * si1; + p1 = (_Float16)Xb12C_out * (_Float16)si1; /* xc' = (xa-xb+xc-xd)co2 - (ya-yb+yc-yd)(si2) */ //Xc12_out -= Yc12C_out * si2; - p2 = Yc12C_out * si2; + p2 = (_Float16)Yc12C_out * (_Float16)si2; /* yc' = (ya-yb+yc-yd)co2 + (xa-xb+xc-xd)(si2) */ //Yc12_out += Xc12C_out * si2; - p3 = Xc12C_out * si2; + p3 = (_Float16)Xc12C_out * (_Float16)si2; /* xd' = (xa-yb-xc+yd)co3 - (ya+xb-yc-xd)(si3) */ //Xd12_out -= Yd12C_out * si3; - p4 = Yd12C_out * si3; + p4 = (_Float16)Yd12C_out * (_Float16)si3; /* yd' = (ya+xb-yc-xd)co3 + (xa-yb-xc+yd)(si3) */ //Yd12_out += Xd12C_out * si3; - p5 = Xd12C_out * si3; + p5 = (_Float16)Xd12C_out * (_Float16)si3; - Xb12_out -= p0; - Yb12_out += p1; - Xc12_out -= p2; - Yc12_out += p3; - Xd12_out -= p4; - Yd12_out += p5; + Xb12_out -= (_Float16)p0; + Yb12_out += (_Float16)p1; + Xc12_out -= (_Float16)p2; + Yc12_out += (_Float16)p3; + Xd12_out -= (_Float16)p4; + Yd12_out += (_Float16)p5; /* xc' = (xa-xb+xc-xd)co2 - (ya-yb+yc-yd)(si2) */ pSrc[2U * i1] = Xc12_out; @@ -1000,54 +1000,54 @@ float16_t onebyfftLen) /* Butterfly implementation */ /* xa + xc */ - Xaplusc = xaIn + xcIn; + Xaplusc = (_Float16)xaIn + (_Float16)xcIn; /* xa - xc */ - Xaminusc = xaIn - xcIn; + Xaminusc = (_Float16)xaIn - (_Float16)xcIn; /* ya + yc */ - Yaplusc = yaIn + ycIn; + Yaplusc = (_Float16)yaIn + (_Float16)ycIn; /* ya - yc */ - Yaminusc = yaIn - ycIn; + Yaminusc = (_Float16)yaIn - (_Float16)ycIn; /* xb + xd */ - Xbplusd = xbIn + xdIn; + Xbplusd = (_Float16)xbIn + (_Float16)xdIn; /* yb + yd */ - Ybplusd = ybIn + ydIn; + Ybplusd = (_Float16)ybIn + (_Float16)ydIn; /* (xb-xd) */ - Xbminusd = xbIn - xdIn; + Xbminusd = (_Float16)xbIn - (_Float16)xdIn; /* (yb-yd) */ - Ybminusd = ybIn - ydIn; + Ybminusd = (_Float16)ybIn - (_Float16)ydIn; /* xa' = (xa+xb+xc+xd) * onebyfftLen */ - a0 = (Xaplusc + Xbplusd); + a0 = ((_Float16)Xaplusc + (_Float16)Xbplusd); /* ya' = (ya+yb+yc+yd) * onebyfftLen */ - a1 = (Yaplusc + Ybplusd); + a1 = ((_Float16)Yaplusc + (_Float16)Ybplusd); /* xc' = (xa-xb+xc-xd) * onebyfftLen */ - a2 = (Xaplusc - Xbplusd); + a2 = 
((_Float16)Xaplusc - (_Float16)Xbplusd); /* yc' = (ya-yb+yc-yd) * onebyfftLen */ - a3 = (Yaplusc - Ybplusd); + a3 = ((_Float16)Yaplusc - (_Float16)Ybplusd); /* xb' = (xa-yb-xc+yd) * onebyfftLen */ - a4 = (Xaminusc - Ybminusd); + a4 = ((_Float16)Xaminusc - (_Float16)Ybminusd); /* yb' = (ya+xb-yc-xd) * onebyfftLen */ - a5 = (Yaminusc + Xbminusd); + a5 = ((_Float16)Yaminusc + (_Float16)Xbminusd); /* xd' = (xa-yb-xc+yd) * onebyfftLen */ - a6 = (Xaminusc + Ybminusd); + a6 = ((_Float16)Xaminusc + (_Float16)Ybminusd); /* yd' = (ya-xb-yc+xd) * onebyfftLen */ - a7 = (Yaminusc - Xbminusd); + a7 = ((_Float16)Yaminusc - (_Float16)Xbminusd); - p0 = a0 * onebyfftLen; - p1 = a1 * onebyfftLen; - p2 = a2 * onebyfftLen; - p3 = a3 * onebyfftLen; - p4 = a4 * onebyfftLen; - p5 = a5 * onebyfftLen; - p6 = a6 * onebyfftLen; - p7 = a7 * onebyfftLen; + p0 = (_Float16)a0 * (_Float16)onebyfftLen; + p1 = (_Float16)a1 * (_Float16)onebyfftLen; + p2 = (_Float16)a2 * (_Float16)onebyfftLen; + p3 = (_Float16)a3 * (_Float16)onebyfftLen; + p4 = (_Float16)a4 * (_Float16)onebyfftLen; + p5 = (_Float16)a5 * (_Float16)onebyfftLen; + p6 = (_Float16)a6 * (_Float16)onebyfftLen; + p7 = (_Float16)a7 * (_Float16)onebyfftLen; /* xa' = (xa+xb+xc+xd) * onebyfftLen */ ptr1[0] = p0; @@ -1116,70 +1116,70 @@ float16_t onebyfftLen) i3 = i2 + n2; /* xa + xc */ - r1 = pSrc[(2U * i0)] + pSrc[(2U * i2)]; + r1 = (_Float16)pSrc[(2U * i0)] + (_Float16)pSrc[(2U * i2)]; /* xa - xc */ - r2 = pSrc[(2U * i0)] - pSrc[(2U * i2)]; + r2 = (_Float16)pSrc[(2U * i0)] - (_Float16)pSrc[(2U * i2)]; /* ya + yc */ - s1 = pSrc[(2U * i0) + 1U] + pSrc[(2U * i2) + 1U]; + s1 = (_Float16)pSrc[(2U * i0) + 1U] + (_Float16)pSrc[(2U * i2) + 1U]; /* ya - yc */ - s2 = pSrc[(2U * i0) + 1U] - pSrc[(2U * i2) + 1U]; + s2 = (_Float16)pSrc[(2U * i0) + 1U] - (_Float16)pSrc[(2U * i2) + 1U]; /* xb + xd */ - t1 = pSrc[2U * i1] + pSrc[2U * i3]; + t1 = (_Float16)pSrc[2U * i1] + (_Float16)pSrc[2U * i3]; /* xa' = xa + xb + xc + xd */ - pSrc[2U * i0] = r1 + t1; + pSrc[2U * i0] = (_Float16)r1 + (_Float16)t1; /* xa + xc -(xb + xd) */ - r1 = r1 - t1; + r1 = (_Float16)r1 - (_Float16)t1; /* yb + yd */ - t2 = pSrc[(2U * i1) + 1U] + pSrc[(2U * i3) + 1U]; + t2 = (_Float16)pSrc[(2U * i1) + 1U] + (_Float16)pSrc[(2U * i3) + 1U]; /* ya' = ya + yb + yc + yd */ - pSrc[(2U * i0) + 1U] = s1 + t2; + pSrc[(2U * i0) + 1U] = (_Float16)s1 + (_Float16)t2; /* (ya + yc) - (yb + yd) */ - s1 = s1 - t2; + s1 = (_Float16)s1 - (_Float16)t2; /* (yb - yd) */ - t1 = pSrc[(2U * i1) + 1U] - pSrc[(2U * i3) + 1U]; + t1 = (_Float16)pSrc[(2U * i1) + 1U] - (_Float16)pSrc[(2U * i3) + 1U]; /* (xb - xd) */ - t2 = pSrc[2U * i1] - pSrc[2U * i3]; + t2 = (_Float16)pSrc[2U * i1] - (_Float16)pSrc[2U * i3]; /* xc' = (xa-xb+xc-xd)co2 - (ya-yb+yc-yd)(si2) */ - pSrc[2U * i1] = (r1 * co2) - (s1 * si2); + pSrc[2U * i1] = ((_Float16)r1 * (_Float16)co2) - ((_Float16)s1 * (_Float16)si2); /* yc' = (ya-yb+yc-yd)co2 + (xa-xb+xc-xd)(si2) */ - pSrc[(2U * i1) + 1U] = (s1 * co2) + (r1 * si2); + pSrc[(2U * i1) + 1U] = ((_Float16)s1 * (_Float16)co2) + ((_Float16)r1 * (_Float16)si2); /* (xa - xc) - (yb - yd) */ - r1 = r2 - t1; + r1 = (_Float16)r2 - (_Float16)t1; /* (xa - xc) + (yb - yd) */ - r2 = r2 + t1; + r2 = (_Float16)r2 + (_Float16)t1; /* (ya - yc) + (xb - xd) */ - s1 = s2 + t2; + s1 = (_Float16)s2 + (_Float16)t2; /* (ya - yc) - (xb - xd) */ - s2 = s2 - t2; + s2 = (_Float16)s2 - (_Float16)t2; /* xb' = (xa+yb-xc-yd)co1 - (ya-xb-yc+xd)(si1) */ - pSrc[2U * i2] = (r1 * co1) - (s1 * si1); + pSrc[2U * i2] = ((_Float16)r1 * (_Float16)co1) - ((_Float16)s1 * 
(_Float16)si1); /* yb' = (ya-xb-yc+xd)co1 + (xa+yb-xc-yd)(si1) */ - pSrc[(2U * i2) + 1U] = (s1 * co1) + (r1 * si1); + pSrc[(2U * i2) + 1U] = ((_Float16)s1 * (_Float16)co1) + ((_Float16)r1 * (_Float16)si1); /* xd' = (xa-yb-xc+yd)co3 - (ya+xb-yc-xd)(si3) */ - pSrc[2U * i3] = (r2 * co3) - (s2 * si3); + pSrc[2U * i3] = ((_Float16)r2 * (_Float16)co3) - ((_Float16)s2 * (_Float16)si3); /* yd' = (ya+xb-yc-xd)co3 + (xa-yb-xc+yd)(si3) */ - pSrc[(2U * i3) + 1U] = (s2 * co3) + (r2 * si3); + pSrc[(2U * i3) + 1U] = ((_Float16)s2 * (_Float16)co3) + ((_Float16)r2 * (_Float16)si3); i0 += n1; } while ( i0 < fftLen); @@ -1202,74 +1202,75 @@ float16_t onebyfftLen) /* Butterfly implementation */ /* xa + xc */ - r1 = pSrc[2U * i0] + pSrc[2U * i2]; + r1 = (_Float16)pSrc[2U * i0] + (_Float16)pSrc[2U * i2]; /* xa - xc */ - r2 = pSrc[2U * i0] - pSrc[2U * i2]; + r2 = (_Float16)pSrc[2U * i0] - (_Float16)pSrc[2U * i2]; /* ya + yc */ - s1 = pSrc[(2U * i0) + 1U] + pSrc[(2U * i2) + 1U]; + s1 = (_Float16)pSrc[(2U * i0) + 1U] + (_Float16)pSrc[(2U * i2) + 1U]; /* ya - yc */ - s2 = pSrc[(2U * i0) + 1U] - pSrc[(2U * i2) + 1U]; + s2 = (_Float16)pSrc[(2U * i0) + 1U] - (_Float16)pSrc[(2U * i2) + 1U]; /* xc + xd */ - t1 = pSrc[2U * i1] + pSrc[2U * i3]; + t1 = (_Float16)pSrc[2U * i1] + (_Float16)pSrc[2U * i3]; /* xa' = xa + xb + xc + xd */ - pSrc[2U * i0] = (r1 + t1) * onebyfftLen; + pSrc[2U * i0] = ((_Float16)r1 + (_Float16)t1) * (_Float16)onebyfftLen; /* (xa + xb) - (xc + xd) */ - r1 = r1 - t1; + r1 = (_Float16)r1 - (_Float16)t1; /* yb + yd */ - t2 = pSrc[(2U * i1) + 1U] + pSrc[(2U * i3) + 1U]; + t2 = (_Float16)pSrc[(2U * i1) + 1U] + (_Float16)pSrc[(2U * i3) + 1U]; /* ya' = ya + yb + yc + yd */ - pSrc[(2U * i0) + 1U] = (s1 + t2) * onebyfftLen; + pSrc[(2U * i0) + 1U] = ((_Float16)s1 + (_Float16)t2) * (_Float16)onebyfftLen; /* (ya + yc) - (yb + yd) */ - s1 = s1 - t2; + s1 = (_Float16)s1 - (_Float16)t2; /* (yb-yd) */ - t1 = pSrc[(2U * i1) + 1U] - pSrc[(2U * i3) + 1U]; + t1 = (_Float16)pSrc[(2U * i1) + 1U] - (_Float16)pSrc[(2U * i3) + 1U]; /* (xb-xd) */ - t2 = pSrc[2U * i1] - pSrc[2U * i3]; + t2 = (_Float16)pSrc[2U * i1] - (_Float16)pSrc[2U * i3]; /* xc' = (xa-xb+xc-xd)co2 - (ya-yb+yc-yd)(si2) */ - pSrc[2U * i1] = r1 * onebyfftLen; + pSrc[2U * i1] = (_Float16)r1 * (_Float16)onebyfftLen; /* yc' = (ya-yb+yc-yd)co2 + (xa-xb+xc-xd)(si2) */ - pSrc[(2U * i1) + 1U] = s1 * onebyfftLen; + pSrc[(2U * i1) + 1U] = (_Float16)s1 * (_Float16)onebyfftLen; /* (xa - xc) - (yb-yd) */ - r1 = r2 - t1; + r1 = (_Float16)r2 - (_Float16)t1; /* (xa - xc) + (yb-yd) */ - r2 = r2 + t1; + r2 = (_Float16)r2 + (_Float16)t1; /* (ya - yc) + (xb-xd) */ - s1 = s2 + t2; + s1 = (_Float16)s2 + (_Float16)t2; /* (ya - yc) - (xb-xd) */ - s2 = s2 - t2; + s2 = (_Float16)s2 - (_Float16)t2; /* xb' = (xa+yb-xc-yd)co1 - (ya-xb-yc+xd)(si1) */ - pSrc[2U * i2] = r1 * onebyfftLen; + pSrc[2U * i2] = (_Float16)r1 * (_Float16)onebyfftLen; /* yb' = (ya-xb-yc+xd)co1 + (xa+yb-xc-yd)(si1) */ - pSrc[(2U * i2) + 1U] = s1 * onebyfftLen; + pSrc[(2U * i2) + 1U] = (_Float16)s1 * (_Float16)onebyfftLen; /* xd' = (xa-yb-xc+yd)co3 - (ya+xb-yc-xd)(si3) */ - pSrc[2U * i3] = r2 * onebyfftLen; + pSrc[2U * i3] = (_Float16)r2 * (_Float16)onebyfftLen; /* yd' = (ya+xb-yc-xd)co3 + (xa-yb-xc+yd)(si3) */ - pSrc[(2U * i3) + 1U] = s2 * onebyfftLen; + pSrc[(2U * i3) + 1U] = (_Float16)s2 * (_Float16)onebyfftLen; } #endif /* #if defined (ARM_MATH_DSP) */ } #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_f32.c index 1bc2f77..4c7020a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_f32.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix4_f32.c * Description: Radix-4 Decimation in Frequency CFFT & CIFFT Floating point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_f16.c index d83e138..152542d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_f16.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix4_init_f16.c * Description: Radix-4 Decimation in Frequency Floating-point CFFT & CIFFT Initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_f32.c index d218140..3d1a5ef 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix4_init_f32.c * Description: Radix-4 Decimation in Frequency Floating-point CFFT & CIFFT Initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_q15.c index 49858ee..c4a024e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix4_init_q15.c * Description: Radix-4 Decimation in Frequency Q15 FFT & IFFT initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -93,7 +93,7 @@ arm_status arm_cfft_radix4_init_q15( /* Initialise the Flag for calculation Bit reversal or not */ S->bitReverseFlag = bitReverseFlag; -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_4096) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREV_1024) /* Initializations of structure parameters depending on the FFT length */ switch (S->fftLen) @@ -106,7 +106,7 @@ arm_status arm_cfft_radix4_init_q15( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 1U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) armBitRevIndexTable_fixed_4096; + S->pBitRevTable = (uint16_t *) armBitRevTable; break; @@ -114,7 +114,7 @@ arm_status arm_cfft_radix4_init_q15( /* Initializations of structure parameters for 1024 point FFT */ S->twidCoefModifier = 4U; S->bitRevFactor = 4U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[3]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[3]; break; @@ -122,7 +122,7 @@ arm_status arm_cfft_radix4_init_q15( /* Initializations of structure parameters for 256 point FFT */ S->twidCoefModifier = 16U; S->bitRevFactor = 16U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[15]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[15]; break; @@ -130,7 +130,7 @@ arm_status arm_cfft_radix4_init_q15( /* Initializations of structure parameters for 64 point FFT */ S->twidCoefModifier = 64U; S->bitRevFactor = 64U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[63]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[63]; break; @@ -138,7 +138,7 @@ arm_status arm_cfft_radix4_init_q15( /* Initializations of structure parameters for 16 point FFT */ S->twidCoefModifier = 256U; S->bitRevFactor = 256U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[255]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[255]; break; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_q31.c index 6cde656..9b6273f 100644 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix4_init_q31.c * Description: Radix-4 Decimation in Frequency Q31 FFT & IFFT initialization function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -80,7 +80,7 @@ arm_status arm_cfft_radix4_init_q31( #if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_FFT_ALLOW_TABLES) -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q15_4096) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_TWIDDLECOEF_Q31_4096) /* Initialise the default arm status */ status = ARM_MATH_SUCCESS; @@ -93,7 +93,7 @@ arm_status arm_cfft_radix4_init_q31( /* Initialise the Flag for calculation Bit reversal or not */ S->bitReverseFlag = bitReverseFlag; -#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREVIDX_FXT_4096) +#if !defined(ARM_DSP_CONFIG_TABLES) || defined(ARM_ALL_FFT_TABLES) || defined(ARM_TABLE_BITREV_1024) /* Initializations of Instance structure depending on the FFT length */ switch (S->fftLen) @@ -105,7 +105,7 @@ arm_status arm_cfft_radix4_init_q31( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 1U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) armBitRevIndexTable_fixed_4096; + S->pBitRevTable = (uint16_t *) armBitRevTable; break; /* Initializations of structure parameters for 1024 point FFT */ @@ -115,28 +115,28 @@ arm_status arm_cfft_radix4_init_q31( /* Initialise the bit reversal table modifier */ S->bitRevFactor = 4U; /* Initialise the bit reversal table pointer */ - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[3]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[3]; break; case 256U: /* Initializations of structure parameters for 256 point FFT */ S->twidCoefModifier = 16U; S->bitRevFactor = 16U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[15]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[15]; break; case 64U: /* Initializations of structure parameters for 64 point FFT */ S->twidCoefModifier = 64U; S->bitRevFactor = 64U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[63]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[63]; break; case 16U: /* Initializations of structure parameters for 16 point FFT */ S->twidCoefModifier = 256U; S->bitRevFactor = 256U; - S->pBitRevTable = (uint16_t *) & armBitRevIndexTable_fixed_4096[255]; + S->pBitRevTable = (uint16_t *) & armBitRevTable[255]; break; default: diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_q15.c index 33edbf1..33b5029 100644 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_q15.c @@ -6,13 +6,13 @@ * Description: This file has function definition of Radix-4 FFT & IFFT function and * In-place bit reversal using bit reversal table * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -72,8 +72,21 @@ void arm_bitreversal_q15( Hence the output format is different for different FFT sizes. The input and output formats for different FFT sizes and number of bits to upscale are mentioned in the tables below for CFFT and CIFFT: @par - \image html CFFTQ15.gif "Input and Output Formats for Q15 CFFT" - \image html CIFFTQ15.gif "Input and Output Formats for Q15 CIFFT" + +| CFFT Size | Input format | Output format | Number of bits to upscale | +| --------: | ------------: | ------------: | ------------------------: | +| 16 | 1.15 | 5.11 | 4 | +| 64 | 1.15 | 7.9 | 6 | +| 256 | 1.15 | 9.7 | 8 | +| 1024 | 1.15 | 11.5 | 10 | + +| CIFFT Size | Input format | Output format | Number of bits to upscale | +| ---------: | ------------: | ------------: | ------------------------: | +| 16 | 1.15 | 5.11 | 0 | +| 64 | 1.15 | 7.9 | 0 | +| 256 | 1.15 | 9.7 | 0 | +| 1024 | 1.15 | 11.5 | 0 | + */ void arm_cfft_radix4_q15( @@ -497,16 +510,16 @@ void arm_radix4_butterfly_q15( do { /* Read xa (real), ya(imag) input */ - xaya = read_q15x2_ia ((q15_t **) &ptr1); + xaya = read_q15x2_ia (&ptr1); /* Read xb (real), yb(imag) input */ - xbyb = read_q15x2_ia ((q15_t **) &ptr1); + xbyb = read_q15x2_ia (&ptr1); /* Read xc (real), yc(imag) input */ - xcyc = read_q15x2_ia ((q15_t **) &ptr1); + xcyc = read_q15x2_ia (&ptr1); /* Read xd (real), yd(imag) input */ - xdyd = read_q15x2_ia ((q15_t **) &ptr1); + xdyd = read_q15x2_ia (&ptr1); /* R = packed((ya + yc), (xa + xc)) */ R = __QADD16(xaya, xcyc); @@ -1360,16 +1373,16 @@ void arm_radix4_butterfly_inverse_q15( do { /* Read xa (real), ya(imag) input */ - xaya = read_q15x2_ia ((q15_t **) &ptr1); + xaya = read_q15x2_ia (&ptr1); /* Read xb (real), yb(imag) input */ - xbyb = read_q15x2_ia ((q15_t **) &ptr1); + xbyb = read_q15x2_ia (&ptr1); /* Read xc (real), yc(imag) input */ - xcyc = read_q15x2_ia ((q15_t **) &ptr1); + xcyc = read_q15x2_ia (&ptr1); /* Read xd (real), yd(imag) input */ - xdyd = read_q15x2_ia ((q15_t **) &ptr1); + xdyd = read_q15x2_ia (&ptr1); /* R = packed((ya + yc), (xa + xc)) */ R = __QADD16(xaya, xcyc); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_q31.c index 7e5d38b..bad1640 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix4_q31.c @@ -6,13 +6,13 @@ * Description: This file has function definition of Radix-4 
FFT & IFFT function and * In-place bit reversal using bit reversal table * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -70,8 +70,21 @@ void arm_bitreversal_q31( Hence the output format is different for different FFT sizes. The input and output formats for different FFT sizes and number of bits to upscale are mentioned in the tables below for CFFT and CIFFT: @par - \image html CFFTQ31.gif "Input and Output Formats for Q31 CFFT" - \image html CIFFTQ31.gif "Input and Output Formats for Q31 CIFFT" + +| CFFT Size | Input format | Output format | Number of bits to upscale | +| --------: | ------------: | ------------: | ------------------------: | +| 16 | 1.31 | 5.27 | 4 | +| 64 | 1.31 | 7.25 | 6 | +| 256 | 1.31 | 9.23 | 8 | +| 1024 | 1.31 | 11.21 | 10 | + +| CIFFT Size | Input format | Output format | Number of bits to upscale | +| ---------: | ------------: | ------------: | ------------------------: | +| 16 | 1.31 | 5.27 | 0 | +| 64 | 1.31 | 7.25 | 0 | +| 256 | 1.31 | 9.23 | 0 | +| 1024 | 1.31 | 11.21 | 0 | + */ void arm_cfft_radix4_q31( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix8_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix8_f16.c index d9582f5..77dfc5b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix8_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix8_f16.c @@ -5,11 +5,13 @@ * Title: arm_cfft_radix8_f16.c * Description: Radix-8 Decimation in Frequency CFFT & CIFFT Floating point processing function * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -61,7 +63,7 @@ void arm_radix8_butterfly_f16( float16_t p1, p2, p3, p4; float16_t co2, co3, co4, co5, co6, co7, co8; float16_t si2, si3, si4, si5, si6, si7, si8; - const float16_t C81 = 0.70710678118f; + const float16_t C81 = 0.70710678118f16; n2 = fftLen; @@ -80,58 +82,58 @@ void arm_radix8_butterfly_f16( i6 = i5 + n2; i7 = i6 + n2; i8 = i7 + n2; - r1 = pSrc[2 * i1] + pSrc[2 * i5]; - r5 = pSrc[2 * i1] - pSrc[2 * i5]; - r2 = pSrc[2 * i2] + pSrc[2 * i6]; - r6 = pSrc[2 * i2] - pSrc[2 * i6]; - r3 = pSrc[2 * i3] + pSrc[2 * i7]; - r7 = pSrc[2 * i3] - pSrc[2 * i7]; - r4 = pSrc[2 * i4] + pSrc[2 * i8]; - r8 = pSrc[2 * i4] - pSrc[2 * i8]; - t1 = r1 - r3; - r1 = r1 + r3; - r3 = r2 - r4; - r2 = r2 + r4; - pSrc[2 * i1] = r1 + r2; - pSrc[2 * i5] = r1 - r2; - r1 = pSrc[2 * i1 + 1] + pSrc[2 * i5 + 1]; - s5 = pSrc[2 * i1 + 1] - pSrc[2 * i5 + 1]; - r2 = pSrc[2 * i2 + 1] + pSrc[2 * i6 + 1]; - s6 = pSrc[2 * i2 + 1] - pSrc[2 * i6 + 1]; - s3 = pSrc[2 * i3 + 1] + pSrc[2 * i7 + 1]; - s7 = pSrc[2 * i3 + 1] - pSrc[2 * i7 + 1]; - r4 = pSrc[2 * i4 + 1] + pSrc[2 * i8 + 1]; - s8 = pSrc[2 * i4 + 1] - pSrc[2 * i8 + 1]; - t2 = r1 - s3; - r1 = r1 + s3; - s3 = r2 - r4; - r2 = r2 + r4; - pSrc[2 * i1 + 1] = r1 + r2; - pSrc[2 * i5 + 1] = r1 - r2; - pSrc[2 * i3] = t1 + s3; - pSrc[2 * i7] = t1 - s3; - pSrc[2 * i3 + 1] = t2 - r3; - pSrc[2 * i7 + 1] = t2 + r3; - r1 = (r6 - r8) * C81; - r6 = (r6 + r8) * C81; - r2 = (s6 - s8) * C81; - s6 = (s6 + s8) * C81; - t1 = r5 - r1; - r5 = r5 + r1; - r8 = r7 - r6; - r7 = r7 + r6; - t2 = s5 - r2; - s5 = s5 + r2; - s8 = s7 - s6; - s7 = s7 + s6; - pSrc[2 * i2] = r5 + s7; - pSrc[2 * i8] = r5 - s7; - pSrc[2 * i6] = t1 + s8; - pSrc[2 * i4] = t1 - s8; - pSrc[2 * i2 + 1] = s5 - r7; - pSrc[2 * i8 + 1] = s5 + r7; - pSrc[2 * i6 + 1] = t2 - r8; - pSrc[2 * i4 + 1] = t2 + r8; + r1 = (_Float16)pSrc[2 * i1] + (_Float16)pSrc[2 * i5]; + r5 = (_Float16)pSrc[2 * i1] - (_Float16)pSrc[2 * i5]; + r2 = (_Float16)pSrc[2 * i2] + (_Float16)pSrc[2 * i6]; + r6 = (_Float16)pSrc[2 * i2] - (_Float16)pSrc[2 * i6]; + r3 = (_Float16)pSrc[2 * i3] + (_Float16)pSrc[2 * i7]; + r7 = (_Float16)pSrc[2 * i3] - (_Float16)pSrc[2 * i7]; + r4 = (_Float16)pSrc[2 * i4] + (_Float16)pSrc[2 * i8]; + r8 = (_Float16)pSrc[2 * i4] - (_Float16)pSrc[2 * i8]; + t1 = (_Float16)r1 - (_Float16)r3; + r1 = (_Float16)r1 + (_Float16)r3; + r3 = (_Float16)r2 - (_Float16)r4; + r2 = (_Float16)r2 + (_Float16)r4; + pSrc[2 * i1] = (_Float16)r1 + (_Float16)r2; + pSrc[2 * i5] = (_Float16)r1 - (_Float16)r2; + r1 = (_Float16)pSrc[2 * i1 + 1] + (_Float16)pSrc[2 * i5 + 1]; + s5 = (_Float16)pSrc[2 * i1 + 1] - (_Float16)pSrc[2 * i5 + 1]; + r2 = (_Float16)pSrc[2 * i2 + 1] + (_Float16)pSrc[2 * i6 + 1]; + s6 = (_Float16)pSrc[2 * i2 + 1] - (_Float16)pSrc[2 * i6 + 1]; + s3 = (_Float16)pSrc[2 * i3 + 1] + (_Float16)pSrc[2 * i7 + 1]; + s7 = (_Float16)pSrc[2 * i3 + 1] - (_Float16)pSrc[2 * i7 + 1]; + r4 = (_Float16)pSrc[2 * i4 + 1] + (_Float16)pSrc[2 * i8 + 1]; + s8 = (_Float16)pSrc[2 * i4 + 1] - (_Float16)pSrc[2 * i8 + 1]; + t2 = (_Float16)r1 - (_Float16)s3; + r1 = (_Float16)r1 + (_Float16)s3; + s3 = (_Float16)r2 - (_Float16)r4; + r2 = (_Float16)r2 + (_Float16)r4; + pSrc[2 * i1 + 1] = (_Float16)r1 + (_Float16)r2; + pSrc[2 * i5 + 1] = (_Float16)r1 - (_Float16)r2; + pSrc[2 * i3] = (_Float16)t1 + (_Float16)s3; + pSrc[2 * i7] = (_Float16)t1 - (_Float16)s3; + pSrc[2 * i3 + 1] = (_Float16)t2 - (_Float16)r3; + pSrc[2 * i7 + 1] = (_Float16)t2 + (_Float16)r3; + r1 = ((_Float16)r6 - (_Float16)r8) * (_Float16)C81; + r6 = ((_Float16)r6 + 
(_Float16)r8) * (_Float16)C81; + r2 = ((_Float16)s6 - (_Float16)s8) * (_Float16)C81; + s6 = ((_Float16)s6 + (_Float16)s8) * (_Float16)C81; + t1 = (_Float16)r5 - (_Float16)r1; + r5 = (_Float16)r5 + (_Float16)r1; + r8 = (_Float16)r7 - (_Float16)r6; + r7 = (_Float16)r7 + (_Float16)r6; + t2 = (_Float16)s5 - (_Float16)r2; + s5 = (_Float16)s5 + (_Float16)r2; + s8 = (_Float16)s7 - (_Float16)s6; + s7 = (_Float16)s7 + (_Float16)s6; + pSrc[2 * i2] = (_Float16)r5 + (_Float16)s7; + pSrc[2 * i8] = (_Float16)r5 - (_Float16)s7; + pSrc[2 * i6] = (_Float16)t1 + (_Float16)s8; + pSrc[2 * i4] = (_Float16)t1 - (_Float16)s8; + pSrc[2 * i2 + 1] = (_Float16)s5 - (_Float16)r7; + pSrc[2 * i8 + 1] = (_Float16)s5 + (_Float16)r7; + pSrc[2 * i6 + 1] = (_Float16)t2 - (_Float16)r8; + pSrc[2 * i4 + 1] = (_Float16)t2 + (_Float16)r8; i1 += n1; } while (i1 < fftLen); @@ -181,100 +183,100 @@ void arm_radix8_butterfly_f16( i6 = i5 + n2; i7 = i6 + n2; i8 = i7 + n2; - r1 = pSrc[2 * i1] + pSrc[2 * i5]; - r5 = pSrc[2 * i1] - pSrc[2 * i5]; - r2 = pSrc[2 * i2] + pSrc[2 * i6]; - r6 = pSrc[2 * i2] - pSrc[2 * i6]; - r3 = pSrc[2 * i3] + pSrc[2 * i7]; - r7 = pSrc[2 * i3] - pSrc[2 * i7]; - r4 = pSrc[2 * i4] + pSrc[2 * i8]; - r8 = pSrc[2 * i4] - pSrc[2 * i8]; - t1 = r1 - r3; - r1 = r1 + r3; - r3 = r2 - r4; - r2 = r2 + r4; - pSrc[2 * i1] = r1 + r2; - r2 = r1 - r2; - s1 = pSrc[2 * i1 + 1] + pSrc[2 * i5 + 1]; - s5 = pSrc[2 * i1 + 1] - pSrc[2 * i5 + 1]; - s2 = pSrc[2 * i2 + 1] + pSrc[2 * i6 + 1]; - s6 = pSrc[2 * i2 + 1] - pSrc[2 * i6 + 1]; - s3 = pSrc[2 * i3 + 1] + pSrc[2 * i7 + 1]; - s7 = pSrc[2 * i3 + 1] - pSrc[2 * i7 + 1]; - s4 = pSrc[2 * i4 + 1] + pSrc[2 * i8 + 1]; - s8 = pSrc[2 * i4 + 1] - pSrc[2 * i8 + 1]; - t2 = s1 - s3; - s1 = s1 + s3; - s3 = s2 - s4; - s2 = s2 + s4; - r1 = t1 + s3; - t1 = t1 - s3; - pSrc[2 * i1 + 1] = s1 + s2; - s2 = s1 - s2; - s1 = t2 - r3; - t2 = t2 + r3; - p1 = co5 * r2; - p2 = si5 * s2; - p3 = co5 * s2; - p4 = si5 * r2; - pSrc[2 * i5] = p1 + p2; - pSrc[2 * i5 + 1] = p3 - p4; - p1 = co3 * r1; - p2 = si3 * s1; - p3 = co3 * s1; - p4 = si3 * r1; - pSrc[2 * i3] = p1 + p2; - pSrc[2 * i3 + 1] = p3 - p4; - p1 = co7 * t1; - p2 = si7 * t2; - p3 = co7 * t2; - p4 = si7 * t1; - pSrc[2 * i7] = p1 + p2; - pSrc[2 * i7 + 1] = p3 - p4; - r1 = (r6 - r8) * C81; - r6 = (r6 + r8) * C81; - s1 = (s6 - s8) * C81; - s6 = (s6 + s8) * C81; - t1 = r5 - r1; - r5 = r5 + r1; - r8 = r7 - r6; - r7 = r7 + r6; - t2 = s5 - s1; - s5 = s5 + s1; - s8 = s7 - s6; - s7 = s7 + s6; - r1 = r5 + s7; - r5 = r5 - s7; - r6 = t1 + s8; - t1 = t1 - s8; - s1 = s5 - r7; - s5 = s5 + r7; - s6 = t2 - r8; - t2 = t2 + r8; - p1 = co2 * r1; - p2 = si2 * s1; - p3 = co2 * s1; - p4 = si2 * r1; - pSrc[2 * i2] = p1 + p2; - pSrc[2 * i2 + 1] = p3 - p4; - p1 = co8 * r5; - p2 = si8 * s5; - p3 = co8 * s5; - p4 = si8 * r5; - pSrc[2 * i8] = p1 + p2; - pSrc[2 * i8 + 1] = p3 - p4; - p1 = co6 * r6; - p2 = si6 * s6; - p3 = co6 * s6; - p4 = si6 * r6; - pSrc[2 * i6] = p1 + p2; - pSrc[2 * i6 + 1] = p3 - p4; - p1 = co4 * t1; - p2 = si4 * t2; - p3 = co4 * t2; - p4 = si4 * t1; - pSrc[2 * i4] = p1 + p2; - pSrc[2 * i4 + 1] = p3 - p4; + r1 = (_Float16)pSrc[2 * i1] + (_Float16)pSrc[2 * i5]; + r5 = (_Float16)pSrc[2 * i1] - (_Float16)pSrc[2 * i5]; + r2 = (_Float16)pSrc[2 * i2] + (_Float16)pSrc[2 * i6]; + r6 = (_Float16)pSrc[2 * i2] - (_Float16)pSrc[2 * i6]; + r3 = (_Float16)pSrc[2 * i3] + (_Float16)pSrc[2 * i7]; + r7 = (_Float16)pSrc[2 * i3] - (_Float16)pSrc[2 * i7]; + r4 = (_Float16)pSrc[2 * i4] + (_Float16)pSrc[2 * i8]; + r8 = (_Float16)pSrc[2 * i4] - (_Float16)pSrc[2 * i8]; + t1 = (_Float16)r1 - 
(_Float16)r3; + r1 = (_Float16)r1 + (_Float16)r3; + r3 = (_Float16)r2 - (_Float16)r4; + r2 = (_Float16)r2 + (_Float16)r4; + pSrc[2 * i1] = (_Float16)r1 + (_Float16)r2; + r2 = (_Float16)r1 - (_Float16)r2; + s1 = (_Float16)pSrc[2 * i1 + 1] + (_Float16)pSrc[2 * i5 + 1]; + s5 = (_Float16)pSrc[2 * i1 + 1] - (_Float16)pSrc[2 * i5 + 1]; + s2 = (_Float16)pSrc[2 * i2 + 1] + (_Float16)pSrc[2 * i6 + 1]; + s6 = (_Float16)pSrc[2 * i2 + 1] - (_Float16)pSrc[2 * i6 + 1]; + s3 = (_Float16)pSrc[2 * i3 + 1] + (_Float16)pSrc[2 * i7 + 1]; + s7 = (_Float16)pSrc[2 * i3 + 1] - (_Float16)pSrc[2 * i7 + 1]; + s4 = (_Float16)pSrc[2 * i4 + 1] + (_Float16)pSrc[2 * i8 + 1]; + s8 = (_Float16)pSrc[2 * i4 + 1] - (_Float16)pSrc[2 * i8 + 1]; + t2 = (_Float16)s1 - (_Float16)s3; + s1 = (_Float16)s1 + (_Float16)s3; + s3 = (_Float16)s2 - (_Float16)s4; + s2 = (_Float16)s2 + (_Float16)s4; + r1 = (_Float16)t1 + (_Float16)s3; + t1 = (_Float16)t1 - (_Float16)s3; + pSrc[2 * i1 + 1] = (_Float16)s1 + (_Float16)s2; + s2 = (_Float16)s1 - (_Float16)s2; + s1 = (_Float16)t2 - (_Float16)r3; + t2 = (_Float16)t2 + (_Float16)r3; + p1 = (_Float16)co5 * (_Float16)r2; + p2 = (_Float16)si5 * (_Float16)s2; + p3 = (_Float16)co5 * (_Float16)s2; + p4 = (_Float16)si5 * (_Float16)r2; + pSrc[2 * i5] = (_Float16)p1 + (_Float16)p2; + pSrc[2 * i5 + 1] = (_Float16)p3 - (_Float16)p4; + p1 = (_Float16)co3 * (_Float16)r1; + p2 = (_Float16)si3 * (_Float16)s1; + p3 = (_Float16)co3 * (_Float16)s1; + p4 = (_Float16)si3 * (_Float16)r1; + pSrc[2 * i3] = (_Float16)p1 + (_Float16)p2; + pSrc[2 * i3 + 1] = (_Float16)p3 - (_Float16)p4; + p1 = (_Float16)co7 * (_Float16)t1; + p2 = (_Float16)si7 * (_Float16)t2; + p3 = (_Float16)co7 * (_Float16)t2; + p4 = (_Float16)si7 * (_Float16)t1; + pSrc[2 * i7] = (_Float16)p1 + (_Float16)p2; + pSrc[2 * i7 + 1] = (_Float16)p3 - (_Float16)p4; + r1 = ((_Float16)r6 - (_Float16)r8) * (_Float16)C81; + r6 = ((_Float16)r6 + (_Float16)r8) * (_Float16)C81; + s1 = ((_Float16)s6 - (_Float16)s8) * (_Float16)C81; + s6 = ((_Float16)s6 + (_Float16)s8) * (_Float16)C81; + t1 = (_Float16)r5 - (_Float16)r1; + r5 = (_Float16)r5 + (_Float16)r1; + r8 = (_Float16)r7 - (_Float16)r6; + r7 = (_Float16)r7 + (_Float16)r6; + t2 = (_Float16)s5 - (_Float16)s1; + s5 = (_Float16)s5 + (_Float16)s1; + s8 = (_Float16)s7 - (_Float16)s6; + s7 = (_Float16)s7 + (_Float16)s6; + r1 = (_Float16)r5 + (_Float16)s7; + r5 = (_Float16)r5 - (_Float16)s7; + r6 = (_Float16)t1 + (_Float16)s8; + t1 = (_Float16)t1 - (_Float16)s8; + s1 = (_Float16)s5 - (_Float16)r7; + s5 = (_Float16)s5 + (_Float16)r7; + s6 = (_Float16)t2 - (_Float16)r8; + t2 = (_Float16)t2 + (_Float16)r8; + p1 = (_Float16)co2 * (_Float16)r1; + p2 = (_Float16)si2 * (_Float16)s1; + p3 = (_Float16)co2 * (_Float16)s1; + p4 = (_Float16)si2 * (_Float16)r1; + pSrc[2 * i2] = (_Float16)p1 + (_Float16)p2; + pSrc[2 * i2 + 1] = (_Float16)p3 - (_Float16)p4; + p1 = (_Float16)co8 * (_Float16)r5; + p2 = (_Float16)si8 * (_Float16)s5; + p3 = (_Float16)co8 * (_Float16)s5; + p4 = (_Float16)si8 * (_Float16)r5; + pSrc[2 * i8] = (_Float16)p1 + (_Float16)p2; + pSrc[2 * i8 + 1] = (_Float16)p3 - (_Float16)p4; + p1 = (_Float16)co6 * (_Float16)r6; + p2 = (_Float16)si6 * (_Float16)s6; + p3 = (_Float16)co6 * (_Float16)s6; + p4 = (_Float16)si6 * (_Float16)r6; + pSrc[2 * i6] = (_Float16)p1 + (_Float16)p2; + pSrc[2 * i6 + 1] = (_Float16)p3 - (_Float16)p4; + p1 = (_Float16)co4 * (_Float16)t1; + p2 = (_Float16)si4 * (_Float16)t2; + p3 = (_Float16)co4 * (_Float16)t2; + p4 = (_Float16)si4 * (_Float16)t1; + pSrc[2 * i4] = (_Float16)p1 + (_Float16)p2; + pSrc[2 * i4 
+ 1] = (_Float16)p3 - (_Float16)p4; i1 += n1; } while (i1 < fftLen); @@ -287,4 +289,5 @@ void arm_radix8_butterfly_f16( } #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix8_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix8_f32.c index a37d50a..328a725 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix8_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_cfft_radix8_f32.c @@ -5,13 +5,13 @@ * Title: arm_cfft_radix8_f32.c * Description: Radix-8 Decimation in Frequency CFFT & CIFFT Floating point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_f32.c index 2214ca6..7367b11 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_f32.c @@ -5,13 +5,13 @@ * Title: arm_dct4_f32.c * Description: Processing function of DCT4 & IDCT4 F32 * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -62,11 +62,15 @@ @par Algorithm The N-point type-IV DCT is defined as a real, linear transformation by the formula: - \image html DCT4Equation.gif + \f[ + X_c(k) = \sqrt{\frac{2}{N}}\sum_{n=0}^{N-1} x(n)cos\Big[\Big(n+\frac{1}{2}\Big)\Big(k+\frac{1}{2}\Big)\frac{\pi}{N}\Big] + \f] where k = 0, 1, 2, ..., N-1 @par Its inverse is defined as follows: - \image html IDCT4Equation.gif + \f[ + x(n) = \sqrt{\frac{2}{N}}\sum_{k=0}^{N-1} X_c(k)cos\Big[\Big(n+\frac{1}{2}\Big)\Big(k+\frac{1}{2}\Big)\frac{\pi}{N}\Big] + \f] where n = 0, 1, 2, ..., N-1 @par The DCT4 matrices become involutory (i.e. they are self-inverse) by multiplying with an overall scale factor of sqrt(2/N). 
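For reference, the type-IV DCT definition above can be cross-checked against a direct evaluation of the formula. The sketch below is a hypothetical, naive O(N^2) reference in plain C (the function name and float types are invented here for illustration); it is not the optimized transform implemented in arm_dct4_f32.c:

#include <math.h>
#include <stdint.h>

/* Direct evaluation of X_c(k) = sqrt(2/N) * sum_n x[n] * cos((n+1/2)(k+1/2)*pi/N). */
static void dct4_reference_f32(const float *x, float *X, uint32_t N)
{
    const float pi    = 3.14159265358979f;
    const float scale = sqrtf(2.0f / (float)N);   /* the sqrt(2/N) normalizing factor */

    for (uint32_t k = 0; k < N; k++) {
        float acc = 0.0f;
        for (uint32_t n = 0; n < N; n++) {
            acc += x[n] * cosf(((float)n + 0.5f) * ((float)k + 0.5f) * pi / (float)N);
        }
        X[k] = scale * acc;
    }
}

Because the scaled transform is involutory, applying this reference twice should return the original vector up to rounding, which makes it a convenient oracle when validating the fixed-point DCT4 variants.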
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_f32.c index adac8a4..957e01e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_dct4_init_f32.c * Description: Initialization function of DCT-4 & IDCT4 F32 * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -56,7 +56,13 @@ The normalizing factor is sqrt(2/N), which depends on the size of transform N. Floating-point normalizing factors are mentioned in the table below for different DCT sizes: - \image html dct4NormalizingF32Table.gif + +| DCT Size | Normalizing factor value | +| --------: | ------------------------: | +| 2048 | 0.03125 | +| 512 | 0.0625 | +| 128 | 0.125 | + */ arm_status arm_dct4_init_f32( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_q15.c index 20a2cd2..0cd18fb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_dct4_init_q15.c * Description: Initialization function of DCT-4 & IDCT4 Q15 * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -56,7 +56,12 @@ The normalizing factor is sqrt(2/N), which depends on the size of transform N. 
Normalizing factors in 1.15 format are mentioned in the table below for different DCT sizes: - \image html dct4NormalizingQ15Table.gif +| DCT Size | Normalizing factor value (hexadecimal) | +| --------: | ---------------------------------------:| +| 2048 | 0x400 | +| 512 | 0x800 | +| 128 | 0x1000 | + */ arm_status arm_dct4_init_q15( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_q31.c index 0ee5da3..1d7d2f1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_dct4_init_q31.c * Description: Initialization function of DCT-4 & IDCT4 Q31 * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -55,8 +55,13 @@ @par Normalizing factor: The normalizing factor is sqrt(2/N), which depends on the size of transform N. Normalizing factors in 1.31 format are mentioned in the table below for different DCT sizes: + +| DCT Size | Normalizing factor value (hexadecimal) | +| --------: | ---------------------------------------:| +| 2048 | 0x4000000 | +| 512 | 0x8000000 | +| 128 | 0x10000000 | - \image html dct4NormalizingQ31Table.gif */ arm_status arm_dct4_init_q31( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_q15.c index b590c38..a9d4e78 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_q15.c @@ -5,13 +5,13 @@ * Title: arm_dct4_q15.c * Description: Processing function of DCT4 & IDCT4 Q15 * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -46,8 +46,14 @@ Internally inputs are downscaled in the RFFT process function to avoid overflows. Number of bits downscaled, depends on the size of the transform. 
The input and output formats for different DCT sizes and number of bits to upscale are mentioned in the table below: + +| DCT Size | Input format | Output format | Number of bits to upscale | +| --------: | ------------: | ------------: | ------------------------: | +| 2048 | 1.15 | 11.5 | 10 | +| 512 | 1.15 | 9.7 | 8 | +| 128 | 1.15 | 7.9 | 6 | + - \image html dct4FormatsQ15Table.gif */ void arm_dct4_q15( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_q31.c index 259dc9a..5976bd0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_dct4_q31.c @@ -5,13 +5,13 @@ * Title: arm_dct4_q31.c * Description: Processing function of DCT4 & IDCT4 Q31 * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -50,7 +50,12 @@ The input and output formats for different DCT sizes and number of bits to upscale are mentioned in the table below: - \image html dct4FormatsQ31Table.gif +| DCT Size | Input format | Output format | Number of bits to upscale | +| --------: | ------------: | ------------: | ------------------------: | +| 2048 | 2.30 | 12.20 | 11 | +| 512 | 2.30 | 10.22 | 9 | +| 128 | 2.30 | 8.24 | 7 | + */ void arm_dct4_q31( diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_f16.c new file mode 100644 index 0000000..f9cf6fd --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_f16.c @@ -0,0 +1,165 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mfcc_f16.c + * Description: MFCC function for the f16 version + * + * $Date: 07 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions_f16.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + +/** + @ingroup groupTransforms + */ + + +/** + @defgroup MFCC MFCC + + MFCC Transform + + There are separate functions for floating-point, Q15, and Q31 data types. + */ + + + +/** + @addtogroup MFCC + @{ + */ + +/** + @brief MFCC F16 + @param[in] S points to the mfcc instance structure + @param[in] pSrc points to the input samples + @param[out] pDst points to the output MFCC values + @param[inout] pTmp points to a temporary buffer of complex + + @return none + + @par Description + The number of input samples if the FFT length used + when initializing the instance data structure. + + The temporary buffer has a 2*fft length size when MFCC + is implemented with CFFT. + It has length FFT Length + 2 when implemented with RFFT + (default implementation). + + The source buffer is modified by this function. + + */ +void arm_mfcc_f16( + const arm_mfcc_instance_f16 * S, + float16_t *pSrc, + float16_t *pDst, + float16_t *pTmp + ) +{ + float16_t maxValue; + uint32_t index; + uint32_t i; + float16_t result; + const float16_t *coefs=S->filterCoefs; + arm_matrix_instance_f16 pDctMat; + + /* Normalize */ + arm_absmax_f16(pSrc,S->fftLen,&maxValue,&index); + + arm_scale_f16(pSrc,1.0f16/(_Float16)maxValue,pSrc,S->fftLen); + + /* Multiply by window */ + arm_mult_f16(pSrc,S->windowCoefs,pSrc,S->fftLen); + + /* Compute spectrum magnitude + */ +#if defined(ARM_MFCC_CFFT_BASED) + /* some HW accelerator for CMSIS-DSP used in some boards + are only providing acceleration for CFFT. + With ARM_MFCC_CFFT_BASED enabled, CFFT is used and the MFCC + will be accelerated on those boards. 
+ + The default is to use RFFT + */ + /* Convert from real to complex */ + for(i=0; i < S->fftLen ; i++) + { + pTmp[2*i] = pSrc[i]; + pTmp[2*i+1] = 0.0f16; + } + arm_cfft_f16(&(S->cfft),pTmp,0,1); +#else + /* Default RFFT based implementation */ + arm_rfft_fast_f16(&(S->rfft),pSrc,pTmp,0); + /* Unpack real values */ + pTmp[S->fftLen]=pTmp[1]; + pTmp[S->fftLen+1]=0.0f16; + pTmp[1]=0.0f; +#endif + arm_cmplx_mag_f16(pTmp,pSrc,S->fftLen); + + /* Apply MEL filters */ + for(i=0; i<S->nbMelFilters; i++) + { + arm_dot_prod_f16(pSrc+S->filterPos[i], + coefs, + S->filterLengths[i], + &result); + + coefs += S->filterLengths[i]; + + pTmp[i] = result; + + } + + /* Compute the log */ + arm_offset_f16(pTmp,1.0e-4f16,pTmp,S->nbMelFilters); + arm_vlog_f16(pTmp,pTmp,S->nbMelFilters); + + /* Multiply with the DCT matrix */ + + pDctMat.numRows=S->nbDctOutputs; + pDctMat.numCols=S->nbMelFilters; + pDctMat.pData=(float16_t*)S->dctCoefs; + + arm_mat_vec_mult_f16(&pDctMat, pTmp, pDst); + + +} + +#endif /* defined(ARM_FLOAT16_SUPPORTED) */ +/** + @} end of MFCC group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_f32.c new file mode 100644 index 0000000..544f717 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_f32.c @@ -0,0 +1,154 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mfcc_f32.c + * Description: MFCC function for the f32 version + * + * $Date: 07 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License.
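For orientation, a minimal usage sketch of the F16 MFCC path added above (all sizes are placeholders, the coefficient tables are assumed to be generated offline as noted in the init function documentation further below, and the helper name is invented):

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h"

#if defined(ARM_FLOAT16_SUPPORTED)

#define MFCC_FFT_LEN    256U   /* placeholder sizes */
#define MFCC_NB_FILTERS  20U
#define MFCC_NB_DCT_OUT  13U

/* Placeholder coefficient tables, assumed to be generated offline. */
extern const float16_t mfccDctCoefs[MFCC_NB_DCT_OUT * MFCC_NB_FILTERS];
extern const uint32_t  mfccFilterPos[MFCC_NB_FILTERS];
extern const uint32_t  mfccFilterLengths[MFCC_NB_FILTERS];
extern const float16_t mfccFilterCoefs[];               /* sum of the filter lengths */
extern const float16_t mfccWindowCoefs[MFCC_FFT_LEN];

static arm_mfcc_instance_f16 mfcc;
static float16_t mfccTmp[2U * MFCC_FFT_LEN];  /* covers the RFFT (fftLen+2) and CFFT (2*fftLen) variants */

void mfcc_frame(float16_t *frame, float16_t *out)  /* frame: MFCC_FFT_LEN samples, modified in place */
{
    /* In real code the init would be done once at startup, not per frame. */
    if (arm_mfcc_init_f16(&mfcc, MFCC_FFT_LEN, MFCC_NB_FILTERS, MFCC_NB_DCT_OUT,
                          mfccDctCoefs, mfccFilterPos, mfccFilterLengths,
                          mfccFilterCoefs, mfccWindowCoefs) == ARM_MATH_SUCCESS)
    {
        arm_mfcc_f16(&mfcc, frame, out, mfccTmp);  /* out: MFCC_NB_DCT_OUT values */
    }
}

#endif /* ARM_FLOAT16_SUPPORTED */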
+ */ + + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" + +/** + @ingroup groupTransforms + */ + + + +/** + @addtogroup MFCC + @{ + */ + +/** + @brief MFCC F32 + @param[in] S points to the mfcc instance structure + @param[in] pSrc points to the input samples + @param[out] pDst points to the output MFCC values + @param[inout] pTmp points to a temporary buffer of complex + + @return none + + @par Description + The number of input samples if the FFT length used + when initializing the instance data structure. + + The temporary buffer has a 2*fft length size when MFCC + is implemented with CFFT. + It has length FFT Length + 2 when implemented with RFFT + (default implementation). + + The source buffer is modified by this function. + + */ +void arm_mfcc_f32( + const arm_mfcc_instance_f32 * S, + float32_t *pSrc, + float32_t *pDst, + float32_t *pTmp + ) +{ + float32_t maxValue; + uint32_t index; + uint32_t i; + float32_t result; + const float32_t *coefs=S->filterCoefs; + arm_matrix_instance_f32 pDctMat; + + /* Normalize */ + arm_absmax_f32(pSrc,S->fftLen,&maxValue,&index); + + arm_scale_f32(pSrc,1.0f/maxValue,pSrc,S->fftLen); + + /* Multiply by window */ + arm_mult_f32(pSrc,S->windowCoefs,pSrc,S->fftLen); + + /* Compute spectrum magnitude + */ +#if defined(ARM_MFCC_CFFT_BASED) + /* some HW accelerator for CMSIS-DSP used in some boards + are only providing acceleration for CFFT. + With ARM_MFCC_CFFT_BASED enabled, CFFT is used and the MFCC + will be accelerated on those boards. 
+ + The default is to use RFFT + */ + /* Convert from real to complex */ + for(i=0; i < S->fftLen ; i++) + { + pTmp[2*i] = pSrc[i]; + pTmp[2*i+1] = 0.0f; + } + arm_cfft_f32(&(S->cfft),pTmp,0,1); +#else + /* Default RFFT based implementation */ + arm_rfft_fast_f32(&(S->rfft),pSrc,pTmp,0); + /* Unpack real values */ + pTmp[S->fftLen]=pTmp[1]; + pTmp[S->fftLen+1]=0.0f; + pTmp[1]=0.0f; +#endif + arm_cmplx_mag_f32(pTmp,pSrc,S->fftLen); + + /* Apply MEL filters */ + for(i=0; i<S->nbMelFilters; i++) + { + arm_dot_prod_f32(pSrc+S->filterPos[i], + coefs, + S->filterLengths[i], + &result); + + coefs += S->filterLengths[i]; + + pTmp[i] = result; + + } + + /* Compute the log */ + arm_offset_f32(pTmp,1.0e-6f,pTmp,S->nbMelFilters); + arm_vlog_f32(pTmp,pTmp,S->nbMelFilters); + + /* Multiply with the DCT matrix */ + + pDctMat.numRows=S->nbDctOutputs; + pDctMat.numCols=S->nbMelFilters; + pDctMat.pData=(float32_t*)S->dctCoefs; + + arm_mat_vec_mult_f32(&pDctMat, pTmp, pDst); + + +} + +/** + @} end of MFCC group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_f16.c new file mode 100644 index 0000000..74b6c3d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_f16.c @@ -0,0 +1,114 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mfcc_init_f16.c + * Description: MFCC initialization function for the f16 version + * + * $Date: 07 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + @ingroup groupTransforms + */ + + +/** + @addtogroup MFCC + @{ + */ + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions_f16.h" + +#if defined(ARM_FLOAT16_SUPPORTED) + + + +/** + @brief Initialization of the MFCC F16 instance structure + @param[out] S points to the mfcc instance structure + @param[in] fftLen fft length + @param[in] nbMelFilters number of Mel filters + @param[in] nbDctOutputs number of Dct outputs + @param[in] dctCoefs points to an array of DCT coefficients + @param[in] filterPos points of the array of filter positions + @param[in] filterLengths points to the array of filter lengths + @param[in] filterCoefs points to the array of filter coefficients + @param[in] windowCoefs points to the array of window coefficients + + @return error status + + @par Description + The matrix of Mel filter coefficients is sparse.
+ Most of the coefficients are zero. + To avoid multiplying the spectrogram by those zeros, the + filter is applied only to a given position in the spectrogram + and on a given number of FFT bins (the filter length). + It is the reason for the arrays filterPos and filterLengths. + + window coefficients can describe (for instance) a Hamming window. + The array has the same size as the FFT length. + + The folder Scripts is containing a Python script which can be used + to generate the filter, dct and window arrays. + */ + +arm_status arm_mfcc_init_f16( + arm_mfcc_instance_f16 * S, + uint32_t fftLen, + uint32_t nbMelFilters, + uint32_t nbDctOutputs, + const float16_t *dctCoefs, + const uint32_t *filterPos, + const uint32_t *filterLengths, + const float16_t *filterCoefs, + const float16_t *windowCoefs + ) +{ + arm_status status; + + S->fftLen=fftLen; + S->nbMelFilters=nbMelFilters; + S->nbDctOutputs=nbDctOutputs; + S->dctCoefs=dctCoefs; + S->filterPos=filterPos; + S->filterLengths=filterLengths; + S->filterCoefs=filterCoefs; + S->windowCoefs=windowCoefs; + + #if defined(ARM_MFCC_CFFT_BASED) + status=arm_cfft_init_f16(&(S->cfft),fftLen); + #else + status=arm_rfft_fast_init_f16(&(S->rfft),fftLen); + #endif + + return(status); +} + +#endif /* defined(ARM_FLOAT16_SUPPORTED) */ +/** + @} end of MFCC group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_f32.c new file mode 100644 index 0000000..9e0bf0c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_f32.c @@ -0,0 +1,111 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mfcc_init_f32.c + * Description: MFCC initialization function for the f32 version + * + * $Date: 07 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/** + @ingroup groupTransforms + */ + + +/** + @addtogroup MFCC + @{ + */ + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h" + + + +/** + @brief Initialization of the MFCC F32 instance structure + @param[out] S points to the mfcc instance structure + @param[in] fftLen fft length + @param[in] nbMelFilters number of Mel filters + @param[in] nbDctOutputs number of Dct outputs + @param[in] dctCoefs points to an array of DCT coefficients + @param[in] filterPos points of the array of filter positions + @param[in] filterLengths points to the array of filter lengths + @param[in] filterCoefs points to the array of filter coefficients + @param[in] windowCoefs points to the array of window coefficients + + @return error status + + @par Description + The matrix of Mel filter coefficients is sparse. + Most of the coefficients are zero. + To avoid multiplying the spectrogram by those zeros, the + filter is applied only to a given position in the spectrogram + and on a given number of FFT bins (the filter length). + It is the reason for the arrays filterPos and filterLengths. + + window coefficients can describe (for instance) a Hamming window. + The array has the same size as the FFT length. + + The folder Scripts is containing a Python script which can be used + to generate the filter, dct and window arrays. + */ + +arm_status arm_mfcc_init_f32( + arm_mfcc_instance_f32 * S, + uint32_t fftLen, + uint32_t nbMelFilters, + uint32_t nbDctOutputs, + const float32_t *dctCoefs, + const uint32_t *filterPos, + const uint32_t *filterLengths, + const float32_t *filterCoefs, + const float32_t *windowCoefs + ) +{ + arm_status status; + + S->fftLen=fftLen; + S->nbMelFilters=nbMelFilters; + S->nbDctOutputs=nbDctOutputs; + S->dctCoefs=dctCoefs; + S->filterPos=filterPos; + S->filterLengths=filterLengths; + S->filterCoefs=filterCoefs; + S->windowCoefs=windowCoefs; + + #if defined(ARM_MFCC_CFFT_BASED) + status=arm_cfft_init_f32(&(S->cfft),fftLen); + #else + status=arm_rfft_fast_init_f32(&(S->rfft),fftLen); + #endif + + return(status); +} + +/** + @} end of MFCC group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_q15.c new file mode 100644 index 0000000..ccd6da9 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_q15.c @@ -0,0 +1,111 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mfcc_init_q15.c + * Description: MFCC initialization function for the q15 version + * + * $Date: 07 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + @ingroup groupTransforms + */ + + +/** + @addtogroup MFCC + @{ + */ + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h" + + + +/** + @brief Initialization of the MFCC F32 instance structure + @param[out] S points to the mfcc instance structure + @param[in] fftLen fft length + @param[in] nbMelFilters number of Mel filters + @param[in] nbDctOutputs number of Dct outputs + @param[in] dctCoefs points to an array of DCT coefficients + @param[in] filterPos points of the array of filter positions + @param[in] filterLengths points to the array of filter lengths + @param[in] filterCoefs points to the array of filter coefficients + @param[in] windowCoefs points to the array of window coefficients + + @return error status + + @par Description + The matrix of Mel filter coefficients is sparse. + Most of the coefficients are zero. + To avoid multiplying the spectrogram by those zeros, the + filter is applied only to a given position in the spectrogram + and on a given number of FFT bins (the filter length). + It is the reason for the arrays filterPos and filterLengths. + + window coefficients can describe (for instance) a Hamming window. + The array has the same size as the FFT length. + + The folder Scripts is containing a Python script which can be used + to generate the filter, dct and window arrays. + */ + +arm_status arm_mfcc_init_q15( + arm_mfcc_instance_q15 * S, + uint32_t fftLen, + uint32_t nbMelFilters, + uint32_t nbDctOutputs, + const q15_t *dctCoefs, + const uint32_t *filterPos, + const uint32_t *filterLengths, + const q15_t *filterCoefs, + const q15_t *windowCoefs + ) +{ + arm_status status; + + S->fftLen=fftLen; + S->nbMelFilters=nbMelFilters; + S->nbDctOutputs=nbDctOutputs; + S->dctCoefs=dctCoefs; + S->filterPos=filterPos; + S->filterLengths=filterLengths; + S->filterCoefs=filterCoefs; + S->windowCoefs=windowCoefs; + + #if defined(ARM_MFCC_CFFT_BASED) + status=arm_cfft_init_q15(&(S->cfft),fftLen); + #else + status=arm_rfft_init_q15(&(S->rfft),fftLen,0,1); + #endif + + return(status); +} + +/** + @} end of MFCC group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_q31.c new file mode 100644 index 0000000..5573b33 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_init_q31.c @@ -0,0 +1,111 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mfcc_init_q31.c + * Description: MFCC initialization function for the q31 version + * + * $Date: 07 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/** + @ingroup groupTransforms + */ + + +/** + @addtogroup MFCC + @{ + */ + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h" + + + +/** + @brief Initialization of the MFCC F32 instance structure + @param[out] S points to the mfcc instance structure + @param[in] fftLen fft length + @param[in] nbMelFilters number of Mel filters + @param[in] nbDctOutputs number of Dct outputs + @param[in] dctCoefs points to an array of DCT coefficients + @param[in] filterPos points of the array of filter positions + @param[in] filterLengths points to the array of filter lengths + @param[in] filterCoefs points to the array of filter coefficients + @param[in] windowCoefs points to the array of window coefficients + + @return error status + + @par Description + The matrix of Mel filter coefficients is sparse. + Most of the coefficients are zero. + To avoid multiplying the spectrogram by those zeros, the + filter is applied only to a given position in the spectrogram + and on a given number of FFT bins (the filter length). + It is the reason for the arrays filterPos and filterLengths. + + window coefficients can describe (for instance) a Hamming window. + The array has the same size as the FFT length. + + The folder Scripts is containing a Python script which can be used + to generate the filter, dct and window arrays. 
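As an illustration of the sparse layout described above (all bin indices and Q31 weights below are invented): three Mel filters starting at spectrum bins 1, 2 and 4 and spanning 2, 3 and 4 bins respectively would be encoded with filterCoefs holding sum(filterLengths) = 9 packed weights:

/* Hypothetical example of the filterPos / filterLengths / filterCoefs encoding
   (q31_t is the CMSIS-DSP 1.31 fixed-point type). */
static const uint32_t filterPos[3]     = { 1U, 2U, 4U };
static const uint32_t filterLengths[3] = { 2U, 3U, 4U };
static const q31_t    filterCoefs[2 + 3 + 4] = {
    /* filter 0 (bins 1-2) */ 0x20000000, 0x40000000,
    /* filter 1 (bins 2-4) */ 0x20000000, 0x40000000, 0x20000000,
    /* filter 2 (bins 4-7) */ 0x10000000, 0x30000000, 0x30000000, 0x10000000
};
/* In the processing function, filter i is dot-multiplied with filterLengths[i]
   magnitude bins starting at filterPos[i], so the zero entries of the dense
   Mel matrix are never touched. */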
+ */ + +arm_status arm_mfcc_init_q31( + arm_mfcc_instance_q31 * S, + uint32_t fftLen, + uint32_t nbMelFilters, + uint32_t nbDctOutputs, + const q31_t *dctCoefs, + const uint32_t *filterPos, + const uint32_t *filterLengths, + const q31_t *filterCoefs, + const q31_t *windowCoefs + ) +{ + arm_status status; + + S->fftLen=fftLen; + S->nbMelFilters=nbMelFilters; + S->nbDctOutputs=nbDctOutputs; + S->dctCoefs=dctCoefs; + S->filterPos=filterPos; + S->filterLengths=filterLengths; + S->filterCoefs=filterCoefs; + S->windowCoefs=windowCoefs; + + #if defined(ARM_MFCC_CFFT_BASED) + status=arm_cfft_init_q31(&(S->cfft),fftLen); + #else + status=arm_rfft_init_q31(&(S->rfft),fftLen,0,1); + #endif + + return(status); +} + +/** + @} end of MFCC group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_q15.c new file mode 100644 index 0000000..9cbd447 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_q15.c @@ -0,0 +1,203 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mfcc_q15.c + * Description: MFCC function for the q15 version + * + * $Date: 07 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" + +/* Constants for Q15 implementation */ +#define LOG2TOLOG_Q15 0x02C5C860 +#define MICRO_Q15 0x00000219 +#define SHIFT_MELFILTER_SATURATION_Q15 10 +/** + @ingroup groupTransforms + */ + + + +/** + @addtogroup MFCC + @{ + */ + +/** + @brief MFCC Q15 + @param[in] S points to the mfcc instance structure + @param[in] pSrc points to the input samples in Q15 + @param[out] pDst points to the output MFCC values in q8.7 format + @param[inout] pTmp points to a temporary buffer of complex + + @return none + + @par Description + The number of input samples is the FFT length used + when initializing the instance data structure. + + The temporary buffer has a 2*fft length. + + The source buffer is modified by this function. + + The function may saturate. 
If the FFT length is too + big and the number of MEL filters too small then the fixed + point computations may saturate. + + */ + +arm_status arm_mfcc_q15( + const arm_mfcc_instance_q15 * S, + q15_t *pSrc, + q15_t *pDst, + q31_t *pTmp + ) +{ + q15_t m; + uint32_t index; + uint32_t fftShift=0; + q31_t logExponent; + q63_t result; + arm_matrix_instance_q15 pDctMat; + uint32_t i; + uint32_t coefsPos; + uint32_t filterLimit; + q15_t *pTmp2=(q15_t*)pTmp; + + arm_status status = ARM_MATH_SUCCESS; + + // q15 + arm_absmax_q15(pSrc,S->fftLen,&m,&index); + + if (m !=0) + { + q15_t quotient; + int16_t shift; + + status = arm_divide_q15(0x7FFF,m,&quotient,&shift); + if (status != ARM_MATH_SUCCESS) + { + return(status); + } + + arm_scale_q15(pSrc,quotient,shift,pSrc,S->fftLen); + } + + + // q15 + arm_mult_q15(pSrc,S->windowCoefs, pSrc, S->fftLen); + + + /* Compute spectrum magnitude + */ + fftShift = 31 - __CLZ(S->fftLen); +#if defined(ARM_MFCC_CFFT_BASED) + /* some HW accelerator for CMSIS-DSP used in some boards + are only providing acceleration for CFFT. + With ARM_MFCC_CFFT_BASED enabled, CFFT is used and the MFCC + will be accelerated on those boards. + + The default is to use RFFT + */ + /* Convert from real to complex */ + for(i=0; i < S->fftLen ; i++) + { + pTmp2[2*i] = pSrc[i]; + pTmp2[2*i+1] = 0; + } + arm_cfft_q15(&(S->cfft),pTmp2,0,1); +#else + /* Default RFFT based implementation */ + arm_rfft_q15(&(S->rfft),pSrc,pTmp2); +#endif + filterLimit = 1 + (S->fftLen >> 1); + + + // q15 - fftShift + arm_cmplx_mag_q15(pTmp2,pSrc,filterLimit); + // q14 - fftShift + + /* Apply MEL filters */ + coefsPos = 0; + for(i=0; i<S->nbMelFilters; i++) + { + arm_dot_prod_q15(pSrc+S->filterPos[i], + &(S->filterCoefs[coefsPos]), + S->filterLengths[i], + &result); + + coefsPos += S->filterLengths[i]; + + // q34.29 - fftShift + result += MICRO_Q15; + result >>= SHIFT_MELFILTER_SATURATION_Q15; + // q34.29 - fftShift - satShift + pTmp[i] = __SSAT(result,31) ; + + } + + + // q34.29 - fftShift - satShift + /* Compute the log */ + arm_vlog_q31(pTmp,pTmp,S->nbMelFilters); + + + // q5.26 + + logExponent = fftShift + 2 + SHIFT_MELFILTER_SATURATION_Q15; + logExponent = logExponent * LOG2TOLOG_Q15; + + + // q8.26 + arm_offset_q31(pTmp,logExponent,pTmp,S->nbMelFilters); + arm_shift_q31(pTmp,-19,pTmp,S->nbMelFilters); + for(i=0; i<S->nbMelFilters; i++) + { + pSrc[i] = __SSAT((q15_t)pTmp[i],16); + } + + // q8.7 + + pDctMat.numRows=S->nbDctOutputs; + pDctMat.numCols=S->nbMelFilters; + pDctMat.pData=(q15_t*)S->dctCoefs; + + arm_mat_vec_mult_q15(&pDctMat, pSrc, pDst); + + return(status); +} + +/** + @} end of MFCC group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_q31.c new file mode 100644 index 0000000..6993c55 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_mfcc_q31.c @@ -0,0 +1,202 @@ +#include "edge-impulse-sdk/dsp/config.hpp" +#if EIDSP_LOAD_CMSIS_DSP_SOURCES +/* ---------------------------------------------------------------------- + * Project: CMSIS DSP Library + * Title: arm_mfcc_q31.c + * Description: MFCC function for the q31 version + * + * $Date: 07 September 2021 + * $Revision: V1.10.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * -------------------------------------------------------------------- */ +/* + * 
Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + + +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/complex_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/fast_math_functions.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/matrix_functions.h" + +/* Constants for Q31 implementation */ +#define LOG2TOLOG_Q31 0x02C5C860 +#define MICRO_Q31 0x08637BD0 +#define SHIFT_MELFILTER_SATURATION_Q31 10 +/** + @ingroup groupTransforms + */ + + + +/** + @addtogroup MFCC + @{ + */ + +/** + @brief MFCC Q31 + @param[in] S points to the mfcc instance structure + @param[in] pSrc points to the input samples in Q31 + @param[out] pDst points to the output MFCC values in q8.23 format + @param[inout] pTmp points to a temporary buffer of complex + + @return none + + @par Description + The number of input samples is the FFT length used + when initializing the instance data structure. + + The temporary buffer has a 2*fft length. + + The source buffer is modified by this function. + + The function may saturate. If the FFT length is too + big and the number of MEL filters too small then the fixed + point computations may saturate. + + */ + + +arm_status arm_mfcc_q31( + const arm_mfcc_instance_q31 * S, + q31_t *pSrc, + q31_t *pDst, + q31_t *pTmp + ) +{ + q31_t m; + uint32_t index; + uint32_t fftShift=0; + q31_t logExponent; + q63_t result; + arm_matrix_instance_q31 pDctMat; + uint32_t i; + uint32_t coefsPos; + uint32_t filterLimit; + q31_t *pTmp2=(q31_t*)pTmp; + + arm_status status = ARM_MATH_SUCCESS; + + // q31 + arm_absmax_q31(pSrc,S->fftLen,&m,&index); + + if (m !=0) + { + q31_t quotient; + int16_t shift; + + status = arm_divide_q31(0x7FFFFFFF,m,&quotient,&shift); + if (status != ARM_MATH_SUCCESS) + { + return(status); + } + + arm_scale_q31(pSrc,quotient,shift,pSrc,S->fftLen); + } + + + // q31 + arm_mult_q31(pSrc,S->windowCoefs, pSrc, S->fftLen); + + + /* Compute spectrum magnitude + */ + fftShift = 31 - __CLZ(S->fftLen); +#if defined(ARM_MFCC_CFFT_BASED) + /* some HW accelerator for CMSIS-DSP used in some boards + are only providing acceleration for CFFT. + With ARM_MFCC_CFFT_BASED enabled, CFFT is used and the MFCC + will be accelerated on those boards. 
+ + The default is to use RFFT + */ + /* Convert from real to complex */ + for(i=0; i < S->fftLen ; i++) + { + pTmp2[2*i] = pSrc[i]; + pTmp2[2*i+1] = 0; + } + arm_cfft_q31(&(S->cfft),pTmp2,0,1); +#else + /* Default RFFT based implementation */ + arm_rfft_q31(&(S->rfft),pSrc,pTmp2); +#endif + filterLimit = 1 + (S->fftLen >> 1); + + + // q31 - fftShift + arm_cmplx_mag_q31(pTmp2,pSrc,filterLimit); + // q30 - fftShift + + + /* Apply MEL filters */ + coefsPos = 0; + for(i=0; i<S->nbMelFilters; i++) + { + arm_dot_prod_q31(pSrc+S->filterPos[i], + &(S->filterCoefs[coefsPos]), + S->filterLengths[i], + &result); + + coefsPos += S->filterLengths[i]; + + // q16.48 - fftShift + result += MICRO_Q31; + result >>= (SHIFT_MELFILTER_SATURATION_Q31 + 18); + // q16.29 - fftShift - satShift + pTmp[i] = __SSAT(result,31) ; + + } + + + // q16.29 - fftShift - satShift + /* Compute the log */ + arm_vlog_q31(pTmp,pTmp,S->nbMelFilters); + + + // q5.26 + + logExponent = fftShift + 2 + SHIFT_MELFILTER_SATURATION_Q31; + logExponent = logExponent * LOG2TOLOG_Q31; + + + // q5.26 + arm_offset_q31(pTmp,logExponent,pTmp,S->nbMelFilters); + arm_shift_q31(pTmp,-3,pTmp,S->nbMelFilters); + + + // q8.23 + + pDctMat.numRows=S->nbDctOutputs; + pDctMat.numCols=S->nbMelFilters; + pDctMat.pData=(q31_t*)S->dctCoefs; + + arm_mat_vec_mult_q31(&pDctMat, pTmp, pDst); + + return(status); +} + +/** + @} end of MFCC group + */ + +#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_f32.c index 9d0a2a3..ea6d9df 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_f32.c @@ -5,13 +5,13 @@ * Title: arm_rfft_f32.c * Description: RFFT & RIFFT Floating point process function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f16.c index d5b64a0..367b2ca 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f16.c @@ -5,11 +5,13 @@ * Title: arm_rfft_fast_f16.c * Description: Split Radix Decimation in Frequency CFFT Floating point processing function * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -72,15 +74,15 @@ void stage_rfft_f16( twI = *pCoeff++ ; // U1 = XA(1) + XB(1); % It is real - t1a = xBR + xAR ; + t1a = (_Float16)xBR + (_Float16)xAR ; // U2 = XB(1) - XA(1); % It is imaginary - t1b = xBI + xAI ; + t1b = (_Float16)xBI + (_Float16)xAI ; // real(tw * (xB - xA)) = twR * (xBR - xAR) - twI * (xBI - xAI); // imag(tw * (xB - xA)) = twI * (xBR - xAR) + twR * (xBI - xAI); - *pOut++ = 0.5f * ( t1a + t1b ); - *pOut++ = 0.5f * ( t1a - t1b ); + *pOut++ = 0.5f16 * ( (_Float16)t1a + (_Float16)t1b ); + *pOut++ = 0.5f16 * ( (_Float16)t1a - (_Float16)t1b ); // XA(1) = 1/2*( U1 - imag(U2) + i*( U1 +imag(U2) )); pB = p + 2*k - 14; @@ -174,18 +176,18 @@ void stage_rfft_f16( twR = *pCoeff++; twI = *pCoeff++; - t1a = xBR - xAR ; - t1b = xBI + xAI ; + t1a = (_Float16)xBR - (_Float16)xAR ; + t1b = (_Float16)xBI + (_Float16)xAI ; // real(tw * (xB - xA)) = twR * (xBR - xAR) - twI * (xBI - xAI); // imag(tw * (xB - xA)) = twI * (xBR - xAR) + twR * (xBI - xAI); - p0 = twR * t1a; - p1 = twI * t1a; - p2 = twR * t1b; - p3 = twI * t1b; + p0 = (_Float16)twR * (_Float16)t1a; + p1 = (_Float16)twI * (_Float16)t1a; + p2 = (_Float16)twR * (_Float16)t1b; + p3 = (_Float16)twI * (_Float16)t1b; - *pOut++ = 0.5f * (xAR + xBR + p0 + p3 ); //xAR - *pOut++ = 0.5f * (xAI - xBI + p1 - p2 ); //xAI + *pOut++ = 0.5f16 * ((_Float16)xAR + (_Float16)xBR + (_Float16)p0 + (_Float16)p3 ); //xAR + *pOut++ = 0.5f16 * ((_Float16)xAI - (_Float16)xBI + (_Float16)p1 - (_Float16)p2 ); //xAI pA += 2; pB -= 2; @@ -223,8 +225,8 @@ void merge_rfft_f16( pCoeff += 2 ; - *pOut++ = 0.5f * ( xAR + xAI ); - *pOut++ = 0.5f * ( xAR - xAI ); + *pOut++ = 0.5f16 * ( (_Float16)xAR + (_Float16)xAI ); + *pOut++ = 0.5f16 * ( (_Float16)xAR - (_Float16)xAI ); pB = p + 2*k - 14; pA += 2 ; @@ -293,18 +295,18 @@ void merge_rfft_f16( twR = *pCoeff++; twI = *pCoeff++; - t1a = xAR - xBR ; - t1b = xAI + xBI ; + t1a = (_Float16)xAR - (_Float16)xBR ; + t1b = (_Float16)xAI + (_Float16)xBI ; - r = twR * t1a; - s = twI * t1b; - t = twI * t1a; - u = twR * t1b; + r = (_Float16)twR * (_Float16)t1a; + s = (_Float16)twI * (_Float16)t1b; + t = (_Float16)twI * (_Float16)t1a; + u = (_Float16)twR * (_Float16)t1b; // real(tw * (xA - xB)) = twR * (xAR - xBR) - twI * (xAI - xBI); // imag(tw * (xA - xB)) = twI * (xAR - xBR) + twR * (xAI - xBI); - *pOut++ = 0.5f * (xAR + xBR - r - s ); //xAR - *pOut++ = 0.5f * (xAI - xBI + t - u ); //xAI + *pOut++ = 0.5f16 * ((_Float16)xAR + (_Float16)xBR - (_Float16)r - (_Float16)s ); //xAR + *pOut++ = 0.5f16 * ((_Float16)xAI - (_Float16)xBI + (_Float16)t - (_Float16)u ); //xAI pA += 2; pB -= 2; @@ -342,15 +344,15 @@ void stage_rfft_f16( // U1 = XA(1) + XB(1); % It is real - t1a = xBR + xAR ; + t1a = (_Float16)xBR + (_Float16)xAR ; // U2 = XB(1) - XA(1); % It is imaginary - t1b = xBI + xAI ; + t1b = (_Float16)xBI + (_Float16)xAI ; // real(tw * (xB - xA)) = twR * (xBR - xAR) - twI * (xBI - xAI); // imag(tw * (xB - xA)) = twI * (xBR - xAR) + twR * (xBI - xAI); - *pOut++ = 0.5f * ( t1a + t1b ); - *pOut++ = 0.5f * ( t1a - t1b ); + *pOut++ = 0.5f16 * ( (_Float16)t1a + (_Float16)t1b ); + *pOut++ = 0.5f16 * ( (_Float16)t1a - (_Float16)t1b ); // XA(1) = 1/2*( U1 - imag(U2) + i*( U1 +imag(U2) )); pB = p + 2*k; @@ -381,18 +383,18 @@ void stage_rfft_f16( twR = *pCoeff++; twI = *pCoeff++; - t1a = xBR - xAR ; - t1b = xBI + xAI ; + t1a = (_Float16)xBR - (_Float16)xAR ; + t1b = (_Float16)xBI + (_Float16)xAI ; // real(tw * (xB - xA)) = twR * (xBR - xAR) - twI * (xBI - xAI); // imag(tw * (xB - xA)) = twI * (xBR - xAR) + 
twR * (xBI - xAI); - p0 = twR * t1a; - p1 = twI * t1a; - p2 = twR * t1b; - p3 = twI * t1b; + p0 = (_Float16)twR * (_Float16)t1a; + p1 = (_Float16)twI * (_Float16)t1a; + p2 = (_Float16)twR * (_Float16)t1b; + p3 = (_Float16)twI * (_Float16)t1b; - *pOut++ = 0.5f * (xAR + xBR + p0 + p3 ); //xAR - *pOut++ = 0.5f * (xAI - xBI + p1 - p2 ); //xAI + *pOut++ = 0.5f16 * ((_Float16)xAR + (_Float16)xBR + (_Float16)p0 + (_Float16)p3 ); //xAR + *pOut++ = 0.5f16 * ((_Float16)xAI - (_Float16)xBI + (_Float16)p1 - (_Float16)p2 ); //xAI pA += 2; @@ -422,8 +424,8 @@ void merge_rfft_f16( pCoeff += 2 ; - *pOut++ = 0.5f * ( xAR + xAI ); - *pOut++ = 0.5f * ( xAR - xAI ); + *pOut++ = 0.5f16 * ( (_Float16)xAR + (_Float16)xAI ); + *pOut++ = 0.5f16 * ( (_Float16)xAR - (_Float16)xAI ); pB = p + 2*k ; pA += 2 ; @@ -441,18 +443,18 @@ void merge_rfft_f16( twR = *pCoeff++; twI = *pCoeff++; - t1a = xAR - xBR ; - t1b = xAI + xBI ; + t1a = (_Float16)xAR - (_Float16)xBR ; + t1b = (_Float16)xAI + (_Float16)xBI ; - r = twR * t1a; - s = twI * t1b; - t = twI * t1a; - u = twR * t1b; + r = (_Float16)twR * (_Float16)t1a; + s = (_Float16)twI * (_Float16)t1b; + t = (_Float16)twI * (_Float16)t1a; + u = (_Float16)twR * (_Float16)t1b; // real(tw * (xA - xB)) = twR * (xAR - xBR) - twI * (xAI - xBI); // imag(tw * (xA - xB)) = twI * (xAR - xBR) + twR * (xAI - xBI); - *pOut++ = 0.5f * (xAR + xBR - r - s ); //xAR - *pOut++ = 0.5f * (xAI - xBI + t - u ); //xAI + *pOut++ = 0.5f16 * ((_Float16)xAR + (_Float16)xBR - (_Float16)r - (_Float16)s ); //xAR + *pOut++ = 0.5f16 * ((_Float16)xAI - (_Float16)xBI + (_Float16)t - (_Float16)u ); //xAI pA += 2; pB -= 2; @@ -467,99 +469,6 @@ void merge_rfft_f16( @ingroup groupTransforms */ -/** - @defgroup RealFFT Real FFT Functions - - @par - The CMSIS DSP library includes specialized algorithms for computing the - FFT of real data sequences. The FFT is defined over complex data but - in many applications the input is real. Real FFT algorithms take advantage - of the symmetry properties of the FFT and have a speed advantage over complex - algorithms of the same length. - @par - The Fast RFFT algorith relays on the mixed radix CFFT that save processor usage. - @par - The real length N forward FFT of a sequence is computed using the steps shown below. - @par - \image html RFFT.gif "Real Fast Fourier Transform" - @par - The real sequence is initially treated as if it were complex to perform a CFFT. - Later, a processing stage reshapes the data to obtain half of the frequency spectrum - in complex format. Except the first complex number that contains the two real numbers - X[0] and X[N/2] all the data is complex. In other words, the first complex sample - contains two real values packed. - @par - The input for the inverse RFFT should keep the same format as the output of the - forward RFFT. A first processing stage pre-process the data to later perform an - inverse CFFT. - @par - \image html RIFFT.gif "Real Inverse Fast Fourier Transform" - @par - The algorithms for floating-point, Q15, and Q31 data are slightly different - and we describe each algorithm in turn. - @par Floating-point - The main functions are \ref arm_rfft_fast_f16() and \ref arm_rfft_fast_init_f16(). - - @par - The FFT of a real N-point sequence has even symmetry in the frequency domain. - The second half of the data equals the conjugate of the first half flipped in frequency. - Looking at the data, we see that we can uniquely represent the FFT using only N/2 complex numbers. 
- These are packed into the output array in alternating real and imaginary components: - @par - X = { real[0], imag[0], real[1], imag[1], real[2], imag[2] ... - real[(N/2)-1], imag[(N/2)-1 } - @par - It happens that the first complex number (real[0], imag[0]) is actually - all real. real[0] represents the DC offset, and imag[0] should be 0. - (real[1], imag[1]) is the fundamental frequency, (real[2], imag[2]) is - the first harmonic and so on. - @par - The real FFT functions pack the frequency domain data in this fashion. - The forward transform outputs the data in this form and the inverse - transform expects input data in this form. The function always performs - the needed bitreversal so that the input and output data is always in - normal order. The functions support lengths of [32, 64, 128, ..., 4096] - samples. - @par Q15 and Q31 - The real algorithms are defined in a similar manner and utilize N/2 complex - transforms behind the scenes. - @par - The complex transforms used internally include scaling to prevent fixed-point - overflows. The overall scaling equals 1/(fftLen/2). - Due to the use of complex transform internally, the source buffer is - modified by the rfft. - @par - A separate instance structure must be defined for each transform used but - twiddle factor and bit reversal tables can be reused. - @par - There is also an associated initialization function for each data type. - The initialization function performs the following operations: - - Sets the values of the internal structure fields. - - Initializes twiddle factor table and bit reversal table pointers. - - Initializes the internal complex FFT data structure. - @par - Use of the initialization function is optional **except for MVE versions where it is mandatory**. - If you don't use the initialization functions, then the structures should be initialized with code - similar to the one below: -
-      arm_rfft_instance_q31 S = {fftLenReal, fftLenBy2, ifftFlagR, bitReverseFlagR, twidCoefRModifier, pTwiddleAReal, pTwiddleBReal, pCfft};
-      arm_rfft_instance_q15 S = {fftLenReal, fftLenBy2, ifftFlagR, bitReverseFlagR, twidCoefRModifier, pTwiddleAReal, pTwiddleBReal, pCfft};
-  
- where fftLenReal is the length of the real transform; - fftLenBy2 length of the internal complex transform (fftLenReal/2). - ifftFlagR Selects forward (=0) or inverse (=1) transform. - bitReverseFlagR Selects bit reversed output (=0) or normal order - output (=1). - twidCoefRModifier stride modifier for the twiddle factor table. - The value is based on the FFT length; - pTwiddleARealpoints to the A array of twiddle coefficients; - pTwiddleBRealpoints to the B array of twiddle coefficients; - pCfft points to the CFFT Instance structure. The CFFT structure - must also be initialized. -@par - Note that with MVE versions you can't initialize instance structures directly and **must - use the initialization function**. - */ /** @addtogroup RealFFT @@ -610,4 +519,5 @@ void arm_rfft_fast_f16( */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f32.c index 7c58076..c93f6a0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f32.c @@ -5,13 +5,13 @@ * Title: arm_rfft_fast_f32.c * Description: RFFT & RIFFT Floating point process function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -473,7 +473,7 @@ void merge_rfft_f32( of the symmetry properties of the FFT and have a speed advantage over complex algorithms of the same length. @par - The Fast RFFT algorith relays on the mixed radix CFFT that save processor usage. + The Fast RFFT algorithm relays on the mixed radix CFFT that save processor usage. @par The real length N forward FFT of a sequence is computed using the steps shown below. @par @@ -497,6 +497,8 @@ void merge_rfft_f32( The main functions are \ref arm_rfft_fast_f32() and \ref arm_rfft_fast_init_f32(). The older functions \ref arm_rfft_f32() and \ref arm_rfft_init_f32() have been deprecated but are still documented. + For f16, the functions are \ref arm_rfft_fast_f16() and \ref arm_rfft_fast_init_f16(). + For f64, the functions are \ref arm_rfft_fast_f64() and \ref arm_rfft_fast_init_f64(). @par The FFT of a real N-point sequence has even symmetry in the frequency domain. The second half of the data equals the conjugate of the first half flipped in frequency. 
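The RealFFT documentation above notes that the N-point real FFT is stored as N/2 packed complex values, with the two purely real values X[0] (DC) and X[N/2] (Nyquist) sharing the first pair, and that f16 and f64 variants mirror the f32 API. As a minimal sketch of driving the f32 variant, assuming a 256-point frame (any supported length from 32 to 4096 would do) and the transform_functions.h include path used elsewhere in this patch:

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h"

#define RFFT_N 256u

/* Forward real FFT of one frame. Output layout:
 * out[0] = X[0] (DC, real), out[1] = X[N/2] (Nyquist, real),
 * then out[2k], out[2k+1] = Re/Im of bin k for k = 1 .. N/2-1. */
arm_status rfft_fast_f32_example(float32_t *in  /* RFFT_N samples, modified by the transform */,
                                 float32_t *out /* RFFT_N values */)
{
    arm_rfft_fast_instance_f32 S;

    arm_status status = arm_rfft_fast_init_f32(&S, RFFT_N);
    if (status != ARM_MATH_SUCCESS)
    {
        return status;               /* unsupported length */
    }

    arm_rfft_fast_f32(&S, in, out, 0);   /* ifftFlag = 0 selects the forward transform */
    return ARM_MATH_SUCCESS;
}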
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f64.c index 01594b4..2b0ba10 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_f64.c @@ -5,13 +5,13 @@ * Title: arm_rfft_fast_f64.c * Description: RFFT & RIFFT Double precision Floating point process function * - * $Date: 29. November 2019 - * $Revision: V1.0.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f16.c index 1d0dda6..1496b74 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f16.c @@ -5,11 +5,13 @@ * Title: arm_rfft_fast_init_f16.c * Description: Split Radix Decimation in Frequency CFFT Floating point processing function * + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -342,7 +344,7 @@ arm_status arm_rfft_fast_init_f16( break; #endif default: - return ARM_MATH_ARGUMENT_ERROR; + break; } if( ! fptr ) return ARM_MATH_ARGUMENT_ERROR; @@ -355,4 +357,5 @@ arm_status arm_rfft_fast_init_f16( */ #endif /* #if defined(ARM_FLOAT16_SUPPORTED) */ + #endif // EIDSP_LOAD_CMSIS_DSP_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f32.c index d1d1395..f469ac4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_rfft_fast_init_f32.c * Description: Split Radix Decimation in Frequency CFFT Floating point processing function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. 
All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -51,7 +51,7 @@ - \ref ARM_MATH_ARGUMENT_ERROR : an error is detected */ -static arm_status arm_rfft_32_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { +arm_status arm_rfft_32_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { arm_status status; @@ -81,7 +81,7 @@ static arm_status arm_rfft_32_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { - \ref ARM_MATH_ARGUMENT_ERROR : an error is detected */ -static arm_status arm_rfft_64_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { +arm_status arm_rfft_64_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { arm_status status; @@ -111,7 +111,7 @@ static arm_status arm_rfft_64_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { - \ref ARM_MATH_ARGUMENT_ERROR : an error is detected */ -static arm_status arm_rfft_128_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { +arm_status arm_rfft_128_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { arm_status status; @@ -141,7 +141,7 @@ static arm_status arm_rfft_128_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { - \ref ARM_MATH_ARGUMENT_ERROR : an error is detected */ -static arm_status arm_rfft_256_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { +arm_status arm_rfft_256_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { arm_status status; @@ -171,7 +171,7 @@ static arm_status arm_rfft_256_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { - \ref ARM_MATH_ARGUMENT_ERROR : an error is detected */ -static arm_status arm_rfft_512_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { +arm_status arm_rfft_512_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { arm_status status; @@ -200,7 +200,7 @@ static arm_status arm_rfft_512_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { - \ref ARM_MATH_ARGUMENT_ERROR : an error is detected */ -static arm_status arm_rfft_1024_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { +arm_status arm_rfft_1024_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { arm_status status; @@ -228,7 +228,7 @@ static arm_status arm_rfft_1024_fast_init_f32( arm_rfft_fast_instance_f32 * S ) - \ref ARM_MATH_SUCCESS : Operation successful - \ref ARM_MATH_ARGUMENT_ERROR : an error is detected */ -static arm_status arm_rfft_2048_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { +arm_status arm_rfft_2048_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { arm_status status; @@ -257,7 +257,7 @@ static arm_status arm_rfft_2048_fast_init_f32( arm_rfft_fast_instance_f32 * S ) - \ref ARM_MATH_ARGUMENT_ERROR : an error is detected */ -static arm_status arm_rfft_4096_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { +arm_status arm_rfft_4096_fast_init_f32( arm_rfft_fast_instance_f32 * S ) { arm_status status; @@ -341,7 +341,7 @@ arm_status arm_rfft_fast_init_f32( break; #endif default: - return ARM_MATH_ARGUMENT_ERROR; + break; } if( ! 
fptr ) return ARM_MATH_ARGUMENT_ERROR; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f64.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f64.c index 7423d9e..e653f86 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f64.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_fast_init_f64.c @@ -5,13 +5,13 @@ * Title: arm_rfft_fast_init_f64.c * Description: Split Radix Decimation in Frequency CFFT Double Precision Floating point processing function * - * $Date: 29. November 2019 - * $Revision: V1.0.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -333,7 +333,7 @@ arm_status arm_rfft_fast_init_f64( break; #endif default: - return ARM_MATH_ARGUMENT_ERROR; + break; } if( ! fptr ) return ARM_MATH_ARGUMENT_ERROR; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_f32.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_f32.c index 66f8ede..e1b088d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_f32.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_f32.c @@ -5,13 +5,13 @@ * Title: arm_rfft_init_f32.c * Description: RFFT & RIFFT Floating point initialisation function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_q15.c index 9408d49..79b0f4c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_q15.c @@ -5,13 +5,13 @@ * Title: arm_rfft_init_q15.c * Description: RFFT & RIFFT Q15 initialisation function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. 
* * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_q31.c index f9c5112..fa81090 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_init_q31.c @@ -5,13 +5,13 @@ * Title: arm_rfft_init_q31.c * Description: RFFT & RIFFT Q31 initialisation function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_q15.c index 45307dc..ee8b613 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_q15.c @@ -5,13 +5,13 @@ * Title: arm_rfft_q15.c * Description: RFFT & RIFFT Q15 process function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -66,10 +66,34 @@ void arm_split_rifft_q15( Internally input is downscaled by 2 for every stage to avoid saturations inside CFFT/CIFFT process. Hence the output format is different for different RFFT sizes. 
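The tables added below spell out, per FFT length, how far the Q15 output has been scaled down by this internal per-stage halving. As a minimal sketch of what that means in practice, assuming a 512-point forward transform (for which the table lists a 9.7 output format, i.e. 9 bits to upscale) and assuming the signal is known to leave enough headroom before shifting back:

#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/transform_functions.h"
#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/basic_math_functions.h"

#define RFFT_Q15_LEN 512u   /* per the table below: input 1.15, output 9.7, 9 bits to upscale */

arm_status rfft_q15_example(q15_t *in  /* RFFT_Q15_LEN samples, modified by the transform */,
                            q15_t *out /* 2 * RFFT_Q15_LEN values */)
{
    arm_rfft_instance_q15 S;

    /* ifftFlagR = 0: forward transform; bitReverseFlag = 1: normal-order output. */
    arm_status status = arm_rfft_init_q15(&S, RFFT_Q15_LEN, 0, 1);
    if (status != ARM_MATH_SUCCESS)
    {
        return status;
    }

    arm_rfft_q15(&S, in, out);

    /* Optionally undo the documented downscaling (9 bits for a 512-point RFFT);
     * arm_shift_q15 saturates, so this is only meaningful with sufficient headroom. */
    arm_shift_q15(out, 9, out, 2u * RFFT_Q15_LEN);

    return ARM_MATH_SUCCESS;
}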
The input and output formats for different RFFT sizes and number of bits to upscale are mentioned in the tables below for RFFT and RIFFT: - @par - \image html RFFTQ15.gif "Input and Output Formats for Q15 RFFT" - @par - \image html RIFFTQ15.gif "Input and Output Formats for Q15 RIFFT" + @par Input and Output formats for RFFT Q15 + +| RFFT Size | Input Format | Output Format | Number of bits to upscale | +| ---------: | ------------: | -------------: | ------------------------: | +| 32 | 1.15 | 5.11 | 5 | +| 64 | 1.15 | 6.10 | 6 | +| 128 | 1.15 | 7.9 | 7 | +| 256 | 1.15 | 8.8 | 8 | +| 512 | 1.15 | 9.7 | 9 | +| 1024 | 1.15 | 10.6 | 10 | +| 2048 | 1.15 | 11.5 | 11 | +| 4096 | 1.15 | 12.4 | 12 | +| 8192 | 1.15 | 13.3 | 13 | + + @par Input and Output formats for RIFFT Q15 + +| RIFFT Size | Input Format | Output Format | Number of bits to upscale | +| ----------: | ------------: | -------------: | ------------------------: | +| 32 | 1.15 | 5.11 | 0 | +| 64 | 1.15 | 6.10 | 0 | +| 128 | 1.15 | 7.9 | 0 | +| 256 | 1.15 | 8.8 | 0 | +| 512 | 1.15 | 9.7 | 0 | +| 1024 | 1.15 | 10.6 | 0 | +| 2048 | 1.15 | 11.5 | 0 | +| 4096 | 1.15 | 12.4 | 0 | +| 8192 | 1.15 | 13.3 | 0 | + @par If the input buffer is of length N, the output buffer must have length 2*N. The input buffer is modified by this function. @@ -190,8 +214,8 @@ void arm_split_rfft_q15( q15x8_t out = vhaddq_s16(MVE_CMPLX_MULT_FX_AxB_S16(in1, coefA), MVE_CMPLX_MULT_FX_AxConjB_S16(coefB, in2)); #else - q15x8_t out = vhaddq_s16(MVE_CMPLX_MULT_FX_AxB(in1, coefA), - MVE_CMPLX_MULT_FX_AxConjB(coefB, in2)); + q15x8_t out = vhaddq_s16(MVE_CMPLX_MULT_FX_AxB(in1, coefA, q15x8_t), + MVE_CMPLX_MULT_FX_AxConjB(coefB, in2, q15x8_t)); #endif vst1q_s16(pOut1, out); pOut1 += 8; @@ -415,8 +439,8 @@ void arm_split_rifft_q15( q15x8_t coefB = vldrhq_gather_shifted_offset_s16(pCoefBb, offsetCoef); /* can we avoid the conjugate here ? */ - q15x8_t out = vhaddq_s16(MVE_CMPLX_MULT_FX_AxConjB(in1, coefA), - vmulq(conj, MVE_CMPLX_MULT_FX_AxB(in2, coefB))); + q15x8_t out = vhaddq_s16(MVE_CMPLX_MULT_FX_AxConjB(in1, coefA, q15x8_t), + vmulq(conj, MVE_CMPLX_MULT_FX_AxB(in2, coefB, q15x8_t))); vst1q_s16(pDst, out); pDst += 8; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_q31.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_q31.c index 1741685..20d93cf 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_q31.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/TransformFunctions/arm_rfft_q31.c @@ -5,13 +5,13 @@ * Title: arm_rfft_q31.c * Description: FFT & RIFFT Q31 process function * - * $Date: 18. March 2019 - * $Revision: V1.6.0 + * $Date: 23 April 2021 + * $Revision: V1.9.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M and Cortex-A cores * -------------------------------------------------------------------- */ /* - * Copyright (C) 2010-2019 ARM Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -66,10 +66,34 @@ void arm_split_rifft_q31( Internally input is downscaled by 2 for every stage to avoid saturations inside CFFT/CIFFT process. Hence the output format is different for different RFFT sizes. 
The input and output formats for different RFFT sizes and number of bits to upscale are mentioned in the tables below for RFFT and RIFFT: - @par - \image html RFFTQ31.gif "Input and Output Formats for Q31 RFFT" - @par - \image html RIFFTQ31.gif "Input and Output Formats for Q31 RIFFT" + @par Input and Output formats for RFFT Q31 + +| RFFT Size | Input Format | Output Format | Number of bits to upscale | +| ---------: | ------------: | -------------: | ------------------------: | +| 32 | 1.31 | 5.27 | 5 | +| 64 | 1.31 | 6.26 | 6 | +| 128 | 1.31 | 7.25 | 7 | +| 256 | 1.31 | 8.24 | 8 | +| 512 | 1.31 | 9.23 | 9 | +| 1024 | 1.31 | 10.22 | 10 | +| 2048 | 1.31 | 11.21 | 11 | +| 4096 | 1.31 | 12.20 | 12 | +| 8192 | 1.31 | 13.19 | 13 | + + @par Input and Output formats for RIFFT Q31 + +| RIFFT Size | Input Format | Output Format | Number of bits to upscale | +| ----------: | ------------: | -------------: | ------------------------: | +| 32 | 1.31 | 5.27 | 0 | +| 64 | 1.31 | 6.26 | 0 | +| 128 | 1.31 | 7.25 | 0 | +| 256 | 1.31 | 8.24 | 0 | +| 512 | 1.31 | 9.23 | 0 | +| 1024 | 1.31 | 10.22 | 0 | +| 2048 | 1.31 | 11.21 | 0 | +| 4096 | 1.31 | 12.20 | 0 | +| 8192 | 1.31 | 13.19 | 0 | + @par If the input buffer is of length N, the output buffer must have length 2*N. The input buffer is modified by this function. @@ -183,7 +207,8 @@ void arm_split_rfft_q31( #if defined(__CMSIS_GCC_H) q31x4_t out = vhaddq_s32(MVE_CMPLX_MULT_FX_AxB_S32(in1, coefA),MVE_CMPLX_MULT_FX_AxConjB_S32(coefB, in2)); #else - q31x4_t out = vhaddq_s32(MVE_CMPLX_MULT_FX_AxB(in1, coefA),MVE_CMPLX_MULT_FX_AxConjB(coefB, in2)); + q31x4_t out = vhaddq_s32(MVE_CMPLX_MULT_FX_AxB(in1, coefA, q31x4_t), + MVE_CMPLX_MULT_FX_AxConjB(coefB, in2, q31x4_t)); #endif vst1q(pOut1, out); pOut1 += 4; @@ -342,8 +367,8 @@ void arm_split_rifft_q31( q31x4_t out = vhaddq_s32(MVE_CMPLX_MULT_FX_AxConjB_S32(in1, coefA), vmulq_s32(conj, MVE_CMPLX_MULT_FX_AxB_S32(in2, coefB))); #else - q31x4_t out = vhaddq_s32(MVE_CMPLX_MULT_FX_AxConjB(in1, coefA), - vmulq_s32(conj, MVE_CMPLX_MULT_FX_AxB(in2, coefB))); + q31x4_t out = vhaddq_s32(MVE_CMPLX_MULT_FX_AxConjB(in1, coefA, q31x4_t), + vmulq_s32(conj, MVE_CMPLX_MULT_FX_AxB(in2, coefB, q31x4_t))); #endif vst1q_s32(pDst, out); pDst += 4; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_math_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_math_types.h new file mode 100644 index 0000000..d650980 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_math_types.h @@ -0,0 +1,172 @@ +/* + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/****************************************************************************** + * @file arm_nn_math_types.h + * @brief Compiler include and basic types + * @version V1.2.0 + * @date 20 June 2022 + * Target Processor: Cortex-M + ******************************************************************************/ + +/** + Copied from CMSIS/DSP/arm_math_types.h and modified +*/ + +#ifndef _ARM_NN_MATH_TYPES_H_ + +#define _ARM_NN_MATH_TYPES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#include +#include +#include +#include +#include + +/* Integer aliases */ +typedef int8_t q7_t; +typedef int16_t q15_t; +typedef int32_t q31_t; +typedef int64_t q63_t; + +/* Compiler specific diagnostic adjustment */ +#if defined(__CC_ARM) + +#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + +#elif defined(__GNUC__) + +#elif defined(__ICCARM__) + +#elif defined(__TI_ARM__) + +#elif defined(__CSMC__) + +#elif defined(__TASKING__) + +#elif defined(_MSC_VER) + +#else +#error Unknown compiler +#endif + +/* Included for instrinsics definitions */ +#if defined(_MSC_VER) +#ifndef __STATIC_FORCEINLINE +#define __STATIC_FORCEINLINE static __forceinline +#endif +#ifndef __STATIC_INLINE +#define __STATIC_INLINE static __inline +#endif +#ifndef __ALIGNED +#define __ALIGNED(x) __declspec(align(x)) +#endif + +#elif defined(__GNUC_PYTHON__) +#ifndef __ALIGNED +#define __ALIGNED(x) __attribute__((aligned(x))) +#endif +#ifndef __STATIC_FORCEINLINE +#define __STATIC_FORCEINLINE static inline __attribute__((always_inline)) +#endif +#ifndef __STATIC_INLINE +#define __STATIC_INLINE static inline +#endif + +#else +#include "edge-impulse-sdk/CMSIS/Core/Include/cmsis_compiler.h" +#endif + +/* evaluate ARM DSP feature */ +#if (defined(__ARM_FEATURE_DSP) && (__ARM_FEATURE_DSP == 1)) +#ifndef ARM_MATH_DSP +#define ARM_MATH_DSP 1 +#endif +#endif + +#if __ARM_FEATURE_MVE +#ifndef ARM_MATH_MVEI +#define ARM_MATH_MVEI +#endif +#endif + +/* Compiler specific diagnostic adjustment */ +#if defined(__CC_ARM) + +#elif defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) + +#elif defined(__GNUC__) +// #pragma GCC diagnostic pop + +#elif defined(__ICCARM__) + +#elif defined(__TI_ARM__) + +#elif defined(__CSMC__) + +#elif defined(__TASKING__) + +#elif defined(_MSC_VER) + +#else +#error Unknown compiler +#endif + +#ifdef __cplusplus +} +#endif + +#if __ARM_FEATURE_MVE +#include +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/** + * @brief Add necessary typedefs + */ + +#define NN_Q31_MAX ((q31_t)(0x7FFFFFFFL)) +#define NN_Q15_MAX ((q15_t)(0x7FFF)) +#define NN_Q7_MAX ((q7_t)(0x7F)) +#define NN_Q31_MIN ((q31_t)(0x80000000L)) +#define NN_Q15_MIN ((q15_t)(0x8000)) +#define NN_Q7_MIN ((q7_t)(0x80)) + +/** + * @brief Error status returned by some functions in the library. 
+ */ + +typedef enum +{ + ARM_CMSIS_NN_SUCCESS = 0, /**< No error */ + ARM_CMSIS_NN_ARG_ERROR = -1, /**< One or more arguments are incorrect */ + ARM_CMSIS_NN_NO_IMPL_ERROR = -2, /**< No implementation available */ +} arm_cmsis_nn_status; + +#ifdef __cplusplus +} +#endif + +#endif /*ifndef _ARM_NN_MATH_TYPES_H_ */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_tables.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_tables.h index 3d2b534..85a7537 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_tables.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_tables.h @@ -3,8 +3,8 @@ * Title: arm_nn_tables.h * Description: Extern declaration for NN tables * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 17. August 2021 + * $Revision: V.1.0.2 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ @@ -29,12 +29,12 @@ #ifndef _ARM_NN_TABLES_H #define _ARM_NN_TABLES_H -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_math_types.h" /** -* @brief tables for various activation functions -* -*/ + * @brief tables for various activation functions + * + */ extern const q15_t sigmoidTable_q15[256]; extern const q7_t sigmoidTable_q7[256]; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h index 206af07..6040d72 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2020-2022 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -22,8 +22,8 @@ * Description: Public header file to contain the CMSIS-NN structs for the * TensorFlowLite micro compliant functions * - * $Date: 09. October 2020 - * $Revision: V.1.0.0 + * $Date: 22. Februari 2022 + * $Revision: V.2.1.0 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ @@ -112,7 +112,7 @@ typedef struct typedef struct { int32_t input_offset; /**< Zero value for the input tensor */ - int32_t filter_offset; /**< Zero value for the filter tensor */ + int32_t filter_offset; /**< Zero value for the filter tensor. 
Not used */ int32_t output_offset; /**< Zero value for the output tensor */ cmsis_nn_activation activation; } cmsis_nn_fc_params; @@ -127,4 +127,11 @@ typedef struct cmsis_nn_activation output_activation; } cmsis_nn_svdf_params; +/** CMSIS-NN object for Softmax s16 layer parameters */ +typedef struct +{ + const int16_t *exp_lut; + const int16_t *one_by_one_lut; +} cmsis_nn_softmax_lut_s16; + #endif // _ARM_NN_TYPES_H diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h index f43c0de..1548a20 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +21,8 @@ * Title: arm_nnfunctions.h * Description: Public header file for CMSIS NN Library * - * $Date: 19 January 2021 - * $Revision: V.6.5.3 + * $Date: 7 Aug 2022 + * $Revision: V.10.1.2 * * Target Processor: Cortex-M CPUs * -------------------------------------------------------------------- */ @@ -51,6 +51,15 @@ * kernels are included in the function description. The implementation details are also * described in this paper [1]. * + * Supported Processors + * ------- + * CMSIS-NN targets Cortex-M processors with typically three different implementations for each function. Each + * targets a different group of processors. + * - Processors without SIMD capability (e.g, Cortex-M0) + * - Processors with DSP extention (e.g Cortex-M4) + * - Processors with MVE extension (e.g Cortex-M55) + * The right implementation is picked through feature flags and the user usually does not have to explicit set it. + * * Function Classification * -------- * The functions can be classified into two segments @@ -103,7 +112,7 @@ * Copyright Notice * ------------ * - * Copyright (C) 2010-2019 Arm Limited. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * [1] CMSIS-NN: Efficient Neural Network Kernels for Arm Cortex-M CPUs https://arxiv.org/abs/1801.06601 * @@ -124,809 +133,620 @@ #ifndef _ARM_NNFUNCTIONS_H #define _ARM_NNFUNCTIONS_H +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_math_types.h" #include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h" -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h" -#include "arm_nn_types.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h" #define USE_INTRINSIC //#define ARM_NN_TRUNCATE /* This config the rounding model to floor or round to the nearest int */ #ifdef __cplusplus -extern "C" -{ +extern "C" { #endif - /** - * @brief Struct for specifying activation function types - * - */ - typedef enum - { - ARM_SIGMOID = 0, - /**< Sigmoid activation function */ - ARM_TANH = 1, - /**< Tanh activation function */ - } arm_nn_activation_type; - - /** - * @defgroup NNConv Convolution Functions - * - * Collection of convolution, depthwise convolution functions and their variants. - * - * The convolution is implemented in 2 steps: im2col and GEMM - * - * im2col is a process of converting each patch of image data into - * a column. After im2col, the convolution is computed as matrix-matrix - * multiplication. 
- * - * To reduce the memory footprint, the im2col is performed partially. - * Each iteration, only a few column (i.e., patches) are generated and - * computed with GEMM kernels similar to CMSIS-DSP arm_mat_mult functions. - * - */ - - /** - * @brief s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in - cmsis-nn - * to perform the convolution. - * - * @param[in, out] ctx Function context that contains the additional buffer if required by the function. - arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the - * spatial filter dimensions - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * @param[out] output_data Output data pointer. Data type: int8 - * - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, - * ARM_MATH_SUCCESS on successful completion. - * - */ - arm_status arm_convolve_wrapper_s8(const cmsis_nn_context *ctx, - const cmsis_nn_conv_params *conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for arm_convolve_wrapper_s8 - * - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN] - * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial - * filter dimensions - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * - * @return The function returns required buffer size(bytes) - * - */ - int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params *conv_params, - const cmsis_nn_dims *input_dims, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims); - - /** - * @brief Basic s8 convolution function - * @param[in, out] ctx Function context that contains the additional buffer if required by the function. - arm_convolve_s8_get_buffer_size will return the buffer_size if required - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. 
- * It contains the multiplier and shift values to be applied to each output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the - * spatial filter dimensions - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Optional bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * @param[out] output_data Output data pointer. Data type: int8 - - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * 1. Supported framework: TensorFlow Lite micro - * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - * 3. Additional memory is required for optimization. Refer to argument 'ctx' for details. - * - */ - arm_status arm_convolve_s8(const cmsis_nn_context *ctx, - const cmsis_nn_conv_params *conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for s8 convolution function - * - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK - * are the spatial filter dimensions - * @return The function returns required buffer size(bytes) - * - */ - int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); - - /** - * @brief Basic Q7 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - * - */ - arm_status arm_convolve_HWC_q7_basic(const q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Basic Q7 convolution function (non-square shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out 
number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - */ - arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Basic Q15 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - * - */ - arm_status arm_convolve_HWC_q15_basic(const q15_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q15_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q15_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Fast Q7 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the 
outcome of size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 - */ - arm_status arm_convolve_HWC_q7_fast(const q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Fast Q7 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 - */ - - arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, - * ARM_MATH_SUCCESS on successful completion. - * - * This function implement convolution with 1x1 kernel size (i.e., dim_kernel_x=1 - * and dim_kernel_y=1). It can be used for - * second half of MobileNets after depthwise separable convolution. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 - */ - arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Fast s8 version for 1x1 convolution (non-square shape) - * - * @param[in, out] ctx Function context that contains the additional buffer if required by the function. - arm_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. 
- * It contains the multiplier and shift values to be applied to each output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN] - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Optional bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * @param[out] output_data Output data pointer. Data type: int8 - * - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, - * ARM_MATH_SUCCESS on successful completion. - * - * @details - * - Supported framework : TensorFlow Lite Micro - * - The following constrains on the arguments apply - * -# input_dims->c is a multiple of 4 - * -# conv_params->padding.w = conv_params->padding.h = 0 - * -# conv_params->stride.w = conv_params->stride.h = 1 - * - */ - arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, - const cmsis_nn_conv_params *conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for arm_convolve_1x1_s8_fast - * - * @param[in] input_dims Input (activation) dimensions - * @return The function returns the required buffer size in bytes - * - */ - int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims *input_dims); - - /** - * @brief 1xn convolution - * - * @param[in, out] ctx Function context that contains the additional buffer if required by the function. - arm_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required - * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). - * Range of conv_params->input_offset : [-127, 128] - * Range of conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal - * spatial filter dimension - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Optional bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] - * @param[out] output_data Output data pointer. Data type: int8 - * - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH if argument constraints fail. or, - * ARM_MATH_SUCCESS on successful completion. 
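- *
- * A minimal usage sketch; the 1x1x64x8 -> 1x1x64x16 shapes, offsets, quantization
- * arrays and scratch size below are illustrative assumptions, and the cmsis_nn_*
- * field names follow arm_nn_types.h.
- * @code
- *     #include "arm_nnfunctions.h"
- *
- *     void conv_1_x_n_example(void)
- *     {
- *         static q7_t    in[1 * 1 * 64 * 8], wt[16 * 1 * 3 * 8], out[1 * 1 * 64 * 16];
- *         static int32_t bias[16], out_mult[16], out_shift[16];
- *         static int8_t  scratch[512];   // in real code, size via arm_convolve_1_x_n_s8_get_buffer_size()
- *
- *         const cmsis_nn_dims input_dims  = {1, 1, 64, 8};    // [N, H, W, C_IN], H must be 1
- *         const cmsis_nn_dims filter_dims = {16, 1, 3, 8};    // [C_OUT, 1, WK, C_IN]
- *         const cmsis_nn_dims bias_dims   = {1, 1, 1, 16};
- *         const cmsis_nn_dims output_dims = {1, 1, 64, 16};   // output W is a multiple of 4
- *
- *         const cmsis_nn_conv_params conv_params = {
- *             .input_offset = 0, .output_offset = 0,
- *             .stride = {1, 1}, .padding = {1, 0}, .dilation = {1, 1},
- *             .activation = {-128, 127}};
- *         const cmsis_nn_per_channel_quant_params quant_params = {out_mult, out_shift};
- *         cmsis_nn_context ctx = {scratch, sizeof(scratch)};
- *
- *         arm_status rc = arm_convolve_1_x_n_s8(&ctx, &conv_params, &quant_params,
- *                                               &input_dims, in, &filter_dims, wt,
- *                                               &bias_dims, bias, &output_dims, out);
- *         (void)rc;
- *     }
- * @endcode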
- * - * @details - * - Supported framework : TensorFlow Lite Micro - * - The following constrains on the arguments apply - * -# input_dims->n equals 1 - * -# ouput_dims->w is a multiple of 4 - * -# Explicit constraints(since it is for 1xN convolution) - * -## input_dims->h equals 1 - * -## output_dims->h equals 1 - * -## filter_dims->h equals 1 - *@todo Remove constraint on output_dims->w to make the function generic. - * - */ - arm_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx, +/** + * @brief Struct for specifying activation function types + * + */ +typedef enum +{ + ARM_SIGMOID = 0, + /**< Sigmoid activation function */ + ARM_TANH = 1, + /**< Tanh activation function */ +} arm_nn_activation_type; + +/** + * @defgroup NNConv Convolution Functions + * + * Collection of convolution, depthwise convolution functions and their variants. + * + * The convolution is implemented in 2 steps: im2col and GEMM + * + * im2col is a process of converting each patch of image data into + * a column. After im2col, the convolution is computed as matrix-matrix + * multiplication. + * + * To reduce the memory footprint, the im2col is performed partially. + * Each iteration, only a few column (i.e., patches) are generated and + * computed with GEMM kernels similar to CMSIS-DSP arm_mat_mult functions. + * + */ + +/** + * @brief s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + */ +arm_cmsis_nn_status arm_convolve_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s8 + * + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). 
+ * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial + * filter dimensions + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief s16 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_wrapper_s8_get_buffer_size will return the buffer_size if required + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int16 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + */ +arm_cmsis_nn_status arm_convolve_wrapper_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data); + +/** + * @brief Get the required buffer size for arm_convolve_wrapper_s16 + * + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] input_dims Input (activation) dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the spatial + * filter dimensions + * @param[in] output_dims Output tensor dimensions. 
Format: [N, H, W, C_OUT] + * + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_wrapper_s16_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Basic s8 convolution function + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_s8_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + * 3. Additional memory is required for optimization. Refer to argument 'ctx' for details. + * + */ +arm_cmsis_nn_status arm_convolve_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Get the required buffer size for s8 convolution function + * + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK + * are the spatial filter dimensions + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Basic s16 convolution function + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_s16_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. 
Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int16 + + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. q7/q15 is used as data type eventhough it is s8/s16 data. It is done so to be consistent with existing APIs. + * 3. Additional memory is required for optimization. Refer to argument 'ctx' for details. + * + */ +arm_cmsis_nn_status arm_convolve_s16(const cmsis_nn_context *ctx, const cmsis_nn_conv_params *conv_params, const cmsis_nn_per_channel_quant_params *quant_params, const cmsis_nn_dims *input_dims, - const q7_t *input_data, + const q15_t *input_data, const cmsis_nn_dims *filter_dims, const q7_t *filter_data, const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, + const int64_t *bias_data, const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required additional buffer size for 1xn convolution - * - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the - * horizontal spatial filter dimension - * @return The function returns required buffer size(bytes) - * - */ - int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); - - /** - * @brief Q7 version of convolution for RGB image - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This kernel is written exclusively for convolution with ch_im_in - * equals 3. This applies on the first layer of CNNs which has input - * image with RGB format. 
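- *
- * A minimal usage sketch for a 32x32 RGB input and a 5x5 kernel; the shapes, shift
- * values, scratch sizing and the NULL bufferB below are illustrative assumptions.
- * @code
- *     #include "arm_nnfunctions.h"
- *
- *     void rgb_first_layer_example(void)
- *     {
- *         static q7_t  img[32 * 32 * 3], wt[8 * 5 * 5 * 3], bias[8], out[32 * 32 * 8];
- *         static q15_t bufferA[2 * 3 * 5 * 5];   // assumed im2col scratch: 2*ch_im_in*dim_kernel*dim_kernel
- *
- *         arm_status rc = arm_convolve_HWC_q7_RGB(img, 32, 3, wt, 8, 5, 2, 1,
- *                                                 bias, 0, 9, out, 32, bufferA, NULL);
- *         (void)rc;
- *     }
- * @endcode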
- */ - - arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Fast Q15 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 2 - * ch_im_out is multiple of 2 - */ - - arm_status arm_convolve_HWC_q15_fast(const q15_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q15_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q15_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Fast Q15 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
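- *
- * A minimal usage sketch for a rectangular 24x12 input; the shapes and shift values
- * are illustrative assumptions, and bufferA is sized as documented below.
- * @code
- *     #include "arm_nnfunctions.h"
- *
- *     void q15_fast_nonsquare_example(void)
- *     {
- *         static q15_t in[24 * 12 * 2], wt[4 * 3 * 3 * 2], bias[4], out[24 * 12 * 4];
- *         static q15_t bufferA[2 * 2 * 3 * 3];   // 2*ch_im_in*dim_kernel*dim_kernel
- *
- *         arm_status rc = arm_convolve_HWC_q15_fast_nonsquare(in, 24, 12, 2, wt, 4, 3, 3,
- *                                                             1, 1, 1, 1, bias, 0, 12,
- *                                                             out, 24, 12, bufferA, NULL);
- *         (void)rc;
- *     }
- * @endcode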
- * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in is multiple of 2 - * - * ch_im_out is multipe of 2 - * - */ - - arm_status arm_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q15_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q15_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Q7 depthwise separable convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 2 - * ch_im_out is multiple of 2 - */ - - arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Q7 depthwise separable convolution function (non-square shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding sizes x - * @param[in] padding_y padding sizes y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of 
size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 2 - * ch_im_out is multiple of 2 - */ - arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB); - - /** - * @brief Wrapper function to pick the right optimized s8 depthwise convolution function - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if required. - * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) - * dw_conv_params->dilation is not used. - * Range of dw_conv_params->input_offset : [-127, 128] - * Range of dw_conv_params->output_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each - * output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] - * Batch argument N is not used and assumed to be 1. - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in, out] output_data Output data pointer. Data type: int8 - * @return The function returns - * ARM_MATH_SUCCESS - Successful completion. - * - * @details - * - Supported framework: TensorFlow Lite - * - Picks one of the the following functions - * -# arm_depthwise_conv_s8() - * -# arm_depthwise_conv_3x3_s8() - Cortex-M CPUs with DSP extension only - * -# arm_depthwise_conv_s8_opt() - * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - * - Check details of arm_depthwise_conv_s8_opt() for potential data that can be accessed outside of the - * boundary. - */ - arm_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, + q15_t *output_data); +/** + * @brief Optimized s16 convolution function + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_fast_s16_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. 
Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK are the + * spatial filter dimensions. (filter_dims->w * filter_dims->h * input_dims->c) must not + exceed 512 + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int16 + + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. q7/q15 is used as data type eventhough it is s8/s16 data. It is done so to be consistent with existing APIs. + * 3. Additional memory is required for optimization. Refer to argument 'ctx' for details. + * 4. Implementation supports kernel volumes (filter width * filter height * input channels) < 512. + * + */ + +arm_cmsis_nn_status arm_convolve_fast_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data); + +/** + * @brief Get the required buffer size for s16 convolution function + * + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, HK, WK, C_IN] where HK and WK + * are the spatial filter dimensions + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Get the required buffer size for fast s16 convolution function + * + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. 
Format: [C_OUT, HK, WK, C_IN] where HK and WK + * are the spatial filter dimensions + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_fast_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Basic Q7 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_convolve_HWC_q7_basic(const q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Basic Q7 convolution function (non-square shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_CMSIS_NN_SUCCESS + */ +arm_cmsis_nn_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Basic Q15 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, 
i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_convolve_HWC_q15_basic(const q15_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q15_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q15_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Fast Q7 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR or ARM_CMSIS_NN_SUCCESS based on the outcome of input arguments + * constraints checking. 
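+ *
+ * A minimal usage sketch; the 16x16x4 -> 16x16x8 shapes, shift values, scratch sizing
+ * and the NULL bufferB below are illustrative assumptions.
+ * @code
+ *     #include "arm_nnfunctions.h"
+ *
+ *     void q7_fast_example(void)
+ *     {
+ *         static q7_t  in[16 * 16 * 4], wt[8 * 3 * 3 * 4], bias[8], out[16 * 16 * 8];
+ *         static q15_t bufferA[2 * 4 * 3 * 3];   // assumed im2col scratch: 2*ch_im_in*dim_kernel*dim_kernel
+ *
+ *         arm_cmsis_nn_status rc = arm_convolve_HWC_q7_fast(in, 16, 4, wt, 8, 3, 1, 1,
+ *                                                           bias, 0, 9, out, 16,
+ *                                                           bufferA, NULL);
+ *         (void)rc;
+ *     }
+ * @endcode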
+ * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 4 + * ch_im_out is multiple of 2 + */ +arm_cmsis_nn_status arm_convolve_HWC_q7_fast(const q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Fast Q7 convolution function (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR or ARM_CMSIS_NN_SUCCESS based on the outcome of input arguments + * constraints checking. 
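+ *
+ * A minimal usage sketch for a rectangular 32x16 input; the shapes, shift values,
+ * scratch sizing and the NULL bufferB below are illustrative assumptions.
+ * @code
+ *     #include "arm_nnfunctions.h"
+ *
+ *     void q7_fast_nonsquare_example(void)
+ *     {
+ *         static q7_t  in[32 * 16 * 4], wt[8 * 3 * 3 * 4], bias[8], out[32 * 16 * 8];
+ *         static q15_t bufferA[2 * 4 * 3 * 3];   // assumed im2col scratch
+ *
+ *         arm_cmsis_nn_status rc = arm_convolve_HWC_q7_fast_nonsquare(
+ *             in, 32, 16, 4, wt, 8, 3, 3, 1, 1, 1, 1,
+ *             bias, 0, 9, out, 32, 16, bufferA, NULL);
+ *         (void)rc;
+ *     }
+ * @endcode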
+ * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 4 + * ch_im_out is multiple of 2 + */ + +arm_cmsis_nn_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * This function implement convolution with 1x1 kernel size (i.e., dim_kernel_x=1 + * and dim_kernel_y=1). It can be used for + * second half of MobileNets after depthwise separable convolution. + * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 4 + * ch_im_out is multiple of 2 + */ +arm_cmsis_nn_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Fast s8 version for 1x1 convolution (non-square shape) + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_1x1_s8_fast_get_buffer_size will return the buffer_size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. 
+ * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, 1, C_IN] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# input_dims->c is a multiple of 4 + * -# conv_params->padding.w = conv_params->padding.h = 0 + * -# conv_params->stride.w = conv_params->stride.h = 1 + * + */ +arm_cmsis_nn_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, const cmsis_nn_per_channel_quant_params *quant_params, const cmsis_nn_dims *input_dims, const q7_t *input_data, @@ -937,480 +757,966 @@ extern "C" const cmsis_nn_dims *output_dims, q7_t *output_data); - /** - * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8() - * - * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) - * dw_conv_params->dilation is not used. - * Range of dw_conv_params->input_offset : [-127, 128] - * Range of dw_conv_params->input_offset : [-128, 127] - * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] - * Batch argument N is not used and assumed to be 1. - * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] - * @return Size of additional memory required for optimizations in bytes. - * - */ - int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_dims *input_dims, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims); - - /** - * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions. - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if an additional buffer is required. - * exists if additional memory is. - * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) - * dw_conv_params->dilation is not used. - * Range of dw_conv_params->input_offset : [-127, 128] - * Range of dw_conv_params->input_offset : [-128, 127] - * @param[in] quant_params Per-channel quantization info. - * It contains the multiplier and shift values to be applied to each - * output channel - * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] - * Batch argument N is not used. - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in] filter_data Filter data pointer. 
Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * @param[in] bias_data Bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] - * @param[in, out] output_data Output data pointer. Data type: int8 - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - Supported framework: TensorFlow Lite - * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - */ - arm_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with some constraints on - * the input arguments(documented below). Refer arm_depthwise_conv_s8() for function - * argument details. - * - * @return The function returns one of the following - * ARM_MATH_SIZE_MISMATCH - Unsupported dimension of tensors - * ARM_MATH_ARGUMENT_ERROR - Unsupported pad size along the x axis - * ARM_MATH_SUCCESS - Successful operation - * - * @details - * - Supported framework : TensorFlow Lite Micro - * - The following constrains on the arguments apply - * -# Number of input channel equals number of output channels - * -# Filter height and width equals 3 - * -# Padding along x is either 0 or 1. - * - */ - arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel. - * Refer arm_depthwise_conv_s8() for function argument details. - * - * @return The function returns one of the following - * ARM_MATH_SIZE_MISMATCH - input channel != output channel or - * ch_mult != 1 - * ARM_MATH_SUCCESS - Successful operation - * - * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out - * for the following if MVE optimizations(Arm Helium Technology) are used. - * - Output shift - * - Output multiplier - * - Output bias - * - kernel - * @details - * - Supported framework: TensorFlow Lite - * - The following constrains on the arguments apply - * -# Number of input channel equals number of output channels or ch_mult equals 1 - * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - * - Reccomended when number of channels is 4 or greater. 
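- *
- * A minimal usage sketch; the 16x16x8 shapes, offsets, quantization arrays and scratch
- * size below are illustrative assumptions, and the cmsis_nn_* field names follow
- * arm_nn_types.h. In real code, size the scratch buffer with
- * arm_depthwise_conv_s8_opt_get_buffer_size().
- * @code
- *     #include "arm_nnfunctions.h"
- *
- *     void dw_conv_s8_opt_example(void)
- *     {
- *         static q7_t    in[16 * 16 * 8], wt[3 * 3 * 8], out[16 * 16 * 8];
- *         static int32_t bias[8], out_mult[8], out_shift[8];
- *         static int8_t  scratch[1024];                       // assumed upper bound for this shape
- *
- *         const cmsis_nn_dims input_dims  = {1, 16, 16, 8};   // [1, H, W, C_IN]
- *         const cmsis_nn_dims filter_dims = {1, 3, 3, 8};     // [1, H, W, C_OUT]
- *         const cmsis_nn_dims bias_dims   = {1, 1, 1, 8};
- *         const cmsis_nn_dims output_dims = {1, 16, 16, 8};
- *
- *         const cmsis_nn_dw_conv_params dw_conv_params = {
- *             .input_offset = 0, .output_offset = 0, .ch_mult = 1,
- *             .stride = {1, 1}, .padding = {1, 1}, .dilation = {1, 1},
- *             .activation = {-128, 127}};
- *         const cmsis_nn_per_channel_quant_params quant_params = {out_mult, out_shift};
- *         cmsis_nn_context ctx = {scratch, sizeof(scratch)};
- *
- *         arm_status rc = arm_depthwise_conv_s8_opt(&ctx, &dw_conv_params, &quant_params,
- *                                                   &input_dims, in, &filter_dims, wt,
- *                                                   &bias_dims, bias, &output_dims, out);
- *         (void)rc;
- *     }
- * @endcode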
- * - */ - arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for optimized s8 depthwise convolution - * function with constraint that in_channel equals out_channel. - * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] - * Batch argument N is not used. - * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] - * @return The function returns required buffer size in bytes - * - */ - int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims *input_dims, - const cmsis_nn_dims *filter_dims); - - /** - * @defgroup FC Fully-connected Layer Functions - * - * Collection of fully-connected and matrix multiplication functions. - * - * Fully-connected layer is basically a matrix-vector multiplication - * with bias. The matrix is the weights and the input/output vectors - * are the activation values. Supported {weight, activation} precisions - * include {8-bit, 8-bit}, {16-bit, 16-bit}, and {8-bit, 16-bit}. - * - * Here we have two types of kernel functions. The basic function - * implements the function using regular GEMV approach. The opt functions - * operates with weights in interleaved formats. - * - */ - - /** - *@brief Q7 basic fully-connected layer function - *@param[in] pV pointer to input vector - *@param[in] pM pointer to matrix weights - *@param[in] dim_vec length of the vector - *@param[in] num_of_rows number of rows in weight matrix - *@param[in] bias_shift amount of left-shift for bias - *@param[in] out_shift amount of right-shift for output - *@param[in] bias pointer to bias - *@param[in,out] pOut pointer to output vector - *@param[in,out] vec_buffer pointer to buffer space for input - *@return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_q7(const q7_t *pV, - const q7_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q7_t *pOut, - q15_t *vec_buffer); - - /** - * @brief Basic s8 Fully Connected function. - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if an additional buffer is required. - * @param[in] fc_params Fully Connected layer parameters (e.g. strides, dilations, pads,...) - * Range of fc_params->input_offset : [-127, 128] - * Range of fc_params->filter_offset : [-127, 128] - * Range of fc_params->output_offset : [-128, 127] - * @param[in] quant_params Per-tensor quantization info. - * It contains the multiplier and shift values to be applied to the output tensor. - * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] - * Input dimension is taken as Nx(H * W * C_IN) - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Two dimensional filter dimensions. 
Format: [N, C] - * N : accumulation depth and equals (H * W * C_IN) from input_dims - * C : output depth and equals C_OUT in output_dims - * H & W : Not used - * @param[in] filter_data Filter data pointer. Data type: int8 - * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] - * N, H, W : Not used - * @param[in] bias_data Bias data pointer. Data type: int32 - * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] - * N : Batches - * C_OUT : Output depth - * H & W : Not used. - * @param[in, out] output_data Output data pointer. Data type: int8 - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - Supported framework: TensorFlow Lite - * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - */ - arm_status arm_fully_connected_s8(const cmsis_nn_context *ctx, - const cmsis_nn_fc_params *fc_params, - const cmsis_nn_per_tensor_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for S8 basic fully-connected and - * matrix multiplication layer function for TF Lite - * @param[in] filter_dims dimension of filter - * @return The function returns required buffer size in bytes - * - */ - int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims); - - /** - * @brief Q7 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_q7_opt(const q7_t *pV, - const q7_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q7_t *pOut, - q15_t *vec_buffer); - - /** - * @brief Q15 basic fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_q15(const q15_t *pV, - const q15_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q15_t *bias, - q15_t *pOut, - q15_t *vec_buffer); - - /** - * @brief Q15 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias 
pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_q15_opt(const q15_t *pV, - const q15_t *pM, +/** + * @brief Get the required buffer size for arm_convolve_1x1_s8_fast + * + * @param[in] input_dims Input (activation) dimensions + * @return The function returns the required buffer size in bytes + * + */ +int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims *input_dims); + +/** + * @brief 1xn convolution + * + * @param[in, out] ctx Function context that contains the additional buffer if required by the function. + * arm_convolve_1_x_n_s8_get_buffer_size will return the buffer_size if required + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] conv_params Convolution parameters (e.g. strides, dilations, pads,...). + * Range of conv_params->input_offset : [-127, 128] + * Range of conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [C_OUT, 1, WK, C_IN] where WK is the horizontal + * spatial filter dimension + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Optional bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[out] output_data Output data pointer. Data type: int8 + * + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR if argument constraints fail. or, + * ARM_CMSIS_NN_SUCCESS on successful completion. + * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# input_dims->n equals 1 + * -# ouput_dims->w is a multiple of 4 + * -# Explicit constraints(since it is for 1xN convolution) + * -## input_dims->h equals 1 + * -## output_dims->h equals 1 + * -## filter_dims->h equals 1 + *@todo Remove constraint on output_dims->w to make the function generic. + * + */ +arm_cmsis_nn_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Get the required additional buffer size for 1xn convolution + * + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * @param[in] filter_dims Filter tensor dimensions. 
Format: [C_OUT, 1, WK, C_IN] where WK is the + * horizontal spatial filter dimension + * @return The function returns required buffer size(bytes) + * + */ +int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Q7 version of convolution for RGB image + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR or ARM_CMSIS_NN_SUCCESS based on the outcome of input arguments + * constraints checking. + * + * This kernel is written exclusively for convolution with ch_im_in + * equals 3. This applies on the first layer of CNNs which has input + * image with RGB format. + */ + +arm_cmsis_nn_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Fast Q15 convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR or ARM_CMSIS_NN_SUCCESS based on the outcome of input arguments + * constraints checking. 
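
A minimal call sketch for the 1xN convolution declared above (illustrative only, not part of the patch): the scratch buffer is sized with arm_convolve_1_x_n_s8_get_buffer_size() and handed in through cmsis_nn_context. The tensor shapes, offsets and per-channel quantization arrays are placeholder assumptions, and the struct field names are assumed to follow cmsis_nn_dims / cmsis_nn_conv_params from arm_nn_types.h.

#include "arm_nnfunctions.h"
#include <stdlib.h>

static arm_cmsis_nn_status run_1xn_conv_example(const q7_t *input, const q7_t *kernel,
                                                const int32_t *bias, q7_t *output)
{
    /* Assumed shapes: 1x8 input row, 4 channels in, 8 channels out, 1x3 kernel. */
    const cmsis_nn_dims input_dims  = {.n = 1, .h = 1, .w = 8, .c = 4};
    const cmsis_nn_dims filter_dims = {.n = 8, .h = 1, .w = 3, .c = 4}; /* [C_OUT, 1, WK, C_IN] */
    const cmsis_nn_dims bias_dims   = {.n = 1, .h = 1, .w = 1, .c = 8};
    const cmsis_nn_dims output_dims = {.n = 1, .h = 1, .w = 8, .c = 8}; /* output w is a multiple of 4 */

    static int32_t out_mult[8];  /* per-channel requantization values, filled elsewhere */
    static int32_t out_shift[8];
    const cmsis_nn_per_channel_quant_params quant_params = {.multiplier = out_mult, .shift = out_shift};

    const cmsis_nn_conv_params conv_params = {
        .input_offset = 0, .output_offset = 0,
        .stride = {1, 1}, .padding = {1, 0}, .dilation = {1, 1},
        .activation = {-128, 127}};

    /* Size and allocate the optional scratch buffer, then run the convolution. */
    cmsis_nn_context ctx = {NULL, 0};
    ctx.size = arm_convolve_1_x_n_s8_get_buffer_size(&input_dims, &filter_dims);
    if (ctx.size > 0) {
        ctx.buf = malloc((size_t)ctx.size);
    }

    arm_cmsis_nn_status status = arm_convolve_1_x_n_s8(&ctx, &conv_params, &quant_params,
                                                       &input_dims, input, &filter_dims, kernel,
                                                       &bias_dims, bias, &output_dims, output);
    free(ctx.buf);
    return status;
}
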
+ * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 2 + * ch_im_out is multiple of 2 + * dim_im_out is a multiple of 2 + */ + +arm_cmsis_nn_status arm_convolve_HWC_q15_fast(const q15_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q15_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q15_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Fast Q15 convolution function (non-sqaure shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding size x + * @param[in] padding_y padding size y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR or ARM_CMSIS_NN_SUCCESS based on the outcome of input arguments + * constraints checking. 
+ * + * @details + * + * Buffer size: + * + * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel + * + * bufferB size: 0 + * + * Input dimension constraints: + * + * ch_im_in is multiple of 2 + * + * ch_im_out is multipe of 2 + * + */ + +arm_cmsis_nn_status arm_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q15_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q15_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Q7 depthwise separable convolution function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR or ARM_CMSIS_NN_SUCCESS based on the outcome of input arguments + * constraints checking. 
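
A sketch of how the legacy HWC q15 "fast" convolution above is typically invoked (illustrative, not part of the patch). The scratch buffer follows the bufferA sizing rule documented above, 2 * ch_im_in * dim_kernel * dim_kernel, here allocated as q15 elements, and bufferB is unused; channel counts and the output dimension are kept even as the constraints require. The shift values are placeholders.

#include "arm_nnfunctions.h"
#include <stddef.h>

#define CH_IN      4
#define CH_OUT     8
#define DIM_IN     16
#define DIM_KERNEL 3
#define DIM_OUT    16   /* with padding 1 and stride 1 */

/* Scratch area per the documented bufferA size. */
static q15_t bufferA[2 * CH_IN * DIM_KERNEL * DIM_KERNEL];

arm_cmsis_nn_status q15_fast_conv_example(const q15_t *im_in, const q15_t *wt,
                                          const q15_t *bias, q15_t *im_out)
{
    return arm_convolve_HWC_q15_fast(im_in, DIM_IN, CH_IN, wt, CH_OUT, DIM_KERNEL,
                                     /*padding*/ 1, /*stride*/ 1, bias,
                                     /*bias_shift*/ 0, /*out_shift*/ 8,
                                     im_out, DIM_OUT, bufferA, /*bufferB*/ NULL);
}
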
+ * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 2 + * ch_im_out is multiple of 2 + */ + +arm_cmsis_nn_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Q7 depthwise separable convolution function (non-square shape) + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in_x input tensor dimension x + * @param[in] dim_im_in_y input tensor dimension y + * @param[in] ch_im_in number of input tensor channels + * @param[in] wt pointer to kernel weights + * @param[in] ch_im_out number of filters, i.e., output tensor channels + * @param[in] dim_kernel_x filter kernel size x + * @param[in] dim_kernel_y filter kernel size y + * @param[in] padding_x padding sizes x + * @param[in] padding_y padding sizes y + * @param[in] stride_x convolution stride x + * @param[in] stride_y convolution stride y + * @param[in] bias pointer to bias + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in,out] Im_out pointer to output tensor + * @param[in] dim_im_out_x output tensor dimension x + * @param[in] dim_im_out_y output tensor dimension y + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] bufferB pointer to buffer space for output + * @return The function returns either + * ARM_CMSIS_NN_ARG_ERROR or ARM_CMSIS_NN_SUCCESS based on the outcome of input arguments + * constraints checking. + * + * This function is the version with full list of optimization tricks, but with + * some contraints: + * ch_im_in is multiple of 2 + * ch_im_out is multiple of 2 + */ +arm_cmsis_nn_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB); + +/** + * @brief Wrapper function to pick the right optimized s8 depthwise convolution function + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * dw_conv_params->dilation is not used. + * Range of dw_conv_params->input_offset : [-127, 128] + * Range of dw_conv_params->output_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. 
Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in, out] output_data Output data pointer. Data type: int8 + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful completion. + * + * @details + * - Supported framework: TensorFlow Lite + * - Picks one of the the following functions + * -# arm_depthwise_conv_s8() + * -# arm_depthwise_conv_3x3_s8() - Cortex-M CPUs with DSP extension only + * -# arm_depthwise_conv_s8_opt() + * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + * - Check details of arm_depthwise_conv_s8_opt() for potential data that can be accessed outside of the + * boundary. + */ +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s8() + * + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * Range of dw_conv_params->input_offset : [-127, 128] + * Range of dw_conv_params->input_offset : [-128, 127] + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] + * @return Size of additional memory required for optimizations in bytes. + * + */ +int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Basic s8 depthwise convolution function that doesn't have any constraints on the input dimensions. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required exists if additional memory is. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * dw_conv_params->dilation is not used. + * Range of dw_conv_params->input_offset : [-127, 128] + * Range of dw_conv_params->input_offset : [-128, 127] + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Batch argument N is not used. + * @param[in] input_data Input (activation) data pointer. 
Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[in, out] output_data Output data pointer. Data type: int8 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + */ +arm_cmsis_nn_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Basic s16 depthwise convolution function that doesn't have any constraints on the input dimensions. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * exists if additional memory is. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * conv_params->input_offset : Not used + * conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Batch argument N is not used. + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [N, H, W, C_OUT] + * @param[in, out] output_data Output data pointer. Data type: int16 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + * - q15 is used as data type eventhough it is s16 data. It is done so to be consistent with existing APIs. + */ +arm_cmsis_nn_status arm_depthwise_conv_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data); + +/** + * @brief Wrapper function to pick the right optimized s16 depthwise convolution function + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if required. 
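
An illustrative driver for the s8 depthwise wrapper documented above (not part of the patch): the wrapper selects the 3x3 or opt kernel when the shapes allow it, so the caller only sizes the scratch buffer once via arm_depthwise_conv_wrapper_s8_get_buffer_size(). Shapes, offsets and the quantization arrays below are placeholder assumptions; struct fields follow arm_nn_types.h.

#include "arm_nnfunctions.h"
#include <stdlib.h>

arm_cmsis_nn_status dw_conv_example(const q7_t *input, const q7_t *kernel,
                                    const int32_t *bias, q7_t *output)
{
    /* Assumed shapes: 8x8 input, 16 channels, 3x3 depthwise filter, ch_mult = 1. */
    const cmsis_nn_dims input_dims  = {.n = 1, .h = 8, .w = 8, .c = 16};
    const cmsis_nn_dims filter_dims = {.n = 1, .h = 3, .w = 3, .c = 16};
    const cmsis_nn_dims bias_dims   = {.n = 1, .h = 1, .w = 1, .c = 16};
    const cmsis_nn_dims output_dims = {.n = 1, .h = 8, .w = 8, .c = 16};

    static int32_t mult[16], shift[16]; /* per-channel requantization values, filled elsewhere */
    const cmsis_nn_per_channel_quant_params quant_params = {.multiplier = mult, .shift = shift};

    const cmsis_nn_dw_conv_params dw_params = {
        .input_offset = 0, .output_offset = 0, .ch_mult = 1,
        .stride = {1, 1}, .padding = {1, 1}, .dilation = {1, 1},
        .activation = {-128, 127}};

    /* Size and allocate the optional scratch buffer for the picked kernel. */
    cmsis_nn_context ctx = {NULL, 0};
    ctx.size = arm_depthwise_conv_wrapper_s8_get_buffer_size(&dw_params, &input_dims,
                                                             &filter_dims, &output_dims);
    if (ctx.size > 0) {
        ctx.buf = malloc((size_t)ctx.size);
    }

    arm_cmsis_nn_status status = arm_depthwise_conv_wrapper_s8(&ctx, &dw_params, &quant_params,
                                                               &input_dims, input, &filter_dims, kernel,
                                                               &bias_dims, bias, &output_dims, output);
    free(ctx.buf);
    return status;
}
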
+ * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * dw_conv_params->dilation is not used. + * Range of dw_conv_params->input_offset : Not used + * Range of dw_conv_params->output_offset : Not used + * @param[in] quant_params Per-channel quantization info. + * It contains the multiplier and shift values to be applied to each + * output channel + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * @param[in] bias_data Bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in, out] output_data Output data pointer. Data type: int16 + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful completion. + * + * @details + * - Supported framework: TensorFlow Lite + * - Picks one of the the following functions + * -# arm_depthwise_conv_s16() + * -# arm_depthwise_conv_fast_s16() - Cortex-M CPUs with DSP extension only + * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + */ +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data); + +/** + * @brief Get size of additional buffer required by arm_depthwise_conv_wrapper_s16() + * + * @param[in] dw_conv_params Depthwise convolution parameters (e.g. strides, dilations, pads,...) + * Range of dw_conv_params->input_offset : Not used + * Range of dw_conv_params->input_offset : Not used + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Batch argument N is not used and assumed to be 1. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @param[in] output_dims Output tensor dimensions. Format: [1, H, W, C_OUT] + * @return Size of additional memory required for optimizations in bytes. + * + */ +int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims); + +/** + * @brief Optimized s16 depthwise convolution function with constraint that in_channel equals out_channel. + * Refer arm_depthwise_conv_s16() for function argument details. 
+ * + * @return The function returns one of the following + * ARM_CMSIS_NN_ARG_ERROR - ctx-buff == NULL and + * arm_depthwise_conv_fast_s16_get_buffer_size() > 0 or + * input channel != output channel or + * ch_mult != 1 + * + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @details + * - Supported framework: TensorFlow Lite + * - The following constrains on the arguments apply + * -# Number of input channel equals number of output channels or ch_mult equals 1 + * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + * - Reccomended when number of channels is 4 or greater. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_fast_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data); + +/** + * @brief Get the required buffer size for optimized s16 depthwise convolution + * function with constraint that in_channel equals out_channel. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] + * Batch argument N is not used. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_depthwise_conv_fast_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @brief Optimized s8 depthwise convolution function for 3x3 kernel size with some constraints on + * the input arguments(documented below). Refer arm_depthwise_conv_s8() for function + * argument details. + * + * @return The function returns one of the following + * ARM_CMSIS_NN_ARG_ERROR - Unsupported dimension of tensors + * - Unsupported pad size along the x axis + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @details + * - Supported framework : TensorFlow Lite Micro + * - The following constrains on the arguments apply + * -# Number of input channel equals number of output channels + * -# Filter height and width equals 3 + * -# Padding along x is either 0 or 1. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Optimized s8 depthwise convolution function with constraint that in_channel equals out_channel. + * Refer arm_depthwise_conv_s8() for function argument details. + * + * @return The function returns one of the following + * ARM_CMSIS_NN_ARG_ERROR - input channel != output channel or + * ch_mult != 1 + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read out + * for the following if MVE optimizations(Arm Helium Technology) are used. 
+ * - Output shift + * - Output multiplier + * - Output bias + * - kernel + * @details + * - Supported framework: TensorFlow Lite + * - The following constrains on the arguments apply + * -# Number of input channel equals number of output channels or ch_mult equals 1 + * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + * - Reccomended when number of channels is 4 or greater. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Get the required buffer size for optimized s8 depthwise convolution + * function with constraint that in_channel equals out_channel. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [1, H, W, C_IN] + * Batch argument N is not used. + * @param[in] filter_dims Filter tensor dimensions. Format: [1, H, W, C_OUT] + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims); + +/** + * @defgroup FC Fully-connected Layer Functions + * + * Collection of fully-connected and matrix multiplication functions. + * + * Fully-connected layer is basically a matrix-vector multiplication + * with bias. The matrix is the weights and the input/output vectors + * are the activation values. Supported {weight, activation} precisions + * include {8-bit, 8-bit}, {16-bit, 16-bit}, and {8-bit, 16-bit}. + * + * Here we have two types of kernel functions. The basic function + * implements the function using regular GEMV approach. The opt functions + * operates with weights in interleaved formats. 
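
For completeness, a sketch of calling the constrained s8_opt kernel directly instead of through the wrapper (illustrative only; the helper name and parameter packaging are assumptions of this sketch, and the shapes must respect the in_channel equals out_channel constraint). The caller provides a scratch area sized with arm_depthwise_conv_s8_opt_get_buffer_size().

#include "arm_nnfunctions.h"

arm_cmsis_nn_status dw_conv_s8_opt_example(const cmsis_nn_dw_conv_params *dw_params,
                                           const cmsis_nn_per_channel_quant_params *quant,
                                           const cmsis_nn_dims *input_dims, const q7_t *input,
                                           const cmsis_nn_dims *filter_dims, const q7_t *kernel,
                                           const cmsis_nn_dims *bias_dims, const int32_t *bias,
                                           const cmsis_nn_dims *output_dims, q7_t *output,
                                           void *scratch, int32_t scratch_bytes)
{
    /* 'scratch' must hold at least
       arm_depthwise_conv_s8_opt_get_buffer_size(input_dims, filter_dims) bytes. */
    cmsis_nn_context ctx = {scratch, scratch_bytes};
    return arm_depthwise_conv_s8_opt(&ctx, dw_params, quant, input_dims, input,
                                     filter_dims, kernel, bias_dims, bias,
                                     output_dims, output);
}
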
+ * + */ + +/** + *@brief Q7 basic fully-connected layer function + *@param[in] pV pointer to input vector + *@param[in] pM pointer to matrix weights + *@param[in] dim_vec length of the vector + *@param[in] num_of_rows number of rows in weight matrix + *@param[in] bias_shift amount of left-shift for bias + *@param[in] out_shift amount of right-shift for output + *@param[in] bias pointer to bias + *@param[in,out] pOut pointer to output vector + *@param[in,out] vec_buffer pointer to buffer space for input + *@return The function returns ARM_CMSIS_NN_SUCCESS + * + */ + +arm_cmsis_nn_status arm_fully_connected_q7(const q7_t *pV, + const q7_t *pM, const uint16_t dim_vec, const uint16_t num_of_rows, const uint16_t bias_shift, const uint16_t out_shift, - const q15_t *bias, - q15_t *pOut, + const q7_t *bias, + q7_t *pOut, q15_t *vec_buffer); - /** - * @brief Mixed Q15-Q7 fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_mat_q7_vec_q15(const q15_t *pV, - const q7_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q15_t *pOut, - q15_t *vec_buffer); - - /** - * @brief Mixed Q15-Q7 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - */ - - arm_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV, - const q7_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q15_t *pOut, - q15_t *vec_buffer); - - /** - * @brief Matrix-Multiplication Kernels for Convolution - * - * These functions are used within convolution layer functions for - * matrix multiplication. - * - * The implementation is similar to CMSIS-DSP arm_mat_mult functions - * with one Q7 and one Q15 operands. The Q15 operand is the im2col - * output which is always with 2 columns. 
- * - */ - - /** - * @brief Matrix-multiplication function for convolution - * @param[in] pA pointer to operand A - * @param[in] pInBuffer pointer to operand B, always conssists of 2 vectors - * @param[in] ch_im_out numRow of A - * @param[in] numCol_A numCol of A - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias the bias - * @param[in,out] pOut pointer to output - * @return The function returns the incremented output pointer - */ - - q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t *pA, - const q15_t *pInBuffer, - const uint16_t ch_im_out, - const uint16_t numCol_A, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q7_t *pOut); - /** - * @brief Matrix-multiplication function for convolution with per-channel requantization. - * @param[in] input_a pointer to operand A - * @param[in] input_b pointer to operand B, always consists of 2 vectors. - * @param[in] output_ch number of rows of A - * @param[in] out_shift pointer to per output channel requantization shift parameter. - * @param[in] out_mult pointer to per output channel requantization multiplier parameter. - * @param[in] out_offset output tensor offset. - * @param[in] activation_min minimum value to clamp the output to. Range : int8 - * @param[in] activation_max maximum value to clamp the output to. Range : int8 - * @param[in] num_col_a number of columns of A - * @param[in] output_bias per output channel bias. Range : int32 - * @param[in,out] out_0 pointer to output - * @return The function returns one of the two - * 1. The incremented output pointer for a successful operation or - * 2. NULL if implementation is not available. - * - * @details This function does the matrix multiplication of weight matrix for all output channels - * with 2 columns from im2col and produces two elements/output_channel. The outputs are - * clamped in the range provided by activation min and max. - * Supported framework: TensorFlow Lite micro. - */ - q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, - const q15_t *input_b, - const uint16_t output_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int16_t activation_min, - const int16_t activation_max, - const uint16_t num_col_a, - const int32_t *const output_bias, - q7_t *out_0); - - /** - * @brief Matrix-multiplication of re-ordered input B with A. - * - * @details For arguments, refer arm_nn_mat_mult_kernel_s8_s16. The re-ordering is a consequence - * of sign extension done by the SXTB16 command on input_b. The outputs are clamped in the range - * provided by activation min and max. 
- * * @details - * - Supported framework : TensorFlow Lite Micro - * - The following constrains on the arguments apply - * -# num_col_a is a multiple of 4 - * -# output_ch is a multiple of 2 - * - */ - q7_t *arm_nn_mat_mult_kernel_s8_s16_reordered(const q7_t *input_a, - const q15_t *input_b, - const uint16_t output_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int16_t activation_min, - const int16_t activation_max, - const uint16_t num_col_a, - const int32_t *const output_bias, - q7_t *out_0); - - /** - *@brief Matrix-multiplication function for convolution with reordered columns - *@param[in] pA pointer to operand A - *@param[in] pInBuffer pointer to operand B, always conssists of 2 vectors - *@param[in] ch_im_out numRow of A - *@param[in] numCol_A numCol of A - *@param[in] bias_shift amount of left-shift for bias - *@param[in] out_shift amount of right-shift for output - *@param[in] bias the bias - *@param[in,out] pOut pointer to output - *@return The function returns the incremented output pointer - * - *@details This function assumes that data in pInBuffer are reordered - */ - q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t *pA, - const q15_t *pInBuffer, - const uint16_t ch_im_out, - const uint16_t numCol_A, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q7_t *pOut); +/** + * @brief Basic s8 Fully Connected function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] fc_params Fully Connected layer parameters. + * Range of fc_params->input_offset : [-127, 128] + * fc_params->filter_offset : 0 + * Range of fc_params->output_offset : [-128, 127] + * @param[in] quant_params Per-tensor quantization info. + * It contains the multiplier and shift values to be applied to the output tensor. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Input dimension is taken as Nx(H * W * C_IN) + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C] + * N : accumulation depth and equals (H * W * C_IN) from input_dims + * C : output depth and equals C_OUT in output_dims + * H & W : Not used + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * N, H, W : Not used + * @param[in] bias_data Bias data pointer. Data type: int32 + * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] + * N : Batches + * C_OUT : Output depth + * H & W : Not used. + * @param[in, out] output_data Output data pointer. Data type: int8 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + * - q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. 
+ */ +arm_cmsis_nn_status arm_fully_connected_s8(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Get the required buffer size for S8 basic fully-connected and + * matrix multiplication layer function for TF Lite + * @param[in] filter_dims dimension of filter + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims); + +/** + * @brief Basic s16 Fully Connected function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] fc_params Fully Connected layer parameters. + * fc_params->input_offset : 0 + * fc_params->filter_offset : 0 + * fc_params->output_offset : 0 + * @param[in] quant_params Per-tensor quantization info. + * It contains the multiplier and shift values to be applied to the output tensor. + * @param[in] input_dims Input (activation) tensor dimensions. Format: [N, H, W, C_IN] + * Input dimension is taken as Nx(H * W * C_IN) + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Two dimensional filter dimensions. Format: [N, C] + * N : accumulation depth and equals (H * W * C_IN) from input_dims + * C : output depth and equals C_OUT in output_dims + * H & W : Not used + * @param[in] filter_data Filter data pointer. Data type: int8 + * @param[in] bias_dims Bias tensor dimensions. Format: [C_OUT] + * N, H, W : Not used + * @param[in] bias_data Bias data pointer. Data type: int64 + * @param[in] output_dims Output tensor dimensions. Format: [N, C_OUT] + * N : Batches + * C_OUT : Output depth + * H & W : Not used. + * @param[in, out] output_data Output data pointer. Data type: int16 + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * - Supported framework: TensorFlow Lite + * - q15 is used as data type eventhough it is s16 data. It is done so to be consistent with existing APIs. 
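
A hedged sketch of a typical arm_fully_connected_s8() call (not part of the patch): one batch with 64 input activations and 10 outputs, per-tensor quantization, and filter_offset fixed at 0 as documented. The offsets, multiplier and shift values are placeholders; struct fields are assumed to match arm_nn_types.h.

#include "arm_nnfunctions.h"
#include <stdlib.h>

arm_cmsis_nn_status fc_s8_example(const q7_t *input, const q7_t *weights,
                                  const int32_t *bias, q7_t *output)
{
    const cmsis_nn_dims input_dims  = {.n = 1, .h = 1, .w = 1, .c = 64};  /* N x (H*W*C_IN) */
    const cmsis_nn_dims filter_dims = {.n = 64, .h = 1, .w = 1, .c = 10}; /* N: accumulation depth, C: C_OUT */
    const cmsis_nn_dims bias_dims   = {.n = 1, .h = 1, .w = 1, .c = 10};
    const cmsis_nn_dims output_dims = {.n = 1, .h = 1, .w = 1, .c = 10};

    const cmsis_nn_fc_params fc_params = {
        .input_offset = 0, .filter_offset = 0, .output_offset = 0,
        .activation = {-128, 127}};
    /* Placeholder per-tensor requantization values; a converter supplies real ones. */
    const cmsis_nn_per_tensor_quant_params quant_params = {.multiplier = 1073741824, .shift = -3};

    cmsis_nn_context ctx = {NULL, 0};
    ctx.size = arm_fully_connected_s8_get_buffer_size(&filter_dims);
    if (ctx.size > 0) {
        ctx.buf = malloc((size_t)ctx.size);
    }

    arm_cmsis_nn_status status = arm_fully_connected_s8(&ctx, &fc_params, &quant_params,
                                                        &input_dims, input, &filter_dims, weights,
                                                        &bias_dims, bias, &output_dims, output);
    free(ctx.buf);
    return status;
}
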
+ */ +arm_cmsis_nn_status arm_fully_connected_s16(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data); + +/** + * @brief Get the required buffer size for S16 basic fully-connected and + * matrix multiplication layer function for TF Lite + * @param[in] filter_dims dimension of filter + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_fully_connected_s16_get_buffer_size(const cmsis_nn_dims *filter_dims); + +/** + * @brief Q7 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ + +arm_cmsis_nn_status arm_fully_connected_q7_opt(const q7_t *pV, + const q7_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q7_t *pOut, + q15_t *vec_buffer); + +/** + * @brief Q15 basic fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ + +arm_cmsis_nn_status arm_fully_connected_q15(const q15_t *pV, + const q15_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q15_t *bias, + q15_t *pOut, + q15_t *vec_buffer); + +/** + * @brief Q15 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ + +arm_cmsis_nn_status arm_fully_connected_q15_opt(const q15_t *pV, + const q15_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q15_t *bias, + q15_t *pOut, + q15_t *vec_buffer); + +/** + * @brief Mixed Q15-Q7 fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * 
@param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ + +arm_cmsis_nn_status arm_fully_connected_mat_q7_vec_q15(const q15_t *pV, + const q7_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q15_t *pOut, + q15_t *vec_buffer); + +/** + * @brief Mixed Q15-Q7 opt fully-connected layer function + * @param[in] pV pointer to input vector + * @param[in] pM pointer to matrix weights + * @param[in] dim_vec length of the vector + * @param[in] num_of_rows number of rows in weight matrix + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias pointer to bias + * @param[in,out] pOut pointer to output vector + * @param[in,out] vec_buffer pointer to buffer space for input + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ + +arm_cmsis_nn_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV, + const q7_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q15_t *pOut, + q15_t *vec_buffer); + +/** + * @brief Matrix-Multiplication Kernels for Convolution + * + * These functions are used within convolution layer functions for + * matrix multiplication. + * + * The implementation is similar to CMSIS-DSP arm_mat_mult functions + * with one Q7 and one Q15 operands. The Q15 operand is the im2col + * output which is always with 2 columns. + * + */ + +/** + * @brief Matrix-multiplication function for convolution + * @param[in] pA pointer to operand A + * @param[in] pInBuffer pointer to operand B, always conssists of 2 vectors + * @param[in] ch_im_out numRow of A + * @param[in] numCol_A numCol of A + * @param[in] bias_shift amount of left-shift for bias + * @param[in] out_shift amount of right-shift for output + * @param[in] bias the bias + * @param[in,out] pOut pointer to output + * @return The function returns the incremented output pointer + */ + +q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t *pA, + const q15_t *pInBuffer, + const uint16_t ch_im_out, + const uint16_t numCol_A, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q7_t *pOut); #ifdef __cplusplus } @@ -1423,676 +1729,925 @@ extern "C" */ #ifdef __cplusplus -extern "C" -{ +extern "C" { #endif - /** - * @defgroup BasicMath Basic math functions - * - * Element wise add and multiplication functions. - * - */ - - /** - * @brief s8 element wise add of two vectors - * @param[in] input_1_vect pointer to input vector 1 - * @param[in] input_2_vect pointer to input vector 2 - * @param[in] input_1_offset offset for input 1. Range: Range: -127 to 128 - * @param[in] input_1_mult multiplier for input 1 - * @param[in] input_1_shift shift for input 1 - * @param[in] input_2_offset offset for input 2. 
Range: Range: -127 to 128 - * @param[in] input_2_mult multiplier for input 2 - * @param[in] input_2_shift shift for input 2 - * @param[in] left_shift input left shift - * @param[in,out] output pointer to output vector - * @param[in] out_offset output offset - * @param[in] out_mult output multiplier - * @param[in] out_shift output shift - * @param[in] out_activation_min minimum value to clamp output to - * @param[in] out_activation_max maximum value to clamp output to - * @param[in] block_size number of samples - * @return The function returns ARM_MATH_SUCCESS - */ - arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, - const int8_t *input_2_vect, - const int32_t input_1_offset, - const int32_t input_1_mult, - const int32_t input_1_shift, - const int32_t input_2_offset, - const int32_t input_2_mult, - const int32_t input_2_shift, - const int32_t left_shift, - int8_t *output, - const int32_t out_offset, - const int32_t out_mult, - const int32_t out_shift, - const int32_t out_activation_min, - const int32_t out_activation_max, - const uint32_t block_size); - - /** - * @brief s8 element wise multiplication - * @param[in] input_1_vect pointer to input vector 1 - * @param[in] input_2_vect pointer to input vector 2 - * @param[in] input_1_offset offset for input 1. Range: Range: -127 to 128 - * @param[in] input_2_offset offset for input 2. Range: Range: -127 to 128 - * @param[in,out] output pointer to output vector - * @param[in] out_offset output offset - * @param[in] out_mult output multiplier - * @param[in] out_shift output shift - * @param[in] out_activation_min minimum value to clamp output to - * @param[in] out_activation_max maximum value to clamp output to - * @param[in] block_size number of samples - * @return The function returns ARM_MATH_SUCCESS - * - * @details Supported framework: TensorFlow Lite micro - */ - arm_status arm_elementwise_mul_s8(const int8_t *input_1_vect, - const int8_t *input_2_vect, - const int32_t input_1_offset, - const int32_t input_2_offset, - int8_t *output, - const int32_t out_offset, - const int32_t out_mult, - const int32_t out_shift, - const int32_t out_activation_min, - const int32_t out_activation_max, - const uint32_t block_size); - /** - * @defgroup Acti Activation Functions - * - * Perform activation layers, including ReLU (Rectified Linear Unit), - * sigmoid and tanh - * - */ - - /** - * @brief Q7 RELU function - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @return none. - */ - - void arm_relu_q7(q7_t *data, uint16_t size); - - /** - * @brief s8 ReLU6 function - * @param[in,out] data pointer to input - * @param[in] size number of elements - */ - - void arm_relu6_s8(q7_t *data, uint16_t size); - - /** - * @brief Q15 RELU function - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @return none. - */ - - void arm_relu_q15(q15_t *data, uint16_t size); - - /** - * @brief Q7 neural network activation function using direct table look-up - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 - * @param[in] type type of activation functions - * @return none. 
- */ - - void arm_nn_activations_direct_q7(q7_t *data, uint16_t size, uint16_t int_width, arm_nn_activation_type type); - - /** - * @brief Q15 neural network activation function using direct table look-up - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 - * @param[in] type type of activation functions - * @return none. - * - * @details - * - * This is the direct table look-up approach. - * - * Assume here the integer part of the fixed-point is <= 3. - * More than 3 just not making much sense, makes no difference with - * saturation followed by any of these activation functions. - */ - - void arm_nn_activations_direct_q15(q15_t *data, uint16_t size, uint16_t int_width, arm_nn_activation_type type); - - /** - * @defgroup Pooling Pooling Functions - * - * Perform pooling functions, including max pooling and average pooling - * - */ - - /** - * @brief Q7 max pooling function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] Im_out pointer to output tensor - * @return none. - * - */ - - void arm_maxpool_q7_HWC(q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const uint16_t dim_im_out, - q7_t *bufferA, - q7_t *Im_out); - - /** - * @brief Q7 average pooling function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] Im_out pointer to output tensor - * @return none. - * - */ - - void arm_avepool_q7_HWC(q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const uint16_t dim_im_out, - q7_t *bufferA, - q7_t *Im_out); - - /** - * @brief s8 average pooling function. - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if an additional buffer is required. - * @param[in] pool_params Pooling parameters - * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] - * Argument 'N' is not used. - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] - * Argument N and C are not used. - * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] - * Argument N is not used. - * C_OUT equals C_IN. - * @param[in, out] output_data Output data pointer. 
Data type: int8 - * @return The function returns - * ARM_MATH_SUCCESS - Successful operation - * - * @details - * - Supported Framework: TensorFlow Lite - * - */ - arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - - /** - * @brief Get the required buffer size for S8 average pooling function - * @param[in] dim_dst_width output tensor dimension - * @param[in] ch_src number of input tensor channels - * @return The function returns required buffer size in bytes - * - */ - int32_t arm_avgpool_s8_get_buffer_size(const int dim_dst_width, const int ch_src); - - /** - * @brief s8 max pooling function. - * - * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function - * definition file to see if an additional buffer is required. - * Optional function {API}_get_buffer_size() provides the buffer - * size if an additional buffer is required. - * @param[in] pool_params Pooling parameters - * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] - * Argument 'N' is not used. - * @param[in] input_data Input (activation) data pointer. Data type: int8 - * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] - * Argument N and C are not used. - * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] - * Argument N is not used. - * C_OUT equals C_IN. - * @param[in, out] output_data Output data pointer. Data type: int8 - * @return The function returns - * ARM_MATH_SUCCESS - Successful operation - * - * @details - * - Supported Framework: TensorFlow Lite - * - */ - arm_status arm_max_pool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *output_data); - /** - * @defgroup Softmax Softmax Functions - * - * EXP(2) based softmax functions. - * - */ - - /** - * @brief Q7 softmax function - * @param[in] vec_in pointer to input vector - * @param[in] dim_vec input vector dimension - * @param[out] p_out pointer to output vector - * - * @note This function is an optimized version which is not bit-accurate with - * TensorFlow Lite's kernel - * - */ - - void arm_softmax_q7(const q7_t *vec_in, const uint16_t dim_vec, q7_t *p_out); - - /** - * @brief Q7 softmax function with batch parameter - * @param[in] vec_in pointer to input vector - * @param[in] nb_batches number of batches - * @param[in] dim_vec input vector dimension - * @param[out] p_out pointer to output vector - * @return none. - * - * @note This function is an optimized version which is not bit-accurate with - * TensorFlow Lite's kernel - * - */ - - void arm_softmax_with_batch_q7(const q7_t *vec_in, const uint16_t nb_batches, const uint16_t dim_vec, q7_t *p_out); - /** - * @brief Q15 softmax function - * @param[in] vec_in pointer to input vector - * @param[in] dim_vec input vector dimension - * @param[out] p_out pointer to output vector - * @return none. 
- * - * @note This function is an optimized version which is not bit-accurate with - * TensorFlow Lite's kernel - * - */ - - void arm_softmax_q15(const q15_t *vec_in, const uint16_t dim_vec, q15_t *p_out); - - /** - * @brief S8 softmax function - * @param[in] input Pointer to the input tensor - * @param[in] num_rows Number of rows in the input tensor - * @param[in] row_size Number of elements in each input row - * @param[in] mult Input quantization multiplier - * @param[in] shift Input quantization shift within the range [0, 31] - * @param[in] diff_min Minimum difference with max in row. Used to check if - * the quantized exponential operation can be performed - * @param[out] output Pointer to the output tensor - * - * @note Supported framework: TensorFlow Lite micro (bit-accurate) - * - */ - - void arm_softmax_s8(const int8_t *input, - const int32_t num_rows, - const int32_t row_size, - const int32_t mult, - const int32_t shift, - const int32_t diff_min, - int8_t *output); - - /** - * @brief U8 softmax function - * @param[in] input Pointer to the input tensor - * @param[in] num_rows Number of rows in the input tensor - * @param[in] row_size Number of elements in each input row - * @param[in] mult Input quantization multiplier - * @param[in] shift Input quantization shift within the range [0, 31] - * @param[in] diff_min Minimum difference with max in row. Used to check if - * the quantized exponential operation can be performed - * @param[out] output Pointer to the output tensor - * - * @note Supported framework: TensorFlow Lite micro (bit-accurate) - * - */ - - void arm_softmax_u8(const uint8_t *input, +/** + * @defgroup BasicMath Basic math functions + * + * Elementwise add and multiplication functions. + * + */ + +/** + * @brief s8 elementwise add of two vectors + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Range: -127 to 128 + * @param[in] input_1_mult multiplier for input 1 + * @param[in] input_1_shift shift for input 1 + * @param[in] input_2_offset offset for input 2. Range: -127 to 128 + * @param[in] input_2_mult multiplier for input 2 + * @param[in] input_2_shift shift for input 2 + * @param[in] left_shift input left shift + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Range: -128 to 127 + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -128 + * @param[in] out_activation_max maximum value to clamp output to. Max: 127 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + */ +arm_cmsis_nn_status arm_elementwise_add_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @brief s16 elementwise add of two vectors + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Not used. 
+ * @param[in] input_1_mult multiplier for input 1 + * @param[in] input_1_shift shift for input 1 + * @param[in] input_2_offset offset for input 2. Not used. + * @param[in] input_2_mult multiplier for input 2 + * @param[in] input_2_shift shift for input 2 + * @param[in] left_shift input left shift + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Not used. + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -32768 + * @param[in] out_activation_max maximum value to clamp output to. Max: 32767 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + */ +arm_cmsis_nn_status arm_elementwise_add_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @brief s8 elementwise multiplication + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Range: -127 to 128 + * @param[in] input_2_offset offset for input 2. Range: -127 to 128 + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Range: -128 to 127 + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -128 + * @param[in] out_activation_max maximum value to clamp output to. Max: 127 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_elementwise_mul_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @brief s16 elementwise multiplication + * @param[in] input_1_vect pointer to input vector 1 + * @param[in] input_2_vect pointer to input vector 2 + * @param[in] input_1_offset offset for input 1. Not used. + * @param[in] input_2_offset offset for input 2. Not used. + * @param[in,out] output pointer to output vector + * @param[in] out_offset output offset. Not used. + * @param[in] out_mult output multiplier + * @param[in] out_shift output shift + * @param[in] out_activation_min minimum value to clamp output to. Min: -32768 + * @param[in] out_activation_max maximum value to clamp output to. 
Max: 32767 + * @param[in] block_size number of samples + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details Supported framework: TensorFlow Lite micro + */ +arm_cmsis_nn_status arm_elementwise_mul_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size); + +/** + * @defgroup Acti Activation Functions + * + * Perform activation layers, including ReLU (Rectified Linear Unit), + * sigmoid and tanh + * + */ + +/** + * @brief Q7 RELU function + * @param[in,out] data pointer to input + * @param[in] size number of elements + */ + +void arm_relu_q7(q7_t *data, uint16_t size); + +/** + * @brief s8 ReLU6 function + * @param[in,out] data pointer to input + * @param[in] size number of elements + */ + +void arm_relu6_s8(q7_t *data, uint16_t size); + +/** + * @brief Q15 RELU function + * @param[in,out] data pointer to input + * @param[in] size number of elements + */ + +void arm_relu_q15(q15_t *data, uint16_t size); + +/** + * @brief Q7 neural network activation function using direct table look-up + * @param[in,out] data pointer to input + * @param[in] size number of elements + * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 + * @param[in] type type of activation functions + */ + +void arm_nn_activations_direct_q7(q7_t *data, uint16_t size, uint16_t int_width, arm_nn_activation_type type); + +/** + * @brief Q15 neural network activation function using direct table look-up + * @param[in,out] data pointer to input + * @param[in] size number of elements + * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 + * @param[in] type type of activation functions + * + * @details + * + * This is the direct table look-up approach. + * + * Assume here the integer part of the fixed-point is <= 3. + * More than 3 just not making much sense, makes no difference with + * saturation followed by any of these activation functions. 
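/*
 * Editorial sketch (not part of the patch): a minimal usage example for the
 * element-wise add and ReLU kernels declared above. The offset, multiplier and
 * shift values below are placeholders that would normally come from the model's
 * quantization parameters; only the function signatures are taken from this header.
 */
static void example_add_then_relu(const int8_t *a, const int8_t *b, int8_t *out, int32_t block_size)
{
    arm_cmsis_nn_status status = arm_elementwise_add_s8(
        a, b,
        0,          /* input_1_offset */
        1073741824, /* input_1_mult  (placeholder) */
        0,          /* input_1_shift */
        0,          /* input_2_offset */
        1073741824, /* input_2_mult  (placeholder) */
        0,          /* input_2_shift */
        0,          /* left_shift */
        out,
        0,          /* out_offset */
        1073741824, /* out_mult  (placeholder) */
        -1,         /* out_shift (placeholder) */
        -128, 127,  /* out_activation_min / out_activation_max */
        block_size);

    if (status == ARM_CMSIS_NN_SUCCESS)
    {
        arm_relu_q7(out, (uint16_t)block_size); /* in-place ReLU on the int8 result */
    }
}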
+ */ + +void arm_nn_activations_direct_q15(q15_t *data, uint16_t size, uint16_t int_width, arm_nn_activation_type type); + +/** + * @defgroup Pooling Pooling Functions + * + * Perform pooling functions, including max pooling and average pooling + * + */ + +/** + * @brief Q7 max pooling function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] Im_out pointer to output tensor + * + */ + +void arm_maxpool_q7_HWC(q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const uint16_t dim_im_out, + q7_t *bufferA, + q7_t *Im_out); + +/** + * @brief Q7 average pooling function + * @param[in] Im_in pointer to input tensor + * @param[in] dim_im_in input tensor dimension + * @param[in] ch_im_in number of input tensor channels + * @param[in] dim_kernel filter kernel size + * @param[in] padding padding sizes + * @param[in] stride convolution stride + * @param[in] dim_im_out output tensor dimension + * @param[in,out] bufferA pointer to buffer space for input + * @param[in,out] Im_out pointer to output tensor + * + */ + +void arm_avepool_q7_HWC(q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const uint16_t dim_im_out, + q7_t *bufferA, + q7_t *Im_out); + +/** + * @brief s8 average pooling function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] pool_params Pooling parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Argument 'N' is not used. + * @param[in] input_data Input (activation) data pointer. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] + * Argument N and C are not used. + * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] + * Argument N is not used. + * C_OUT equals C_IN. + * @param[in, out] output_data Output data pointer. Data type: int8 + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @details + * - Supported Framework: TensorFlow Lite + * + */ +arm_cmsis_nn_status arm_avgpool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief Get the required buffer size for S8 average pooling function + * @param[in] dim_dst_width output tensor dimension + * @param[in] ch_src number of input tensor channels + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_avgpool_s8_get_buffer_size(const int dim_dst_width, const int ch_src); + +/** + * @brief s16 average pooling function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). 
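/*
 * Editorial sketch (not part of the patch): wiring up arm_avgpool_s8 with its
 * optional scratch buffer. The tensor shapes are arbitrary sample values, and the
 * cmsis_nn_dims / cmsis_nn_pool_params / cmsis_nn_context field names are assumed
 * to match their definitions in arm_nn_types.h. arm_max_pool_s8 takes the same
 * argument set, so the same wiring applies there.
 */
static arm_cmsis_nn_status example_avgpool_2x2(const int8_t *in, int8_t *out)
{
    const cmsis_nn_dims input_dims  = {.n = 1, .h = 4, .w = 4, .c = 8};
    const cmsis_nn_dims filter_dims = {.h = 2, .w = 2};
    const cmsis_nn_dims output_dims = {.n = 1, .h = 2, .w = 2, .c = 8};

    cmsis_nn_pool_params pool_params;
    pool_params.stride.w = 2;
    pool_params.stride.h = 2;
    pool_params.padding.w = 0;
    pool_params.padding.h = 0;
    pool_params.activation.min = -128;
    pool_params.activation.max = 127;

    /* A scratch buffer is only required on some targets; size 0 is returned otherwise. */
    static int8_t scratch[256];
    cmsis_nn_context ctx;
    ctx.size = arm_avgpool_s8_get_buffer_size(output_dims.w, input_dims.c);
    if (ctx.size > (int32_t)sizeof(scratch))
    {
        return ARM_CMSIS_NN_ARG_ERROR;
    }
    ctx.buf = (ctx.size > 0) ? scratch : NULL;

    return arm_avgpool_s8(&ctx, &pool_params, &input_dims, in,
                          &filter_dims, &output_dims, out);
}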
Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] pool_params Pooling parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Argument 'N' is not used. + * @param[in] input_data Input (activation) data pointer. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] + * Argument N and C are not used. + * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] + * Argument N is not used. + * C_OUT equals C_IN. + * @param[in, out] output_data Output data pointer. Data type: int16 + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful operation + * ARM_CMSIS_NN_ARG_ERROR - In case of invalid arguments + * + * @details + * - Supported Framework: TensorFlow Lite + * + */ +arm_cmsis_nn_status arm_avgpool_s16(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int16_t *input_data, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int16_t *output_data); + +/** + * @brief Get the required buffer size for S16 average pooling function + * @param[in] dim_dst_width output tensor dimension + * @param[in] ch_src number of input tensor channels + * @return The function returns required buffer size in bytes + * + */ +int32_t arm_avgpool_s16_get_buffer_size(const int dim_dst_width, const int ch_src); + +/** + * @brief s8 max pooling function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] pool_params Pooling parameters + * @param[in] input_dims Input (activation) tensor dimensions. Format: [H, W, C_IN] + * Argument 'N' is not used. + * @param[in] input_data Input (activation) data pointer. The input tensor must not + * overlap with the output tensor. Data type: int8 + * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] + * Argument N and C are not used. + * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] + * Argument N is not used. + * C_OUT equals C_IN. + * @param[in, out] output_data Output data pointer. Data type: int8 + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @details + * - Supported Framework: TensorFlow Lite + * + */ +arm_cmsis_nn_status arm_max_pool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief s16 max pooling function. + * + * @param[in, out] ctx Function context (e.g. temporary buffer). Check the function + * definition file to see if an additional buffer is required. + * Optional function {API}_get_buffer_size() provides the buffer + * size if an additional buffer is required. + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] pool_params Pooling parameters + * @param[in] input_dims Input (activation) tensor dimensions. 
Format: [H, W, C_IN] + * Argument 'N' is not used. + * @param[in] src Input (activation) data pointer. The input tensor must not + * overlap with the output tensor. Data type: int16 + * @param[in] filter_dims Filter tensor dimensions. Format: [H, W] + * Argument N and C are not used. + * @param[in] output_dims Output tensor dimensions. Format: [H, W, C_OUT] + * Argument N is not used. + * C_OUT equals C_IN. + * @param[in, out] dst Output data pointer. Data type: int16 + * @return The function returns + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @details + * - Supported Framework: TensorFlow Lite + * + */ +arm_cmsis_nn_status arm_max_pool_s16(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int16_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int16_t *dst); + +/** + * @defgroup Softmax Softmax Functions + * + * EXP(2) based softmax functions. + * + */ + +/** + * @brief Q7 softmax function + * @param[in] vec_in pointer to input vector + * @param[in] dim_vec input vector dimension + * @param[out] p_out pointer to output vector + * + * @note This function is an optimized version which is not bit-accurate with + * TensorFlow Lite's kernel + * + */ + +void arm_softmax_q7(const q7_t *vec_in, const uint16_t dim_vec, q7_t *p_out); + +/** + * @brief Q7 softmax function with batch parameter + * @param[in] vec_in pointer to input vector + * @param[in] nb_batches number of batches + * @param[in] dim_vec input vector dimension + * @param[out] p_out pointer to output vector + * + * @note This function is an optimized version which is not bit-accurate with + * TensorFlow Lite's kernel + * + */ + +void arm_softmax_with_batch_q7(const q7_t *vec_in, const uint16_t nb_batches, const uint16_t dim_vec, q7_t *p_out); +/** + * @brief Q15 softmax function + * @param[in] vec_in pointer to input vector + * @param[in] dim_vec input vector dimension + * @param[out] p_out pointer to output vector + * + * @note This function is an optimized version which is not bit-accurate with + * TensorFlow Lite's kernel + * + */ + +void arm_softmax_q15(const q15_t *vec_in, const uint16_t dim_vec, q15_t *p_out); + +/** + * @brief S8 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. Used to check if + * the quantized exponential operation can be performed + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ +void arm_softmax_s8(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int8_t *output); + +/** + * @brief S8 to s16 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. 
Used to check if + * the quantized exponential operation can be performed + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ +void arm_softmax_s8_s16(const int8_t *input, const int32_t num_rows, const int32_t row_size, const int32_t mult, const int32_t shift, const int32_t diff_min, - uint8_t *output); - - /** - * @brief uint8 depthwise convolution function with asymmetric quantization - * Unless specified otherwise, arguments are mandatory. - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_ch Channels in input tensor - * @param[in] kernel Pointer to kernel weights - * @param[in] kernel_x Width of kernel - * @param[in] kernel_y Height of kernel - * @param[in] ch_mult Number of channel multiplier - * @param[in] pad_x Padding sizes x - * @param[in] pad_y Padding sizes y - * @param[in] stride_x stride along the width - * @param[in] stride_y stride along the height - * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement. - * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement. - * @param[in] bias Pointer to optional bias values. If no bias is - * availble, NULL is expected - * @param[in] input_offset Input tensor zero offset - * @param[in] filter_offset Kernel tensor zero offset - * @param[in] output_offset Output tensor zero offset - * @param[in,out] output Pointer to output tensor - * @param[in] output_x Width of output tensor - * @param[in] output_y Height of output tensor - * @param[in] output_activation_min Minimum value to clamp the output to. Range : {0, 255} - * @param[in] output_activation_max Minimum value to clamp the output to. Range : {0, 255} - * @param[in] out_shift Amount of right-shift for output - * @param[in] out_mult Output multiplier for requantization - * @return The function returns the following - * ARM_MATH_SUCCESS - Successful operation - * - */ - arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_ch, - const uint8_t *kernel, - const uint16_t kernel_x, - const uint16_t kernel_y, - const int16_t ch_mult, - const int16_t pad_x, - const int16_t pad_y, - const int16_t stride_x, - const int16_t stride_y, - const int16_t dilation_x, - const int16_t dilation_y, - const int32_t *bias, - const int32_t input_offset, - const int32_t filter_offset, - const int32_t output_offset, - uint8_t *output, - const uint16_t output_x, - const uint16_t output_y, - const int32_t output_activation_min, - const int32_t output_activation_max, - const int32_t out_shift, - const int32_t out_mult); - - /** - * @defgroup Reshape Reshape Functions - * - */ - - /** - * @brief Reshape a s8 vector into another with different shape - * @param[in] input points to the s8 input vector - * @param[out] output points to the s8 output vector - * @param[in] total_size total size of the input and output vectors in bytes - * - * @note The output is expected to be in a memory area that does not overlap with the input's - * - */ - void arm_reshape_s8(const int8_t *input, int8_t *output, const uint32_t total_size); - - /** - * @defgroup Concatenation Concatenation Functions - * - */ - - /** - * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis - * This function should be called for each input tensor to concatenate. 
The argument offset_x - * will be used to store the input tensor in the correct position in the output tensor - * - * i.e. offset_x = 0 - * for(i = 0 i < num_input_tensors; ++i) - * { - * arm_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x) - * offset_x += input_x[i] - * } - * - * This function assumes that the output tensor has: - * -# The same height of the input tensor - * -# The same number of channels of the input tensor - * -# The same batch size of the input tensor - * - * Unless specified otherwise, arguments are mandatory. - * - * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it - * does not involve any arithmetic operation - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_z Channels in input tensor - * @param[in] input_w Batch size in input tensor - * @param[out] output Pointer to output tensor - * @param[in] output_x Width of output tensor - * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor - * It is user responsibility to provide the correct value - * - * Input constraints - * offset_x is less than output_x - * - */ - void arm_concatenation_s8_x(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint16_t output_x, - const uint32_t offset_x); - - /** - * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis - * This function should be called for each input tensor to concatenate. The argument offset_y - * will be used to store the input tensor in the correct position in the output tensor - * - * i.e. offset_y = 0 - * for(i = 0 i < num_input_tensors; ++i) - * { - * arm_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y) - * offset_y += input_y[i] - * } - * - * This function assumes that the output tensor has: - * -# The same width of the input tensor - * -# The same number of channels of the input tensor - * -# The same batch size of the input tensor - * - * Unless specified otherwise, arguments are mandatory. - * - * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it - * does not involve any arithmetic operation - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_z Channels in input tensor - * @param[in] input_w Batch size in input tensor - * @param[out] output Pointer to output tensor - * @param[in] output_y Height of output tensor - * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor - * It is user responsibility to provide the correct value - * - * Input constraints - * offset_y is less than output_y - * - */ - void arm_concatenation_s8_y(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint16_t output_y, - const uint32_t offset_y); - - /** - * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis - * This function should be called for each input tensor to concatenate. The argument offset_z - * will be used to store the input tensor in the correct position in the output tensor - * - * i.e. 
offset_z = 0 - * for(i = 0 i < num_input_tensors; ++i) - * { - * arm_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z) - * offset_z += input_z[i] - * } - * - * This function assumes that the output tensor has: - * -# The same width of the input tensor - * -# The same height of the input tensor - * -# The same batch size of the input tensor - * - * Unless specified otherwise, arguments are mandatory. - * - * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it - * does not involve any arithmetic operation - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_z Channels in input tensor - * @param[in] input_w Batch size in input tensor - * @param[out] output Pointer to output tensor - * @param[in] output_z Channels in output tensor - * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor - * It is user responsibility to provide the correct value - * - * Input constraints - * offset_z is less than output_z - * - */ - void arm_concatenation_s8_z(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint16_t output_z, - const uint32_t offset_z); - - /** - * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size) - * This function should be called for each input tensor to concatenate. The argument offset_w - * will be used to store the input tensor in the correct position in the output tensor - * - * i.e. offset_w = 0 - * for(i = 0 i < num_input_tensors; ++i) - * { - * arm_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w) - * offset_w += input_w[i] - * } - * - * This function assumes that the output tensor has: - * -# The same width of the input tensor - * -# The same height of the input tensor - * -# The same number o channels of the input tensor - * - * Unless specified otherwise, arguments are mandatory. 
- * - * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it - * does not involve any arithmetic operation - * - * @param[in] input Pointer to input tensor - * @param[in] input_x Width of input tensor - * @param[in] input_y Height of input tensor - * @param[in] input_z Channels in input tensor - * @param[in] input_w Batch size in input tensor - * @param[out] output Pointer to output tensor - * @param[in] offset_w The offset on the W axis to start concatenating the input tensor - * It is user responsibility to provide the correct value - * - */ - void arm_concatenation_s8_w(const int8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_z, - const uint16_t input_w, - int8_t *output, - const uint32_t offset_w); - /** - * @defgroup SVDF SVDF Layer Functions - * - */ - - /** - * @brief s8 SVDF function - * - * @param[in] input_ctx Temporary scratch buffer - * @param[in] output_ctx Temporary output scratch buffer - * @param[in] svdf_params SVDF Parameters - * Range of svdf_params->input_offset : [-128, 127] - * Range of svdf_params->output_offset : [-128, 127] - * @param[in] input_quant_params Input quantization parameters - * @param[in] output_quant_params Output quantization parameters - * @param[in] input_dims Input tensor dimensions - * @param[in] input_data Pointer to input tensor - * @param[in] state_dims State tensor dimensions - * @param[in] state_data Pointer to state tensor - * @param[in] weights_feature_dims Weights (feature) tensor dimensions - * @param[in] weights_feature_data Pointer to the weights (feature) tensor - * @param[in] weights_time_dims Weights (time) tensor dimensions - * @param[in] weights_time_data Pointer to the weights (time) tensor - * @param[in] bias_dims Bias tensor dimensions - * @param[in] bias_data Pointer to bias tensor - * @param[in] output_dims Output tensor dimensions - * @param[out] output_data Pointer to the output tensor - * - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * 1. Supported framework: TensorFlow Lite micro - * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. - * - */ - arm_status arm_svdf_s8(const cmsis_nn_context *input_ctx, - const cmsis_nn_context *output_ctx, - const cmsis_nn_svdf_params *svdf_params, - const cmsis_nn_per_tensor_quant_params *input_quant_params, - const cmsis_nn_per_tensor_quant_params *output_quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *state_dims, - q15_t *state_data, - const cmsis_nn_dims *weights_feature_dims, - const q7_t *weights_feature_data, - const cmsis_nn_dims *weights_time_dims, - const q15_t *weights_time_data, - const cmsis_nn_dims *bias_dims, - const q31_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data); + int16_t *output); + +/** + * @brief S16 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] softmax_params Softmax s16 layer parameters with two pointers to LUTs speficied below. + * For indexing the high 9 bits are used and 7 remaining for interpolation. + * That means 512 entries for the 9-bit indexing and 1 extra for interpolation, i.e. 513 + * values for each LUT. 
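/*
 * Editorial sketch (not part of the patch): calling the bit-accurate s8 softmax on a
 * single row of logits. The mult, shift and diff_min values are placeholders; in a
 * real deployment they are precomputed from the input tensor's quantization scale.
 */
static void example_softmax_row(const int8_t *logits, int8_t *probs, int32_t row_size)
{
    const int32_t mult     = 1077952576; /* placeholder quantized multiplier */
    const int32_t shift    = 23;         /* placeholder shift, within [0, 31] */
    const int32_t diff_min = -248;       /* placeholder minimum diff-with-max */

    arm_softmax_s8(logits, 1 /* num_rows */, row_size, mult, shift, diff_min, probs);
}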
+ * - Lookup table for exp(x), where x uniform distributed between [-10.0 , 0.0] + * - Lookup table for 1 / (1 + x), where x uniform distributed between [0.0 , 1.0] + * @param[out] output Pointer to the output tensor + * @return The function returns + * ARM_CMSIS_NN_ARG_ERROR Argument error check failed + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ +arm_cmsis_nn_status arm_softmax_s16(const int16_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const cmsis_nn_softmax_lut_s16 *softmax_params, + int16_t *output); + +/** + * @brief U8 softmax function + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. Used to check if + * the quantized exponential operation can be performed + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ + +void arm_softmax_u8(const uint8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + uint8_t *output); + +/** + * @brief uint8 depthwise convolution function with asymmetric quantization + * Unless specified otherwise, arguments are mandatory. + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_ch Channels in input tensor + * @param[in] kernel Pointer to kernel weights + * @param[in] kernel_x Width of kernel + * @param[in] kernel_y Height of kernel + * @param[in] ch_mult Number of channel multiplier + * @param[in] pad_x Padding sizes x + * @param[in] pad_y Padding sizes y + * @param[in] stride_x stride along the width + * @param[in] stride_y stride along the height + * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement. + * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement. + * @param[in] bias Pointer to optional bias values. If no bias is + * availble, NULL is expected + * @param[in] input_offset Input tensor zero offset + * @param[in] filter_offset Kernel tensor zero offset + * @param[in] output_offset Output tensor zero offset + * @param[in,out] output Pointer to output tensor + * @param[in] output_x Width of output tensor + * @param[in] output_y Height of output tensor + * @param[in] output_activation_min Minimum value to clamp the output to. Range : {0, 255} + * @param[in] output_activation_max Minimum value to clamp the output to. 
Range : {0, 255} + * @param[in] out_shift Amount of right-shift for output + * @param[in] out_mult Output multiplier for requantization + * @return The function returns the following + * ARM_CMSIS_NN_SUCCESS - Successful operation + * + */ +arm_cmsis_nn_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const uint8_t *kernel, + const uint16_t kernel_x, + const uint16_t kernel_y, + const int16_t ch_mult, + const int16_t pad_x, + const int16_t pad_y, + const int16_t stride_x, + const int16_t stride_y, + const int16_t dilation_x, + const int16_t dilation_y, + const int32_t *bias, + const int32_t input_offset, + const int32_t filter_offset, + const int32_t output_offset, + uint8_t *output, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_activation_min, + const int32_t output_activation_max, + const int32_t out_shift, + const int32_t out_mult); + +/** + * @defgroup Reshape Reshape Functions + * + */ + +/** + * @brief Reshape a s8 vector into another with different shape + * @param[in] input points to the s8 input vector + * @param[out] output points to the s8 output vector + * @param[in] total_size total size of the input and output vectors in bytes + * + * @note The output is expected to be in a memory area that does not overlap with the input's + * + */ +void arm_reshape_s8(const int8_t *input, int8_t *output, const uint32_t total_size); + +/** + * @defgroup Concatenation Concatenation Functions + * + */ + +/** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the X axis + * This function should be called for each input tensor to concatenate. The argument offset_x + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_x = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_x(&input[i], ..., &output, ..., ..., offset_x) + * offset_x += input_x[i] + * } + * + * This function assumes that the output tensor has: + * -# The same height of the input tensor + * -# The same number of channels of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it + * does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor. Input tensor must not overlap with the output tensor. + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor. Expected to be at least + * (input_x * input_y * input_z * input_w) + offset_x + * bytes. 
+ * @param[in] output_x Width of output tensor + * @param[in] offset_x The offset (in number of elements) on the X axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_x is less than output_x + * + */ +void arm_concatenation_s8_x(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_x, + const uint32_t offset_x); + +/** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Y axis + * This function should be called for each input tensor to concatenate. The argument offset_y + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_y = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_y(&input[i], ..., &output, ..., ..., offset_y) + * offset_y += input_y[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same number of channels of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it + * does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor. Input tensor must not overlap with the output tensor. + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor. Expected to be at least + * (input_z * input_w * input_x * input_y) + offset_y + * bytes. + * @param[in] output_y Height of output tensor + * @param[in] offset_y The offset on the Y axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_y is less than output_y + * + */ +void arm_concatenation_s8_y(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_y, + const uint32_t offset_y); + +/** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the Z axis + * This function should be called for each input tensor to concatenate. The argument offset_z + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_z = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_z(&input[i], ..., &output, ..., ..., offset_z) + * offset_z += input_z[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same height of the input tensor + * -# The same batch size of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it + * does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor. Input tensor must not overlap with output tensor. 
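/*
 * Editorial sketch (not part of the patch): concatenating two int8 tensors along the
 * X axis, following the offset-accumulation pattern shown in the pseudo-code above.
 * The tensor shapes are arbitrary sample values.
 */
static void example_concat_x(const int8_t *t0, const int8_t *t1, int8_t *out)
{
    const uint16_t y = 2, z = 4, w = 1; /* shared height, channels and batch size */
    const uint16_t x0 = 3, x1 = 5;      /* widths of the two input tensors */
    const uint16_t out_x = x0 + x1;     /* output width = 8 */
    uint32_t offset_x = 0;

    arm_concatenation_s8_x(t0, x0, y, z, w, out, out_x, offset_x);
    offset_x += x0;
    arm_concatenation_s8_x(t1, x1, y, z, w, out, out_x, offset_x);
}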
+ * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor. Expected to be at least + * (input_x * input_y * input_z * input_w) + offset_z + * bytes. + * @param[in] output_z Channels in output tensor + * @param[in] offset_z The offset on the Z axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + * Input constraints + * offset_z is less than output_z + * + */ +void arm_concatenation_s8_z(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint16_t output_z, + const uint32_t offset_z); + +/** + * @brief int8/uint8 concatenation function to be used for concatenating N-tensors along the W axis (Batch size) + * This function should be called for each input tensor to concatenate. The argument offset_w + * will be used to store the input tensor in the correct position in the output tensor + * + * i.e. offset_w = 0 + * for(i = 0 i < num_input_tensors; ++i) + * { + * arm_concatenation_s8_w(&input[i], ..., &output, ..., ..., offset_w) + * offset_w += input_w[i] + * } + * + * This function assumes that the output tensor has: + * -# The same width of the input tensor + * -# The same height of the input tensor + * -# The same number o channels of the input tensor + * + * Unless specified otherwise, arguments are mandatory. + * + * @note This function, data layout independent, can be used to concatenate either int8 or uint8 tensors because it + * does not involve any arithmetic operation + * + * @param[in] input Pointer to input tensor + * @param[in] input_x Width of input tensor + * @param[in] input_y Height of input tensor + * @param[in] input_z Channels in input tensor + * @param[in] input_w Batch size in input tensor + * @param[out] output Pointer to output tensor. Expected to be at least + * input_x * input_y * input_z * input_w + * bytes. + * @param[in] offset_w The offset on the W axis to start concatenating the input tensor + * It is user responsibility to provide the correct value + * + */ +void arm_concatenation_s8_w(const int8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_z, + const uint16_t input_w, + int8_t *output, + const uint32_t offset_w); +/** + * @defgroup SVDF SVDF Layer Functions + * + */ + +/** + * @brief s8 SVDF function with 8 bit state tensor and 8 bit time weights + * + * @param[in] input_ctx Temporary scratch buffer + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] output_ctx Temporary output scratch buffer + * The caller is expected to clear the buffer ,if applicable, for security reasons. 
+ * @param[in] svdf_params SVDF Parameters + * Range of svdf_params->input_offset : [-128, 127] + * Range of svdf_params->output_offset : [-128, 127] + * @param[in] input_quant_params Input quantization parameters + * @param[in] output_quant_params Output quantization parameters + * @param[in] input_dims Input tensor dimensions + * @param[in] input_data Pointer to input tensor + * @param[in] state_dims State tensor dimensions + * @param[in] state_data Pointer to state tensor + * @param[in] weights_feature_dims Weights (feature) tensor dimensions + * @param[in] weights_feature_data Pointer to the weights (feature) tensor + * @param[in] weights_time_dims Weights (time) tensor dimensions + * @param[in] weights_time_data Pointer to the weights (time) tensor + * @param[in] bias_dims Bias tensor dimensions + * @param[in] bias_data Pointer to bias tensor + * @param[in] output_dims Output tensor dimensions + * @param[out] output_data Pointer to the output tensor + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. + * + */ +arm_cmsis_nn_status arm_svdf_s8(const cmsis_nn_context *input_ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_svdf_params *svdf_params, + const cmsis_nn_per_tensor_quant_params *input_quant_params, + const cmsis_nn_per_tensor_quant_params *output_quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *state_dims, + q7_t *state_data, + const cmsis_nn_dims *weights_feature_dims, + const q7_t *weights_feature_data, + const cmsis_nn_dims *weights_time_dims, + const q7_t *weights_time_data, + const cmsis_nn_dims *bias_dims, + const q31_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); + +/** + * @brief s8 SVDF function with 16 bit state tensor and 16 bit time weights + * + * @param[in] input_ctx Temporary scratch buffer + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] output_ctx Temporary output scratch buffer + * The caller is expected to clear the buffer ,if applicable, for security reasons. + * @param[in] svdf_params SVDF Parameters + * Range of svdf_params->input_offset : [-128, 127] + * Range of svdf_params->output_offset : [-128, 127] + * @param[in] input_quant_params Input quantization parameters + * @param[in] output_quant_params Output quantization parameters + * @param[in] input_dims Input tensor dimensions + * @param[in] input_data Pointer to input tensor + * @param[in] state_dims State tensor dimensions + * @param[in] state_data Pointer to state tensor + * @param[in] weights_feature_dims Weights (feature) tensor dimensions + * @param[in] weights_feature_data Pointer to the weights (feature) tensor + * @param[in] weights_time_dims Weights (time) tensor dimensions + * @param[in] weights_time_data Pointer to the weights (time) tensor + * @param[in] bias_dims Bias tensor dimensions + * @param[in] bias_data Pointer to bias tensor + * @param[in] output_dims Output tensor dimensions + * @param[out] output_data Pointer to the output tensor + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + * @details + * 1. Supported framework: TensorFlow Lite micro + * 2. q7 is used as data type eventhough it is s8 data. It is done so to be consistent with existing APIs. 
+ * + */ +arm_cmsis_nn_status arm_svdf_state_s16_s8(const cmsis_nn_context *input_ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_svdf_params *svdf_params, + const cmsis_nn_per_tensor_quant_params *input_quant_params, + const cmsis_nn_per_tensor_quant_params *output_quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *state_dims, + q15_t *state_data, + const cmsis_nn_dims *weights_feature_dims, + const q7_t *weights_feature_data, + const cmsis_nn_dims *weights_time_dims, + const q15_t *weights_time_data, + const cmsis_nn_dims *bias_dims, + const q31_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data); #ifdef __cplusplus } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h index 3e2f941..232aa61 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +21,8 @@ * Title: arm_nnsupportfunctions.h * Description: Public header file of support functions for CMSIS NN Library * - * $Date: 09. October 2020 - * $Revision: V.4.5.5 + * $Date: 8 August 2022 + * $Revision: V.10.0.0 * * Target Processor: Cortex-M CPUs * -------------------------------------------------------------------- */ @@ -30,8 +30,11 @@ #ifndef _ARM_NNSUPPORTFUNCTIONS_H_ #define _ARM_NNSUPPORTFUNCTIONS_H_ -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_common_tables.h" -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math_types.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_math_types.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/none.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h" + +#include #ifdef __cplusplus extern "C" { @@ -46,6 +49,27 @@ extern "C" { #define MAX(A, B) ((A) > (B) ? (A) : (B)) #define MIN(A, B) ((A) < (B) ? (A) : (B)) #define CLAMP(x, h, l) MAX(MIN((x), (h)), (l)) +#define REDUCE_MULTIPLIER(_mult) ((_mult < 0x7FFF0000) ? ((_mult + (1 << 15)) >> 16) : 0x7FFF) + +// Number of channels processed in a block for DW Conv(MVE) +// Requirement: Greater than 0 & less than 128 +// This can be fine tuned to match number of input channels for best performance. +// A layer with lower number of channels than CH_IN_BLOCK_MVE will result in higher +// scratch buffer usage and a layer with higher number of channels than CH_IN_BLOCK_MVE +// will result in lower scratch buffer usage. +#define CH_IN_BLOCK_MVE (124) + +/** + * @brief definition to pack four 8 bit values. + */ +#define PACK_Q7x4_32x1(v0, v1, v2, v3) \ + ((((int32_t)(v0) << 0) & (int32_t)0x000000FF) | (((int32_t)(v1) << 8) & (int32_t)0x0000FF00) | \ + (((int32_t)(v2) << 16) & (int32_t)0x00FF0000) | (((int32_t)(v3) << 24) & (int32_t)0xFF000000)) + +/** + * @brief definition to pack two 16 bit values. 
+ */ +#define PACK_Q15x2_32x1(v0, v1) (((int32_t)v0 & (int32_t)0xFFFF) | ((int32_t)v1 << 16)) /** * @brief Union for SIMD access of q31/q15/q7 types @@ -114,7 +138,6 @@ void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size); * @param[in] *pSrc points to the q7 input vector * @param[out] *pDst points to the q15 output vector * @param[in] blockSize length of the input vector - * @return none. * */ void arm_q7_to_q15_reordered_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t blockSize); @@ -143,7 +166,6 @@ void arm_q7_to_q15_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, * @param[out] dst pointer to the q15 output vector * @param[in] block_size length of the input vector * @param[in] offset offset to be added to each input vector element. - * @return none. * * @details This function does the q7 to q15 expansion with re-ordering of bytes. Re-ordering is a consequence of * the sign extension intrinsic(DSP extension). The tail (i.e., last (N % 4) elements) retains its @@ -236,58 +258,101 @@ q7_t *arm_nn_mat_mult_s8(const q7_t *input_row, const uint16_t row_len, const int32_t *const bias, q7_t *out); +/** + * @brief Matrix-multiplication function for convolution with per-channel requantization for 16 bits convolution. + * @param[in] input_a pointer to operand A + * @param[in] input_b pointer to operand B, always consists of 2 vectors. + * @param[in] output_ch number of rows of A + * @param[in] out_shift pointer to per output channel requantization shift parameter. + * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] activation_min minimum value to clamp the output to. Range : int16 + * @param[in] activation_max maximum value to clamp the output to. Range : int16 + * @param[in] num_col_a number of columns of A + * @param[in] output_bias per output channel bias. Range : int64 + * @param[in,out] out_0 pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. + * + * @details This function does the matrix multiplication of weight matrix for all output channels + * with 2 columns from im2col and produces two elements/output_channel. The outputs are + * clamped in the range provided by activation min and max. + * Supported framework: TensorFlow Lite micro. + */ +q15_t *arm_nn_mat_mult_kernel_s16(const q7_t *input_a, + const q15_t *input_b, + const int32_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int16_t activation_min, + const int16_t activation_max, + const int32_t num_col_a, + const int64_t *const output_bias, + q15_t *out_0); /** - * @brief General Matrix-multiplication without requantization for one row & one column - * @param[in] row_elements number of row elements - * @param[in] row_base pointer to row operand - * @param[in] col_base pointer to col operand - * @param[out] sum_col pointer to store sum of column elements - * @param[out] output pointer to store result of multiply-accumulate - * @return The function returns the multiply-accumulated result of the row by column. + * @brief General Vector by Matrix multiplication with requantization and storage of result. + * @param[in] row_elements number of row elements + * @param[in] skipped_row_elements number of row elements skipped due to padding. 
+ * row_elements + skipped_row_elements = (kernel_x * kernel_y) * input_ch + * @param[in] row_base_ref pointer to row operand + * @param[in] col_base_ref pointer to col operand + * @param[out] out_ch Number of output channels + * @param[in] conv_params Pointer to convolution parameters like offsets and activation values + * @param[in] quant_params Pointer to per-channel quantization parameters + * @param[in] bias Pointer to optional per-channel bias + * @param[out] output Pointer to output where int8 results are stored. + * @return The function performs matrix(row_base_ref) multiplication with vector(col_base_ref) and + * scaled result is stored in memory. * * @details Pseudo-code * *output = 0 * sum_col = 0 + * for (j = 0; j < out_ch; j++) * for (i = 0; i < row_elements; i++) - * *output += row_base[i] * col_base[i] - * sum_col += col_base[i] + * *output += row_base_ref[i] * col_base_ref[i] + * sum_col += col_base_ref[i] + * scale sum_col using quant_params and bias + * store result in 'output' + * * */ -arm_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, - const int8_t *row_base, - const int8_t *col_base, - int32_t *const sum_col, - int32_t *const output); +arm_cmsis_nn_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, + const int32_t skipped_row_elements, + const int8_t *row_base_ref, + const int8_t *col_base_ref, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output); /** - * @brief General Matrix-multiplication without requantization for four rows and one column + * @brief Matrix-multiplication with requantization & activation function for four rows and one column * @param[in] row_elements number of row elements * @param[in] offset offset between rows. Can be the same as row_elements. * For e.g, in a 1x1 conv scenario with stride as 1. * @param[in] row_base pointer to row operand * @param[in] col_base pointer to col operand - * @param[out] sum_col pointer to store sum of column elements - * @param[out] output pointer to store result(4 int32's) of multiply-accumulate - * @return The function returns the multiply-accumulated result of the row by column + * @param[in] out_ch Number of output channels + * @param[in] conv_params Pointer to convolution parameters like offsets and activation values + * @param[in] quant_params Pointer to per-channel quantization parameters + * @param[in] bias Pointer to per-channel bias + * @param[out] output Pointer to output where int8 results are stored. * - * @details Pseudo-code - * output[0] = 0 - * .. - * output[3] = 0 - * sum_col = 0 - * for (i = 0; i < row_elements; i++) - * output[0] += row_base[i] * col_base[i] - * .. - * output[3] += row_base[i + (row_elements * 3)] * col_base[i] - * sum_col += col_base[i] + * @return The function returns the updated output pointer or NULL if implementation is not available. + * + * @details Compliant to TFLM int8 specification. 
MVE implementation only */ -arm_status arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, - const int32_t offset, - const int8_t *row_base, - const int8_t *col_base, - int32_t *const sum_col, - int32_t *const output); +int8_t *arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, + const int32_t offset, + const int8_t *row_base, + const int8_t *col_base, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output); /** * @brief General Matrix-multiplication function with per-channel requantization. @@ -315,22 +380,22 @@ arm_status arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, * @param[in] activation_min Minimum value to clamp down the output. Range : int8 * @param[in] activation_max Maximum value to clamp up the output. Range : int8 * - * @return The function returns ARM_MATH_SUCCESS + * @return The function returns ARM_CMSIS_NN_SUCCESS * */ -arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, - const q7_t *rhs, - const q31_t *bias, - q7_t *dst, - const int32_t *dst_multipliers, - const int32_t *dst_shifts, - const int32_t lhs_rows, - const int32_t rhs_rows, - const int32_t rhs_cols, - const int32_t lhs_offset, - const int32_t dst_offset, - const int32_t activation_min, - const int32_t activation_max); +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, + const q7_t *rhs, + const q31_t *bias, + q7_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max); /** * @brief s8 Vector by Matrix (transposed) multiplication @@ -341,8 +406,7 @@ arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, * @param[out] dst Output vector * @param[in] lhs_offset Offset to be added to the input values of the left-hand side vector. * Range: -127 to 128 - * @param[in] rhs_offset Offset to be added to the input values of the right-hand side matrix. - * Range: -127 to 128 + * @param[in] rhs_offset Not used * @param[in] dst_offset Offset to be added to the output values. Range: -127 to 128 * @param[in] dst_multiplier Output multiplier * @param[in] dst_shift Output shift @@ -350,23 +414,88 @@ arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, * @param[in] rhs_rows Number of rows in the right-hand side input matrix * @param[in] activation_min Minimum value to clamp the output to. Range: int8 * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] address_offset Memory position offset for dst. First output is stored at 'dst', the + * second at 'dst + address_offset' and so on. Default value is typically 1. 
* - * @return The function returns ARM_MATH_SUCCESS + * @return The function returns ARM_CMSIS_NN_SUCCESS * */ -arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, - const q7_t *rhs, - const q31_t *bias, - q7_t *dst, - const int32_t lhs_offset, - const int32_t rhs_offset, - const int32_t dst_offset, - const int32_t dst_multiplier, - const int32_t dst_shift, - const int32_t rhs_cols, - const int32_t rhs_rows, - const int32_t activation_min, - const int32_t activation_max); +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, + const q7_t *rhs, + const q31_t *bias, + q7_t *dst, + const int32_t lhs_offset, + const int32_t rhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max, + const int32_t address_offset); + +/** + * @brief s16 Vector by Matrix (transposed) multiplication + * + * @param[in] lhs Input left-hand side vector + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] bias Input bias + * @param[out] dst Output vector + * @param[in] dst_multiplier Output multiplier + * @param[in] dst_shift Output shift + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int16 + * @param[in] activation_max Maximum value to clamp the output to. Range: int16 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s16(const q15_t *lhs, + const q7_t *rhs, + const q63_t *bias, + q15_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief s8 Vector by Matrix (transposed) multiplication with s16 output + * + * @param[in] lhs Input left-hand side vector + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[out] dst Output vector + * @param[in] lhs_offset Offset to be added to the input values of the left-hand side + * vector. Range: -127 to 128 + * @param[in] rhs_offset Not used + * @param[in] scatter_offset Address offset for dst. First output is stored at 'dst', the + * second at 'dst + scatter_offset' and so on. + * @param[in] dst_multiplier Output multiplier + * @param[in] dst_shift Output shift + * @param[in] rhs_cols Number of columns in the right-hand side input matrix + * @param[in] rhs_rows Number of rows in the right-hand side input matrix + * @param[in] activation_min Minimum value to clamp the output to. Range: int16 + * @param[in] activation_max Maximum value to clamp the output to. Range: int16 + * + * @return The function returns ARM_CMSIS_NN_SUCCESS + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_svdf_s8(const q7_t *lhs, + const q7_t *rhs, + q15_t *dst, + const int32_t lhs_offset, + const int32_t rhs_offset, + const int32_t scatter_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max); /** * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. 
To be used in padded cases where @@ -375,7 +504,8 @@ arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, * @param[in] lhs Input left-hand side matrix * @param[in] rhs Input right-hand side matrix (transposed) * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 - * @param[in] num_ch Number of channels in LHS/RHS + * @param[in] active_ch Subset of total_ch processed + * @param[in] total_ch Number of channels in LHS/RHS * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 @@ -386,7 +516,7 @@ arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, * @param[in] out Output pointer * * @return The function returns one of the two - * - Updated output pointer if an implementaiton is available + * - Updated output pointer if an implementation is available * - NULL if no implementation is available. * * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read @@ -396,18 +526,19 @@ arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, * - Output bias * - rhs */ -q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, - const q7_t *rhs, - const int32_t lhs_offset, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t row_x_col, - const int32_t *const output_bias, - q7_t *out); +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, + const q7_t *rhs, + const int32_t lhs_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + q7_t *out); /** * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in non-padded cases. @@ -416,7 +547,8 @@ q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, * @param[in] lhs Input left-hand side matrix * @param[in] rhs Input right-hand side matrix (transposed) * @param[in] lhs_offset LHS matrix offset(input offset). Range: -127 to 128 - * @param[in] num_ch Number of channels in LHS/RHS + * @param[in] active_ch Subset of total_ch processed + * @param[in] total_ch Number of channels in LHS/RHS * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels. * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels. * @param[in] out_offset Offset to be added to the output values. Range: -127 to 128 @@ -427,7 +559,7 @@ q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, * @param[in] out Output pointer * * @return The function returns one of the two - * - Updated output pointer if an implementaiton is available + * - Updated output pointer if an implementation is available * - NULL if no implementation is available. 
* * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read @@ -437,18 +569,79 @@ q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, * - Output bias * - rhs */ -q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, - const q7_t *rhs, - const int32_t lhs_offset, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t row_x_col, - const int32_t *const output_bias, - q7_t *out); +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, + const q7_t *rhs, + const int32_t lhs_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + q7_t *out); + +/** + * @brief Depthwise convolution of transposed rhs matrix with 4 lhs matrices. To be used in non-padded cases. + * Dimensions are the same for lhs and rhs. + * + * @param[in] lhs Input left-hand side matrix + * @param[in] rhs Input right-hand side matrix (transposed) + * @param[in] num_ch Number of channels in LHS/RHS + * @param[in] out_shift Per channel output shift. Length of vector is equal to number of channels. + * @param[in] out_mult Per channel output multiplier. Length of vector is equal to number of channels. + * @param[in] activation_min Minimum value to clamp the output to. Range: int8 + * @param[in] activation_max Maximum value to clamp the output to. Range: int8 + * @param[in] row_x_col (row_dimension * col_dimension) of LHS/RHS matrix + * @param[in] output_bias Per channel output bias. Length of vector is equal to number of channels. + * @param[in] out Output pointer + * + * @return The function returns one of the two + * - Updated output pointer if an implementation is available + * - NULL if no implementation is available. + * + * @note If number of channels is not a multiple of 4, upto 3 elements outside the boundary will be read + * out for the following. + * - Output shift + * - Output multiplier + * - Output bias + * - rhs + */ +int16_t *arm_nn_depthwise_conv_nt_t_s16(const int16_t *lhs, + const q7_t *rhs, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int64_t *const output_bias, + int16_t *out); + +/** + *@brief Matrix-multiplication function for convolution with reordered columns + *@param[in] pA pointer to operand A + *@param[in] pInBuffer pointer to operand B, always conssists of 2 vectors + *@param[in] ch_im_out numRow of A + *@param[in] numCol_A numCol of A + *@param[in] bias_shift amount of left-shift for bias + *@param[in] out_shift amount of right-shift for output + *@param[in] bias the bias + *@param[in,out] pOut pointer to output + *@return The function returns the incremented output pointer + * + *@details This function assumes that data in pInBuffer are reordered + */ +q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t *pA, + const q15_t *pInBuffer, + const uint16_t ch_im_out, + const uint16_t numCol_A, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q7_t *pOut); /** @brief Read 2 q15 elements and post increment pointer. 
@@ -505,6 +698,17 @@ __STATIC_FORCEINLINE q31_t arm_nn_read_q7x4(const q7_t *in_q7) return (val); } +/** + @brief Write four q7 to q7 pointer and increment pointer afterwards. + @param[in] in Double pointer to input value + @param[in] value Four bytes to copy + */ +__STATIC_FORCEINLINE void arm_nn_write_q7x4_ia(q7_t **in, q31_t value) +{ + memcpy(*in, &value, 4); + *in += 4; +} + /** * @brief memset optimized for MVE * @param[in, out] dst Destination pointer @@ -518,11 +722,11 @@ __STATIC_FORCEINLINE void arm_memset_q7(q7_t *dst, const q7_t val, uint32_t bloc __asm volatile(" vdup.8 q0, %[set_val] \n" " wlstp.8 lr, %[cnt], 1f \n" "2: \n" - " vstrb.8 q0, [%[in]], 16 \n" + " vstrb.8 q0, [%[in]], #16 \n" " letp lr, 2b \n" "1: \n" - : [ in ] "+r"(dst) - : [ cnt ] "r"(block_size), [ set_val ] "r"(val) + : [in] "+r"(dst) + : [cnt] "r"(block_size), [set_val] "r"(val) : "q0", "memory", "r14"); #else memset(dst, val, block_size); @@ -538,7 +742,7 @@ __STATIC_FORCEINLINE void arm_memset_q7(q7_t *dst, const q7_t val, uint32_t bloc __STATIC_FORCEINLINE const q7_t *read_and_pad(const q7_t *source, q31_t *out1, q31_t *out2) { q31_t inA = arm_nn_read_q7x4_ia(&source); - q31_t inAbuf1 = __SXTB16(__ROR((uint32_t)inA, 8)); + q31_t inAbuf1 = __SXTB16_RORn((uint32_t)inA, 8); q31_t inAbuf2 = __SXTB16(inA); #ifndef ARM_MATH_BIG_ENDIAN @@ -607,7 +811,6 @@ read_and_pad_reordered_with_offset(const q7_t *source, q31_t *out1, q31_t *out2, * @param[out] *pDst pointer to the output vector * @param[in] out_shift amount of right-shift for output * @param[in] blockSize number of samples in each vector - * @return none. * * Scaling and Overflow Behavior: * \par @@ -624,7 +827,6 @@ void arm_nn_mult_q15(q15_t *pSrcA, q15_t *pSrcB, q15_t *pDst, const uint16_t out * @param[out] *pDst pointer to the output vector * @param[in] out_shift amount of right-shift for output * @param[in] blockSize number of samples in each vector - * @return none. * * Scaling and Overflow Behavior: * \par @@ -634,11 +836,69 @@ void arm_nn_mult_q15(q15_t *pSrcA, q15_t *pSrcB, q15_t *pDst, const uint16_t out void arm_nn_mult_q7(q7_t *pSrcA, q7_t *pSrcB, q7_t *pDst, const uint16_t out_shift, uint32_t blockSize); +/** + * @brief Matrix-multiplication function for convolution with per-channel requantization. + * @param[in] input_a pointer to operand A + * @param[in] input_b pointer to operand B, always consists of 2 vectors. + * @param[in] output_ch number of rows of A + * @param[in] out_shift pointer to per output channel requantization shift parameter. + * @param[in] out_mult pointer to per output channel requantization multiplier parameter. + * @param[in] out_offset output tensor offset. + * @param[in] activation_min minimum value to clamp the output to. Range : int8 + * @param[in] activation_max maximum value to clamp the output to. Range : int8 + * @param[in] num_col_a number of columns of A + * @param[in] output_bias per output channel bias. Range : int32 + * @param[in,out] out_0 pointer to output + * @return The function returns one of the two + * 1. The incremented output pointer for a successful operation or + * 2. NULL if implementation is not available. + * + * @details This function does the matrix multiplication of weight matrix for all output channels + * with 2 columns from im2col and produces two elements/output_channel. The outputs are + * clamped in the range provided by activation min and max. + * Supported framework: TensorFlow Lite micro. 
+ */ +q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, + const q15_t *input_b, + const uint16_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int16_t activation_min, + const int16_t activation_max, + const uint16_t num_col_a, + const int32_t *const output_bias, + q7_t *out_0); + +/** + * @brief Common softmax function for s8 input and s8 or s16 output + * @param[in] input Pointer to the input tensor + * @param[in] num_rows Number of rows in the input tensor + * @param[in] row_size Number of elements in each input row + * @param[in] mult Input quantization multiplier + * @param[in] shift Input quantization shift within the range [0, 31] + * @param[in] diff_min Minimum difference with max in row. Used to check if + * the quantized exponential operation can be performed + * @param[in] int16_output Indicating s8 output if 0 else s16 output + * @param[out] output Pointer to the output tensor + * + * @note Supported framework: TensorFlow Lite micro (bit-accurate) + * + */ +void arm_nn_softmax_common_s8(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + const bool int16_output, + void *output); + /** * @brief macro for adding rounding offset */ #ifndef ARM_NN_TRUNCATE -#define NN_ROUND(out_shift) ((0x1u << out_shift) >> 1) +#define NN_ROUND(out_shift) ((0x1 << out_shift) >> 1) #else #define NN_ROUND(out_shift) 0 #endif @@ -663,8 +923,8 @@ void arm_nn_mult_q7(q7_t *pSrcA, q7_t *pSrcB, q7_t *pDst, const uint16_t out_shi /** * @brief Saturating doubling high multiply. Result matches * NEON instruction VQRDMULH. - * @param[in] m1 Multiplicand. Range: {Q31_MIN, Q31_MAX} - * @param[in] m2 Multiplier. Range: {Q31_MIN, Q31_MAX} + * @param[in] m1 Multiplicand. Range: {NN_Q31_MIN, NN_Q31_MAX} + * @param[in] m2 Multiplier. Range: {NN_Q31_MIN, NN_Q31_MAX} * @return Result of multiplication. * */ @@ -685,9 +945,9 @@ __STATIC_FORCEINLINE q31_t arm_nn_doubling_high_mult(const q31_t m1, const q31_t // as well. result = (int32_t)(mult / (1ll << 31)); - if ((m1 == m2) && (m1 == (int32_t)Q31_MIN)) + if ((m1 == m2) && (m1 == (int32_t)NN_Q31_MIN)) { - result = Q31_MAX; + result = NN_Q31_MAX; } return result; } @@ -696,13 +956,13 @@ __STATIC_FORCEINLINE q31_t arm_nn_doubling_high_mult(const q31_t m1, const q31_t * @brief Doubling high multiply without saturation. This is intended * for requantization where the scale is a positive integer * - * @param[in] m1 Multiplicand. Range: {Q31_MIN, Q31_MAX} - * @param[in] m2 Multiplier Range: {Q31_MIN, Q31_MAX} + * @param[in] m1 Multiplicand. Range: {NN_Q31_MIN, NN_Q31_MAX} + * @param[in] m2 Multiplier Range: {NN_Q31_MIN, NN_Q31_MAX} * @return Result of multiplication. * @note The result of this matches that of neon instruction - * VQRDMULH for m1 in range {Q31_MIN, Q31_MAX} and m2 in - * range {Q31_MIN + 1, Q31_MAX}. Saturation occurs when - * m1 equals m2 equals Q31_MIN and that is not handled by + * VQRDMULH for m1 in range {NN_Q31_MIN, NN_Q31_MAX} and m2 in + * range {NN_Q31_MIN + 1, NN_Q31_MAX}. Saturation occurs when + * m1 equals m2 equals NN_Q31_MIN and that is not handled by * this function. * */ @@ -759,7 +1019,7 @@ __STATIC_FORCEINLINE q31_t arm_nn_divide_by_power_of_two(const q31_t dividend, c /** * @brief Requantize a given value. * @param[in] val Value to be requantized - * @param[in] multiplier multiplier. Range {Q31_MIN + 1, Q32_MAX} + * @param[in] multiplier multiplier. 
Range {NN_Q31_MIN + 1, Q32_MAX} * @param[in] shift left or right shift for 'val * multiplier' * * @return Returns (val * multiplier)/(2 ^ shift) @@ -767,8 +1027,38 @@ __STATIC_FORCEINLINE q31_t arm_nn_divide_by_power_of_two(const q31_t dividend, c */ __STATIC_FORCEINLINE q31_t arm_nn_requantize(const q31_t val, const q31_t multiplier, const q31_t shift) { +#ifdef CMSIS_NN_USE_SINGLE_ROUNDING + const int64_t total_shift = 31 - shift; + const int64_t new_val = val * (int64_t)multiplier; + + int32_t result = new_val >> (total_shift - 1); + result = (result + 1) >> 1; + + return result; +#else return arm_nn_divide_by_power_of_two(arm_nn_doubling_high_mult_no_sat(val * (1 << LEFT_SHIFT(shift)), multiplier), RIGHT_SHIFT(shift)); +#endif +} + +/** + * @brief Requantize a given 64 bit value. + * @param[in] val Value to be requantized in the range {-(1<<47)} to {(1<<47) - 1} + * @param[in] reduced_multiplier Reduced multiplier in the range {NN_Q31_MIN + 1, Q32_MAX} to {Q16_MIN + 1, + * Q16_MAX} + * @param[in] shift Left or right shift for 'val * multiplier' in the range {-31} to {7} + * + * @return Returns (val * multiplier)/(2 ^ shift) + * + */ +__STATIC_FORCEINLINE q31_t arm_nn_requantize_s64(const q63_t val, const q31_t reduced_multiplier, const q31_t shift) +{ + const q63_t new_val = val * reduced_multiplier; + + q31_t result = new_val >> (14 - shift); // 64->32 bit reduction + result = (result + 1) >> 1; // Last shift position and insert round + + return result; } /** @@ -783,18 +1073,30 @@ __STATIC_FORCEINLINE void arm_memcpy_q7(q7_t *__RESTRICT dst, const q7_t *__REST #if defined(ARM_MATH_MVEI) __asm volatile(" wlstp.8 lr, %[cnt], 1f \n" "2: \n" - " vldrb.8 q0, [%[in]], 16 \n" - " vstrb.8 q0, [%[out]], 16 \n" + " vldrb.8 q0, [%[in]], #16 \n" + " vstrb.8 q0, [%[out]], #16 \n" " letp lr, 2b \n" "1: \n" - : [ in ] "+r"(src), [ out ] "+r"(dst) - : [ cnt ] "r"(block_size) + : [in] "+r"(src), [out] "+r"(dst) + : [cnt] "r"(block_size) : "q0", "memory", "r14"); #else memcpy(dst, src, block_size); #endif } +/** + * @brief memcpy wrapper for int16 + * @param[in, out] dst Destination pointer + * @param[in] src Source pointer. + * @param[in] block_size Number of bytes to copy. + * + */ +__STATIC_FORCEINLINE void arm_memcpy_q15(q15_t *__RESTRICT dst, const q15_t *__RESTRICT src, uint32_t block_size) +{ + memcpy(dst, src, block_size); +} + #if defined(ARM_MATH_MVEI) /** * @brief Vector saturating doubling high multiply returning high half. 
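Note: both arm_nn_requantize paths above compute approximately round(val * multiplier * 2^shift / 2^31). The default path reproduces the doubling-high-multiply followed by a rounding divide, while CMSIS_NN_USE_SINGLE_ROUNDING collapses this into one 64-bit rounding step; the two may differ by one least-significant bit in rare cases. A standalone sketch of the single-rounding arithmetic, with illustrative numbers only:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the CMSIS_NN_USE_SINGLE_ROUNDING branch shown above:
 * result ~= round(val * multiplier / 2^(31 - shift)). */
static int32_t requantize_single_rounding(int32_t val, int32_t multiplier, int32_t shift)
{
    const int64_t total_shift = 31 - shift;
    const int64_t product = (int64_t)val * (int64_t)multiplier;

    int32_t result = (int32_t)(product >> (total_shift - 1)); /* keep one extra bit    */
    result = (result + 1) >> 1;                               /* then round to nearest */
    return result;
}

int main(void)
{
    /* Example values only: multiplier ~0.63 in Q31, shift -9 gives an effective
     * scale of about 0.63 * 2^-9 ~= 0.00123, so 40000 requantizes to roughly 49. */
    printf("%ld\n", (long)requantize_single_rounding(40000, 1352914698, -9));
    return 0;
}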
@@ -835,8 +1137,21 @@ __STATIC_FORCEINLINE int32x4_t arm_divide_by_power_of_two_mve(const int32x4_t di */ __STATIC_FORCEINLINE int32x4_t arm_requantize_mve(const int32x4_t val, const q31_t multiplier, const q31_t shift) { +#ifdef CMSIS_NN_USE_SINGLE_ROUNDING + const int right_shift = MIN(-1, shift); + const int left_shift = shift - right_shift; + + const int32x4_t left_shift_dup = vdupq_n_s32(left_shift); + const int32x4_t right_shift_dup = vdupq_n_s32(right_shift); + + int32x4_t result = vqdmulhq_n_s32(vshlq_s32(val, left_shift_dup), multiplier); + result = vrshlq_s32(result, right_shift_dup); + + return result; +#else return arm_divide_by_power_of_two_mve( arm_doubling_high_mult_mve(vshlq_s32(val, vdupq_n_s32(LEFT_SHIFT(shift))), multiplier), RIGHT_SHIFT(shift)); +#endif } __STATIC_FORCEINLINE int32x4_t arm_doubling_high_mult_mve_32x4(const int32x4_t m1, const int32x4_t m2) @@ -856,6 +1171,15 @@ __STATIC_FORCEINLINE int32x4_t arm_requantize_mve_32x4(const int32x4_t val, const int32x4_t multiplier, const int32x4_t shift) { +#ifdef CMSIS_NN_USE_SINGLE_ROUNDING + const int32x4_t right_shift = vminq_s32(vdupq_n_s32(-1), shift); + const int32x4_t left_shift = vqsubq_s32(shift, right_shift); + + int32x4_t result = vqdmulhq_s32(vshlq_s32(val, left_shift), multiplier); + result = vrshlq_s32(result, right_shift); + + return result; +#else const int32x4_t zz = vdupq_n_s32(0); const mve_pred16_t p = vcmpgtq_n_s32(shift, 0); @@ -864,6 +1188,7 @@ __STATIC_FORCEINLINE int32x4_t arm_requantize_mve_32x4(const int32x4_t val, return arm_divide_by_power_of_two_mve_32x4(arm_doubling_high_mult_mve_32x4(vshlq_s32(val, left_shift), multiplier), right_shift); +#endif } #endif @@ -899,21 +1224,21 @@ __STATIC_FORCEINLINE int32_t arm_nn_exp_on_negative_values(int32_t val) #undef SELECT_IF_NON_ZERO mask = MASK_IF_ZERO(val); - return SELECT_USING_MASK(mask, Q31_MAX, result); + return SELECT_USING_MASK(mask, NN_Q31_MAX, result); } __STATIC_FORCEINLINE q31_t arm_nn_mult_by_power_of_two(const int32_t val, const int32_t exp) { const int32_t thresh = ((1 << (31 - exp)) - 1); int32_t result = val << exp; - result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val > thresh), Q31_MAX, result); - result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val < -thresh), Q31_MIN, result); + result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val > thresh), NN_Q31_MAX, result); + result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val < -thresh), NN_Q31_MIN, result); return result; } __STATIC_FORCEINLINE int32_t arm_nn_one_over_one_plus_x_for_x_in_0_1(int32_t val) { - const int64_t sum = (int64_t)val + (int64_t)Q31_MAX; + const int64_t sum = (int64_t)val + (int64_t)NN_Q31_MAX; const int32_t half_denominator = (int32_t)((sum + (sum >= 0 ? 1 : -1)) / 2L); int32_t x = 1515870810 + MUL_SAT(half_denominator, -1010580540); @@ -929,7 +1254,6 @@ __STATIC_FORCEINLINE int32_t arm_nn_one_over_one_plus_x_for_x_in_0_1(int32_t val @brief Write 2 q15 elements and post increment pointer. @param[in] dest_q15 Pointer to pointer that holds address of destination. @param[in] src_q31 Input value to be written. 
- @return none */ __STATIC_FORCEINLINE void arm_nn_write_q15x2_ia(q15_t **dest_q15, q31_t src_q31) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q15.c index 11a854a..5872806 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q15.c @@ -1,3 +1,5 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. * @@ -94,3 +96,5 @@ void arm_nn_activations_direct_q15(q15_t *data, uint16_t size, uint16_t int_widt /** * @} end of Acti group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c index b521ef9..874f766 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_nn_activations_q7.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2020, 2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_activations_q7.c * Description: Q7 neural network activation function using direct table look-up * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.2 * * Target Processor: Cortex-M cores * @@ -40,20 +42,11 @@ * @{ */ -/** - * @brief Q7 neural network activation function using direct table look-up - * @param[in,out] data pointer to input - * @param[in] size number of elements - * @param[in] int_width bit-width of the integer part, assume to be smaller than 3 - * @param[in] type type of activation functions - * - * @details +/* + * Q7 neural network activation function using direct table look-up * - * This is the direct table look-up approach. + * Refer header file for details. * - * Assume here the integer part of the fixed-point is <= 3. - * More than 3 just not making much sense, makes no difference with - * saturation followed by any of these activation functions. 
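Note: the int_width argument of the direct table look-up activations fixes how the q7 input is interpreted; assuming the usual fixed-point reading where int_width integer bits leave 7 - int_width fractional bits, int_width = 3 means a Q3.4 value covering roughly [-8, +8). The hypothetical encoder below only illustrates that interpretation.

#include <math.h>
#include <stdint.h>

/* Hypothetical helper: encode a float in the format implied by int_width = 3,
 * i.e. four fractional bits in a q7 value (scale = 2^4 = 16). */
static int8_t float_to_q7_int_width3(float x)
{
    long v = lrintf(x * 16.0f);
    if (v > 127)  { v = 127;  }
    if (v < -128) { v = -128; }
    return (int8_t)v;
}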
*/ void arm_nn_activations_direct_q7(q7_t *data, uint16_t size, uint16_t int_width, arm_nn_activation_type type) @@ -87,3 +80,5 @@ void arm_nn_activations_direct_q7(q7_t *data, uint16_t size, uint16_t int_width, /** * @} end of Acti group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c index 806488d..e9ee83a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu6_s8.c @@ -1,3 +1,5 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. * @@ -63,3 +65,5 @@ void arm_relu6_s8(q7_t *data, uint16_t size) /** * @} end of Acti group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c index cd2911d..93ff722 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu_q15.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_relu_q15.c * Description: Q15 version of ReLU * - * $Date: 09. October 2020 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.3 * * Target Processor: Cortex-M cores * @@ -40,21 +42,17 @@ * @{ */ -/** - * @brief Q15 RELU function - * @param[in,out] data pointer to input - * @param[in] size number of elements - * - * @details +/* + * Q15 ReLu function * - * Optimized relu with QSUB instructions. + * Refer header file for details. 
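Note: on cores with the DSP extension, arm_relu_q7 (and arm_relu_q15 for halfwords) processes four packed q7 lanes per 32-bit word: it isolates the sign bits, expands them into a per-byte mask with __ROR/__QSUB8, and clears the negative lanes in a single AND. A portable plain-C equivalent of that trick, as a sketch rather than the library code:

#include <stdint.h>

/* Portable SIMD-within-a-register ReLU on four packed q7 lanes.
 * Example: relu_q7x4_ref(0x7F80FE05) == 0x7F000005. */
static uint32_t relu_q7x4_ref(uint32_t packed)
{
    const uint32_t sign_bits = packed & 0x80808080u; /* MSB of every byte lane              */
    const uint32_t lane_ones = sign_bits >> 7;       /* 0x01 in each negative lane          */
    const uint32_t mask      = lane_ones * 0xFFu;    /* 0xFF per negative lane (no carries) */
    return packed & ~mask;                           /* zero the negative lanes             */
}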
* */ void arm_relu_q15(q15_t *data, uint16_t size) { -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for M cores with DSP extension */ uint16_t i = size >> 1; @@ -66,7 +64,7 @@ void arm_relu_q15(q15_t *data, uint16_t size) while (i) { - in = read_q15x2_ia(&input); + in = arm_nn_read_q15x2_ia((const q15_t **)&input); /* extract the first bit */ buf = __ROR(in & 0x80008000, 15); @@ -102,3 +100,5 @@ void arm_relu_q15(q15_t *data, uint16_t size) /** * @} end of Acti group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c index 641b8cc..029b39a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ActivationFunctions/arm_relu_q7.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_relu_q7.c * Description: Q7 version of ReLU * - * $Date: 09. October 2020 - * $Revision: V.1.0.3 + * $Date: 4 Aug 2022 + * $Revision: V.1.1.4 * * Target Processor: Cortex-M cores * @@ -40,21 +42,17 @@ * @{ */ -/** - * @brief Q7 RELU function - * @param[in,out] data pointer to input - * @param[in] size number of elements - * - * @details +/* + * Q7 ReLu function * - * Optimized relu with QSUB instructions. + * Refer header file for details. * */ void arm_relu_q7(q7_t *data, uint16_t size) { -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for M cores with DSP extension */ uint16_t i = size >> 2; @@ -66,7 +64,7 @@ void arm_relu_q7(q7_t *data, uint16_t size) while (i) { - in = read_q7x4_ia(&input); + in = arm_nn_read_q7x4_ia((const q7_t **)&input); /* extract the first bit */ buf = (int32_t)__ROR((uint32_t)in & 0x80808080, 7); @@ -74,7 +72,7 @@ void arm_relu_q7(q7_t *data, uint16_t size) /* if MSB=1, mask will be 0xFF, 0x0 otherwise */ mask = __QSUB8(0x00000000, buf); - write_q7x4_ia(&output, in & (~mask)); + arm_nn_write_q7x4_ia(&output, in & (~mask)); i--; } @@ -107,3 +105,5 @@ void arm_relu_q7(q7_t *data, uint16_t size) /** * @} end of Acti group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s16.c new file mode 100644 index 0000000..7fbb104 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s16.c @@ -0,0 +1,140 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_add_s16 + * Description: Elementwise add + * + * $Date: 10 May 2022 + * $Revision: V.2.1.0 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup BasicMath + * @{ + */ + +/* + * s16 elementwise add + * + * Refer header file for details. + * + */ + +/* Note: __SHIFT is expected to be <=0 */ + +arm_cmsis_nn_status arm_elementwise_add_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) +{ + (void)input_1_offset; + (void)input_2_offset; + (void)out_offset; + int32_t input_1; + int32_t input_2; + int32_t sum; + int32_t two_halfword_1, two_halfword_2; + int16_t sum_1, sum_2; + int32_t loop_count = block_size / 2; + + while (loop_count > 0) + { + two_halfword_1 = arm_nn_read_q15x2_ia(&input_1_vect); + two_halfword_2 = arm_nn_read_q15x2_ia(&input_2_vect); + + input_1 = (int16_t)(two_halfword_1 & 0xFFFF) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + input_2 = (int16_t)(two_halfword_2 & 0xFFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + sum_1 = (int16_t)sum; + + input_1 = (int16_t)(two_halfword_1 >> 16) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + input_2 = (int16_t)(two_halfword_2 >> 16) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + sum_2 = (int16_t)sum; + + arm_nn_write_q15x2_ia(&output, PACK_Q15x2_32x1(sum_1, sum_2)); + + loop_count--; + } + loop_count = block_size & 0x1; + + while (loop_count > 0) + { + /* C = A + B */ + input_1 = *input_1_vect++ << left_shift; + input_2 = *input_2_vect++ << left_shift; + + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); + + sum = input_1 + input_2; + sum = arm_nn_requantize(sum, out_mult, out_shift); + + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + + *output++ = (int16_t)sum; + + /* Decrement loop counter */ + loop_count--; + } + + return (ARM_CMSIS_NN_SUCCESS); 
+} + +/** + * @} end of BasicMath group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c index d30156d..9ff0311 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_add_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2022 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -19,10 +21,10 @@ /* ---------------------------------------------------------------------- * Project: CMSIS NN Library * Title: arm_elementwise_add_s8 - * Description: Element wise add + * Description: Elementwise add * - * $Date: 09. October 2020 - * $Revision: V.2.5.2 + * $Date: 19 April 2022 + * $Revision: V.3.0.0 * * Target Processor: Cortex-M CPUs * @@ -30,24 +32,6 @@ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" -#if defined(ARM_MATH_MVEI) -#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_helium_utils.h" -#endif - -#if defined(ARM_MATH_MVEI) -#define SAT_INPUT_VECT(__INPUT_V, __MULT, __SHIFT) \ - __INPUT_V = arm_doubling_high_mult_mve(__INPUT_V, __MULT); \ - __INPUT_V = arm_divide_by_power_of_two_mve(__INPUT_V, -__SHIFT); -#endif - -/** - * @note The *_no_sat API does not mean that the input not saturated, Since - * __MULT is a positive integer, it is saturated. The API definition - * has more info about it. - */ -#define SAT_INPUT(__INPUT, __MULT, __SHIFT) \ - __INPUT = arm_nn_doubling_high_mult_no_sat(__INPUT, __MULT); \ - __INPUT = arm_nn_divide_by_power_of_two(__INPUT, -__SHIFT); /** * @ingroup groupNN @@ -59,7 +43,7 @@ */ /* - * s8 element wise add + * s8 elementwise add * * Refer header file for details. 
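Note: the s8 elementwise kernels drop the local SAT_INPUT/SAT_INPUT_VECT macros in favour of the shared arm_nn_requantize/arm_requantize_mve helpers. For the non-positive shifts these kernels pass (the code notes __SHIFT is expected to be <= 0), the old and new formulations compute the same value on the default double-rounding path; with CMSIS_NN_USE_SINGLE_ROUNDING the result may differ by one LSB. A small sketch of that equivalence, assuming the usual LEFT_SHIFT/RIGHT_SHIFT behaviour (no left shift for shift <= 0):

#include <stdint.h>
#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h"

/* What the removed SAT_INPUT macro expanded to (shift <= 0). */
static int32_t sat_input_old(int32_t v, int32_t mult, int32_t shift)
{
    v = arm_nn_doubling_high_mult_no_sat(v, mult);
    v = arm_nn_divide_by_power_of_two(v, -shift);
    return v;
}

/* What the kernels call now: for shift <= 0 the shared helper performs the same
 * doubling-high-multiply followed by a rounding divide by 2^(-shift). */
static int32_t sat_input_new(int32_t v, int32_t mult, int32_t shift)
{
    return arm_nn_requantize(v, mult, shift);
}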
* @@ -67,25 +51,25 @@ /* Note: __SHIFT is expected to be <=0 */ -arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, - const int8_t *input_2_vect, - const int32_t input_1_offset, - const int32_t input_1_mult, - const int32_t input_1_shift, - const int32_t input_2_offset, - const int32_t input_2_mult, - const int32_t input_2_shift, - const int32_t left_shift, - int8_t *output, - const int32_t out_offset, - const int32_t out_mult, - const int32_t out_shift, - const int32_t out_activation_min, - const int32_t out_activation_max, - const uint32_t block_size) +arm_cmsis_nn_status arm_elementwise_add_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_1_mult, + const int32_t input_1_shift, + const int32_t input_2_offset, + const int32_t input_2_mult, + const int32_t input_2_shift, + const int32_t left_shift, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) { #if defined(ARM_MATH_MVEI) - int32_t count = (int32_t)block_size; + int32_t count = block_size; while (count > 0) { @@ -103,11 +87,11 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, vect_1 = vshlq_r_s32(vect_1, left_shift); vect_2 = vshlq_r_s32(vect_2, left_shift); - SAT_INPUT_VECT(vect_1, input_1_mult, input_1_shift); - SAT_INPUT_VECT(vect_2, input_2_mult, input_2_shift); + vect_1 = arm_requantize_mve(vect_1, input_1_mult, input_1_shift); + vect_2 = arm_requantize_mve(vect_2, input_2_mult, input_2_shift); vect_1 = vaddq_s32(vect_1, vect_2); - SAT_INPUT_VECT(vect_1, out_mult, out_shift); + vect_1 = arm_requantize_mve(vect_1, out_mult, out_shift); vect_1 = vaddq_n_s32(vect_1, out_offset); @@ -122,7 +106,7 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, count -= 4; } #else - uint32_t loop_count; + int32_t loop_count; int32_t input_1; int32_t input_2; int32_t sum; @@ -139,7 +123,7 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, loop_count = block_size >> 2; - while (loop_count > 0U) + while (loop_count > 0) { /* 4 outputs are calculated in one loop. 
The order of calculation is follows the order of output sign extension intrinsic */ @@ -153,62 +137,63 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, b_2 = __SADD16(b_2, offset_2_packed); /* Sum 1 */ - input_1 = (int16_t)(b_1 & 0x0FFFFL) << left_shift; - SAT_INPUT(input_1, input_1_mult, input_1_shift); + input_1 = (b_1 & 0x0FFFF) << left_shift; + + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); - input_2 = (int16_t)(b_2 & 0x0FFFFL) << left_shift; - SAT_INPUT(input_2, input_2_mult, input_2_shift); + input_2 = (b_2 & 0x0FFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); + sum = arm_nn_requantize(sum, out_mult, out_shift); sum += out_offset; sum = MAX(sum, out_activation_min); sum = MIN(sum, out_activation_max); r1 = (q7_t)sum; /* Sum 3 */ - input_1 = (int16_t)((b_1 >> 16) & 0x0FFFFL) << left_shift; - SAT_INPUT(input_1, input_1_mult, input_1_shift); + input_1 = ((b_1 >> 16) & 0x0FFFF) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); - input_2 = (int16_t)((b_2 >> 16) & 0x0FFFFL) << left_shift; - SAT_INPUT(input_2, input_2_mult, input_2_shift); + input_2 = ((b_2 >> 16) & 0x0FFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); + sum = arm_nn_requantize(sum, out_mult, out_shift); sum += out_offset; sum = MAX(sum, out_activation_min); sum = MIN(sum, out_activation_max); r3 = (q7_t)sum; /* Sum 2 */ - input_1 = (int16_t)(a_1 & 0x0FFFFL) << left_shift; - SAT_INPUT(input_1, input_1_mult, input_1_shift); + input_1 = (a_1 & 0x0FFFF) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); - input_2 = (int16_t)(a_2 & 0x0FFFFL) << left_shift; - SAT_INPUT(input_2, input_2_mult, input_2_shift); + input_2 = (a_2 & 0x0FFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); + sum = arm_nn_requantize(sum, out_mult, out_shift); sum += out_offset; sum = MAX(sum, out_activation_min); sum = MIN(sum, out_activation_max); r2 = (q7_t)sum; /* Sum 4 */ - input_1 = (int16_t)((a_1 >> 16) & 0x0FFFFL) << left_shift; - SAT_INPUT(input_1, input_1_mult, input_1_shift); + input_1 = ((a_1 >> 16) & 0x0FFFF) << left_shift; + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); - input_2 = (int16_t)((a_2 >> 16) & 0x0FFFFL) << left_shift; - SAT_INPUT(input_2, input_2_mult, input_2_shift); + input_2 = ((a_2 >> 16) & 0x0FFFF) << left_shift; + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); + sum = arm_nn_requantize(sum, out_mult, out_shift); sum += out_offset; sum = MAX(sum, out_activation_min); sum = MIN(sum, out_activation_max); r4 = (q7_t)sum; - write_q7x4_ia(&output, __PACKq7(r1, r2, r3, r4)); + arm_nn_write_q7x4_ia(&output, PACK_Q7x4_32x1(r1, r2, r3, r4)); loop_count--; } @@ -218,21 +203,18 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, loop_count = block_size; #endif - while (loop_count > 0U) + while (loop_count > 0) { /* C = A + B */ input_1 = (*input_1_vect++ + input_1_offset) << left_shift; input_2 = (*input_2_vect++ + input_2_offset) << left_shift; - input_1 = arm_nn_doubling_high_mult(input_1, input_1_mult); - input_1 = arm_nn_divide_by_power_of_two(input_1, -input_1_shift); - - input_2 = 
arm_nn_doubling_high_mult(input_2, input_2_mult); - input_2 = arm_nn_divide_by_power_of_two(input_2, -input_2_shift); + input_1 = arm_nn_requantize(input_1, input_1_mult, input_1_shift); + input_2 = arm_nn_requantize(input_2, input_2_mult, input_2_shift); sum = input_1 + input_2; - SAT_INPUT(sum, out_mult, out_shift); + sum = arm_nn_requantize(sum, out_mult, out_shift); sum += out_offset; sum = MAX(sum, out_activation_min); @@ -246,9 +228,11 @@ arm_status arm_elementwise_add_s8(const int8_t *input_1_vect, #endif /* ARM_MATH_MVEI */ - return (ARM_MATH_SUCCESS); + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of BasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s16.c new file mode 100644 index 0000000..5d53550 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s16.c @@ -0,0 +1,126 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_elementwise_mul_s16 + * Description: Element wise multiplication + * + * $Date: 10 May 2022 + * $Revision: V.2.1.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup BasicMath + * @{ + */ + +/** + * @brief s16 element wise multiplication of two vectors + * + * @note Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_elementwise_mul_s16(const int16_t *input_1_vect, + const int16_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int16_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) +{ + (void)input_1_offset; + (void)input_2_offset; + (void)out_offset; + int32_t input_1; + int32_t input_2; + int32_t mul_res; + int32_t two_halfword_1, two_halfword_2; + int16_t mul_1, mul_2; + int32_t loop_count = block_size / 2; + + while (loop_count > 0) + { + two_halfword_1 = arm_nn_read_q15x2_ia(&input_1_vect); + two_halfword_2 = arm_nn_read_q15x2_ia(&input_2_vect); + + input_1 = (int16_t)(two_halfword_1 & 0xFFFF); + input_2 = (int16_t)(two_halfword_2 & 0xFFFF); + mul_res = input_1 * input_2; + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + mul_1 = (int16_t)mul_res; + + input_1 = (int16_t)(two_halfword_1 >> 16); + input_2 = (int16_t)(two_halfword_2 >> 16); + mul_res = input_1 * input_2; + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + mul_2 = (int16_t)mul_res; + + arm_nn_write_q15x2_ia(&output, PACK_Q15x2_32x1(mul_1, mul_2)); + + loop_count--; + } + loop_count = block_size & 0x1; + + while (loop_count > 0) + { + /* C = A * B */ + + input_1 = *input_1_vect++; + input_2 = *input_2_vect++; + + mul_res = input_1 * input_2; + mul_res = arm_nn_requantize(mul_res, out_mult, out_shift); + + mul_res = MAX(mul_res, out_activation_min); + mul_res = MIN(mul_res, out_activation_max); + + *output++ = (int16_t)mul_res; + + /* Decrement loop counter */ + loop_count--; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of BasicMath group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s8.c index 35b5413..663112a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/BasicMathFunctions/arm_elementwise_mul_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_elementwise_mul_s8 * Description: Element wise multiplication * - * $Date: January 26, 2021 - * $Revision: V.1.0.5 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,24 +42,24 @@ * @{ */ -/** - * @brief s8 element wise multiplication of two vectors +/* + * s8 element wise multiplication of two vectors * - * @note Refer header file for details. + * Refer header file for details. 
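Note: arm_elementwise_mul_s16 leaves the input and output offsets unused and applies a single requantization to the raw 32-bit product, so the output multiplier/shift pair fully defines the scaling. A usage sketch with illustrative parameter values only: out_mult = 2^30 (0.5 in Q31) together with out_shift = -14 yields an effective scale of 2^-15, i.e. a saturating Q15-style multiply.

#include <stdio.h>
#include <stdint.h>
#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h"

int main(void)
{
    const int16_t a[4] = {16384, -16384, 32767, 100};
    const int16_t b[4] = {16384,  16384, 32767, 200};
    int16_t out[4];

    /* Parameter values are for illustration only. */
    arm_elementwise_mul_s16(a, b,
                            0, 0,            /* input offsets: ignored by this kernel     */
                            out,
                            0,               /* out_offset: ignored as well               */
                            1073741824,      /* out_mult = 2^30 (0.5 in Q31)              */
                            -14,             /* out_shift: combined scale becomes 2^-15   */
                            -32768, 32767,   /* activation range = full int16             */
                            4);              /* block_size                                */

    for (int i = 0; i < 4; i++)
    {
        printf("%d\n", out[i]);              /* roughly (a*b) >> 15: 8192, -8192, 32766, 1 */
    }
    return 0;
}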
* */ -arm_status arm_elementwise_mul_s8(const int8_t *input_1_vect, - const int8_t *input_2_vect, - const int32_t input_1_offset, - const int32_t input_2_offset, - int8_t *output, - const int32_t out_offset, - const int32_t out_mult, - const int32_t out_shift, - const int32_t out_activation_min, - const int32_t out_activation_max, - const uint32_t block_size) +arm_cmsis_nn_status arm_elementwise_mul_s8(const int8_t *input_1_vect, + const int8_t *input_2_vect, + const int32_t input_1_offset, + const int32_t input_2_offset, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t out_activation_min, + const int32_t out_activation_max, + const int32_t block_size) { int32_t loop_count; @@ -163,7 +165,7 @@ arm_status arm_elementwise_mul_s8(const int8_t *input_1_vect, mul_res = MIN(mul_res, out_activation_max); r4 = (q7_t)mul_res; - write_q7x4_ia(&output, __PACKq7(r1, r2, r3, r4)); + arm_nn_write_q7x4_ia(&output, PACK_Q7x4_32x1(r1, r2, r3, r4)); loop_count--; } @@ -192,9 +194,11 @@ arm_status arm_elementwise_mul_s8(const int8_t *input_1_vect, loop_count--; } #endif - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of BasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c index b43c8ea..442a497 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_w.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -29,6 +31,7 @@ * -------------------------------------------------------------------- */ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" /** * @ingroup groupNN @@ -57,9 +60,11 @@ void arm_concatenation_s8_w(const int8_t *input, output += offset_w * (input_x * input_y * input_z); - memcpy(output, input, input_copy_size); + arm_memcpy_q7(output, input, input_copy_size); } /** * @} end of Concatenation group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c index c672339..bcc0d38 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_x.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 Arm Limited or its affiliates. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -29,6 +31,7 @@ * -------------------------------------------------------------------- */ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" /** * @ingroup groupNN @@ -63,7 +66,7 @@ void arm_concatenation_s8_x(const int8_t *input, // Copy per row for (i = 0; i < num_iterations; ++i) { - memcpy(output, input, input_x); + arm_memcpy_q7(output, input, input_x); input += input_x; output += output_x; } @@ -72,3 +75,5 @@ void arm_concatenation_s8_x(const int8_t *input, /** * @} end of Concatenation group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c index 12119e7..b0f7f43 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_y.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -29,6 +31,7 @@ * -------------------------------------------------------------------- */ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" /** * @ingroup groupNN @@ -64,7 +67,7 @@ void arm_concatenation_s8_y(const int8_t *input, // Copy per tile for (i = 0; i < num_iterations; ++i) { - memcpy(output, input, input_copy_size); + arm_memcpy_q7(output, input, input_copy_size); input += input_copy_size; output += output_stride; } @@ -73,3 +76,5 @@ void arm_concatenation_s8_y(const int8_t *input, /** * @} end of Concatenation group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c index 32992a2..4ba99f5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConcatenationFunctions/arm_concatenation_s8_z.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 Arm Limited or its affiliates. 
* * SPDX-License-Identifier: Apache-2.0 * @@ -29,6 +31,7 @@ * -------------------------------------------------------------------- */ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" /** * @ingroup groupNN @@ -63,7 +66,7 @@ void arm_concatenation_s8_z(const int8_t *input, for (i = 0; i < input_w; ++i) { - memcpy(output, input, input_copy_size); + arm_memcpy_q7(output, input, input_copy_size); input += input_copy_size; output += output_stride; } @@ -72,3 +75,5 @@ void arm_concatenation_s8_z(const int8_t *input, /** * @} end of Concatenation group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c index aaa233c..64a24d6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1_x_n_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_1_x_n_s8.c * Description: s8 version of 1xN convolution using symmetric quantization. * - * $Date: January 26, 2021 - * $Revision: V.2.0.3 + * $Date: 20 June 2022 + * $Revision: V.3.1.0 * * Target Processor: Cortex-M cores * @@ -47,23 +49,24 @@ * */ -arm_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx, - const cmsis_nn_conv_params *conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data) +arm_cmsis_nn_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data) { (void)bias_dims; - arm_status status = ARM_MATH_SUCCESS; - if (output_dims->w % 4 != 0) + arm_cmsis_nn_status status = ARM_CMSIS_NN_SUCCESS; + /* The wrapper API is the ultimate reference for argument check */ + if ((input_dims->h != 1) || (output_dims->w % 4 != 0) || conv_params->dilation.w != 1) { - status = ARM_MATH_SIZE_MISMATCH; + status = ARM_CMSIS_NN_ARG_ERROR; goto out; } @@ -78,94 +81,55 @@ arm_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx, const uint16_t pad_x = conv_params->padding.w; const uint16_t stride_x = conv_params->stride.w; - const int32_t input_offset = conv_params->input_offset; - const int32_t out_offset = conv_params->output_offset; - const int32_t out_activation_min = conv_params->activation.min; - const int32_t out_activation_max = conv_params->activation.max; - int32_t *output_mult = quant_params->multiplier; - 
int32_t *output_shift = quant_params->shift; - - for (int i_out_x = 0; i_out_x <= (output_x - 4); i_out_x += 4) + int i_batch; + for (i_batch = 0; i_batch < input_dims->n; i_batch++) { - int32_t input_begin_idx[4]; - int32_t ker_begin_idx[4]; - int32_t ker_end_idx[4]; - - for (int i = 0; i < 4; i++) + for (int i_out_x = 0; i_out_x <= (output_x - 4); i_out_x += 4) { - const int32_t est_input_x_idx = stride_x * (i_out_x + i) - pad_x; - input_begin_idx[i] = MAX(0, est_input_x_idx); - ker_begin_idx[i] = MAX(0, -est_input_x_idx); - ker_end_idx[i] = MIN(kernel_x, input_x - est_input_x_idx); - } + int32_t input_begin_idx[4]; + int32_t ker_begin_idx[4]; + int32_t ker_end_idx[4]; - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32x4_t s_offset; - int32_t acc[4]; - if ((ker_begin_idx[0] != 0) || (ker_end_idx[3] != kernel_x)) + for (int i = 0; i < 4; i++) { - int32_t sum_row[4]; - - (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[0] - ker_begin_idx[0]) * input_ch, - input_data + input_begin_idx[0] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch) + - (ker_begin_idx[0] * input_ch), - &sum_row[0], - &acc[0]); - (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[1] - ker_begin_idx[1]) * input_ch, - input_data + input_begin_idx[1] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch) + - (ker_begin_idx[1] * input_ch), - &sum_row[1], - &acc[1]); - - (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[2] - ker_begin_idx[2]) * input_ch, - input_data + input_begin_idx[2] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch) + - (ker_begin_idx[2] * input_ch), - &sum_row[2], - &acc[2]); - - (void)arm_nn_mat_mul_core_1x_s8((ker_end_idx[3] - ker_begin_idx[3]) * input_ch, - input_data + input_begin_idx[3] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch) + - (ker_begin_idx[3] * input_ch), - &sum_row[3], - &acc[3]); - - s_offset = vldrwq_s32(sum_row); + const int32_t est_input_x_idx = stride_x * (i_out_x + i) - pad_x; + input_begin_idx[i] = MAX(0, est_input_x_idx); + ker_begin_idx[i] = MAX(0, -est_input_x_idx); + ker_end_idx[i] = MIN(kernel_x, input_x - est_input_x_idx); } - else + + if ((ker_begin_idx[0] != 0) || (ker_end_idx[3] != kernel_x)) { - int32_t sum_row; - (void)arm_nn_mat_mul_core_4x_s8(kernel_x * input_ch, - stride_x * input_ch, - input_data + input_begin_idx[0] * input_ch, - filter_data + (input_ch * kernel_x * i_out_ch), - &sum_row, - acc); - - s_offset = vdupq_n_s32(sum_row); + for (int i = 0; i < 4; i++) + { + const int32_t actual_kernel_len = ker_end_idx[i] - ker_begin_idx[i]; + arm_nn_mat_mul_core_1x_s8(actual_kernel_len * input_ch, + (kernel_x - actual_kernel_len) * input_ch, + input_data + input_begin_idx[i] * input_ch, + filter_data + (ker_begin_idx[i] * input_ch), + output_ch, + conv_params, + quant_params, + bias_data, + output_data); + output_data += output_ch; + } } - int32x4_t res = vldrwq_s32(acc); - s_offset = vmulq_n_s32(s_offset, input_offset); - res = vaddq_s32(res, s_offset); - if (bias_data) + else { - res = vaddq_n_s32(res, bias_data[i_out_ch]); + output_data = arm_nn_mat_mul_core_4x_s8(kernel_x * input_ch, + stride_x * input_ch, + input_data + input_begin_idx[0] * input_ch, + filter_data, + output_ch, + conv_params, + quant_params, + bias_data, + output_data); } - res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); - res = vaddq_n_s32(res, out_offset); - - res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); - res = vminq_s32(res, vdupq_n_s32(out_activation_max)); - - const uint32x4_t scatter_offset = {0, 
output_ch, output_ch * 2, output_ch * 3}; - vstrbq_scatter_offset_s32(output_data, scatter_offset, res); - output_data++; } - output_data += (3 * output_ch); + /* Advance to the next batch */ + input_data += (input_x * input_ch); } #else @@ -189,8 +153,8 @@ arm_status arm_convolve_1_x_n_s8(const cmsis_nn_context *ctx, int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) { -#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) - return (2 * input_dims->c * filter_dims->w * filter_dims->h) * sizeof(int16_t); +#if !defined(ARM_MATH_MVEI) + return arm_convolve_s8_get_buffer_size(input_dims, filter_dims); #else (void)input_dims; (void)filter_dims; @@ -201,3 +165,5 @@ int32_t arm_convolve_1_x_n_s8_get_buffer_size(const cmsis_nn_dims *input_dims, c /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c index 439d219..d0abf21 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_HWC_q7_fast_nonsquare.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_1x1_HWC_q7_fast_nonsquare.c * Description: Fast Q7 version of 1x1 convolution (non-square shape) * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,67 +42,35 @@ * @{ */ -/** - * @brief Fast Q7 version of 1x1 convolution (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is optimized for convolution with 1x1 kernel size (i.e., dim_kernel_x=1 - * and dim_kernel_y=1). 
It can be used for the second half of MobileNets [1] after depthwise - * separable convolution. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 +/* + * Fast Q7 version of 1x1 convolution (non-sqaure shape) + * Refer function header for details * - * [1] MobileNets: Efficient Convolutional Neural Networks for Mobile Vision Applications - * https://arxiv.org/abs/1704.04861 */ -arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ (void)dim_im_in_y; int16_t i_out_y, i_out_x; @@ -118,7 +88,7 @@ arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in, padding_y != 0 || stride_x != 1 || stride_y != 1) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) @@ -191,7 +161,7 @@ arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in, padding_y != 0 || stride_x != 1 || stride_y != 1) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i = 0; i < ch_im_out; i++) @@ -227,9 +197,11 @@ arm_status arm_convolve_1x1_HWC_q7_fast_nonsquare(const q7_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c index 3197bd3..98eb524 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_1x1_s8_fast.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,18 +23,16 @@ * Title: arm_convolve_1x1_s8_fast.c * Description: Fast q7 version of 1x1 convolution (non-square shape) * - * $Date: 09. October 2020 - * $Revision: V.2.0.3 + * $Date: 20 june 2022 + * $Revision: V.3.0.1 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M Processors * * -------------------------------------------------------------------- */ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" - -#define DIM_KER_X (1U) -#define DIM_KER_Y (1U) +#include /** * @ingroup groupNN @@ -50,22 +50,22 @@ * */ -arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, - const cmsis_nn_conv_params *conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data) +arm_cmsis_nn_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data) { - if (input_dims->c % 4 != 0 || conv_params->padding.w != 0 || conv_params->padding.h != 0 || - conv_params->stride.w != 1 || conv_params->stride.h != 1) + if (conv_params->padding.w != 0 || conv_params->padding.h != 0 || conv_params->stride.w != 1 || + conv_params->stride.h != 1) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } (void)ctx; @@ -77,70 +77,33 @@ arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, const int32_t col_len = input_dims->w * input_dims->h * input_dims->n; const int32_t output_ch = output_dims->c; const int32_t input_ch = input_dims->c; - const int32_t input_offset = conv_params->input_offset; - const int32_t out_offset = conv_params->output_offset; - const int32_t out_activation_min = conv_params->activation.min; - const int32_t out_activation_max = conv_params->activation.max; - int32_t *output_mult = quant_params->multiplier; - int32_t *output_shift = quant_params->shift; for (int i_items = 0; i_items <= (col_len - 4); i_items += 4) { - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32_t sum_row = 0; - int32_t temp_out[4]; - - (void)arm_nn_mat_mul_core_4x_s8(input_ch, - input_ch, - input_data + i_items * input_ch, - filter_data + i_out_ch * input_ch, - &sum_row, - temp_out); - int32x4_t res = vldrwq_s32(temp_out); - if (bias_data) - { - res = vaddq_n_s32(res, bias_data[i_out_ch]); - } - sum_row = sum_row * input_offset; - res = vaddq_n_s32(res, sum_row); - res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); - res = vaddq_n_s32(res, out_offset); - - res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); - res = vminq_s32(res, vdupq_n_s32(out_activation_max)); - - const uint32x4_t scatter_offset = { - 0, (uint32_t)output_ch, (uint32_t)output_ch * 2, (uint32_t)output_ch * 3}; - vstrbq_scatter_offset_s32(output_data, scatter_offset, res); - output_data++; - } - output_data += (3 * output_ch); + output_data = arm_nn_mat_mul_core_4x_s8(input_ch, 
+ input_ch, + input_data + i_items * input_ch, + filter_data, + output_ch, + conv_params, + quant_params, + bias_data, + output_data); } /* Handle left over elements */ for (int i_items = (col_len & ~0x3); i_items < col_len; i_items++) { - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32_t sum_row = 0; - - int32_t acc; - (void)arm_nn_mat_mul_core_1x_s8( - input_ch, input_data + i_items * input_ch, filter_data + i_out_ch * input_ch, &sum_row, &acc); - if (bias_data) - { - acc += bias_data[i_out_ch]; - } - sum_row = (sum_row * input_offset); - acc += sum_row; - acc = arm_nn_requantize(acc, output_mult[i_out_ch], output_shift[i_out_ch]); - acc += out_offset; - - acc = MAX(acc, out_activation_min); - acc = MIN(acc, out_activation_max); - *output_data++ = acc; - } + arm_nn_mat_mul_core_1x_s8(input_ch, + 0, + input_data + i_items * input_ch, + filter_data, + output_ch, + conv_params, + quant_params, + bias_data, + output_data); + output_data += output_ch; } #else @@ -167,7 +130,7 @@ arm_status arm_convolve_1x1_s8_fast(const cmsis_nn_context *ctx, #endif /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims *input_dims) @@ -179,3 +142,5 @@ int32_t arm_convolve_1x1_s8_fast_get_buffer_size(const cmsis_nn_dims *input_dims /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c index 3859b81..fe642d8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_basic.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_HWC_q15_basic.c * Description: Q15 version of convolution * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,55 +42,29 @@ * @{ */ -/** - * @brief Basic Q15 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * bufferA size: ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * This basic version is designed to work for any input tensor and weight - * dimension. +/* + * Basic Q15 convolution function + * Refer function header for details */ -arm_status arm_convolve_HWC_q15_basic(const q15_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q15_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q15_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_HWC_q15_basic(const q15_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q15_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q15_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; @@ -201,9 +177,11 @@ arm_status arm_convolve_HWC_q15_basic(const q15_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c index 070dd65..a0bbd22 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm 
Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_HWC_q15_fast.c * Description: Fast Q15 version of convolution * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,70 +42,39 @@ * @{ */ -/** - * @brief Fast Q15 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in is multiple of 2 - * - * ch_im_out is multipe of 2 - * +/* + * Fast Q15 convolution function + * Refer function header for details */ -arm_status arm_convolve_HWC_q15_fast(const q15_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q15_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q15_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_HWC_q15_fast(const q15_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q15_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q15_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; q15_t *pBuffer = bufferA; q15_t *im_buffer = bufferA; q15_t *pOut = Im_out; - if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) + if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0 || dim_im_out & 0x1) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } /* Run the following code for Cortex-M4 and Cortex-M7 */ @@ -215,7 +186,7 @@ arm_status arm_convolve_HWC_q15_fast(const q15_t *Im_in, if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i = 0; i < ch_im_out; i++) @@ -249,9 +220,11 @@ arm_status arm_convolve_HWC_q15_fast(const q15_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // 
EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c index 0d970a3..7d62293 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q15_fast_nonsquare.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_HWC_q15_fast.c * Description: Fast Q15 version of convolution * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,70 +42,34 @@ * @{ */ -/** - * @brief Fast Q15 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. 
- * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in is multiple of 2 - * - * ch_im_out is multipe of 2 - * +/* + * Fast Q15 convolution function (non-sqaure shape) + * Refer function header for details */ -arm_status arm_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q15_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q15_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q15_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q15_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q15_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q15_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; q15_t *pBuffer = bufferA; @@ -113,7 +79,7 @@ arm_status arm_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in, if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } /* Run the following code for Cortex-M4 and Cortex-M7 */ @@ -227,7 +193,7 @@ arm_status arm_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in, if (ch_im_in % 2 != 0 || ch_im_out % 2 != 0) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i = 0; i < ch_im_out; i++) @@ -262,9 +228,11 @@ arm_status arm_convolve_HWC_q15_fast_nonsquare(const q15_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c index fad560a..ed388a5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_RGB.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_HWC_q7_RGB.c * Description: Q7 version of convolution for RGB image * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,61 +42,29 @@ * @{ */ -/** - * @brief Q7 convolution function for RGB image - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in equals 3 - * - * This kernel is written exclusively for convolution with ch_im_in - * equals 3. This applies on the first layer of CNNs which has input - * image with RGB format. +/* + * Q7 convolution function for RGB image + * Refer function header for details */ -arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; @@ -108,7 +78,7 @@ arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, // check if number of input channels is 3 if (ch_im_in != 3) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } // This part implements the im2col function for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) @@ -122,8 +92,7 @@ arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, if (i_ker_y < 0 || i_ker_y >= dim_im_in || i_ker_x < 0 || i_ker_x >= dim_im_in) { /* Equivalent to arm_fill_q15(0, pBuffer, ch_im_in) with assumption: ch_im_in = 3 */ - *__SIMD32(pBuffer) = 0x0; - *(pBuffer + 2) = 0; + arm_memset_q7((q7_t *)pBuffer, (q7_t)0, 3 * sizeof(q15_t)); pBuffer += 3; } else @@ -155,7 +124,8 @@ arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, * version 2, no weight shuffling 
required */ *pBuffer++ = top.half_words[0]; - *__SIMD32(pBuffer) = __PKHBT(bottom.word, top.word, 0); + int32_t packed_word = __PKHBT(bottom.word, top.word, 0); + arm_memcpy_q7((q7_t *)pBuffer, (q7_t *)&packed_word, 4); #else /* * big-endian, | 1st | 2nd | 3rd | omit | @@ -169,7 +139,8 @@ arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, * version 2, no weight shuffling required */ *pBuffer++ = bottom.half_words[0]; - *__SIMD32(pBuffer) = __PKHTB(top.word, bottom.word, 0); + int32_t packed_word = __PKHTB(top.word, bottom.word, 0); + arm_memcpy_q7((q7_t *)pBuffer, (q7_t *)&packed_word, 4); #endif pBuffer += 2; } @@ -236,7 +207,7 @@ arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, // check if number of input channels is 3 if (ch_im_in != 3) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i = 0; i < ch_im_out; i++) @@ -271,9 +242,11 @@ arm_status arm_convolve_HWC_q7_RGB(const q7_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return (ARM_MATH_SUCCESS); + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c index a977287..a74a1a7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_HWC_q7_basic.c * Description: Q7 version of convolution * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,55 +42,29 @@ * @{ */ -/** - * @brief Basic Q7 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * This basic version is designed to work for any input tensor and weight - * dimension. 
+/* + * Basic Q7 convolution function + * Refer function header for details */ -arm_status arm_convolve_HWC_q7_basic(const q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_HWC_q7_basic(const q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; @@ -182,7 +158,7 @@ arm_status arm_convolve_HWC_q7_basic(const q7_t *Im_in, } #else /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - + (void)bufferA; int i, j, k, l, m, n; int conv_out; int in_row, in_col; @@ -219,9 +195,11 @@ arm_status arm_convolve_HWC_q7_basic(const q7_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c index 5847263..9079695 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_basic_nonsquare.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_HWC_q7_basic.c * Description: Q7 version of convolution * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,54 +42,35 @@ * @{ */ -/** - * @brief Basic Q7 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns ARM_MATH_SUCCESS +/* + * Basic Q7 convolution function (non-sqaure shape) + * Refer function header for details + * */ -arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; @@ -221,9 +204,11 @@ arm_status arm_convolve_HWC_q7_basic_nonsquare(const q7_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c index c3b634f..8f28bd6 100644 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_HWC_q7_fast.c * Description: Fast Q7 version of convolution * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,72 +42,29 @@ * @{ */ -/** - * @brief Fast Q7 convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimention - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in is multiple of 4 ( because of the SIMD32 read and swap ) - * - * ch_im_out is multipe of 2 ( bacause 2x2 mat_mult kernel ) - * - * The im2col converts the Q7 tensor input into Q15 column, which is stored in - * bufferA. There is reordering happenning during this im2col process with - * arm_q7_to_q15_reordered_no_shift. For every four elements, the second and - * third elements are swapped. - * - * The computation kernel arm_nn_mat_mult_kernel_q7_q15_reordered does the - * GEMM computation with the reordered columns. - * - * To speed-up the determination of the padding condition, we split the - * computation into 3x3 parts, i.e., {top, mid, bottom} X {left, mid, right}. - * This reduces the total number of boundary condition checks and improves - * the data copying performance. 
+/* + * Fast Q7 convolution function + * Refer function header for details */ -arm_status arm_convolve_HWC_q7_fast(const q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_HWC_q7_fast(const q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; @@ -121,7 +80,7 @@ arm_status arm_convolve_HWC_q7_fast(const q7_t *Im_in, if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } /* @@ -337,7 +296,7 @@ arm_status arm_convolve_HWC_q7_fast(const q7_t *Im_in, if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i = 0; i < ch_im_out; i++) @@ -372,9 +331,11 @@ arm_status arm_convolve_HWC_q7_fast(const q7_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c index 651fe9e..a091be3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_HWC_q7_fast_nonsquare.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_HWC_q7_fast_nonsquare.c * Description: Fast Q7 version of convolution (non-sqaure shape) * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,60 +42,34 @@ * @{ */ -/** - * @brief Fast Q7 convolution function (non-sqaure shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimention x - * @param[in] dim_im_in_y input tensor dimention y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding size x - * @param[in] padding_y padding size y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is multiple of 4 - * ch_im_out is multiple of 2 +/* + * Fast Q7 convolution function (non-sqaure shape) + * Refer function header for details */ -arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ int16_t i_out_y, i_out_x, i_ker_y, i_ker_x; @@ -109,7 +85,7 @@ arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in, if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } /* @@ -334,7 +310,7 @@ arm_status arm_convolve_HWC_q7_fast_nonsquare(const 
q7_t *Im_in, if (ch_im_in % 4 != 0 || ch_im_out % 2 != 0) { /* check if the input dimension meets the constraints */ - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i = 0; i < ch_im_out; i++) @@ -370,9 +346,11 @@ arm_status arm_convolve_HWC_q7_fast_nonsquare(const q7_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_fast_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_fast_s16.c new file mode 100644 index 0000000..26c64fa --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_fast_s16.c @@ -0,0 +1,245 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2010-2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_fast_s16.c + * Description: Optimized s16 version of convolution. + * + * $Date: 19 April 2022 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Basic s16 convolution function. + * + * Refer header file for details. Optimal use case for the DSP/MVE implementation is when input and output channels + * are multiples of 4 or atleast greater than 4. 
+ * + */ + +arm_cmsis_nn_status arm_convolve_fast_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data) +{ + (void)bias_dims; + if (filter_dims->w * filter_dims->h * input_dims->c >= 512) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (ctx->buf == NULL && arm_convolve_s8_get_buffer_size(input_dims, filter_dims) > 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + q15_t *buffer_a = (q15_t *)ctx->buf; + + const int32_t input_batches = input_dims->n; + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + const int32_t input_ch = input_dims->c; + const int32_t kernel_x = filter_dims->w; + const int32_t kernel_y = filter_dims->h; + const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_ch = output_dims->c; + + const int32_t pad_x = conv_params->padding.w; + const int32_t pad_y = conv_params->padding.h; + const int32_t stride_x = conv_params->stride.w; + const int32_t stride_y = conv_params->stride.h; + + const int16_t out_activation_min = conv_params->activation.min; + const int16_t out_activation_max = conv_params->activation.max; + int32_t *output_mult = quant_params->multiplier; + int32_t *output_shift = quant_params->shift; + + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + /* Generate two columns from the input tensor a GEMM computation */ + q15_t *two_column_buf = buffer_a; + q15_t *out = output_data; + /* This part implements the im2col function */ + for (int32_t i_out_y = 0; i_out_y < output_y; i_out_y++) + { + for (int32_t i_out_x = 0; i_out_x < output_x; i_out_x++) + { + for (int32_t i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; + i_ker_y++) + { + for (int32_t i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; + i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + /* Filling 0 for out-of-bound paddings */ + arm_memset_q7((q7_t *)two_column_buf, 0, sizeof(q15_t) * input_ch); + } + else + { + arm_memcpy_q7((q7_t *)two_column_buf, + (const q7_t *)(input_data + (i_ker_y * input_x + i_ker_x) * input_ch), + input_ch * sizeof(q15_t)); + } + two_column_buf += input_ch; + } + } + /* Computation is filed for every 2 columns */ + if (two_column_buf == buffer_a + 2 * input_ch * kernel_y * kernel_x) + { + out = arm_nn_mat_mult_kernel_s16(filter_data, + buffer_a, + output_ch, + output_shift, + output_mult, + out_activation_min, + out_activation_max, + (input_ch * kernel_y * kernel_x), + bias_data, + out); + + /* Counter reset */ + two_column_buf = buffer_a; + } + } + } + + /* Left-over because odd number of output pixels */ + if (two_column_buf != buffer_a) + { + const q7_t *ker_a = filter_data; + int i; + + for (i = 0; i < output_ch; i++) + { + /* Init the accumulator*/ + q31_t sum = 0; + + /* Point to the beginning of the im2col buffer where the input is available as a rearranged column */ + const q15_t *ip_as_col = buffer_a; + + /* 4 multiply and accumulates are done in one loop. 
*/ + uint16_t col_count = (input_ch * kernel_y * kernel_x) >> 2; + + while (col_count) + { + q31_t ker_a1, ker_a2; + q31_t ip_b1, ip_b2; + + ker_a = read_and_pad(ker_a, &ker_a1, &ker_a2); + + ip_b1 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = __SMLAD(ker_a1, ip_b1, sum); + ip_b2 = arm_nn_read_q15x2_ia(&ip_as_col); + sum = __SMLAD(ker_a2, ip_b2, sum); + + col_count--; + } + /* Handle left over mac */ + col_count = input_ch * kernel_y * kernel_x & 0x3; + while (col_count) + { + q7_t ker_a1 = *ker_a++; + q15_t ip_b1 = *ip_as_col++; + sum += ker_a1 * ip_b1; + col_count--; + } + if (bias_data) + { + q31_t reduced_multiplier = REDUCE_MULTIPLIER(output_mult[i]); + q63_t acc_64 = sum + bias_data[i]; + sum = arm_nn_requantize_s64(acc_64, reduced_multiplier, output_shift[i]); + } + else + { + sum = arm_nn_requantize(sum, output_mult[i], output_shift[i]); + } + sum = MAX(sum, out_activation_min); + sum = MIN(sum, out_activation_max); + *out++ = (q15_t)sum; + } + } +#else + (void)input_data; + (void)output_data; + (void)bias_data; + (void)filter_data; + (void)buffer_a; + (void)kernel_x; + (void)kernel_y; + (void)pad_x; + (void)pad_y; + (void)stride_x; + (void)stride_y; + (void)out_activation_min; + (void)out_activation_max; + (void)output_mult; + (void)output_shift; + return ARM_CMSIS_NN_ARG_ERROR; +#endif + /* Advance to the next batch */ + input_data += (input_x * input_y * input_ch); + output_data += (output_x * output_y * output_ch); + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +int32_t arm_convolve_fast_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t); +#else + (void)input_dims; + (void)filter_dims; + return 0; +#endif +} + +/** + * @} end of NNConv group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s16.c new file mode 100644 index 0000000..7d8d14f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s16.c @@ -0,0 +1,160 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2010-2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_s16.c + * Description: s16 version of convolution using symmetric quantization. 
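arm_convolve_fast_s16_get_buffer_size above reserves room for two im2col columns of int16 on DSP-only targets and nothing elsewhere. A quick worked check of that formula with illustrative dimensions:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* illustrative dimensions: 8 input channels, 3x3 kernel */
        int32_t c = 8, kw = 3, kh = 3;
        int32_t scratch = 2 * c * kw * kh * (int32_t)sizeof(int16_t);
        printf("fast s16 im2col scratch: %ld bytes\n", (long)scratch); /* 288 */
        return 0;
    }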
+ * + * $Date: 19 April 2022 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Basic s16 convolution function. + * + * Refer header file for details. Optimal use case for the DSP/MVE implementation is when input and output channels + * are multiples of 4 or atleast greater than 4. + * + */ + +arm_cmsis_nn_status arm_convolve_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data) +{ + (void)bias_dims; + (void)ctx; + + const int32_t input_batches = input_dims->n; + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + const int32_t input_ch = input_dims->c; + const int32_t kernel_x = filter_dims->w; + const int32_t kernel_y = filter_dims->h; + const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_ch = output_dims->c; + + const int32_t pad_x = conv_params->padding.w; + const int32_t pad_y = conv_params->padding.h; + const int32_t stride_x = conv_params->stride.w; + const int32_t stride_y = conv_params->stride.h; + const int32_t dilation_x = conv_params->dilation.w; + const int32_t dilation_y = conv_params->dilation.h; + + const int32_t out_activation_min = conv_params->activation.min; + const int32_t out_activation_max = conv_params->activation.max; + int32_t *output_mult = quant_params->multiplier; + int32_t *output_shift = quant_params->shift; + + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + for (int32_t i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) + { + const q31_t reduced_multiplier = REDUCE_MULTIPLIER(output_mult[i_out_ch]); + + for (int32_t base_idx_y = -pad_y, i_out_y = 0; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) + { + for (int32_t base_idx_x = -pad_x, i_out_x = 0; i_out_x < output_x; base_idx_x += stride_x, i_out_x++) + { + int64_t conv_out_acc = 0; + + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + const int32_t ker_y_start = MAX(0, start_y_max); + const int32_t start_x_max = (-base_idx_x + dilation_x - 1) / dilation_x; + const int32_t ker_x_start = MAX(0, start_x_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + const int32_t ker_y_end = MIN(kernel_y, end_min_y); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + const int32_t ker_x_end = MIN(kernel_x, end_min_x); + + for (int32_t i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + for (int32_t i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + const int32_t in_row = base_idx_y + dilation_y * i_ker_y; + const int32_t in_col = base_idx_x + dilation_x * i_ker_x; + + for (int32_t i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + conv_out_acc += input_data[(in_row * input_x + in_col) * input_ch + i_input_ch] * + filter_data[i_out_ch * input_ch * kernel_y * kernel_x + + (i_ker_y * 
kernel_x + i_ker_x) * input_ch + i_input_ch]; + } + } + } + + if (bias_data) + { + conv_out_acc += bias_data[i_out_ch]; + } + + int32_t conv_out = arm_nn_requantize_s64(conv_out_acc, reduced_multiplier, output_shift[i_out_ch]); + conv_out = MAX(conv_out, out_activation_min); + conv_out = MIN(conv_out, out_activation_max); + output_data[i_out_ch + (i_out_y * output_x + i_out_x) * output_ch] = (int16_t)conv_out; + } + } + } + /* Advance to the next batch */ + input_data += (input_x * input_y * input_ch); + output_data += (output_x * output_y * output_ch); + } + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +int32_t arm_convolve_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ + (void)input_dims; + (void)filter_dims; + return 0; +} + +/** + * @} end of NNConv group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s8.c index 0c2dba7..2782521 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2022 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_convolve_s8.c * Description: s8 version of convolution using symmetric quantization. 
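The s16 reference kernel above clips each kernel window to the valid input region before accumulating, using dilation-aware start and end taps. A small standalone sketch of the same index math, with names of our own and a worked case in main:

    #include <stdint.h>
    #include <stdio.h>

    /* Valid tap range [start, end) so that base + dilation * tap stays inside
     * [0, extent); mirrors the (x + dilation - 1) / dilation ceiling form above. */
    static void clip_taps(int32_t base, int32_t dilation, int32_t kernel,
                          int32_t extent, int32_t *start, int32_t *end)
    {
        int32_t s = (-base + dilation - 1) / dilation;
        int32_t e = (extent - base + dilation - 1) / dilation;
        *start = (s > 0) ? s : 0;
        *end = (e < kernel) ? e : kernel;
    }

    int main(void)
    {
        int32_t start, end;
        /* output row 0 with pad 2: base index -2; 3 taps, dilation 2, 5 input rows */
        clip_taps(-2, 2, 3, 5, &start, &end);
        printf("taps [%ld, %ld)\n", (long)start, (long)end); /* [1, 3) -> rows 0 and 2 */
        return 0;
    }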
* - * $Date: January 26, 2021 - * $Revision: V.2.0.4 + * $Date: 19 April 2022 + * $Revision: V.3.0.0 * * Target Processor: Cortex-M cores * @@ -48,22 +50,27 @@ * */ -arm_status arm_convolve_s8(const cmsis_nn_context *ctx, - const cmsis_nn_conv_params *conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data) +arm_cmsis_nn_status arm_convolve_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data) { (void)bias_dims; + + if (ctx->buf == NULL && arm_convolve_s8_get_buffer_size(input_dims, filter_dims) > 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } q15_t *buffer_a = (q15_t *)ctx->buf; - const uint16_t input_batches = input_dims->n; + const int32_t input_batches = input_dims->n; const uint16_t input_x = input_dims->w; const uint16_t input_y = input_dims->h; const uint16_t input_ch = input_dims->c; @@ -95,26 +102,32 @@ arm_status arm_convolve_s8(const cmsis_nn_context *ctx, int32_t buffer_fill_cnt = 0; int32_t padded = 0; const int32_t num_elem = kernel_x * kernel_y * input_ch; + const int32_t dilation_x = conv_params->dilation.w; + const int32_t dilation_y = conv_params->dilation.h; /* This part implements the im2col function */ for (int i_out_y = 0; i_out_y < output_y; i_out_y++) { for (int i_out_x = 0; i_out_x < output_x; i_out_x++) { - for (int i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; - i_ker_y++) + const int32_t base_idx_x = stride_x * i_out_x - pad_x; + const int32_t base_idx_y = stride_y * i_out_y - pad_y; + + for (int32_t i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) { - for (int i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; - i_ker_x++) + for (int32_t i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) { - if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + const int32_t k_y = base_idx_y + dilation_y * i_ker_y; + const int32_t k_x = base_idx_x + dilation_x * i_ker_x; + + if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x) { memset(im2col_buf, (int8_t)-input_offset, sizeof(q7_t) * input_ch); padded = 1; } else { - arm_memcpy_q7(im2col_buf, input_data + (i_ker_y * input_x + i_ker_x) * input_ch, input_ch); + arm_memcpy_q7(im2col_buf, input_data + (k_y * input_x + k_x) * input_ch, input_ch); } im2col_buf += input_ch; } @@ -126,33 +139,15 @@ arm_status arm_convolve_s8(const cmsis_nn_context *ctx, if (buffer_fill_cnt == 4 && (padded == 0)) { buffer_fill_cnt = 0; - for (int i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - int32_t sum_row; - int32_t acc[4]; - - (void)arm_nn_mat_mul_core_4x_s8( - num_elem, num_elem, (q7_t *)buffer_a, filter_data + num_elem * i_out_ch, &sum_row, acc); - int32x4_t s_offset = vdupq_n_s32(sum_row); - - int32x4_t res = vldrwq_s32(acc); - s_offset = vmulq_n_s32(s_offset, input_offset); - if (bias_data) - { - res = vaddq_n_s32(res, bias_data[i_out_ch]); - } - res = vaddq_s32(res, s_offset); - res = arm_requantize_mve(res, output_mult[i_out_ch], output_shift[i_out_ch]); - res = vaddq_n_s32(res, 
out_offset); - - res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); - res = vminq_s32(res, vdupq_n_s32(out_activation_max)); - - const uint32x4_t scatter_offset = {0, output_ch, output_ch * 2, output_ch * 3}; - vstrbq_scatter_offset_s32(out, scatter_offset, res); - out++; - } - out += (3 * output_ch); + out = arm_nn_mat_mul_core_4x_s8(num_elem, + num_elem, + (q7_t *)buffer_a, + filter_data, + output_ch, + conv_params, + quant_params, + bias_data, + out); im2col_buf = (q7_t *)buffer_a; } else if (buffer_fill_cnt == 4 && (padded != 0)) @@ -196,8 +191,10 @@ arm_status arm_convolve_s8(const cmsis_nn_context *ctx, bias_data, out); } +#else // #if defined(ARM_MATH_MVEI) + const uint16_t dilation_x = conv_params->dilation.w; + const uint16_t dilation_y = conv_params->dilation.h; -#elif defined(ARM_MATH_DSP) int32_t i_out_y, i_out_x, i_ker_y, i_ker_x; /* Generate two columns from the input tensor a GEMM computation */ @@ -209,12 +206,17 @@ arm_status arm_convolve_s8(const cmsis_nn_context *ctx, { for (i_out_x = 0; i_out_x < output_x; i_out_x++) { - for (i_ker_y = i_out_y * stride_y - pad_y; i_ker_y < i_out_y * stride_y - pad_y + kernel_y; i_ker_y++) + const int32_t base_idx_y = stride_y * i_out_y - pad_y; + const int32_t base_idx_x = stride_x * i_out_x - pad_x; + + for (i_ker_y = 0; i_ker_y < kernel_y; i_ker_y++) { - for (i_ker_x = i_out_x * stride_x - pad_x; i_ker_x < i_out_x * stride_x - pad_x + kernel_x; - i_ker_x++) + for (i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) { - if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + const int32_t k_y = base_idx_y + dilation_y * i_ker_y; + const int32_t k_x = base_idx_x + dilation_x * i_ker_x; + + if (k_y < 0 || k_y >= input_y || k_x < 0 || k_x >= input_x) { /* Filling 0 for out-of-bound paddings */ memset(two_column_buf, 0, sizeof(q15_t) * input_ch); @@ -222,10 +224,8 @@ arm_status arm_convolve_s8(const cmsis_nn_context *ctx, else { /* Copying the pixel data to column */ - arm_q7_to_q15_with_offset(input_data + (i_ker_y * input_x + i_ker_x) * input_ch, - two_column_buf, - input_ch, - input_offset); + arm_q7_to_q15_with_offset( + input_data + (k_y * input_x + k_x) * input_ch, two_column_buf, input_ch, input_offset); } two_column_buf += input_ch; } @@ -271,6 +271,7 @@ arm_status arm_convolve_s8(const cmsis_nn_context *ctx, const q15_t *ip_as_col = buffer_a; /* 4 multiply and accumulates are done in one loop. 
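The left-over loop that follows relies on the __SMLAD intrinsic to do two signed 16-bit multiply-accumulates per instruction on packed operands. For readers on cores without the DSP extension, a portable reference of what that intrinsic computes (helper name is ours):

    #include <stdint.h>

    /* Reference model of __SMLAD: acc + lo(x) * lo(y) + hi(x) * hi(y), where
     * lo/hi are the signed 16-bit halves of the packed 32-bit operands
     * (saturation flag behaviour ignored). */
    static int32_t smlad_ref(uint32_t x, uint32_t y, int32_t acc)
    {
        int16_t xl = (int16_t)(x & 0xFFFFu), xh = (int16_t)(x >> 16);
        int16_t yl = (int16_t)(y & 0xFFFFu), yh = (int16_t)(y >> 16);
        return acc + (int32_t)xl * yl + (int32_t)xh * yh;
    }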
*/ +#if defined(ARM_MATH_DSP) uint16_t col_count = (input_ch * kernel_y * kernel_x) >> 2; while (col_count) @@ -289,6 +290,9 @@ arm_status arm_convolve_s8(const cmsis_nn_context *ctx, } /* Handle left over mac */ col_count = input_ch * kernel_y * kernel_x & 0x3; +#else + uint16_t col_count = input_ch * kernel_y * kernel_x; +#endif while (col_count) { q7_t ker_a1 = *ker_a++; @@ -304,77 +308,32 @@ arm_status arm_convolve_s8(const cmsis_nn_context *ctx, *out++ = (q7_t)sum; } } -#else - /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ - (void)buffer_a; - int32_t i_out_ch, i_out_y, i_out_x, i_input_ch, i_ker_y, i_ker_x; - int32_t conv_out; - - for (i_out_ch = 0; i_out_ch < output_ch; i_out_ch++) - { - for (i_out_y = 0; i_out_y < output_y; i_out_y++) - { - for (i_out_x = 0; i_out_x < output_x; i_out_x++) - { - conv_out = 0; - - const int32_t base_idx_y = stride_y * i_out_y - pad_y; - const int32_t base_idx_x = stride_x * i_out_x - pad_x; - - const int32_t ker_y_start = MAX(0, -base_idx_y); - const int32_t ker_x_start = MAX(0, -base_idx_x); - - const int32_t ker_y_end = MIN(kernel_y, input_y - base_idx_y); - const int32_t ker_x_end = MIN(kernel_x, input_x - base_idx_x); - - for (i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) - { - for (i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) - { - const int32_t in_row = base_idx_y + i_ker_y; - const int32_t in_col = base_idx_x + i_ker_x; - for (i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) - { - conv_out += - (input_data[(in_row * input_x + in_col) * input_ch + i_input_ch] + input_offset) * - filter_data[i_out_ch * input_ch * kernel_y * kernel_x + - (i_ker_y * kernel_x + i_ker_x) * input_ch + i_input_ch]; - } - } - } - if (bias_data) - { - conv_out += bias_data[i_out_ch]; - } - conv_out = arm_nn_requantize(conv_out, output_mult[i_out_ch], output_shift[i_out_ch]); - conv_out += out_offset; - conv_out = MAX(conv_out, out_activation_min); - conv_out = MIN(conv_out, out_activation_max); - output_data[i_out_ch + (i_out_y * output_x + i_out_x) * output_ch] = (int8_t)conv_out; - } - } - } -#endif +#endif // #if defined(ARM_MATH_MVEI) /* Advance to the next batch */ input_data += (input_x * input_y * input_ch); output_data += (output_x * output_y * output_ch); } /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } int32_t arm_convolve_s8_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) { -#if defined(ARM_MATH_DSP) - return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t); +#if defined(ARM_MATH_MVEI) + int32_t col_length = input_dims->c * filter_dims->w * filter_dims->h; + // Get number of complete int16 lanes(multiple of 8) for given col_length. 
This is dependent on + // implementation of arm_nn_mat_mult_s8 + col_length = (col_length + 7) / 8; + // 4 -> number of im2col buffers, 8 -> 8 elements per Q register + return 4 * col_length * 8 * (int32_t)sizeof(int8_t); #else - (void)input_dims; - (void)filter_dims; - return 0; + return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t); #endif } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s16.c new file mode 100644 index 0000000..efdbc41 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s16.c @@ -0,0 +1,134 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2021-2022 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_convolve_wrapper_s16.c + * Description: s16 convolution layer wrapper function with the main purpose to call the optimal kernel available in + * cmsis-nn to perform the convolution. + * + * $Date: 19 April 2022 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Convolution layer + * + * Refer header file for details. 
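The s16 wrapper defined next picks a kernel: on cores with the DSP extension but without MVE it takes the fast path when the filter volume is under 512 and there is no dilation, otherwise it falls back to the general s16 kernel. The selection rule, pulled out as a standalone predicate purely for illustration:

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirrors the condition used by arm_convolve_wrapper_s16 when ARM_MATH_DSP
     * is defined and ARM_MATH_MVEI is not; not part of the CMSIS-NN API. */
    static bool use_fast_s16(int32_t kernel_w, int32_t kernel_h, int32_t input_ch,
                             int32_t dilation_w, int32_t dilation_h)
    {
        return (kernel_w * kernel_h * input_ch < 512) &&
               (dilation_w == 1) && (dilation_h == 1);
    }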
+ * + */ + +arm_cmsis_nn_status arm_convolve_wrapper_s16(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int64_t *bias_data, + const cmsis_nn_dims *output_dims, + q15_t *output_data) +{ +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + if (filter_dims->w * filter_dims->h * input_dims->c < 512 && + (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) + { + return arm_convolve_fast_s16(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } + else + { + return arm_convolve_s16(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); + } +#else + return arm_convolve_s16(ctx, + conv_params, + quant_params, + input_dims, + input_data, + filter_dims, + filter_data, + bias_dims, + bias_data, + output_dims, + output_data); +#endif +} + +int32_t arm_convolve_wrapper_s16_get_buffer_size(const cmsis_nn_conv_params *conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)conv_params; + (void)output_dims; + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + if (filter_dims->w * filter_dims->h * input_dims->c < 512 && + (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) + { + return arm_convolve_fast_s16_get_buffer_size(input_dims, filter_dims); + } + + return arm_convolve_s16_get_buffer_size(input_dims, filter_dims); +#else + return arm_convolve_s16_get_buffer_size(input_dims, filter_dims); +#endif +} + +/** + * @} end of NNConv group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c index 4b2175c..9cd898e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_convolve_wrapper_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -22,8 +24,8 @@ * Description: s8 convolution layer wrapper function with the main purpose to call the optimal kernel available in * cmsis-nn to perform the convolution. * - * $Date: 09. 
October 2020 - * $Revision: V.1.0.1 + * $Date: 4 August 2022 + * $Revision: V.2.1.1 * * Target Processor: Cortex-M cores * @@ -47,20 +49,21 @@ * */ -arm_status arm_convolve_wrapper_s8(const cmsis_nn_context *ctx, - const cmsis_nn_conv_params *conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *filter_dims, - const q7_t *filter_data, - const cmsis_nn_dims *bias_dims, - const int32_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data) +arm_cmsis_nn_status arm_convolve_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *filter_dims, + const q7_t *filter_data, + const cmsis_nn_dims *bias_dims, + const int32_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data) { - if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (input_dims->c % 4 == 0) && - (conv_params->stride.w == 1) && (conv_params->stride.h == 1) && (filter_dims->w == 1) && (filter_dims->h == 1)) + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (conv_params->stride.w == 1) && + (conv_params->stride.h == 1) && (filter_dims->w == 1) && (filter_dims->h == 1) && + (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) { return arm_convolve_1x1_s8_fast(ctx, conv_params, @@ -74,8 +77,7 @@ arm_status arm_convolve_wrapper_s8(const cmsis_nn_context *ctx, output_dims, output_data); } - else if ((output_dims->h == 1) && (input_dims->h == 1) && (filter_dims->h == 1) && (output_dims->w % 4 == 0) && - (input_dims->n == 1)) + else if ((input_dims->h == 1) && (output_dims->w % 4 == 0) && conv_params->dilation.w == 1 && (filter_dims->h == 1)) { return arm_convolve_1_x_n_s8(ctx, conv_params, @@ -110,13 +112,14 @@ int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params *conv const cmsis_nn_dims *filter_dims, const cmsis_nn_dims *output_dims) { - if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (input_dims->c % 4 == 0) && - (conv_params->stride.w == 1) && (conv_params->stride.h == 1) && (filter_dims->w == 1) && (filter_dims->h == 1)) + if ((conv_params->padding.w == 0) && (conv_params->padding.h == 0) && (conv_params->stride.w == 1) && + (conv_params->stride.h == 1) && (filter_dims->w == 1) && (filter_dims->h == 1) && + (conv_params->dilation.w == 1 && conv_params->dilation.h == 1)) { return arm_convolve_1x1_s8_fast_get_buffer_size(input_dims); } - else if ((output_dims->h == 1) && (input_dims->h == 1) && (filter_dims->h == 1) && (output_dims->w % 4 == 0) && - (input_dims->n == 1)) + else if ((input_dims->h == 1) && (output_dims->w % 4 == 0) && (conv_params->dilation.w == 1) && + (filter_dims->h == 1)) { return arm_convolve_1_x_n_s8_get_buffer_size(input_dims, filter_dims); } @@ -129,3 +132,5 @@ int32_t arm_convolve_wrapper_s8_get_buffer_size(const cmsis_nn_conv_params *conv /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c index 723b22f..def3b47 100644 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_3x3_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -22,8 +24,8 @@ * Description: Optimized s8 depthwise convolution function for channel * multiplier of 1 and 3x3 kernel size. * - * $Date: 09. October 2020 - * $Revision: V.2.0.1 + * $Date: 19 July 2022 + * $Revision: V.3.1.0 * * Target Processor: Cortex-M CPUs * @@ -49,17 +51,17 @@ * */ -arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *kernel, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) +arm_cmsis_nn_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + q7_t *output) { (void)ctx; (void)bias_dims; @@ -84,14 +86,14 @@ arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, /* Check input constraints input_ch == output_ch */ if (input_ch != output_ch) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } /* Check input constraints pad_x <= 1 */ if (pad_x > 1 || filter_dims->w != 3 || filter_dims->h != 3) { - return ARM_MATH_ARGUMENT_ERROR; + return ARM_CMSIS_NN_ARG_ERROR; } - + const int32_t *bias_base = bias; for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) { for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) @@ -99,12 +101,20 @@ arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, int32_t in_ch = 0; int32_t ker_w_start = MAX(0, -in_w); + bias = bias_base; for (; in_ch <= (input_ch - 4); in_ch += 4) { - int32_t out_buff0 = bias[in_ch + 0]; - int32_t out_buff1 = bias[in_ch + 1]; - int32_t out_buff2 = bias[in_ch + 2]; - int32_t out_buff3 = bias[in_ch + 3]; + int32_t out_buff0 = 0; + int32_t out_buff1 = 0; + int32_t out_buff2 = 0; + int32_t out_buff3 = 0; + if (bias) + { + out_buff0 = *bias++; + out_buff1 = *bias++; + out_buff2 = *bias++; + out_buff3 = *bias++; + } const int8_t *input_ptr = input + (in_h + ker_h_start) * (input_ch * input_x) + in_w * input_ch + in_ch; const int8_t *kernel_ptr = kernel + ker_h_start * (input_ch * 3) + in_ch; @@ -172,7 +182,11 @@ arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, // Leftover for (; in_ch < input_ch; ++in_ch) { - int32_t out_buff = bias[in_ch]; + int32_t out_buff = 0; + if (bias) + { + out_buff = *bias++; + } const int8_t *input_ptr = input + (in_h + ker_h_start) * (input_ch * input_x) + in_w * input_ch + in_ch; const int8_t *kernel_ptr = kernel + ker_h_start * (input_ch * 3) + in_ch; @@ -204,9 
+218,11 @@ arm_status arm_depthwise_conv_3x3_s8(const cmsis_nn_context *ctx, } /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_fast_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_fast_s16.c new file mode 100644 index 0000000..20201b9 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_fast_s16.c @@ -0,0 +1,471 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_fast_s16.c + * Description: Optimized s16 depthwise separable convolution function for + * channel multiplier of 1. + * + * $Date: 6 July 2022 + * $Revision: V.1.1.0 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +/* + * Optimized s16 depthwise convolution function with constraint that in_channel equals out_channel + * + * Refer prototype header file for details. 
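arm_depthwise_conv_fast_s16, defined next, keeps a 64-bit accumulator per output (the bias is int64 as well) and requantizes each channel with a per-channel multiplier and shift before clamping to the activation range. A simplified sketch of that scale, round and clamp step, assuming a Q31 multiplier and glossing over CMSIS-NN's exact rounding and reduced-multiplier details:

    #include <stdint.h>

    /* Simplified model of the per-channel requantization in the s16 kernels:
     * scale the 64-bit accumulator (bias already added) by a Q31 multiplier,
     * apply the power-of-two shift with rounding, then clamp to the activation
     * range. The exact CMSIS-NN rounding/saturation path differs in detail. */
    static int16_t requantize_ref(int64_t acc, int32_t mult_q31, int32_t shift,
                                  int32_t act_min, int32_t act_max)
    {
        int64_t scaled = (acc * (int64_t)mult_q31 + (INT64_C(1) << 30)) >> 31;
        int64_t out = (shift >= 0)
                          ? (scaled << shift)
                          : ((scaled + (INT64_C(1) << (-shift - 1))) >> -shift);
        if (out < act_min) { out = act_min; }
        if (out > act_max) { out = act_max; }
        return (int16_t)out;
    }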
+ * + */ + +arm_cmsis_nn_status arm_depthwise_conv_fast_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *kernel, + const cmsis_nn_dims *bias_dims, + const int64_t *bias, + const cmsis_nn_dims *output_dims, + q15_t *output) +{ + const int32_t input_ch = input_dims->c; + const int32_t output_ch = output_dims->c; + + /* Check input constraints input_ch == output_ch */ + if (input_ch != output_ch) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (filter_dims->w * filter_dims->h >= 512) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (ctx->buf == NULL && arm_depthwise_conv_fast_s16_get_buffer_size(input_dims, filter_dims) > 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + +#if defined(ARM_MATH_DSP) + (void)bias_dims; + const int32_t input_x = input_dims->w; + const int32_t input_y = input_dims->h; + const int32_t input_batches = input_dims->n; + const int32_t kernel_x = filter_dims->w; + const int32_t kernel_y = filter_dims->h; + const int32_t pad_x = dw_conv_params->padding.w; + const int32_t pad_y = dw_conv_params->padding.h; + const int32_t stride_x = dw_conv_params->stride.w; + const int32_t stride_y = dw_conv_params->stride.h; + const int32_t *output_shift = quant_params->shift; + const int32_t *output_mult = quant_params->multiplier; + const int32_t output_x = output_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_activation_min = dw_conv_params->activation.min; + const int32_t output_activation_max = dw_conv_params->activation.max; + q15_t *buffer_a = (q15_t *)ctx->buf; + +#if defined(ARM_MATH_MVEI) + int16_t *lhs_buffer = buffer_a; + int16_t *out = output; + int buffer_count = 0; + const int32_t kernel_size = kernel_x * kernel_y; + + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + /* This part implements the im2col function */ + for (int i_out_y = 0, base_idx_y = -pad_y; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) + { + for (int i_out_x = 0, base_idx_x = -pad_x; i_out_x < output_x; base_idx_x += stride_x, i_out_x++) + { + for (int i_ker_y = base_idx_y; i_ker_y < base_idx_y + kernel_y; i_ker_y++) + { + for (int i_ker_x = base_idx_x; i_ker_x < base_idx_x + kernel_x; i_ker_x++) + { + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + memset(lhs_buffer, (int16_t)0, (uint32_t)(input_ch * sizeof(int16_t))); + } + else + { + arm_memcpy_q15(lhs_buffer, + (int16_t *)(input + (i_ker_y * input_x + i_ker_x) * input_ch), + (uint32_t)(input_ch * sizeof(int16_t))); + } + lhs_buffer += input_ch; + } + } + buffer_count++; + if (buffer_count == 4) + { + lhs_buffer = buffer_a; + + out = arm_nn_depthwise_conv_nt_t_s16(lhs_buffer, + kernel, + input_ch, + output_shift, + output_mult, + output_activation_min, + output_activation_max, + kernel_size, + bias, + out); + buffer_count = 0; + } + } + } + input += input_x * input_y * input_ch; + } + + /* Handle left over buffers */ + lhs_buffer = buffer_a; + for (int i_buf = 0; i_buf < buffer_count; i_buf++) + { + int32_t loop_count = (input_ch + 3) / 4; + int32_t num_ch_to_process = input_ch; + + for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; num_ch_to_process -= 4, offset += 4, i_loop_cnt++) + { + const int8_t *row_0 = kernel + offset; + const int16_t *col_0 = lhs_buffer + (kernel_size * input_ch * i_buf) + offset; + + int32x4_t out_0 = vdupq_n_s32(0); + + for (int i_ker = 0; 
i_ker < kernel_size; i_ker++) + { + const int32x4_t ker_0 = vldrbq_s32(row_0); + + int32x4_t ip_0 = vldrhq_s32(col_0); + out_0 += vmulq_s32(ip_0, ker_0); + + col_0 += input_ch; + row_0 += input_ch; + } + + int64_t in_requantize_0 = (int64_t)out_0[0]; + int64_t in_requantize_1 = (int64_t)out_0[1]; + int64_t in_requantize_2 = (int64_t)out_0[2]; + int64_t in_requantize_3 = (int64_t)out_0[3]; + + if (bias) + { + in_requantize_0 += bias[offset]; + in_requantize_1 += bias[offset + 1]; + in_requantize_2 += bias[offset + 2]; + in_requantize_3 += bias[offset + 3]; + } + + int32_t reduced_multiplier_0 = REDUCE_MULTIPLIER(output_mult[offset]); + int32_t reduced_multiplier_1 = REDUCE_MULTIPLIER(output_mult[offset + 1]); + int32_t reduced_multiplier_2 = REDUCE_MULTIPLIER(output_mult[offset + 2]); + int32_t reduced_multiplier_3 = REDUCE_MULTIPLIER(output_mult[offset + 3]); + + out_0[0] = arm_nn_requantize_s64(in_requantize_0, reduced_multiplier_0, output_shift[offset]); + out_0[1] = arm_nn_requantize_s64(in_requantize_1, reduced_multiplier_1, output_shift[offset + 1]); + out_0[2] = arm_nn_requantize_s64(in_requantize_2, reduced_multiplier_2, output_shift[offset + 2]); + out_0[3] = arm_nn_requantize_s64(in_requantize_3, reduced_multiplier_3, output_shift[offset + 3]); + + out_0 = vmaxq_s32(out_0, vdupq_n_s32(output_activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(output_activation_max)); + + mve_pred16_t p = vctp32q((uint32_t)num_ch_to_process); + vstrhq_p_s32(out, out_0, p); + + out += 4; + } + + const int tail_ch = input_ch & 0x3; + if (tail_ch != 0) + { + out -= (4 - tail_ch); + } + } + +#else // ARM_MATH_DSP + + /* Run the following code in cores using DSP extension */ + q15_t *const col_buffer_start = buffer_a; + q15_t *col_buffer = col_buffer_start; + const int64_t *const bias_start_pos = bias; + const int32_t *const out_mult_start_pos = output_mult; + const int32_t *const out_shift_start_pos = output_shift; + uint16_t row_count; + uint16_t row_shift; + int32_t result; + + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + + /* Out of bounds is only considered for the y axis as it provides a contiguous zero'ing opportunity than + along the x axis */ + const int ker_y_start = MAX(0, -base_idx_y); + /* Condition for kernel end dimension: (base_idx_y + ker_y_end) < input_y */ + const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); + + int32_t index = 0; + if (ker_y_start != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * ker_y_start * sizeof(q15_t)); + index += (kernel_x * input_ch) * ker_y_start; + } + + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + i_ker_y; + + for (int i_ker_x = 0; i_ker_x < kernel_x; i_ker_x++) + { + const int32_t idx_x = base_idx_x + i_ker_x; + + if (idx_x < 0 || idx_x >= input_x) + { + memset(&col_buffer[index], 0, input_ch * sizeof(q15_t)); + } + else + { + arm_memcpy_q15(&col_buffer[index], + input + (idx_y * input_x + idx_x) * input_ch, + input_ch * sizeof(q15_t)); + } + index += input_ch; + } + } + + const int diff = kernel_y - ker_y_end; + if (diff != 0) + { + memset(&col_buffer[index], 0, (kernel_x * input_ch) * diff * sizeof(q15_t)); + } + + row_count = output_ch / 4; + row_shift = 0; + bias = bias_start_pos; + output_mult = 
out_mult_start_pos; + output_shift = out_shift_start_pos; + + while (row_count) + { + q31_t sum_1 = 0; + q31_t sum_2 = 0; + q31_t sum_3 = 0; + q31_t sum_4 = 0; + + int32_t output_mult_1 = REDUCE_MULTIPLIER(output_mult[0]); + int32_t output_mult_2 = REDUCE_MULTIPLIER(output_mult[1]); + int32_t output_mult_3 = REDUCE_MULTIPLIER(output_mult[2]); + int32_t output_mult_4 = REDUCE_MULTIPLIER(output_mult[3]); + output_mult += 4; + + uint16_t col_count = (kernel_x * kernel_y) / 2; + q15_t *col_pos = col_buffer_start + row_shift; + const q7_t *row_pos = kernel + row_shift; + row_shift += 4; + + while (col_count) + { + /* General idea is to read 4 + 4 (input, kernel) pair and re-arrange them in the right order to + use in a SMLAD instruction . One run of this loop produces 4 partial outputs with 8 MACs. */ + q31_t row_a1, row_a2, row_b1, row_b2, col_a, row_c, col_b, col_c; + + /* Read 4 weights */ + row_b1 = arm_nn_read_q7x4(row_pos); + row_a1 = arm_nn_read_q7x4(row_pos + input_ch); + col_a = arm_nn_read_q15x2(col_pos); + col_b = arm_nn_read_q15x2(col_pos + input_ch); + + row_a2 = __SXTB16(row_b1); + row_b1 = __SXTB16(__ROR(row_b1, 8)); + + row_b2 = __SXTB16(row_a1); + row_a1 = __SXTB16(__ROR(row_a1, 8)); + + col_c = __PKHBT(col_b, col_a, 16); + col_a = __PKHTB(col_b, col_a, 16); + row_c = __PKHBT(row_b2, row_a2, 16); + sum_1 = __SMLAD(col_c, row_c, sum_1); + + row_c = __PKHBT(row_b1, row_a1, 16); + sum_2 = __SMLAD(col_a, row_c, sum_2); + + col_a = arm_nn_read_q15x2(col_pos + 2); + col_b = arm_nn_read_q15x2(col_pos + input_ch + 2); + + col_c = __PKHBT(col_b, col_a, 16); + col_a = __PKHTB(col_b, col_a, 16); + row_c = __PKHTB(row_a2, row_b2, 16); + sum_3 = __SMLAD(col_c, row_c, sum_3); + + row_c = __PKHTB(row_a1, row_b1, 16); + sum_4 = __SMLAD(col_a, row_c, sum_4); + + row_pos += input_ch << 1; + col_pos += input_ch << 1; + col_count--; + } + + col_count = (kernel_x * kernel_y) & 0x1; + while (col_count) + { + sum_1 += row_pos[0] * col_pos[0]; + sum_2 += row_pos[1] * col_pos[1]; + sum_3 += row_pos[2] * col_pos[2]; + sum_4 += row_pos[3] * col_pos[3]; + + row_pos += input_ch; + col_pos += input_ch; + + col_count--; + } + + int64_t acc_1 = sum_1; + int64_t acc_2 = sum_2; + int64_t acc_3 = sum_3; + int64_t acc_4 = sum_4; + + if (bias) + { + acc_1 += *bias++; + acc_2 += *bias++; + acc_3 += *bias++; + acc_4 += *bias++; + } + + result = arm_nn_requantize_s64(acc_1, output_mult_1, *output_shift++); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (q15_t)result; + + result = arm_nn_requantize_s64(acc_2, output_mult_2, *output_shift++); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (q15_t)result; + + result = arm_nn_requantize_s64(acc_3, output_mult_3, *output_shift++); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (q15_t)result; + + result = arm_nn_requantize_s64(acc_4, output_mult_4, *output_shift++); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (q15_t)result; + + row_count--; + } + + row_count = output_ch & 0x3; + while (row_count) + { + q15_t *col_pos = col_buffer_start + row_shift; + const q7_t *row_pos = kernel + row_shift; + q31_t sum = 0; + const uint16_t col_count = (kernel_x * kernel_y); + row_shift += 1; + + for (int i = 0; i < col_count; i++) + { + sum += row_pos[i * input_ch] * col_pos[i * input_ch]; + } + int64_t acc = sum; + if (bias) + { + acc += 
*bias++; + } + result = arm_nn_requantize_s64(acc, REDUCE_MULTIPLIER(*output_mult), *output_shift++); + output_mult++; + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (q15_t)result; + + row_count--; + } + // clear counter and pointers + col_buffer = col_buffer_start; + } + } + + /* Advance to the next batch */ + input += (input_x * input_y * input_ch); + } +#endif +#else + /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + return arm_depthwise_conv_s16(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + kernel, + bias_dims, + bias, + output_dims, + output); +#endif /* ARM_MATH_MVEI | ARM_MATH_DSP */ + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +int32_t arm_depthwise_conv_fast_s16_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) +{ +#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_MVEI) + /* The + 8 accounts for a worst case out of bounds read of the lhs buffers in the *_nt_t_* function. */ + return 4 * input_dims->c * filter_dims->w * filter_dims->h * sizeof(int16_t) + 8; +#else // ARM_MATH_DSP + return input_dims->c * filter_dims->w * filter_dims->h * sizeof(int16_t); +#endif +#else + (void)input_dims; + (void)filter_dims; + return 0; +#endif +} + +/** + * @} end of NNConv group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s16.c new file mode 100644 index 0000000..e0e39ca --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s16.c @@ -0,0 +1,296 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_s16.c + * Description: s16 version of depthwise convolution. 
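The s16 depthwise kernels in this file support a channel multiplier: output channel c * ch_mult + m reads only input channel c. A minimal reference of that mapping under simplifying assumptions (stride 1, no padding, no dilation, quantization omitted), using the same HWC input and [kh][kw][ch * ch_mult] kernel layout as the index math below:

    #include <stdint.h>

    /* Reference depthwise convolution with a channel multiplier; purely
     * illustrative, accumulators are returned unscaled as int64. */
    static void depthwise_ref(const int16_t *in, int w, int h, int ch,
                              const int8_t *kern, int kw, int kh, int ch_mult,
                              int64_t *out /* (h-kh+1) x (w-kw+1) x ch*ch_mult */)
    {
        int out_w = w - kw + 1;
        int out_h = h - kh + 1;
        for (int oy = 0; oy < out_h; oy++) {
            for (int ox = 0; ox < out_w; ox++) {
                for (int c = 0; c < ch; c++) {
                    for (int m = 0; m < ch_mult; m++) {
                        int oc = c * ch_mult + m;        /* output channel index */
                        int64_t acc = 0;
                        for (int ky = 0; ky < kh; ky++) {
                            for (int kx = 0; kx < kw; kx++) {
                                acc += (int64_t)in[((oy + ky) * w + (ox + kx)) * ch + c] *
                                       kern[(ky * kw + kx) * ch * ch_mult + oc];
                            }
                        }
                        out[(oy * out_w + ox) * ch * ch_mult + oc] = acc;
                    }
                }
            }
        }
    }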
+ * + * $Date: 19 April 2022 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +static void __attribute__((unused)) depthwise_conv_s16_mult_4_s16(const int16_t *input, + const int32_t input_x, + const int32_t input_y, + const int32_t input_ch, + const int8_t *kernel, + const int32_t output_ch, + const int32_t ch_mult, + const int32_t kernel_x, + const int32_t kernel_y, + const int32_t pad_x, + const int32_t pad_y, + const int32_t stride_x, + const int32_t stride_y, + const int64_t *bias, + int16_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t output_x, + const int32_t output_y, + const int32_t output_activation_min, + const int32_t output_activation_max) +{ + for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) + { + for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) + { + for (int32_t in_ch = 0, out_ch = 0, ker_w_start = MAX(0, -in_w); out_ch < output_ch; + ++in_ch, out_ch += ch_mult) + { + for (int mult_tile = 0; mult_tile < ch_mult; mult_tile += 4) + { + int32_t out_buff32[4] = {REDUCE_MULTIPLIER(output_mult[out_ch + 0 + mult_tile]), + REDUCE_MULTIPLIER(output_mult[out_ch + 1 + mult_tile]), + REDUCE_MULTIPLIER(output_mult[out_ch + 2 + mult_tile]), + REDUCE_MULTIPLIER(output_mult[out_ch + 3 + mult_tile])}; + + int64_t out_buff[4] = {0, 0, 0, 0}; + + if (bias) + { + out_buff[0] = bias[out_ch + 0 + mult_tile]; + out_buff[1] = bias[out_ch + 1 + mult_tile]; + out_buff[2] = bias[out_ch + 2 + mult_tile]; + out_buff[3] = bias[out_ch + 3 + mult_tile]; + } + + for (int32_t ker_h = ker_h_start; ker_h < MIN(kernel_y, input_y - in_h); ++ker_h) + { + int32_t ker_idx = ker_h * (output_ch * kernel_x) + ker_w_start * output_ch + out_ch; + int32_t in_idx = (in_h + ker_h) * (input_ch * input_x) + in_w * input_ch + in_ch; +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) +#pragma clang loop unroll(disable) +#endif + for (int32_t ker_w = ker_w_start; ker_w < MIN(kernel_x, input_x - in_w); + ++ker_w, ker_idx += output_ch) + { + // TODO: Unroll of 4 with 64 bit accumulator will probably result in too much register + // spills. Try with unroll of 2 when enabling this. 
+ int32_t in_val = input[in_idx + ker_w * input_ch]; + out_buff[0] += in_val * kernel[ker_idx + 0 + mult_tile]; + out_buff[1] += in_val * kernel[ker_idx + 1 + mult_tile]; + out_buff[2] += in_val * kernel[ker_idx + 2 + mult_tile]; + out_buff[3] += in_val * kernel[ker_idx + 3 + mult_tile]; + } + } + + out_buff32[0] = + arm_nn_requantize_s64(out_buff[0], out_buff32[0], output_shift[out_ch + 0 + mult_tile]); + out_buff32[1] = + arm_nn_requantize_s64(out_buff[1], out_buff32[1], output_shift[out_ch + 1 + mult_tile]); + out_buff32[2] = + arm_nn_requantize_s64(out_buff[2], out_buff32[2], output_shift[out_ch + 2 + mult_tile]); + out_buff32[3] = + arm_nn_requantize_s64(out_buff[3], out_buff32[3], output_shift[out_ch + 3 + mult_tile]); + + out_buff32[0] = MIN(MAX(out_buff32[0], output_activation_min), output_activation_max); + out_buff32[1] = MIN(MAX(out_buff32[1], output_activation_min), output_activation_max); + out_buff32[2] = MIN(MAX(out_buff32[2], output_activation_min), output_activation_max); + out_buff32[3] = MIN(MAX(out_buff32[3], output_activation_min), output_activation_max); + + output[out_idx++] = (int16_t)out_buff32[0]; + output[out_idx++] = (int16_t)out_buff32[1]; + output[out_idx++] = (int16_t)out_buff32[2]; + output[out_idx++] = (int16_t)out_buff32[3]; + } + } + } + } +} + +static void depthwise_conv_s16_generic_s16(const int16_t *input, + const uint16_t input_batches, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const int8_t *kernel, + const uint16_t ch_mult, + const uint16_t kernel_x, + const uint16_t kernel_y, + const uint16_t pad_x, + const uint16_t pad_y, + const uint16_t stride_x, + const uint16_t stride_y, + const int64_t *bias, + int16_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_activation_min, + const int32_t output_activation_max, + const uint16_t dilation_x, + const uint16_t dilation_y) + +{ + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + { + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + { + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + { + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) + { + const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; + + const q31_t reduced_multiplier = REDUCE_MULTIPLIER(output_mult[idx_out_ch]); + int64_t acc_0 = 0; + + int ker_y_start; + int ker_x_start; + int ker_y_end; + int ker_x_end; + + if (dilation_x > 1) + { + const int32_t start_x_max = (-base_idx_x + dilation_x - 1) / dilation_x; + ker_x_start = MAX(0, start_x_max); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + ker_x_end = MIN(kernel_x, end_min_x); + } + else + { + ker_x_start = MAX(0, -base_idx_x); + ker_x_end = MIN(kernel_x, input_x - base_idx_x); + } + + if (dilation_y > 1) + { + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + ker_y_start = MAX(0, start_y_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + ker_y_end = MIN(kernel_y, end_min_y); + } + else + { + ker_y_start = MAX(0, -base_idx_y); + ker_y_end = MIN(kernel_y, input_y - base_idx_y); + } + + if (bias) + { + acc_0 = bias[idx_out_ch]; + } + + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + 
dilation_y * i_ker_y; + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + const int32_t idx_x = base_idx_x + dilation_x * i_ker_x; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + int32_t ker_idx_0 = (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; + + acc_0 += input[idx_0] * kernel[ker_idx_0]; + } + } + + /* Requantize and clamp output to provided range */ + int32_t result = arm_nn_requantize_s64(acc_0, reduced_multiplier, output_shift[idx_out_ch]); + result = MAX(result, output_activation_min); + result = MIN(result, output_activation_max); + *output++ = (int16_t)result; + } + } + } + } + /* Advance to the next batch */ + input += (input_x * input_y * input_ch); + } +} + +/* + * Basic s16 depthwise convolution function. + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_depthwise_conv_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *kernel, + const cmsis_nn_dims *bias_dims, + const int64_t *bias, + const cmsis_nn_dims *output_dims, + q15_t *output) +{ + const uint16_t dilation_x = dw_conv_params->dilation.w; + const uint16_t dilation_y = dw_conv_params->dilation.h; + + (void)bias_dims; + (void)ctx; + + depthwise_conv_s16_generic_s16(input, + input_dims->n, + input_dims->w, + input_dims->h, + input_dims->c, + kernel, + dw_conv_params->ch_mult, + filter_dims->w, + filter_dims->h, + dw_conv_params->padding.w, + dw_conv_params->padding.h, + dw_conv_params->stride.w, + dw_conv_params->stride.h, + bias, + output, + quant_params->shift, + quant_params->multiplier, + output_dims->w, + output_dims->h, + dw_conv_params->activation.min, + dw_conv_params->activation.max, + dilation_x, + dilation_y); + + /* Return to application */ + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNConv group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c index 8a1f7e8..862e87f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -19,10 +21,10 @@ /* ---------------------------------------------------------------------- * Project: CMSIS NN Library * Title: arm_depthwise_conv_s8.c - * Description: s8 version of depthwise convolution. + * Description: s8 version of depthwise convolution. * - * $Date: 09. 
October 2020 - * $Revision: V.2.0.1 + * $Date: 29 July 2022 + * $Revision: V.3.0.3 * * Target Processor: Cortex-M CPUs * @@ -40,67 +42,83 @@ * @{ */ -static void depthwise_conv_s8_mult_4(const int8_t *input, - const int32_t input_x, - const int32_t input_y, - const int32_t input_ch, - const int8_t *kernel, - const int32_t output_ch, - const int32_t ch_mult, - const int32_t kernel_x, - const int32_t kernel_y, - const int32_t pad_x, - const int32_t pad_y, - const int32_t stride_x, - const int32_t stride_y, - const int32_t *bias, - int8_t *output, - const int32_t *output_shift, - const int32_t *output_mult, - const int32_t output_x, - const int32_t output_y, - const int32_t output_offset, - const int32_t input_offset, - const int32_t output_activation_min, - const int32_t output_activation_max) +#if !defined(__ARMCC_VERSION) +__attribute__((optimize("no-unroll-loops"))) +#endif +static void +depthwise_conv_s8_mult_4(const int8_t *input, + const int32_t input_x, + const int32_t input_y, + const int32_t input_ch, + const int8_t *kernel, + const int32_t output_ch, + const int32_t ch_mult, + const int32_t kernel_x, + const int32_t kernel_y, + const int32_t pad_x, + const int32_t pad_y, + const int32_t stride_x, + const int32_t stride_y, + const int32_t *bias, + int8_t *output, + const int32_t *output_shift, + const int32_t *output_mult, + const int32_t output_x, + const int32_t output_y, + const int32_t output_offset, + const int32_t input_offset, + const int32_t output_activation_min, + const int32_t output_activation_max) { - for (int32_t in_h = -pad_y, out_h = 0, out_idx = 0; out_h < output_y; in_h += stride_y, ++out_h) + const int32_t *bias_base = bias; + const int32_t *mult_base = output_mult; + const int32_t *shift_base = output_shift; + const int8_t *kernel_base = kernel; + + for (int32_t in_h = -pad_y, out_h = 0; out_h < output_y; in_h += stride_y, ++out_h) { for (int32_t in_w = -pad_x, out_w = 0, ker_h_start = MAX(0, -in_h); out_w < output_x; in_w += stride_x, ++out_w) { + bias = bias_base; + output_mult = mult_base; + output_shift = shift_base; for (int32_t in_ch = 0, out_ch = 0, ker_w_start = MAX(0, -in_w); out_ch < output_ch; ++in_ch, out_ch += ch_mult) { for (int mult_tile = 0; mult_tile < ch_mult; mult_tile += 4) { - int32_t out_buff[4]; - - out_buff[0] = bias[out_ch + 0 + mult_tile]; - out_buff[1] = bias[out_ch + 1 + mult_tile]; - out_buff[2] = bias[out_ch + 2 + mult_tile]; - out_buff[3] = bias[out_ch + 3 + mult_tile]; + int32_t out_buff[4] = {0, 0, 0, 0}; + if (bias) + { + out_buff[0] = *bias++; + out_buff[1] = *bias++; + out_buff[2] = *bias++; + out_buff[3] = *bias++; + } for (int32_t ker_h = ker_h_start; ker_h < MIN(kernel_y, input_y - in_h); ++ker_h) { int32_t ker_idx = ker_h * (output_ch * kernel_x) + ker_w_start * output_ch + out_ch; + kernel = kernel_base + mult_tile + ker_idx; int32_t in_idx = (in_h + ker_h) * (input_ch * input_x) + in_w * input_ch + in_ch; - +#if defined(__ARMCC_VERSION) && (__ARMCC_VERSION >= 6010050) +#pragma clang loop unroll(disable) +#endif for (int32_t ker_w = ker_w_start; ker_w < MIN(kernel_x, input_x - in_w); - ++ker_w, ker_idx += output_ch) + ++ker_w, kernel += output_ch) { int32_t in_val = input[in_idx + ker_w * input_ch] + input_offset; - out_buff[0] += in_val * kernel[ker_idx + 0 + mult_tile]; - out_buff[1] += in_val * kernel[ker_idx + 1 + mult_tile]; - out_buff[2] += in_val * kernel[ker_idx + 2 + mult_tile]; - out_buff[3] += in_val * kernel[ker_idx + 3 + mult_tile]; + out_buff[0] += in_val * kernel[0]; + out_buff[1] += in_val * kernel[1]; + 
out_buff[2] += in_val * kernel[2]; + out_buff[3] += in_val * kernel[3]; } } #if defined(ARM_MATH_MVEI) - (void)out_idx; int32x4_t res = vldrwq_s32(out_buff); - res = arm_requantize_mve_32x4(res, - vldrwq_s32(&output_mult[out_ch + mult_tile]), - vldrwq_s32(&output_shift[out_ch + mult_tile])); + res = arm_requantize_mve_32x4(res, vldrwq_s32(output_mult), vldrwq_s32(output_shift)); + output_mult += 4; + output_shift += 4; res = vaddq_n_s32(res, output_offset); res = vmaxq_s32(res, vdupq_n_s32(output_activation_min)); @@ -108,14 +126,10 @@ static void depthwise_conv_s8_mult_4(const int8_t *input, vstrbq_s32(output, res); output += 4; #else - out_buff[0] = arm_nn_requantize( - out_buff[0], output_mult[out_ch + 0 + mult_tile], output_shift[out_ch + 0 + mult_tile]); - out_buff[1] = arm_nn_requantize( - out_buff[1], output_mult[out_ch + 1 + mult_tile], output_shift[out_ch + 1 + mult_tile]); - out_buff[2] = arm_nn_requantize( - out_buff[2], output_mult[out_ch + 2 + mult_tile], output_shift[out_ch + 2 + mult_tile]); - out_buff[3] = arm_nn_requantize( - out_buff[3], output_mult[out_ch + 3 + mult_tile], output_shift[out_ch + 3 + mult_tile]); + out_buff[0] = arm_nn_requantize(out_buff[0], *output_mult++, *output_shift++); + out_buff[1] = arm_nn_requantize(out_buff[1], *output_mult++, *output_shift++); + out_buff[2] = arm_nn_requantize(out_buff[2], *output_mult++, *output_shift++); + out_buff[3] = arm_nn_requantize(out_buff[3], *output_mult++, *output_shift++); out_buff[0] += output_offset; out_buff[1] += output_offset; @@ -127,10 +141,10 @@ static void depthwise_conv_s8_mult_4(const int8_t *input, out_buff[2] = MIN(MAX(out_buff[2], output_activation_min), output_activation_max); out_buff[3] = MIN(MAX(out_buff[3], output_activation_min), output_activation_max); - output[out_idx++] = (int8_t)out_buff[0]; - output[out_idx++] = (int8_t)out_buff[1]; - output[out_idx++] = (int8_t)out_buff[2]; - output[out_idx++] = (int8_t)out_buff[3]; + *output++ = (int8_t)out_buff[0]; + *output++ = (int8_t)out_buff[1]; + *output++ = (int8_t)out_buff[2]; + *output++ = (int8_t)out_buff[3]; #endif } @@ -140,6 +154,7 @@ static void depthwise_conv_s8_mult_4(const int8_t *input, } static void depthwise_conv_s8_generic(const q7_t *input, + const uint16_t input_batches, const uint16_t input_x, const uint16_t input_y, const uint16_t input_ch, @@ -161,53 +176,92 @@ static void depthwise_conv_s8_generic(const q7_t *input, const int32_t output_offset, const int32_t input_offset, const int32_t output_activation_min, - const int32_t output_activation_max) + const int32_t output_activation_max, + const uint16_t dilation_x, + const uint16_t dilation_y) + { (void)output_ch; int i_out = 0; - for (int i_out_y = 0; i_out_y < output_y; i_out_y++) + int i_batch; + + for (i_batch = 0; i_batch < input_batches; i_batch++) { - const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; - for (int i_out_x = 0; i_out_x < output_x; i_out_x++) + for (int i_out_y = 0; i_out_y < output_y; i_out_y++) { - const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; - for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) + const int16_t base_idx_y = (i_out_y * stride_y) - pad_y; + for (int i_out_x = 0; i_out_x < output_x; i_out_x++) { - for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) + const int16_t base_idx_x = (i_out_x * stride_x) - pad_x; + for (int i_input_ch = 0; i_input_ch < input_ch; i_input_ch++) { - const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; - int32_t acc_0; - /* Condition for kernel start dimension: (base_idx_ + ker__start) 
>= 0 */ - const int ker_y_start = MAX(0, -base_idx_y); - const int ker_x_start = MAX(0, -base_idx_x); - /* Condition for kernel end dimension: (base_idx_ + ker__end) < input_ */ - const int ker_y_end = MIN(kernel_y, input_y - base_idx_y); - const int ker_x_end = MIN(kernel_x, input_x - base_idx_x); - acc_0 = bias[idx_out_ch]; - - for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + for (int i_ch_mult = 0; i_ch_mult < ch_mult; i_ch_mult++) { - const int32_t idx_y = base_idx_y + i_ker_y; - for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + const int idx_out_ch = i_ch_mult + i_input_ch * ch_mult; + int32_t acc_0 = 0; + + int ker_y_start; + int ker_x_start; + int ker_y_end; + int ker_x_end; + + if (dilation_x > 1) { - const int32_t idx_x = base_idx_x + i_ker_x; - int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; - int32_t ker_idx_0 = (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; + const int32_t start_x_max = (-base_idx_x + dilation_x - 1) / dilation_x; + ker_x_start = MAX(0, start_x_max); + const int32_t end_min_x = (input_x - base_idx_x + dilation_x - 1) / dilation_x; + ker_x_end = MIN(kernel_x, end_min_x); + } + else + { + ker_x_start = MAX(0, -base_idx_x); + ker_x_end = MIN(kernel_x, input_x - base_idx_x); + } - acc_0 += (input[idx_0] + input_offset) * kernel[ker_idx_0]; + if (dilation_y > 1) + { + const int32_t start_y_max = (-base_idx_y + dilation_y - 1) / dilation_y; + ker_y_start = MAX(0, start_y_max); + const int32_t end_min_y = (input_y - base_idx_y + dilation_y - 1) / dilation_y; + ker_y_end = MIN(kernel_y, end_min_y); + } + else + { + ker_y_start = MAX(0, -base_idx_y); + ker_y_end = MIN(kernel_y, input_y - base_idx_y); + } + + if (bias) + { + acc_0 = bias[idx_out_ch]; } - } - /* Requantize and clamp output to provided range */ - acc_0 = arm_nn_requantize(acc_0, output_mult[idx_out_ch], output_shift[idx_out_ch]); - acc_0 += output_offset; - acc_0 = MAX(acc_0, output_activation_min); - acc_0 = MIN(acc_0, output_activation_max); + for (int i_ker_y = ker_y_start; i_ker_y < ker_y_end; i_ker_y++) + { + const int32_t idx_y = base_idx_y + dilation_y * i_ker_y; + for (int i_ker_x = ker_x_start; i_ker_x < ker_x_end; i_ker_x++) + { + const int32_t idx_x = base_idx_x + dilation_x * i_ker_x; + int32_t idx_0 = (idx_y * input_x + idx_x) * input_ch + i_input_ch; + int32_t ker_idx_0 = (i_ker_y * kernel_x + i_ker_x) * (input_ch * ch_mult) + idx_out_ch; + + acc_0 += (input[idx_0] + input_offset) * kernel[ker_idx_0]; + } + } + + /* Requantize and clamp output to provided range */ + acc_0 = arm_nn_requantize(acc_0, output_mult[idx_out_ch], output_shift[idx_out_ch]); + acc_0 += output_offset; + acc_0 = MAX(acc_0, output_activation_min); + acc_0 = MIN(acc_0, output_activation_max); - output[i_out++] = acc_0; + output[i_out++] = acc_0; + } } } } + /* Advance to the next batch */ + input += (input_x * input_y * input_ch); } } @@ -218,23 +272,26 @@ static void depthwise_conv_s8_generic(const q7_t *input, * Optimization using DSP extension is not available for the generic case where channel multiplier is > 1. 
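Editor's note: the hunk above derives the valid kernel-tap range for a dilated window with a ceiling division on the (possibly negative) window origin. Below is a standalone sketch of that bound computation, using a hypothetical helper name and example numbers chosen purely for illustration; the formula is the one added in the patch.

```c
#include <assert.h>
#include <stdint.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical helper mirroring the bounds above: keep only the kernel taps whose
 * dilated input column base_idx_x + dilation_x * tap stays inside [0, input_x). */
static void dilated_kernel_bounds(int32_t base_idx_x, int32_t dilation_x, int32_t kernel_x,
                                  int32_t input_x, int32_t *ker_x_start, int32_t *ker_x_end)
{
    /* ceil(-base_idx_x / dilation_x): first tap at or past column 0 */
    *ker_x_start = MAX(0, (-base_idx_x + dilation_x - 1) / dilation_x);
    /* ceil((input_x - base_idx_x) / dilation_x): first tap past the right edge */
    *ker_x_end = MIN(kernel_x, (input_x - base_idx_x + dilation_x - 1) / dilation_x);
}

int main(void)
{
    int32_t start, end;
    /* Window origin 3 columns left of the input (padding), dilation 3, 5-tap kernel, 16 columns. */
    dilated_kernel_bounds(-3, 3, 5, 16, &start, &end);
    assert(start == 1); /* tap 0 would read column -3; tap 1 reads column 0 */
    assert(end == 5);   /* tap 4 reads column -3 + 3 * 4 = 9, still inside the 16-column row */
    return 0;
}
```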
* */ -arm_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *kernel, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) +arm_cmsis_nn_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + q7_t *output) { - (void)dw_conv_params->dilation; + const uint16_t dilation_x = dw_conv_params->dilation.w; + const uint16_t dilation_y = dw_conv_params->dilation.h; + (void)bias_dims; (void)ctx; - if (dw_conv_params->ch_mult % 4 == 0) + if (dw_conv_params->ch_mult % 4 == 0 && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) { depthwise_conv_s8_mult_4(input, input_dims->w, @@ -263,6 +320,7 @@ arm_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, else { depthwise_conv_s8_generic(input, + input_dims->n, input_dims->w, input_dims->h, input_dims->c, @@ -284,13 +342,17 @@ arm_status arm_depthwise_conv_s8(const cmsis_nn_context *ctx, dw_conv_params->output_offset, dw_conv_params->input_offset, dw_conv_params->activation.min, - dw_conv_params->activation.max); + dw_conv_params->activation.max, + dilation_x, + dilation_y); } /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c index 56bd0d6..fc12e72 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_s8_opt.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -22,8 +24,8 @@ * Description: Optimized s8 depthwise separable convolution function for * channel multiplier of 1. 
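Editor's note: the s8 kernels above finish every accumulator the same way: requantize with a per-channel multiplier and shift, add the output offset, clamp to the activation range, narrow to int8. The sketch below is a scalar reference of that epilogue under the usual TFLite-style fixed-point convention; arm_nn_requantize() in the SDK is the authoritative implementation and its rounding at the edges may differ slightly from this approximation.

```c
#include <stdint.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Reference requantization: roughly round(acc * multiplier * 2^shift / 2^31),
 * with the multiplier in Q31 and a per-channel power-of-two shift. */
static int32_t ref_requantize(int32_t acc, int32_t multiplier, int32_t shift)
{
    const int32_t left_shift = shift > 0 ? shift : 0;
    const int32_t right_shift = shift > 0 ? 0 : -shift;

    /* rounding high multiply, keeping the top 32 bits of the 64-bit product */
    const int64_t prod = ((int64_t)acc << left_shift) * multiplier;
    int32_t result = (int32_t)((prod + (1LL << 30)) >> 31);

    if (right_shift > 0)
    {
        /* rounding divide by a power of two */
        result = (result + (1 << (right_shift - 1))) >> right_shift;
    }
    return result;
}

/* Epilogue applied per output element in the s8 kernels above. */
static int8_t finish_s8(int32_t acc, int32_t mult, int32_t shift,
                        int32_t out_offset, int32_t act_min, int32_t act_max)
{
    int32_t out = ref_requantize(acc, mult, shift) + out_offset;
    out = MAX(out, act_min);
    out = MIN(out, act_max);
    return (int8_t)out;
}
```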
* - * $Date: January 26, 2021 - * $Revision: V.2.0.3 + * $Date: 27 July 2022 + * $Revision: V.3.1.0 * * Target Processor: Cortex-M CPUs * @@ -48,28 +50,34 @@ * */ -arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *kernel, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) +arm_cmsis_nn_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + q7_t *output) { const int32_t input_ch = input_dims->c; const int32_t output_ch = output_dims->c; - /* Check input constraints input_ch == output_ch */ + /* Check depth multiplier is 1 */ if (input_ch != output_ch) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; + } + + if (ctx->buf == NULL && arm_depthwise_conv_s8_opt_get_buffer_size(input_dims, filter_dims) > 0) + { + return ARM_CMSIS_NN_ARG_ERROR; } #ifdef ARM_MATH_DSP + (void)bias_dims; const int32_t input_x = input_dims->w; const int32_t input_y = input_dims->h; const int32_t kernel_x = filter_dims->w; @@ -89,7 +97,6 @@ arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, q15_t *buffer_a = (q15_t *)ctx->buf; #ifdef ARM_MATH_MVEI - (void)bias_dims; /* Generate two columns from the input tensor */ q7_t *lhs_buffer = (q7_t *)buffer_a; q7_t *out = output; @@ -97,116 +104,133 @@ arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, int buffer_count = 0; const int32_t kernel_size = kernel_x * kernel_y; - /* This part implements the im2col function */ - for (int i_out_y = 0, base_idx_y = -pad_y; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) + const int32_t ch_loop = (input_ch + (CH_IN_BLOCK_MVE - 1)) / CH_IN_BLOCK_MVE; + int32_t remaining_ch = output_ch; + int32_t active_ch = MIN(CH_IN_BLOCK_MVE, remaining_ch); + remaining_ch -= CH_IN_BLOCK_MVE; + + for (int i_ch = 0; i_ch < ch_loop; i_ch++) { - for (int i_out_x = 0, base_idx_x = -pad_x; i_out_x < output_x; base_idx_x += stride_x, i_out_x++) + out = output + i_ch * CH_IN_BLOCK_MVE; + const int8_t *input_slice = input + (i_ch * CH_IN_BLOCK_MVE); + + for (int i_out_y = 0, base_idx_y = -pad_y; i_out_y < output_y; base_idx_y += stride_y, i_out_y++) { - for (int i_ker_y = base_idx_y; i_ker_y < base_idx_y + kernel_y; i_ker_y++) + for (int i_out_x = 0, base_idx_x = -pad_x; i_out_x < output_x; base_idx_x += stride_x, i_out_x++) { - for (int i_ker_x = base_idx_x; i_ker_x < base_idx_x + kernel_x; i_ker_x++) + for (int i_ker_y = base_idx_y; i_ker_y < base_idx_y + kernel_y; i_ker_y++) { - if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + for (int i_ker_x = base_idx_x; i_ker_x < base_idx_x + kernel_x; i_ker_x++) { - arm_memset_q7(lhs_buffer, (int8_t)-input_offset, (uint32_t)input_ch); - padded = 1; + if (i_ker_y < 0 || i_ker_y >= input_y || i_ker_x < 0 || i_ker_x >= input_x) + { + arm_memset_q7(lhs_buffer, (int8_t)-input_offset, (uint32_t)active_ch); + padded = 1; + } + else + { + arm_memcpy_q7(lhs_buffer, + input_slice + (i_ker_y * input_x + i_ker_x) * input_ch, + (uint32_t)active_ch); + } + 
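Editor's note: the im2col copy above fills out-of-bounds positions with (int8_t)-input_offset rather than zero. The inner loops add input_offset back to every loaded value, so this is the quantized encoding of a real zero and padded taps contribute nothing to the accumulator. A minimal check of that identity, with made-up values:

```c
#include <assert.h>
#include <stdint.h>

int main(void)
{
    const int32_t input_offset = 128;           /* example zero-point compensation for s8 data */
    const int8_t pad_value = (int8_t)-input_offset;

    /* What the inner loops compute for a padded tap against an arbitrary weight. */
    const int8_t weight = 57;
    const int32_t contribution = ((int32_t)pad_value + input_offset) * weight;
    assert(contribution == 0);                  /* padding adds nothing to the accumulator */
    return 0;
}
```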
lhs_buffer += CH_IN_BLOCK_MVE; + } + } + buffer_count++; + + if (buffer_count == 4) + { + const int32_t block_offset = i_ch * CH_IN_BLOCK_MVE; + lhs_buffer = (q7_t *)buffer_a; + if (padded == 0) + { + arm_nn_depthwise_conv_nt_t_s8(lhs_buffer, + kernel + block_offset, + input_offset, + active_ch, + input_ch, + output_shift + block_offset, + output_mult + block_offset, + output_offset, + output_activation_min, + output_activation_max, + kernel_size, + bias + block_offset, + out); } else { - arm_memcpy_q7(lhs_buffer, input + (i_ker_y * input_x + i_ker_x) * input_ch, (uint32_t)input_ch); + arm_nn_depthwise_conv_nt_t_padded_s8(lhs_buffer, + kernel + block_offset, + input_offset, + active_ch, + input_ch, + output_shift + block_offset, + output_mult + block_offset, + output_offset, + output_activation_min, + output_activation_max, + kernel_size, + bias + block_offset, + out); + padded = 0; } - lhs_buffer += input_ch; + out += (4 * input_ch); + buffer_count = 0; } } - buffer_count++; + } + /* Handle left over buffers */ + lhs_buffer = (q7_t *)buffer_a; - if (buffer_count == 4) + int8_t *out_base = out; + for (int i_buf = 0; i_buf < buffer_count; i_buf++) + { + int32_t loop_count = (active_ch + 3) / 4; + int32_t num_ch_to_process = active_ch; + out = out_base + (i_buf * input_ch); + for (int i_loop_cnt = 0, offset = i_ch * CH_IN_BLOCK_MVE; i_loop_cnt < loop_count; + num_ch_to_process -= 4, offset += 4, i_loop_cnt++) { - lhs_buffer = (q7_t *)buffer_a; - if (padded == 0) + const int8_t *col_0 = lhs_buffer + (kernel_size * CH_IN_BLOCK_MVE * i_buf) + (i_loop_cnt * 4); + const int8_t *row_0 = kernel + offset; + int32x4_t out_0 = vdupq_n_s32(0); + if (bias) { - out = arm_nn_depthwise_conv_nt_t_s8(lhs_buffer, - kernel, - input_offset, - input_ch, - output_shift, - output_mult, - output_offset, - output_activation_min, - output_activation_max, - kernel_size, - bias, - out); + out_0 = vldrwq_s32(&bias[offset]); } - else - { - out = arm_nn_depthwise_conv_nt_t_padded_s8(lhs_buffer, - kernel, - input_offset, - input_ch, - output_shift, - output_mult, - output_offset, - output_activation_min, - output_activation_max, - kernel_size, - bias, - out); - padded = 0; - } - buffer_count = 0; - } - } - } - /* Handle left over buffers */ - lhs_buffer = (q7_t *)buffer_a; + for (int i_ker = 0; i_ker < kernel_size; i_ker++) + { + const int32x4_t ker_0 = vldrbq_s32(row_0); + int32x4_t ip_0 = vldrbq_s32(col_0); + ip_0 = vaddq_n_s32(ip_0, input_offset); + out_0 += vmulq_s32(ip_0, ker_0); - for (int i_buf = 0; i_buf < buffer_count; i_buf++) - { - int32_t loop_count = (input_ch + 3) / 4; + col_0 += CH_IN_BLOCK_MVE; + row_0 += input_ch; + } - int32_t num_ch_to_process = input_ch; - for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; num_ch_to_process -= 4, offset += 4, i_loop_cnt++) - { - const int8_t *col_0 = lhs_buffer + (kernel_size * input_ch * i_buf) + offset; - const int8_t *row_0 = kernel + offset; - int32x4_t out_0 = vldrwq_s32(&bias[offset]); + const int32x4_t mult = vldrwq_s32(&output_mult[offset]); + const int32x4_t shift = vldrwq_s32(&output_shift[offset]); - for (int i_ker = 0; i_ker < kernel_size; i_ker++) - { - const int32x4_t ker_0 = vldrbq_s32(row_0); + out_0 = arm_requantize_mve_32x4(out_0, mult, shift); + out_0 = vaddq_n_s32(out_0, output_offset); + out_0 = vmaxq_s32(out_0, vdupq_n_s32(output_activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(output_activation_max)); + mve_pred16_t p = vctp32q((uint32_t)num_ch_to_process); + vstrbq_p_s32(out, out_0, p); - int32x4_t ip_0 = vldrbq_s32(col_0); - 
ip_0 = vaddq_n_s32(ip_0, input_offset); - out_0 += vmulq_s32(ip_0, ker_0); - - col_0 += input_ch; - row_0 += input_ch; + out += 4; } - - const int32x4_t mult = vldrwq_s32(&output_mult[offset]); - const int32x4_t shift = vldrwq_s32(&output_shift[offset]); - - out_0 = arm_requantize_mve_32x4(out_0, mult, shift); - out_0 = vaddq_n_s32(out_0, output_offset); - out_0 = vmaxq_s32(out_0, vdupq_n_s32(output_activation_min)); - out_0 = vminq_s32(out_0, vdupq_n_s32(output_activation_max)); - mve_pred16_t p = vctp32q((uint32_t)num_ch_to_process); - vstrbq_p_s32(out, out_0, p); - - out += 4; } + buffer_count = 0; - const int tail_ch = input_ch & 0x3; - if (tail_ch != 0) - { - out -= (4 - tail_ch); - } + active_ch = MIN(CH_IN_BLOCK_MVE, remaining_ch); + remaining_ch -= CH_IN_BLOCK_MVE; } #else // ARM_MATH_DSP - (void)bias_dims; /* Run the following code in cores using DSP extension */ q15_t *const col_buffer_start = buffer_a; q15_t *col_buffer = col_buffer_start; @@ -272,10 +296,17 @@ arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, while (row_count) { - q31_t sum = *bias++; - q31_t sum_2 = *bias++; - q31_t sum_3 = *bias++; - q31_t sum_4 = *bias++; + q31_t sum = 0; + q31_t sum_2 = 0; + q31_t sum_3 = 0; + q31_t sum_4 = 0; + if (bias) + { + sum = *bias++; + sum_2 = *bias++; + sum_3 = *bias++; + sum_4 = *bias++; + } uint16_t col_count = (kernel_x * kernel_y) / 2; q15_t *col_pos = col_buffer_start + row_shift; @@ -368,7 +399,11 @@ arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, { q15_t *col_pos = col_buffer_start + row_shift; const q7_t *row_pos = kernel + row_shift; - q31_t sum = *bias++; + q31_t sum = 0; + if (bias) + { + sum = *bias++; + } const uint16_t col_count = (kernel_x * kernel_y); row_shift += 1; @@ -406,14 +441,14 @@ arm_status arm_depthwise_conv_s8_opt(const cmsis_nn_context *ctx, #endif /* ARM_MATH_MVEI | ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims *input_dims, const cmsis_nn_dims *filter_dims) { #if defined(ARM_MATH_MVEI) - /* The + 4 accounts for out of bounds read of the lhs buffers in the *_nt_t_* functions. */ - return (2 * input_dims->c * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int16_t) + 4; + (void)input_dims; + return (4 * CH_IN_BLOCK_MVE * filter_dims->w * filter_dims->h) * (int32_t)sizeof(int8_t); #elif defined(ARM_MATH_DSP) return (input_dims->c * filter_dims->w * filter_dims->h) * sizeof(int16_t); #else @@ -426,3 +461,5 @@ int32_t arm_depthwise_conv_s8_opt_get_buffer_size(const cmsis_nn_dims *input_dim /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c index 84621df..0404276 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_u8_basic_ver1.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. 
+ * Copyright (C) 2010-2022 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_depthwise_conv_u8_basic_ver1.c * Description: u8 depthwise convolution function * - * $Date: 09. October 2020 - * $Revision: V.1.1.1 + * $Date: 19 April 2022 + * $Revision: V.2.0.0 * * Target : Cortex-M CPUs * @@ -224,7 +226,7 @@ static void depthwise_conv_u8_generic(const uint8_t *input, * @param[in] dilation_x Dilation along width. Not used and intended for future enhancement. * @param[in] dilation_y Dilation along height. Not used and intended for future enhancement. * @param[in] bias Pointer to optional bias values. If no bias is - * availble, NULL is expected + * available, NULL is expected * @param[in] input_offset Input tensor zero offset * @param[in] filter_offset Kernel tensor zero offset * @param[in] output_offset Output tensor zero offset @@ -236,38 +238,35 @@ static void depthwise_conv_u8_generic(const uint8_t *input, * @param[in] output_shift Amount of right-shift for output * @param[in] output_mult Output multiplier for requantization * @return The function returns one of the following - * ARM_MATH_SIZE_MISMATCH - Not supported dimension of tensors - * ARM_MATH_SUCCESS - Successful operation - * ARM_MATH_ARGUMENT_ERROR - Implementation not available - * + * ARM_CMSIS_NN_SUCCESS - Successful operation * */ -arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, - const uint16_t input_x, - const uint16_t input_y, - const uint16_t input_ch, - const uint8_t *kernel, - const uint16_t kernel_x, - const uint16_t kernel_y, - const int16_t ch_mult, - const int16_t pad_x, - const int16_t pad_y, - const int16_t stride_x, - const int16_t stride_y, - const int16_t dilation_x, - const int16_t dilation_y, - const int32_t *bias, - const int32_t input_offset, - const int32_t filter_offset, - const int32_t output_offset, - uint8_t *output, - const uint16_t output_x, - const uint16_t output_y, - const int32_t output_activation_min, - const int32_t output_activation_max, - const int32_t output_shift, - const int32_t output_mult) +arm_cmsis_nn_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, + const uint16_t input_x, + const uint16_t input_y, + const uint16_t input_ch, + const uint8_t *kernel, + const uint16_t kernel_x, + const uint16_t kernel_y, + const int16_t ch_mult, + const int16_t pad_x, + const int16_t pad_y, + const int16_t stride_x, + const int16_t stride_y, + const int16_t dilation_x, + const int16_t dilation_y, + const int32_t *bias, + const int32_t input_offset, + const int32_t filter_offset, + const int32_t output_offset, + uint8_t *output, + const uint16_t output_x, + const uint16_t output_y, + const int32_t output_activation_min, + const int32_t output_activation_max, + const int32_t output_shift, + const int32_t output_mult) { (void)dilation_x; (void)dilation_y; @@ -328,9 +327,11 @@ arm_status arm_depthwise_conv_u8_basic_ver1(const uint8_t *input, } /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s16.c new file mode 100644 index 0000000..072e7ea --- /dev/null +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s16.c @@ -0,0 +1,125 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_depthwise_conv_wrapper_s16.c + * Description: Wrapper API to select appropriate depthwise conv API based + * on dimensions. + * + * $Date: 6 July 2022 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup NNConv + * @{ + */ + +#define USE_FAST_DW_CONV_FUNCTION(dw_conv_params, filter_dims, input_dims) \ + (dw_conv_params->ch_mult == 1 && dw_conv_params->dilation.w == 1 && dw_conv_params->dilation.h == 1 && \ + filter_dims->w * filter_dims->h * input_dims->c < 512) + +/* + * s16 Depthwise conv wrapper function + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s16(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *filter, + const cmsis_nn_dims *bias_dims, + const int64_t *bias, + const cmsis_nn_dims *output_dims, + q15_t *output) +{ + arm_cmsis_nn_status status = ARM_CMSIS_NN_SUCCESS; + + if (USE_FAST_DW_CONV_FUNCTION(dw_conv_params, filter_dims, input_dims)) + { + status = arm_depthwise_conv_fast_s16(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + else + { + status = arm_depthwise_conv_s16(ctx, + dw_conv_params, + quant_params, + input_dims, + input, + filter_dims, + filter, + bias_dims, + bias, + output_dims, + output); + } + + /* Return to application */ + return status; +} + +int32_t arm_depthwise_conv_wrapper_s16_get_buffer_size(const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_dims *input_dims, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims) +{ + (void)dw_conv_params; + (void)input_dims; + (void)filter_dims; + (void)output_dims; + int32_t size = 0; + + if (USE_FAST_DW_CONV_FUNCTION(dw_conv_params, filter_dims, input_dims)) + { + size = arm_depthwise_conv_fast_s16_get_buffer_size(input_dims, filter_dims); + } + + return size; +} + +/** + * @} end of NNConv group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c index b5bf5df..df2bb64 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_conv_wrapper_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2022 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -22,8 +24,8 @@ * Description: Wrapper API to select appropriate depthwise conv API based * on dimensions. * - * $Date: 09. October 2020 - * $Revision: V.1.0.2 + * $Date: 19 April 2022 + * $Revision: V.2.0.0 * * Target Processor: Cortex-M CPUs * @@ -46,23 +48,25 @@ * Refer header file for details. 
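Editor's note: the new s16 wrapper above and the s8 wrapper whose diff begins here are called the same way: query the scratch requirement, hang the buffer off a cmsis_nn_context, and let the wrapper pick the kernel. Below is a usage sketch for the s16 variant; the heap allocation, the helper name run_dw_conv_s16 and all tensor/parameter pointers are placeholders, not values from this project.

```c
#include <stdlib.h>
#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h"

static arm_cmsis_nn_status run_dw_conv_s16(const q15_t *in16, const q7_t *filter7,
                                           const int64_t *bias64, q15_t *out16,
                                           const cmsis_nn_dw_conv_params *dw_params,
                                           const cmsis_nn_per_channel_quant_params *quant,
                                           const cmsis_nn_dims *in_dims,
                                           const cmsis_nn_dims *filt_dims,
                                           const cmsis_nn_dims *bias_dims,
                                           const cmsis_nn_dims *out_dims)
{
    cmsis_nn_context ctx = { .buf = NULL, .size = 0 };

    /* The wrapper only needs scratch memory when it selects the fast s16 kernel. */
    ctx.size = arm_depthwise_conv_wrapper_s16_get_buffer_size(dw_params, in_dims, filt_dims, out_dims);
    if (ctx.size > 0)
    {
        ctx.buf = malloc((size_t)ctx.size);
        if (ctx.buf == NULL)
        {
            return ARM_CMSIS_NN_ARG_ERROR; /* placeholder failure code for this sketch */
        }
    }

    const arm_cmsis_nn_status status = arm_depthwise_conv_wrapper_s16(
        &ctx, dw_params, quant, in_dims, in16, filt_dims, filter7,
        bias_dims, bias64, out_dims, out16);

    free(ctx.buf);
    return status;
}
```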
* */ -arm_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx, - const cmsis_nn_dw_conv_params *dw_conv_params, - const cmsis_nn_per_channel_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *filter, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) +arm_cmsis_nn_status arm_depthwise_conv_wrapper_s8(const cmsis_nn_context *ctx, + const cmsis_nn_dw_conv_params *dw_conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *filter, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + q7_t *output) { - arm_status status = ARM_MATH_SUCCESS; - if (1 == dw_conv_params->ch_mult) + arm_cmsis_nn_status status = ARM_CMSIS_NN_SUCCESS; + if (1 == dw_conv_params->ch_mult && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) { #if !defined(ARM_MATH_MVEI) - if ((filter_dims->w == 3) && (filter_dims->h == 3) && (dw_conv_params->padding.h <= 1)) + if ((filter_dims->w == 3) && (filter_dims->h == 3) && (dw_conv_params->padding.h <= 1) && + (dw_conv_params->padding.w <= 1)) { status = arm_depthwise_conv_3x3_s8(ctx, dw_conv_params, @@ -119,7 +123,8 @@ int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_par (void)dw_conv_params; int32_t size = 0; - if (input_dims->c == output_dims->c) + if (input_dims->c == output_dims->c && input_dims->n == 1 && dw_conv_params->dilation.w == 1 && + dw_conv_params->dilation.h == 1) { size = arm_depthwise_conv_s8_opt_get_buffer_size(input_dims, filter_dims); } @@ -130,3 +135,5 @@ int32_t arm_depthwise_conv_wrapper_s8_get_buffer_size(const cmsis_nn_dw_conv_par /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c index e2375a3..0a91889 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_depthwise_separable_conv_HWC_q7.c * Description: Q7 depthwise separable convolution function * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,63 +42,29 @@ * @{ */ -/** - * @brief Q7 depthwise separable convolution function - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in input tensor dimension - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel filter kernel size - * @param[in] padding padding sizes - * @param[in] stride convolution stride - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out output tensor dimension - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * @details - * - * Buffer size: - * - * bufferA size: 2*ch_im_in*dim_kernel*dim_kernel - * - * bufferB size: 0 - * - * Input dimension constraints: - * - * ch_im_in equals ch_im_out - * - * Implementation: - * There are 3 nested loop here: - * Inner loop: calculate each output value with MAC instruction over an accumulator - * Mid loop: loop over different output channel - * Outer loop: loop over different output (x, y) +/* + * Q7 depthwise separable convolution function + * Refer function header for details */ -arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, - const uint16_t dim_im_in, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel, - const uint16_t padding, - const uint16_t stride, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, + const uint16_t dim_im_in, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel, + const uint16_t padding, + const uint16_t stride, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ int16_t i_out_y, i_out_x; @@ -111,7 +79,7 @@ arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, /* do some checking here, basically ch_im_in == ch_im_out */ if (ch_im_in != ch_im_out) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) @@ -263,13 +231,13 @@ arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, "smlad %[sum4], r4, r5, %[sum4]\n" "subs %[colCnt], #1\n" "bne COL_LOOP_%=\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt), [ ch_im_in ] "r"(ch_im_in) + : [sum] "+r"(sum), + 
[sum2] "+r"(sum2), + [sum3] "+r"(sum3), + [sum4] "+r"(sum4), + [pB] "+r"(pB), + [pA] "+r"(pA) + : [colCnt] "r"(colCnt), [ch_im_in] "r"(ch_im_in) : "r0", "r1", "r2", "r3", "r4", "r5"); #else /* @@ -307,13 +275,13 @@ arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, "smlad %[sum3], r4, r5, %[sum3]\n" "subs %[colCnt], #1\n" "bne COL_LOOP_%=\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt), [ ch_im_in ] "r"(ch_im_in) + : [sum] "+r"(sum), + [sum2] "+r"(sum2), + [sum3] "+r"(sum3), + [sum4] "+r"(sum4), + [pB] "+r"(pB), + [pA] "+r"(pA) + : [colCnt] "r"(colCnt), [ch_im_in] "r"(ch_im_in) : "r0", "r1", "r2", "r3", "r4", "r5"); #endif /* ARM_MATH_BIG_ENDIAN */ @@ -381,7 +349,7 @@ arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, /* do some checking here, basically ch_im_in == ch_im_out */ if (ch_im_in != ch_im_out) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i_out_y = 0; i_out_y < dim_im_out; i_out_y++) @@ -414,9 +382,11 @@ arm_status arm_depthwise_separable_conv_HWC_q7(const q7_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c index 1bc08c8..e85b01b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_depthwise_separable_conv_HWC_q7_nonsquare.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_depthwise_separable_conv_HWC_q7_nonsquare.c * Description: Q7 depthwise separable convolution function (non-square shape) * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,62 +42,36 @@ * @{ */ -/** - * @brief Q7 depthwise separable convolution function (non-square shape) - * @param[in] Im_in pointer to input tensor - * @param[in] dim_im_in_x input tensor dimension x - * @param[in] dim_im_in_y input tensor dimension y - * @param[in] ch_im_in number of input tensor channels - * @param[in] wt pointer to kernel weights - * @param[in] ch_im_out number of filters, i.e., output tensor channels - * @param[in] dim_kernel_x filter kernel size x - * @param[in] dim_kernel_y filter kernel size y - * @param[in] padding_x padding sizes x - * @param[in] padding_y padding sizes y - * @param[in] stride_x convolution stride x - * @param[in] stride_y convolution stride y - * @param[in] bias pointer to bias - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in,out] Im_out pointer to output tensor - * @param[in] dim_im_out_x output tensor dimension x - * @param[in] dim_im_out_y output tensor dimension y - * @param[in,out] bufferA pointer to buffer space for input - * @param[in,out] bufferB pointer to buffer space for output - * @return The function returns either - * ARM_MATH_SIZE_MISMATCH or ARM_MATH_SUCCESS based on the outcome of size checking. - * - * This function is the version with full list of optimization tricks, but with - * some contraints: - * ch_im_in is equal to ch_im_out - * +/* + * Q7 depthwise separable convolution function (non-square shape) + * Refer function header for details */ -arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, - const uint16_t dim_im_in_x, - const uint16_t dim_im_in_y, - const uint16_t ch_im_in, - const q7_t *wt, - const uint16_t ch_im_out, - const uint16_t dim_kernel_x, - const uint16_t dim_kernel_y, - const uint16_t padding_x, - const uint16_t padding_y, - const uint16_t stride_x, - const uint16_t stride_y, - const q7_t *bias, - const uint16_t bias_shift, - const uint16_t out_shift, - q7_t *Im_out, - const uint16_t dim_im_out_x, - const uint16_t dim_im_out_y, - q15_t *bufferA, - q7_t *bufferB) +arm_cmsis_nn_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, + const uint16_t dim_im_in_x, + const uint16_t dim_im_in_y, + const uint16_t ch_im_in, + const q7_t *wt, + const uint16_t ch_im_out, + const uint16_t dim_kernel_x, + const uint16_t dim_kernel_y, + const uint16_t padding_x, + const uint16_t padding_y, + const uint16_t stride_x, + const uint16_t stride_y, + const q7_t *bias, + const uint16_t bias_shift, + const uint16_t out_shift, + q7_t *Im_out, + const uint16_t dim_im_out_x, + const uint16_t dim_im_out_y, + q15_t *bufferA, + q7_t *bufferB) { (void)bufferB; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ /* @@ -119,7 +95,7 @@ arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, /* do some checking here, basically ch_im_in == ch_im_out */ if (ch_im_in != ch_im_out) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) @@ 
-270,13 +246,13 @@ arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, "smlad %[sum4], r4, r5, %[sum4]\n" "subs %[colCnt], #1\n" "bne COL_LOOP\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt), [ ch_im_in ] "r"(ch_im_in) + : [sum] "+r"(sum), + [sum2] "+r"(sum2), + [sum3] "+r"(sum3), + [sum4] "+r"(sum4), + [pB] "+r"(pB), + [pA] "+r"(pA) + : [colCnt] "r"(colCnt), [ch_im_in] "r"(ch_im_in) : "r0", "r1", "r2", "r3", "r4", "r5"); #else // r0 r1 r2 r3 r4 r5 @@ -312,13 +288,13 @@ arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, "smlad %[sum3], r4, r5, %[sum3]\n" "subs %[colCnt], #1\n" "bne COL_LOOP\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt), [ ch_im_in ] "r"(ch_im_in) + : [sum] "+r"(sum), + [sum2] "+r"(sum2), + [sum3] "+r"(sum3), + [sum4] "+r"(sum4), + [pB] "+r"(pB), + [pA] "+r"(pA) + : [colCnt] "r"(colCnt), [ch_im_in] "r"(ch_im_in) : "r0", "r1", "r2", "r3", "r4", "r5"); #endif /*ARM_MATH_BIG_ENDIAN */ @@ -386,7 +362,7 @@ arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, /* do some checking here, basically ch_im_in == ch_im_out */ if (ch_im_in != ch_im_out) { - return ARM_MATH_SIZE_MISMATCH; + return ARM_CMSIS_NN_ARG_ERROR; } for (i_out_y = 0; i_out_y < dim_im_out_y; i_out_y++) @@ -419,9 +395,11 @@ arm_status arm_depthwise_separable_conv_HWC_q7_nonsquare(const q7_t *Im_in, #endif /* ARM_MATH_DSP */ /* Return to application */ - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNConv group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c index efd8138..4804ba9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_depthwise_conv_s8_core.c @@ -1,3 +1,5 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. * @@ -216,3 +218,5 @@ q7_t *arm_nn_depthwise_conv_s8_core(const q7_t *row, return NULL; #endif } + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c index 3176d43..5c95485 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_mat_mult_kernel_q7_q15.c * Description: Matrix-multiplication function for convolution * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.3 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ @@ -30,10 +32,10 @@ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" -/** - * @brief Matrix-multiplication function for convolution. +/* + * Matrix-multiplication function for convolution. * - * @details Refer to header file for details. + * Refer to header file for details. * */ @@ -184,3 +186,5 @@ q7_t *arm_nn_mat_mult_kernel_q7_q15(const q7_t *pA, return NULL; #endif /* ARM_MATH_DSP */ } + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c index c141173..29043c8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_q7_q15_reordered.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_mat_mult_kernel_q7_q15_reordered.c * Description: Matrix-multiplication function for convolution with reordered columns * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.3 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ @@ -30,10 +32,10 @@ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" -/** - * @brief Matrix-multiplication function for convolution with re-ordered input. +/* + * Matrix-multiplication function for convolution with re-ordered input. * - * @details Refer to header file for details. + * Refer to header file for details. 
* */ @@ -135,3 +137,5 @@ q7_t *arm_nn_mat_mult_kernel_q7_q15_reordered(const q7_t *pA, return NULL; #endif /* ARM_MATH_DSP */ } + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c index 6de79a9..62ee822 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_mat_mult_kernel_s8_s16.c * Description: Matrix-multiplication function for convolution * - * $Date: 09. October 2020 - * $Revision: V.1.0.3 + * $Date: 14. December 2021 + * $Revision: V.1.1.0 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ @@ -49,174 +51,7 @@ q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, const int32_t *const output_bias, q7_t *out_0) { -#if defined(ARM_MATH_MVEI) -#define ROW_PER_LOOP (4) -#define COL_PER_LOOP (8) - - const q7_t *ip_a0_s8 = input_a; - q7_t *out_1 = out_0 + output_ch; - - const int32_t *bias = output_bias; - - int32_t row_count = output_ch / ROW_PER_LOOP; - - while (row_count) - { - const q15_t *ip_b0_s16 = input_b; - const q15_t *ip_b1_s16 = input_b + num_col_a; - - const q7_t *ip_a1_s8 = ip_a0_s8 + num_col_a; - const q7_t *ip_a2_s8 = ip_a0_s8 + num_col_a * 2; - const q7_t *ip_a3_s8 = ip_a0_s8 + num_col_a * 3; - - q31_t ch_0_out_n = bias[0]; - q31_t ch_1_out_n = bias[1]; - q31_t ch_2_out_n = bias[2]; - q31_t ch_3_out_n = bias[3]; - - q31_t ch_0_out_n1 = ch_0_out_n; - q31_t ch_1_out_n1 = ch_1_out_n; - q31_t ch_2_out_n1 = ch_2_out_n; - q31_t ch_3_out_n1 = ch_3_out_n; - bias += 4; - - int32_t col_count = num_col_a / COL_PER_LOOP; - - while (col_count) - { - // Load inputs - const int16x8_t ip_b0 = vld1q_s16(ip_b0_s16); - ip_b0_s16 += COL_PER_LOOP; - const int16x8_t ip_b1 = vld1q_s16(ip_b1_s16); - ip_b1_s16 += COL_PER_LOOP; - - // Load filters - const int16x8_t ip_a0 = vldrbq_s16(ip_a0_s8); - ip_a0_s8 += COL_PER_LOOP; - const int16x8_t ip_a1 = vldrbq_s16(ip_a1_s8); - ip_a1_s8 += COL_PER_LOOP; - const int16x8_t ip_a2 = vldrbq_s16(ip_a2_s8); - ip_a2_s8 += COL_PER_LOOP; - const int16x8_t ip_a3 = vldrbq_s16(ip_a3_s8); - ip_a3_s8 += COL_PER_LOOP; - - // MAC - ch_0_out_n += vmladavq_s16(ip_b0, ip_a0); - ch_1_out_n += vmladavq_s16(ip_b0, ip_a1); - ch_2_out_n += vmladavq_s16(ip_b0, ip_a2); - ch_3_out_n += vmladavq_s16(ip_b0, ip_a3); - ch_0_out_n1 += vmladavq_s16(ip_b1, ip_a0); - ch_1_out_n1 += vmladavq_s16(ip_b1, ip_a1); - ch_2_out_n1 += vmladavq_s16(ip_b1, ip_a2); - ch_3_out_n1 += vmladavq_s16(ip_b1, ip_a3); - - col_count--; - } - - /* Handle tail */ - col_count = (num_col_a & (COL_PER_LOOP - 1)) - 1; - while (col_count >= 0) - { - const int32_t b0 = ip_b0_s16[col_count]; - const int32_t b1 = ip_b1_s16[col_count]; - - ch_0_out_n += b0 * ip_a0_s8[col_count]; - ch_1_out_n += b0 * 
ip_a1_s8[col_count]; - ch_2_out_n += b0 * ip_a2_s8[col_count]; - ch_3_out_n += b0 * ip_a3_s8[col_count]; - - ch_0_out_n1 += b1 * ip_a0_s8[col_count]; - ch_1_out_n1 += b1 * ip_a1_s8[col_count]; - ch_2_out_n1 += b1 * ip_a2_s8[col_count]; - ch_3_out_n1 += b1 * ip_a3_s8[col_count]; - col_count--; - } - ip_a0_s8 += (num_col_a & (COL_PER_LOOP - 1)); - - int32x4_t out_vec_0; - int32x4_t out_vec_1; - out_vec_0[0] = ch_0_out_n; - out_vec_0[1] = ch_1_out_n; - out_vec_0[2] = ch_2_out_n; - out_vec_0[3] = ch_3_out_n; - - out_vec_1[0] = ch_0_out_n1; - out_vec_1[1] = ch_1_out_n1; - out_vec_1[2] = ch_2_out_n1; - out_vec_1[3] = ch_3_out_n1; - - int32x4_t mult = vldrwq_s32(out_mult); - int32x4_t shift = vldrwq_s32(out_shift); - out_mult += ROW_PER_LOOP; - out_shift += ROW_PER_LOOP; - - out_vec_0 = arm_requantize_mve_32x4(out_vec_0, mult, shift); - out_vec_1 = arm_requantize_mve_32x4(out_vec_1, mult, shift); - - out_vec_0 = vaddq_n_s32(out_vec_0, out_offset); - out_vec_0 = vmaxq_s32(out_vec_0, vdupq_n_s32(activation_min)); - out_vec_0 = vminq_s32(out_vec_0, vdupq_n_s32(activation_max)); - vstrbq_s32(out_0, out_vec_0); - out_0 += ROW_PER_LOOP; - - out_vec_1 = vaddq_n_s32(out_vec_1, out_offset); - out_vec_1 = vmaxq_s32(out_vec_1, vdupq_n_s32(activation_min)); - out_vec_1 = vminq_s32(out_vec_1, vdupq_n_s32(activation_max)); - vstrbq_s32(out_1, out_vec_1); - out_1 += ROW_PER_LOOP; - row_count--; - ip_a0_s8 += (num_col_a * 3); - } - - row_count = output_ch & (ROW_PER_LOOP - 1); - - if (row_count) - { - ip_a0_s8 = input_a + num_col_a * (output_ch & ~3); - const mve_pred16_t p = vctp32q((uint32_t)row_count); - int32x4_t out_vec_0 = vdupq_n_s32(0); - int32x4_t out_vec_1 = vdupq_n_s32(0); - int32x4_t mult_tail; - int32x4_t shift_tail; - - for (int i_ch = 0; i_ch < row_count; i_ch++) - { - int32_t output_0 = bias[i_ch]; - int32_t output_1 = bias[i_ch]; - const q15_t *ip_b0_s16 = input_b; - const q15_t *ip_b1_s16 = input_b + num_col_a; - - for (int i_idx = 0; i_idx < num_col_a; i_idx++) - { - output_0 += ip_b0_s16[i_idx] * ip_a0_s8[i_idx]; - output_1 += ip_b1_s16[i_idx] * ip_a0_s8[i_idx]; - } - - ip_a0_s8 += num_col_a; - out_vec_0[i_ch] = output_0; - out_vec_1[i_ch] = output_1; - mult_tail[i_ch] = out_mult[i_ch]; - shift_tail[i_ch] = out_shift[i_ch]; - } - out_vec_0 = arm_requantize_mve_32x4(out_vec_0, mult_tail, shift_tail); - out_vec_1 = arm_requantize_mve_32x4(out_vec_1, mult_tail, shift_tail); - - out_vec_0 = vaddq_n_s32(out_vec_0, out_offset); - out_vec_0 = vmaxq_s32(out_vec_0, vdupq_n_s32(activation_min)); - out_vec_0 = vminq_s32(out_vec_0, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out_0, out_vec_0, p); - - out_vec_1 = vaddq_n_s32(out_vec_1, out_offset); - out_vec_1 = vmaxq_s32(out_vec_1, vdupq_n_s32(activation_min)); - out_vec_1 = vminq_s32(out_vec_1, vdupq_n_s32(activation_max)); - - vstrbq_p_s32(out_1, out_vec_1, p); - out_1 += row_count; - } - - return out_1; - -#elif defined(ARM_MATH_DSP) +#if !defined(ARM_MATH_MVEI) /* set up the second output pointers */ q7_t *out_1 = out_0 + output_ch; const int32_t *bias = output_bias; @@ -233,12 +68,20 @@ q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, /* align the second pointer for A */ const q7_t *ip_a1 = ip_a0 + num_col_a; + q31_t ch_0_out_0 = 0; + q31_t ch_0_out_1 = 0; + q31_t ch_1_out_0 = 0; + q31_t ch_1_out_1 = 0; /* Init accumulator with bias for channel N and N + 1 */ - q31_t ch_0_out_0 = *bias; - q31_t ch_0_out_1 = *bias++; - q31_t ch_1_out_0 = *bias; - q31_t ch_1_out_1 = *bias++; + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias++; + 
ch_1_out_0 = *bias; + ch_1_out_1 = *bias++; + } +#if defined(ARM_MATH_DSP) uint16_t col_count = num_col_a / 4; /* accumulate over the vector */ while (col_count) @@ -266,6 +109,9 @@ q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, col_count--; } /* while over col_count */ col_count = num_col_a & 0x3; +#else + uint16_t col_count = num_col_a; +#endif while (col_count) { q7_t a0 = *ip_a0++; @@ -320,10 +166,17 @@ q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, const q15_t *ip_b0 = input_b; const q15_t *ip_b1 = ip_b0 + num_col_a; + q31_t ch_0_out_0 = 0; + q31_t ch_0_out_1 = 0; + /* load the bias */ - q31_t ch_0_out_0 = *bias; - q31_t ch_0_out_1 = *bias++; + if (bias) + { + ch_0_out_0 = *bias; + ch_0_out_1 = *bias++; + } +#if defined(ARM_MATH_DSP) uint16_t col_count = num_col_a >> 2; while (col_count) { @@ -344,6 +197,9 @@ q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, col_count--; } col_count = num_col_a & 0x3; +#else + uint16_t col_count = num_col_a; +#endif while (col_count) { q7_t a0 = *ip_a0++; @@ -389,3 +245,5 @@ q7_t *arm_nn_mat_mult_kernel_s8_s16(const q7_t *input_a, return NULL; #endif } + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c deleted file mode 100644 index 8fc0b0f..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_kernel_s8_s16_reordered.c +++ /dev/null @@ -1,201 +0,0 @@ -/* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. - * - * SPDX-License-Identifier: Apache-2.0 - * - * Licensed under the Apache License, Version 2.0 (the License); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an AS IS BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/* ---------------------------------------------------------------------- - * Project: CMSIS NN Library - * Title: arm_nn_mat_mult_kernel_s8_s16_reordered.c - * Description: Matrix-multiplication function for convolution with reordered columns - * - * $Date: 09. October 2020 - * $Revision: V.1.0.3 - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ - -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" - -/* - * Matrix-multiplication with re-ordered input and bias inputs for convolution with per-channel - * requantization. The re-ordering is a consequence of sign extension is done by the SXTB16 command. - * - * Refer header file for details. This function differs from arm_nn_mat_mult_kernel_s8_s16(), in that it uses - * read_and_pad_reordered() instead of arm_nn_mat_mult_kernel_s8_s16(). Investigating the cycles impact and - * unifying these two functions is a potential future improvement. 
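Editor's note: the retained arm_nn_mat_mult_kernel_s8_s16 (patched above, before the deleted reordered variant that continues below) walks two filter rows against two im2col columns at a time and now zero-initializes its four accumulators when no bias is supplied. A plain scalar sketch of that 2x2 inner pattern, with hypothetical names:

```c
#include <stdint.h>

/* Scalar sketch of the 2x2 accumulation core: rows a0/a1 are two filter channels,
 * columns b0/b1 are two im2col-ed output positions, num_col is the per-channel length. */
static void mat_mult_2x2_core(const int8_t *a0, const int8_t *a1,
                              const int16_t *b0, const int16_t *b1,
                              int32_t num_col, const int32_t *bias,
                              int32_t acc[4])
{
    /* Bias is optional; without it the accumulators start at zero. */
    acc[0] = acc[1] = acc[2] = acc[3] = 0;
    if (bias)
    {
        acc[0] = acc[1] = bias[0];   /* channel N against both columns */
        acc[2] = acc[3] = bias[1];   /* channel N + 1 against both columns */
    }

    for (int32_t i = 0; i < num_col; i++)
    {
        acc[0] += (int32_t)a0[i] * b0[i];
        acc[1] += (int32_t)a0[i] * b1[i];
        acc[2] += (int32_t)a1[i] * b0[i];
        acc[3] += (int32_t)a1[i] * b1[i];
    }
    /* Each acc[] is then requantized, offset and clamped exactly as in the kernels above. */
}
```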
- * - */ - -q7_t *arm_nn_mat_mult_kernel_s8_s16_reordered(const q7_t *input_a, - const q15_t *input_b, - const uint16_t output_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int16_t activation_min, - const int16_t activation_max, - const uint16_t num_col_a, - const int32_t *const output_bias, - q7_t *out_0) -{ -#if defined(ARM_MATH_DSP) - /* set up the second output pointers */ - q7_t *out_1 = out_0 + output_ch; - const int32_t *bias = output_bias; - - uint16_t row_count = output_ch / 2; - const q7_t *ip_a0 = input_a; - /* this loop over rows in A */ - while (row_count) - { - /* setup pointers for B */ - const q15_t *ip_b0 = input_b; - const q15_t *ip_b1 = ip_b0 + num_col_a; - - /* align the second pointer for A */ - const q7_t *ip_a1 = ip_a0 + num_col_a; - - /* Init accumulator with bias for channel N and N + 1 */ - q31_t ch_0_out_0 = *bias; - q31_t ch_0_out_1 = *bias++; - q31_t ch_1_out_0 = *bias; - q31_t ch_1_out_1 = *bias++; - - uint16_t col_count = num_col_a / 4; - /* accumulate over the vector */ - while (col_count) - { - q31_t a01, a02, a11, a12; - q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); - q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); - ip_a1 = read_and_pad_reordered(ip_a1, &a11, &a12); - - ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); - ch_1_out_0 = __SMLAD(a11, b0, ch_1_out_0); - ch_1_out_1 = __SMLAD(a11, b1, ch_1_out_1); - - b0 = arm_nn_read_q15x2_ia(&ip_b0); - b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); - ch_1_out_0 = __SMLAD(a12, b0, ch_1_out_0); - ch_1_out_1 = __SMLAD(a12, b1, ch_1_out_1); - - col_count--; - } /* while over col_count */ - - ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); - ch_0_out_0 += out_offset; - ch_0_out_0 = MAX(ch_0_out_0, activation_min); - ch_0_out_0 = MIN(ch_0_out_0, activation_max); - *out_0++ = (q7_t)ch_0_out_0; - - ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); - ch_0_out_1 += out_offset; - ch_0_out_1 = MAX(ch_0_out_1, activation_min); - ch_0_out_1 = MIN(ch_0_out_1, activation_max); - *out_1++ = (q7_t)ch_0_out_1; - out_mult++; - out_shift++; - - ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); - ch_1_out_0 += out_offset; - ch_1_out_0 = MAX(ch_1_out_0, activation_min); - ch_1_out_0 = MIN(ch_1_out_0, activation_max); - *out_0++ = (q7_t)ch_1_out_0; - - ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); - ch_1_out_1 += out_offset; - ch_1_out_1 = MAX(ch_1_out_1, activation_min); - ch_1_out_1 = MIN(ch_1_out_1, activation_max); - *out_1++ = (q7_t)ch_1_out_1; - out_mult++; - out_shift++; - - /* skip row */ - ip_a0 += num_col_a; - row_count--; - } - - if (output_ch & 1) - { - /* setup pointers for B */ - const q15_t *ip_b0 = input_b; - const q15_t *ip_b1 = ip_b0 + num_col_a; - - /* Init accumulator with bias for channel N + 1 */ - q31_t ch_0_out_0 = *bias; - q31_t ch_0_out_1 = ch_0_out_0; - - int32_t col_count = num_col_a / 4; - while (col_count) - { - q31_t a01, a02; - q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); - q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ip_a0 = read_and_pad_reordered(ip_a0, &a01, &a02); - - ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); - - b0 = arm_nn_read_q15x2_ia(&ip_b0); - b1 = arm_nn_read_q15x2_ia(&ip_b1); - - ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); - ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); - - 
col_count--; - } /* while over col_count */ - - ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); - ch_0_out_0 += out_offset; - ch_0_out_0 = MAX(ch_0_out_0, activation_min); - ch_0_out_0 = MIN(ch_0_out_0, activation_max); - *out_0++ = (q7_t)ch_0_out_0; - - ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); - ch_0_out_1 += out_offset; - ch_0_out_1 = MAX(ch_0_out_1, activation_min); - ch_0_out_1 = MIN(ch_0_out_1, activation_max); - *out_1++ = (q7_t)ch_0_out_1; - } - - out_0 += output_ch; - - /* return the new output pointer with offset */ - return out_0; -#else - (void)input_a; - (void)input_b; - (void)output_ch; - (void)out_shift; - (void)out_mult; - (void)out_offset; - (void)activation_min; - (void)activation_max; - (void)num_col_a; - (void)output_bias; - (void)out_0; - /* To be completed */ - return NULL; -#endif -} diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c index f3d6c5e..9eed28f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ConvolutionFunctions/arm_nn_mat_mult_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_mat_mult_s8.c * Description: General Matrix-multiplication function * - * $Date: 09. 
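/*
 * Illustrative sketch (not part of this patch or of CMSIS-NN): every kernel in
 * this family finishes a channel the same way -- requantize the 32-bit
 * accumulator, add the output offset, clamp to the activation range, and
 * narrow to q7. A scalar restatement of that epilogue, with a hypothetical
 * helper name (the multiply/shift itself stays inside arm_nn_requantize()):
 */
#include <stdint.h>

static int8_t finalize_channel(int32_t requantized_acc, int32_t out_offset,
                               int32_t activation_min, int32_t activation_max)
{
    int32_t v = requantized_acc + out_offset;
    if (v < activation_min) v = activation_min;   /* MAX(v, activation_min) */
    if (v > activation_max) v = activation_max;   /* MIN(v, activation_max) */
    return (int8_t)v;                             /* stored as q7_t */
}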
October 2020 - * $Revision: V.2.0.5 + * $Date: 16 August 2022 + * $Revision: V.2.0.7 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ @@ -73,27 +75,27 @@ q7_t *arm_nn_mat_mult_s8(const q7_t *input_row, for (int i_row_loop = 0; i_row_loop < row_loop_cnt; i_row_loop++) { mve_pred16_t p = vctp16q((uint32_t)row_len_tmp); - const int16x8_t offset = vdupq_m_n_s16(vuninitializedq_s16(), col_offset, p); + const int16x8_t offset = vdupq_x_n_s16(col_offset, p); row_len_tmp -= 8; - int16x8_t r0 = vldrbq_z_s16(ip_r0, p); - ip_r0 += 8; - - int16x8_t c0 = vldrbq_z_s16(ip_c0, p); + int16x8_t c0 = vldrbq_s16(ip_c0); ip_c0 += 8; - c0 = vaddq_m_s16(vuninitializedq_s16(), c0, offset, p); + c0 = vaddq_s16(c0, offset); - int16x8_t c1 = vldrbq_z_s16(ip_c1, p); + int16x8_t c1 = vldrbq_s16(ip_c1); ip_c1 += 8; - c1 = vaddq_m_s16(vuninitializedq_s16(), c1, offset, p); + c1 = vaddq_s16(c1, offset); - int16x8_t c2 = vldrbq_z_s16(ip_c2, p); + int16x8_t c2 = vldrbq_s16(ip_c2); ip_c2 += 8; - c2 = vaddq_m_s16(vuninitializedq_s16(), c2, offset, p); + c2 = vaddq_s16(c2, offset); - int16x8_t c3 = vldrbq_z_s16(ip_c3, p); + int16x8_t c3 = vldrbq_s16(ip_c3); ip_c3 += 8; - c3 = vaddq_m_s16(vuninitializedq_s16(), c3, offset, p); + c3 = vaddq_s16(c3, offset); + + int16x8_t r0 = vldrbq_z_s16(ip_r0, p); + ip_r0 += 8; acc_0 = vmladavaq_p_s16(acc_0, r0, c0, p); acc_1 = vmladavaq_p_s16(acc_1, r0, c1, p); @@ -133,15 +135,15 @@ q7_t *arm_nn_mat_mult_s8(const q7_t *input_row, for (int i_row_loop = 0; i_row_loop < row_loop_cnt; i_row_loop++) { const mve_pred16_t p = vctp16q((uint32_t)row_len_tmp); - const int16x8_t offset = vdupq_m_n_s16(vuninitializedq_s16(), col_offset, p); + const int16x8_t offset = vdupq_x_n_s16(col_offset, p); row_len_tmp -= 8; - int16x8_t r0 = vldrbq_z_s16(ip_r0, p); - ip_r0 += 8; - int16x8_t c0 = vldrbq_z_s16(ip_c0, p); + int16x8_t c0 = vldrbq_s16(ip_c0); ip_c0 += 8; + c0 = vaddq_s16(c0, offset); - c0 = vaddq_m_s16(vuninitializedq_s16(), c0, offset, p); + int16x8_t r0 = vldrbq_z_s16(ip_r0, p); + ip_r0 += 8; acc_0 = vmladavaq_p_s16(acc_0, r0, c0, p); } @@ -178,3 +180,5 @@ q7_t *arm_nn_mat_mult_s8(const q7_t *input_row, return NULL; #endif } + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c index a52139d..0987a31 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_fully_connected_mat_q7_vec_q15.c * Description: Mixed Q15-Q7 fully-connected layer function * - * $Date: 09. 
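/*
 * Illustrative sketch (not part of this patch or of CMSIS-NN): the MVE hunks
 * above only change how operands are loaded in arm_nn_mat_mult_s8() -- the
 * column data is now loaded without a predicate and the tail predicate is
 * applied through the row operand and the predicated MAC. The value each
 * accumulator ends up holding is the same dot product; in scalar form, with
 * hypothetical names:
 */
#include <stdint.h>

static int32_t mat_mult_s8_one_acc(const int8_t *row, const int8_t *col,
                                   int32_t col_offset, int32_t row_len)
{
    int32_t acc = 0;
    for (int32_t i = 0; i < row_len; i++)
    {
        acc += (int32_t)row[i] * ((int32_t)col[i] + col_offset);
    }
    return acc;
}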
October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,43 +42,23 @@ * @{ */ -/** +/* * @brief Mixed Q15-Q7 fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * vec_buffer size: 0 - * - * Q7_Q15 version of the fully connected layer - * - * Weights are in q7_t and Activations are in q15_t - * + * Refer function header for details */ -arm_status arm_fully_connected_mat_q7_vec_q15(const q15_t *pV, - const q7_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q15_t *pOut, - q15_t *vec_buffer) +arm_cmsis_nn_status arm_fully_connected_mat_q7_vec_q15(const q15_t *pV, + const q7_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q15_t *pOut, + q15_t *vec_buffer) { (void)vec_buffer; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ const q7_t *pB = pM; @@ -188,10 +170,12 @@ arm_status arm_fully_connected_mat_q7_vec_q15(const q15_t *pV, #endif /* ARM_MATH_DSP */ - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); + /* Return to ARM_CMSIS_NN_SUCCESS */ + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of FC group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c index e173b2a..f4872c1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_mat_q7_vec_q15_opt.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_fully_connected_mat_q7_vec_q15_opt.c * Description: Mixed Q15-Q7 opt fully-connected layer function * - * $Date: 09. 
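/*
 * Illustrative sketch (not part of this patch or of CMSIS-NN): with the return
 * type renamed from arm_status to arm_cmsis_nn_status, callers compare against
 * ARM_CMSIS_NN_SUCCESS instead of ARM_MATH_SUCCESS. A minimal, hypothetical
 * call site; sizes and shift values are placeholders, and vec_buffer is unused
 * by this function so NULL is passed.
 */
#include <stdint.h>
#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h"

static int run_fc_q7_vec_q15(const q15_t *activations, const q7_t *weights,
                             const q7_t *bias, q15_t *output)
{
    const uint16_t dim_vec = 64;       /* placeholder sizes */
    const uint16_t num_of_rows = 10;
    const uint16_t bias_shift = 0;     /* placeholder shifts */
    const uint16_t out_shift = 7;

    arm_cmsis_nn_status status = arm_fully_connected_mat_q7_vec_q15(
        activations, weights, dim_vec, num_of_rows,
        bias_shift, out_shift, bias, output, NULL /* vec_buffer */);

    return (status == ARM_CMSIS_NN_SUCCESS) ? 0 : -1;
}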
October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,92 +42,24 @@ * @{ */ -/** - * @brief Mixed Q15-Q7 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * vec_buffer size: 0 - * - * Q7_Q15 version of the fully connected layer - * - * Weights are in q7_t and Activations are in q15_t - * - * Limitation: x4 version requires weight reordering to work - * - * Here we use only one pointer to read 4 rows in the weight - * matrix. So if the original q7_t matrix looks like this: - * - * | a11 | a12 | a13 | a14 | a15 | a16 | a17 | - * - * | a21 | a22 | a23 | a24 | a25 | a26 | a27 | - * - * | a31 | a32 | a33 | a34 | a35 | a36 | a37 | - * - * | a41 | a42 | a43 | a44 | a45 | a46 | a47 | - * - * | a51 | a52 | a53 | a54 | a55 | a56 | a57 | - * - * | a61 | a62 | a63 | a64 | a65 | a66 | a67 | - * - * We operates on multiple-of-4 rows, so the first four rows becomes - * - * | a11 | a21 | a12 | a22 | a31 | a41 | a32 | a42 | - * - * | a13 | a23 | a14 | a24 | a33 | a43 | a34 | a44 | - * - * | a15 | a25 | a16 | a26 | a35 | a45 | a36 | a46 | - * - * The column left over will be in-order. - * which is: - * | a17 | a27 | a37 | a47 | - * - * For the left-over rows, we do 1x1 computation, so the data remains - * as its original order. 
- * - * So the stored weight matrix looks like this: - * - * | a11 | a21 | a12 | a22 | a31 | a41 | - * - * | a32 | a42 | a13 | a23 | a14 | a24 | - * - * | a33 | a43 | a34 | a44 | a15 | a25 | - * - * | a16 | a26 | a35 | a45 | a36 | a46 | - * - * | a17 | a27 | a37 | a47 | a51 | a52 | - * - * | a53 | a54 | a55 | a56 | a57 | a61 | - * - * | a62 | a63 | a64 | a65 | a66 | a67 | - * +/* + * Mixed Q15-Q7 opt fully-connected layer function + * Refer function header for details */ -arm_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV, - const q7_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q15_t *pOut, - q15_t *vec_buffer) +arm_cmsis_nn_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV, + const q7_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q15_t *pOut, + q15_t *vec_buffer) { (void)vec_buffer; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ const q7_t *pB = pM; @@ -204,55 +138,47 @@ arm_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV, */ #ifndef ARM_MATH_BIG_ENDIAN - asm volatile("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #4\n" - "ldr.w r1, [%[pB]], #8\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r1, %[sum]\n" - "smlad %[sum2], r4, r0, %[sum2]\n" - "ldr.w r3, [%[pB], #-4]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r3, %[sum3]\n" - "smlad %[sum4], r4, r2, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt) - : "r0", "r1", "r2", "r3", "r4"); + asm volatile( + "COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #4\n" + "ldr.w r1, [%[pB]], #8\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r1, %[sum]\n" + "smlad %[sum2], r4, r0, %[sum2]\n" + "ldr.w r3, [%[pB], #-4]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r3, %[sum3]\n" + "smlad %[sum4], r4, r2, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n" + : [sum] "+r"(sum), [sum2] "+r"(sum2), [sum3] "+r"(sum3), [sum4] "+r"(sum4), [pB] "+r"(pB), [pA] "+r"(pA) + : [colCnt] "r"(colCnt) + : "r0", "r1", "r2", "r3", "r4"); #else - asm volatile("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #4\n" - "ldr.w r1, [%[pB]], #8\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r0, %[sum]\n" - "smlad %[sum2], r4, r1, %[sum2]\n" - "ldr.w r3, [%[pB], #-4]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r2, %[sum3]\n" - "smlad %[sum4], r4, r3, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt) - : "r0", "r1", "r2", "r3", "r4"); + asm volatile( + "COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #4\n" + "ldr.w r1, [%[pB]], #8\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r0, %[sum]\n" + "smlad %[sum2], r4, r1, %[sum2]\n" + "ldr.w r3, [%[pB], #-4]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r2, %[sum3]\n" + 
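/*
 * Illustrative sketch (not part of this patch or of CMSIS-NN): the long comment
 * removed above described the interleaved weight layout required by the x4 path
 * of arm_fully_connected_mat_q7_vec_q15_opt(). A hypothetical helper that
 * produces exactly the layout listed in that comment for one group of four
 * rows (column pairs emitted as rows 0,1 then rows 2,3; an odd trailing column
 * emitted in plain row order):
 */
#include <stdint.h>

static void interleave_4_rows_q7(const int8_t *row0, const int8_t *row1,
                                 const int8_t *row2, const int8_t *row3,
                                 uint16_t dim_vec, int8_t *dst)
{
    uint16_t c = 0;
    for (; c + 2 <= dim_vec; c += 2)
    {
        *dst++ = row0[c]; *dst++ = row1[c];
        *dst++ = row0[c + 1]; *dst++ = row1[c + 1];
        *dst++ = row2[c]; *dst++ = row3[c];
        *dst++ = row2[c + 1]; *dst++ = row3[c + 1];
    }
    for (; c < dim_vec; c++)          /* leftover column stays in row order */
    {
        *dst++ = row0[c]; *dst++ = row1[c];
        *dst++ = row2[c]; *dst++ = row3[c];
    }
}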
"smlad %[sum4], r4, r3, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n" + : [sum] "+r"(sum), [sum2] "+r"(sum2), [sum3] "+r"(sum3), [sum4] "+r"(sum4), [pB] "+r"(pB), [pA] "+r"(pA) + : [colCnt] "r"(colCnt) + : "r0", "r1", "r2", "r3", "r4"); #endif /* ARM_MATH_BIG_ENDIAN */ #endif /* USE_INTRINSIC */ @@ -408,10 +334,12 @@ arm_status arm_fully_connected_mat_q7_vec_q15_opt(const q15_t *pV, #endif /* ARM_MATH_DSP */ - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); + /* Return to ARM_CMSIS_NN_SUCCESS */ + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of FC group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15.c index 157688b..6ea0b27 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_fully_connected_q15.c * Description: Q15 basic fully-connected layer function * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,40 +42,23 @@ * @{ */ -/** - * @brief Q15 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * - * @details - * - * Buffer size: - * - * vec_buffer size: 0 - * +/* + * Q15 opt fully-connected layer function + * Refer function header for details */ -arm_status arm_fully_connected_q15(const q15_t *pV, - const q15_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q15_t *bias, - q15_t *pOut, - q15_t *vec_buffer) +arm_cmsis_nn_status arm_fully_connected_q15(const q15_t *pV, + const q15_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q15_t *bias, + q15_t *pOut, + q15_t *vec_buffer) { (void)vec_buffer; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ const q15_t *pB = pM; @@ -187,9 +172,11 @@ arm_status arm_fully_connected_q15(const q15_t *pV, #endif /* ARM_MATH_DSP */ /* Return to application */ - return (ARM_MATH_SUCCESS); + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of FC group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15_opt.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15_opt.c index 9b34c22..82887fa 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15_opt.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q15_opt.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_fully_connected_q15_opt.c * Description: Q15 opt fully-connected layer function * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,71 +42,23 @@ * @{ */ -/** +/* * @brief Q15 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * - * @details - * - * Buffer size: - * - * vec_buffer size: 0 - * - * Here we use only one pointer to read 4 rows in the weight - * matrix. So if the original matrix looks like this: - * - * | a11 | a12 | a13 | - * - * | a21 | a22 | a23 | - * - * | a31 | a32 | a33 | - * - * | a41 | a42 | a43 | - * - * | a51 | a52 | a53 | - * - * | a61 | a62 | a63 | - * - * We operates on multiple-of-4 rows, so the first four rows becomes - * - * | a11 | a12 | a21 | a22 | a31 | a32 | a41 | a42 | - * - * | a13 | a23 | a33 | a43 | - * - * Remaining rows are kept the same original order. 
- * - * So the stored weight matrix looks like this: - * - * - * | a11 | a12 | a21 | a22 | a31 | a32 | a41 | a42 | - * - * | a13 | a23 | a33 | a43 | a51 | a52 | a53 | a61 | - * - * | a62 | a63 | + * Refer function header for details */ -arm_status arm_fully_connected_q15_opt(const q15_t *pV, - const q15_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q15_t *bias, - q15_t *pOut, - q15_t *vec_buffer) +arm_cmsis_nn_status arm_fully_connected_q15_opt(const q15_t *pV, + const q15_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q15_t *bias, + q15_t *pOut, + q15_t *vec_buffer) { (void)vec_buffer; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ const q15_t *pB = pM; @@ -155,26 +109,22 @@ arm_status arm_fully_connected_q15_opt(const q15_t *pV, * activation data: inV */ - asm volatile("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #4\n" - "ldr.w r0, [%[pB]], #16\n" - "smlad %[sum], r4, r0, %[sum]\n" - "ldr.w r1, [%[pB] , #-12]\n" - "smlad %[sum2], r4, r1, %[sum2]\n" - "ldr.w r2, [%[pB] , #-8]\n" - "smlad %[sum3], r4, r2, %[sum3]\n" - "ldr.w r3, [%[pB] , #-4]\n" - "smlad %[sum4], r4, r3, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt) - : "r0", "r1", "r2", "r3", "r4"); + asm volatile( + "COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #4\n" + "ldr.w r0, [%[pB]], #16\n" + "smlad %[sum], r4, r0, %[sum]\n" + "ldr.w r1, [%[pB] , #-12]\n" + "smlad %[sum2], r4, r1, %[sum2]\n" + "ldr.w r2, [%[pB] , #-8]\n" + "smlad %[sum3], r4, r2, %[sum3]\n" + "ldr.w r3, [%[pB] , #-4]\n" + "smlad %[sum4], r4, r3, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n" + : [sum] "+r"(sum), [sum2] "+r"(sum2), [sum3] "+r"(sum3), [sum4] "+r"(sum4), [pB] "+r"(pB), [pA] "+r"(pA) + : [colCnt] "r"(colCnt) + : "r0", "r1", "r2", "r3", "r4"); #endif /* USE_INTRINSIC */ @@ -327,10 +277,12 @@ arm_status arm_fully_connected_q15_opt(const q15_t *pV, #endif /* ARM_MATH_DSP */ - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); + /* Return to ARM_CMSIS_NN_SUCCESS */ + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of FC group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7.c index 6d91db1..de67bb2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. 
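/*
 * Illustrative sketch (not part of this patch or of CMSIS-NN): the parameter
 * documentation removed above described bias_shift as a left-shift applied to
 * the bias and out_shift as a right-shift applied to the result. In scalar
 * form the legacy q15 fully-connected output is computed roughly as below;
 * the SDK handles rounding via NN_ROUND() and saturation via __SSAT(), which
 * this sketch approximates in plain C.
 */
#include <stdint.h>

static int16_t fc_q15_ref_output(const int16_t *input, const int16_t *weights,
                                 uint16_t dim_vec, int16_t bias,
                                 uint16_t bias_shift, uint16_t out_shift)
{
    int32_t acc = ((int32_t)bias << bias_shift);
    acc += (out_shift > 0) ? (1 << (out_shift - 1)) : 0;   /* half-LSB rounding */
    for (uint16_t i = 0; i < dim_vec; i++)
    {
        acc += (int32_t)input[i] * weights[i];
    }
    acc >>= out_shift;
    if (acc > INT16_MAX) acc = INT16_MAX;                  /* saturate to q15 */
    if (acc < INT16_MIN) acc = INT16_MIN;
    return (int16_t)acc;
}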
+ * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_fully_connected_q7.c * Description: Q7 basic fully-connected layer function * - * $Date: January 26, 2021 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,42 +42,23 @@ * @{ */ -/** - * @brief Q7 basic fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * vec_buffer size: dim_vec - * - * This basic function is designed to work with regular weight - * matrix without interleaving. - * +/* + * Q7 basic fully-connected layer function + * Refer function header for details */ -arm_status arm_fully_connected_q7(const q7_t *pV, - const q7_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q7_t *pOut, - q15_t *vec_buffer) +arm_cmsis_nn_status arm_fully_connected_q7(const q7_t *pV, + const q7_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q7_t *pOut, + q15_t *vec_buffer) { -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ const q7_t *pB = pM; @@ -191,10 +174,12 @@ arm_status arm_fully_connected_q7(const q7_t *pV, #endif /* ARM_MATH_DSP */ - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); + /* Return to ARM_CMSIS_NN_SUCCESS */ + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of FC group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7_opt.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7_opt.c index 2b6026c..0c8eae6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7_opt.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_q7_opt.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_fully_connected_q7_opt.c * Description: Q7 basic fully-connected layer function * - * $Date: 09. 
October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.1 * * Target Processor: Cortex-M cores * @@ -40,103 +42,23 @@ * @{ */ -/** - * @brief Q7 opt fully-connected layer function - * @param[in] pV pointer to input vector - * @param[in] pM pointer to matrix weights - * @param[in] dim_vec length of the vector - * @param[in] num_of_rows number of rows in weight matrix - * @param[in] bias_shift amount of left-shift for bias - * @param[in] out_shift amount of right-shift for output - * @param[in] bias pointer to bias - * @param[in,out] pOut pointer to output vector - * @param[in,out] vec_buffer pointer to buffer space for input - * @return The function returns ARM_MATH_SUCCESS - * - * @details - * - * Buffer size: - * - * vec_buffer size: dim_vec - * - * This opt function is designed to work with interleaved weight - * matrix. The vector input is assumed in q7_t format, we call - * arm_q7_to_q15_no_shift_shuffle function to expand into - * q15_t format with certain weight re-ordering, refer to the function - * comments for more details. - * Here we use only one pointer to read 4 rows in the weight - * matrix. So if the original q7_t matrix looks like this: - * - * | a11 | a12 | a13 | a14 | a15 | a16 | a17 | - * - * | a21 | a22 | a23 | a24 | a25 | a26 | a27 | - * - * | a31 | a32 | a33 | a34 | a35 | a36 | a37 | - * - * | a41 | a42 | a43 | a44 | a45 | a46 | a47 | - * - * | a51 | a52 | a53 | a54 | a55 | a56 | a57 | - * - * | a61 | a62 | a63 | a64 | a65 | a66 | a67 | - * - * - * We operates on multiple-of-4 rows, so the first four rows becomes - * - * | a11 | a21 | a13 | a23 | a31 | a41 | a33 | a43 | - * - * | a12 | a22 | a14 | a24 | a32 | a42 | a34 | a44 | - * - * | a15 | a25 | a35 | a45 | a16 | a26 | a36 | a46 | - * - * So within the kernel, we first read the re-ordered vector in as: - * - * | b1 | b3 | and | b2 | b4 | - * - * the four q31_t weights will look like - * - * | a11 | a13 |, | a21 | a23 |, | a31 | a33 |, | a41 | a43 | - * - * | a12 | a14 |, | a22 | a24 |, | a32 | a34 |, | a42 | a44 | - * - * The column left over will be in-order. - * which is: - * - * | a17 | a27 | a37 | a47 | - * - * For the left-over rows, we do 1x1 computation, so the data remains - * as its original order. 
- * - * So the stored weight matrix looks like this: - * - * | a11 | a21 | a13 | a23 | a31 | a41 | - * - * | a33 | a43 | a12 | a22 | a14 | a24 | - * - * | a32 | a42 | a34 | a44 | a15 | a25 | - * - * | a35 | a45 | a16 | a26 | a36 | a46 | - * - * | a17 | a27 | a37 | a47 | a51 | a52 | - * - * | a53 | a54 | a55 | a56 | a57 | a61 | - * - * | a62 | a63 | a64 | a65 | a66 | a67 | - * - * +/* + * Q7 opt fully-connected layer function + * Refer function header for details */ -arm_status arm_fully_connected_q7_opt(const q7_t *pV, - const q7_t *pM, - const uint16_t dim_vec, - const uint16_t num_of_rows, - const uint16_t bias_shift, - const uint16_t out_shift, - const q7_t *bias, - q7_t *pOut, - q15_t *vec_buffer) +arm_cmsis_nn_status arm_fully_connected_q7_opt(const q7_t *pV, + const q7_t *pM, + const uint16_t dim_vec, + const uint16_t num_of_rows, + const uint16_t bias_shift, + const uint16_t out_shift, + const q7_t *bias, + q7_t *pOut, + q15_t *vec_buffer) { -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ const q7_t *pB = pM; @@ -237,81 +159,73 @@ arm_status arm_fully_connected_q7_opt(const q7_t *pV, */ #ifndef ARM_MATH_BIG_ENDIAN - asm volatile("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #8\n" - "ldr.w r1, [%[pB]], #16\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r1, %[sum]\n" - "smlad %[sum2], r4, r0, %[sum2]\n" - "ldr.w r3, [%[pB], #-12]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r3, %[sum3]\n" - "smlad %[sum4], r4, r2, %[sum4]\n" - "ldr.w r4, [%[pA], #-4]\n" - "ldr.w r1, [%[pB], #-8]\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r1, %[sum]\n" - "smlad %[sum2], r4, r0, %[sum2]\n" - "ldr.w r3, [%[pB], #-4]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r3, %[sum3]\n" - "smlad %[sum4], r4, r2, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt) - : "r0", "r1", "r2", "r3", "r4"); + asm volatile( + "COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #8\n" + "ldr.w r1, [%[pB]], #16\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r1, %[sum]\n" + "smlad %[sum2], r4, r0, %[sum2]\n" + "ldr.w r3, [%[pB], #-12]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r3, %[sum3]\n" + "smlad %[sum4], r4, r2, %[sum4]\n" + "ldr.w r4, [%[pA], #-4]\n" + "ldr.w r1, [%[pB], #-8]\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r1, %[sum]\n" + "smlad %[sum2], r4, r0, %[sum2]\n" + "ldr.w r3, [%[pB], #-4]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r3, %[sum3]\n" + "smlad %[sum4], r4, r2, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n" + : [sum] "+r"(sum), [sum2] "+r"(sum2), [sum3] "+r"(sum3), [sum4] "+r"(sum4), [pB] "+r"(pB), [pA] "+r"(pA) + : [colCnt] "r"(colCnt) + : "r0", "r1", "r2", "r3", "r4"); #else - asm volatile("COL_LOOP_%=:\n" - "ldr.w r4, [%[pA]], #8\n" - "ldr.w r1, [%[pB]], #16\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r0, %[sum]\n" - "smlad %[sum2], r4, r1, %[sum2]\n" - "ldr.w r3, [%[pB], #-12]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - 
"smlad %[sum3], r4, r2, %[sum3]\n" - "smlad %[sum4], r4, r3, %[sum4]\n" - "ldr.w r4, [%[pA], #-4]\n" - "ldr.w r1, [%[pB], #-8]\n" - "mov.w r0, r1, ror #8\n" - "sxtb16 r0, r0\n" - "sxtb16 r1, r1\n" - "smlad %[sum], r4, r0, %[sum]\n" - "smlad %[sum2], r4, r1, %[sum2]\n" - "ldr.w r3, [%[pB], #-4]\n" - "mov.w r2, r3, ror #8\n" - "sxtb16 r2, r2\n" - "sxtb16 r3, r3\n" - "smlad %[sum3], r4, r2, %[sum3]\n" - "smlad %[sum4], r4, r3, %[sum4]\n" - "subs %[colCnt], #1\n" - "bne COL_LOOP_%=\n" - : [ sum ] "+r"(sum), - [ sum2 ] "+r"(sum2), - [ sum3 ] "+r"(sum3), - [ sum4 ] "+r"(sum4), - [ pB ] "+r"(pB), - [ pA ] "+r"(pA) - : [ colCnt ] "r"(colCnt) - : "r0", "r1", "r2", "r3", "r4"); + asm volatile( + "COL_LOOP_%=:\n" + "ldr.w r4, [%[pA]], #8\n" + "ldr.w r1, [%[pB]], #16\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r0, %[sum]\n" + "smlad %[sum2], r4, r1, %[sum2]\n" + "ldr.w r3, [%[pB], #-12]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r2, %[sum3]\n" + "smlad %[sum4], r4, r3, %[sum4]\n" + "ldr.w r4, [%[pA], #-4]\n" + "ldr.w r1, [%[pB], #-8]\n" + "mov.w r0, r1, ror #8\n" + "sxtb16 r0, r0\n" + "sxtb16 r1, r1\n" + "smlad %[sum], r4, r0, %[sum]\n" + "smlad %[sum2], r4, r1, %[sum2]\n" + "ldr.w r3, [%[pB], #-4]\n" + "mov.w r2, r3, ror #8\n" + "sxtb16 r2, r2\n" + "sxtb16 r3, r3\n" + "smlad %[sum3], r4, r2, %[sum3]\n" + "smlad %[sum4], r4, r3, %[sum4]\n" + "subs %[colCnt], #1\n" + "bne COL_LOOP_%=\n" + : [sum] "+r"(sum), [sum2] "+r"(sum2), [sum3] "+r"(sum3), [sum4] "+r"(sum4), [pB] "+r"(pB), [pA] "+r"(pA) + : [colCnt] "r"(colCnt) + : "r0", "r1", "r2", "r3", "r4"); #endif /* ARM_MATH_BIG_ENDIAN */ #endif /* USE_INTRINSIC */ @@ -382,6 +296,7 @@ arm_status arm_fully_connected_q7_opt(const q7_t *pV, #else /* Run the following code as reference implementation for Cortex-M0 and Cortex-M3 */ + (void)vec_buffer; uint16_t rowCnt = num_of_rows >> 2; const q7_t *pB = pM; const q7_t *pA; @@ -485,10 +400,12 @@ arm_status arm_fully_connected_q7_opt(const q7_t *pV, #endif /* ARM_MATH_DSP */ - /* Return to ARM_MATH_SUCCESS */ - return (ARM_MATH_SUCCESS); + /* Return to ARM_CMSIS_NN_SUCCESS */ + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of FC group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s16.c new file mode 100644 index 0000000..8e43b71 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s16.c @@ -0,0 +1,101 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2010-2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_fully_connected_s16 + * Description: Fully connected function compatible with TF Lite. + * + * $Date: 19 April 2022 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M and Cortex-A cores + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup FC + * @{ + */ + +/* + * S16 basic fully-connected and matrix multiplication layer function for TensorFlow Lite + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_fully_connected_s16(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q15_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *kernel, + const cmsis_nn_dims *bias_dims, + const int64_t *bias, + const cmsis_nn_dims *output_dims, + q15_t *output) +{ + (void)bias_dims; + (void)ctx; + (void)fc_params->filter_offset; + + int32_t batch_cnt = input_dims->n; + + const q31_t reduced_multiplier = REDUCE_MULTIPLIER(quant_params->multiplier); + + while (batch_cnt) + { + arm_nn_vec_mat_mult_t_s16(input, + kernel, + bias, + output, + reduced_multiplier, + quant_params->shift, + filter_dims->n, /* col_dim or accum_depth */ + output_dims->c, /* row_dim or output_depth */ + fc_params->activation.min, + fc_params->activation.max); + input += filter_dims->n; + output += output_dims->c; + batch_cnt--; + } + + return (ARM_CMSIS_NN_SUCCESS); +} + +int32_t arm_fully_connected_s16_get_buffer_size(const cmsis_nn_dims *filter_dims) +{ + (void)filter_dims; + return 0; +} + +/** + * @} end of FC group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s8.c index 22f8449..08f100a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/FullyConnectedFunctions/arm_fully_connected_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2022 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_fully_connected_s8 * Description: Fully connected function compatible with TF Lite. * - * $Date: 09. 
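/*
 * Illustrative sketch (not part of this patch or of CMSIS-NN): a minimal,
 * hypothetical call site for the newly added arm_fully_connected_s16().
 * Field names follow the cmsis_nn_* structs from arm_nn_types.h; the sizes,
 * quantisation values and activation limits are placeholders only. Since
 * arm_fully_connected_s16_get_buffer_size() returns 0, no scratch buffer is
 * attached to the context.
 */
#include <stdint.h>
#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h"

static arm_cmsis_nn_status run_fc_s16(const q15_t *input, const q7_t *kernel,
                                      const int64_t *bias, q15_t *output)
{
    const cmsis_nn_context ctx = { .buf = NULL, .size = 0 };
    const cmsis_nn_fc_params fc_params = {
        .input_offset = 0, .filter_offset = 0, .output_offset = 0,
        .activation = { .min = INT16_MIN, .max = INT16_MAX },
    };
    const cmsis_nn_per_tensor_quant_params quant_params = {
        .multiplier = 1073741824, .shift = 1 };            /* placeholder scaling */
    const cmsis_nn_dims input_dims  = { .n = 1,  .h = 1, .w = 1, .c = 64 };
    const cmsis_nn_dims filter_dims = { .n = 64, .h = 1, .w = 1, .c = 10 };
    const cmsis_nn_dims bias_dims   = { .n = 1,  .h = 1, .w = 1, .c = 10 };
    const cmsis_nn_dims output_dims = { .n = 1,  .h = 1, .w = 1, .c = 10 };

    return arm_fully_connected_s16(&ctx, &fc_params, &quant_params,
                                   &input_dims, input, &filter_dims, kernel,
                                   &bias_dims, bias, &output_dims, output);
}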
October 2020 - * $Revision: V.2.0.1 + * $Date: 19 April 2022 + * $Revision: V.4.0.0 * * Target Processor: Cortex-M and Cortex-A cores * @@ -47,20 +49,22 @@ * */ -arm_status arm_fully_connected_s8(const cmsis_nn_context *ctx, - const cmsis_nn_fc_params *fc_params, - const cmsis_nn_per_tensor_quant_params *quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input, - const cmsis_nn_dims *filter_dims, - const q7_t *kernel, - const cmsis_nn_dims *bias_dims, - const int32_t *bias, - const cmsis_nn_dims *output_dims, - q7_t *output) +arm_cmsis_nn_status arm_fully_connected_s8(const cmsis_nn_context *ctx, + const cmsis_nn_fc_params *fc_params, + const cmsis_nn_per_tensor_quant_params *quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input, + const cmsis_nn_dims *filter_dims, + const q7_t *kernel, + const cmsis_nn_dims *bias_dims, + const int32_t *bias, + const cmsis_nn_dims *output_dims, + q7_t *output) { (void)bias_dims; (void)ctx; + (void)fc_params->filter_offset; + int32_t batch_cnt = input_dims->n; while (batch_cnt) @@ -70,19 +74,20 @@ arm_status arm_fully_connected_s8(const cmsis_nn_context *ctx, bias, output, fc_params->input_offset, - fc_params->filter_offset, + 0, fc_params->output_offset, quant_params->multiplier, quant_params->shift, filter_dims->n, /* col_dim or accum_depth */ output_dims->c, /* row_dim or output_depth */ fc_params->activation.min, - fc_params->activation.max); + fc_params->activation.max, + 1L); input += filter_dims->n; output += output_dims->c; batch_cnt--; } - return (ARM_MATH_SUCCESS); + return (ARM_CMSIS_NN_SUCCESS); } int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims) @@ -94,3 +99,5 @@ int32_t arm_fully_connected_s8_get_buffer_size(const cmsis_nn_dims *filter_dims) /** * @} end of FC group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c index 6b32c42..7875682 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_accumulate_q7_to_q15.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2021 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_accumulate_q7_to_q15.c * Description: Accumulate q7 vector into q15 one. * - * $Date: 09. 
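/*
 * Illustrative sketch (not part of this patch or of CMSIS-NN):
 * arm_fully_connected_s8() now passes a literal 0 for the filter offset,
 * reflecting symmetric weight quantisation. Per output channel, the value
 * handed to requantization is the following sum; a scalar restatement with
 * hypothetical names:
 */
#include <stdint.h>

static int32_t fc_s8_channel_acc(const int8_t *input, const int8_t *weights,
                                 int32_t accum_depth, int32_t input_offset,
                                 const int32_t *bias /* may be NULL */)
{
    int32_t acc = bias ? *bias : 0;
    for (int32_t i = 0; i < accum_depth; i++)
    {
        /* only the input offset is applied; the filter offset is 0 */
        acc += ((int32_t)input[i] + input_offset) * weights[i];
    }
    return acc;
}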
October 2020 - * $Revision: V.1.0.2 + * $Date: 20 July 2021 + * $Revision: V.1.1.2 * * pSrc Processor: Cortex-M CPUs * @@ -44,11 +46,13 @@ void arm_nn_accumulate_q7_to_q15(q15_t *pDst, const q7_t *pSrc, uint32_t length) { q15_t *pCnt = pDst; const q7_t *pV = pSrc; + int32_t count = length; +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) q31_t v1, v2, vo1, vo2; - int32_t cnt = length >> 2; + count = length >> 2; q31_t in; - while (cnt > 0l) + while (count > 0l) { q31_t value = arm_nn_read_q7x4_ia(&pV); v1 = __SXTB16(__ROR((uint32_t)value, 8)); @@ -67,16 +71,19 @@ void arm_nn_accumulate_q7_to_q15(q15_t *pDst, const q7_t *pSrc, uint32_t length) in = arm_nn_read_q15x2(pCnt); arm_nn_write_q15x2_ia(&pCnt, __QADD16(vo2, in)); - cnt--; + count--; } - cnt = length & 0x3; - while (cnt > 0l) + count = length & 0x3; +#endif + while (count > 0l) { *pCnt++ += *pV++; - cnt--; + count--; } } /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c index 5845653..7ff743d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_add_q7.c @@ -1,3 +1,5 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. * @@ -21,8 +23,8 @@ * Title: arm_nn_add_q7.c * Description: Non saturating addition of elements of a q7 vector. * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 20. July 2021 + * $Revision: V.1.1.1 * * Target Processor: Cortex-M cores * @@ -44,7 +46,7 @@ void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size) { uint32_t block_count; q31_t result = 0; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Loop unrolling: Compute 4 outputs at a time */ block_count = block_size >> 2U; @@ -79,4 +81,5 @@ void arm_nn_add_q7(const q7_t *input, q31_t *output, uint32_t block_size) /** * @} end of NNBasicMath group - */ \ No newline at end of file + */ +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c index c15b2a2..7d12144 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_padded_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2020, 2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_depthwise_conv_nt_t_padded_s8.c * Description: Depthwise convolution with padded matrices. * - * $Date: 09. 
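/*
 * Illustrative sketch (not part of this patch or of CMSIS-NN): the reworked
 * arm_nn_accumulate_q7_to_q15() shares one `count` variable between the DSP
 * and plain-C paths. Functionally it widens each q7 element and adds it to
 * the q15 destination; the DSP path does this two lanes at a time with
 * __SXTB16/__QADD16 (saturating), while the scalar tail uses a plain add.
 * A scalar model of the saturating variant, with hypothetical helper names:
 */
#include <stdint.h>

static int16_t sat_add_q15(int32_t a, int32_t b)
{
    int32_t sum = a + b;
    if (sum > INT16_MAX) sum = INT16_MAX;
    if (sum < INT16_MIN) sum = INT16_MIN;
    return (int16_t)sum;
}

static void accumulate_q7_to_q15_ref(int16_t *dst, const int8_t *src, uint32_t length)
{
    for (uint32_t i = 0; i < length; i++)
    {
        dst[i] = sat_add_q15(dst[i], src[i]);   /* widen q7 and accumulate */
    }
}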
October 2020 - * $Revision: V.1.0.2 + * $Date: 27. July 2022 + * $Revision: V.2.0.0 * * Target Processor: Cortex-M processors with MVE extension * -------------------------------------------------------------------- */ @@ -46,38 +48,43 @@ * */ -q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, - const q7_t *rhs, - const int32_t input_offset, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t row_x_col, - const int32_t *const output_bias, - q7_t *out) +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, + const q7_t *rhs, + const int32_t input_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + q7_t *out) { #if defined(ARM_MATH_MVEI) - int32_t loop_count = (num_ch + 3) / 4; + int32_t loop_count = (active_ch + 3) / 4; const int32_t *bias = output_bias; - uint32_t num_ch_to_process = num_ch; + uint32_t num_ch_to_process = active_ch; for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; num_ch_to_process -= 4, out += 4, offset += 4, i_loop_cnt++) { - int32x4_t out_0 = vldrwq_s32(bias); + int32x4_t out_0 = vdupq_n_s32(0); + if (bias) + { + out_0 = vldrwq_s32(bias); + bias += 4; + } int32x4_t out_1 = out_0; int32x4_t out_2 = out_0; int32x4_t out_3 = out_0; - bias += 4; const int8_t *rhs_0 = rhs + offset; const int8_t *lhs_0 = lhs + offset; - const int8_t *lhs_1 = lhs + row_x_col * num_ch + offset; - const int8_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset; - const int8_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset; + const int8_t *lhs_1 = lhs + row_x_col * CH_IN_BLOCK_MVE + offset; + const int8_t *lhs_2 = lhs + (row_x_col * CH_IN_BLOCK_MVE * 2) + offset; + const int8_t *lhs_3 = lhs + (row_x_col * CH_IN_BLOCK_MVE * 3) + offset; for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) { @@ -100,12 +107,12 @@ q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, out_3 += vmulq_s32(ip_3, ker_0); - lhs_0 += num_ch; - lhs_1 += num_ch; - lhs_2 += num_ch; - lhs_3 += num_ch; + lhs_0 += CH_IN_BLOCK_MVE; + lhs_1 += CH_IN_BLOCK_MVE; + lhs_2 += CH_IN_BLOCK_MVE; + lhs_3 += CH_IN_BLOCK_MVE; - rhs_0 += num_ch; + rhs_0 += total_ch; } const int32x4_t mult = vldrwq_s32(out_mult); @@ -124,33 +131,29 @@ q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, out_1 = vaddq_n_s32(out_1, out_offset); out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + num_ch, out_1, p); + vstrbq_p_s32(out + total_ch, out_1, p); out_2 = arm_requantize_mve_32x4(out_2, mult, shift); out_2 = vaddq_n_s32(out_2, out_offset); out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + 2 * num_ch, out_2, p); + vstrbq_p_s32(out + 2 * total_ch, out_2, p); out_3 = arm_requantize_mve_32x4(out_3, mult, shift); out_3 = vaddq_n_s32(out_3, out_offset); out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + 3 * num_ch, out_3, p); + vstrbq_p_s32(out + 3 * total_ch, out_3, p); } - const int tail_ch = num_ch & 0x3; - if (tail_ch != 0) - { - out -= (4 - tail_ch); - } - return out + (3 * num_ch); + return 
ARM_CMSIS_NN_SUCCESS; #else (void)lhs; (void)rhs; (void)input_offset; - (void)num_ch; + (void)active_ch; + (void)total_ch; (void)out_shift; (void)out_mult; (void)out_offset; @@ -159,10 +162,12 @@ q7_t *arm_nn_depthwise_conv_nt_t_padded_s8(const q7_t *lhs, (void)row_x_col; (void)output_bias; (void)out; - return NULL; + return ARM_CMSIS_NN_NO_IMPL_ERROR; #endif } /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s16.c new file mode 100644 index 0000000..503aa64 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s16.c @@ -0,0 +1,175 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_depthwise_conv_nt_t_s16.c + * Description: Depthwise convolution on matrices with no padding. + * + * $Date: 6 July 2022 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M processors with MVE extension + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * Depthwise convolution of rhs matrix with 4 lhs matrices with no padding. Dimensions are the same for lhs and rhs. + * + * Refer header file for details. 
+ * + */ +int16_t *arm_nn_depthwise_conv_nt_t_s16(const int16_t *lhs, + const q7_t *rhs, + const uint16_t num_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int64_t *const output_bias, + int16_t *out) +{ +#if defined(ARM_MATH_MVEI) + + const int64_t *bias = output_bias; + int32_t loop_count = (num_ch + 3) / 4; + uint32_t num_ch_to_process = num_ch; + + for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; + num_ch_to_process -= 4, offset += 4, out += 4, i_loop_cnt++) + { + const int8_t *rhs_0 = rhs + offset; + const int16_t *lhs_0 = lhs + offset; + const int16_t *lhs_1 = lhs + row_x_col * num_ch + offset; + const int16_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset; + const int16_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset; + + int32x4_t out_0 = vdupq_n_s32(0); + int32x4_t out_1 = vdupq_n_s32(0); + int32x4_t out_2 = vdupq_n_s32(0); + int32x4_t out_3 = vdupq_n_s32(0); + + for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) + { + const int32x4_t ker_0 = vldrbq_s32(rhs_0); + + int32x4_t ip_0 = vldrhq_s32(lhs_0); + out_0 += vmulq_s32(ip_0, ker_0); + + int32x4_t ip_1 = vldrhq_s32(lhs_1); + out_1 += vmulq_s32(ip_1, ker_0); + + int32x4_t ip_2 = vldrhq_s32(lhs_2); + out_2 += vmulq_s32(ip_2, ker_0); + + int32x4_t ip_3 = vldrhq_s32(lhs_3); + out_3 += vmulq_s32(ip_3, ker_0); + + lhs_0 += num_ch; + lhs_1 += num_ch; + lhs_2 += num_ch; + lhs_3 += num_ch; + + rhs_0 += num_ch; + } + + for (int i_requantize = 0; i_requantize < 4; i_requantize++) + { + int32_t reduced_multiplier = REDUCE_MULTIPLIER(out_mult[i_requantize]); + int32_t shift = out_shift[i_requantize]; + int64_t in_requantize_0 = (int64_t)out_0[i_requantize]; + int64_t in_requantize_1 = (int64_t)out_1[i_requantize]; + int64_t in_requantize_2 = (int64_t)out_2[i_requantize]; + int64_t in_requantize_3 = (int64_t)out_3[i_requantize]; + + if (bias) + { + in_requantize_0 += *bias; + in_requantize_1 += *bias; + in_requantize_2 += *bias; + in_requantize_3 += *bias; + bias++; + } + + out_0[i_requantize] = arm_nn_requantize_s64(in_requantize_0, reduced_multiplier, shift); + out_1[i_requantize] = arm_nn_requantize_s64(in_requantize_1, reduced_multiplier, shift); + out_2[i_requantize] = arm_nn_requantize_s64(in_requantize_2, reduced_multiplier, shift); + out_3[i_requantize] = arm_nn_requantize_s64(in_requantize_3, reduced_multiplier, shift); + } + + mve_pred16_t p = vctp32q(num_ch_to_process); + + out_0 = vmaxq_s32(out_0, vdupq_n_s32(activation_min)); + out_0 = vminq_s32(out_0, vdupq_n_s32(activation_max)); + vstrhq_p_s32(out, out_0, p); + + out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); + out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); + vstrhq_p_s32(out + num_ch, out_1, p); + + out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); + out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); + vstrhq_p_s32(out + 2 * num_ch, out_2, p); + + out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); + out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); + vstrhq_p_s32(out + 3 * num_ch, out_3, p); + + out_mult += 4; + out_shift += 4; + } + const int tail_ch = num_ch & 0x3; + if (tail_ch != 0) + { + out -= (4 - tail_ch); + } + + return out + (3 * num_ch); +#else + (void)lhs; + (void)rhs; + (void)num_ch; + (void)out_shift; + (void)out_mult; + (void)activation_min; + (void)activation_max; + (void)row_x_col; + (void)output_bias; + (void)out; + return NULL; +#endif +} + +/** + * @} end of NNBasicMath group + */ 
+ +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c index 1c1e316..b8d0871 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_depthwise_conv_nt_t_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2020, 2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_depthwise_conv_nt_t_s8.c * Description: Depthwise convolution on matrices with no padding. * - * $Date: 09. October 2020 - * $Revision: V.1.0.2 + * $Date: 27. July 2022 + * $Revision: V.2.0.0 * * Target Processor: Cortex-M processors with MVE extension. * -------------------------------------------------------------------- */ @@ -44,39 +46,43 @@ * Refer header file for details. * */ - -q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, - const q7_t *rhs, - const int32_t input_offset, - const uint16_t num_ch, - const int32_t *out_shift, - const int32_t *out_mult, - const int32_t out_offset, - const int32_t activation_min, - const int32_t activation_max, - const uint16_t row_x_col, - const int32_t *const output_bias, - q7_t *out) +arm_cmsis_nn_status arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, + const q7_t *rhs, + const int32_t input_offset, + const int32_t active_ch, + const int32_t total_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t out_offset, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t row_x_col, + const int32_t *const output_bias, + q7_t *out) { #if defined(ARM_MATH_MVEI) const int32_t *bias = output_bias; - int32_t loop_count = (num_ch + 3) / 4; - uint32_t num_ch_to_process = num_ch; + int32_t loop_count = (active_ch + 3) / 4; + uint32_t num_ch_to_process = active_ch; for (int i_loop_cnt = 0, offset = 0; i_loop_cnt < loop_count; num_ch_to_process -= 4, offset += 4, out += 4, i_loop_cnt++) { - int32x4_t out_0 = vldrwq_s32(bias); + int32x4_t out_0 = vdupq_n_s32(0); + if (bias) + { + out_0 = vldrwq_s32(bias); + bias += 4; + } int32x4_t out_1 = out_0; int32x4_t out_2 = out_0; int32x4_t out_3 = out_0; - bias += 4; const int8_t *rhs_0 = rhs + offset; const int8_t *lhs_0 = lhs + offset; - const int8_t *lhs_1 = lhs + row_x_col * num_ch + offset; - const int8_t *lhs_2 = lhs + (row_x_col * num_ch * 2) + offset; - const int8_t *lhs_3 = lhs + (row_x_col * num_ch * 3) + offset; + const int8_t *lhs_1 = lhs + row_x_col * CH_IN_BLOCK_MVE + offset; + const int8_t *lhs_2 = lhs + (row_x_col * CH_IN_BLOCK_MVE * 2) + offset; + const int8_t *lhs_3 = lhs + (row_x_col * CH_IN_BLOCK_MVE * 3) + offset; int32x4_t ker_sum = vdupq_n_s32(0); for (int i_row_x_col = 0; i_row_x_col < row_x_col; i_row_x_col++) @@ -96,12 +102,12 @@ q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, int32x4_t ip_3 = vldrbq_s32(lhs_3); out_3 += vmulq_s32(ip_3, ker_0); - lhs_0 += num_ch; - lhs_1 += num_ch; - lhs_2 += num_ch; - lhs_3 += num_ch; + lhs_0 += CH_IN_BLOCK_MVE; + lhs_1 += 
CH_IN_BLOCK_MVE; + lhs_2 += CH_IN_BLOCK_MVE; + lhs_3 += CH_IN_BLOCK_MVE; - rhs_0 += num_ch; + rhs_0 += total_ch; } ker_sum = vmulq_n_s32(ker_sum, input_offset); @@ -126,33 +132,28 @@ q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, out_1 = vaddq_n_s32(out_1, out_offset); out_1 = vmaxq_s32(out_1, vdupq_n_s32(activation_min)); out_1 = vminq_s32(out_1, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + num_ch, out_1, p); + vstrbq_p_s32(out + total_ch, out_1, p); out_2 = arm_requantize_mve_32x4(out_2, mult, shift); out_2 = vaddq_n_s32(out_2, out_offset); out_2 = vmaxq_s32(out_2, vdupq_n_s32(activation_min)); out_2 = vminq_s32(out_2, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + 2 * num_ch, out_2, p); + vstrbq_p_s32(out + 2 * total_ch, out_2, p); out_3 = arm_requantize_mve_32x4(out_3, mult, shift); out_3 = vaddq_n_s32(out_3, out_offset); out_3 = vmaxq_s32(out_3, vdupq_n_s32(activation_min)); out_3 = vminq_s32(out_3, vdupq_n_s32(activation_max)); - vstrbq_p_s32(out + 3 * num_ch, out_3, p); + vstrbq_p_s32(out + 3 * total_ch, out_3, p); } - const int tail_ch = num_ch & 0x3; - if (tail_ch != 0) - { - out -= (4 - tail_ch); - } - - return out + (3 * num_ch); + return ARM_CMSIS_NN_SUCCESS; #else (void)lhs; (void)rhs; (void)input_offset; - (void)num_ch; + (void)active_ch; + (void)total_ch; (void)out_shift; (void)out_mult; (void)out_offset; @@ -161,10 +162,12 @@ q7_t *arm_nn_depthwise_conv_nt_t_s8(const q7_t *lhs, (void)row_x_col; (void)output_bias; (void)out; - return NULL; + return ARM_CMSIS_NN_NO_IMPL_ERROR; #endif } /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c index a1b21c2..67685df 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_1x_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_mat_mul_core_1x_s8.c * Description: General Matrix-multiplication function * - * $Date: 09. October 2020 - * $Revision: V.1.0.2 + * $Date: 22 Aug 2022 + * $Revision: V.3.1.0 * * Target Processor: Cortex-M cores * -------------------------------------------------------------------- */ @@ -44,43 +46,110 @@ * Refer header file for details. 
* */ - -arm_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, - const int8_t *row_base, - const int8_t *col_base, - int32_t *const sum_col, - int32_t *const output) +arm_cmsis_nn_status arm_nn_mat_mul_core_1x_s8(int32_t row_elements, + const int32_t skipped_row_elements, + const int8_t *row_base_ref, + const int8_t *col_base_ref, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output) { - int32_t acc_n0 = 0; - int32_t sum_tmp = 0; +#if defined(ARM_MATH_MVEI) + const int8_t *col_base = col_base_ref; + int32_t *output_mult = quant_params->multiplier; + int32_t *output_shift = quant_params->shift; + const int32_t out_offset = conv_params->output_offset; + const int32_t out_activation_min = conv_params->activation.min; + const int32_t out_activation_max = conv_params->activation.max; + + int32_t acc[4]; + for (int i = 0; i < out_ch; i++) + { + int32_t acc_n0 = 0; + const int8_t *row_base = row_base_ref; -#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) + int32_t sum_tmp = 0; - __ASM volatile(" vldrb.8 q0, [%[col]], 16 \n" - " wlstp.8 lr, %[cnt], 1f \n" - "2: \n" - " vaddva.s8 %[sum], q0 \n" - " vldrb.8 q1, [%[row0]], 16 \n" - " vmladava.s8 %[out0], q0, q1 \n" - " vldrb.8 q0, [%[col]], 16 \n" - " letp lr, 2b \n" - "1: \n" - : [col] "+r"(col_base), [sum] "+Te"(sum_tmp), [row0] "+r"(row_base), [out0] "+Te"(acc_n0) - : [cnt] "r"(row_elements) - : "q0", "q1", "memory", "r14"); +#if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < row_elements; j++) + { + int32_t col = col_base[j]; + sum_tmp += col; + acc_n0 += row_base[j] * col; + } #else - for (int i = 0; i < row_elements; i++) + __ASM volatile(" vldrb.8 q0, [%[col]], #16 \n" + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vaddva.s8 %[sum], q0 \n" + " vldrb.8 q1, [%[row0]], #16 \n" + " vmladava.s8 %[out0], q0, q1 \n" + " vldrb.8 q0, [%[col]], #16 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+r"(col_base), [sum] "+Te"(sum_tmp), [row0] "+r"(row_base), [out0] "+Te"(acc_n0) + : [cnt] "r"(row_elements) + : "q0", "q1", "memory", "r14"); +#endif + + sum_tmp *= conv_params->input_offset; + acc_n0 += sum_tmp; + + const int32_t index = i & 0x3; + acc[index] = acc_n0; + + if (index == 3) + { + int32x4_t res = vldrwq_s32(acc); + if (bias) + { + res = vaddq_s32(res, vldrwq_s32(bias)); + bias += 4; + } + res = arm_requantize_mve_32x4(res, vldrwq_s32(output_mult), vldrwq_s32(output_shift)); + output_mult += 4; + output_shift += 4; + res = vaddq_n_s32(res, out_offset); + res = vmaxq_s32(res, vdupq_n_s32(out_activation_min)); + res = vminq_s32(res, vdupq_n_s32(out_activation_max)); + vstrbq_s32(output, res); + output += 4; + } + col_base = col_base_ref + (i + 1) * (row_elements + skipped_row_elements); + } + // Handle left over elements + for (int i = 0; i < (out_ch & 0x3); i++) { - sum_tmp += col_base[i]; - acc_n0 += row_base[i] * col_base[i]; + int32_t acc_n0 = acc[i]; + if (bias) + { + acc_n0 += bias[i]; + } + acc_n0 = arm_nn_requantize(acc_n0, output_mult[i], output_shift[i]); + acc_n0 += conv_params->output_offset; + acc_n0 = MAX(acc_n0, conv_params->activation.min); + acc_n0 = MIN(acc_n0, conv_params->activation.max); + *output++ = (q7_t)acc_n0; } -#endif - *sum_col = sum_tmp; - *output = acc_n0; - return ARM_MATH_SUCCESS; +#else + (void)row_elements; + (void)skipped_row_elements; + (void)row_base_ref; + (void)col_base_ref; + (void)out_ch; + (void)conv_params; + (void)quant_params; + (void)bias; + (void)output; +#endif + return 
ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c index bcd8be6..b0ea228 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_core_4x_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,14 +23,13 @@ * Title: arm_nn_mat_mul_core_4x_s8.c * Description: General matrix multiplication function for MVE extension * - * $Date: 09. October 2020 - * $Revision: V.2.0.1 + * $Date: 22. Aug 2022 + * $Revision: V.3.1.0 * - * Target Processor: Cortex-M cores + * Target Processor: Cortex-M processors * -------------------------------------------------------------------- */ - +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h" #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" - /** * @ingroup groupSupport */ @@ -44,73 +45,109 @@ * Refer header file for details. * */ -arm_status arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, - const int32_t offset, - const int8_t *row_base, - const int8_t *col_base, - int32_t *const sum_col, - int32_t *const output) + +int8_t *arm_nn_mat_mul_core_4x_s8(const int32_t row_elements, + const int32_t offset, + const int8_t *row_base, + const int8_t *col_base_ref, + const int32_t out_ch, + const cmsis_nn_conv_params *conv_params, + const cmsis_nn_per_channel_quant_params *quant_params, + const int32_t *bias, + int8_t *output) { - int32_t acc_n0 = 0; - int32_t acc_n1 = 0; - int32_t acc_n2 = 0; - int32_t acc_n3 = 0; - const int8_t *ip_row_0 = row_base; - const int8_t *ip_row_1 = row_base + offset; - const int8_t *ip_row_2 = row_base + (2 * offset); - const int8_t *ip_row_3 = row_base + (3 * offset); - int32_t sum_tmp = 0; +#if defined(ARM_MATH_MVEI) + for (int i = 0; i < out_ch; i++) + { + int32_t acc_n0 = 0; + int32_t acc_n1 = 0; + int32_t acc_n2 = 0; + int32_t acc_n3 = 0; -#if defined(ARM_MATH_MVEI) && !defined(ARM_MATH_AUTOVECTORIZE) - __ASM volatile(" vldrb.8 q0, [%[col]], 16 \n" - " wlstp.8 lr, %[cnt], 1f \n" - "2: \n" - " vaddva.s8 %[sum], q0 \n" - " vldrb.8 q1, [%[row0]], 16 \n" - " vmladava.s8 %[out0], q0, q1 \n" - " vldrb.8 q2, [%[row1]], 16 \n" - " vmladava.s8 %[out1], q0, q2 \n" - " vldrb.8 q3, [%[row2]], 16 \n" - " vmladava.s8 %[out2], q0, q3 \n" - " vldrb.8 q4, [%[row3]], 16 \n" - " vmladava.s8 %[out3], q0, q4 \n" - " vldrb.8 q0, [%[col]], 16 \n" - " letp lr, 2b \n" - "1: \n" - : [col] "+r"(col_base), - [sum] "+Te"(sum_tmp), - [row0] "+r"(ip_row_0), - [row1] "+r"(ip_row_1), - [row2] "+r"(ip_row_2), - [row3] "+r"(ip_row_3), - [out0] "+Te"(acc_n0), - [out1] "+Te"(acc_n1), - [out2] "+Te"(acc_n2), - [out3] "+Te"(acc_n3) - : [cnt] "r"(row_elements) - : "q0", "q1", "q2", "q3", "q4", "memory", "r14"); + const int8_t *ip_row_0 = row_base; + const int8_t *ip_row_1 = row_base + offset; + const int8_t *ip_row_2 = row_base + (2 * 
offset); + const int8_t *ip_row_3 = row_base + (3 * offset); + const int8_t *col_base = col_base_ref + i * row_elements; + int32_t sum_tmp = 0; + +#if defined(ARM_MATH_AUTOVECTORIZE) + for (int j = 0; j < row_elements; j++) + { + int32_t col = col_base[j]; + sum_tmp += col; + acc_n0 += ip_row_0[j] * col; + acc_n1 += ip_row_1[j] * col; + acc_n2 += ip_row_2[j] * col; + acc_n3 += ip_row_3[j] * col; + } #else - for (int i = 0; i < row_elements; i++) - { - int32_t col = col_base[i]; - sum_tmp += col; - acc_n0 += ip_row_0[i] * col; - acc_n1 += ip_row_1[i] * col; - acc_n2 += ip_row_2[i] * col; - acc_n3 += ip_row_3[i] * col; - } + __ASM volatile(" vldrb.8 q0, [%[col]], #16 \n" + " wlstp.8 lr, %[cnt], 1f \n" + "2: \n" + " vaddva.s8 %[sum], q0 \n" + " vldrb.8 q1, [%[row0]], #16 \n" + " vmladava.s8 %[out0], q0, q1 \n" + " vldrb.8 q2, [%[row1]], #16 \n" + " vmladava.s8 %[out1], q0, q2 \n" + " vldrb.8 q3, [%[row2]], #16 \n" + " vmladava.s8 %[out2], q0, q3 \n" + " vldrb.8 q4, [%[row3]], #16 \n" + " vmladava.s8 %[out3], q0, q4 \n" + " vldrb.8 q0, [%[col]], #16 \n" + " letp lr, 2b \n" + "1: \n" + : [col] "+r"(col_base), + [sum] "+Te"(sum_tmp), + [row0] "+r"(ip_row_0), + [row1] "+r"(ip_row_1), + [row2] "+r"(ip_row_2), + [row3] "+r"(ip_row_3), + [out0] "+Te"(acc_n0), + [out1] "+Te"(acc_n1), + [out2] "+Te"(acc_n2), + [out3] "+Te"(acc_n3) + : [cnt] "r"(row_elements) + : "q0", "q1", "q2", "q3", "q4", "memory", "r14"); #endif - output[0] = acc_n0; - output[1] = acc_n1; - output[2] = acc_n2; - output[3] = acc_n3; - *sum_col = sum_tmp; + int32x4_t res = {acc_n0, acc_n1, acc_n2, acc_n3}; + sum_tmp *= conv_params->input_offset; + if (bias) + { + sum_tmp += bias[i]; + } + res = vaddq_n_s32(res, sum_tmp); - return ARM_MATH_SUCCESS; + res = arm_requantize_mve(res, quant_params->multiplier[i], quant_params->shift[i]); + res = vaddq_n_s32(res, conv_params->output_offset); + + res = vmaxq_s32(res, vdupq_n_s32(conv_params->activation.min)); + res = vminq_s32(res, vdupq_n_s32(conv_params->activation.max)); + + const uint32x4_t scatter_offset = {0, (uint32_t)out_ch, (uint32_t)out_ch * 2, (uint32_t)out_ch * 3}; + vstrbq_scatter_offset_s32(output, scatter_offset, res); + output++; + } + + return output + (3 * out_ch); +#else + (void)row_elements; + (void)offset; + (void)row_base; + (void)col_base_ref; + (void)out_ch; + (void)conv_params; + (void)quant_params; + (void)bias; + (void)output; + return NULL; +#endif } /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_kernel_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_kernel_s16.c new file mode 100644 index 0000000..b93e078 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mul_kernel_s16.c @@ -0,0 +1,254 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_mat_mult_kernel_s16.c + * Description: Matrix-multiplication function for convolution + * + * $Date: 12 August 2021 + * $Revision: V.1.1.0 + * + * Target Processor: Cortex-M cores + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/* + * Matrix-multiplication function for convolution with per-channel requantization. + * + * Refer header file for details. + * + */ + +q15_t *arm_nn_mat_mult_kernel_s16(const q7_t *input_a, + const q15_t *input_b, + const int32_t output_ch, + const int32_t *out_shift, + const int32_t *out_mult, + const int16_t activation_min, + const int16_t activation_max, + const int32_t num_col_a, + const int64_t *const output_bias, + q15_t *out_0) +{ + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + /* set up the second output pointers */ + q15_t *out_1 = out_0 + output_ch; + const int64_t *bias = output_bias; + uint16_t row_count = output_ch / 2; + const q7_t *ip_a0 = input_a; + + /* this loop over rows in A */ + while (row_count) + { + /* setup pointers for B */ + const q15_t *ip_b0 = input_b; + const q15_t *ip_b1 = ip_b0 + num_col_a; + + /* align the second pointer for A */ + const q7_t *ip_a1 = ip_a0 + num_col_a; + + /* Init accumulator for channel N and N + 1 */ + q31_t ch_0_out_0 = 0; + q31_t ch_0_out_1 = 0; + q31_t ch_1_out_0 = 0; + q31_t ch_1_out_1 = 0; + + uint16_t col_count = num_col_a / 4; + /* accumulate over the vector */ + while (col_count) + { + q31_t a01, a02, a11, a12; + q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad(ip_a0, &a01, &a02); + ip_a1 = read_and_pad(ip_a1, &a11, &a12); + + ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); + ch_1_out_0 = __SMLAD(a11, b0, ch_1_out_0); + ch_1_out_1 = __SMLAD(a11, b1, ch_1_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); + ch_1_out_0 = __SMLAD(a12, b0, ch_1_out_0); + ch_1_out_1 = __SMLAD(a12, b1, ch_1_out_1); + + col_count--; + } /* while over col_count */ + col_count = num_col_a & 0x3; + while (col_count) + { + q7_t a0 = *ip_a0++; + q15_t b0 = *ip_b0++; + q7_t a1 = *ip_a1++; + q15_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + ch_1_out_0 += a1 * b0; + ch_1_out_1 += a1 * b1; + col_count--; + } /* while over col_count */ + if (bias) + { + q31_t reduced_multiplier = REDUCE_MULTIPLIER(*out_mult); + q63_t acc_64 = ch_0_out_0 + *bias; + ch_0_out_0 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift); + acc_64 = ch_0_out_1 + *bias++; + ch_0_out_1 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift); + out_mult++; + } + else + { + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, 
*out_shift); + out_mult++; + } + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (q15_t)ch_0_out_0; + + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (q15_t)ch_0_out_1; + out_shift++; + + if (bias) + { + q31_t reduced_multiplier = REDUCE_MULTIPLIER(*out_mult); + q63_t acc_64 = ch_1_out_0 + *bias; + ch_1_out_0 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift); + acc_64 = ch_1_out_1 + *bias++; + ch_1_out_1 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift); + out_mult++; + } + else + { + ch_1_out_0 = arm_nn_requantize(ch_1_out_0, *out_mult, *out_shift); + ch_1_out_1 = arm_nn_requantize(ch_1_out_1, *out_mult, *out_shift); + out_mult++; + } + ch_1_out_0 = MAX(ch_1_out_0, activation_min); + ch_1_out_0 = MIN(ch_1_out_0, activation_max); + *out_0++ = (q15_t)ch_1_out_0; + + ch_1_out_1 = MAX(ch_1_out_1, activation_min); + ch_1_out_1 = MIN(ch_1_out_1, activation_max); + *out_1++ = (q15_t)ch_1_out_1; + out_shift++; + + /* skip row */ + ip_a0 += num_col_a; + row_count--; + } + + /* compute the last odd numbered row if any */ + if (output_ch & 0x1) + { + /* setup pointers for B */ + const q15_t *ip_b0 = input_b; + const q15_t *ip_b1 = ip_b0 + num_col_a; + + q31_t ch_0_out_0 = 0; + q31_t ch_0_out_1 = 0; + + uint16_t col_count = num_col_a >> 2; + while (col_count) + { + q31_t a01, a02; + q31_t b0 = arm_nn_read_q15x2_ia(&ip_b0); + q31_t b1 = arm_nn_read_q15x2_ia(&ip_b1); + + ip_a0 = read_and_pad(ip_a0, &a01, &a02); + + ch_0_out_0 = __SMLAD(a01, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a01, b1, ch_0_out_1); + + b0 = arm_nn_read_q15x2_ia(&ip_b0); + b1 = arm_nn_read_q15x2_ia(&ip_b1); + ch_0_out_0 = __SMLAD(a02, b0, ch_0_out_0); + ch_0_out_1 = __SMLAD(a02, b1, ch_0_out_1); + + col_count--; + } + col_count = num_col_a & 0x3; + while (col_count) + { + q7_t a0 = *ip_a0++; + q15_t b0 = *ip_b0++; + q15_t b1 = *ip_b1++; + + ch_0_out_0 += a0 * b0; + ch_0_out_1 += a0 * b1; + col_count--; + } + if (bias) + { + q31_t reduced_multiplier = REDUCE_MULTIPLIER(*out_mult); + q63_t acc_64 = ch_0_out_0 + *bias; + ch_0_out_0 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift); + acc_64 = ch_0_out_1 + *bias++; + ch_0_out_1 = arm_nn_requantize_s64(acc_64, reduced_multiplier, *out_shift); + } + else + { + ch_0_out_0 = arm_nn_requantize(ch_0_out_0, *out_mult, *out_shift); + ch_0_out_1 = arm_nn_requantize(ch_0_out_1, *out_mult, *out_shift); + } + ch_0_out_0 = MAX(ch_0_out_0, activation_min); + ch_0_out_0 = MIN(ch_0_out_0, activation_max); + *out_0++ = (q15_t)ch_0_out_0; + + ch_0_out_1 = MAX(ch_0_out_1, activation_min); + ch_0_out_1 = MIN(ch_0_out_1, activation_max); + *out_1++ = (q15_t)ch_0_out_1; + out_mult++; + out_shift++; + } + + out_0 += output_ch; + + /* return the new output pointer with offset */ + return out_0; +#else + (void)input_a; + (void)input_b; + (void)output_ch; + (void)out_shift; + (void)out_mult; + (void)activation_min; + (void)activation_max; + (void)num_col_a; + (void)output_bias; + (void)out_0; + /* To be completed */ + return NULL; +#endif +} + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c index 1fbe5db..552a4e1 100644 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mat_mult_nt_t_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2020-2022 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_mat_mult_s8_nt_t_s8 * Description: Matrix multiplication support function with the right-hand-side (rhs) matrix transposed * - * $Date: 09. October 2020 - * $Revision: V.1.0.3 + * $Date: 19 April 2022 + * $Revision: V.2.0.0 * * Target Processor: Cortex-M * @@ -45,19 +47,19 @@ * Refer header file for details. * */ -arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, - const q7_t *rhs, - const q31_t *bias, - q7_t *dst, - const int32_t *dst_multipliers, - const int32_t *dst_shifts, - const int32_t lhs_rows, - const int32_t rhs_rows, - const int32_t rhs_cols, - const int32_t lhs_offset, - const int32_t dst_offset, - const int32_t activation_min, - const int32_t activation_max) +arm_cmsis_nn_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, + const q7_t *rhs, + const q31_t *bias, + q7_t *dst, + const int32_t *dst_multipliers, + const int32_t *dst_shifts, + const int32_t lhs_rows, + const int32_t rhs_rows, + const int32_t rhs_cols, + const int32_t lhs_offset, + const int32_t dst_offset, + const int32_t activation_min, + const int32_t activation_max) { #if defined(ARM_MATH_DSP) const int32_t off0 = rhs_cols - 4; @@ -574,9 +576,11 @@ arm_status arm_nn_mat_mult_nt_t_s8(const q7_t *lhs, } } #endif - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c index 921ad3c..ec58c86 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q15.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_mult_q15.c * Description: Q15 vector multiplication with variable output shifts * - * $Date: 09. 
October 2020 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.1.1.3 * * Target Processor: Cortex-M cores * @@ -39,88 +41,15 @@ * @{ */ -/** - * @brief Q7 vector multiplication with variable output shifts - * @param[in] *pSrcA pointer to the first input vector - * @param[in] *pSrcB pointer to the second input vector - * @param[out] *pDst pointer to the output vector - * @param[in] out_shift amount of right-shift for output - * @param[in] blockSize number of samples in each vector +/* + * Q7 vector multiplication with variable output shifts + * Refer function header for details * - * Scaling and Overflow Behavior: - * \par - * The function uses saturating arithmetic. - * Results outside of the allowable Q15 range [0x8000 0x7FFF] will be saturated. */ void arm_nn_mult_q15(q15_t *pSrcA, q15_t *pSrcB, q15_t *pDst, const uint16_t out_shift, uint32_t blockSize) { - uint32_t blkCnt; /* loop counters */ - -#if defined(ARM_MATH_DSP) - - /* Run the below code for Cortex-M4 and Cortex-M3 */ - q31_t inA1, inA2, inB1, inB2; /* temporary input variables */ - q15_t out1, out2, out3, out4; /* temporary output variables */ - q31_t mul1, mul2, mul3, mul4; /* temporary variables */ - - /* loop Unrolling */ - blkCnt = blockSize >> 2U; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. - ** a second loop below computes the remaining 1 to 3 samples. */ - while (blkCnt > 0U) - { - /* read two samples at a time from sourceA */ - inA1 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcA); - /* read two samples at a time from sourceB */ - inB1 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcB); - /* read two samples at a time from sourceA */ - inA2 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcA); - /* read two samples at a time from sourceB */ - inB2 = arm_nn_read_q15x2_ia((const q15_t **)&pSrcB); - - /* multiply mul = sourceA * sourceB */ - mul1 = (q31_t)((q15_t)(inA1 >> 16) * (q15_t)(inB1 >> 16)); - mul2 = (q31_t)((q15_t)inA1 * (q15_t)inB1); - mul3 = (q31_t)((q15_t)(inA2 >> 16) * (q15_t)(inB2 >> 16)); - mul4 = (q31_t)((q15_t)inA2 * (q15_t)inB2); - - /* saturate result to 16 bit */ - out1 = (q15_t)__SSAT((q31_t)(mul1 + NN_ROUND(out_shift)) >> out_shift, 16); - out2 = (q15_t)__SSAT((q31_t)(mul2 + NN_ROUND(out_shift)) >> out_shift, 16); - out3 = (q15_t)__SSAT((q31_t)(mul3 + NN_ROUND(out_shift)) >> out_shift, 16); - out4 = (q15_t)__SSAT((q31_t)(mul4 + NN_ROUND(out_shift)) >> out_shift, 16); - - /* store the result */ -#ifndef ARM_MATH_BIG_ENDIAN - - *__SIMD32(pDst)++ = __PKHBT(out2, out1, 16); - *__SIMD32(pDst)++ = __PKHBT(out4, out3, 16); - -#else - - *__SIMD32(pDst)++ = __PKHBT(out2, out1, 16); - *__SIMD32(pDst)++ = __PKHBT(out4, out3, 16); - -#endif /* #ifndef ARM_MATH_BIG_ENDIAN */ - - /* Decrement the blockSize loop counter */ - blkCnt--; - } - - /* If the blockSize is not a multiple of 4, compute any remaining output samples here. - ** No loop unrolling is used. 
*/ - blkCnt = blockSize % 0x4U; - -#else - - /* Run the below code for Cortex-M0 */ - - /* Initialize blkCnt with number of samples */ - blkCnt = blockSize; - -#endif /* #if defined (ARM_MATH_DSP) */ + uint32_t blkCnt = blockSize; /* loop counters */ while (blkCnt > 0U) { @@ -136,3 +65,5 @@ void arm_nn_mult_q15(q15_t *pSrcA, q15_t *pSrcB, q15_t *pDst, const uint16_t out /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c index 78bcdf2..0d02f9a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_mult_q7.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_mult_q7.c * Description: Q7 vector multiplication with variable output shifts * - * $Date: 09. October 2020 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.1.1.3 * * Target Processor: Cortex-M cores * @@ -39,62 +41,14 @@ * @{ */ -/** - * @brief Q7 vector multiplication with variable output shifts - * @param[in] *pSrcA pointer to the first input vector - * @param[in] *pSrcB pointer to the second input vector - * @param[out] *pDst pointer to the output vector - * @param[in] out_shift amount of right-shift for output - * @param[in] blockSize number of samples in each vector - * - * Scaling and Overflow Behavior: - * \par - * The function uses saturating arithmetic. - * Results outside of the allowable Q7 range [0x80 0x7F] will be saturated. +/* + * Q7 vector multiplication with variable output shifts + * Refer function header for details */ void arm_nn_mult_q7(q7_t *pSrcA, q7_t *pSrcB, q7_t *pDst, const uint16_t out_shift, uint32_t blockSize) { - uint32_t blkCnt; /* loop counters */ - -#if defined(ARM_MATH_DSP) - - /* Run the below code for Cortex-M4 and Cortex-M3 */ - q7_t out1, out2, out3, out4; /* Temporary variables to store the product */ - - /* loop Unrolling */ - blkCnt = blockSize >> 2U; - - /* First part of the processing with loop unrolling. Compute 4 outputs at a time. - ** a second loop below computes the remaining 1 to 3 samples. 
*/ - while (blkCnt > 0U) - { - /* C = A * B */ - /* Multiply the inputs and store the results in temporary variables */ - out1 = (q7_t)__SSAT(((q15_t)((q15_t)(*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - out2 = (q7_t)__SSAT(((q15_t)((q15_t)(*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - out3 = (q7_t)__SSAT(((q15_t)((q15_t)(*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - out4 = (q7_t)__SSAT(((q15_t)((q15_t)(*pSrcA++) * (*pSrcB++) + NN_ROUND(out_shift)) >> out_shift), 8); - - /* Store the results of 4 inputs in the destination buffer in single cycle by packing */ - *__SIMD32(pDst)++ = __PACKq7(out1, out2, out3, out4); - - /* Decrement the blockSize loop counter */ - blkCnt--; - } - - /* If the blockSize is not a multiple of 4, compute any remaining output samples here. - ** No loop unrolling is used. */ - blkCnt = blockSize % 0x4U; - -#else - - /* Run the below code for Cortex-M0 */ - - /* Initialize blkCnt with number of samples */ - blkCnt = blockSize; - -#endif /* #if defined (ARM_MATH_DSP) */ + uint32_t blkCnt = blockSize; /* loop counters */ while (blkCnt > 0U) { @@ -110,3 +64,5 @@ void arm_nn_mult_q7(q7_t *pSrcA, q7_t *pSrcB, q7_t *pDst, const uint16_t out_shi /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16.c new file mode 100644 index 0000000..54f5403 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s16.c @@ -0,0 +1,372 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * SPDX-FileCopyrightText: Copyright 2020-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mult_t_s16 + * Description: s16 vector by matrix (transposed) multiplication + * + * $Date: 11 August 2022 + * $Revision: V.2.1.0 + * + * Target Processor: Cortex-M + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" +#define MAX_COL_COUNT (512) + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * s16 vector(lhs) by matrix (transposed) multiplication + * + * Refer header file for details. 
+ * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s16(const q15_t *lhs, + const q7_t *rhs, + const q63_t *bias, + q15_t *dst, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max) +{ +#if defined(ARM_MATH_DSP) + + int32_t rhs_cols_fast = rhs_cols; + + if (rhs_cols > MAX_COL_COUNT) + { + rhs_cols_fast = MAX_COL_COUNT; + } + +#if defined(ARM_MATH_MVEI) + int32_t row_loop_cnt = rhs_rows / 4; + int32_t col_loop_cnt = (rhs_cols_fast + 7) / 8; + + for (int32_t i_row_loop_count = 0; i_row_loop_count < row_loop_cnt; i_row_loop_count++) + { + int32_t col_cnt = rhs_cols_fast; + + const int16_t *lhs_ptr = lhs; + const int8_t *rhs_ptr_0 = rhs; + const int8_t *rhs_ptr_1 = rhs + rhs_cols; + const int8_t *rhs_ptr_2 = rhs + rhs_cols * 2; + const int8_t *rhs_ptr_3 = rhs + rhs_cols * 3; + + int32_t result_0 = 0; + int32_t result_1 = 0; + int32_t result_2 = 0; + int32_t result_3 = 0; + + for (int i_col_loop_cnt = 0; i_col_loop_cnt < col_loop_cnt; i_col_loop_cnt++) + { + mve_pred16_t pred = vctp16q(col_cnt); + col_cnt -= 8; + + int16x8_t lhs_input = vldrhq_z_s16(lhs_ptr, pred); + + int16x8_t rhs_input_0 = vldrbq_z_s16(rhs_ptr_0, pred); + int16x8_t rhs_input_1 = vldrbq_z_s16(rhs_ptr_1, pred); + int16x8_t rhs_input_2 = vldrbq_z_s16(rhs_ptr_2, pred); + int16x8_t rhs_input_3 = vldrbq_z_s16(rhs_ptr_3, pred); + + result_0 = vmladavaq_s16(result_0, lhs_input, rhs_input_0); + result_1 = vmladavaq_s16(result_1, lhs_input, rhs_input_1); + result_2 = vmladavaq_s16(result_2, lhs_input, rhs_input_2); + result_3 = vmladavaq_s16(result_3, lhs_input, rhs_input_3); + + lhs_ptr += 8; + + rhs_ptr_0 += 8; + rhs_ptr_1 += 8; + rhs_ptr_2 += 8; + rhs_ptr_3 += 8; + } + + int64_t result_64_0 = result_0; + int64_t result_64_1 = result_1; + int64_t result_64_2 = result_2; + int64_t result_64_3 = result_3; + + if (rhs_cols > MAX_COL_COUNT) + { + for (int i_rhs_cols = MAX_COL_COUNT; i_rhs_cols < rhs_cols; i_rhs_cols++) + { + const int16_t lhs_temp = *lhs_ptr++; + + result_64_0 += *rhs_ptr_0++ * lhs_temp; + result_64_1 += *rhs_ptr_1++ * lhs_temp; + result_64_2 += *rhs_ptr_2++ * lhs_temp; + result_64_3 += *rhs_ptr_3++ * lhs_temp; + } + } + + if (bias) + { + result_64_0 += *bias++; + result_64_1 += *bias++; + result_64_2 += *bias++; + result_64_3 += *bias++; + } + + int32_t tmp; + tmp = arm_nn_requantize_s64(result_64_0, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (q15_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_1, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (q15_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_2, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (q15_t)tmp; + + tmp = 0; + tmp = arm_nn_requantize_s64(result_64_3, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (q15_t)tmp; + + rhs += 4 * rhs_cols; + } + + for (int8_t rows_left = rhs_rows & 0x3; rows_left > 0; rows_left--) + { + int32_t result = 0; + + col_loop_cnt = (rhs_cols_fast + 7) / 8; + + const int16_t *lhs_ptr = lhs; + const int8_t *rhs_ptr = rhs; + + int32_t col_cnt = (int32_t)rhs_cols_fast; + + for (int i_col_loop_cnt = 0; i_col_loop_cnt < col_loop_cnt; i_col_loop_cnt++) + { + mve_pred16_t pred = vctp16q(col_cnt); + col_cnt -= 8; + + int16x8_t lhs_input = vldrhq_z_s16(lhs_ptr, 
pred); + int16x8_t rhs_input = vldrbq_z_s16(rhs_ptr, pred); + + result = vmladavaq_p_s16(result, lhs_input, rhs_input, pred); + + lhs_ptr += 8; + rhs_ptr += 8; + } + + int64_t result_64 = result; + + if (bias) + { + result_64 += *bias++; + } + + if (rhs_cols > MAX_COL_COUNT) + { + for (int i_rhs_cols = MAX_COL_COUNT; i_rhs_cols < rhs_cols; i_rhs_cols++) + { + const int16_t lhs_temp = *lhs_ptr++; + + result_64 += *rhs_ptr++ * lhs_temp; + } + } + + int32_t tmp = 0; + tmp = arm_nn_requantize_s64(result_64, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (q15_t)tmp; + + rhs += rhs_cols; + } + +#else // ARM_MATH_MVEI + + const int32_t row_loop_cnt = rhs_rows / 2; + + for (int32_t i = 0; i < row_loop_cnt; i++) + { + + q63_t acc_64_0 = 0; + q63_t acc_64_1 = 0; + int32_t acc_0 = 0; + int32_t acc_1 = 0; + + const int32_t col_loop_cnt = rhs_cols_fast / 4; + + const int16_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + const int8_t *rhs_1 = rhs + rhs_cols; + rhs += 2 * rhs_cols; + + for (int j = col_loop_cnt; j != 0; j--) + { + int32_t ker_0, ker_1, vec_part_0, vec_part_1; + + vec_part_0 = arm_nn_read_q15x2_ia(&lhs_vec); + vec_part_1 = arm_nn_read_q15x2_ia(&lhs_vec); + + rhs_0 = read_and_pad(rhs_0, &ker_0, &ker_1); + + acc_0 = __SMLAD(ker_0, vec_part_0, acc_0); + acc_0 = __SMLAD(ker_1, vec_part_1, acc_0); + + rhs_1 = read_and_pad(rhs_1, &ker_0, &ker_1); + + acc_1 = __SMLAD(ker_0, vec_part_0, acc_1); + acc_1 = __SMLAD(ker_1, vec_part_1, acc_1); + } + + acc_64_0 += acc_0; + acc_64_1 += acc_1; + + for (int k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec); + lhs_vec++; + acc_64_0 += lhs_temp * (*rhs_0); + rhs_0++; + acc_64_1 += lhs_temp * (*rhs_1); + rhs_1++; + } + + if (bias) + { + acc_64_0 += *bias++; + acc_64_1 += *bias++; + } + q31_t tmp; + + tmp = arm_nn_requantize_s64(acc_64_0, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (q15_t)tmp; + + tmp = arm_nn_requantize_s64(acc_64_1, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (q15_t)tmp; + } + + if (rhs_rows & 0x1) + { + q63_t acc_64_0 = 0; + int32_t acc_0 = 0; + const int32_t col_loop_cnt = rhs_cols_fast / 4; + + const int16_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + + for (int i = col_loop_cnt; i != 0; i--) + { + int32_t ker_0, ker_1, vec; + rhs_0 = read_and_pad(rhs_0, &ker_0, &ker_1); + + vec = arm_nn_read_q15x2_ia(&lhs_vec); + acc_0 = __SMLAD(ker_0, vec, acc_0); + + vec = arm_nn_read_q15x2_ia(&lhs_vec); + acc_0 = __SMLAD(ker_1, vec, acc_0); + } + + acc_64_0 += acc_0; + + for (int j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec); + lhs_vec++; + acc_64_0 += lhs_temp * (*rhs_0); + rhs_0++; + } + + if (bias) + { + acc_64_0 += *bias++; + } + q31_t tmp; + tmp = arm_nn_requantize_s64(acc_64_0, dst_multiplier, dst_shift); + tmp = MAX(tmp, activation_min); + tmp = MIN(tmp, activation_max); + *dst++ = (q15_t)tmp; + } + +#endif // ARM_MATH_MVEI +#else // ARM_MATH_DSP + for (int i_row_loop_cnt = 0; i_row_loop_cnt < rhs_rows; i_row_loop_cnt++) + { + const q15_t *lhs_ptr = lhs; + const q7_t *rhs_ptr_0 = &rhs[0]; + + q63_t result = 0; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const q63_t rhs_value0 = (int8_t)*rhs_ptr_0; + const q63_t lhs_value = *lhs_ptr; + + result += lhs_value * rhs_value0; + + ++rhs_ptr_0; + ++lhs_ptr; + } + + if (bias) + { + result += *bias++; + } + // 
Quantize down + result = arm_nn_requantize_s64(result, dst_multiplier, dst_shift); + + // Clamp the result + result = ((result) > (activation_min) ? (result) : (activation_min)); + result = ((result) < (activation_max) ? (result) : (activation_max)); + + *dst++ = (q15_t)result; + rhs += rhs_cols; + } +#endif // ARM_MATH_DSP + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNBasicMath group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c index 8e4e66c..7663bb6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2020-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_nn_vec_mat_mult_t_s8 * Description: s8 vector by matrix (transposed) multiplication * - * $Date: 09. October 2020 - * $Revision: V.1.5.1 + * $Date: 16 Aug 2022 + * $Revision: V.4.0.2 * * Target Processor: Cortex-M * @@ -45,38 +47,25 @@ * Refer header file for details. * */ -arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, - const q7_t *rhs, - const q31_t *bias, - q7_t *dst, - const int32_t lhs_offset, - const int32_t rhs_offset, - const int32_t dst_offset, - const int32_t dst_multiplier, - const int32_t dst_shift, - const int32_t rhs_cols, - const int32_t rhs_rows, - const int32_t activation_min, - const int32_t activation_max) +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, + const q7_t *rhs, + const q31_t *bias, + q7_t *dst, + const int32_t lhs_offset, + const int32_t rhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max, + const int32_t address_offset) { + (void)rhs_offset; #if defined(ARM_MATH_MVEI) - int32_t row_loop_cnt = rhs_rows / 3; - - int32_t lhs_sum = 0; - { - const int32_t col_loop_cnt = (rhs_cols + 15) / 16; - uint32_t col_cnt = (uint32_t)rhs_cols; - const int8_t *lhs_vec = lhs; - for (int i = 0; i < col_loop_cnt; i++) - { - mve_pred16_t p = vctp8q(col_cnt); - col_cnt -= 16; - - const int8x16_t input = vldrbq_z_s8(lhs_vec, p); - lhs_sum = vaddvaq_p_s8(lhs_sum, input, p); - lhs_vec += 16; - } - } + const int32_t row_loop_cnt = rhs_rows / 3; + const uint32x4_t address_offset_array = {0, address_offset, address_offset * 2, address_offset * 3}; for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) { @@ -128,21 +117,26 @@ arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, if (bias) { int32x4_t b = vldrwq_z_s32(bias, p); - acc = vaddq_m_s32(vuninitializedq_s32(), acc, b, p); + acc = vaddq_x_s32(acc, b, p); bias += 3; } const int32x4_t rhs_sum = {rhs_sum_0, rhs_sum_1, rhs_sum_2, 0}; - acc += vdupq_n_s32(lhs_offset) * rhs_sum; - acc += vdupq_n_s32(rhs_offset * lhs_sum); - acc += vdupq_n_s32(lhs_offset * rhs_offset * rhs_cols); acc = 
arm_requantize_mve(acc, dst_multiplier, dst_shift); acc = vaddq_s32(acc, vdupq_n_s32(dst_offset)); acc = vmaxq_s32(acc, vdupq_n_s32(activation_min)); acc = vminq_s32(acc, vdupq_n_s32(activation_max)); - vstrbq_p_s32(dst, acc, p); - dst += 3; + + if (address_offset > 1L) + { + vstrbq_scatter_offset_s32(dst, address_offset_array, acc); + } + else + { + vstrbq_p_s32(dst, acc, p); + } + dst += 3 * address_offset; } const int loop_cnt = rhs_rows % 3; @@ -175,8 +169,7 @@ arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, acc_0 += *bias; bias++; } - const int32_t offsets = - (rhs_sum_0 * lhs_offset) + (lhs_sum * rhs_offset) + (lhs_offset * rhs_offset * rhs_cols); + const int32_t offsets = rhs_sum_0 * lhs_offset; acc_0 += offsets; acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); acc_0 += dst_offset; @@ -184,279 +177,189 @@ arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, // Clamp the result acc_0 = MAX(acc_0, activation_min); *dst = MIN(acc_0, activation_max); - dst++; + dst += address_offset; } #elif defined(ARM_MATH_DSP) - const int32_t off0 = rhs_cols - 4; - const int16_t lhs_offset_s16 = lhs_offset; - const int16_t rhs_offset_s16 = rhs_offset; - + const int32_t row_loop_cnt = rhs_rows / 2; + const int16_t lhs_offset_s16 = (int16_t)lhs_offset; const uint32_t lhs_offset_s16x2 = __PKHBT(lhs_offset_s16, lhs_offset_s16, 16); - const uint32_t rhs_offset_s16x2 = __PKHBT(rhs_offset_s16, rhs_offset_s16, 16); - for (int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) + for (int32_t i = 0; i < row_loop_cnt; i++) { - const q7_t *lhs_ptr = &lhs[0]; - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = 0; - q31_t res01 = 0; + int32_t acc_0 = 0; + int32_t acc_1 = 0; if (bias) { - res00 = *bias++; - res01 = *bias++; + acc_0 = *bias++; + acc_1 = *bias++; } - int32_t rhs_cols_idx = 0; + const int32_t col_loop_cnt = rhs_cols / 4; - q31_t val0, val1, val2, val3, val4, val5; - for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) - { - // Read 4 x int8 values from the RHS matrix - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val2 = __SXTAB16(rhs_offset_s16x2, val0); - // Read 4 x int8 values from the LHS vector - val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val1); - // Read 4 x int8 values from the RHS matrix - val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); - val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); - - // Perform the accumulations - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTAB16(rhs_offset_s16x2, val4); - res00 = __SMLAD(val1, val0, res00); - val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); - // Read 4 x int8 values from the RHS matrix - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val1, val4, res01); - - val2 = __SXTAB16(rhs_offset_s16x2, val0); - // Read 4 x int8 values from the LHS vector - val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val1); - // Read 4 x int8 values from the RHS matrix - val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); - val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); - - // Perform the accumulations - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTAB16(rhs_offset_s16x2, val4); - res00 = __SMLAD(val1, val0, res00); - val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); - // Read 4 x int8 values from the RHS matrix - val0 = 
arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val1, val4, res01); - - val2 = __SXTAB16(rhs_offset_s16x2, val0); - // Read 4 x int8 values from the LHS vector - val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val1); - // Read 4 x int8 values from the RHS matrix - val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); - val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); - - // Perform the accumulations - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTAB16(rhs_offset_s16x2, val4); - res00 = __SMLAD(val1, val0, res00); - val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); - // Read 4 x int8 values from the RHS matrix - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val1, val4, res01); - - val2 = __SXTAB16(rhs_offset_s16x2, val0); - // Read 4 x int8 values from the LHS vector - val1 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val1); - // Read 4 x int8 values from the RHS matrix - val4 = arm_nn_read_q7x4((const q7_t *)rhs_ptr + off0); - val1 = __SXTAB16(lhs_offset_s16x2, __ROR(val1, 8)); - - // Perform the accumulations - res00 = __SMLAD(val3, val2, res00); - val5 = __SXTAB16(rhs_offset_s16x2, val4); - res00 = __SMLAD(val1, val0, res00); - val4 = __SXTAB16(rhs_offset_s16x2, __ROR(val4, 8)); - res01 = __SMLAD(val3, val5, res01); - res01 = __SMLAD(val1, val4, res01); - } + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + const int8_t *rhs_1 = rhs + rhs_cols; + rhs += 2 * rhs_cols; - for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + for (int j = col_loop_cnt; j != 0; j--) { - q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; - q31_t rhs_value1 = rhs_ptr[rhs_cols] + rhs_offset; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; + int32_t vec_0 = arm_nn_read_q7x4_ia(&lhs_vec); + int32_t vec_1 = __SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); - res00 += lhs_value * rhs_value0; - res01 += lhs_value * rhs_value1; + vec_0 = __SXTAB16(lhs_offset_s16x2, vec_0); - ++rhs_ptr; - ++lhs_ptr; - } + int32_t ker_0 = arm_nn_read_q7x4_ia(&rhs_0); + int32_t ker_1 = __SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = __SXTB16(ker_0); - // Quantize down - res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); - res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + acc_0 = __SMLAD(ker_1, vec_1, acc_0); + acc_0 = __SMLAD(ker_0, vec_0, acc_0); - // Add offset - res00 += dst_offset; - res01 += dst_offset; + ker_0 = arm_nn_read_q7x4_ia(&rhs_1); + ker_1 = __SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = __SXTB16(ker_0); - // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - res01 = MAX(res01, activation_min); - res01 = MIN(res01, activation_max); + acc_1 = __SMLAD(ker_1, vec_1, acc_1); + acc_1 = __SMLAD(ker_0, vec_0, acc_1); + } - *dst++ = (q7_t)res00; - *dst++ = (q7_t)res01; + for (int k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0); + rhs_0++; + acc_1 += lhs_temp * (*rhs_1); + rhs_1++; + } - rhs += 2 * rhs_cols; + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_1 = arm_nn_requantize(acc_1, dst_multiplier, dst_shift); + + // Add offset + acc_0 += dst_offset; + acc_1 += dst_offset; + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = 
MIN(acc_0, activation_max); + acc_1 = MAX(acc_1, activation_min); + acc_1 = MIN(acc_1, activation_max); + *dst = (int8_t)acc_0; + *(dst + address_offset) = (int8_t)acc_1; + dst += 2 * address_offset; } - if (rhs_rows % 2) + if (rhs_rows & 0x1) { - const q7_t *lhs_ptr = &lhs[0]; - const q7_t *rhs_ptr = &rhs[0]; - - q31_t res00 = 0; + int32_t acc_0 = 0; if (bias) { - res00 = *bias++; + acc_0 = *bias++; } + const int32_t col_loop_cnt = rhs_cols / 4; - int32_t rhs_cols_idx = 0; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; - q31_t val0, val1, val2, val3; - for (; rhs_cols_idx <= (rhs_cols - 16); rhs_cols_idx += 16) + for (int i = col_loop_cnt; i != 0; i--) { - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = __SXTAB16(rhs_offset_s16x2, val0); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val2); - val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); - - // Partial accumulations - res00 = __SMLAD(val3, val1, res00); - res00 = __SMLAD(val2, val0, res00); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = __SXTAB16(rhs_offset_s16x2, val0); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val2); - val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); - - // Partial accumulations - res00 = __SMLAD(val3, val1, res00); - res00 = __SMLAD(val2, val0, res00); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = __SXTAB16(rhs_offset_s16x2, val0); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val2); - val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); - - // Partial accumulations - res00 = __SMLAD(val3, val1, res00); - res00 = __SMLAD(val2, val0, res00); - - val0 = arm_nn_read_q7x4_ia((const q7_t **)&rhs_ptr); - val1 = __SXTAB16(rhs_offset_s16x2, val0); - val2 = arm_nn_read_q7x4_ia((const q7_t **)&lhs_ptr); - val0 = __SXTAB16(rhs_offset_s16x2, __ROR(val0, 8)); - val3 = __SXTAB16(lhs_offset_s16x2, val2); - val2 = __SXTAB16(lhs_offset_s16x2, __ROR(val2, 8)); - - // Partial accumulations - res00 = __SMLAD(val3, val1, res00); - res00 = __SMLAD(val2, val0, res00); - } + int32_t vec_0 = arm_nn_read_q7x4_ia(&lhs_vec); + int32_t vec_1 = __SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = __SXTAB16(lhs_offset_s16x2, vec_0); - for (; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) - { - q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; + int32_t ker_0 = arm_nn_read_q7x4_ia(&rhs_0); + int32_t ker_1 = __SXTB16_RORn((uint32_t)ker_0, 8); + ker_0 = __SXTB16(ker_0); - res00 += lhs_value * rhs_value0; + acc_0 = __SMLAD(ker_1, vec_1, acc_0); + acc_0 = __SMLAD(ker_0, vec_0, acc_0); + } - ++rhs_ptr; - ++lhs_ptr; + for (int j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0); + rhs_0++; } - // Quantize down - res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); // Add offset - res00 += dst_offset; - + acc_0 += dst_offset; // Clamp the result - res00 = MAX(res00, activation_min); - res00 = MIN(res00, activation_max); - - *dst = (q7_t)res00; + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + *dst = (int8_t)acc_0; + dst += address_offset; } #else - for 
(int32_t rhs_rows_idx = 0; rhs_rows_idx <= (rhs_rows - 2); rhs_rows_idx += 2) + const int32_t row_loop_cnt = rhs_rows / 3; + + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) { - const q7_t *lhs_ptr = &lhs[0]; - const q7_t *rhs_ptr = &rhs[0]; + const q7_t *lhs_ptr = lhs; + const q7_t *rhs_ptr_0 = &rhs[0]; + const q7_t *rhs_ptr_1 = &rhs[rhs_cols]; + const q7_t *rhs_ptr_2 = &rhs[rhs_cols * 2]; q31_t res00 = 0; q31_t res01 = 0; + q31_t res02 = 0; if (bias) { res00 = *bias++; res01 = *bias++; + res02 = *bias++; } - for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) { - q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; - q31_t rhs_value1 = rhs_ptr[rhs_cols] + rhs_offset; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; + const q31_t rhs_value0 = (int8_t)*rhs_ptr_0; + const q31_t rhs_value1 = (int8_t)*rhs_ptr_1; + const q31_t rhs_value2 = (int8_t)*rhs_ptr_2; + const q31_t lhs_value = (int8_t)*lhs_ptr + lhs_offset; res00 += lhs_value * rhs_value0; res01 += lhs_value * rhs_value1; + res02 += lhs_value * rhs_value2; - ++rhs_ptr; + ++rhs_ptr_0; + ++rhs_ptr_1; + ++rhs_ptr_2; ++lhs_ptr; } - // Quantize down res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + res02 = arm_nn_requantize(res02, dst_multiplier, dst_shift); // Add offset res00 += dst_offset; res01 += dst_offset; + res02 += dst_offset; // Clamp the result res00 = MAX(res00, activation_min); res00 = MIN(res00, activation_max); res01 = MAX(res01, activation_min); res01 = MIN(res01, activation_max); + res02 = MAX(res02, activation_min); + res02 = MIN(res02, activation_max); - *dst++ = (q7_t)res00; - *dst++ = (q7_t)res01; + *dst = (q7_t)res00; + *(dst + address_offset) = (q7_t)res01; + *(dst + 2 * address_offset) = (q7_t)res02; + dst += 3 * address_offset; - rhs += 2 * rhs_cols; + rhs += 3 * rhs_cols; } - if (rhs_rows % 2) + const int loop_cnt = rhs_rows % 3; + + for (int i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++) { const q7_t *lhs_ptr = &lhs[0]; const q7_t *rhs_ptr = &rhs[0]; @@ -469,8 +372,8 @@ arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) { - q31_t rhs_value0 = rhs_ptr[0] + rhs_offset; - q31_t lhs_value = lhs_ptr[0] + lhs_offset; + q31_t rhs_value0 = (int8_t)rhs_ptr[0]; + q31_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset; res00 += lhs_value * rhs_value0; @@ -488,13 +391,16 @@ arm_status arm_nn_vec_mat_mult_t_s8(const q7_t *lhs, res00 = MAX(res00, activation_min); res00 = MIN(res00, activation_max); - *dst = (q7_t)res00; + *dst = (int8_t)res00; + dst += address_offset; + rhs += rhs_cols; } #endif - - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of NNBasicMath group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_svdf_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_svdf_s8.c new file mode 100644 index 0000000..293edb2 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nn_vec_mat_mult_t_svdf_s8.c @@ -0,0 +1,345 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2021-2022 Arm Limited or its affiliates. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_vec_mat_mult_t_svdf_s8 + * Description: s8 vector by matrix (transposed) multiplication with + * s16 output. Targetted at SVDF operator. + * + * $Date: 19 April 2022 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup NNBasicMath + * @{ + */ + +/* + * s8 vector(lhs) by matrix (transposed) multiplication + * + * Refer header file for details. + * + */ +arm_cmsis_nn_status arm_nn_vec_mat_mult_t_svdf_s8(const q7_t *lhs, + const q7_t *rhs, + q15_t *dst, + const int32_t lhs_offset, + const int32_t rhs_offset, + const int32_t dst_offset, + const int32_t dst_multiplier, + const int32_t dst_shift, + const int32_t rhs_cols, + const int32_t rhs_rows, + const int32_t activation_min, + const int32_t activation_max) +{ + (void)rhs_offset; + if (rhs_cols < 0 || (NN_Q31_MAX - rhs_cols) < 16 || dst_offset < 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + (void)rhs_offset; +#if defined(ARM_MATH_MVEI) + int32_t row_loop_cnt = rhs_rows / 3; + + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + int32_t acc_2 = 0; + + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + const int8_t *rhs_1 = rhs + rhs_cols; + const int8_t *rhs_2 = rhs + 2 * rhs_cols; + + int32_t rhs_sum_0 = 0; + int32_t rhs_sum_1 = 0; + int32_t rhs_sum_2 = 0; + + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p); + rhs_sum_0 = vaddvaq_p_s8(rhs_sum_0, ker_0, p); + acc_0 = vmladavaq_p_s8(acc_0, ker_0, input, p); + + const int8x16_t ker_1 = vldrbq_z_s8(rhs_1, p); + rhs_sum_1 = vaddvaq_p_s8(rhs_sum_1, ker_1, p); + acc_1 = vmladavaq_p_s8(acc_1, ker_1, input, p); + + const int8x16_t ker_2 = vldrbq_z_s8(rhs_2, p); + rhs_sum_2 = vaddvaq_p_s8(rhs_sum_2, ker_2, p); + acc_2 = vmladavaq_p_s8(acc_2, ker_2, input, p); + + lhs_vec += 16; + rhs_0 += 16; + rhs_1 += 16; + rhs_2 += 16; + } + rhs += 3 * rhs_cols; + + int32x4_t acc = {acc_0, acc_1, acc_2, 0}; + const int32x4_t rhs_sum = {rhs_sum_0, rhs_sum_1, rhs_sum_2, 0}; + acc += vdupq_n_s32(lhs_offset) * rhs_sum; + + acc = arm_requantize_mve(acc, dst_multiplier, dst_shift); + acc = vmaxq_s32(acc, vdupq_n_s32(activation_min)); + acc = vminq_s32(acc, vdupq_n_s32(activation_max)); + *(dst) = (int16_t)acc[0]; + *(dst + dst_offset) = (int16_t)acc[1]; + *(dst + 2 * dst_offset) = (int16_t)acc[2]; + dst += 3 * dst_offset; + } + + const int loop_cnt = rhs_rows % 3; 
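Note on the MVEI path just above: it never adds lhs_offset inside the inner loop. It accumulates the raw s8 dot product together with a running sum of the weight values (rhs_sum_*), then applies the offset once via acc += lhs_offset * rhs_sum. A minimal plain-C sketch of that identity follows; the names are illustrative only and are not CMSIS-NN APIs.

/*
 * Sketch of the offset-folding identity assumed by the vectorized path:
 *
 *   sum_i (lhs_i + lhs_offset) * rhs_i
 *     = sum_i lhs_i * rhs_i  +  lhs_offset * sum_i rhs_i
 *
 * so the kernel can accumulate the raw dot product and the plain sum of the
 * rhs values, and add "lhs_offset * rhs_sum" once per output.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Reference form: apply the offset to every lhs element inside the loop. */
static int32_t dot_offset_reference(const int8_t *lhs, const int8_t *rhs,
                                    int32_t len, int32_t lhs_offset)
{
    int32_t acc = 0;
    for (int32_t i = 0; i < len; i++)
    {
        acc += (lhs[i] + lhs_offset) * rhs[i];
    }
    return acc;
}

/* Folded form: raw dot product plus one correction term at the end. */
static int32_t dot_offset_folded(const int8_t *lhs, const int8_t *rhs,
                                 int32_t len, int32_t lhs_offset)
{
    int32_t acc = 0;
    int32_t rhs_sum = 0;
    for (int32_t i = 0; i < len; i++)
    {
        acc += lhs[i] * rhs[i];
        rhs_sum += rhs[i];
    }
    return acc + lhs_offset * rhs_sum;
}

int main(void)
{
    const int8_t lhs[] = {-128, 7, 42, -3, 127, 0, -61, 19};
    const int8_t rhs[] = {5, -9, 127, -128, 1, 33, -7, 64};
    const int32_t offset = 128; /* typical s8 zero-point shift */

    int32_t a = dot_offset_reference(lhs, rhs, 8, offset);
    int32_t b = dot_offset_folded(lhs, rhs, 8, offset);
    assert(a == b);
    printf("reference=%ld folded=%ld\n", (long)a, (long)b);
    return 0;
}

Since the rhs sum depends only on the weights, this trades a per-element addition for a single multiply-add per output row.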
+ for (int i_row_loop_cnt = 0; i_row_loop_cnt < loop_cnt; i_row_loop_cnt++) + { + int32_t acc_0 = 0; + const int32_t col_loop_cnt = (rhs_cols + 15) / 16; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + int32_t rhs_sum_0 = 0; + uint32_t col_cnt = (uint32_t)rhs_cols; + + for (int i = 0; i < col_loop_cnt; i++) + { + mve_pred16_t p = vctp8q(col_cnt); + col_cnt -= 16; + const int8x16_t input = vldrbq_z_s8(lhs_vec, p); + + const int8x16_t ker_0 = vldrbq_z_s8(rhs_0, p); + rhs_sum_0 = vaddvaq_p_s8(rhs_sum_0, ker_0, p); + acc_0 = vmladavaq_p_s8(acc_0, ker_0, input, p); + + lhs_vec += 16; + rhs_0 += 16; + } + rhs += rhs_cols; + + const int32_t offsets = rhs_sum_0 * lhs_offset; + acc_0 = __QADD(acc_0, offsets); + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + *dst = (q15_t)MIN(acc_0, activation_max); + dst += dst_offset; + } + +#elif defined(ARM_MATH_DSP) + int32_t row_loop_cnt = rhs_rows / 2; + + const int16_t lhs_offset_s16 = lhs_offset; + const int16_t rhs_offset_s16 = rhs_offset; + + const uint32_t lhs_offset_s16x2 = __PKHBT(lhs_offset_s16, lhs_offset_s16, 16); + const uint32_t rhs_offset_s16x2 = __PKHBT(rhs_offset_s16, rhs_offset_s16, 16); + for (int32_t i = 0; i < row_loop_cnt; i++) + { + int32_t acc_0 = 0; + int32_t acc_1 = 0; + + const int32_t col_loop_cnt = rhs_cols / 4; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + const int8_t *rhs_1 = rhs + rhs_cols; + rhs += 2 * rhs_cols; + for (int j = col_loop_cnt; j != 0; j--) + { + int32_t vec_0 = arm_nn_read_q7x4_ia(&lhs_vec); + int32_t vec_1 = __SXTAB16_RORn(lhs_offset_s16x2, (uint32_t)vec_0, 8); + vec_0 = __SXTAB16(lhs_offset_s16x2, vec_0); + int32_t ker_0 = arm_nn_read_q7x4_ia(&rhs_0); + int32_t ker_1 = __SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8); + ker_0 = __SXTAB16(rhs_offset_s16x2, ker_0); + acc_0 = __SMLAD(ker_1, vec_1, acc_0); + acc_0 = __SMLAD(ker_0, vec_0, acc_0); + ker_0 = arm_nn_read_q7x4_ia(&rhs_1); + ker_1 = __SXTAB16_RORn(rhs_offset_s16x2, (uint32_t)ker_0, 8); + ker_0 = __SXTAB16(rhs_offset_s16x2, ker_0); + acc_1 = __SMLAD(ker_1, vec_1, acc_1); + acc_1 = __SMLAD(ker_0, vec_0, acc_1); + } + for (int k = col_loop_cnt * 4; k < rhs_cols; k++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + lhs_vec++; + acc_0 += lhs_temp * (*rhs_0 + rhs_offset); + rhs_0++; + acc_1 += lhs_temp * (*rhs_1 + rhs_offset); + rhs_1++; + } + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + acc_1 = arm_nn_requantize(acc_1, dst_multiplier, dst_shift); + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + acc_1 = MAX(acc_1, activation_min); + acc_1 = MIN(acc_1, activation_max); + *dst = (q15_t)acc_0; + *(dst + dst_offset) = (q15_t)acc_1; + dst += 2 * dst_offset; + } + if (rhs_rows & 0x1) + { + int32_t acc_0 = 0; + const int32_t col_loop_cnt = rhs_cols / 4; + const int8_t *lhs_vec = lhs; + const int8_t *rhs_0 = rhs; + for (int i = col_loop_cnt; i != 0; i--) + { + int32_t vec_0 = arm_nn_read_q7x4_ia(&lhs_vec); + int32_t vec_1 = __SXTAB16(lhs_offset_s16x2, __ROR((uint32_t)vec_0, 8)); + vec_0 = __SXTAB16(lhs_offset_s16x2, vec_0); + int32_t ker_0 = arm_nn_read_q7x4_ia(&rhs_0); + int32_t ker_1 = __SXTAB16(rhs_offset_s16x2, __ROR((uint32_t)ker_0, 8)); + ker_0 = __SXTAB16(rhs_offset_s16x2, ker_0); + acc_0 = __SMLAD(ker_1, vec_1, acc_0); + acc_0 = __SMLAD(ker_0, vec_0, acc_0); + } + for (int j = col_loop_cnt * 4; j < rhs_cols; j++) + { + const int32_t lhs_temp = (*lhs_vec + lhs_offset); + 
lhs_vec++; + acc_0 += lhs_temp * (*rhs_0 + rhs_offset); + rhs_0++; + } + acc_0 = arm_nn_requantize(acc_0, dst_multiplier, dst_shift); + + // Clamp the result + acc_0 = MAX(acc_0, activation_min); + acc_0 = MIN(acc_0, activation_max); + *dst = (q15_t)acc_0; + dst += dst_offset; + } + +#else + + int32_t row_loop_cnt = rhs_rows / 3; + + for (int i_row_loop_cnt = 0; i_row_loop_cnt < row_loop_cnt; i_row_loop_cnt++) + { + const q7_t *lhs_ptr = lhs; + const q7_t *rhs_ptr_0 = &rhs[0]; + const q7_t *rhs_ptr_1 = &rhs[rhs_cols]; + const q7_t *rhs_ptr_2 = &rhs[rhs_cols * 2]; + + q31_t res00 = 0; + q31_t res01 = 0; + q31_t res02 = 0; + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + const q31_t rhs_value0 = (int8_t)*rhs_ptr_0; + const q31_t rhs_value1 = (int8_t)*rhs_ptr_1; + const q31_t rhs_value2 = (int8_t)*rhs_ptr_2; + const q31_t lhs_value = (int8_t)*lhs_ptr + lhs_offset; + + res00 += lhs_value * rhs_value0; + res01 += lhs_value * rhs_value1; + res02 += lhs_value * rhs_value2; + + ++rhs_ptr_0; + ++rhs_ptr_1; + ++rhs_ptr_2; + ++lhs_ptr; + } + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + res01 = arm_nn_requantize(res01, dst_multiplier, dst_shift); + res02 = arm_nn_requantize(res02, dst_multiplier, dst_shift); + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + res01 = MAX(res01, activation_min); + res01 = MIN(res01, activation_max); + res02 = MAX(res02, activation_min); + res02 = MIN(res02, activation_max); + + *dst = (q15_t)res00; + *(dst + dst_offset) = (q15_t)res01; + *(dst + 2 * dst_offset) = (q15_t)res02; + dst += 3 * dst_offset; + rhs += 3 * rhs_cols; + } + + const int loop_cnt = rhs_rows % 3; + + for (int i_loop_cnt = 0; i_loop_cnt < loop_cnt; i_loop_cnt++) + { + const q7_t *lhs_ptr = &lhs[0]; + const q7_t *rhs_ptr = &rhs[0]; + + q31_t res00 = 0; + + for (int32_t rhs_cols_idx = 0; rhs_cols_idx < rhs_cols; ++rhs_cols_idx) + { + q31_t rhs_value0 = (int8_t)rhs_ptr[0] + rhs_offset; + q31_t lhs_value = (int8_t)lhs_ptr[0] + lhs_offset; + + res00 += lhs_value * rhs_value0; + + ++rhs_ptr; + ++lhs_ptr; + } + + // Quantize down + res00 = arm_nn_requantize(res00, dst_multiplier, dst_shift); + + // Clamp the result + res00 = MAX(res00, activation_min); + res00 = MIN(res00, activation_max); + + *dst = (q15_t)res00; + dst += dst_offset; + rhs += rhs_cols; + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of NNBasicMath group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c index fb8b9d9..ef8093e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_nntables.c @@ -1,3 +1,5 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
* @@ -201,3 +203,5 @@ const q15_t tanhHTable_q15[192] = { 0x803c, 0x8044, 0x804d, 0x8057, 0x8062, 0x806f, 0x807e, 0x808f, 0x80a2, 0x80b8, 0x80d0, 0x80ec, 0x810b, 0x812e, 0x8156, 0x8183, 0x81b7, 0x81f1, 0x8232, 0x827c, 0x82d0, 0x832f, 0x839a, 0x8412, }; + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_no_shift.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_no_shift.c index 7045b69..110a93b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_no_shift.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_no_shift.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_q7_to_q15_no_shift.c * Description: Converts the elements of the Q7 vector to Q15 vector without left-shift * - * $Date: May 29, 2020 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.3 * * Target Processor: Cortex-M cores * @@ -39,20 +41,9 @@ * @{ */ -/** +/* * @brief Converts the elements of the Q7 vector to Q15 vector without left-shift - * @param[in] *pSrc points to the Q7 input vector - * @param[out] *pDst points to the Q15 output vector - * @param[in] blockSize length of the input vector - * - * \par Description: - * - * The equation used for the conversion process is: - * - *
- * 	pDst[n] = (q15_t) pSrc[n];   0 <= n < blockSize.
- * </pre>
- * + * Refer function header for details */ void arm_q7_to_q15_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t blockSize) @@ -119,3 +110,5 @@ void arm_q7_to_q15_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t blockSize) /** * @} end of nndata_convert group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c index 449e91b..c7ee063 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_no_shift.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_q7_to_q15_reordered_no_shift.c * Description: Converts the elements of the Q7 vector to reordered Q15 vector without left-shift * - * $Date: May 29, 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.1.1.2 * * Target Processor: Cortex-M cores * @@ -39,13 +41,10 @@ * @{ */ -/** - * @brief Converts the elements of the Q7 vector to reordered Q15 vector without left-shift - * @param[in] *pSrc points to the Q7 input vector - * @param[out] *pDst points to the Q15 output vector - * @param[in] blockSize length of the input vector +/* + * Converts the elements of the Q7 vector to reordered Q15 vector without left-shift * - * @details + * Refer to header for details * * This function does the q7 to q15 expansion with re-ordering * @@ -79,7 +78,7 @@ void arm_q7_to_q15_reordered_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t bl const q7_t *pIn = pSrc; /* Src pointer */ uint32_t blkCnt; /* loop counter */ -#ifndef ARM_MATH_CM0_FAMILY +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) q31_t in; q31_t in1, in2; @@ -103,11 +102,11 @@ void arm_q7_to_q15_reordered_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t bl in2 = __SXTB16(in); #ifndef ARM_MATH_BIG_ENDIAN - *__SIMD32(pDst)++ = in2; - *__SIMD32(pDst)++ = in1; + arm_nn_write_q7x4_ia((q7_t **)&pDst, in2); + arm_nn_write_q7x4_ia((q7_t **)&pDst, in1); #else - *__SIMD32(pDst)++ = in1; - *__SIMD32(pDst)++ = in2; + arm_nn_write_q7x4_ia((q7_t **)&pDst, in1); + arm_nn_write_q7x4_ia((q7_t **)&pDst, in2); #endif /* Decrement the loop counter */ @@ -141,3 +140,5 @@ void arm_q7_to_q15_reordered_no_shift(const q7_t *pSrc, q15_t *pDst, uint32_t bl /** * @} end of q7_to_x group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c index 4cfb03f..572c7bc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_reordered_with_offset.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -22,8 +24,8 @@ * Description: Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset. The re-ordering * is a signature of sign extension intrinsic(DSP extension). * - * $Date: May 29, 2020 - * $Revision: V.2.0.3 + * $Date: 4 Aug 2022 + * $Revision: V.2.0.4 * * Target Processor: Cortex-M cores * @@ -40,10 +42,10 @@ * @{ */ -/** - * @brief Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset. +/* + * Converts the elements of the Q7 vector to a reordered Q15 vector with an added offset. * - * @note Refer header file for details. + * Refer header file for details. * */ @@ -98,3 +100,5 @@ void arm_q7_to_q15_reordered_with_offset(const q7_t *src, q15_t *dst, uint32_t b /** * @} end of nndata_convert group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c index 4d76ae9..fd88488 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/NNSupportFunctions/arm_q7_to_q15_with_offset.c @@ -1,3 +1,5 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. * @@ -112,3 +114,5 @@ void arm_q7_to_q15_with_offset(const q7_t *src, q15_t *dst, uint32_t block_size, /** * @} end of nndata_convert group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s16.c new file mode 100644 index 0000000..be5b7f0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s16.c @@ -0,0 +1,311 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_avgpool_s16.c + * Description: Pooling function implementations + * + * $Date: 27 July 2022 + * $Revision: V.2.2.0 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + +static void scale_q31_to_q15_and_clamp(const q31_t *buffer, + q15_t *target, + int32_t length, + const int32_t count, + const int act_min, + const int act_max) +{ + const int half_count = count / 2; + + for (int i = 0; i < length; i++) + { + int32_t sum = buffer[i] > 0 ? (buffer[i] + half_count) : (buffer[i] - half_count); + sum = sum / count; + sum = MAX(sum, act_min); + sum = MIN(sum, act_max); + + target[i] = (q15_t)sum; + } +} +#endif + +/** + * @ingroup groupNN + + */ + +/** + * @addtogroup Pooling + * @{ + */ + +/* + * s16 average pooling function + * + * Refer to header file for details. + * + */ +arm_cmsis_nn_status arm_avgpool_s16(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const q15_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + q15_t *dst) +{ + const int32_t input_y = input_dims->h; + const int32_t input_x = input_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_x = output_dims->w; + const int32_t stride_y = pool_params->stride.h; + const int32_t stride_x = pool_params->stride.w; + const int32_t kernel_y = filter_dims->h; + const int32_t kernel_x = filter_dims->w; + const int32_t pad_y = pool_params->padding.h; + const int32_t pad_x = pool_params->padding.w; + const int32_t act_min = pool_params->activation.min; + const int32_t act_max = pool_params->activation.max; + const int32_t ch_src = input_dims->c; +#if defined(ARM_MATH_MVEI) + (void)ctx; + for (int i_y = 0; i_y < output_y; i_y++) + { + for (int i_x = 0; i_x < output_x; i_x++) + { + const int32_t k_y_start = MAX(0, i_y * stride_y - pad_y); + const int32_t k_y_end = MIN(i_y * stride_y - pad_y + kernel_y, input_y); + + const int32_t k_x_start = MAX(0, i_x * stride_x - pad_x); + const int32_t k_x_end = MIN(i_x * stride_x - pad_x + kernel_x, input_x); + + const int16_t *src_base = src; + int16_t *out = &dst[ch_src * (i_x + i_y * output_x)]; + + int32_t ch_count = (ch_src + 7) / 8; + int32_t channels = ch_src; + + while (ch_count > 0) + { + int32_t count = 0; + + int32x4_t sum_1 = vdupq_n_s32(0); + int32x4_t sum_2 = vdupq_n_s32(0); + // Load store tail predicate + const mve_pred16_t ld_st_p = vctp16q(channels); + channels -= 8; + + for (int k_y = k_y_start; k_y < k_y_end; k_y++) + { + for (int k_x = k_x_start; k_x < k_x_end; k_x++) + { + const int16_t *src_inner = src_base + (ch_src * (k_x + k_y * input_x)); + const int16x8_t temp = vldrhq_z_s16(src_inner, ld_st_p); + + const int32x4_t temp_lo = vmovlbq_s16(temp); + const int32x4_t temp_hi = vmovltq_s16(temp); + + sum_1 = vaddq_s32(sum_1, temp_lo); + sum_2 = vaddq_s32(sum_2, temp_hi); + + count++; + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + // Perform the following operation + // sum = sum > 0 ? 
(sum + count / 2) / count : (sum - count / 2) / count; + const int32_t half_count = count / 2; + // Predicate for 'sum > 0' operation + mve_pred16_t p = vcmpgtq_n_s32(sum_1, 0); + sum_1 = vaddq_m_n_s32(sum_1, sum_1, half_count, p); + sum_1 = vsubq_m_n_s32(sum_1, sum_1, half_count, ~p); + + p = vcmpgtq_n_s32(sum_2, 0); + sum_2 = vaddq_m_n_s32(sum_2, sum_2, half_count, p); + sum_2 = vsubq_m_n_s32(sum_2, sum_2, half_count, ~p); + + for (int i = 0; i < 4; i++) + { + sum_1[i] = sum_1[i] / count; + sum_2[i] = sum_2[i] / count; + } + + sum_1 = vmaxq_s32(sum_1, vdupq_n_s32(act_min)); + sum_1 = vminq_s32(sum_1, vdupq_n_s32(act_max)); + + sum_2 = vmaxq_s32(sum_2, vdupq_n_s32(act_min)); + sum_2 = vminq_s32(sum_2, vdupq_n_s32(act_max)); + + int16x8_t temp = vdupq_n_s16(0); + temp = vmovnbq_s32(temp, sum_1); + temp = vmovntq_s32(temp, sum_2); + + vstrhq_p_s16(out, temp, ld_st_p); + + out += 8; + ch_count--; + src_base += 8; + } + } + } +#elif defined(ARM_MATH_DSP) + + q31_t *buffer = (q31_t *)ctx->buf; + + if (buffer == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + /* Run the following code for CPU's with DSP extension + */ + for (int i_y = 0, idx_y = -pad_y; i_y < output_y; idx_y += stride_y, i_y++) + { + for (int i_x = 0, idx_x = -pad_x; i_x < output_x; idx_x += stride_x, i_x++) + { + /* Condition for kernel start dimension: + (base_idx_ + kernel__start) >= 0 */ + const int32_t kernel_y_start = MAX(0, -idx_y); + const int32_t kernel_x_start = MAX(0, -idx_x); + + /* Condition for kernel end dimension: + (base_idx_ + kernel__end) < dim_src_ */ + const int32_t kernel_y_end = MIN(kernel_y, input_y - idx_y); + const int32_t kernel_x_end = MIN(kernel_x, input_x - idx_x); + + int count = 0; + + for (int k_y = kernel_y_start; k_y < kernel_y_end; k_y++) + { + for (int k_x = kernel_x_start; k_x < kernel_x_end; k_x++) + { + const q15_t *start = src + ch_src * (k_x + idx_x + (k_y + idx_y) * input_x); + + if (count == 0) + { + for (int i = 0; i < ch_src; i++) + { + buffer[i] = start[i]; + } + } + else + { + for (int i = 0; i < ch_src; i++) + { + buffer[i] = __QADD(start[i], buffer[i]); + } + } + count++; + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + scale_q31_to_q15_and_clamp(buffer, dst, ch_src, count, act_min, act_max); + dst += ch_src; + } + } + +#else + /* Reference C code adapted from CMSIS-NN arm_avgpool_s8.c. + */ + + (void)ctx; + + for (int i_y = 0, base_idx_y = -pad_y; i_y < output_y; base_idx_y += stride_y, i_y++) + { + for (int i_x = 0, base_idx_x = -pad_x; i_x < output_x; base_idx_x += stride_x, i_x++) + { + /* Condition for kernel start dimension: (base_idx_ + kernel__start) >= 0 */ + const int32_t ker_y_start = MAX(0, -base_idx_y); + const int32_t ker_x_start = MAX(0, -base_idx_x); + + /* Condition for kernel end dimension: (base_idx_ + kernel__end) < dim_src_ */ + const int32_t kernel_y_end = MIN(kernel_y, input_y - base_idx_y); + const int32_t kernel_x_end = MIN(kernel_x, input_x - base_idx_x); + + for (int i_ch_in = 0; i_ch_in < ch_src; i_ch_in++) + { + int sum = 0; + int count = 0; + + for (int k_y = ker_y_start; k_y < kernel_y_end; k_y++) + { + for (int k_x = ker_x_start; k_x < kernel_x_end; k_x++) + { + sum += src[i_ch_in + ch_src * (k_x + base_idx_x + (k_y + base_idx_y) * input_x)]; + count++; + } + } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + sum = sum > 0 ? 
(sum + count / 2) / count : (sum - count / 2) / count; + sum = MAX(sum, act_min); + sum = MIN(sum, act_max); + + dst[i_ch_in + ch_src * (i_x + i_y * output_x)] = sum; + } + } + } +#endif + + return ARM_CMSIS_NN_SUCCESS; +} + +int32_t arm_avgpool_s16_get_buffer_size(const int output_x, const int ch_src) +{ + (void)output_x; +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + return (ch_src * (int32_t)sizeof(int32_t)); +#else + (void)ch_src; +#endif + return 0; +} + +/** + * @} end of Pooling group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c index 452222e..05c284f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_avgpool_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_avgpool_s8.c * Description: Pooling function implementations * - * $Date: 09. October 2020 - * $Revision: V.2.0.3 + * $Date: 7 July 2022 + * $Revision: V.3.0.2 * * Target Processor: Cortex-M CPUs * @@ -32,7 +34,6 @@ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" #if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) - static void scale_q31_to_q7_and_clamp(const q31_t *buffer, q7_t *target, int32_t length, @@ -41,6 +42,7 @@ static void scale_q31_to_q7_and_clamp(const q31_t *buffer, const int act_max) { const int half_count = count / 2; + for (int i = 0; i < length; i++) { int32_t sum = buffer[i] > 0 ? 
(buffer[i] + half_count) : (buffer[i] - half_count); @@ -71,13 +73,13 @@ static void scale_q31_to_q7_and_clamp(const q31_t *buffer, #if defined(ARM_MATH_MVEI) -arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *src, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *dst) +arm_cmsis_nn_status arm_avgpool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const q7_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + q7_t *dst) { (void)ctx; const int32_t input_y = input_dims->h; @@ -94,153 +96,136 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, const int32_t act_max = pool_params->activation.max; const int32_t ch_src = input_dims->c; - int32_t i_x, i_y; - int32_t k_x, k_y; - - for (i_y = 0; i_y < output_y; i_y++) + for (int i_y = 0; i_y < output_y; i_y++) { - for (i_x = 0; i_x < output_x; i_x++) + for (int i_x = 0; i_x < output_x; i_x++) { + const int32_t k_y_start = MAX(0, i_y * stride_y - pad_y); + const int32_t k_y_end = MIN(i_y * stride_y - pad_y + kernel_y, input_y); - int32_t k_y_start, k_y_end; - int32_t k_x_start, k_x_end; - int32_t chCnt; - const int8_t *pTmp, *pTmpInner; - int8_t *pDst; + const int32_t k_x_start = MAX(0, i_x * stride_x - pad_x); + const int32_t k_x_end = MIN(i_x * stride_x - pad_x + kernel_x, input_x); - k_y_start = MAX(0, i_y * stride_y - pad_y); - k_y_end = MIN(i_y * stride_y - pad_y + kernel_y, input_y); + const int8_t *src_base = src; + int8_t *out = &dst[ch_src * (i_x + i_y * output_x)]; - k_x_start = MAX(0, i_x * stride_x - pad_x); - k_x_end = MIN(i_x * stride_x - pad_x + kernel_x, input_x); + int32_t ch_count = (ch_src + 15) / 16; + int32_t channels = ch_src; - pTmp = src; - pDst = &dst[ch_src * (i_x + i_y * output_x)]; - - chCnt = ch_src >> 4; - while (chCnt > 0) + while (ch_count > 0) { - int32x4_t sumV1, sumV2, sumV3, sumV4; - - int8x16_t tempV; - int16x8_t tempVLO, tempVHI; - int32x4_t tempVLOLO, tempVLOHI, tempVHILO, tempVHIHI; + int8x16_t temp; + int16x8_t temp_lo, temp_hi; + int32x4_t temp_lo_lo, temp_lo_hi, temp_hi_lo, temp_hi_hi; int32_t count = 0; - sumV1 = vdupq_n_s32(0); - sumV2 = vdupq_n_s32(0); - sumV3 = vdupq_n_s32(0); - sumV4 = vdupq_n_s32(0); + int32x4_t sum_1 = vdupq_n_s32(0); + int32x4_t sum_2 = vdupq_n_s32(0); + int32x4_t sum_3 = vdupq_n_s32(0); + int32x4_t sum_4 = vdupq_n_s32(0); + // Load store tail predicate + const mve_pred16_t ld_st_p = vctp8q(channels); + channels -= 16; - for (k_y = k_y_start; k_y < k_y_end; k_y++) + for (int k_y = k_y_start; k_y < k_y_end; k_y++) { - for (k_x = k_x_start; k_x < k_x_end; k_x++) + for (int k_x = k_x_start; k_x < k_x_end; k_x++) { - pTmpInner = pTmp + (ch_src * (k_x + k_y * input_x)); - tempV = vldrbq_s8(pTmpInner); + const int8_t *src_inner = src_base + (ch_src * (k_x + k_y * input_x)); + temp = vldrbq_z_s8(src_inner, ld_st_p); - tempVLO = vmovlbq_s8(tempV); - tempVHI = vmovltq_s8(tempV); + temp_lo = vmovlbq_s8(temp); + temp_hi = vmovltq_s8(temp); - tempVLOLO = vmovlbq_s16(tempVLO); - tempVLOHI = vmovltq_s16(tempVLO); + temp_lo_lo = vmovlbq_s16(temp_lo); + temp_lo_hi = vmovltq_s16(temp_lo); - tempVHILO = vmovlbq_s16(tempVHI); - tempVHIHI = vmovltq_s16(tempVHI); + temp_hi_lo = vmovlbq_s16(temp_hi); + temp_hi_hi = vmovltq_s16(temp_hi); - sumV1 = vaddq_s32(sumV1, tempVLOLO); - sumV2 = vaddq_s32(sumV2, tempVLOHI); - sumV3 = vaddq_s32(sumV3, tempVHILO); - sumV4 = vaddq_s32(sumV4, 
tempVHIHI); + sum_1 = vaddq_s32(sum_1, temp_lo_lo); + sum_2 = vaddq_s32(sum_2, temp_lo_hi); + sum_3 = vaddq_s32(sum_3, temp_hi_lo); + sum_4 = vaddq_s32(sum_4, temp_hi_hi); count++; } } - sumV1[0] = sumV1[0] > 0 ? (sumV1[0] + count / 2) / count : (sumV1[0] - count / 2) / count; - sumV1[1] = sumV1[1] > 0 ? (sumV1[1] + count / 2) / count : (sumV1[1] - count / 2) / count; - sumV1[2] = sumV1[2] > 0 ? (sumV1[2] + count / 2) / count : (sumV1[2] - count / 2) / count; - sumV1[3] = sumV1[3] > 0 ? (sumV1[3] + count / 2) / count : (sumV1[3] - count / 2) / count; - - sumV2[0] = sumV2[0] > 0 ? (sumV2[0] + count / 2) / count : (sumV2[0] - count / 2) / count; - sumV2[1] = sumV2[1] > 0 ? (sumV2[1] + count / 2) / count : (sumV2[1] - count / 2) / count; - sumV2[2] = sumV2[2] > 0 ? (sumV2[2] + count / 2) / count : (sumV2[2] - count / 2) / count; - sumV2[3] = sumV2[3] > 0 ? (sumV2[3] + count / 2) / count : (sumV2[3] - count / 2) / count; - - sumV3[0] = sumV3[0] > 0 ? (sumV3[0] + count / 2) / count : (sumV3[0] - count / 2) / count; - sumV3[1] = sumV3[1] > 0 ? (sumV3[1] + count / 2) / count : (sumV3[1] - count / 2) / count; - sumV3[2] = sumV3[2] > 0 ? (sumV3[2] + count / 2) / count : (sumV3[2] - count / 2) / count; - sumV3[3] = sumV3[3] > 0 ? (sumV3[3] + count / 2) / count : (sumV3[3] - count / 2) / count; + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } - sumV4[0] = sumV4[0] > 0 ? (sumV4[0] + count / 2) / count : (sumV4[0] - count / 2) / count; - sumV4[1] = sumV4[1] > 0 ? (sumV4[1] + count / 2) / count : (sumV4[1] - count / 2) / count; - sumV4[2] = sumV4[2] > 0 ? (sumV4[2] + count / 2) / count : (sumV4[2] - count / 2) / count; - sumV4[3] = sumV4[3] > 0 ? (sumV4[3] + count / 2) / count : (sumV4[3] - count / 2) / count; + // Perform the following operation + // sum = sum > 0 ? 
(sum + count / 2) / count : (sum - count / 2) / count; + const int32_t half_count = count / 2; + // Predicate for 'sum > 0' operation + mve_pred16_t p = vcmpgtq_n_s32(sum_1, 0); + sum_1 = vaddq_m_n_s32(sum_1, sum_1, half_count, p); + sum_1 = vsubq_m_n_s32(sum_1, sum_1, half_count, ~p); - sumV1 = vmaxq_s32(sumV1, vdupq_n_s32(act_min)); - sumV1 = vminq_s32(sumV1, vdupq_n_s32(act_max)); + p = vcmpgtq_n_s32(sum_2, 0); + sum_2 = vaddq_m_n_s32(sum_2, sum_2, half_count, p); + sum_2 = vsubq_m_n_s32(sum_2, sum_2, half_count, ~p); - sumV2 = vmaxq_s32(sumV2, vdupq_n_s32(act_min)); - sumV2 = vminq_s32(sumV2, vdupq_n_s32(act_max)); + p = vcmpgtq_n_s32(sum_3, 0); + sum_3 = vaddq_m_n_s32(sum_3, sum_3, half_count, p); + sum_3 = vsubq_m_n_s32(sum_3, sum_3, half_count, ~p); - sumV3 = vmaxq_s32(sumV3, vdupq_n_s32(act_min)); - sumV3 = vminq_s32(sumV3, vdupq_n_s32(act_max)); + p = vcmpgtq_n_s32(sum_4, 0); + sum_4 = vaddq_m_n_s32(sum_4, sum_4, half_count, p); + sum_4 = vsubq_m_n_s32(sum_4, sum_4, half_count, ~p); - sumV4 = vmaxq_s32(sumV4, vdupq_n_s32(act_min)); - sumV4 = vminq_s32(sumV4, vdupq_n_s32(act_max)); + for (int i = 0; i < 4; i++) + { + sum_1[i] = sum_1[i] / count; + sum_2[i] = sum_2[i] / count; + sum_3[i] = sum_3[i] / count; + sum_4[i] = sum_4[i] / count; + } - tempVLO = vmovnbq_s32(tempVLO, sumV1); - tempVLO = vmovntq_s32(tempVLO, sumV2); + sum_1 = vmaxq_s32(sum_1, vdupq_n_s32(act_min)); + sum_1 = vminq_s32(sum_1, vdupq_n_s32(act_max)); - tempVHI = vmovnbq_s32(tempVHI, sumV3); - tempVHI = vmovntq_s32(tempVHI, sumV4); + sum_2 = vmaxq_s32(sum_2, vdupq_n_s32(act_min)); + sum_2 = vminq_s32(sum_2, vdupq_n_s32(act_max)); - tempV = vmovnbq_s16(tempV, tempVLO); - tempV = vmovntq_s16(tempV, tempVHI); + sum_3 = vmaxq_s32(sum_3, vdupq_n_s32(act_min)); + sum_3 = vminq_s32(sum_3, vdupq_n_s32(act_max)); - vstrbq_s8(pDst, tempV); - pDst += 16; + sum_4 = vmaxq_s32(sum_4, vdupq_n_s32(act_min)); + sum_4 = vminq_s32(sum_4, vdupq_n_s32(act_max)); - chCnt--; - pTmp += 16; - } + temp_lo = vmovnbq_s32(temp_lo, sum_1); + temp_lo = vmovntq_s32(temp_lo, sum_2); - chCnt = ch_src & 0xF; - while (chCnt > 0) - { - int32_t sum = 0; - int32_t count = 0; + temp_hi = vmovnbq_s32(temp_hi, sum_3); + temp_hi = vmovntq_s32(temp_hi, sum_4); - for (k_y = k_y_start; k_y < k_y_end; k_y++) - { - for (k_x = k_x_start; k_x < k_x_end; k_x++) - { - sum += pTmp[ch_src * (k_x + k_y * input_x)]; - count++; - } - } - sum = sum > 0 ? 
(sum + count / 2) / count : (sum - count / 2) / count; - sum = MAX(sum, act_min); - sum = MIN(sum, act_max); + temp = vmovnbq_s16(temp, temp_lo); + temp = vmovntq_s16(temp, temp_hi); - *pDst++ = sum; + vstrbq_p_s8(out, temp, ld_st_p); + out += 16; - chCnt--; - pTmp++; + ch_count--; + src_base += 16; } } } - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } #else -arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *src, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *dst) +arm_cmsis_nn_status arm_avgpool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const q7_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + q7_t *dst) { const int32_t input_y = input_dims->h; const int32_t input_x = input_dims->w; @@ -255,6 +240,11 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, const int32_t act_min = pool_params->activation.min; const int32_t act_max = pool_params->activation.max; const int32_t ch_src = input_dims->c; + + if (ctx->buf == NULL && arm_avgpool_s8_get_buffer_size(output_dims->w, input_dims->c)) + { + return ARM_CMSIS_NN_ARG_ERROR; + } q31_t *buffer = (q31_t *)ctx->buf; #if defined(ARM_MATH_DSP) @@ -300,6 +290,13 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, count++; } } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + scale_q31_to_q7_and_clamp(buffer, dst, ch_src, count, act_min, act_max); dst += ch_src; } @@ -309,20 +306,18 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, /* Reference C code adapted from CMSIS-NN arm_avepool_q7_HWC. */ (void)buffer; - int16_t i_ch_in, i_x, i_y; - int16_t k_x, k_y; - for (i_y = 0; i_y < output_y; i_y++) + for (int i_y = 0; i_y < output_y; i_y++) { - for (i_x = 0; i_x < output_x; i_x++) + for (int i_x = 0; i_x < output_x; i_x++) { - for (i_ch_in = 0; i_ch_in < ch_src; i_ch_in++) + for (int i_ch_in = 0; i_ch_in < ch_src; i_ch_in++) { int sum = 0; int count = 0; - for (k_y = i_y * stride_y - pad_y; k_y < i_y * stride_y - pad_y + kernel_y; k_y++) + for (int k_y = i_y * stride_y - pad_y; k_y < i_y * stride_y - pad_y + kernel_y; k_y++) { - for (k_x = i_x * stride_x - pad_x; k_x < i_x * stride_x - pad_x + kernel_x; k_x++) + for (int k_x = i_x * stride_x - pad_x; k_x < i_x * stride_x - pad_x + kernel_x; k_x++) { if (k_y >= 0 && k_x >= 0 && k_y < input_y && k_x < input_x) { @@ -331,6 +326,13 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, } } } + + // Prevent static code issue DIVIDE_BY_ZERO. + if (count == 0) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + sum = sum > 0 ? 
(sum + count / 2) / count : (sum - count / 2) / count; sum = MAX(sum, act_min); sum = MIN(sum, act_max); @@ -341,7 +343,7 @@ arm_status arm_avgpool_s8(const cmsis_nn_context *ctx, } #endif - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } #endif /* ARM_MATH_MVEI */ @@ -360,3 +362,5 @@ int32_t arm_avgpool_s8_get_buffer_size(const int output_x, const int ch_src) /** * @} end of Pooling group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s16.c new file mode 100644 index 0000000..0b39d5e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s16.c @@ -0,0 +1,216 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * SPDX-FileCopyrightText: Copyright 2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_max_pool_s16.c + * Description: Pooling function implementations + * + * $Date: 16 August 2022 + * $Revision: V.2.1.1 + * + * Target Processor: Cortex-M CPUs + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +static void compare_and_replace_if_larger(int16_t *base, const int16_t *target, int32_t length) +{ +#if defined(ARM_MATH_MVEI) + int32_t loop_count = (length + 7) / 8; + for (int i = 0; i < loop_count; i++) + { + mve_pred16_t p = vctp16q((uint32_t)length); + const int16x8_t op_1 = vldrhq_z_s16(base, p); + const int16x8_t op_2 = vldrhq_z_s16(target, p); + const int16x8_t max = vmaxq_s16(op_1, op_2); + vstrhq_p_s16(base, max, p); + base += 8; + target += 8; + length -= 8; + } +#else + q15_t *dst = base; + const q15_t *src = target; + union arm_nnword ref_max; + union arm_nnword comp_max; + int32_t cnt = length >> 1; + + while (cnt > 0l) + { + ref_max.word = arm_nn_read_q15x2(dst); + comp_max.word = arm_nn_read_q15x2_ia(&src); + + if (comp_max.half_words[0] > ref_max.half_words[0]) + { + ref_max.half_words[0] = comp_max.half_words[0]; + } + if (comp_max.half_words[1] > ref_max.half_words[1]) + { + ref_max.half_words[1] = comp_max.half_words[1]; + } + + arm_nn_write_q15x2_ia(&dst, ref_max.word); + + cnt--; + } + + if (length & 0x1) + { + if (*src > *dst) + { + *dst = *src; + } + } +#endif +} + +static void clamp_output(int16_t *source, int32_t length, const int16_t act_min, const int16_t act_max) +{ +#if defined(ARM_MATH_MVEI) + const int16x8_t min = vdupq_n_s16((int16_t)act_min); + const int16x8_t max = vdupq_n_s16((int16_t)act_max); + + 
int32_t loop_count = (length + 7) / 8; + for (int i = 0; i < loop_count; i++) + { + mve_pred16_t p = vctp16q((uint32_t)length); + length -= 8; + const int16x8_t src = vldrhq_z_s16(source, p); + int16x8_t res = vmaxq_x_s16(src, min, p); + res = vminq_x_s16(res, max, p); + vstrhq_p_s16(source, res, p); + source += 8; + } +#else + union arm_nnword in; + int32_t cnt = length >> 1; + + while (cnt > 0l) + { + in.word = arm_nn_read_q15x2(source); + + in.half_words[0] = MAX(in.half_words[0], act_min); + in.half_words[0] = MIN(in.half_words[0], act_max); + in.half_words[1] = MAX(in.half_words[1], act_min); + in.half_words[1] = MIN(in.half_words[1], act_max); + + arm_nn_write_q15x2_ia(&source, in.word); + cnt--; + } + + if (length & 0x1) + { + int16_t comp = *source; + comp = MAX(comp, act_min); + comp = MIN(comp, act_max); + *source = comp; + } +#endif +} + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup Pooling + * @{ + */ + +/* + * Optimized s16 max pooling function + * + * Refer to header file for details. + * + */ + +arm_cmsis_nn_status arm_max_pool_s16(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + const cmsis_nn_dims *input_dims, + const int16_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + int16_t *dst) +{ + const int32_t input_y = input_dims->h; + const int32_t input_x = input_dims->w; + const int32_t output_y = output_dims->h; + const int32_t output_x = output_dims->w; + const int32_t stride_y = pool_params->stride.h; + const int32_t stride_x = pool_params->stride.w; + const int32_t kernel_y = filter_dims->h; + const int32_t kernel_x = filter_dims->w; + const int32_t pad_y = pool_params->padding.h; + const int32_t pad_x = pool_params->padding.w; + const int16_t act_min = pool_params->activation.min; + const int16_t act_max = pool_params->activation.max; + const int32_t channel_in = input_dims->c; + (void)ctx; + int16_t *dst_base = dst; + + for (int i_y = 0, base_idx_y = -pad_y; i_y < output_y; base_idx_y += stride_y, i_y++) + { + for (int i_x = 0, base_idx_x = -pad_x; i_x < output_x; base_idx_x += stride_x, i_x++) + { + /* Condition for kernel start dimension: (base_idx_ + kernel__start) >= 0 */ + const int32_t ker_y_start = MAX(0, -base_idx_y); + const int32_t ker_x_start = MAX(0, -base_idx_x); + + /* Condition for kernel end dimension: (base_idx_ + kernel__end) < dim_src_ */ + const int32_t kernel_y_end = MIN(kernel_y, input_y - base_idx_y); + const int32_t kernel_x_end = MIN(kernel_x, input_x - base_idx_x); + + int count = 0; + + for (int k_y = ker_y_start; k_y < kernel_y_end; k_y++) + { + for (int k_x = ker_x_start; k_x < kernel_x_end; k_x++) + { + const int16_t *start = src + channel_in * (k_x + base_idx_x + (k_y + base_idx_y) * input_x); + + if (count == 0) + { + memcpy(dst, start, channel_in * sizeof(int16_t)); + count++; + } + else + { + compare_and_replace_if_larger(dst, start, channel_in); + } + } + } + /* 'count' is expected to be non-zero here. 
*/ + dst += channel_in; + } + } + + clamp_output(dst_base, output_x * output_y * channel_in, act_min, act_max); + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Pooling group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s8.c index 6c3d3b2..581a8c6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_max_pool_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_max_pool_s8.c * Description: Pooling function implementations * - * $Date: 19. Februari 2021 - * $Revision: V.2.0.2 + * $Date: 16 August 2022 + * $Revision: V.3.0.1 * * Target Processor: Cortex-M CPUs * @@ -40,7 +42,7 @@ static void compare_and_replace_if_larger_q7(q7_t *base, const q7_t *target, int mve_pred16_t p = vctp8q((uint32_t)length); const int8x16_t op_1 = vldrbq_z_s8(base, p); const int8x16_t op_2 = vldrbq_z_s8(target, p); - const int8x16_t max = vmaxq_m_s8(vuninitializedq_s8(), op_1, op_2, p); + const int8x16_t max = vmaxq_x_s8(op_1, op_2, p); vstrbq_p_s8(base, max, p); base += 16; target += 16; @@ -75,7 +77,7 @@ static void compare_and_replace_if_larger_q7(q7_t *base, const q7_t *target, int ref_max.bytes[3] = comp_max.bytes[3]; } - write_q7x4_ia(&dst, ref_max.word); + arm_nn_write_q7x4_ia(&dst, ref_max.word); cnt--; } @@ -98,15 +100,16 @@ static void clamp_output(q7_t *source, int32_t length, const int32_t act_min, co { #if defined(ARM_MATH_MVEI) int32_t loop_count = (length + 15) / 16; + const int8x16_t vmin = vdupq_n_s8((int8_t)act_min); + const int8x16_t vmax = vdupq_n_s8((int8_t)act_max); + for (int i = 0; i < loop_count; i++) { mve_pred16_t p = vctp8q((uint32_t)length); length -= 16; const int8x16_t src = vldrbq_z_s8(source, p); - const int8x16_t predicated_min = vdupq_m_n_s8(vuninitializedq_s8(), (int8_t)act_min, p); - const int8x16_t predicated_max = vdupq_m_n_s8(vuninitializedq_s8(), (int8_t)act_max, p); - int8x16_t res = vmaxq_m_s8(vuninitializedq_s8(), src, predicated_min, p); - res = vminq_m_s8(vuninitializedq_s8(), res, predicated_max, p); + int8x16_t res = vmaxq_x_s8(src, vmin, p); + res = vminq_x_s8(res, vmax, p); vstrbq_p_s8(source, res, p); source += 16; } @@ -127,7 +130,7 @@ static void clamp_output(q7_t *source, int32_t length, const int32_t act_min, co in.bytes[3] = MAX(in.bytes[3], act_min); in.bytes[3] = MIN(in.bytes[3], act_max); - write_q7x4_ia(&source, in.word); + arm_nn_write_q7x4_ia(&source, in.word); cnt--; } @@ -159,13 +162,13 @@ static void clamp_output(q7_t *source, int32_t length, const int32_t act_min, co * */ -arm_status arm_max_pool_s8(const cmsis_nn_context *ctx, - const cmsis_nn_pool_params *pool_params, - const cmsis_nn_dims *input_dims, - const q7_t *src, - const cmsis_nn_dims *filter_dims, - const cmsis_nn_dims *output_dims, - q7_t *dst) +arm_cmsis_nn_status arm_max_pool_s8(const cmsis_nn_context *ctx, + const cmsis_nn_pool_params *pool_params, + 
const cmsis_nn_dims *input_dims, + const q7_t *src, + const cmsis_nn_dims *filter_dims, + const cmsis_nn_dims *output_dims, + q7_t *dst) { const int32_t input_y = input_dims->h; const int32_t input_x = input_dims->w; @@ -205,7 +208,7 @@ arm_status arm_max_pool_s8(const cmsis_nn_context *ctx, if (count == 0) { - memcpy(dst, start, channel_in); + arm_memcpy_q7(dst, start, channel_in); count++; } else @@ -221,9 +224,11 @@ arm_status arm_max_pool_s8(const cmsis_nn_context *ctx, clamp_output(dst_base, output_x * output_y * channel_in, act_min, act_max); - return ARM_MATH_SUCCESS; + return ARM_CMSIS_NN_SUCCESS; } /** * @} end of Pooling group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_pool_q7_HWC.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_pool_q7_HWC.c index 1c3fda4..c88fc24 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_pool_q7_HWC.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/PoolingFunctions/arm_pool_q7_HWC.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_pool_q7_HWC.c * Description: Pooling function implementations * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.1.1.2 * * Target Processor: Cortex-M cores * @@ -31,10 +33,10 @@ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) -/** - * @brief A few utility functions used by pooling functions +/* + * A few utility functions used by pooling functions * * */ @@ -75,7 +77,7 @@ static void compare_and_replace_if_larger_q7(q7_t *base, // base data if (com.bytes[3] > in.bytes[3]) in.bytes[3] = com.bytes[3]; - *__SIMD32(pIn)++ = in.word; + arm_nn_write_q7x4_ia(&pIn, in.word); cnt--; } @@ -119,10 +121,10 @@ static void accumulate_q7_to_q15(q15_t *base, q7_t *target, const uint16_t lengt #endif in = arm_nn_read_q15x2(pCnt); - *__SIMD32(pCnt)++ = __QADD16(vo1, in); + arm_nn_write_q15x2_ia(&pCnt, __QADD16(vo1, in)); in = arm_nn_read_q15x2(pCnt); - *__SIMD32(pCnt)++ = __QADD16(vo2, in); + arm_nn_write_q15x2_ia(&pCnt, __QADD16(vo2, in)); cnt--; } @@ -178,7 +180,7 @@ void arm_maxpool_q7_HWC(q7_t *Im_in, q7_t *Im_out) { (void)bufferA; -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ int16_t i_x, i_y; @@ -334,7 +336,7 @@ void arm_avepool_q7_HWC(q7_t *Im_in, q7_t *Im_out) { -#if defined(ARM_MATH_DSP) +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) /* Run the following code for Cortex-M4 and Cortex-M7 */ q15_t *buffer = (q15_t *)bufferA; @@ -462,3 +464,5 @@ void arm_avepool_q7_HWC(q7_t *Im_in, /** * @} end of Pooling group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c index f772263..0b1892b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/ReshapeFunctions/arm_reshape_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,14 +23,15 @@ * Title: arm_reshape_s8.c * Description: Reshape a s8 vector * - * $Date: September 2019 - * $Revision: V.1.0.0 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.1 * * Target Processor: Cortex-M cores * * -------------------------------------------------------------------- */ #include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" /** * @ingroup groupNN @@ -39,7 +42,7 @@ * @{ */ -/** +/* * Basic s8 reshape function. * * Refer header file for details. @@ -48,9 +51,11 @@ void arm_reshape_s8(const int8_t *input, int8_t *output, const uint32_t total_size) { - memcpy(output, input, total_size); + arm_memcpy_q7(output, input, total_size); } /** * @} end of Reshape group - */ \ No newline at end of file + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SVDFunctions/arm_svdf_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SVDFunctions/arm_svdf_s8.c index c99350d..3d386e8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SVDFunctions/arm_svdf_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SVDFunctions/arm_svdf_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2022 Arm Limited or its affiliates. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_svdf_s8.c * Description: S8 basic SVDF layer function * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 4 May 2022 + * $Revision: V.4.0.1 * * Target Processor: Cortex-M processors * @@ -41,29 +43,29 @@ */ /* - * S8 SVDF layer function for TensorFlow Lite + * S8 SVDF layer function for TensorFlow Lite with 8 bit state tensor * * Refer to header file for details. 
* */ -arm_status arm_svdf_s8(const cmsis_nn_context *input_ctx, - const cmsis_nn_context *output_ctx, - const cmsis_nn_svdf_params *svdf_params, - const cmsis_nn_per_tensor_quant_params *input_quant_params, - const cmsis_nn_per_tensor_quant_params *output_quant_params, - const cmsis_nn_dims *input_dims, - const q7_t *input_data, - const cmsis_nn_dims *state_dims, - q15_t *state_data, - const cmsis_nn_dims *weights_feature_dims, - const q7_t *weights_feature_data, - const cmsis_nn_dims *weights_time_dims, - const q15_t *weights_time_data, - const cmsis_nn_dims *bias_dims, - const q31_t *bias_data, - const cmsis_nn_dims *output_dims, - q7_t *output_data) +arm_cmsis_nn_status arm_svdf_s8(const cmsis_nn_context *input_ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_svdf_params *svdf_params, + const cmsis_nn_per_tensor_quant_params *input_quant_params, + const cmsis_nn_per_tensor_quant_params *output_quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *state_dims, + q7_t *state_data, + const cmsis_nn_dims *weights_feature_dims, + const q7_t *weights_feature_data, + const cmsis_nn_dims *weights_time_dims, + const q7_t *weights_time_data, + const cmsis_nn_dims *bias_dims, + const q31_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data) { (void)bias_dims; (void)state_dims; @@ -81,141 +83,193 @@ arm_status arm_svdf_s8(const cmsis_nn_context *input_ctx, const int32_t out_activation_max = svdf_params->output_activation.max; const int16_t rank = svdf_params->rank; - int32_t zp_32 = (-zp_in & 0xffff) | ((-zp_in & 0xffff) << 16); - const int32_t input_batches = input_dims->n; const int32_t input_height = input_dims->h; const int32_t feature_batches = weights_feature_dims->n; const int32_t time_batches = weights_time_dims->h; const int32_t unit_count = feature_batches / rank; + if (input_ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } q31_t *buffer_a = (q31_t *)input_ctx->buf; + + if (output_ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } q31_t *buffer_b = (q31_t *)output_ctx->buf; - memmove((q15_t *)state_data, - (q15_t *)state_data + 1, - (size_t)(input_batches * feature_batches * time_batches * (int32_t)sizeof(int16_t))); + // Left shift state + memmove((int8_t *)state_data, + (int8_t *)state_data + 1, + (size_t)((input_batches * feature_batches * time_batches - 1) * (int32_t)sizeof(int8_t))); - q15_t *res_ptr = state_data + (time_batches - 1); + // Matrix multiplication input * feature weight for (int i_batch = 0; i_batch < input_batches; i_batch++) { - const q7_t *buffer_1 = weights_feature_data; - for (int r = 0; r < feature_batches; r++) - { - q31_t dot_prod = 0; + q7_t *res_ptr = state_data + (time_batches * i_batch * feature_batches) + (time_batches - 1); + const q7_t *weight = weights_feature_data; + const q7_t *input = input_data + i_batch * input_height; - const q7_t *buffer_2 = input_data + i_batch * input_height; + arm_cmsis_nn_status res = arm_nn_vec_mat_mult_t_s8(input, + weight, + NULL, + res_ptr, + -zp_in, + 0, + 0, + multiplier_in, + shift_in, + input_height, + feature_batches, + in_activation_min, + in_activation_max, + time_batches); -#if defined(ARM_MATH_DSP) - int c = 0; - int32_t block_count = input_height >> 2; - for (int i = 0; i < block_count; i++) - { - c += 4; + if (res != ARM_CMSIS_NN_SUCCESS) + { + return res; + } + } - q31_t r1 = arm_nn_read_q7x4_ia(&buffer_1); - q31_t r1_a = __SXTB16(r1); - q31_t r1_b = __SXTB16(__ROR((uint32_t)r1, 8)); + // Matrix multiplicate time weight * 
state tensors + { + q31_t *ptr_a = buffer_a; + const int8_t *v2 = state_data; + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + const int8_t *v1 = weights_time_data; - q31_t r2 = arm_nn_read_q7x4_ia(&buffer_2); - q31_t r2_a = __SXTAB16(zp_32, r2); - q31_t r2_b = __SXTAB16(zp_32, __ROR((uint32_t)r2, 8)); + for (int i_feature_batch = 0; i_feature_batch < feature_batches; i_feature_batch++) + { + *ptr_a = 0; + int32_t sum = 0; +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + // Perform matrix multiplication in blocks of four + int j = 0; + int32_t block_count = time_batches >> 2; + for (int i = 0; i < block_count; i++) + { + j += 4; - dot_prod = __SMLAD(r1_a, r2_a, dot_prod); - dot_prod = __SMLAD(r1_b, r2_b, dot_prod); - } + q31_t r1_1, r1_2, r2_1, r2_2; + v1 = read_and_pad_reordered(v1, &r1_1, &r1_2); + v2 = read_and_pad_reordered(v2, &r2_1, &r2_2); + sum = __SMLAD(r1_1, r2_1, sum); + sum = __SMLAD(r1_2, r2_2, sum); + } - for (; c < input_height; c++) - { - dot_prod += *buffer_1 * (*buffer_2 - zp_in); - buffer_1++; - buffer_2++; - } + // Process the remaining data + for (; j < time_batches; j++) + { + sum += *v1 * *v2; + v1++; + v2++; + } #else - for (int c = 0; c < input_height; c++) - { - dot_prod += *buffer_1 * (*buffer_2 - zp_in); - buffer_1++; - buffer_2++; - } + for (int j = 0; j < time_batches; j++) + { + sum += *v1 * *v2; + v1++; + v2++; + } #endif - dot_prod = arm_nn_requantize(dot_prod, multiplier_in, shift_in); - dot_prod = CLAMP(dot_prod, in_activation_max, in_activation_min); - *res_ptr = dot_prod; - res_ptr += time_batches; + *ptr_a = sum; + ptr_a++; + } } } - for (int i_batch = 0; i_batch < input_batches; i_batch++) + if (bias_data) { - q31_t *ptr_a = buffer_a + i_batch * feature_batches; - - const q15_t *v1 = weights_time_data; - const q15_t *v2 = state_data + i_batch * time_batches * feature_batches; - for (int i_feature_batch = 0; i_feature_batch < feature_batches; i_feature_batch++) + if (unit_count == feature_batches) { - *ptr_a = 0; - - int32_t sum = 0; -#if defined(ARM_MATH_DSP) - int j = 0; - int32_t block_count = time_batches >> 1; - for (int i = 0; i < block_count; i++) + for (int i = 0; i < input_batches; i++) { - j += 2; - q31_t r1 = arm_nn_read_q15x2_ia(&v1); - q31_t r2 = arm_nn_read_q15x2_ia(&v2); - - sum = __SMLAD(r1, r2, sum); - } + q31_t *output_temp = buffer_b + i * feature_batches; + const q31_t *ptr_a = buffer_a + i * feature_batches; - // Process the remaining data - for (; j < time_batches; j++) - { - sum += *v1 * *v2; - v1++; - v2++; + const int32_t *bi = bias_data; + for (int j = 0; j < feature_batches; j++) + { + output_temp[j] = ptr_a[j] + bi[j]; + } } -#else - for (int j = 0; j < time_batches; j++) + } + else + { + for (int i_batch = 0; i_batch < input_batches; i_batch++) { - sum += *v1 * *v2; - v1++; - v2++; - } -#endif + q31_t *output_data_temp = buffer_b + i_batch * unit_count; + q31_t *ptr_a = buffer_a + i_batch * feature_batches; - *ptr_a = sum; - ptr_a++; + for (int i = 0; i < unit_count; i++) + { + int32_t sum = bias_data[i]; + for (int j = 0; j < rank; j++) + { + sum += *ptr_a; + ptr_a++; + } + output_data_temp[i] = sum; + } + } } } - - for (int i_batch = 0; i_batch < input_batches; i_batch++) + else { - q31_t *output_data_temp = buffer_b + i_batch * unit_count; - q31_t *ptr_a = buffer_a + i_batch * feature_batches; - - for (int i = 0; i < unit_count; i++) + for (int i_batch = 0; i_batch < input_batches; i_batch++) { - output_data_temp[i] = bias_data[i]; - for (int j = 0; j < rank; j++) + q31_t *output_data_temp = 
buffer_b + i_batch * unit_count; + q31_t *ptr_a = buffer_a + i_batch * feature_batches; + + for (int i = 0; i < unit_count; i++) { - output_data_temp[i] += *ptr_a; - ptr_a++; + int32_t sum = 0; + for (int j = 0; j < rank; j++) + { + sum += *ptr_a; + ptr_a++; + } + output_data_temp[i] = sum; } } } +#if defined(ARM_MATH_MVEI) + int32_t num_elements = input_batches * unit_count; + const int32_t loop_count = (num_elements + 3) / 4; + for (int i_op = 0; i_op < loop_count; i_op++) + { + mve_pred16_t p = vctp32q((uint32_t)num_elements); + int32x4_t op = vldrwq_z_s32(buffer_b, p); + op = arm_requantize_mve(op, multiplier_out, shift_2); + op = vaddq_n_s32(op, zp_out); + const int32x4_t min_vec = vdupq_n_s32((int8_t)out_activation_min); + const int32x4_t max_vec = vdupq_n_s32((int8_t)out_activation_max); + op = vmaxq_s32(op, min_vec); + op = vminq_s32(op, max_vec); + vstrbq_p_s32(output_data, op, p); + output_data += 4; + buffer_b += 4; + num_elements -= 4; + } +#else for (int i = 0; i < input_batches * unit_count; i++) { output_data[i] = (q7_t)CLAMP( arm_nn_requantize(buffer_b[i], multiplier_out, shift_2) + zp_out, out_activation_max, out_activation_min); } +#endif - return (ARM_MATH_SUCCESS); + return (ARM_CMSIS_NN_SUCCESS); } /** * @} end of SVDF group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SVDFunctions/arm_svdf_state_s16_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SVDFunctions/arm_svdf_state_s16_s8.c new file mode 100644 index 0000000..d804121 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SVDFunctions/arm_svdf_state_s16_s8.c @@ -0,0 +1,271 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_svdf_s8.c + * Description: S8 basic SVDF layer function with s16 state tensor + * + * $Date: 4 May 2022 + * $Revision: V.2.0.1 + * + * Target Processor: Cortex-M processors + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup SVDF + * @{ + */ + +/* + * S8 SVDF layer function for TensorFlow Lite with 16 bit state tensor + * + * Refer to header file for details. 
+ * + */ + +arm_cmsis_nn_status arm_svdf_state_s16_s8(const cmsis_nn_context *input_ctx, + const cmsis_nn_context *output_ctx, + const cmsis_nn_svdf_params *svdf_params, + const cmsis_nn_per_tensor_quant_params *input_quant_params, + const cmsis_nn_per_tensor_quant_params *output_quant_params, + const cmsis_nn_dims *input_dims, + const q7_t *input_data, + const cmsis_nn_dims *state_dims, + q15_t *state_data, + const cmsis_nn_dims *weights_feature_dims, + const q7_t *weights_feature_data, + const cmsis_nn_dims *weights_time_dims, + const q15_t *weights_time_data, + const cmsis_nn_dims *bias_dims, + const q31_t *bias_data, + const cmsis_nn_dims *output_dims, + q7_t *output_data) +{ + (void)bias_dims; + (void)state_dims; + (void)output_dims; + + const q31_t multiplier_in = input_quant_params->multiplier; + const q31_t shift_in = input_quant_params->shift; + const q31_t multiplier_out = output_quant_params->multiplier; + const q31_t shift_2 = output_quant_params->shift; + const int32_t zp_in = svdf_params->input_offset; + const int32_t zp_out = svdf_params->output_offset; + const int32_t in_activation_min = svdf_params->input_activation.min; + const int32_t in_activation_max = svdf_params->input_activation.max; + const int32_t out_activation_min = svdf_params->output_activation.min; + const int32_t out_activation_max = svdf_params->output_activation.max; + const int16_t rank = svdf_params->rank; + + const int32_t input_batches = input_dims->n; + const int32_t input_height = input_dims->h; + const int32_t feature_batches = weights_feature_dims->n; + const int32_t time_batches = weights_time_dims->h; + const int32_t unit_count = feature_batches / rank; + + if (input_ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + q31_t *buffer_a = (q31_t *)input_ctx->buf; + + if (output_ctx->buf == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + q31_t *buffer_b = (q31_t *)output_ctx->buf; + + // Left shift state + memmove((q15_t *)state_data, + (q15_t *)state_data + 1, + (size_t)((input_batches * feature_batches * time_batches - 1) * (int32_t)sizeof(int16_t))); + + // Matrix multiplication input * feature weight + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + q15_t *res_ptr = state_data + (time_batches * i_batch * feature_batches) + (time_batches - 1); + const q7_t *weight = weights_feature_data; + const q7_t *input = input_data + i_batch * input_height; + + arm_cmsis_nn_status res = arm_nn_vec_mat_mult_t_svdf_s8(input, + weight, + res_ptr, + -zp_in, + 0, + time_batches, + multiplier_in, + shift_in, + input_height, + feature_batches, + in_activation_min, + in_activation_max); + + if (res != ARM_CMSIS_NN_SUCCESS) + { + return res; + } + } + + { + // Matrix multiplication time weight * state tensors + q31_t *ptr_a = buffer_a; + const q15_t *v2 = state_data; + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + const q15_t *v1 = weights_time_data; + + for (int i_feature_batch = 0; i_feature_batch < feature_batches; i_feature_batch++) + { + *ptr_a = 0; + int32_t sum = 0; +#if defined(ARM_MATH_DSP) && !defined(ARM_MATH_MVEI) + // Perform matrix multiplication in blocks of two + int j = 0; + int32_t block_count = time_batches >> 1; + for (int i = 0; i < block_count; i++) + { + j += 2; + q31_t r1 = arm_nn_read_q15x2_ia(&v1); + q31_t r2 = arm_nn_read_q15x2_ia(&v2); + + sum = __SMLAD(r1, r2, sum); + } + + // Process the remaining data + for (; j < time_batches; j++) + { + sum += *v1 * *v2; + v1++; + v2++; + } +#else + for (int j = 0; j < time_batches; j++) + { + sum += *v1 * 
*v2; + v1++; + v2++; + } +#endif + + *ptr_a = sum; + ptr_a++; + } + } + } + + if (bias_data) + { + if (unit_count == feature_batches) + { + for (int i = 0; i < input_batches; i++) + { + q31_t *output_temp = buffer_b + i * feature_batches; + const q31_t *ptr_a = buffer_a + i * feature_batches; + + const int32_t *bi = bias_data; + for (int j = 0; j < feature_batches; j++) + { + output_temp[j] = ptr_a[j] + bi[j]; + } + } + } + else + { + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + q31_t *output_data_temp = buffer_b + i_batch * unit_count; + q31_t *ptr_a = buffer_a + i_batch * feature_batches; + + for (int i = 0; i < unit_count; i++) + { + int32_t sum = bias_data[i]; + for (int j = 0; j < rank; j++) + { + sum += *ptr_a; + ptr_a++; + } + output_data_temp[i] = sum; + } + } + } + } + else + { + for (int i_batch = 0; i_batch < input_batches; i_batch++) + { + q31_t *output_data_temp = buffer_b + i_batch * unit_count; + q31_t *ptr_a = buffer_a + i_batch * feature_batches; + + for (int i = 0; i < unit_count; i++) + { + int32_t sum = 0; + for (int j = 0; j < rank; j++) + { + sum += *ptr_a; + ptr_a++; + } + output_data_temp[i] = sum; + } + } + } + +#if defined(ARM_MATH_MVEI) + int32_t num_elements = input_batches * unit_count; + const int32_t loop_count = (num_elements + 3) / 4; + for (int i_op = 0; i_op < loop_count; i_op++) + { + mve_pred16_t p = vctp32q((uint32_t)num_elements); + int32x4_t op = vldrwq_z_s32(buffer_b, p); + op = arm_requantize_mve(op, multiplier_out, shift_2); + op = vaddq_n_s32(op, zp_out); + const int32x4_t min_vec = vdupq_n_s32((int8_t)out_activation_min); + const int32x4_t max_vec = vdupq_n_s32((int8_t)out_activation_max); + op = vmaxq_s32(op, min_vec); + op = vminq_s32(op, max_vec); + vstrbq_p_s32(output_data, op, p); + output_data += 4; + buffer_b += 4; + num_elements -= 4; + } +#else + for (int i = 0; i < input_batches * unit_count; i++) + { + output_data[i] = (q7_t)CLAMP( + arm_nn_requantize(buffer_b[i], multiplier_out, shift_2) + zp_out, out_activation_max, out_activation_min); + } +#endif + + return (ARM_CMSIS_NN_SUCCESS); +} + +/** + * @} end of SVDF group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c new file mode 100644 index 0000000..5328340 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_nn_softmax_common_s8.c @@ -0,0 +1,151 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_nn_softmax_common_s8.c + * Description: Softmax with s8 input and output of s8 or s16. + * + * $Date: 17 March 2022 + * $Revision: V.1.0.1 + * + * Target Processor: Cortex-M processors + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +#define ACCUM_BITS 12 + +/** + * @ingroup groupSupport + */ + +/** + * @addtogroup Softmax + * @{ + */ + +/* + * Softmax function with s8 input and output of s8 or s16. + * + * Refer header file for details. + * + */ +void arm_nn_softmax_common_s8(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + const bool int16_output, + void *output) +{ + const int32_t mask = (1 << shift); + + int32_t col = 0; + int32_t row_idx; + + for (row_idx = 0; row_idx < num_rows; ++row_idx) + { + // Find the maximum value in order to ensure numerical stability + int8_t max = *input; + + for (col = 1; col < row_size; ++col) + { + max = MAX(max, input[col]); + } + + int32_t diff = 0; + int32_t sum = 0; + + for (col = 0; col < row_size; ++col) + { + diff = input[col] - max; + if (diff >= diff_min) + { + sum += DIV_POW2(EXP_ON_NEG(MUL_SAT(diff * mask, mult)), ACCUM_BITS); + } + } + + const int32_t headroom = __CLZ(sum); + const int32_t shifted_scale = ONE_OVER1((sum > 0 ? sum << headroom : 0) - (1 << 31)); + int32_t bits_over_unit; + + if (int16_output) + { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I16 + return; +#endif + int16_t *output_s16 = (int16_t *)output + row_idx * row_size; + + bits_over_unit = ACCUM_BITS - headroom + 15; + + for (col = 0; col < row_size; ++col) + { + diff = input[col] - max; + + if (diff >= diff_min) + { + const int32_t res = + DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) + + NN_Q15_MIN; + output_s16[col] = (int16_t)CLAMP(res, (int32_t)NN_Q15_MAX, (int32_t)NN_Q15_MIN); + } + else + { + output_s16[col] = NN_Q15_MIN; + } + } + } + else + { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I8 + return; +#endif + int8_t *output_s8 = (int8_t *)output + row_idx * row_size; + + bits_over_unit = ACCUM_BITS - headroom + 23; + + for (col = 0; col < row_size; ++col) + { + diff = input[col] - max; + if (diff >= diff_min) + { + const int32_t res = + DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) + + NN_Q7_MIN; + output_s8[col] = (int8_t)CLAMP(res, (int32_t)NN_Q7_MAX, (int32_t)NN_Q7_MIN); + } + else + { + output_s8[col] = NN_Q7_MIN; + } + } + } + + input += row_size; + } +} + +/** + * @} end of NNBasicMath group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q15.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q15.c index 59e1a87..550c111 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q15.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q15.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2018 Arm Limited or its affiliates. All rights reserved. 
+ * SPDX-FileCopyrightText: Copyright 2010-2018, 2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_softmax_q15.c * Description: Q15 softmax function * - * $Date: 09. October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.2 * * Target Processor: Cortex-M cores * @@ -39,13 +41,9 @@ * @{ */ -/** - * @brief Q15 softmax function - * @param[in] vec_in pointer to input vector - * @param[in] dim_vec input vector dimention - * @param[out] p_out pointer to output vector +/* + * Q15 softmax function * - * @details * * Here, instead of typical e based softmax, we use * 2-based softmax, i.e.,: @@ -116,3 +114,5 @@ void arm_softmax_q15(const q15_t *vec_in, const uint16_t dim_vec, q15_t *p_out) /** * @} end of Softmax group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c index f86c3be..bb37660 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_q7.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2020, 2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_softmax_q7.c * Description: Q7 softmax function * - * $Date: 09. October 2020 - * $Revision: V.1.0.2 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.3 * * Target Processor: Cortex-M cores * @@ -39,13 +41,8 @@ * @{ */ -/** - * @brief Q7 softmax function - * @param[in] vec_in pointer to input vector - * @param[in] dim_vec input vector dimention - * @param[out] p_out pointer to output vector - * - * @details +/* + * Q7 softmax function * * Here, instead of typical natural logarithm e based softmax, we use * 2-based softmax here, i.e.,: @@ -105,3 +102,5 @@ void arm_softmax_q7(const q7_t *vec_in, const uint16_t dim_vec, q7_t *p_out) /** * @} end of Softmax group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s16.c new file mode 100644 index 0000000..be45eae --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s16.c @@ -0,0 +1,126 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_softmax_s16.c + * Description: S16 softmax function + * + * $Date: 19 April 2022 + * $Revision: V.2.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @addtogroup Softmax + * @{ + */ + +arm_cmsis_nn_status arm_softmax_s16(const int16_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const cmsis_nn_softmax_lut_s16 *softmax_params, + int16_t *output) +{ + int32_t col = 0; + int32_t row_idx; + + if (softmax_params->exp_lut == NULL || softmax_params->one_by_one_lut == NULL) + { + return ARM_CMSIS_NN_ARG_ERROR; + } + + for (row_idx = 0; row_idx < num_rows; ++row_idx) + { + // Find the maximum value in order to ensure numerical stability + int16_t max = *input; + for (col = 1; col < row_size; ++col) + { + max = MAX(max, input[col]); + } + + int32_t diff = 0; + int32_t sum = 0; + int16_t *cached_exp_results = output; + + for (col = 0; col < row_size; ++col) + { + diff = input[col] - max; + const int32_t scaled_diff = arm_nn_requantize(diff, mult, shift); + const int32_t symmetric_scaled_diff = scaled_diff + NN_Q15_MAX; + const int16_t saturated_symmetric_scaled_diff = MIN(MAX(symmetric_scaled_diff, NN_Q15_MIN), NN_Q15_MAX); + + // Lookup from exp table and cache result for next step + const int16_t index = (256 + (saturated_symmetric_scaled_diff >> 7)); + const int16_t offset = saturated_symmetric_scaled_diff & 0x7f; + const int16_t base = softmax_params->exp_lut[index]; + const int16_t slope = softmax_params->exp_lut[index + 1] - softmax_params->exp_lut[index]; + const int16_t delta = (slope * offset + 64) >> 7; + const int16_t result = (base + delta); + cached_exp_results[col] = result; + + sum += cached_exp_results[col]; + } + + const int32_t headroom = __CLZ(sum); + + // Compute the reciprocal 1/sum + const int32_t shifted_sum = (((sum) << (headroom - 1)) + (1 << 13)) >> 14; + + // Since LUT computes 1/(1 + x), compute x = (sum - 1) => -65536 + // Since LUT expects a symmetrical input, recenter from [UINT16_MIN, UINT16_MAX] to [INT16_MIN, INT16_MAX] => + // -32768 ==> So in total -65536 -32768 => -98304 + const int16_t symmetric_shifted_sum = shifted_sum - 98304; + + // Lookup from one by one table + const int16_t index = (256 + (symmetric_shifted_sum >> 7)); + const int16_t offset = symmetric_shifted_sum & 0x7f; + const int16_t base = softmax_params->one_by_one_lut[index]; + const int16_t slope = softmax_params->one_by_one_lut[index + 1] - softmax_params->one_by_one_lut[index]; + const int16_t delta = (slope * offset + 64) >> 7; + const int16_t one_by_one_result = (base + delta); + + for (col = 0; col < row_size; ++col) + { + const int16_t right_shift = 30 - headroom; + int32_t result = (cached_exp_results[col] * one_by_one_result) >> right_shift; + result = (result + 1) >> 1; // Last shift position and insert round + output[col] = (int16_t)result; + } + + output += row_size; + input += row_size; + } + + return ARM_CMSIS_NN_SUCCESS; +} + +/** + * @} end of Softmax group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c index ff5a7f4..2de8707 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. + * Copyright (C) 2010-2022 Arm Limited or its affiliates. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_softmax_s8.c * Description: S8 softmax function * - * $Date: 09. October 2020 - * $Revision: V.2.0.1 + * $Date: 9 March 2022 + * $Revision: V.2.1.0 * * Target Processor: Cortex-M cores * @@ -69,7 +71,7 @@ static int32x4_t arm_exp_on_negative_values_mve_32x4(int32x4_t val) mve_pred16_t p = vcmpeqq_n_s32(val, 0); mask = vmvnq_m_s32(vdupq_n_s32(0), vdupq_n_s32(0), p); - result = SELECT_USING_MASK(mask, vdupq_n_s32(Q31_MAX), result); + result = SELECT_USING_MASK(mask, vdupq_n_s32(NN_Q31_MAX), result); return result; } #endif @@ -93,8 +95,8 @@ void arm_softmax_s8(const int8_t *input, { #ifdef ARM_MATH_MVEI -#define ACT_MIN ((int8_t)Q7_MIN) -#define ACT_MAX ((int8_t)Q7_MAX) +#define ACT_MIN ((int8_t)NN_Q7_MIN) +#define ACT_MAX ((int8_t)NN_Q7_MAX) const int32_t mask = (1 << shift); @@ -149,7 +151,7 @@ void arm_softmax_s8(const int8_t *input, const int32_t headroom = __CLZ((uint32_t)sum); const int32_t bits_over_unit = ACCUM_BITS - headroom + 23; - const int32_t shifted_scale = ONE_OVER1((sum << headroom) - (1 << 31)); + const int32_t shifted_scale = ONE_OVER1((sum > 0 ? 
sum << headroom : 0) - (1 << 31)); vec_count = row_size / 4; idx = 0; @@ -192,7 +194,8 @@ void arm_softmax_s8(const int8_t *input, if (diff >= diff_min) { const int32_t res = - DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) - 128; + DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) + + NN_Q7_MIN; output[tail_idx + i] = (int8_t)CLAMP(res, (int32_t)ACT_MAX, (int32_t)ACT_MIN); } else @@ -205,57 +208,12 @@ void arm_softmax_s8(const int8_t *input, output += row_size; } #else - const int32_t mask = (1 << shift); - - int32_t col = 0; - int32_t row_idx; - - for (row_idx = 0; row_idx < num_rows; ++row_idx) - { - // Find the maximum value in order to ensure numerical stability - int8_t max = *input; - - for (col = 1; col < row_size; ++col) - { - max = MAX(max, input[col]); - } - - int32_t diff = 0; - int32_t sum = 0; - - for (col = 0; col < row_size; ++col) - { - diff = input[col] - max; - if (diff >= diff_min) - { - sum += DIV_POW2(EXP_ON_NEG(MUL_SAT(diff * mask, mult)), ACCUM_BITS); - } - } - - const int32_t headroom = __CLZ(sum); - const int32_t bits_over_unit = ACCUM_BITS - headroom + 23; - const int32_t shifted_scale = ONE_OVER1((sum << headroom) - (1 << 31)); - - for (col = 0; col < row_size; ++col) - { - diff = input[col] - max; - if (diff >= diff_min) - { - const int32_t res = - DIV_POW2(MUL_SAT(shifted_scale, EXP_ON_NEG(MUL_SAT(diff * mask, mult))), bits_over_unit) - 128; - output[col] = (int8_t)CLAMP(res, (int32_t)127, (int32_t)-128); - } - else - { - output[col] = -128; - } - } - input += row_size; - output += row_size; - } - + arm_nn_softmax_common_s8(input, num_rows, row_size, mult, shift, diff_min, false, (void *)output); #endif } + /** * @} end of Softmax group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8_s16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8_s16.c new file mode 100644 index 0000000..a6eb67a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_s8_s16.c @@ -0,0 +1,59 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES +/* + * Copyright (C) 2022 Arm Limited or its affiliates. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +/* ---------------------------------------------------------------------- + * Project: CMSIS NN Library + * Title: arm_softmax_s8_s16.c + * Description: S8 to s16 softmax function + * + * $Date: 7 January 2022 + * $Revision: V.1.0.0 + * + * Target Processor: Cortex-M cores + * + * -------------------------------------------------------------------- */ + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnsupportfunctions.h" + +/** + * @ingroup groupNN + */ + +/** + * @addtogroup Softmax + * @{ + */ + +void arm_softmax_s8_s16(const int8_t *input, + const int32_t num_rows, + const int32_t row_size, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int16_t *output) +{ + arm_nn_softmax_common_s8(input, num_rows, row_size, mult, shift, diff_min, true, (void *)output); +} +/** + * @} end of Softmax group + */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_u8.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_u8.c index 148e955..a9b27fb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_u8.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_u8.c @@ -1,3 +1,5 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* * Copyright (C) 2010-2020 Arm Limited or its affiliates. All rights reserved. * @@ -100,4 +102,5 @@ void arm_softmax_u8(const uint8_t *input, } /** * @} end of Softmax group - */ \ No newline at end of file + */ +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c index 6d2e3e4..25220fe 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/NN/Source/SoftmaxFunctions/arm_softmax_with_batch_q7.c @@ -1,5 +1,7 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES /* - * Copyright (C) 2010-2019 Arm Limited or its affiliates. All rights reserved. + * SPDX-FileCopyrightText: Copyright 2010-2019, 2022 Arm Limited and/or its affiliates * * SPDX-License-Identifier: Apache-2.0 * @@ -21,8 +23,8 @@ * Title: arm_softmax_with_batch_q7.c * Description: Q7 softmax function * - * $Date: 09. 
October 2020 - * $Revision: V.1.0.1 + * $Date: 4 Aug 2022 + * $Revision: V.1.0.2 * * Target Processor: Cortex-M and Cortex-A cores * @@ -39,14 +41,10 @@ * @{ */ -/** - * @brief Q7 softmax function with batch parameter - * @param[in] vec_in pointer to input vector - * @param[in] nb_batches number of batches - * @param[in] dim_vec input vector dimention - * @param[out] p_out pointer to output vector +/* + * Q7 softmax function with batch parameter * - * @details + * details * * Here, instead of typical natural logarithm e based softmax, we use * 2-based softmax here, i.e.,: @@ -72,3 +70,5 @@ void arm_softmax_with_batch_q7(const q7_t *vec_in, const uint16_t nb_batches, co /** * @} end of Softmax group */ + +#endif // EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/LICENSE b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/LICENSE index 6332af0..7c3542e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/LICENSE +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/LICENSE @@ -8,4 +8,3 @@ Folders containing files under different permissive license than Apache 2.0 are * tensorflow - Apache 2.0 * third_party/flatbuffers - Apache 2.0 * third_party/gemmlowp - Apache 2.0 -* utensor - Apache 2.0 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/LICENSE-apache-2.0.txt b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/LICENSE-apache-2.0.txt index 59cd3f8..0cdd12c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/LICENSE-apache-2.0.txt +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/LICENSE-apache-2.0.txt @@ -162,4 +162,4 @@ in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. +accepting any such warranty or additional liability. \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/anomaly/anomaly.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/anomaly/anomaly.h deleted file mode 100644 index ee90353..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/anomaly/anomaly.h +++ /dev/null @@ -1,87 +0,0 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. - */ - -#ifndef _EDGE_IMPULSE_ANOMALY_H_ -#define _EDGE_IMPULSE_ANOMALY_H_ - -#include -#include -#include -#include "model-parameters/anomaly_types.h" - -#ifdef __cplusplus -namespace { -#endif // __cplusplus - -/** - * Standard scaler, scales all values in the input vector - * Note that this *modifies* the array in place! - * @param input Array of input values - * @param scale Array of scale values (obtain from StandardScaler in Python) - * @param mean Array of mean values (obtain from StandardScaler in Python) - * @param input_size Size of input, scale and mean arrays - */ -void standard_scaler(float *input, const float *scale, const float *mean, size_t input_size) { - for (size_t ix = 0; ix < input_size; ix++) { - input[ix] = (input[ix] - mean[ix]) / scale[ix]; - } -} - -/** - * Calculate the distance between input vector and the cluster - * @param input Array of input values (already scaled by standard_scaler) - * @param input_size Size of the input array - * @param cluster A cluster (number of centroids should match input_size) - */ -float calculate_cluster_distance(float *input, size_t input_size, const ei_classifier_anom_cluster_t *cluster) { - // todo: check input_size and centroid size? - - float dist = 0.0f; - for (size_t ix = 0; ix < input_size; ix++) { - dist += pow(input[ix] - cluster->centroid[ix], 2); - } - return sqrt(dist) - cluster->max_error; -} - -/** - * Get minimum distance to a cluster - * @param input Array of input values (already scaled by standard_scaler) - * @param input_size Size of the input array - * @param clusters Array of clusters - * @param cluster_size Size of cluster array - */ -float get_min_distance_to_cluster(float *input, size_t input_size, const ei_classifier_anom_cluster_t *clusters, size_t cluster_size) { - float min = 1000.0f; - for (size_t ix = 0; ix < cluster_size; ix++) { - float dist = calculate_cluster_distance(input, input_size, &clusters[ix]); - if (dist < min) { - min = dist; - } - } - return min; -} - -#ifdef __cplusplus -} -#endif // __cplusplus - -#endif // _EDGE_IMPULSE_ANOMALY_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_aligned_malloc.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_aligned_malloc.h index 0981724..7ef1a26 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_aligned_malloc.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_aligned_malloc.h @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EDGE_IMPULSE_ALIGNED_MALLOC_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_config.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_config.h index cb33b0a..8865a85 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_config.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_config.h @@ -1,38 +1,48 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
+ * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EI_CLASSIFIER_CONFIG_H_ #define _EI_CLASSIFIER_CONFIG_H_ // clang-format off + +// This is a file that's only used in benchmarking to override HW optimized kernels +#ifdef __has_include + #if __has_include("source/benchmark.h") + #include "source/benchmark.h" + #endif +#endif + +#if EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP == 1 + #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 0 + #define EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES 1 +#endif + #ifndef EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN #if defined(__MBED__) - #include "mbed.h" - #if (MBED_VERSION < MBED_ENCODE_VERSION(5, 7, 0)) + #include "mbed_version.h" + #if (MBED_VERSION < MBED_ENCODE_VERSION((5), (7), (0))) #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 0 #else #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 1 #endif // Mbed OS 5.7 version check -#elif defined(__TARGET_CPU_CORTEX_M0) || defined(__TARGET_CPU_CORTEX_M0PLUS) || defined(__TARGET_CPU_CORTEX_M3) || defined(__TARGET_CPU_CORTEX_M4) || defined(__TARGET_CPU_CORTEX_M7) || defined(ARDUINO_NRF52_ADAFRUIT) + +// __ARM_ARCH_PROFILE is a predefine of arm-gcc. __TARGET_* is armcc +#elif __ARM_ARCH_PROFILE == 'M' || defined(__TARGET_CPU_CORTEX_M0) || defined(__TARGET_CPU_CORTEX_M0PLUS) || defined(__TARGET_CPU_CORTEX_M3) || defined(__TARGET_CPU_CORTEX_M4) || defined(__TARGET_CPU_CORTEX_M7) || defined(ARDUINO_NRF52_ADAFRUIT) #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 1 #else #define EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN 0 @@ -51,7 +61,8 @@ #endif // EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 #if EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 -#define CMSIS_NN 1 +#define CMSIS_NN 1 +#define EI_CLASSIFIER_TFLITE_LOAD_CMSIS_NN_SOURCES 1 #endif #ifndef EI_CLASSIFIER_TFLITE_ENABLE_ARC @@ -62,5 +73,30 @@ #endif // CPU_ARC #endif // EI_CLASSIFIER_TFLITE_ENABLE_ARC +#ifndef EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN + #if defined(ESP32) + #include "sdkconfig.h" + #define EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN 1 + #define ESP_NN 1 + #endif // ESP32 check + #if defined(CONFIG_IDF_TARGET_ESP32S3) + #define EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 1 + #endif // ESP32S3 check +#else + #define ESP_NN 1 +#endif + +// no include checks in the compiler? then just include metadata and then ops_define (optional if on EON model) +#ifndef __has_include + #include "model-parameters/model_metadata.h" + #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED == 1) + #include "tflite-model/trained_model_ops_define.h" + #endif +#else + #if __has_include("tflite-model/trained_model_ops_define.h") + #include "tflite-model/trained_model_ops_define.h" + #endif +#endif // __has_include + // clang-format on #endif // _EI_CLASSIFIER_CONFIG_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_smooth.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_smooth.h index 8072a87..4f7e039 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_smooth.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_smooth.h @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. 
* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EI_CLASSIFIER_SMOOTH_H_ @@ -84,7 +79,7 @@ const char* ei_classifier_smooth_update(ei_classifier_smooth_t *smooth, ei_impul reading = (int)ix; } } -#if EI_CLASSIFIER_HAS_ANOMALY == 1 +#if EI_CLASSIFIER_HAS_ANOMALY if (result->anomaly >= smooth->anomaly_confidence) { reading = -2; // anomaly } @@ -138,7 +133,7 @@ const char* ei_classifier_smooth_update(ei_classifier_smooth_t *smooth, ei_impul * Clear up a smooth structure */ void ei_classifier_smooth_free(ei_classifier_smooth_t *smooth) { - free(smooth->last_readings); + ei_free(smooth->last_readings); } #endif // #if EI_CLASSIFIER_OBJECT_DETECTION != 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_types.h index d146bde..b9404ea 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_types.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_classifier_types.h @@ -1,70 +1,289 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_TYPES_H_ #define _EDGE_IMPULSE_RUN_CLASSIFIER_TYPES_H_ #include +// needed for standalone C example #include "model-parameters/model_metadata.h" +#ifndef EI_CLASSIFIER_MAX_OBJECT_DETECTION_COUNT +#define EI_CLASSIFIER_MAX_OBJECT_DETECTION_COUNT 10 +#endif + +/** + * @defgroup ei_structs Structs + * + * Public-facing structs for Edge Impulse C++ SDK. + * + * @addtogroup ei_structs + * @{ + */ + +/** + * @brief Holds the output of inference, anomaly results, and timing information. + * + * `ei_impulse_result_t` holds the output of `run_classifier()`. If object detection is + * enabled, then the output results is a + * pointer to an array of bounding boxes of size `bounding_boxes_count`, as given by + * [ei_impulse_result_bounding_box_t](https://docs.edgeimpulse.com/reference/ei_impulse_result_bounding_box_t). + * Otherwise, results are stored as an array of classification scores, as given by + * [ei_impulse_result_classification_t](https://docs.edgeimpulse.com/reference/ei_impulse_result_classification_t). + * + * If anomaly detection is enabled (e.g. `EI_CLASSIFIER_HAS_ANOMALY == 1`), then the + * anomaly score will be stored as a floating point value in `anomaly`. + * + * Timing information is stored in an + * [ei_impulse_result_timing_t](https://docs.edgeimpulse.com/reference/ei_impulse_result_timing_t) + * struct. + * + * **Source**: [classifier/ei_classifier_types.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/classifier/ei_classifier_types.h) + * + * **Example**: [standalone inferencing main.cpp](https://github.com/edgeimpulse/example-standalone-inferencing/blob/master/source/main.cpp) + */ typedef struct { + /** + * Label of the detected object + */ const char *label; + + /** + * Value of the detected object + */ float value; } ei_impulse_result_classification_t; +/** + * @brief Holds the output of visual anomaly detection (FOMO-AD) + * + * If visual anomaly detection is enabled (e.g. `EI_CLASSIFIER_HAS_VISUAL_ANOMALY == + * 1`), then the output results will be a pointer to an array of grid cells of size + * `visual_ad_count`, as given by + * [ei_impulse_result_bounding_box_t](https://docs.edgeimpulse.com/reference/ei_impulse_result_bounding_box_t). + * + * The visual anomaly detection result is stored in `visual_ad_result`, which contains the mean and max values of the grid cells. 
+ * + * **Source**: [classifier/ei_classifier_types.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/classifier/ei_classifier_types.h) + * + * **Example**: [standalone inferencing main.cpp](https://github.com/edgeimpulse/example-standalone-inferencing/blob/master/source/main.cpp) +*/ +typedef struct { + /** + * Mean value of the grid cells + */ + float mean_value; + + /** + * Max value of the grid cells + */ + float max_value; +} ei_impulse_visual_ad_result_t; + +/** + * @brief Holds information for a single bounding box. + * + * If object detection is enabled (i.e. `EI_CLASSIFIER_OBJECT_DETECTION == 1`), then + * inference results will be one or more bounding boxes. The bounding boxes with the + * highest confidence scores (assuming those scores are equal to or greater than + * `EI_CLASSIFIER_OBJECT_DETECTION_THRESHOLD`), given by the `value` member, are + * returned from inference. The total number of bounding boxes returned will be at + * least `EI_CLASSIFIER_OBJECT_DETECTION_COUNT`. The exact number of bounding boxes + * is stored in `bounding_boxes_count` field of [ei_impulse_result_t]/C++ Inference + * SDK Library/structs/ei_impulse_result_t.md). + * + * A bounding box is a rectangle that ideally surrounds the identified object. The + * (`x`, `y`) coordinates in the struct identify the top-left corner of the box. + * `label` is the predicted class with the highest confidence score. `value` is the + * confidence score between [0.0..1.0] of the given `label`. + * + * **Source**: [classifier/ei_classifier_types.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/classifier/ei_classifier_types.h) + * + * **Example**: [standalone inferencing main.cpp](https://github.com/edgeimpulse/example-standalone-inferencing/blob/master/source/main.cpp) +*/ typedef struct { + /** + * Pointer to a character array describing the associated class of the given + * bounding box. Taken from one of the elements of + * `ei_classifier_inferencing_categories[]`. + */ const char *label; + + /** + * x coordinate of the top-left corner of the bounding box + */ uint32_t x; + + /** + * y coordinate of the top-left corner of the bounding box + */ uint32_t y; + + /** + * Width of the bounding box + */ uint32_t width; + + /** + * Height of the bounding box + */ uint32_t height; + + /** + * Confidence score of the label describing the bounding box + */ float value; } ei_impulse_result_bounding_box_t; +/** + * @brief Holds timing information about the processing (DSP) and inference blocks. + * + * Records timing information during the execution of the preprocessing (DSP) and + * inference blocks. Can be used to determine if inference will meet timing requirements + * on your particular platform. + * + * **Source**: [classifier/ei_classifier_types.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/classifier/ei_classifier_types.h) + * + * **Example**: [standalone inferencing main.cpp](https://github.com/edgeimpulse/example-standalone-inferencing/blob/master/source/main.cpp) + */ typedef struct { + /** + * If using `run_impulse()` to perform sampling and inference, it is the amount of + * time (in milliseconds) it took to fetch raw samples. Not used for + * `run_classifier()`. 
+ */ int sampling; + + /** + * Amount of time (in milliseconds) it took to run the preprocessing (DSP) block + */ int dsp; + + /** + * Amount of time (in milliseconds) it took to run the inference block + */ int classification; + + /** + * Amount of time (in milliseconds) it took to run anomaly detection. Valid only if + * `EI_CLASSIFIER_HAS_ANOMALY == 1`. + */ int anomaly; + + /** + * Amount of time (in microseconds) it took to run the preprocessing (DSP) block + */ + int64_t dsp_us; + + /** + * Amount of time (in microseconds) it took to run the inference block + */ + int64_t classification_us; + + /** + * Amount of time (in microseconds) it took to run anomaly detection. Valid only if + * `EI_CLASSIFIER_HAS_ANOMALY == 1`. + */ + int64_t anomaly_us; } ei_impulse_result_timing_t; +/** + * @brief Holds the output of inference, anomaly results, and timing information. + * + * `ei_impulse_result_t` holds the output of `run_classifier()`. If object detection is + * enabled (e.g. `EI_CLASSIFIER_OBJECT_DETECTION == 1`), then the output results is a + * pointer to an array of bounding boxes of size `bounding_boxes_count`, as given by + * [ei_impulse_result_bounding_box_t](https://docs.edgeimpulse.com/reference/ei_impulse_result_bounding_box_t). + * Otherwise, results are stored as an array of classification scores, as given by + * [ei_impulse_result_classification_t](https://docs.edgeimpulse.com/reference/ei_impulse_result_classification_t). + * + * If anomaly detection is enabled (e.g. `EI_CLASSIFIER_HAS_ANOMALY == 1`), then the + * anomaly score will be stored as a floating point value in `anomaly`. + * + * Timing information is stored in an + * [ei_impulse_result_timing_t](https://docs.edgeimpulse.com/reference/ei_impulse_result_timing_t) + * struct. + * + * **Source**: [classifier/ei_classifier_types.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/classifier/ei_classifier_types.h) + * + * **Example**: [standalone inferencing main.cpp](https://github.com/edgeimpulse/example-standalone-inferencing/blob/master/source/main.cpp) + */ typedef struct { -#if EI_CLASSIFIER_OBJECT_DETECTION == 1 - ei_impulse_result_bounding_box_t bounding_boxes[EI_CLASSIFIER_OBJECT_DETECTION_COUNT]; + /** + * Array of bounding boxes of the detected objects, if object detection is enabled. + */ + ei_impulse_result_bounding_box_t *bounding_boxes; + + /** + * Number of bounding boxes detected. If object detection is not enabled, this will + * be 0. + */ + uint32_t bounding_boxes_count; + + /** + * Array of classification results. If object detection is enabled, this will be + * empty. + */ +#if EI_CLASSIFIER_LABEL_COUNT == 0 + // EI_CLASSIFIER_LABEL_COUNT can be 0 for anomaly only models + // to prevent compiler warnings/errors, we need to have at least one element + ei_impulse_result_classification_t classification[1]; #else ei_impulse_result_classification_t classification[EI_CLASSIFIER_LABEL_COUNT]; #endif + + /** + * Anomaly score. If anomaly detection is not enabled, this will be 0. A higher + * anomaly score indicates greater likelihood of an anomalous sample (e.g. it is + * farther away from its cluster). + */ float anomaly; + + /** + * Timing information for the processing (DSP) and inference blocks. + */ ei_impulse_result_timing_t timing; + + /** + * Copy the output data to a buffer. If set to false, the output data will be + * returned as a pointer to the internal buffer. If set to true, the output data + * will be copied to the buffer provided in `ei_impulse_output_t`.
+ */ + bool copy_output; +#if EI_CLASSIFIER_HAS_VISUAL_ANOMALY || __DOXYGEN__ + /** + * Array of grid cells of the detected visual anomalies, if visual anomaly detection + * is enabled. + */ + ei_impulse_result_bounding_box_t *visual_ad_grid_cells; + + /** + * Number of grid cells detected as visual anomalies, if visual anomaly detection is + * enabled. + */ + uint32_t visual_ad_count; + + /** + * Visual anomaly detection result, if visual anomaly detection is enabled. + */ + ei_impulse_visual_ad_result_t visual_ad_result; +#endif // EI_CLASSIFIER_HAS_VISUAL_ANOMALY } ei_impulse_result_t; -typedef struct { - uint32_t buf_idx; - float running_sum; -#if (EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW > 1) - float maf_buffer[(EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW >> 1)]; -#else - float maf_buffer[1]; -#endif -}ei_impulse_maf; +/** @} */ #endif // _EDGE_IMPULSE_RUN_CLASSIFIER_TYPES_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_fill_result_struct.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_fill_result_struct.h new file mode 100644 index 0000000..b6f2b32 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_fill_result_struct.h @@ -0,0 +1,1732 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
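With `ei_impulse_result_t` now fully declared above, here is a rough end-to-end sketch of how application code typically consumes it, loosely following the standalone-inferencing example linked from the doc comments. The `features` buffer and `get_features()` callback are placeholders; the include path is the SDK's usual entry header, and an Arduino build would normally include the exported library header instead:

// Rough sketch only; not part of this patch.
#include "edge-impulse-sdk/classifier/ei_run_classifier.h"
#include <string.h>

// Placeholder raw-feature buffer, filled elsewhere (e.g. from the gas sensor).
static float features[EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE];

static int get_features(size_t offset, size_t length, float *out_ptr) {
    memcpy(out_ptr, features + offset, length * sizeof(float));
    return 0;
}

void print_inference_once(void) {
    signal_t signal;
    signal.total_length = EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE;
    signal.get_data = &get_features;

    ei_impulse_result_t result = { 0 };
    if (run_classifier(&signal, &result, false /* debug */) != EI_IMPULSE_OK) {
        ei_printf("run_classifier failed\n");
        return;
    }

    // Millisecond timings documented in ei_impulse_result_timing_t above.
    ei_printf("DSP: %d ms, inference: %d ms, anomaly: %d ms\n",
              result.timing.dsp, result.timing.classification, result.timing.anomaly);

#if EI_CLASSIFIER_OBJECT_DETECTION == 1
    // Entries padded by the fill helpers carry value == 0, so skip them.
    for (uint32_t i = 0; i < result.bounding_boxes_count; i++) {
        ei_impulse_result_bounding_box_t bb = result.bounding_boxes[i];
        if (bb.value == 0) {
            continue;
        }
        ei_printf("%s (%.5f) [ x: %u, y: %u, width: %u, height: %u ]\n",
                  bb.label, bb.value, bb.x, bb.y, bb.width, bb.height);
    }
#else
    for (size_t i = 0; i < EI_CLASSIFIER_LABEL_COUNT; i++) {
        ei_printf("%s: %.5f\n", result.classification[i].label,
                  result.classification[i].value);
    }
#endif

#if EI_CLASSIFIER_HAS_ANOMALY == 1
    ei_printf("anomaly score: %.3f\n", result.anomaly);
#endif
}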
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_FILL_RESULT_STRUCT_H_ +#define _EI_CLASSIFIER_FILL_RESULT_STRUCT_H_ + +using namespace ei; + +#include "model-parameters/model_metadata.h" +#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1 +#include "model-parameters/model_variables.h" +#endif +#include "edge-impulse-sdk/classifier/ei_model_types.h" +#include "edge-impulse-sdk/classifier/ei_classifier_types.h" +#include "edge-impulse-sdk/classifier/ei_nms.h" +#include "edge-impulse-sdk/dsp/ei_vector.h" + +#ifndef EI_HAS_OBJECT_DETECTION + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_SSD) + #define EI_HAS_SSD 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_FOMO) + #define EI_HAS_FOMO 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI) + #define EI_HAS_YOLOV5 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOX) + #define EI_HAS_YOLOX 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV7) + #define EI_HAS_YOLOV7 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_SSD) + #define EI_HAS_TAO_DECODE_DETECTIONS 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4) + #define EI_HAS_TAO_YOLO 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3) + #define EI_HAS_TAO_YOLOV3 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4) + #define EI_HAS_TAO_YOLOV4 1 + #endif + #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV2) + #define EI_HAS_YOLOV2 1 + #endif +#endif + +__attribute__((unused)) inline float sigmoid(float a) { + return 1.0f / (1.0f + exp(-a)); +} + +#ifdef EI_HAS_FOMO +typedef struct cube { + size_t x; + size_t y; + size_t width; + size_t height; + float confidence; + const char *label; +} ei_classifier_cube_t; + +/** + * Checks whether a new section overlaps with a cube, + * and if so, will **update the cube** + */ +__attribute__((unused)) static bool ei_cube_check_overlap(ei_classifier_cube_t *c, int x, int y, int width, int height, float confidence) { + bool is_overlapping = !(c->x + c->width < x || c->y + c->height < y || c->x > x + width || c->y > y + height); + if (!is_overlapping) return false; + + // if we overlap, but the x of the new box is lower than the x of the current box + if (x < c->x) { + // update x to match new box and make width larger (by the diff between the boxes) + c->x = x; + c->width += c->x - x; + } + // if we overlap, but the y of the new box is lower than the y of the current box + if (y < c->y) { + // update y to match new box and make height larger (by the diff between the boxes) + c->y = y; + c->height += c->y - y; + } + // if we overlap, and x+width of the new box is higher than the x+width of the current box + if (x + width > c->x + c->width) { + // just make the box wider + c->width += (x + width) - (c->x + c->width); + } + // if we overlap, and y+height of the new box is higher than the y+height of the current box + if (y + height > c->y + c->height) { + // just make the box higher 
+ c->height += (y + height) - (c->y + c->height); + } + // if the new box has higher confidence, then override confidence of the whole box + if (confidence > c->confidence) { + c->confidence = confidence; + } + return true; +} + +__attribute__((unused)) static void ei_handle_cube(std::vector *cubes, int x, int y, float vf, const char *label, float detection_threshold) { + if (vf < detection_threshold) return; + + bool has_overlapping = false; + int width = 1; + int height = 1; + + for (auto c : *cubes) { + // not cube for same class? continue + if (strcmp(c->label, label) != 0) continue; + + if (ei_cube_check_overlap(c, x, y, width, height, vf)) { + has_overlapping = true; + break; + } + } + + if (!has_overlapping) { + ei_classifier_cube_t *cube = new ei_classifier_cube_t(); + cube->x = x; + cube->y = y; + cube->width = 1; + cube->height = 1; + cube->confidence = vf; + cube->label = label; + cubes->push_back(cube); + } +} + +__attribute__((unused)) static void fill_result_struct_from_cubes(ei_impulse_result_t *result, std::vector *cubes, int out_width_factor, uint32_t object_detection_count) { + std::vector bbs; + static std::vector results; + int added_boxes_count = 0; + results.clear(); + for (auto sc : *cubes) { + bool has_overlapping = false; + + int x = sc->x; + int y = sc->y; + int width = sc->width; + int height = sc->height; + const char *label = sc->label; + float vf = sc->confidence; + + for (auto c : bbs) { + // not cube for same class? continue + if (strcmp(c->label, label) != 0) continue; + + if (ei_cube_check_overlap(c, x, y, width, height, vf)) { + has_overlapping = true; + break; + } + } + + if (has_overlapping) { + continue; + } + + bbs.push_back(sc); + + ei_impulse_result_bounding_box_t tmp = { + .label = sc->label, + .x = (uint32_t)(sc->x * out_width_factor), + .y = (uint32_t)(sc->y * out_width_factor), + .width = (uint32_t)(sc->width * out_width_factor), + .height = (uint32_t)(sc->height * out_width_factor), + .value = sc->confidence + }; + + results.push_back(tmp); + added_boxes_count++; + } + + // if we didn't detect min required objects, fill the rest with fixed value + if (added_boxes_count < object_detection_count) { + results.resize(object_detection_count); + for (size_t ix = added_boxes_count; ix < object_detection_count; ix++) { + results[ix].value = 0.0f; + } + } + + for (auto c : *cubes) { + delete c; + } + + result->bounding_boxes = results.data(); + result->bounding_boxes_count = results.size(); +} +#endif + +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_fomo(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + int out_width, + int out_height) { +#ifdef EI_HAS_FOMO + std::vector cubes; + + int out_width_factor = impulse->input_width / out_width; + + for (size_t y = 0; y < out_width; y++) { + // ei_printf(" [ "); + for (size_t x = 0; x < out_height; x++) { + size_t loc = ((y * out_height) + x) * (impulse->label_count + 1); + + for (size_t ix = 1; ix < impulse->label_count + 1; ix++) { + float vf = data[loc+ix]; + + ei_handle_cube(&cubes, x, y, vf, impulse->categories[ix - 1], block_config->threshold); + } + } + } + + fill_result_struct_from_cubes(result, &cubes, out_width_factor, impulse->object_detection_count); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif +} + +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_i8_fomo(const ei_impulse_t *impulse, + const 
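The FOMO helpers above merge neighbouring above-threshold grid cells into larger "cubes" with an axis-aligned overlap test followed by a box union. The same idea in a self-contained form, using std::min/std::max for the union rather than the incremental updates in the hunk:

#include <algorithm>
#include <cstdio>

struct Box { int x, y, w, h; };

// Axis-aligned boxes overlap (or touch) when neither lies fully to one side
// of the other; this mirrors the predicate in ei_cube_check_overlap() above.
static bool overlaps(const Box &a, const Box &b) {
    return !(a.x + a.w < b.x || a.y + a.h < b.y || a.x > b.x + b.w || a.y > b.y + b.h);
}

// Grow `a` so that it also covers `b` (the "update the cube" step).
static void grow_to_cover(Box &a, const Box &b) {
    int x2 = std::max(a.x + a.w, b.x + b.w);
    int y2 = std::max(a.y + a.h, b.y + b.h);
    a.x = std::min(a.x, b.x);
    a.y = std::min(a.y, b.y);
    a.w = x2 - a.x;
    a.h = y2 - a.y;
}

int main() {
    Box cube = { 2, 2, 1, 1 };     // an existing 1x1 grid cell
    Box cell = { 3, 2, 1, 1 };     // its right-hand neighbour
    if (overlaps(cube, cell)) {
        grow_to_cover(cube, cell); // cube becomes { 2, 2, 2, 1 }
    }
    std::printf("cube: x=%d y=%d w=%d h=%d\n", cube.x, cube.y, cube.w, cube.h);
    return 0;
}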
ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + int8_t *data, + float zero_point, + float scale, + int out_width, + int out_height) { +#ifdef EI_HAS_FOMO + std::vector cubes; + + int out_width_factor = impulse->input_width / out_width; + + for (size_t y = 0; y < out_width; y++) { + // ei_printf(" [ "); + for (size_t x = 0; x < out_height; x++) { + size_t loc = ((y * out_height) + x) * (impulse->label_count + 1); + + for (size_t ix = 1; ix < impulse->label_count + 1; ix++) { + int8_t v = data[loc+ix]; + float vf = static_cast(v - zero_point) * scale; + + ei_handle_cube(&cubes, x, y, vf, impulse->categories[ix - 1], block_config->threshold); + } + } + } + + fill_result_struct_from_cubes(result, &cubes, out_width_factor, impulse->object_detection_count); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif +} + +/** + * Fill the result structure from an unquantized output tensor + * (we don't support quantized here a.t.m.) + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_object_detection(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + float *scores, + float *labels, + bool debug) { +#ifdef EI_HAS_SSD + static std::vector results; + results.clear(); + results.resize(impulse->object_detection_count); + for (size_t ix = 0; ix < impulse->object_detection_count; ix++) { + + float score = scores[ix]; + float label = labels[ix]; + + if (score >= block_config->threshold) { + float ystart = data[(ix * 4) + 0]; + float xstart = data[(ix * 4) + 1]; + float yend = data[(ix * 4) + 2]; + float xend = data[(ix * 4) + 3]; + + if (xstart < 0) xstart = 0; + if (xstart > 1) xstart = 1; + if (ystart < 0) ystart = 0; + if (ystart > 1) ystart = 1; + if (yend < 0) yend = 0; + if (yend > 1) yend = 1; + if (xend < 0) xend = 0; + if (xend > 1) xend = 1; + if (xend < xstart) xend = xstart; + if (yend < ystart) yend = ystart; + + if (debug) { + ei_printf("%s (", impulse->categories[(uint32_t)label]); + ei_printf_float(label); + ei_printf("): "); + ei_printf_float(score); + ei_printf(" [ "); + ei_printf_float(xstart); + ei_printf(", "); + ei_printf_float(ystart); + ei_printf(", "); + ei_printf_float(xend); + ei_printf(", "); + ei_printf_float(yend); + ei_printf(" ]\n"); + } + + results[ix].label = impulse->categories[(uint32_t)label]; + results[ix].x = static_cast(xstart * static_cast(impulse->input_width)); + results[ix].y = static_cast(ystart * static_cast(impulse->input_height)); + results[ix].width = static_cast((xend - xstart) * static_cast(impulse->input_width)); + results[ix].height = static_cast((yend - ystart) * static_cast(impulse->input_height)); + results[ix].value = score; + } + else { + results[ix].value = 0.0f; + } + } + result->bounding_boxes = results.data(); + result->bounding_boxes_count = results.size(); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif +} + +/** + * Fill the result structure from a quantized output tensor + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_i8(const ei_impulse_t *impulse, + ei_impulse_result_t *result, + int8_t *data, + float zero_point, + float scale, + bool debug) { + for (uint32_t ix = 0; ix < impulse->label_count; ix++) { + float value = static_cast(data[ix] - zero_point) * scale; + + if (debug) { + ei_printf("%s:\t", impulse->categories[ix]); + ei_printf_float(value); + ei_printf("\n"); + } + 
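fill_result_struct_i8() above, like the quantized YOLO and TAO variants later in this header, recovers real-valued scores from int8 tensor data with the affine rule value = (q - zero_point) * scale. A tiny standalone illustration with made-up quantization parameters:

#include <cstdint>
#include <cstdio>

// Affine dequantization, as used by the quantized fill_* helpers above.
static float dequantize(int8_t q, float zero_point, float scale) {
    return (static_cast<float>(q) - zero_point) * scale;
}

int main() {
    // Example parameters only; real values come from the TFLite output tensor.
    const float zero_point = -128.0f;
    const float scale = 1.0f / 255.0f;
    int8_t raw = 64;
    std::printf("dequantized score: %f\n", dequantize(raw, zero_point, scale)); // ~0.753
    return 0;
}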
result->classification[ix].label = impulse->categories[ix]; + result->classification[ix].value = value; + } + + return EI_IMPULSE_OK; +} + +/** + * Fill the result structure from an unquantized output tensor + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32(const ei_impulse_t *impulse, + ei_impulse_result_t *result, + float *data, + bool debug) { + for (uint32_t ix = 0; ix < impulse->label_count; ix++) { + float value = data[ix]; + + if (debug) { + ei_printf("%s:\t", impulse->categories[ix]); + ei_printf_float(value); + ei_printf("\n"); + } + result->classification[ix].label = impulse->categories[ix]; + result->classification[ix].value = value; + } + + return EI_IMPULSE_OK; +} + +/** + * Fill the visual anomaly result structures from an unquantized output tensor + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_visual_ad_struct_f32(const ei_impulse_t *impulse, + ei_impulse_result_t *result, + float *data, + float threshold, + bool debug) { +#if EI_CLASSIFIER_HAS_VISUAL_ANOMALY + float max_val = 0; + float sum_val = 0; + // the feature extractor output will be 1/8 of input + // due to the cut-off layer chosen in MobileNetV2 + uint32_t grid_size_x = (impulse->input_width / 8) / 2 - 1; + uint32_t grid_size_y = (impulse->input_height / 8) / 2 - 1; + + for (uint32_t ix = 0; ix < grid_size_x * grid_size_y; ix++) { + float value = data[ix]; + sum_val += value; + if (value > max_val) { + max_val = value; + } + } + + result->visual_ad_result.mean_value = sum_val / (grid_size_x * grid_size_y); + result->visual_ad_result.max_value = max_val; + + static ei_vector results; + + int added_boxes_count = 0; + results.clear(); + + for (uint32_t x = 0; x <= grid_size_x - 1; x++) { + for (uint32_t y = 0; y <= grid_size_y - 1; y++) { + if (data[x * grid_size_x + y] >= threshold) { + ei_impulse_result_bounding_box_t tmp = { + .label = "anomaly", + .x = static_cast(y * (static_cast(impulse->input_height) / grid_size_y)), + .y = static_cast(x * (static_cast(impulse->input_width) / grid_size_x)), + .width = (impulse->input_width / grid_size_x), + .height = (impulse->input_height / grid_size_y), + .value = data[x * grid_size_x + y] + }; + + results.push_back(tmp); + added_boxes_count++; + } + } + } + + // if we didn't detect min required objects, fill the rest with fixed value + if (added_boxes_count < impulse->object_detection_count) { + results.resize(impulse->object_detection_count); + for (size_t ix = added_boxes_count; ix < impulse->object_detection_count; ix++) { + results[ix].value = 0.0f; + } + } + + result->visual_ad_grid_cells = results.data(); + result->visual_ad_count = results.size(); +#endif // EI_CLASSIFIER_HAS_VISUAL_ANOMALY + return EI_IMPULSE_OK; +} + +/** + * Fill the result structure from an unquantized output tensor + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolov5(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + int version, + float *data, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_YOLOV5 + static std::vector results; + results.clear(); + + size_t col_size = 5 + impulse->label_count; + size_t row_count = output_features_count / col_size; + + for (size_t ix = 0; ix < row_count; ix++) { + size_t base_ix = ix * col_size; + float xc = data[base_ix + 0]; + float yc = data[base_ix + 1]; + float w = data[base_ix + 2]; + float h = data[base_ix + 3]; + float x = xc - (w / 2.0f); + float y = yc - (h / 2.0f); + if (x < 0) { + 
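fill_result_visual_ad_struct_f32() above reduces the visual anomaly map to a mean and a max score and turns every grid cell at or above the threshold into an "anomaly" box. The same reduction on a plain float array, with an invented grid size and threshold:

#include <cstdio>
#include <vector>

int main() {
    // Pretend 3x3 anomaly map from the feature extractor (values are made up).
    const int grid_w = 3, grid_h = 3;
    const float threshold = 0.6f;
    std::vector<float> cells = { 0.1f, 0.2f, 0.1f,
                                 0.3f, 0.9f, 0.2f,
                                 0.1f, 0.7f, 0.2f };

    float max_val = 0.0f, sum = 0.0f;
    for (float v : cells) {
        sum += v;
        if (v > max_val) max_val = v;
    }
    std::printf("mean=%.3f max=%.3f\n", sum / cells.size(), max_val);

    // Cells at or above the threshold become "anomaly" boxes in the SDK code.
    for (int y = 0; y < grid_h; y++) {
        for (int x = 0; x < grid_w; x++) {
            float v = cells[y * grid_w + x];
            if (v >= threshold) {
                std::printf("anomalous cell (%d, %d) score %.2f\n", x, y, v);
            }
        }
    }
    return 0;
}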
x = 0; + } + if (y < 0) { + y = 0; + } + if (x + w > impulse->input_width) { + w = impulse->input_width - x; + } + if (y + h > impulse->input_height) { + h = impulse->input_height - y; + } + + if (w < 0 || h < 0) { + continue; + } + + float score = data[base_ix + 4]; + + uint32_t label = 0; + for (size_t lx = 0; lx < impulse->label_count; lx++) { + float l = data[base_ix + 5 + lx]; + if (l > 0.5f) { + label = lx; + break; + } + } + + if (score >= block_config->threshold && score <= 1.0f) { + ei_impulse_result_bounding_box_t r; + r.label = impulse->categories[label]; + + if (version != 5) { + x *= static_cast(impulse->input_width); + y *= static_cast(impulse->input_height); + w *= static_cast(impulse->input_width); + h *= static_cast(impulse->input_height); + } + + r.x = static_cast(x); + r.y = static_cast(y); + r.width = static_cast(w); + r.height = static_cast(h); + r.value = score; + results.push_back(r); + } + } + + EI_IMPULSE_ERROR nms_res = ei_run_nms(impulse, &results, debug); + if (nms_res != EI_IMPULSE_OK) { + return nms_res; + } + + // if we didn't detect min required objects, fill the rest with fixed value + size_t added_boxes_count = results.size(); + size_t min_object_detection_count = impulse->object_detection_count; + if (added_boxes_count < min_object_detection_count) { + results.resize(min_object_detection_count); + for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) { + results[ix].value = 0.0f; + } + } + + result->bounding_boxes = results.data(); + result->bounding_boxes_count = results.size(); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif +} + +/** + * Fill the result structure from a quantized output tensor +*/ +template +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_quantized_yolov5(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + int version, + T *data, + float zero_point, + float scale, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_YOLOV5 + static std::vector results; + results.clear(); + + size_t col_size = 5 + impulse->label_count; + size_t row_count = output_features_count / col_size; + + for (size_t ix = 0; ix < row_count; ix++) { + size_t base_ix = ix * col_size; + float xc = (data[base_ix + 0] - zero_point) * scale; + float yc = (data[base_ix + 1] - zero_point) * scale; + float w = (data[base_ix + 2] - zero_point) * scale; + float h = (data[base_ix + 3] - zero_point) * scale; + float x = xc - (w / 2.0f); + float y = yc - (h / 2.0f); + if (x < 0) { + x = 0; + } + if (y < 0) { + y = 0; + } + if (x + w > impulse->input_width) { + w = impulse->input_width - x; + } + if (y + h > impulse->input_height) { + h = impulse->input_height - y; + } + + if (w < 0 || h < 0) { + continue; + } + + float score = (data[base_ix + 4] - zero_point) * scale; + + uint32_t label = 0; + for (size_t lx = 0; lx < impulse->label_count; lx++) { + float l = (data[base_ix + 5 + lx] - zero_point) * scale; + if (l > 0.5f) { + label = lx; + break; + } + } + + if (score >= block_config->threshold && score <= 1.0f) { + ei_impulse_result_bounding_box_t r; + r.label = ei_classifier_inferencing_categories[label]; + + if (version != 5) { + x *= static_cast(impulse->input_width); + y *= static_cast(impulse->input_height); + w *= static_cast(impulse->input_width); + h *= static_cast(impulse->input_height); + } + + r.x = static_cast(x); + r.y = static_cast(y); + r.width = static_cast(w); + r.height = static_cast(h); + 
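Both YOLOv5 fill routines convert each detection row from centre format (xc, yc, w, h) to a top-left box and clamp it to the input frame before thresholding, as shown above. The conversion in isolation, with an arbitrary input size and sample row:

#include <algorithm>
#include <cstdio>

struct BBox { float x, y, w, h; };

// Centre-format (xc, yc, w, h) to a clamped top-left box, as in the YOLOv5 path above.
static BBox decode_yolov5_row(float xc, float yc, float w, float h,
                              float input_w, float input_h) {
    BBox b;
    b.x = std::max(0.0f, xc - w / 2.0f);
    b.y = std::max(0.0f, yc - h / 2.0f);
    b.w = std::min(w, input_w - b.x);  // clip the right edge
    b.h = std::min(h, input_h - b.y);  // clip the bottom edge
    return b;
}

int main() {
    // A made-up detection row already scaled to pixels (non-v5 outputs are
    // normalised 0..1 and are multiplied by the input size first, see above).
    BBox b = decode_yolov5_row(48.0f, 52.0f, 32.0f, 40.0f, 96.0f, 96.0f);
    std::printf("x=%.1f y=%.1f w=%.1f h=%.1f\n", b.x, b.y, b.w, b.h);
    return 0;
}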
r.value = score; + results.push_back(r); + } + } + + EI_IMPULSE_ERROR nms_res = ei_run_nms(impulse, &results, debug); + if (nms_res != EI_IMPULSE_OK) { + return nms_res; + } + + // if we didn't detect min required objects, fill the rest with fixed value + size_t added_boxes_count = results.size(); + size_t min_object_detection_count = impulse->object_detection_count; + if (added_boxes_count < min_object_detection_count) { + results.resize(min_object_detection_count); + for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) { + results[ix].value = 0.0f; + } + } + + result->bounding_boxes = results.data(); + result->bounding_boxes_count = results.size(); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif +} + +/** + * Fill the result structure from an unquantized output tensor + * (we don't support quantized here a.t.m.) + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolox(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_YOLOX + static std::vector results; + results.clear(); + + // START: def yolox_postprocess() + + // if not p6: + // strides = [8, 16, 32] + // else: + // strides = [8, 16, 32, 64] + const std::vector strides { 8, 16, 32 }; + + // hsizes = [img_size[0] // stride for stride in strides] + // wsizes = [img_size[1] // stride for stride in strides] + std::vector hsizes(strides.size()); + std::vector wsizes(strides.size()); + for (int ix = 0; ix < (int)strides.size(); ix++) { + hsizes[ix] = (int)floor((float)impulse->input_width / (float)strides[ix]); + wsizes[ix] = (int)floor((float)impulse->input_height / (float)strides[ix]); + } + + // for hsize, wsize, stride in zip(hsizes, wsizes, strides): + // grid = np.stack((xv, yv), 2).reshape(1, -1, 2) + // grids.append(grid) + // shape = grid.shape[:2] + // expanded_strides.append(np.full((*shape, 1), stride)) + std::vector grids; + std::vector expanded_strides; + + for (int ix = 0; ix < (int)strides.size(); ix++) { + int hsize = hsizes.at(ix); + int wsize = wsizes.at(ix); + int stride = strides.at(ix); + + // xv, yv = np.meshgrid(np.arange(wsize), np.arange(hsize)) + // grid = np.stack((xv, yv), 2).reshape(1, -1, 2) + matrix_i32_t *grid = new matrix_i32_t(hsize * wsize, 2); + int grid_ix = 0; + for (int h = 0; h < hsize; h++) { + for (int w = 0; w < wsize; w++) { + grid->buffer[grid_ix + 0] = w; + grid->buffer[grid_ix + 1] = h; + grid_ix += 2; + } + } + grids.push_back(grid); + + // shape = grid.shape[:2] + // expanded_strides.append(np.full((*shape, 1), stride)) + matrix_i32_t *expanded_stride = new matrix_i32_t(hsize * wsize, 1); + for (int ix = 0; ix < hsize * wsize; ix++) { + expanded_stride->buffer[ix] = stride; + } + expanded_strides.push_back(expanded_stride); + } + + // grids = np.concatenate(grids, 1) + int total_grid_rows = 0; + for (auto g : grids) { + total_grid_rows += g->rows; + } + matrix_i32_t c_grid(total_grid_rows, 2); + int c_grid_ix = 0; + for (auto g : grids) { + for (int row = 0; row < (int)g->rows; row++) { + c_grid.buffer[c_grid_ix + 0] = g->buffer[(row * 2) + 0]; + c_grid.buffer[c_grid_ix + 1] = g->buffer[(row * 2) + 1]; + c_grid_ix += 2; + } + delete g; + } + + // expanded_strides = np.concatenate(expanded_strides, 1) + int total_stride_rows = 0; + for (auto g : expanded_strides) { + total_stride_rows += g->rows; + } + matrix_i32_t c_expanded_strides(total_stride_rows, 
1); + int c_expanded_strides_ix = 0; + for (auto g : expanded_strides) { + for (int row = 0; row < (int)g->rows; row++) { + c_expanded_strides.buffer[c_expanded_strides_ix + 0] = g->buffer[(row * 1) + 0]; + c_expanded_strides_ix += 1; + } + delete g; + } + + const int output_rows = output_features_count / (5 + impulse->label_count); + matrix_t outputs(output_rows, 5 + impulse->label_count, data); + for (int row = 0; row < (int)outputs.rows; row++) { + float v0 = outputs.buffer[(row * outputs.cols) + 0]; + float v1 = outputs.buffer[(row * outputs.cols) + 1]; + float v2 = outputs.buffer[(row * outputs.cols) + 2]; + float v3 = outputs.buffer[(row * outputs.cols) + 3]; + + float cgrid0 = (float)c_grid.buffer[(row * c_grid.cols) + 0]; + float cgrid1 = (float)c_grid.buffer[(row * c_grid.cols) + 1]; + + float stride = (float)c_expanded_strides.buffer[row]; + + // outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides + outputs.buffer[(row * outputs.cols) + 0] = (v0 + cgrid0) * stride; + outputs.buffer[(row * outputs.cols) + 1] = (v1 + cgrid1) * stride; + + // outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides + outputs.buffer[(row * outputs.cols) + 2] = exp(v2) * stride; + outputs.buffer[(row * outputs.cols) + 3] = exp(v3) * stride; + } + + // END: def yolox_postprocess() + + // boxes = predictions[:, :4] + matrix_t boxes(outputs.rows, 4); + for (int row = 0; row < (int)outputs.rows; row++) { + boxes.buffer[(row * boxes.cols) + 0] = outputs.buffer[(row * outputs.cols) + 0]; + boxes.buffer[(row * boxes.cols) + 1] = outputs.buffer[(row * outputs.cols) + 1]; + boxes.buffer[(row * boxes.cols) + 2] = outputs.buffer[(row * outputs.cols) + 2]; + boxes.buffer[(row * boxes.cols) + 3] = outputs.buffer[(row * outputs.cols) + 3]; + } + + // scores = predictions[:, 4:5] * predictions[:, 5:] + matrix_t scores(outputs.rows, impulse->label_count); + for (int row = 0; row < (int)outputs.rows; row++) { + float confidence = outputs.buffer[(row * outputs.cols) + 4]; + for (int cc = 0; cc < impulse->label_count; cc++) { + scores.buffer[(row * scores.cols) + cc] = confidence * outputs.buffer[(row * outputs.cols) + (5 + cc)]; + } + } + + // iterate through scores to see if we have anything with confidence + for (int row = 0; row < (int)scores.rows; row++) { + for (int col = 0; col < (int)scores.cols; col++) { + float confidence = scores.buffer[(row * scores.cols) + col]; + + if (confidence >= block_config->threshold && confidence <= 1.0f) { + ei_impulse_result_bounding_box_t r; + r.label = impulse->categories[col]; + r.value = confidence; + + // now find the box... 
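The YOLOX post-processing above rebuilds the per-level grids and strides and then maps raw outputs back to pixels with (v + grid) * stride for the centre and exp(v) * stride for the size. A sketch of that mapping for a single cell, with an invented stride and raw values:

#include <cmath>
#include <cstdio>

int main() {
    // One anchor-free YOLOX cell: grid coordinates and the stride of its feature map.
    const float grid_x = 3.0f, grid_y = 2.0f;
    const float stride = 8.0f;

    // Raw network outputs for that cell (made up).
    const float raw_x = 0.4f, raw_y = -0.1f, raw_w = 0.7f, raw_h = 0.3f;

    // outputs[..., :2] = (outputs[..., :2] + grids) * expanded_strides
    float xcenter = (raw_x + grid_x) * stride;
    float ycenter = (raw_y + grid_y) * stride;
    // outputs[..., 2:4] = np.exp(outputs[..., 2:4]) * expanded_strides
    float width  = std::exp(raw_w) * stride;
    float height = std::exp(raw_h) * stride;

    // Centre format to top-left corner, as done when filling the result struct.
    std::printf("x=%.1f y=%.1f w=%.1f h=%.1f\n",
                xcenter - width / 2.0f, ycenter - height / 2.0f, width, height);
    return 0;
}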
+ float xcenter = boxes.buffer[(row * boxes.cols) + 0]; + float ycenter = boxes.buffer[(row * boxes.cols) + 1]; + float width = boxes.buffer[(row * boxes.cols) + 2]; + float height = boxes.buffer[(row * boxes.cols) + 3]; + + int x = (int)(xcenter - (width / 2.0f)); + int y = (int)(ycenter - (height / 2.0f)); + + if (x < 0) { + x = 0; + } + if (x > (int)impulse->input_width) { + x = impulse->input_width; + } + if (y < 0) { + y = 0; + } + if (y > (int)impulse->input_height) { + y = impulse->input_height; + } + + r.x = x; + r.y = y; + r.width = (int)round(width); + r.height = (int)round(height); + + results.push_back(r); + } + } + } + + EI_IMPULSE_ERROR nms_res = ei_run_nms(impulse, &results, debug); + if (nms_res != EI_IMPULSE_OK) { + return nms_res; + } + + // if we didn't detect min required objects, fill the rest with fixed value + size_t added_boxes_count = results.size(); + size_t min_object_detection_count = impulse->object_detection_count; + if (added_boxes_count < min_object_detection_count) { + results.resize(min_object_detection_count); + for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) { + results[ix].value = 0.0f; + } + } + + result->bounding_boxes = results.data(); + result->bounding_boxes_count = results.size(); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // EI_HAS_YOLOX +} + +/** + * Fill the result structure from an unquantized output tensor + * (we don't support quantized here a.t.m.) + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolox_detect(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + size_t output_features_count) { +#ifdef EI_HAS_YOLOX + static std::vector results; + results.clear(); + + // expected format [xmin ymin xmax ymax score label] + const int output_rows = output_features_count / 6; + matrix_t outputs(output_rows, 6, data); + + // iterate through scores to see if we have anything with confidence + for (int row = 0; row < (int)outputs.rows; row++) { + float confidence = outputs.buffer[(row * outputs.cols) + 4]; + int class_idx = (int)outputs.buffer[(row * outputs.cols) + 5]; + + if (confidence >= block_config->threshold && confidence <= 1.0f) { + ei_impulse_result_bounding_box_t r; + r.label = ei_classifier_inferencing_categories[class_idx]; + r.value = confidence; + + // now find the box... + float xmin = outputs.buffer[(row * outputs.cols) + 0]; + float ymin = outputs.buffer[(row * outputs.cols) + 1]; + float xmax = outputs.buffer[(row * outputs.cols) + 2]; + float ymax = outputs.buffer[(row * outputs.cols) + 3]; + + float width = xmax - xmin; + float height = ymax - ymin; + + int x = (int)xmin; + int y = (int)ymin; + + if (x < 0) { + x = 0; + } + if (x > (int)impulse->input_width) { + x = impulse->input_width; + } + if (y < 0) { + y = 0; + } + if (y > (int)impulse->input_height) { + y = impulse->input_height; + } + + r.x = x; + r.y = y; + r.width = (int)round(width); + r.height = (int)round(height); + + results.push_back(r); + } + } + + result->bounding_boxes = results.data(); + result->bounding_boxes_count = results.size(); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // EI_HAS_YOLOX +} + +/** + * Fill the result structure from an unquantized output tensor + * (we don't support quantized here a.t.m.) 
+ */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolov7(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + size_t output_features_count) { +#ifdef EI_HAS_YOLOV7 + static std::vector results; + results.clear(); + + size_t col_size = 7; + size_t row_count = output_features_count / col_size; + + // output is: + // batch_id, xmin, ymin, xmax, ymax, cls_id, score + for (size_t ix = 0; ix < row_count; ix++) { + size_t base_ix = ix * col_size; + float xmin = data[base_ix + 1]; + float ymin = data[base_ix + 2]; + float xmax = data[base_ix + 3]; + float ymax = data[base_ix + 4]; + uint32_t label = (uint32_t)data[base_ix + 5]; + float score = data[base_ix + 6]; + + if (score >= block_config->threshold && score <= 1.0f) { + ei_impulse_result_bounding_box_t r; + r.label = ei_classifier_inferencing_categories[label]; + + r.x = static_cast(xmin); + r.y = static_cast(ymin); + r.width = static_cast(xmax - xmin); + r.height = static_cast(ymax - ymin); + r.value = score; + results.push_back(r); + } + } + + // if we didn't detect min required objects, fill the rest with fixed value + size_t added_boxes_count = results.size(); + size_t min_object_detection_count = impulse->object_detection_count; + if (added_boxes_count < min_object_detection_count) { + results.resize(min_object_detection_count); + for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) { + results[ix].value = 0.0f; + } + } + + result->bounding_boxes = results.data(); + result->bounding_boxes_count = results.size(); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // #ifdef EI_HAS_YOLOV7 +} + +#if (EI_HAS_TAO_DECODE_DETECTIONS == 1) || (EI_HAS_TAO_YOLO == 1) + +__attribute__((unused)) static void prepare_tao_results_common(const ei_impulse_t *impulse, + ei_impulse_result_t *result, + std::vector *results) { + #define EI_CLASSIFIER_OBJECT_DETECTION_KEEP_TOPK 200 + + // if we didn't detect min required objects, fill the rest with fixed value + size_t added_boxes_count = results->size(); + size_t object_detection_count = impulse->object_detection_count; + if (added_boxes_count < object_detection_count) { + results->resize(object_detection_count); + for (size_t ix = added_boxes_count; ix < object_detection_count; ix++) { + (*results)[ix].value = 0.0f; + } + } + + // we sort in reverse order accross all classes, + // since results for each class are pushed to the end. 
+ std::sort(results->begin(), results->end(), [ ]( const ei_impulse_result_bounding_box_t& lhs, const ei_impulse_result_bounding_box_t& rhs ) + { + return lhs.value > rhs.value; + }); + + // keep topK + if (results->size() > EI_CLASSIFIER_OBJECT_DETECTION_KEEP_TOPK) { + results->erase(results->begin() + EI_CLASSIFIER_OBJECT_DETECTION_KEEP_TOPK, results->end()); + } + + result->bounding_boxes = results->data(); + result->bounding_boxes_count = results->size(); +} + + +#endif + +#ifdef EI_HAS_TAO_DECODE_DETECTIONS +/** + * Fill the result structure from an output tensor +*/ +template +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_tao_decode_detections_common(const ei_impulse_t *impulse, + ei_impulse_result_t *result, + T *data, + float zero_point, + float scale, + size_t output_features_count, + float threshold, + bool debug = false) { + + size_t col_size = 12 + impulse->label_count + 1; + size_t row_count = output_features_count / col_size; + + static std::vector results; + static std::vector class_results; + results.clear(); + + for (size_t cls_idx = 1; cls_idx < (size_t)(impulse->label_count + 1); cls_idx++) { + + std::vector boxes; + std::vector scores; + std::vector classes; + class_results.clear(); + + for (size_t ix = 0; ix < row_count; ix++) { + + float score = (static_cast(data[ix * col_size + cls_idx]) - zero_point) * scale; + + if ((score < threshold) || (score > 1.0f)) { + continue; + } + + // # 1. calculate boxes location + size_t base_ix = ix * col_size + col_size; // references the end of the row + + float r_12 = (static_cast(data[base_ix - 12]) - zero_point) * scale; + float r_11 = (static_cast(data[base_ix - 11]) - zero_point) * scale; + float r_10 = (static_cast(data[base_ix - 10]) - zero_point) * scale; + float r_9 = (static_cast(data[base_ix - 9]) - zero_point) * scale; + float r_8 = (static_cast(data[base_ix - 8]) - zero_point) * scale; + float r_7 = (static_cast(data[base_ix - 7]) - zero_point) * scale; + float r_6 = (static_cast(data[base_ix - 6]) - zero_point) * scale; + float r_5 = (static_cast(data[base_ix - 5]) - zero_point) * scale; + float r_4 = (static_cast(data[base_ix - 4]) - zero_point) * scale; + float r_3 = (static_cast(data[base_ix - 3]) - zero_point) * scale; + float r_2 = (static_cast(data[base_ix - 2]) - zero_point) * scale; + float r_1 = (static_cast(data[base_ix - 1]) - zero_point) * scale; + + // cx_pred = y_pred[..., -12] + // cy_pred = y_pred[..., -11] + // w_pred = y_pred[..., -10] + // h_pred = y_pred[..., -9] + float cx_pred = r_12; + float cy_pred = r_11; + float w_pred = r_10; + float h_pred = r_9; + + // w_anchor = y_pred[..., -6] - y_pred[..., -8] + // h_anchor = y_pred[..., -5] - y_pred[..., -7] + float w_anchor = r_6 - r_8; + float h_anchor = r_5 - r_7; + + // cx_anchor = tf.truediv(y_pred[..., -6] + y_pred[..., -8], 2.0) + // cy_anchor = tf.truediv(y_pred[..., -5] + y_pred[..., -7], 2.0) + float cx_anchor = (r_6 + r_8) / 2.0f; + float cy_anchor = (r_5 + r_7) / 2.0f; + + // cx_variance = y_pred[..., -4] + // cy_variance = y_pred[..., -3] + float cx_variance = r_4; + float cy_variance = r_3; + + // variance_w = y_pred[..., -2] + // variance_h = y_pred[..., -1] + float variance_w = r_2; + float variance_h = r_1; + + // # Convert anchor box offsets to image offsets. 
+ // cx = cx_pred * cx_variance * w_anchor + cx_anchor + // cy = cy_pred * cy_variance * h_anchor + cy_anchor + // w = tf.exp(w_pred * variance_w) * w_anchor + // h = tf.exp(h_pred * variance_h) * h_anchor + float cx = cx_pred * cx_variance * w_anchor + cx_anchor; + float cy = cy_pred * cy_variance * h_anchor + cy_anchor; + float w = exp(w_pred * variance_w) * w_anchor; + float h = exp(h_pred * variance_h) * h_anchor; + + // # Convert 'centroids' to 'corners'. + float xmin = cx - (w / 2.0f); + float ymin = cy - (h / 2.0f); + float xmax = cx + (w / 2.0f); + float ymax = cy + (h / 2.0f); + + xmin *= impulse->input_width; + ymin *= impulse->input_height; + xmax *= impulse->input_width; + ymax *= impulse->input_height; + + boxes.push_back(ymin); + boxes.push_back(xmin); + boxes.push_back(ymax); + boxes.push_back(xmax); + scores.push_back(score); + classes.push_back((int)(cls_idx-1)); + } + + size_t nr_boxes = scores.size(); + EI_IMPULSE_ERROR nms_res = ei_run_nms(impulse, &class_results, + boxes.data(), scores.data(), classes.data(), + nr_boxes, + true /*clip_boxes*/, + debug); + + if (nms_res != EI_IMPULSE_OK) { + return nms_res; + } + + for (auto bb: class_results) { + results.push_back(bb); + } + } + + prepare_tao_results_common(impulse, result, &results); + + return EI_IMPULSE_OK; +} +#endif // #ifdef EI_HAS_TAO_DETECT_DETECTIONS + +/** + * Fill the result structure from a quantized output tensor +*/ +template +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_quantized_tao_decode_detections(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + T *data, + float zero_point, + float scale, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_TAO_DECODE_DETECTIONS + return fill_result_struct_tao_decode_detections_common(impulse, result, data, zero_point, scale, output_features_count, block_config->threshold, debug); +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // #ifdef EI_HAS_TAO_DETECT_DETECTIONS +} + + +/** + * Fill the result structure from an unquantized output tensor + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_tao_decode_detections(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_TAO_DECODE_DETECTIONS + return fill_result_struct_tao_decode_detections_common(impulse, result, data, 0.0f, 1.0f, output_features_count, block_config->threshold, debug); +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // #ifdef EI_HAS_TAO_DETECT_DETECTIONS +} + +#ifdef EI_HAS_TAO_YOLOV3 +/** + * Fill the result structure from an output tensor +*/ +template +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_tao_yolov3_common(const ei_impulse_t *impulse, + ei_impulse_result_t *result, + T *data, + float zero_point, + float scale, + size_t output_features_count, + float threshold, + bool debug) { + // # x: 3-D tensor. Last dimension is + // (cy, cx, ph, pw, step_y, step_x, pred_y, pred_x, pred_h, pred_w, object, cls...) 
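The TAO decode-detections helper above turns predicted offsets, anchor corners and variances into an absolute box: the centre is shifted by offset * variance * anchor_size, the size is scaled by exp(offset * variance), and centroids are then converted to corners. The core arithmetic in isolation, with all inputs invented and normalised to 0..1:

#include <cmath>
#include <cstdio>

int main() {
    // Anchor box given by its corners (normalised coordinates, made up).
    const float ax_min = 0.40f, ay_min = 0.40f, ax_max = 0.60f, ay_max = 0.60f;
    const float w_anchor = ax_max - ax_min, h_anchor = ay_max - ay_min;
    const float cx_anchor = (ax_min + ax_max) / 2.0f;
    const float cy_anchor = (ay_min + ay_max) / 2.0f;

    // Predicted offsets and variances for that anchor (made up).
    const float cx_pred = 0.2f, cy_pred = -0.1f, w_pred = 0.3f, h_pred = 0.1f;
    const float cx_var = 0.1f, cy_var = 0.1f, var_w = 0.2f, var_h = 0.2f;

    // Same formulas as the hunk above.
    float cx = cx_pred * cx_var * w_anchor + cx_anchor;
    float cy = cy_pred * cy_var * h_anchor + cy_anchor;
    float w  = std::exp(w_pred * var_w) * w_anchor;
    float h  = std::exp(h_pred * var_h) * h_anchor;

    // Centroids to corners, then scale by the input size to get pixels.
    const float input_w = 320.0f, input_h = 320.0f;
    std::printf("xmin=%.1f ymin=%.1f xmax=%.1f ymax=%.1f (px)\n",
                (cx - w / 2.0f) * input_w, (cy - h / 2.0f) * input_h,
                (cx + w / 2.0f) * input_w, (cy + h / 2.0f) * input_h);
    return 0;
}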
+ size_t col_size = 11 + impulse->label_count; + size_t row_count = output_features_count / col_size; + + static std::vector results; + static std::vector class_results; + + results.clear(); + for (size_t cls_idx = 0; cls_idx < (size_t)impulse->label_count; cls_idx++) { + + std::vector boxes; + std::vector scores; + std::vector classes; + class_results.clear(); + + for (size_t ix = 0; ix < row_count; ix++) { + size_t data_ix = ix * col_size; + float r_0 = (static_cast(data[data_ix + 0]) - zero_point) * scale; + float r_1 = (static_cast(data[data_ix + 1]) - zero_point) * scale; + float r_2 = (static_cast(data[data_ix + 2]) - zero_point) * scale; + float r_3 = (static_cast(data[data_ix + 3]) - zero_point) * scale; + float r_4 = (static_cast(data[data_ix + 4]) - zero_point) * scale; + float r_5 = (static_cast(data[data_ix + 5]) - zero_point) * scale; + float r_6 = (static_cast(data[data_ix + 6]) - zero_point) * scale; + float r_7 = (static_cast(data[data_ix + 7]) - zero_point) * scale; + float r_8 = (static_cast(data[data_ix + 8]) - zero_point) * scale; + float r_9 = (static_cast(data[data_ix + 9]) - zero_point) * scale; + float r_10 = (static_cast(data[data_ix + 10]) - zero_point) * scale; + + float cls = (static_cast(data[data_ix + 11 + cls_idx]) - zero_point) * scale; + float score = sigmoid(cls) * sigmoid(r_10); + + if ((score < threshold) || (score > 1.0f)) { + continue; + } + + float by = r_0 + sigmoid(r_6) * r_4; + float bx = r_1 + sigmoid(r_7) * r_5; + float bh = r_2 * exp(r_8); + float bw = r_3 * exp(r_9); + + float ymin = by - 0.5 * bh; + float xmin = bx - 0.5 * bw; + float ymax = by + 0.5 * bh; + float xmax = bx + 0.5 * bw; + + // from relative to absolute + ymin *= impulse->input_height; + xmin *= impulse->input_width; + ymax *= impulse->input_height; + xmax *= impulse->input_width; + + boxes.push_back(ymin); + boxes.push_back(xmin); + boxes.push_back(ymax); + boxes.push_back(xmax); + scores.push_back(score); + classes.push_back((int)cls_idx); + } + + size_t nr_boxes = scores.size(); + EI_IMPULSE_ERROR nms_res = ei_run_nms(impulse, &class_results, + boxes.data(), scores.data(), classes.data(), + nr_boxes, + true /*clip_boxes*/, + debug); + if (nms_res != EI_IMPULSE_OK) { + return nms_res; + } + + for (auto bb: class_results) { + results.push_back(bb); + } + } + + prepare_tao_results_common(impulse, result, &results); + return EI_IMPULSE_OK; +} +#endif // #ifdef EI_HAS_TAO_YOLOV3 + +#ifdef EI_HAS_TAO_YOLOV4 +/** + * Fill the result structure from an output tensor +*/ +template +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_tao_yolov4_common(const ei_impulse_t *impulse, + ei_impulse_result_t *result, + T *data, + float zero_point, + float scale, + size_t output_features_count, + float threshold, + bool debug) { + // # x: 3-D tensor. Last dimension is + // (cy, cx, ph, pw, step_y, step_x, pred_y, pred_x, pred_h, pred_w, object, cls...) 
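In the TAO YOLOv3/v4 decoders above, the per-class score is sigmoid(class_logit) * sigmoid(objectness) and the box comes from the cell origin, its step and the anchor priors. A sketch of the score fusion and the v3-style box decode, with all numbers invented:

#include <cmath>
#include <cstdio>

static float sigmoid_f(float a) { return 1.0f / (1.0f + std::exp(-a)); }

int main() {
    // Raw logits for one cell/anchor combination (made up).
    const float objectness_logit = 1.2f;
    const float class_logit = 0.8f;

    // score = sigmoid(cls) * sigmoid(objectness), as in the TAO YOLO hunks above.
    float score = sigmoid_f(class_logit) * sigmoid_f(objectness_logit);

    // YOLOv3-style box decode: cell origin (cy, cx), steps, anchor priors (ph, pw)
    // and raw predictions (pred_y, pred_x, pred_h, pred_w), all invented here.
    const float cy = 0.25f, cx = 0.50f, ph = 0.10f, pw = 0.15f;
    const float step_y = 0.125f, step_x = 0.125f;
    const float pred_y = 0.3f, pred_x = -0.2f, pred_h = 0.1f, pred_w = 0.2f;

    float by = cy + sigmoid_f(pred_y) * step_y;
    float bx = cx + sigmoid_f(pred_x) * step_x;
    float bh = ph * std::exp(pred_h);
    float bw = pw * std::exp(pred_w);

    std::printf("score=%.3f centre=(%.3f, %.3f) size=(%.3f, %.3f)\n",
                score, bx, by, bw, bh);
    return 0;
}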
+ size_t col_size = 11 + impulse->label_count; + size_t row_count = output_features_count / col_size; + + static std::vector results; + static std::vector class_results; + results.clear(); + + const float grid_scale_xy = 1.0f; + + for (size_t cls_idx = 0; cls_idx < (size_t)impulse->label_count; cls_idx++) { + + std::vector boxes; + std::vector scores; + std::vector classes; + class_results.clear(); + + for (size_t ix = 0; ix < row_count; ix++) { + + float r_0 = (static_cast(data[ix * col_size + 0]) - zero_point) * scale; + float r_1 = (static_cast(data[ix * col_size + 1]) - zero_point) * scale; + float r_2 = (static_cast(data[ix * col_size + 2]) - zero_point) * scale; + float r_3 = (static_cast(data[ix * col_size + 3]) - zero_point) * scale; + float r_4 = (static_cast(data[ix * col_size + 4]) - zero_point) * scale; + float r_5 = (static_cast(data[ix * col_size + 5]) - zero_point) * scale; + float r_6 = (static_cast(data[ix * col_size + 6]) - zero_point) * scale; + float r_7 = (static_cast(data[ix * col_size + 7]) - zero_point) * scale; + float r_8 = (static_cast(data[ix * col_size + 8]) - zero_point) * scale; + float r_9 = (static_cast(data[ix * col_size + 9]) - zero_point) * scale; + float r_10 = (static_cast(data[ix * col_size + 10]) - zero_point) * scale; + + float cls = (static_cast(data[ix * col_size + 11 + cls_idx]) - zero_point) * scale; + float score = sigmoid(cls) * sigmoid(r_10); + + if ((score < threshold) || (score > 1.0f)) { + continue; + } + + float pred_y = sigmoid(r_6) * grid_scale_xy - (grid_scale_xy - 1.0f) / 2.0f; + float pred_x = sigmoid(r_7) * grid_scale_xy - (grid_scale_xy - 1.0f) / 2.0f; + float pred_h = exp(std::min(r_8, 8.0f)); + float pred_w = exp(std::min(r_9, 8.0f)); + + r_6 = pred_y; + r_7 = pred_x; + r_8 = pred_h; + r_9 = pred_w; + + float by = r_0 + r_6 * r_4; + float bx = r_1 + r_7 * r_5; + float bh = r_2 * r_8; + float bw = r_3 * r_9; + + float ymin = by - 0.5 * bh; + float xmin = bx - 0.5 * bw; + float ymax = by + 0.5 * bh; + float xmax = bx + 0.5 * bw; + + // from relative to absolute + ymin *= impulse->input_height; + xmin *= impulse->input_width; + ymax *= impulse->input_height; + xmax *= impulse->input_width; + + boxes.push_back(ymin); + boxes.push_back(xmin); + boxes.push_back(ymax); + boxes.push_back(xmax); + scores.push_back(score); + classes.push_back((int)cls_idx); + } + + size_t nr_boxes = scores.size(); + EI_IMPULSE_ERROR nms_res = ei_run_nms(impulse, &class_results, + boxes.data(), scores.data(), classes.data(), + nr_boxes, + true /*clip_boxes*/, + debug); + if (nms_res != EI_IMPULSE_OK) { + return nms_res; + } + + for (auto bb: class_results) { + results.push_back(bb); + } + } + + prepare_tao_results_common(impulse, result, &results); + return EI_IMPULSE_OK; +} +#endif // #ifdef EI_HAS_TAO_YOLOV4 + +/** + * Fill the result structure from an unquantized output tensor + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_tao_yolov3(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_TAO_YOLOV3 + return fill_result_struct_tao_yolov3_common(impulse, result, data, 0.0f, 1.0f, output_features_count, block_config->threshold, debug); +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // #ifdef EI_HAS_TAO_YOLOV3 +} + +/** + * Fill the result structure from a quantized output tensor +*/ +template +__attribute__((unused)) static EI_IMPULSE_ERROR 
fill_result_struct_quantized_tao_yolov3(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + T *data, + float zero_point, + float scale, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_TAO_YOLOV3 + return fill_result_struct_tao_yolov3_common(impulse, result, data, zero_point, scale, output_features_count, block_config->threshold, debug); +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // #ifdef EI_HAS_TAO_YOLOV3 +} + +/** + * Fill the result structure from an unquantized output tensor + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_tao_yolov4(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_TAO_YOLOV4 + return fill_result_struct_tao_yolov4_common(impulse, result, data, 0.0f, 1.0f, output_features_count, block_config->threshold, debug); +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // #ifdef EI_HAS_TAO_YOLOV4 +} + +/** + * Fill the result structure from a quantized output tensor +*/ +template +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_quantized_tao_yolov4(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + T *data, + float zero_point, + float scale, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_TAO_YOLOV4 + return fill_result_struct_tao_yolov4_common(impulse, result, data, zero_point, scale, output_features_count, block_config->threshold, debug); +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // #ifdef EI_HAS_TAO_YOLOV4 +} + +#ifdef EI_HAS_YOLOV2 +// based on akida_models-1.2.0/detection/processing.py +// input is "2D" array with shape [grid_h * grid_w * nb_box, nb_classes] +__attribute__((unused)) static void softmax(std::vector& input, const size_t nb_classes) +{ + const float max = *std::max_element(input.begin(), input.end()); + const float min = *std::min_element(input.begin(), input.end()); + const float t = -100.0f; + + // x = x - np.max(x) + std::transform(input.begin(), input.end(), input.begin(), + [max](float x) { return x - max; }); + + // if np.min(x) < t: x = x / np.min(x) * t + std::transform(input.begin(), input.end(), input.begin(), + [min, t](float x) { return x < t ? 
(x / min * t): x; }); + + // e_x = np.exp(x) + // do it in place as we don't need raw the input anymore + std::transform(input.begin(), input.end(), input.begin(), + [](float x) { return std::exp(x); }); + + // e_x / e_x.sum(axis, keepdims=True) + // calculated for each 'row', across nb_classes + for(auto it = input.begin(); it != input.end(); it += nb_classes) { + float sum = 0.0f; + // e_x.sum(axis, keepdims=True) + for(auto it2 = it; it2 != it + nb_classes; it2++) { + sum += *it2; + } + // e_x / e_x.sum(axis, keepdims=True) + std::transform(it, it + nb_classes, it, + [sum](float ex) { return ex / sum; }); + } +} + +class BoundingBox { +public: + float x1, y1, x2, y2, confidence; + std::vector classes; + + BoundingBox(float x1, float y1, float x2, float y2, float confidence, const std::vector& classes) + : x1(x1), y1(y1), x2(x2), y2(y2), confidence(confidence), classes(classes) {} + + float get_score() const { + return confidence; + } + + int get_label() const { + auto maxElementIndex = std::max_element(classes.begin(), classes.end()) - classes.begin(); + return maxElementIndex; + } + + float _interval_overlap(float x1, float x2, float x3, float x4) const { + if(x3 < x1) { + if(x4 < x1) { + return 0; + } + return std::min(x2, x4) - x1; + } + if(x2 < x3) { + return 0; + } + return std::min(x2, x4) - x3; + } + + + float iou(const BoundingBox& other) const { + // Implementation of the Intersection over Union calculation + float intersect_w = this->_interval_overlap(this->x1, this->x2, other.x1, other.x2); + float intersect_h = this->_interval_overlap(this->y1, this->y2, other.y1, other.y2); + + float intersect = intersect_w * intersect_h; + + float w1 = this->x2 - this->x1; + float h1 = this->y2 - this->y1; + float w2 = other.x2 - other.x1; + float h2 = other.y2 - other.y1; + + float un = w1 * h1 + w2 * h2 - intersect; + + return float(intersect) / un; + } +}; +#endif // EI_HAS_YOLOV2 +/** + * Fill the result structure from an unquantized output tensor + */ +__attribute__((unused)) static EI_IMPULSE_ERROR fill_result_struct_f32_yolov2(const ei_impulse_t *impulse, + const ei_learning_block_config_tflite_graph_t *block_config, + ei_impulse_result_t *result, + float *data, + size_t output_features_count, + bool debug = false) { +#ifdef EI_HAS_YOLOV2 + static std::vector results; + results.clear(); + + // Example output shape: (7, 7, 5, 7) + // TODO: calculate grid_h, grid_w, nb_box from output_features_count or get as a param + // grid_h, grid_w, nb_box = output.shape[:3] + const size_t grid_h = 7; + const size_t grid_w = 7; + const size_t nb_box = 5; + const std::vector> anchors = {{0.56594, 1.05012}, {1.0897, 2.03908}, {2.37823, 3.00376}, {2.4593, 4.913}, {5.15981, 5.56699}}; + + const size_t nb_classes = impulse->label_count; + const float obj_threshold = 0.5; + const float nms_threshold = 0.5; + std::vector output; + const int stride = 4 + 1 + nb_classes; + + output.assign(data, data + output_features_count); + + // boxes = [] + std::vector boxes; + + // equivalent to: classes_confidences = output[..., 5:] + std::vector classes_confidences; + const size_t dim = 5; + for(auto it = output.begin() + dim; it <= output.end(); it += (dim + nb_classes)) { + classes_confidences.insert(classes_confidences.end(), it, it + nb_classes); + } + // calculate softmax for later use, we need to calculate it across the whole input data so operate on a sliced output + softmax(classes_confidences, nb_classes); + + for (size_t row = 0; row < grid_h; ++row) { + for (size_t col = 0; col < grid_w; ++col) { + for 
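The BoundingBox::iou() helper above builds IoU from two one-dimensional interval overlaps. The same calculation without the class wrapper, using the usual min/max form of the interval overlap:

#include <algorithm>
#include <cstdio>

// Overlap of [a1, a2] and [b1, b2] on one axis (0 when disjoint).
static float interval_overlap(float a1, float a2, float b1, float b2) {
    return std::max(0.0f, std::min(a2, b2) - std::max(a1, b1));
}

// Intersection over union for corner-format boxes (x1, y1, x2, y2).
static float iou(float ax1, float ay1, float ax2, float ay2,
                 float bx1, float by1, float bx2, float by2) {
    float inter = interval_overlap(ax1, ax2, bx1, bx2) *
                  interval_overlap(ay1, ay2, by1, by2);
    float uni = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter;
    return inter / uni;
}

int main() {
    // Two partially overlapping boxes (made-up coordinates).
    std::printf("IoU = %.3f\n", iou(0, 0, 2, 2, 1, 1, 3, 3)); // 1 / 7, about 0.143
    return 0;
}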
(size_t b = 0; b < nb_box; ++b) { + size_t idx = row * grid_w * nb_box * stride + col * nb_box * stride + b * stride; + size_t classes_idx = row * grid_w * nb_box * nb_classes + col * nb_box * nb_classes + b * nb_classes; + + // Apply sigmoid to the 4th element + // output[..., 4] = _sigmoid(output[..., 4]) + float sigmoid_val = sigmoid(output[idx + 4]); + output[idx + 4] = sigmoid_val; + + // classes = output[row, col, b, 5:] + std::vector classes(classes_confidences.begin() + classes_idx, classes_confidences.begin() + classes_idx + nb_classes); + + // output[..., 5:] = output[..., 4][..., np.newaxis] * _softmax(output[..., 5:]) + // output[..., 5:] *= output[..., 5:] > obj_threshold + std::transform(classes.begin(), classes.end(), classes.begin(), + [sigmoid_val, obj_threshold](float c) { c *= sigmoid_val; return c > obj_threshold ? c : 0.0f; }); + + // if np.sum(classes) > 0: + float sum = 0.0f; + for(auto it = classes.begin(); it != classes.end(); it++) { + sum += *it; + } + if(sum > 0.0f) { + // x, y, w, h = output[row, col, b, :4] + float x = output[idx + 0]; + float y = output[idx + 1]; + float w = output[idx + 2]; + float h = output[idx + 3]; + + // x = (col + _sigmoid(x)) / grid_w # center position, unit: image width + x = (col + sigmoid(x)) / grid_w; + // y = (row + _sigmoid(y)) / grid_h # center position, unit: image height + y = (row + sigmoid(y)) / grid_h; + // w = anchors[b][0] * np.exp(w) / grid_w # unit: image width + w = anchors[b].first * std::exp(w) / grid_w; + // h = anchors[b][1] * np.exp(h) / grid_h # unit: image height + h = anchors[b].second * std::exp(h) / grid_h; + + // confidence = output[row, col, b, 4] + float confidence = output[idx + 4]; + + // x1 = max(x - w / 2, 0) + float x1 = std::max(x - w / 2, 0.0f); + // y1 = max(y - h / 2, 0) + float y1 = std::max(y - h / 2, 0.0f); + // x2 = min(x + w / 2, grid_w) + float x2 = std::min(x + w / 2, static_cast(grid_w)); + // y2 = min(y + h / 2, grid_h) + float y2 = std::min(y + h / 2, static_cast(grid_h)); + + boxes.emplace_back(x1, y1, x2, y2, confidence, classes); + } + } + } + } + + // Non-maximal suppression (on boxes) + for (size_t c = 0; c < nb_classes; ++c) { + std::vector> sorted_indices; + for (size_t i = 0; i < boxes.size(); ++i) { + sorted_indices.emplace_back(boxes[i].classes[c], i); + } + + std::sort(sorted_indices.begin(), sorted_indices.end(), + [](const std::pair& a, const std::pair& b) { + return a.first > b.first; + }); + + for (size_t i = 0; i < sorted_indices.size(); ++i) { + int index_i = sorted_indices[i].second; + if (boxes[index_i].classes[c] == 0) + continue; + + for (size_t j = i + 1; j < sorted_indices.size(); ++j) { + int index_j = sorted_indices[j].second; + + if ((boxes[index_i].iou(boxes[index_j]) >= nms_threshold) && + (boxes[index_i].get_label() == (int)c) && + (boxes[index_j].get_label() == (int)c)) { + boxes[index_j].confidence = 0; + } + } + } + } + + // remove the boxes which are less likely than a obj_threshold + boxes.erase(std::remove_if(boxes.begin(), boxes.end(), + [obj_threshold](const BoundingBox& box) { + return box.get_score() <= obj_threshold; + }), boxes.end()); + + // sort boxes by box.get_score() + std::sort(boxes.begin(), boxes.end(), + [](const BoundingBox& a, const BoundingBox& b) { + return a.get_score() > b.get_score(); + }); + + // convert relative coordinates to absolute coordinates + for(auto & box: boxes) { + ei_impulse_result_bounding_box_t res; + res.label = ei_classifier_inferencing_categories[box.get_label()]; + res.x = ceil(box.x1 * impulse->input_width); + 
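The YOLOv2 path then applies per-class greedy suppression, as in the loop above: boxes are visited in descending score order and any later box whose IoU with a kept box reaches the threshold has its confidence zeroed. A compact sketch of that loop over a made-up IoU matrix:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    const float nms_threshold = 0.5f;
    std::vector<float> scores = { 0.9f, 0.75f, 0.6f };
    // Precomputed pairwise IoU values (invented for the example).
    const float iou_mat[3][3] = {
        { 1.0f, 0.7f, 0.1f },
        { 0.7f, 1.0f, 0.2f },
        { 0.1f, 0.2f, 1.0f },
    };

    // Indices sorted by descending score.
    std::vector<size_t> order = { 0, 1, 2 };
    std::sort(order.begin(), order.end(),
              [&](size_t a, size_t b) { return scores[a] > scores[b]; });

    for (size_t i = 0; i < order.size(); i++) {
        size_t keep = order[i];
        if (scores[keep] == 0.0f) continue;   // already suppressed
        for (size_t j = i + 1; j < order.size(); j++) {
            size_t other = order[j];
            if (iou_mat[keep][other] >= nms_threshold) {
                scores[other] = 0.0f;         // suppress the weaker overlapping box
            }
        }
    }

    for (size_t i = 0; i < scores.size(); i++) {
        std::printf("box %zu: score %.2f%s\n", i, scores[i],
                    scores[i] == 0.0f ? " (suppressed)" : "");
    }
    return 0;
}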
res.y = ceil(box.y1 * impulse->input_height); + res.width = ceil((box.x2 - box.x1) * impulse->input_width); + res.height = ceil((box.y2 - box.y1) * impulse->input_height); + res.value = box.get_score(); + results.push_back(res); + } + + // if we didn't detect min required objects, fill the rest with fixed value + size_t added_boxes_count = results.size(); + size_t min_object_detection_count = impulse->object_detection_count; + if (added_boxes_count < min_object_detection_count) { + results.resize(min_object_detection_count); + for (size_t ix = added_boxes_count; ix < min_object_detection_count; ix++) { + results[ix].value = 0.0f; + } + } + + result->bounding_boxes = results.data(); + result->bounding_boxes_count = results.size(); + + return EI_IMPULSE_OK; +#else + return EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE; +#endif // #ifdef EI_HAS_YOLOV7 +} + +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 +bool find_mtx_by_idx(ei_feature_t* mtx, ei::matrix_t** matrix, uint32_t mtx_id, size_t mtx_size) { + for (size_t i = 0; i < mtx_size; i++) { + if (mtx[i].matrix == NULL) { + continue; + } + if (mtx[i].blockId == mtx_id || mtx[i].blockId == 0) { + *matrix = mtx[i].matrix; + return true; + } + } + return false; +} +#endif + +#endif // _EI_CLASSIFIER_FILL_RESULT_STRUCT_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_model_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_model_types.h index 7e2de8d..4c36205 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_model_types.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_model_types.h @@ -1,45 +1,336 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
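[Editorial aside] find_mtx_by_idx(), added a few lines up, resolves a learning block's input matrix by block id, with id 0 acting as a wildcard; it is only compiled when EI_CLASSIFIER_SINGLE_FEATURE_INPUT is 0. A hedged usage sketch; the feature array and the block id are invented for illustration:

    // Illustration only: 'features' is normally filled by process_impulse().
    ei::matrix_t dsp_out(1, 32);
    ei_feature_t features[2] = {};
    features[0].matrix = &dsp_out;
    features[0].blockId = 3;            // hypothetical id of the producing DSP block

    ei::matrix_t *input = nullptr;
    if (find_mtx_by_idx(features, &input, 3, 2)) {
        // 'input' now points at dsp_out and can be handed to a learning block
    }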
+ * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EDGE_IMPULSE_MODEL_TYPES_H_ #define _EDGE_IMPULSE_MODEL_TYPES_H_ #include + +#include "edge-impulse-sdk/classifier/ei_classifier_types.h" +#include "edge-impulse-sdk/dsp/ei_dsp_handle.h" #include "edge-impulse-sdk/dsp/numpy.hpp" +#if EI_CLASSIFIER_USE_FULL_TFLITE || (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA) || (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_MEMRYX) +#include "tensorflow-lite/tensorflow/lite/c/common.h" +#else +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#endif // EI_CLASSIFIER_USE_FULL_TFLITE + +#define EI_CLASSIFIER_NONE 255 +#define EI_CLASSIFIER_UTENSOR 1 +#define EI_CLASSIFIER_TFLITE 2 +#define EI_CLASSIFIER_CUBEAI 3 +#define EI_CLASSIFIER_TFLITE_FULL 4 +#define EI_CLASSIFIER_TENSAIFLOW 5 +#define EI_CLASSIFIER_TENSORRT 6 +#define EI_CLASSIFIER_DRPAI 7 +#define EI_CLASSIFIER_TFLITE_TIDL 8 +#define EI_CLASSIFIER_AKIDA 9 +#define EI_CLASSIFIER_SYNTIANT 10 +#define EI_CLASSIFIER_ONNX_TIDL 11 +#define EI_CLASSIFIER_MEMRYX 12 + +#define EI_CLASSIFIER_SENSOR_UNKNOWN -1 +#define EI_CLASSIFIER_SENSOR_MICROPHONE 1 +#define EI_CLASSIFIER_SENSOR_ACCELEROMETER 2 +#define EI_CLASSIFIER_SENSOR_CAMERA 3 +#define EI_CLASSIFIER_SENSOR_9DOF 4 +#define EI_CLASSIFIER_SENSOR_ENVIRONMENTAL 5 +#define EI_CLASSIFIER_SENSOR_FUSION 6 + +// These must match the enum values in TensorFlow Lite's "TfLiteType" +#define EI_CLASSIFIER_DATATYPE_FLOAT32 1 +#define EI_CLASSIFIER_DATATYPE_INT8 9 + +#define EI_CLASSIFIER_LAST_LAYER_UNKNOWN -1 +#define EI_CLASSIFIER_LAST_LAYER_SSD 1 +#define EI_CLASSIFIER_LAST_LAYER_FOMO 2 +#define EI_CLASSIFIER_LAST_LAYER_YOLOV5 3 +#define EI_CLASSIFIER_LAST_LAYER_YOLOX 4 +#define EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI 5 +#define EI_CLASSIFIER_LAST_LAYER_YOLOV7 6 +#define EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET 7 +#define EI_CLASSIFIER_LAST_LAYER_TAO_SSD 8 +#define EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3 9 +#define EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4 10 +#define EI_CLASSIFIER_LAST_LAYER_YOLOV2 11 + +#define EI_CLASSIFIER_IMAGE_SCALING_NONE 0 +#define EI_CLASSIFIER_IMAGE_SCALING_0_255 1 +#define EI_CLASSIFIER_IMAGE_SCALING_TORCH 2 +#define EI_CLASSIFIER_IMAGE_SCALING_MIN1_1 3 +#define EI_CLASSIFIER_IMAGE_SCALING_MIN128_127 4 +#define EI_CLASSIFIER_IMAGE_SCALING_BGR_SUBTRACT_IMAGENET_MEAN 5 + +// maps back to ClassificationMode in keras-types.ts +#define EI_CLASSIFIER_CLASSIFICATION_MODE_CLASSIFICATION 1 +#define EI_CLASSIFIER_CLASSIFICATION_MODE_REGRESSION 2 +#define EI_CLASSIFIER_CLASSIFICATION_MODE_OBJECT_DETECTION 3 +#define EI_CLASSIFIER_CLASSIFICATION_MODE_ANOMALY_GMM 4 +#define EI_CLASSIFIER_CLASSIFICATION_MODE_VISUAL_ANOMALY 5 +#define EI_CLASSIFIER_CLASSIFICATION_MODE_ANOMALY_KMEANS 6 +#define EI_CLASSIFIER_CLASSIFICATION_MODE_DSP 7 + +struct ei_impulse; + +typedef struct { + ei::matrix_t* matrix; + uint32_t blockId; +} ei_feature_t; + +typedef struct { + uint16_t implementation_version; + bool is_configured; + uint32_t average_window_duration_ms; + float detection_threshold; + uint32_t suppression_ms; + uint32_t suppression_flags; +} ei_model_performance_calibration_t; + +typedef int (*extract_fn_t)(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config, float frequency); typedef struct { + uint32_t blockId; size_t n_output_features; - int (*extract_fn)(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config, const float frequency); + extract_fn_t extract_fn; void *config; uint8_t *axes; size_t axes_size; + int version; // future proof, can easily add to this struct now 
+ DspHandle* (*factory)(void* config, float sampling_freq); // nullptr means no state + // v1 ends here } ei_model_dsp_t; typedef struct { - size_t n_output_features; - int (*extract_fn)(ei::signal_i16_t *signal, ei::matrix_i32_t *output_matrix, void *config, const float frequency); + float *centroid; + float max_error; +} ei_classifier_anom_cluster_t; + +typedef struct { + uint32_t blockId; + bool keep_output; + EI_IMPULSE_ERROR (*infer_fn)(const ei_impulse *impulse, ei_feature_t *fmatrix, uint32_t learn_block_index, uint32_t* input_block_ids, uint32_t input_block_ids_size, ei_impulse_result_t *result, void *config, bool debug); void *config; - uint8_t *axes; - size_t axes_size; -} ei_model_dsp_i16_t; + int image_scaling; + const uint32_t* input_block_ids; + const uint32_t input_block_ids_size; + uint32_t output_features_count; +} ei_learning_block_t; + +typedef struct { + uint16_t implementation_version; + uint8_t input_datatype; + bool input_quantized; + float input_scale; + float input_zeropoint; + uint8_t output_datatype; + bool output_quantized; + float output_scale; + float output_zeropoint; +} ei_config_tensaiflow_graph_t; + +typedef struct { + uint16_t implementation_version; + const unsigned char *model; + size_t model_size; + size_t arena_size; +} ei_config_tflite_graph_t; + +typedef struct { + uint16_t implementation_version; + TfLiteStatus (*model_init)(void*(*alloc_fnc)(size_t, size_t)); + TfLiteStatus (*model_invoke)(); + TfLiteStatus (*model_reset)(void (*free)(void* ptr)); + TfLiteStatus (*model_input)(int, TfLiteTensor*); + TfLiteStatus (*model_output)(int, TfLiteTensor*); +} ei_config_tflite_eon_graph_t; + +typedef struct { + uint16_t implementation_version; + uint8_t classification_mode; + uint32_t block_id; + /* object detection */ + bool object_detection; + int8_t object_detection_last_layer; + uint8_t output_data_tensor; + uint8_t output_labels_tensor; + uint8_t output_score_tensor; + /* object detection and visual AD */ + float threshold; + /* tflite graph params */ + bool quantized; + bool compiled; + /* tflite graph config pointer */ + void *graph_config; +} ei_learning_block_config_tflite_graph_t; + +typedef struct { + uint16_t implementation_version; + uint8_t classification_mode; + const uint16_t *anom_axis; + uint16_t anom_axes_size; + const ei_classifier_anom_cluster_t *anom_clusters; + uint16_t anom_cluster_count; + const float *anom_scale; + const float *anom_mean; +} ei_learning_block_config_anomaly_kmeans_t; + +typedef struct { + uint16_t implementation_version; + uint8_t classification_mode; + const uint16_t *anom_axis; + uint16_t anom_axes_size; + float anomaly_threshold; + bool visual; + void* graph_config; +} ei_learning_block_config_anomaly_gmm_t; + +typedef struct { + float confidence_threshold; + float iou_threshold; +} ei_object_detection_nms_config_t; + +typedef struct ei_impulse { + /* project details */ + uint32_t project_id; + const char *project_owner; + const char *project_name; + uint32_t deploy_version; + + /* DSP details */ + uint32_t nn_input_frame_size; + uint32_t raw_sample_count; + uint32_t raw_samples_per_frame; + uint32_t dsp_input_frame_size; + uint32_t input_width; + uint32_t input_height; + uint32_t input_frames; + float interval_ms; + float frequency; + size_t dsp_blocks_size; + ei_model_dsp_t *dsp_blocks; + + /* object detection */ + uint16_t object_detection_count; + uint32_t fomo_output_size; + uint32_t tflite_output_features_count; + + /* learning blocks */ + const size_t learning_blocks_size; + const ei_learning_block_t 
*learning_blocks; + + /* inference parameters */ + uint32_t inferencing_engine; + + /* sensors and on-device inference */ + uint32_t sensor; + const char *fusion_string; + uint32_t slice_size; + uint32_t slices_per_model_window; + + /* output details */ + uint16_t has_anomaly; + uint16_t label_count; + const ei_model_performance_calibration_t calibration; + const char **categories; + ei_object_detection_nms_config_t object_detection_nms; +} ei_impulse_t; + +class ei_impulse_state_t { +typedef DspHandle* _dsp_handle_ptr_t; +public: + const ei_impulse_t *impulse; // keep a pointer to the impulse + _dsp_handle_ptr_t *dsp_handles; + bool is_temp_handle = false; // to know if we're using the old (stateless) API + ei_impulse_state_t(const ei_impulse_t *impulse) + : impulse(impulse) + { + const auto num_dsp_blocks = impulse->dsp_blocks_size; + dsp_handles = (_dsp_handle_ptr_t*)ei_malloc(sizeof(_dsp_handle_ptr_t)*num_dsp_blocks); + for(size_t ix = 0; ix < num_dsp_blocks; ix++) { + dsp_handles[ix] = nullptr; + } + } + + DspHandle* get_dsp_handle(size_t ix) { + if (dsp_handles[ix] == nullptr) { + dsp_handles[ix] = impulse->dsp_blocks[ix].factory(impulse->dsp_blocks[ix].config, impulse->frequency); + } + return dsp_handles[ix]; + } + + void reset() + { + for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) { + if (dsp_handles[ix] != nullptr) { + delete dsp_handles[ix]; + dsp_handles[ix] = nullptr; + } + } + } + + void* operator new(size_t size) { + return ei_malloc(size); + } + + void operator delete(void* ptr) { + ei_free(ptr); + } + + void* operator new[](size_t size) { + return ei_malloc(size); + } + + void operator delete[](void* ptr) { + ei_free(ptr); + } + + ~ei_impulse_state_t() + { + reset(); + ei_free(dsp_handles); + } +}; + +class ei_impulse_handle_t { +public: + ei_impulse_handle_t(const ei_impulse_t *impulse) + : state(impulse), impulse(impulse) {}; + ei_impulse_state_t state; + const ei_impulse_t *impulse; +}; + +typedef struct { + uint32_t block_id; + uint16_t implementation_version; + int axes; + const unsigned char *model; + size_t model_size; + size_t arena_size; +} ei_dsp_config_tflite_t; + +typedef struct { + uint32_t block_id; + uint16_t implementation_version; + int axes; + TfLiteStatus (*init_fn)(void*(*alloc_fnc)(size_t, size_t)); + TfLiteStatus (*invoke_fn)(); + TfLiteStatus (*reset_fn)(void (*free)(void* ptr)); + TfLiteStatus (*input_fn)(int, TfLiteTensor*); + TfLiteStatus (*output_fn)(int, TfLiteTensor*); +} ei_dsp_config_tflite_eon_t; #endif // _EDGE_IMPULSE_MODEL_TYPES_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_nms.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_nms.h new file mode 100644 index 0000000..5bfcdf8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_nms.h @@ -0,0 +1,392 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
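[Editorial aside] The ei_impulse_state_t and ei_impulse_handle_t types above tie an impulse to lazily created per-block DSP handles and route their own heap allocation through ei_malloc/ei_free. A hedged sketch of a handle's lifetime; the impulse symbol name is an assumption, not a real generated identifier:

    // Sketch only: 'ei_some_impulse' stands in for the symbol generated in model_variables.h.
    extern const ei_impulse_t ei_some_impulse;

    void sketch_handle_lifetime() {
        ei_impulse_handle_t handle(&ei_some_impulse);   // DSP handle slots start as nullptr
        // For blocks that declare a factory, a DspHandle is created on first use:
        //   DspHandle *h = handle.state.get_dsp_handle(0);
        // Dropping accumulated DSP state (e.g. after a gap in the data stream):
        handle.state.reset();
    }   // the state destructor resets again and releases the handle array via ei_free()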
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EDGE_IMPULSE_NMS_H_ +#define _EDGE_IMPULSE_NMS_H_ + +#include "model-parameters/model_metadata.h" +#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1 +#include "model-parameters/model_variables.h" +#endif +#include "edge-impulse-sdk/classifier/ei_model_types.h" +#include "edge-impulse-sdk/classifier/ei_classifier_types.h" +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" + +#if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOX) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_SSD) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV2) + +// The code below comes from tensorflow/lite/kernels/internal/reference/non_max_suppression.h +// Copyright 2019 The TensorFlow Authors. All rights reserved. +// Licensed under the Apache License, Version 2.0 +#include +#include +#include +#include + +// A pair of diagonal corners of the box. +struct BoxCornerEncoding { + float y1; + float x1; + float y2; + float x2; +}; + +static inline float ComputeIntersectionOverUnion(const float* boxes, const int i, + const int j) { + auto& box_i = reinterpret_cast(boxes)[i]; + auto& box_j = reinterpret_cast(boxes)[j]; + const float box_i_y_min = std::min(box_i.y1, box_i.y2); + const float box_i_y_max = std::max(box_i.y1, box_i.y2); + const float box_i_x_min = std::min(box_i.x1, box_i.x2); + const float box_i_x_max = std::max(box_i.x1, box_i.x2); + const float box_j_y_min = std::min(box_j.y1, box_j.y2); + const float box_j_y_max = std::max(box_j.y1, box_j.y2); + const float box_j_x_min = std::min(box_j.x1, box_j.x2); + const float box_j_x_max = std::max(box_j.x1, box_j.x2); + + const float area_i = + (box_i_y_max - box_i_y_min) * (box_i_x_max - box_i_x_min); + const float area_j = + (box_j_y_max - box_j_y_min) * (box_j_x_max - box_j_x_min); + if (area_i <= 0 || area_j <= 0) return 0.0; + const float intersection_ymax = std::min(box_i_y_max, box_j_y_max); + const float intersection_xmax = std::min(box_i_x_max, box_j_x_max); + const float intersection_ymin = std::max(box_i_y_min, box_j_y_min); + const float intersection_xmin = std::max(box_i_x_min, box_j_x_min); + const float intersection_area = + std::max(intersection_ymax - intersection_ymin, 0.0) * + std::max(intersection_xmax - intersection_xmin, 0.0); + return intersection_area / (area_i + area_j - intersection_area); +} + +// Implements (Single-Class) Soft NMS (with Gaussian weighting). +// Supports functionality of TensorFlow ops NonMaxSuppressionV4 & V5. +// Reference: "Soft-NMS - Improving Object Detection With One Line of Code" +// [Bodla et al, https://arxiv.org/abs/1704.04503] +// Implementation adapted from the TensorFlow NMS code at +// tensorflow/core/kernels/non_max_suppression_op.cc. +// +// Arguments: +// boxes: box encodings in format [y1, x1, y2, x2], shape: [num_boxes, 4] +// num_boxes: number of candidates +// scores: scores for candidate boxes, in the same order. shape: [num_boxes] +// max_output_size: the maximum number of selections. 
+// iou_threshold: Intersection-over-Union (IoU) threshold for NMS +// score_threshold: All candidate scores below this value are rejected +// soft_nms_sigma: Soft NMS parameter, used for decaying scores +// +// Outputs: +// selected_indices: all the selected indices. Underlying array must have +// length >= max_output_size. Cannot be null. +// selected_scores: scores of selected indices. Defer from original value for +// Soft NMS. If not null, array must have length >= max_output_size. +// num_selected_indices: Number of selections. Only these many elements are +// set in selected_indices, selected_scores. Cannot be null. +// +// Assumes inputs are valid (for eg, iou_threshold must be >= 0). +static inline void NonMaxSuppression(const float* boxes, const int num_boxes, + const float* scores, const int max_output_size, + const float iou_threshold, + const float score_threshold, + const float soft_nms_sigma, int* selected_indices, + float* selected_scores, + int* num_selected_indices) { + struct Candidate { + int index; + float score; + int suppress_begin_index; + }; + + // Priority queue to hold candidates. + auto cmp = [](const Candidate bs_i, const Candidate bs_j) { + return bs_i.score < bs_j.score; + }; + std::priority_queue, decltype(cmp)> + candidate_priority_queue(cmp); + // Populate queue with candidates above the score threshold. + for (int i = 0; i < num_boxes; ++i) { + if (scores[i] > score_threshold) { + candidate_priority_queue.emplace(Candidate({i, scores[i], 0})); + } + } + + *num_selected_indices = 0; + int num_outputs = std::min(static_cast(candidate_priority_queue.size()), + max_output_size); + if (num_outputs == 0) return; + + // NMS loop. + float scale = 0; + if (soft_nms_sigma > 0.0) { + scale = -0.5 / soft_nms_sigma; + } + while (*num_selected_indices < num_outputs && + !candidate_priority_queue.empty()) { + Candidate next_candidate = candidate_priority_queue.top(); + const float original_score = next_candidate.score; + candidate_priority_queue.pop(); + + // Overlapping boxes are likely to have similar scores, therefore we + // iterate through the previously selected boxes backwards in order to + // see if `next_candidate` should be suppressed. We also enforce a property + // that a candidate can be suppressed by another candidate no more than + // once via `suppress_begin_index` which tracks which previously selected + // boxes have already been compared against next_candidate prior to a given + // iteration. These previous selected boxes are then skipped over in the + // following loop. + bool should_hard_suppress = false; + for (int j = *num_selected_indices - 1; + j >= next_candidate.suppress_begin_index; --j) { + const float iou = ComputeIntersectionOverUnion( + boxes, next_candidate.index, selected_indices[j]); + + // First decide whether to perform hard suppression. + if (iou >= iou_threshold) { + should_hard_suppress = true; + break; + } + + // Suppress score if NMS sigma > 0. + if (soft_nms_sigma > 0.0) { + next_candidate.score = + next_candidate.score * std::exp(scale * iou * iou); + } + + // If score has fallen below score_threshold, it won't be pushed back into + // the queue. + if (next_candidate.score <= score_threshold) break; + } + // If `next_candidate.score` has not dropped below `score_threshold` + // by this point, then we know that we went through all of the previous + // selections and can safely update `suppress_begin_index` to + // `selected.size()`. 
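[Editorial aside] The Gaussian soft-NMS weighting above decays a candidate's score by exp(-IoU^2 / (2 * sigma)), which the code expresses as exp(scale * iou * iou) with scale = -0.5 / soft_nms_sigma. A small worked example with made-up numbers (requires <cmath>):

    // Illustrative numbers only.
    float sigma = 0.5f;                      // soft_nms_sigma
    float scale = -0.5f / sigma;             // -1.0
    float iou   = 0.6f;
    float score = 0.9f;
    score *= std::exp(scale * iou * iou);    // 0.9 * exp(-0.36) ~= 0.63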
If on the other hand `next_candidate.score` + // *has* dropped below the score threshold, then since `suppress_weight` + // always returns values in [0, 1], further suppression by items that were + // not covered in the above for loop would not have caused the algorithm + // to select this item. We thus do the same update to + // `suppress_begin_index`, but really, this element will not be added back + // into the priority queue. + next_candidate.suppress_begin_index = *num_selected_indices; + + if (!should_hard_suppress) { + if (next_candidate.score == original_score) { + // Suppression has not occurred, so select next_candidate. + selected_indices[*num_selected_indices] = next_candidate.index; + if (selected_scores) { + selected_scores[*num_selected_indices] = next_candidate.score; + } + ++*num_selected_indices; + } + if ((soft_nms_sigma > 0.0) && (next_candidate.score > score_threshold)) { + // Soft suppression might have occurred and current score is still + // greater than score_threshold; add next_candidate back onto priority + // queue. + candidate_priority_queue.push(next_candidate); + } + } + } +} + +/** + * Run non-max suppression over the results array (for bounding boxes) + */ +EI_IMPULSE_ERROR ei_run_nms( + const ei_impulse_t *impulse, + std::vector *results, + float *boxes, + float *scores, + int *classes, + size_t bb_count, + bool clip_boxes, + bool debug) { + + if (bb_count < 1) { + return EI_IMPULSE_OK; + } + + int *selected_indices = (int*)ei_malloc(1 * bb_count * sizeof(int)); + float *selected_scores = (float*)ei_malloc(1 * bb_count * sizeof(float)); + + if (!scores || !boxes || !selected_indices || !selected_scores || !classes) { + ei_free(selected_indices); + ei_free(selected_scores); + return EI_IMPULSE_OUT_OF_MEMORY; + } + + // boxes: box encodings in format [y1, x1, y2, x2], shape: [num_boxes, 4] + // num_boxes: number of candidates + // scores: scores for candidate boxes, in the same order. shape: [num_boxes] + // max_output_size: the maximum number of selections. 
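[Editorial aside] NonMaxSuppression() above behaves as plain hard NMS when soft_nms_sigma is zero. A minimal hedged sketch of calling it directly on two heavily overlapping boxes; all data is invented for illustration:

    void sketch_hard_nms() {
        // y1, x1, y2, x2 per box; the second box overlaps the first with IoU ~0.68
        float boxes[2 * 4] = { 0.0f, 0.0f, 10.0f, 10.0f,
                               1.0f, 1.0f, 11.0f, 11.0f };
        float scores[2] = { 0.9f, 0.6f };
        int   selected_indices[2];
        float selected_scores[2];
        int   num_selected = 0;

        NonMaxSuppression(boxes, 2, scores,
                          2,     // max_output_size
                          0.5f,  // iou_threshold
                          0.1f,  // score_threshold
                          0.0f,  // soft_nms_sigma == 0 -> hard NMS
                          selected_indices, selected_scores, &num_selected);
        // num_selected == 1 and selected_indices[0] == 0: the weaker box is suppressed
    }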
+ // iou_threshold: Intersection-over-Union (IoU) threshold for NMS + // score_threshold: All candidate scores below this value are rejected + // soft_nms_sigma: Soft NMS parameter, used for decaying scores + + int num_selected_indices; + + NonMaxSuppression( + (const float*)boxes, // boxes + bb_count, // num_boxes + (const float*)scores, // scores + bb_count, // max_output_size + impulse->object_detection_nms.iou_threshold, // iou_threshold + impulse->object_detection_nms.confidence_threshold, // score_threshold + 0.0f, // soft_nms_sigma + selected_indices, + selected_scores, + &num_selected_indices); + + std::vector new_results; + + for (size_t ix = 0; ix < (size_t)num_selected_indices; ix++) { + + int out_ix = selected_indices[ix]; + ei_impulse_result_bounding_box_t bb; + bb.label = impulse->categories[classes[out_ix]]; + bb.value = selected_scores[ix]; + + float ymin = boxes[(out_ix * 4) + 0]; + float xmin = boxes[(out_ix * 4) + 1]; + float ymax = boxes[(out_ix * 4) + 2]; + float xmax = boxes[(out_ix * 4) + 3]; + + if (clip_boxes) { + ymin = std::min(std::max(ymin, 0.0f), (float)impulse->input_height); + xmin = std::min(std::max(xmin, 0.0f), (float)impulse->input_width); + ymax = std::min(std::max(ymax, 0.0f), (float)impulse->input_height); + xmax = std::min(std::max(xmax, 0.0f), (float)impulse->input_width); + } + + bb.y = static_cast(ymin); + bb.x = static_cast(xmin); + bb.height = static_cast(ymax) - bb.y; + bb.width = static_cast(xmax) - bb.x; + new_results.push_back(bb); + + if (debug) { + ei_printf("Found bb with label %s\n", bb.label); + } + + } + + results->clear(); + + for (size_t ix = 0; ix < new_results.size(); ix++) { + results->push_back(new_results[ix]); + } + + ei_free(selected_indices); + ei_free(selected_scores); + + return EI_IMPULSE_OK; + +} + +/** + * Run non-max suppression over the results array (for bounding boxes) + */ +EI_IMPULSE_ERROR ei_run_nms( + const ei_impulse_t *impulse, + std::vector *results, + bool clip_boxes, + bool debug) { + + size_t bb_count = 0; + for (size_t ix = 0; ix < results->size(); ix++) { + auto bb = results->at(ix); + if (bb.value == 0) { + continue; + } + bb_count++; + } + + if (bb_count < 1) { + return EI_IMPULSE_OK; + } + + float *boxes = (float*)ei_malloc(4 * bb_count * sizeof(float)); + float *scores = (float*)ei_malloc(1 * bb_count * sizeof(float)); + int *classes = (int*) ei_malloc(bb_count * sizeof(int)); + + if (!scores || !boxes || !classes) { + ei_free(boxes); + ei_free(scores); + ei_free(classes); + return EI_IMPULSE_OUT_OF_MEMORY; + } + + size_t box_ix = 0; + for (size_t ix = 0; ix < results->size(); ix++) { + auto bb = results->at(ix); + if (bb.value == 0) { + continue; + } + boxes[(box_ix * 4) + 0] = bb.y; + boxes[(box_ix * 4) + 1] = bb.x; + boxes[(box_ix * 4) + 2] = bb.y + bb.height; + boxes[(box_ix * 4) + 3] = bb.x + bb.width; + scores[box_ix] = bb.value; + + for (size_t j = 0; j < impulse->label_count; j++) { + if (strcmp(impulse->categories[j], bb.label) == 0) + classes[box_ix] = j; + } + + box_ix++; + } + + EI_IMPULSE_ERROR nms_res = ei_run_nms(impulse, results, + boxes, scores, + classes, bb_count, + clip_boxes, + debug); + + + ei_free(boxes); + ei_free(scores); + ei_free(classes); + + return nms_res; + +} + +/** + * Run non-max suppression over the results array (for bounding boxes) + */ +EI_IMPULSE_ERROR ei_run_nms( + const ei_impulse_t *impulse, + std::vector *results, + bool debug = false) { + return ei_run_nms(impulse, results, true, debug); +} + +/** + * Run non-max suppression over the results array (for 
bounding boxes) + */ +EI_IMPULSE_ERROR ei_run_nms(std::vector *results, bool debug = false) { +#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1 + auto& impulse = *ei_default_impulse.impulse; +#else + const ei_impulse_t impulse = { + .object_detection_nms.confidence_threshold = 0.0f, + .object_detection_nms.iou_threshold = 0.2f + }; +#endif + return ei_run_nms(&impulse, results, debug); +} + +#endif // #if (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOX) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_SSD) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3) || (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4) + +#endif // _EDGE_IMPULSE_NMS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_performance_calibration.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_performance_calibration.h new file mode 100644 index 0000000..a14c1e5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_performance_calibration.h @@ -0,0 +1,199 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef EI_PERFORMANCE_CALIBRATION_H +#define EI_PERFORMANCE_CALIBRATION_H + +/* Includes ---------------------------------------------------------------- */ +#include "edge-impulse-sdk/dsp/numpy_types.h" +#include "edge-impulse-sdk/dsp/returntypes.hpp" +#include "ei_model_types.h" + +/* Private const types ----------------------------------------------------- */ +#define MEM_ERROR "ERR: Failed to allocate memory for performance calibration\r\n" + +#define EI_PC_RET_NO_EVENT_DETECTED -1 +#define EI_PC_RET_MEMORY_ERROR -2 + +class RecognizeEvents { + +public: + RecognizeEvents( + const ei_model_performance_calibration_t *config, + uint32_t n_labels, + uint32_t sample_length, + float sample_interval_ms) + { + this->_score_array = nullptr; + this->_running_sum = nullptr; + this->_detection_threshold = config->detection_threshold; + this->_suppression_flags = config->suppression_flags; + this->_should_boost = config->is_configured; + this->_n_labels = n_labels; + + /* Determine sample length in ms */ + float sample_length_ms = (static_cast(sample_length) * sample_interval_ms); + + /* Calculate number of inference runs needed for the duration window */ + this->_average_window_duration_samples = + (config->average_window_duration_ms < static_cast(sample_length_ms)) + ? 
1 + : static_cast(static_cast(config->average_window_duration_ms) / sample_length_ms); + + /* Calculate number of inference runs for suppression */ + this->_suppression_samples = (config->suppression_ms < static_cast(sample_length_ms)) + ? 0 + : static_cast(static_cast(config->suppression_ms) / sample_length_ms); + + /* Detection threshold should be high enough to only classifiy 1 possibly output */ + if (this->_detection_threshold <= (1.f / this->_n_labels)) { + ei_printf("ERR: Classifier detection threshold too low\r\n"); + return; + } + + /* Array to store scores for all labels */ + this->_score_array = (float *)ei_malloc( + this->_average_window_duration_samples * this->_n_labels * sizeof(float)); + + if (this->_score_array == NULL) { + ei_printf(MEM_ERROR); + return; + } + + for (uint32_t i = 0; i < this->_average_window_duration_samples * this->_n_labels; i++) { + this->_score_array[i] = 0.f; + } + this->_score_idx = 0; + + /* Running sum for all labels */ + this->_running_sum = (float *)ei_malloc(this->_n_labels * sizeof(float)); + + if (this->_running_sum != NULL) { + for (uint32_t i = 0; i < this->_n_labels; i++) { + this->_running_sum[i] = 0.f; + } + } + else { + ei_printf(MEM_ERROR); + return; + } + + this->_suppression_count = this->_suppression_samples; + this->_n_scores_in_array = 0; + } + + ~RecognizeEvents() + { + if (this->_score_array) { + ei_free((void *)this->_score_array); + } + if (this->_running_sum) { + ei_free((void *)this->_running_sum); + } + } + + bool should_boost() + { + return this->_should_boost; + } + + int32_t trigger(ei_impulse_result_classification_t *scores) + { + int32_t recognized_event = EI_PC_RET_NO_EVENT_DETECTED; + float current_top_score = 0.f; + uint32_t current_top_index = 0; + + /* Check pointers */ + if (this->_score_array == NULL || this->_running_sum == NULL) { + return EI_PC_RET_MEMORY_ERROR; + } + + /* Update the score array and running sum */ + for (uint32_t i = 0; i < this->_n_labels; i++) { + this->_running_sum[i] -= this->_score_array[(this->_score_idx * this->_n_labels) + i]; + this->_running_sum[i] += scores[i].value; + this->_score_array[(this->_score_idx * this->_n_labels) + i] = scores[i].value; + } + + if (++this->_score_idx >= this->_average_window_duration_samples) { + this->_score_idx = 0; + } + + /* Number of samples to average, increases until the buffer is full */ + if (this->_n_scores_in_array < this->_average_window_duration_samples) { + this->_n_scores_in_array++; + } + + /* Average data and place in scores & determine top score */ + for (uint32_t i = 0; i < this->_n_labels; i++) { + scores[i].value = this->_running_sum[i] / this->_n_scores_in_array; + + if (scores[i].value > current_top_score) { + if(this->_suppression_flags == 0) { + current_top_score = scores[i].value; + current_top_index = i; + } + else if(this->_suppression_flags & (1 << i)) { + current_top_score = scores[i].value; + current_top_index = i; + } + } + } + + /* Check threshold, suppression */ + if (this->_suppression_samples && this->_suppression_count < this->_suppression_samples) { + this->_suppression_count++; + } + else { + if (current_top_score >= this->_detection_threshold) { + recognized_event = current_top_index; + + if (this->_suppression_flags & (1 << current_top_index)) { + this->_suppression_count = 0; + } + } + } + + return recognized_event; + }; + + void *operator new(size_t size) + { + void *p = ei_malloc(size); + return p; + } + + void operator delete(void *p) + { + ei_free(p); + } + +private: + uint32_t 
_average_window_duration_samples; + float _detection_threshold; + bool _should_boost; + uint32_t _suppression_samples; + uint32_t _suppression_count; + uint32_t _suppression_flags; + uint32_t _n_labels; + float *_score_array; + uint32_t _score_idx; + float *_running_sum; + uint32_t _n_scores_in_array; +}; + +#endif //EI_PERFORMANCE_CALIBRATION diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_quantize.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_quantize.h new file mode 100644 index 0000000..727d920 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_quantize.h @@ -0,0 +1,37 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __EI_QUANTIZE__H__ +#define __EI_QUANTIZE__H__ + +#include +#include + +static int32_t pre_cast_quantize(float value, float scale, int32_t zero_point, bool is_signed) { + + int32_t max_value = is_signed ? 127 : 255; + int32_t min_value = is_signed ? -128 : 0; + // Saturate/clip any overflows post scaling + return std::min( std::max( static_cast(round(value / scale)) + zero_point, min_value), max_value); +} + +#endif //!__EI_QUANTIZE__H__ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier.h index 3a1724a..02a3523 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier.h @@ -1,216 +1,402 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
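[Editorial aside] pre_cast_quantize() above maps a float onto the int8/uint8 grid and saturates at the type limits. A short worked example; the scale and zero-point values are chosen purely for illustration:

    // Signed int8 tensor with scale 0.004 and zero_point -128 (hypothetical values):
    int32_t q1 = pre_cast_quantize(0.5f, 0.004f, -128, true);   // round(0.5 / 0.004) - 128 = 125 - 128 = -3
    int32_t q2 = pre_cast_quantize(2.0f, 0.004f, -128, true);   // 500 - 128 = 372, saturated to 127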
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_H_ #define _EDGE_IMPULSE_RUN_CLASSIFIER_H_ +#include "ei_model_types.h" #include "model-parameters/model_metadata.h" -#if EI_CLASSIFIER_HAS_ANOMALY == 1 -#include "model-parameters/anomaly_clusters.h" -#endif #include "ei_run_dsp.h" #include "ei_classifier_types.h" -#include "ei_classifier_smooth.h" #include "ei_signal_with_axes.h" +#include "ei_performance_calibration.h" + +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" +#include "edge-impulse-sdk/porting/ei_logging.h" +#include + +#if EI_CLASSIFIER_HAS_ANOMALY +#include "inferencing_engines/anomaly.h" +#endif + #if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1 #include "ei_sampler.h" #endif -#include "edge-impulse-sdk/porting/ei_classifier_porting.h" -#include "model-parameters/dsp_blocks.h" #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED != 1) -#include -#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h" -#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" -#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h" - -#include "tflite-model/tflite-trained.h" -#if defined(EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER) && EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER == 1 -#include "tflite-model/tflite-resolver.h" -#endif // EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER - -static tflite::MicroErrorReporter micro_error_reporter; -static tflite::ErrorReporter* error_reporter = µ_error_reporter; +#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_micro.h" #elif EI_CLASSIFIER_COMPILED == 1 -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "tflite-model/trained_model_compiled.h" -#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h" - +#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_eon.h" #elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL - -#include "tensorflow/lite/c/common.h" -#include "tensorflow/lite/interpreter.h" -#include "tensorflow/lite/kernels/register.h" -#include "tensorflow/lite/model.h" -#include "tensorflow/lite/optional_debug_tools.h" -#include "tflite-model/tflite-trained.h" - +#include 
"edge-impulse-sdk/classifier/inferencing_engines/tflite_full.h" +#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_TIDL +#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h" #elif (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT) -#include -#include "tflite-model/onnx-trained.h" -#include "tflite/linux-jetson-nano/libeitrt.h" -EiTrt* ei_trt_handle = NULL; - +#include "edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h" +#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW +#include "edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h" +#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI +#include "edge-impulse-sdk/classifier/inferencing_engines/drpai.h" +#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA +#include "edge-impulse-sdk/classifier/inferencing_engines/akida.h" +#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL +#include "edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h" +#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_MEMRYX +#include "edge-impulse-sdk/classifier/inferencing_engines/memryx.h" #elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_NONE // noop #else #error "Unknown inferencing engine" #endif -#if EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE && defined(EI_CLASSIFIER_ENABLE_DETECTION_POSTPROCESS_OP) -namespace tflite { -namespace ops { -namespace micro { -extern TfLiteRegistration Register_TFLite_Detection_PostProcess(void); -} // namespace micro -} // namespace ops - - -extern float post_process_boxes[10 * 4 * sizeof(float)]; -extern float post_process_classes[10]; -extern float post_process_scores[10]; - -} // namespace tflite - -static TfLiteRegistration post_process_op = tflite::ops::micro::Register_TFLite_Detection_PostProcess(); - -#endif // EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE && defined(EI_CLASSIFIER_ENABLE_DETECTION_POSTPROCESS_OP) - -#if ECM3532 -void* __dso_handle = (void*) &__dso_handle; -#endif +// This file has an implicit dependency on ei_run_dsp.h, so must come after that include! 
+#include "model-parameters/model_variables.h" #ifdef __cplusplus namespace { #endif // __cplusplus /* Function prototypes ----------------------------------------------------- */ -extern "C" EI_IMPULSE_ERROR run_inference(ei::matrix_t *fmatrix, ei_impulse_result_t *result, bool debug); -extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(signal_t *signal, ei_impulse_result_t *result, bool debug); -static EI_IMPULSE_ERROR can_run_classifier_image_quantized(); -static void calc_cepstral_mean_and_var_normalization_mfcc(ei_matrix *matrix, void *config_ptr); -static void calc_cepstral_mean_and_var_normalization_mfe(ei_matrix *matrix, void *config_ptr); -static void calc_cepstral_mean_and_var_normalization_spectrogram(ei_matrix *matrix, void *config_ptr); +extern "C" EI_IMPULSE_ERROR run_inference(ei_impulse_handle_t *handle, ei_feature_t *fmatrix, ei_impulse_result_t *result, bool debug); +extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized(const ei_impulse_t *impulse, signal_t *signal, ei_impulse_result_t *result, bool debug); +static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse, ei_learning_block_t block_ptr); + +#if EI_CLASSIFIER_LOAD_IMAGE_SCALING +EI_IMPULSE_ERROR ei_scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix); +EI_IMPULSE_ERROR ei_unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix); +#endif // EI_CLASSIFIER_LOAD_IMAGE_SCALING /* Private variables ------------------------------------------------------- */ -#if EI_CLASSIFIER_LABEL_COUNT > 0 -ei_impulse_maf classifier_maf[EI_CLASSIFIER_LABEL_COUNT] = {{0}}; -#else -ei_impulse_maf classifier_maf[0]; -#endif static uint64_t classifier_continuous_features_written = 0; +static RecognizeEvents *avg_scores = NULL; /* Private functions ------------------------------------------------------- */ +/* These functions (up to Public functions section) are not exposed to end-user, +therefore changes are allowed. */ + + /** - * @brief Run a moving average filter over the classification result. - * The size of the filter determines the response of the filter. - * It is now set to the number of slices per window. 
- * @param maf Pointer to maf object - * @param[in] classification Classification output on current slice + * @brief Display the results of the inference * - * @return Averaged classification value + * @param result The result */ -extern "C" float run_moving_average_filter(ei_impulse_maf *maf, float classification) +__attribute__((unused)) void display_results(ei_impulse_result_t* result) { - maf->running_sum -= maf->maf_buffer[maf->buf_idx]; - maf->running_sum += classification; - maf->maf_buffer[maf->buf_idx] = classification; + // print the predictions + ei_printf("Predictions (DSP: %d ms., Classification: %d ms., Anomaly: %d ms.): \n", + result->timing.dsp, result->timing.classification, result->timing.anomaly); +#if EI_CLASSIFIER_OBJECT_DETECTION == 1 + ei_printf("#Object detection results:\r\n"); + bool bb_found = result->bounding_boxes[0].value > 0; + for (size_t ix = 0; ix < result->bounding_boxes_count; ix++) { + auto bb = result->bounding_boxes[ix]; + if (bb.value == 0) { + continue; + } + ei_printf(" %s (", bb.label); + ei_printf_float(bb.value); + ei_printf(") [ x: %u, y: %u, width: %u, height: %u ]\n", bb.x, bb.y, bb.width, bb.height); + } - if (++maf->buf_idx >= (EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW >> 1)) { - maf->buf_idx = 0; + if (!bb_found) { + ei_printf(" No objects found\n"); } -#if (EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW > 1) - return maf->running_sum / (float)(EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW >> 1); -#else - return maf->running_sum; +#elif (EI_CLASSIFIER_LABEL_COUNT == 1) && (!EI_CLASSIFIER_HAS_ANOMALY)// regression + ei_printf("#Regression results:\r\n"); + ei_printf(" %s: ", result->classification[0].label); + ei_printf_float(result->classification[0].value); + ei_printf("\n"); + +#elif EI_CLASSIFIER_LABEL_COUNT > 1 // if there is only one label, this is an anomaly only + ei_printf("#Classification results:\r\n"); + for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) { + ei_printf(" %s: ", result->classification[ix].label); + ei_printf_float(result->classification[ix].value); + ei_printf("\n"); + } +#endif +#if EI_CLASSIFIER_HAS_ANOMALY == 3 // visual AD + ei_printf("#Visual anomaly grid results:\r\n"); + for (uint32_t i = 0; i < result->visual_ad_count; i++) { + ei_impulse_result_bounding_box_t bb = result->visual_ad_grid_cells[i]; + if (bb.value == 0) { + continue; + } + ei_printf(" %s (", bb.label); + ei_printf_float(bb.value); + ei_printf(") [ x: %u, y: %u, width: %u, height: %u ]\n", bb.x, bb.y, bb.width, bb.height); + } + ei_printf("Visual anomaly values: Mean "); + ei_printf_float(result->visual_ad_result.mean_value); + ei_printf(" Max "); + ei_printf_float(result->visual_ad_result.max_value); + ei_printf("\r\n"); +#elif (EI_CLASSIFIER_HAS_ANOMALY > 0) // except for visual AD + ei_printf("Anomaly prediction: "); + ei_printf_float(result->anomaly); + ei_printf("\r\n"); #endif } /** - * @brief Reset all values in filter to 0 + * @brief Do inferencing over the processed feature matrix + * + * @param impulse struct with information about model and DSP + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable * - * @param maf Pointer to maf object + * @return The ei impulse error. 
*/ -static void clear_moving_average_filter(ei_impulse_maf *maf) +extern "C" EI_IMPULSE_ERROR run_inference( + ei_impulse_handle_t *handle, + ei_feature_t *fmatrix, + ei_impulse_result_t *result, + bool debug = false) { - maf->running_sum = 0; + auto& impulse = handle->impulse; + for (size_t ix = 0; ix < impulse->learning_blocks_size; ix++) { + + ei_learning_block_t block = impulse->learning_blocks[ix]; + +#if EI_CLASSIFIER_LOAD_IMAGE_SCALING + // we do not plan to have multiple dsp blocks with image + // so just apply scaling to the first one + EI_IMPULSE_ERROR scale_res = ei_scale_fmatrix(&block, fmatrix[0].matrix); + if (scale_res != EI_IMPULSE_OK) { + return scale_res; + } +#endif + + result->copy_output = block.keep_output; + + EI_IMPULSE_ERROR res = block.infer_fn(impulse, fmatrix, ix, (uint32_t*)block.input_block_ids, block.input_block_ids_size, result, block.config, debug); + if (res != EI_IMPULSE_OK) { + return res; + } - for (int i = 0; i < (EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW >> 1); i++) { - maf->maf_buffer[i] = 0.f; +#if EI_CLASSIFIER_LOAD_IMAGE_SCALING + // undo scaling + scale_res = ei_unscale_fmatrix(&block, fmatrix[0].matrix); + if (scale_res != EI_IMPULSE_OK) { + return scale_res; + } +#endif + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + return EI_IMPULSE_CANCELED; } + + return EI_IMPULSE_OK; } /** - * @brief Init static vars + * @brief Process a complete impulse + * + * @param impulse struct with information about model and DSP + * @param signal Sample data + * @param result Output classifier results + * @param handle Handle from open_impulse. nullptr for backward compatibility + * @param[in] debug Debug output enable + * + * @return The ei impulse error. */ -extern "C" void run_classifier_init(void) +extern "C" EI_IMPULSE_ERROR process_impulse(ei_impulse_handle_t *handle, + signal_t *signal, + ei_impulse_result_t *result, + bool debug = false) { - classifier_continuous_features_written = 0; - ei_dsp_clear_continuous_audio_state(); + if(!handle) { + return EI_IMPULSE_INFERENCE_ERROR; + } - for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) { - clear_moving_average_filter(&classifier_maf[ix]); +#if (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL)) || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI + // Shortcut for quantized image models + ei_learning_block_t block = handle->impulse->learning_blocks[0]; + if (can_run_classifier_image_quantized(handle->impulse, block) == EI_IMPULSE_OK) { + return run_classifier_image_quantized(handle->impulse, signal, result, debug); + } +#endif + + memset(result, 0, sizeof(ei_impulse_result_t)); + uint32_t block_num = handle->impulse->dsp_blocks_size + handle->impulse->learning_blocks_size; + + // smart pointer to features array + std::unique_ptr features_ptr(new ei_feature_t[block_num]); + ei_feature_t* features = features_ptr.get(); + memset(features, 0, sizeof(ei_feature_t) * block_num); + + // have it outside of the loop to avoid going out of scope + std::unique_ptr *matrix_ptrs = new std::unique_ptr[block_num]; + + uint64_t dsp_start_us = ei_read_timer_us(); + + size_t out_features_index = 0; + + for (size_t ix = 0; ix < handle->impulse->dsp_blocks_size; ix++) { + ei_model_dsp_t block = handle->impulse->dsp_blocks[ix]; + matrix_ptrs[ix] = std::unique_ptr(new ei::matrix_t(1, block.n_output_features)); + 
features[ix].matrix = matrix_ptrs[ix].get(); + features[ix].blockId = block.blockId; + + if (out_features_index + block.n_output_features > handle->impulse->nn_input_frame_size) { + ei_printf("ERR: Would write outside feature buffer\n"); + delete[] matrix_ptrs; + return EI_IMPULSE_DSP_ERROR; + } + +#if EIDSP_SIGNAL_C_FN_POINTER + if (block.axes_size != handle->impulse->raw_samples_per_frame) { + ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n"); + delete[] matrix_ptrs; + return EI_IMPULSE_DSP_ERROR; + } + auto internal_signal = signal; +#else + SignalWithAxes swa(signal, block.axes, block.axes_size, handle->impulse); + auto internal_signal = swa.get_signal(); +#endif + + int ret; + if (block.factory) { // ie, if we're using state + // Msg user + static bool has_printed = false; + if (!has_printed) { + EI_LOGI("Impulse maintains state. Call run_classifier_init() to reset state (e.g. if data stream is interrupted.)\n"); + has_printed = true; + } + + // getter has a lazy init, so we can just call it + auto dsp_handle = handle->state.get_dsp_handle(ix); + if(dsp_handle) { + ret = dsp_handle->extract(internal_signal, features[ix].matrix, block.config, handle->impulse->frequency); + } else { + return EI_IMPULSE_OUT_OF_MEMORY; + } + } else { + ret = block.extract_fn(internal_signal, features[ix].matrix, block.config, handle->impulse->frequency); + } + + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to run DSP process (%d)\n", ret); + delete[] matrix_ptrs; + return EI_IMPULSE_DSP_ERROR; + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + delete[] matrix_ptrs; + return EI_IMPULSE_CANCELED; + } + + out_features_index += block.n_output_features; + } + +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + for (size_t ix = 0; ix < handle->impulse->learning_blocks_size; ix++) { + ei_learning_block_t block = handle->impulse->learning_blocks[ix]; + + if (block.keep_output) { + matrix_ptrs[handle->impulse->dsp_blocks_size + ix] = std::unique_ptr(new ei::matrix_t(1, block.output_features_count)); + features[handle->impulse->dsp_blocks_size + ix].matrix = matrix_ptrs[handle->impulse->dsp_blocks_size + ix].get(); + features[handle->impulse->dsp_blocks_size + ix].blockId = block.blockId; + } + } +#endif // EI_CLASSIFIER_SINGLE_FEATURE_INPUT + + result->timing.dsp_us = ei_read_timer_us() - dsp_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); + + if (debug) { + ei_printf("Features (%d ms.): ", result->timing.dsp); + for (size_t ix = 0; ix < block_num; ix++) { + if (features[ix].matrix == nullptr) { + continue; + } + for (size_t jx = 0; jx < features[ix].matrix->cols; jx++) { + ei_printf_float(features[ix].matrix->buffer[jx]); + ei_printf(" "); + } + ei_printf("\n"); + } + } + + if (debug) { + ei_printf("Running impulse...\n"); + } + + EI_IMPULSE_ERROR res = run_inference(handle, features, result, debug); + delete[] matrix_ptrs; + return res; +} + +/** + * @brief Opens an impulse + * + * @param impulse struct with information about model and DSP + * + * @return A pointer to the impulse handle, or nullptr if memory allocation failed. + */ +extern "C" EI_IMPULSE_ERROR init_impulse(ei_impulse_handle_t *handle) { + if (!handle) { + return EI_IMPULSE_OUT_OF_MEMORY; } + handle->state.reset(); + return EI_IMPULSE_OK; } /** - * @brief Fill the complete matrix with sample slices. From there, run inference - * on the matrix. 
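[Editorial aside] process_impulse() above consumes a signal_t rather than a raw buffer. When EIDSP_SIGNAL_C_FN_POINTER is 0 the signal exposes a total_length plus a get_data callback; a hedged sketch of wrapping a plain float buffer that way (buffer name and size are made up, and memcpy needs <cstring>):

    // Sketch only: 'raw_features' stands in for real sensor data of the impulse's input size.
    static float raw_features[640];

    static int get_raw_data(size_t offset, size_t length, float *out_ptr) {
        // copy the requested window out of the buffer
        memcpy(out_ptr, raw_features + offset, length * sizeof(float));
        return 0;
    }

    void sketch_signal() {
        signal_t signal;
        signal.total_length = sizeof(raw_features) / sizeof(raw_features[0]);
        signal.get_data = &get_raw_data;

        ei_impulse_result_t result = { 0 };
        // EI_IMPULSE_ERROR err = process_impulse(&some_handle, &signal, &result, false);
        // where 'some_handle' is an ei_impulse_handle_t built around the generated impulse
    }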
+ * @brief Process a complete impulse for continuous inference * - * @param signal Sample data - * @param result Classification output - * @param[in] debug Debug output enable boot - * @param enable_maf Enables the moving average filter + * @param impulse struct with information about model and DSP + * @param signal Sample data + * @param result Output classifier results + * @param[in] debug Debug output enable * * @return The ei impulse error. */ -extern "C" EI_IMPULSE_ERROR run_classifier_continuous(signal_t *signal, ei_impulse_result_t *result, - bool debug = false, bool enable_maf = true) +extern "C" EI_IMPULSE_ERROR process_impulse_continuous(ei_impulse_handle_t *handle, + signal_t *signal, + ei_impulse_result_t *result, + bool debug, + bool enable_maf) { - static ei::matrix_t static_features_matrix(1, EI_CLASSIFIER_NN_INPUT_FRAME_SIZE); + auto impulse = handle->impulse; + static ei::matrix_t static_features_matrix(1, impulse->nn_input_frame_size); if (!static_features_matrix.buffer) { return EI_IMPULSE_ALLOC_FAILED; } + memset(result, 0, sizeof(ei_impulse_result_t)); + EI_IMPULSE_ERROR ei_impulse_error = EI_IMPULSE_OK; - uint64_t dsp_start_ms = ei_read_timer_ms(); + uint64_t dsp_start_us = ei_read_timer_us(); size_t out_features_index = 0; - bool is_mfcc = false; - bool is_mfe = false; - bool is_spectrogram = false; - for (size_t ix = 0; ix < ei_dsp_blocks_size; ix++) { - ei_model_dsp_t block = ei_dsp_blocks[ix]; + for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) { + ei_model_dsp_t block = impulse->dsp_blocks[ix]; - if (out_features_index + block.n_output_features > EI_CLASSIFIER_NN_INPUT_FRAME_SIZE) { + if (out_features_index + block.n_output_features > impulse->nn_input_frame_size) { ei_printf("ERR: Would write outside feature buffer\n"); return EI_IMPULSE_DSP_ERROR; } @@ -223,15 +409,12 @@ extern "C" EI_IMPULSE_ERROR run_classifier_continuous(signal_t *signal, ei_impul /* Switch to the slice version of the mfcc feature extract function */ if (block.extract_fn == extract_mfcc_features) { extract_fn_slice = &extract_mfcc_per_slice_features; - is_mfcc = true; } else if (block.extract_fn == extract_spectrogram_features) { extract_fn_slice = &extract_spectrogram_per_slice_features; - is_spectrogram = true; } else if (block.extract_fn == extract_mfe_features) { extract_fn_slice = &extract_mfe_per_slice_features; - is_mfe = true; } else { ei_printf("ERR: Unknown extract function, only MFCC, MFE and spectrogram supported\n"); @@ -241,14 +424,14 @@ extern "C" EI_IMPULSE_ERROR run_classifier_continuous(signal_t *signal, ei_impul matrix_size_t features_written; #if EIDSP_SIGNAL_C_FN_POINTER - if (block.axes_size != EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { + if (block.axes_size != impulse->raw_samples_per_frame) { ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n"); return EI_IMPULSE_DSP_ERROR; } - int ret = extract_fn_slice(signal, &fm, block.config, EI_CLASSIFIER_FREQUENCY, &features_written); + int ret = extract_fn_slice(signal, &fm, block.config, impulse->frequency, &features_written); #else - SignalWithAxes swa(signal, block.axes, block.axes_size); - int ret = extract_fn_slice(swa.get_signal(), &fm, block.config, EI_CLASSIFIER_FREQUENCY, &features_written); + SignalWithAxes swa(signal, block.axes, block.axes_size, impulse); + int ret = extract_fn_slice(swa.get_signal(), &fm, block.config, impulse->frequency, &features_written); #endif if (ret != EIDSP_OK) { @@ -265,1187 +448,597 @@ extern "C" EI_IMPULSE_ERROR 
run_classifier_continuous(signal_t *signal, ei_impul out_features_index += block.n_output_features; } - result->timing.dsp = ei_read_timer_ms() - dsp_start_ms; + result->timing.dsp_us = ei_read_timer_us() - dsp_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); - if (debug) { - ei_printf("\r\nFeatures (%d ms.): ", result->timing.dsp); - for (size_t ix = 0; ix < static_features_matrix.cols; ix++) { - ei_printf_float(static_features_matrix.buffer[ix]); - ei_printf(" "); - } - ei_printf("\n"); - } + if (classifier_continuous_features_written >= impulse->nn_input_frame_size) { + dsp_start_us = ei_read_timer_us(); - if (classifier_continuous_features_written >= EI_CLASSIFIER_NN_INPUT_FRAME_SIZE) { - dsp_start_ms = ei_read_timer_ms(); - ei::matrix_t classify_matrix(1, EI_CLASSIFIER_NN_INPUT_FRAME_SIZE); + uint32_t block_num = impulse->dsp_blocks_size + impulse->learning_blocks_size; - /* Create a copy of the matrix for normalization */ - for (size_t m_ix = 0; m_ix < EI_CLASSIFIER_NN_INPUT_FRAME_SIZE; m_ix++) { - classify_matrix.buffer[m_ix] = static_features_matrix.buffer[m_ix]; - } + // smart pointer to features array + std::unique_ptr features_ptr(new ei_feature_t[block_num]); + ei_feature_t* features = features_ptr.get(); + memset(features, 0, sizeof(ei_feature_t) * block_num); - if (is_mfcc) { - calc_cepstral_mean_and_var_normalization_mfcc(&classify_matrix, ei_dsp_blocks[0].config); - } - else if (is_spectrogram) { - calc_cepstral_mean_and_var_normalization_spectrogram(&classify_matrix, ei_dsp_blocks[0].config); - } - else if (is_mfe) { - calc_cepstral_mean_and_var_normalization_mfe(&classify_matrix, ei_dsp_blocks[0].config); + // have it outside of the loop to avoid going out of scope + std::unique_ptr *matrix_ptrs = new std::unique_ptr[block_num]; + + out_features_index = 0; + // iterate over every dsp block and run normalization + for (size_t ix = 0; ix < impulse->dsp_blocks_size; ix++) { + ei_model_dsp_t block = impulse->dsp_blocks[ix]; + matrix_ptrs[ix] = std::unique_ptr(new ei::matrix_t(1, block.n_output_features)); + features[ix].matrix = matrix_ptrs[ix].get(); + features[ix].blockId = block.blockId; + + /* Create a copy of the matrix for normalization */ + for (size_t m_ix = 0; m_ix < block.n_output_features; m_ix++) { + features[ix].matrix->buffer[m_ix] = static_features_matrix.buffer[out_features_index + m_ix]; + } + + if (block.extract_fn == extract_mfcc_features) { + calc_cepstral_mean_and_var_normalization_mfcc(features[ix].matrix, block.config); + } + else if (block.extract_fn == extract_spectrogram_features) { + calc_cepstral_mean_and_var_normalization_spectrogram(features[ix].matrix, block.config); + } + else if (block.extract_fn == extract_mfe_features) { + calc_cepstral_mean_and_var_normalization_mfe(features[ix].matrix, block.config); + } + out_features_index += block.n_output_features; } - result->timing.dsp += ei_read_timer_ms() - dsp_start_ms; -#if EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE + result->timing.dsp_us += ei_read_timer_us() - dsp_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); + if (debug) { - ei_printf("Running neural network...\n"); + ei_printf("Feature Matrix: \n"); + for (size_t ix = 0; ix < features->matrix->cols; ix++) { + ei_printf_float(features->matrix->buffer[ix]); + ei_printf(" "); + } + ei_printf("\n"); + ei_printf("Running impulse...\n"); + } + + ei_impulse_error = run_inference(handle, features, result, debug); + +#if EI_CLASSIFIER_CALIBRATION_ENABLED + if (impulse->sensor == 
EI_CLASSIFIER_SENSOR_MICROPHONE) { + if((void *)avg_scores != NULL && enable_maf == true) { + if (enable_maf && !impulse->calibration.is_configured) { + // perfcal is not configured, print msg first time + static bool has_printed_msg = false; + + if (!has_printed_msg) { + ei_printf("WARN: run_classifier_continuous, enable_maf is true, but performance calibration is not configured.\n"); + ei_printf(" Previously we'd run a moving-average filter over your outputs in this case, but this is now disabled.\n"); + ei_printf(" Go to 'Performance calibration' in your Edge Impulse project to configure post-processing parameters.\n"); + ei_printf(" (You can enable this from 'Dashboard' if it's not visible in your project)\n"); + ei_printf("\n"); + + has_printed_msg = true; + } + } + else { + // perfcal is configured + static bool has_printed_msg = false; + + if (!has_printed_msg) { + ei_printf("\nPerformance calibration is configured for your project. If no event is detected, all values are 0.\r\n\n"); + has_printed_msg = true; + } + + int label_detected = avg_scores->trigger(result->classification); + + if (avg_scores->should_boost()) { + for (int i = 0; i < impulse->label_count; i++) { + if (i == label_detected) { + result->classification[i].value = 1.0f; + } + else { + result->classification[i].value = 0.0f; + } + } + } + } + } } #endif - ei_impulse_error = run_inference(&classify_matrix, result, debug); - - if (enable_maf) { - for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) { - #if EI_CLASSIFIER_OBJECT_DETECTION != 1 - result->classification[ix].value = - run_moving_average_filter(&classifier_maf[ix], result->classification[ix].value); - #endif - } + delete[] matrix_ptrs; + } + else { + for (int i = 0; i < impulse->label_count; i++) { + // set label correctly in the result struct if we have no results (otherwise is nullptr) + result->classification[i].label = impulse->categories[(uint32_t)i]; } } + return ei_impulse_error; } -#if EI_CLASSIFIER_OBJECT_DETECTION - /** - * Fill the result structure from an unquantized output tensor - * (we don't support quantized here a.t.m.) 
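The calling pattern this continuous path expects is documented with run_classifier_continuous() further down; as a compact sketch, assuming the same SDK include as in the earlier example, with read_audio_slice() as a hypothetical blocking sampler supplied by the application:

    extern void read_audio_slice(float *buf, size_t len);  // hypothetical application sampler

    static float slice[EI_CLASSIFIER_SLICE_SIZE];

    void continuous_inference_loop(void) {
        run_classifier_init();  // resets the static features matrix and impulse state

        while (true) {
            read_audio_slice(slice, EI_CLASSIFIER_SLICE_SIZE);

            signal_t signal;
            if (numpy::signal_from_buffer(slice, EI_CLASSIFIER_SLICE_SIZE, &signal) != 0) {
                break;
            }

            ei_impulse_result_t result = { 0 };
            EI_IMPULSE_ERROR res = run_classifier_continuous(&signal, &result, false, true);
            if (res != EI_IMPULSE_OK) {
                break;
            }

            // With performance calibration configured, classification values stay at 0
            // until an event triggers (see the boosting logic above).
            for (size_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) {
                ei_printf("%s: ", result.classification[ix].label);
                ei_printf_float(result.classification[ix].value);
                ei_printf("\n");
            }
        }

        run_classifier_deinit();  // frees the RecognizeEvents instance created by init
    }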
+ * Check if the current impulse could be used by 'run_classifier_image_quantized' */ -__attribute__((unused)) static void fill_result_struct_f32(ei_impulse_result_t *result, float *data, float *scores, float *labels, bool debug) { - for (size_t ix = 0; ix < EI_CLASSIFIER_OBJECT_DETECTION_COUNT; ix++) { - - float score = scores[ix]; - float label = labels[ix]; +__attribute__((unused)) static EI_IMPULSE_ERROR can_run_classifier_image_quantized(const ei_impulse_t *impulse, ei_learning_block_t block_ptr) { -#if EI_CLASSIFIER_TFLITE_INPUT_DATATYPE == EI_CLASSIFIER_DATATYPE_INT8 - // so for i8 inputs this seems to be 0.5..1.0 instead of 0.0..1.0 - // let's fix it by hand - score = (score - 0.5f) * 2; -#endif - - if (score >= EI_CLASSIFIER_OBJECT_DETECTION_THRESHOLD) { - float ystart = data[(ix * 4) + 0]; - float xstart = data[(ix * 4) + 1]; - float yend = data[(ix * 4) + 2]; - float xend = data[(ix * 4) + 3]; - - if (xstart < 0) xstart = 0; - if (xstart > 1) xstart = 1; - if (ystart < 0) ystart = 0; - if (ystart > 1) ystart = 1; - if (yend < 0) yend = 0; - if (yend > 1) yend = 1; - if (xend < 0) xend = 0; - if (xend > 1) xend = 1; - if (xend < xstart) xend = xstart; - if (yend < ystart) yend = ystart; - - if (debug) { - ei_printf("%s (%f): %f [ %f, %f, %f, %f ]\n", - ei_classifier_inferencing_categories[(uint32_t)label], label, score, xstart, ystart, xend, yend); - } + if (impulse->inferencing_engine != EI_CLASSIFIER_TFLITE + && impulse->inferencing_engine != EI_CLASSIFIER_TENSAIFLOW + && impulse->inferencing_engine != EI_CLASSIFIER_DRPAI + && impulse->inferencing_engine != EI_CLASSIFIER_ONNX_TIDL) // check later + { + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } - result->bounding_boxes[ix].label = ei_classifier_inferencing_categories[(uint32_t)label]; - result->bounding_boxes[ix].x = static_cast(xstart * static_cast(EI_CLASSIFIER_INPUT_WIDTH)); - result->bounding_boxes[ix].y = static_cast(ystart * static_cast(EI_CLASSIFIER_INPUT_HEIGHT)); - result->bounding_boxes[ix].width = static_cast((xend - xstart) * static_cast(EI_CLASSIFIER_INPUT_WIDTH)); - result->bounding_boxes[ix].height = static_cast((yend - ystart) * static_cast(EI_CLASSIFIER_INPUT_HEIGHT)); - result->bounding_boxes[ix].value = score; - } - else { - result->bounding_boxes[ix].value = 0.0f; - } + // visual anomaly also needs to go through the normal path + if (impulse->has_anomaly){ + return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; } -} -#else + // Check if we have tflite graph + if (block_ptr.infer_fn != run_nn_inference) { + return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; + } -/** - * Fill the result structure from a quantized output tensor - */ -__attribute__((unused)) static void fill_result_struct_i8(ei_impulse_result_t *result, int8_t *data, float zero_point, float scale, bool debug) { - for (uint32_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) { - float value = static_cast(data[ix] - zero_point) * scale; + // Check if we have a quantized NN Input layer (input is always quantized for DRP-AI) + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)block_ptr.config; + if (block_config->quantized != 1) { + return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; + } - if (debug) { - ei_printf("%s:\t", ei_classifier_inferencing_categories[ix]); - ei_printf_float(value); - ei_printf("\n"); - } - result->classification[ix].label = ei_classifier_inferencing_categories[ix]; - result->classification[ix].value = value; + // And if we have one DSP block which operates on images... 
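    // Rationale: the quantized fast path (run_classifier_image_quantized below)
    // quantizes in place, writing the DSP output straight into the int8 input
    // tensor instead of building a float feature matrix first, so it can only
    // support a single image DSP block feeding a quantized graph.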
+ if (impulse->dsp_blocks_size != 1 || impulse->dsp_blocks[0].extract_fn != extract_image_features) { + return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; } + + return EI_IMPULSE_OK; } +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL) + /** - * Fill the result structure from an unquantized output tensor + * Special function to run the classifier on images, only works on TFLite models (either interpreter, EON, tensaiflow, drpai, tidl, memryx) + * that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized' + * returns EI_IMPULSE_OK. */ -__attribute__((unused)) static void fill_result_struct_f32(ei_impulse_result_t *result, float *data, bool debug) { - for (uint32_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) { - float value = data[ix]; +extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized( + const ei_impulse_t *impulse, + signal_t *signal, + ei_impulse_result_t *result, + bool debug = false) +{ + memset(result, 0, sizeof(ei_impulse_result_t)); - if (debug) { - ei_printf("%s:\t", ei_classifier_inferencing_categories[ix]); - ei_printf_float(value); - ei_printf("\n"); - } - result->classification[ix].label = ei_classifier_inferencing_categories[ix]; - result->classification[ix].value = value; - } + return run_nn_inference_image_quantized(impulse, signal, result, impulse->learning_blocks[0].config, debug); } -#endif // EI_CLASSIFIER_OBJECT_DETECTION +#endif // #if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW || EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI) -#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) +#if EI_CLASSIFIER_LOAD_IMAGE_SCALING +static const float torch_mean[] = { 0.485, 0.456, 0.406 }; +static const float torch_std[] = { 0.229, 0.224, 0.225 }; +// This is ordered BGR +static const float tao_mean[] = { 103.939, 116.779, 123.68 }; -/** - * Setup the TFLite runtime - * - * @param ctx_start_ms Pointer to the start time - * @param input Pointer to input tensor - * @param output Pointer to output tensor - * @param micro_interpreter Pointer to interpreter (for non-compiled models) - * @param micro_tensor_arena Pointer to the arena that will be allocated - * - * @return EI_IMPULSE_OK if successful - */ -static EI_IMPULSE_ERROR inference_tflite_setup(uint64_t *ctx_start_ms, TfLiteTensor** input, TfLiteTensor** output, -#if EI_CLASSIFIER_OBJECT_DETECTION - TfLiteTensor** output_labels, - TfLiteTensor** output_scores, -#endif -#if (EI_CLASSIFIER_COMPILED != 1) - tflite::MicroInterpreter** micro_interpreter, -#endif - uint8_t** micro_tensor_arena) { -#if (EI_CLASSIFIER_COMPILED == 1) - TfLiteStatus init_status = trained_model_init(ei_aligned_calloc); - if (init_status != kTfLiteOk) { - ei_printf("Failed to allocate TFLite arena (error code %d)\n", init_status); - return EI_IMPULSE_TFLITE_ARENA_ALLOC_FAILED; +EI_IMPULSE_ERROR ei_scale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) { + if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) { + // @todo; could we write some faster vector math here? 
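        // Standard torchvision-style ImageNet normalization, x' = (x - mean_c) / std_c
        // per channel; assumes interleaved RGB pixel values already scaled to 0..1.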
+ for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) { + fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] - torch_mean[0]) / torch_std[0]; + fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] - torch_mean[1]) / torch_std[1]; + fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] - torch_mean[2]) / torch_std[2]; + } } -#else - // Create an area of memory to use for input, output, and intermediate arrays. - uint8_t *tensor_arena = (uint8_t*)ei_aligned_calloc(16, EI_CLASSIFIER_TFLITE_ARENA_SIZE); - if (tensor_arena == NULL) { - ei_printf("Failed to allocate TFLite arena (%d bytes)\n", EI_CLASSIFIER_TFLITE_ARENA_SIZE); - return EI_IMPULSE_TFLITE_ARENA_ALLOC_FAILED; + else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) { + int scale_res = numpy::scale(fmatrix, 255.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } } - *micro_tensor_arena = tensor_arena; -#endif - - *ctx_start_ms = ei_read_timer_ms(); - - static bool tflite_first_run = true; - -#if (EI_CLASSIFIER_COMPILED != 1) - static const tflite::Model* model = nullptr; -#endif - -#if (EI_CLASSIFIER_COMPILED != 1) - // ====== - // Initialization code start - // This part can be run once, but that would require the TFLite arena - // to be allocated at all times, which is not ideal (e.g. when doing MFCC) - // ====== - if (tflite_first_run) { - // Map the model into a usable data structure. This doesn't involve any - // copying or parsing, it's a very lightweight operation. - model = tflite::GetModel(trained_tflite); - if (model->version() != TFLITE_SCHEMA_VERSION) { - error_reporter->Report( - "Model provided is schema version %d not equal " - "to supported version %d.", - model->version(), TFLITE_SCHEMA_VERSION); - ei_aligned_free(tensor_arena); - return EI_IMPULSE_TFLITE_ERROR; + else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN128_127) { + int scale_res = numpy::scale(fmatrix, 255.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } + scale_res = numpy::subtract(fmatrix, 128.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; } } -#endif - -#if (EI_CLASSIFIER_COMPILED != 1) -#ifdef EI_TFLITE_RESOLVER - EI_TFLITE_RESOLVER -#else - tflite::AllOpsResolver resolver; -#endif -#if defined(EI_CLASSIFIER_ENABLE_DETECTION_POSTPROCESS_OP) - resolver.AddCustom("TFLite_Detection_PostProcess", &post_process_op); -#endif -#endif // EI_CLASSIFIER_COMPILED != 1 - -#if (EI_CLASSIFIER_COMPILED == 1) - *input = trained_model_input(0); - *output = trained_model_output(0); -#if EI_CLASSIFIER_OBJECT_DETECTION - *output_scores = trained_model_output(EI_CLASSIFIER_TFLITE_OUTPUT_SCORE_TENSOR); - *output_labels = trained_model_output(EI_CLASSIFIER_TFLITE_OUTPUT_LABELS_TENSOR); -#endif // EI_CLASSIFIER_OBJECT_DETECTION -#else - // Build an interpreter to run the model with. - tflite::MicroInterpreter *interpreter = new tflite::MicroInterpreter( - model, resolver, tensor_arena, EI_CLASSIFIER_TFLITE_ARENA_SIZE, error_reporter); - - *micro_interpreter = interpreter; - - // Allocate memory from the tensor_arena for the model's tensors. 
- TfLiteStatus allocate_status = interpreter->AllocateTensors(); - if (allocate_status != kTfLiteOk) { - error_reporter->Report("AllocateTensors() failed"); - ei_aligned_free(tensor_arena); - return EI_IMPULSE_TFLITE_ERROR; + else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN1_1) { + int scale_res = numpy::scale(fmatrix, 2.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } + scale_res = numpy::subtract(fmatrix, 1.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } + } + else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_BGR_SUBTRACT_IMAGENET_MEAN) { + int scale_res = numpy::scale(fmatrix, 255.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } + // Transpose RGB to BGR and subtract mean + for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) { + float r = fmatrix->buffer[ix + 0]; + fmatrix->buffer[ix + 0] = fmatrix->buffer[ix + 2] - tao_mean[0]; + fmatrix->buffer[ix + 1] -= tao_mean[1]; + fmatrix->buffer[ix + 2] = r - tao_mean[2]; + } } - // Obtain pointers to the model's input and output tensors. - *input = interpreter->input(0); - *output = interpreter->output(0); -#if EI_CLASSIFIER_OBJECT_DETECTION - *output_scores = interpreter->output(EI_CLASSIFIER_TFLITE_OUTPUT_SCORE_TENSOR); - *output_labels = interpreter->output(EI_CLASSIFIER_TFLITE_OUTPUT_LABELS_TENSOR); -#endif // EI_CLASSIFIER_OBJECT_DETECTION -#endif + return EI_IMPULSE_OK; +} - // Assert that our quantization parameters match the model - if (tflite_first_run) { - assert((*input)->type == EI_CLASSIFIER_TFLITE_INPUT_DATATYPE); - assert((*output)->type == EI_CLASSIFIER_TFLITE_OUTPUT_DATATYPE); -#if EI_CLASSIFIER_OBJECT_DETECTION - assert((*output_scores)->type == EI_CLASSIFIER_TFLITE_OUTPUT_DATATYPE); - assert((*output_labels)->type == EI_CLASSIFIER_TFLITE_OUTPUT_DATATYPE); -#endif -#if defined(EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED) || defined(EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED) - if (EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED) { - assert((*input)->params.scale == EI_CLASSIFIER_TFLITE_INPUT_SCALE); - assert((*input)->params.zero_point == EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); +EI_IMPULSE_ERROR ei_unscale_fmatrix(ei_learning_block_t *block, ei::matrix_t *fmatrix) { + if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) { + // @todo; could we write some faster vector math here? 
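        // Inverse of the scaling above: x = x' * std_c + mean_c per channel,
        // restoring the original 0..1 pixel values.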
+ for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) { + fmatrix->buffer[ix + 0] = (fmatrix->buffer[ix + 0] * torch_std[0]) + torch_mean[0]; + fmatrix->buffer[ix + 1] = (fmatrix->buffer[ix + 1] * torch_std[1]) + torch_mean[1]; + fmatrix->buffer[ix + 2] = (fmatrix->buffer[ix + 2] * torch_std[2]) + torch_mean[2]; + } + } + else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN128_127) { + int scale_res = numpy::add(fmatrix, 128.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; } - if (EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED) { - assert((*output)->params.scale == EI_CLASSIFIER_TFLITE_OUTPUT_SCALE); - assert((*output)->params.zero_point == EI_CLASSIFIER_TFLITE_OUTPUT_ZEROPOINT); + scale_res = numpy::scale(fmatrix, 1 / 255.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } + } + else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN1_1) { + int scale_res = numpy::add(fmatrix, 1.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } + scale_res = numpy::scale(fmatrix, 1 / 2.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } + } + else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_0_255) { + int scale_res = numpy::scale(fmatrix, 1 / 255.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; + } + } + else if (block->image_scaling == EI_CLASSIFIER_IMAGE_SCALING_BGR_SUBTRACT_IMAGENET_MEAN) { + // Transpose BGR to RGB and add mean + for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix += 3) { + float b = fmatrix->buffer[ix + 0]; + fmatrix->buffer[ix + 0] = fmatrix->buffer[ix + 2] + tao_mean[2]; + fmatrix->buffer[ix + 1] += tao_mean[1]; + fmatrix->buffer[ix + 2] = b + tao_mean[0]; + } + int scale_res = numpy::scale(fmatrix, 1 / 255.0f); + if (scale_res != EIDSP_OK) { + ei_printf("ERR: Failed to scale matrix (%d)\n", scale_res); + return EI_IMPULSE_DSP_ERROR; } -#endif - tflite_first_run = false; } return EI_IMPULSE_OK; } +#endif + +/* Public functions ------------------------------------------------------- */ + +/* Tread carefully: public functions are not to be changed +to preserve backwards compatibility. Anything in this public section +will be documented by Doxygen. */ /** - * Run TFLite model - * - * @param ctx_start_ms Start time of the setup function (see above) - * @param output Output tensor - * @param interpreter TFLite interpreter (non-compiled models) - * @param tensor_arena Allocated arena (will be freed) - * @param result Struct for results - * @param debug Whether to print debug info - * - * @return EI_IMPULSE_OK if successful + * @defgroup ei_functions Functions + * + * Public-facing functions for running inference using the Edge Impulse C++ library. 
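In practice the lifecycle is either a single run_classifier() call per full window of data, or run_classifier_init() followed by repeated run_classifier_continuous() calls on successive slices and a final run_classifier_deinit(); both paths fan out to the process_impulse*() functions above.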
+ * + * **Source**: [classifier/ei_run_classifier.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/classifier/ei_run_classifier.h) + * + * @addtogroup ei_functions + * @{ */ -static EI_IMPULSE_ERROR inference_tflite_run(uint64_t ctx_start_ms, - TfLiteTensor* output, -#if EI_CLASSIFIER_OBJECT_DETECTION - TfLiteTensor* labels_tensor, - TfLiteTensor* scores_tensor, -#endif -#if (EI_CLASSIFIER_COMPILED != 1) - tflite::MicroInterpreter* interpreter, -#endif - uint8_t* tensor_arena, - ei_impulse_result_t *result, - bool debug) { -#if (EI_CLASSIFIER_COMPILED == 1) - trained_model_invoke(); -#else - // Run inference, and report any error - TfLiteStatus invoke_status = interpreter->Invoke(); - if (invoke_status != kTfLiteOk) { - error_reporter->Report("Invoke failed (%d)\n", invoke_status); - ei_aligned_free(tensor_arena); - return EI_IMPULSE_TFLITE_ERROR; - } - delete interpreter; -#endif - - uint64_t ctx_end_ms = ei_read_timer_ms(); - - result->timing.classification = ctx_end_ms - ctx_start_ms; - - // Read the predicted y value from the model's output tensor - if (debug) { - ei_printf("Predictions (time: %d ms.):\n", result->timing.classification); - } -#if EI_CLASSIFIER_OBJECT_DETECTION == 1 - fill_result_struct_f32(result, tflite::post_process_boxes, tflite::post_process_scores, tflite::post_process_classes, debug); - // fill_result_struct_f32(result, output->data.f, scores_tensor->data.f, labels_tensor->data.f, debug); -#else - bool int8_output = output->type == TfLiteType::kTfLiteInt8; - if (int8_output) { - fill_result_struct_i8(result, output->data.int8, output->params.zero_point, output->params.scale, debug); - } - else { - fill_result_struct_f32(result, output->data.f, debug); - } -#endif - -#if (EI_CLASSIFIER_COMPILED == 1) - trained_model_reset(ei_aligned_free); -#else - ei_aligned_free(tensor_arena); -#endif - - if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { - return EI_IMPULSE_CANCELED; - } - - return EI_IMPULSE_OK; -} -#endif // (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) /** - * @brief Do inferencing over the processed feature matrix - * - * @param fmatrix Processed matrix - * @param result Output classifier results - * @param[in] debug Debug output enable - * - * @return The ei impulse error. + * @brief Initialize static variables for running preprocessing and inference + * continuously. + * + * Initializes and clears any internal static variables needed by `run_classifier_continuous()`. + * This includes the moving average filter (MAF). This function should be called prior to + * calling `run_classifier_continuous()`. 
+ * + * **Blocking**: yes + * + * **Example**: [nano_ble33_sense_microphone_continuous.ino](https://github.com/edgeimpulse/example-lacuna-ls200/blob/main/nano_ble33_sense_microphone_continous/nano_ble33_sense_microphone_continuous.ino) */ -extern "C" EI_IMPULSE_ERROR run_inference( - ei::matrix_t *fmatrix, - ei_impulse_result_t *result, - bool debug = false) -{ -#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) - { - uint64_t ctx_start_ms; - TfLiteTensor* input; - TfLiteTensor* output; -#if EI_CLASSIFIER_OBJECT_DETECTION - TfLiteTensor* output_scores; - TfLiteTensor* output_labels; -#endif - uint8_t* tensor_arena; - -#if (EI_CLASSIFIER_COMPILED == 1) - EI_IMPULSE_ERROR init_res = inference_tflite_setup(&ctx_start_ms, &input, &output, - #if EI_CLASSIFIER_OBJECT_DETECTION - &output_labels, - &output_scores, - #endif - &tensor_arena); -#else - tflite::MicroInterpreter* interpreter; - EI_IMPULSE_ERROR init_res = inference_tflite_setup(&ctx_start_ms, &input, &output, - #if EI_CLASSIFIER_OBJECT_DETECTION - &output_labels, - &output_scores, - #endif - &interpreter, &tensor_arena); -#endif - if (init_res != EI_IMPULSE_OK) { - return init_res; - } - - // Place our calculated x value in the model's input tensor -#if EI_CLASSIFIER_OBJECT_DETECTION - bool uint8_input = input->type == TfLiteType::kTfLiteUInt8; - for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix++) { - if (uint8_input) { - float pixel = (float)fmatrix->buffer[ix]; - input->data.uint8[ix] = static_cast((pixel / EI_CLASSIFIER_TFLITE_INPUT_SCALE) + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); - } - else { - input->data.f[ix] = fmatrix->buffer[ix]; - } - } -#else - bool int8_input = input->type == TfLiteType::kTfLiteInt8; - for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix++) { - // Quantize the input if it is int8 - if (int8_input) { - input->data.int8[ix] = static_cast(round(fmatrix->buffer[ix] / input->params.scale) + input->params.zero_point); - // printf("float %ld : %d\r\n", ix, input->data.int8[ix]); - } else { - input->data.f[ix] = fmatrix->buffer[ix]; - } - } -#endif - -#if (EI_CLASSIFIER_COMPILED == 1) - EI_IMPULSE_ERROR run_res = inference_tflite_run(ctx_start_ms, output, - #if EI_CLASSIFIER_OBJECT_DETECTION - output_labels, - output_scores, - #endif - tensor_arena, result, debug); -#else - EI_IMPULSE_ERROR run_res = inference_tflite_run(ctx_start_ms, output, - #if EI_CLASSIFIER_OBJECT_DETECTION - output_labels, - output_scores, - #endif - interpreter, tensor_arena, result, debug); -#endif - - if (run_res != EI_IMPULSE_OK) { - return run_res; - } - } - -#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL - - { - static std::unique_ptr model = nullptr; - static std::unique_ptr interpreter = nullptr; - if (!model) { - model = tflite::FlatBufferModel::BuildFromBuffer((const char*)trained_tflite, trained_tflite_len); - if (!model) { - ei_printf("Failed to build TFLite model from buffer\n"); - return EI_IMPULSE_TFLITE_ERROR; - } - - tflite::ops::builtin::BuiltinOpResolver resolver; - tflite::InterpreterBuilder builder(*model, resolver); - builder(&interpreter); - - if (!interpreter) { - ei_printf("Failed to construct interpreter\n"); - return EI_IMPULSE_TFLITE_ERROR; - } - - if (interpreter->AllocateTensors() != kTfLiteOk) { - ei_printf("AllocateTensors failed\n"); - return EI_IMPULSE_TFLITE_ERROR; - } - } - - // Obtain pointers to the model's input and output tensors. 
- #if EI_CLASSIFIER_OBJECT_DETECTION == 1 - #if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 - int8_t* input = interpreter->typed_input_tensor(0); - #else - float* input = interpreter->typed_input_tensor(0); - #endif - #elif EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 - int8_t* input = interpreter->typed_input_tensor(0); - #else - float* input = interpreter->typed_input_tensor(0); - #endif - - if (!input) { - return EI_IMPULSE_INPUT_TENSOR_WAS_NULL; - } - - for (uint32_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix++) { - #if EI_CLASSIFIER_OBJECT_DETECTION == 1 - #if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 - float pixel = (float)fmatrix->buffer[ix]; - input[ix] = static_cast((pixel / EI_CLASSIFIER_TFLITE_INPUT_SCALE) + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); - #else - input[ix] = fmatrix->buffer[ix]; - #endif - #elif EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 - input[ix] = static_cast(round(fmatrix->buffer[ix] / EI_CLASSIFIER_TFLITE_INPUT_SCALE) + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); - #else - input[ix] = fmatrix->buffer[ix]; - #endif - } - - uint64_t ctx_start_ms = ei_read_timer_ms(); - - interpreter->Invoke(); - - uint64_t ctx_end_ms = ei_read_timer_ms(); - - result->timing.classification = ctx_end_ms - ctx_start_ms; - #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1 - int8_t* out_data = interpreter->typed_output_tensor(0); - #else - float* out_data = interpreter->typed_output_tensor(0); - #endif - - if (!out_data) { - return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL; - } - - if (debug) { - ei_printf("Predictions (time: %d ms.):\n", result->timing.classification); - } - -#if EI_CLASSIFIER_OBJECT_DETECTION == 1 - float *scores_tensor = interpreter->typed_output_tensor(EI_CLASSIFIER_TFLITE_OUTPUT_SCORE_TENSOR); - float *label_tensor = interpreter->typed_output_tensor(EI_CLASSIFIER_TFLITE_OUTPUT_LABELS_TENSOR); - if (!scores_tensor) { - return EI_IMPULSE_SCORE_TENSOR_WAS_NULL; - } - if (!label_tensor) { - return EI_IMPULSE_LABEL_TENSOR_WAS_NULL; - } - fill_result_struct_f32(result, out_data, scores_tensor, label_tensor, debug); -#else - - #if EI_CLASSIFIER_TFLITE_OUTPUT_QUANTIZED == 1 - fill_result_struct_i8(result, out_data, EI_CLASSIFIER_TFLITE_OUTPUT_ZEROPOINT, EI_CLASSIFIER_TFLITE_OUTPUT_SCALE, debug); - #else - fill_result_struct_f32(result, out_data, debug); - #endif - -#endif - - // on Linux we're not worried about free'ing (for now) - } - -#elif (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW) - { - uint64_t ctx_start_ms = ei_read_timer_ms(); - int8_t *input; - int8_t output[EI_CLASSIFIER_LABEL_COUNT]; - - input = (int8_t *)ei_malloc(fmatrix->rows * fmatrix->cols); - - if (!input) { - return EI_IMPULSE_ALLOC_FAILED; - } - - for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix++) { - input[ix] = static_cast( - round(fmatrix->buffer[ix] / EI_CLASSIFIER_TFLITE_INPUT_SCALE) + - EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); - } - - /* Run tensaiflow inference */ - infer(input, output); - - for (uint32_t ix = 0; ix < EI_CLASSIFIER_LABEL_COUNT; ix++) { - float value; - // Dequantize the output if it is int8 - value = static_cast(output[ix] - EI_CLASSIFIER_TFLITE_OUTPUT_ZEROPOINT) * - EI_CLASSIFIER_TFLITE_OUTPUT_SCALE; - - if (debug) { - ei_printf("%s:\t", ei_classifier_inferencing_categories[ix]); - ei_printf_float(value); - ei_printf("\n"); - } - result->classification[ix].label = ei_classifier_inferencing_categories[ix]; - result->classification[ix].value = value; - } - - result->timing.classification = ei_read_timer_ms() - ctx_start_ms; - - ei_free(input); - } - -#elif 
(EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT) - { - #if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 - #error "TensorRT requires an unquantized network" - #endif - - static bool first_run = true; - static char model_file_name[128]; - - if (first_run) { - snprintf(model_file_name, 128, "/tmp/ei-%s", trained_onnx_hash); - - FILE *file = fopen(model_file_name, "w"); - if (!file) { - ei_printf("ERR: TensorRT init failed to open '%s'\n", model_file_name); - return EI_IMPULSE_TENSORRT_INIT_FAILED; - } - - if (fwrite(trained_onnx, trained_onnx_len, 1, file) != 1) { - ei_printf("ERR: TensorRT init fwrite failed\n"); - return EI_IMPULSE_TENSORRT_INIT_FAILED; - } - - if (fclose(file) != 0) { - ei_printf("ERR: TensorRT init fclose failed\n"); - return EI_IMPULSE_TENSORRT_INIT_FAILED; - } - } - - float tensorrt_output[EI_CLASSIFIER_LABEL_COUNT]; - - // lazy initialize tensorRT context - if( ei_trt_handle == nullptr ) { - ei_trt_handle = libeitrt::create_EiTrt(model_file_name, debug); - } - - uint64_t ctx_start_ms = ei_read_timer_ms(); - - libeitrt::infer(ei_trt_handle, fmatrix->buffer, tensorrt_output, EI_CLASSIFIER_LABEL_COUNT); - uint64_t ctx_end_ms = ei_read_timer_ms(); - result->timing.classification = ctx_end_ms - ctx_start_ms; - - for( int i = 0; i < EI_CLASSIFIER_LABEL_COUNT; ++i) { - result->classification[i].label = ei_classifier_inferencing_categories[i]; - result->classification[i].value = tensorrt_output[i]; - } - } -#endif - -#if EI_CLASSIFIER_HAS_ANOMALY == 1 - - // Anomaly detection - { - uint64_t anomaly_start_ms = ei_read_timer_ms(); - - float input[EI_CLASSIFIER_ANOM_AXIS_SIZE]; - for (size_t ix = 0; ix < EI_CLASSIFIER_ANOM_AXIS_SIZE; ix++) { - input[ix] = fmatrix->buffer[EI_CLASSIFIER_ANOM_AXIS[ix]]; - } - standard_scaler(input, ei_classifier_anom_scale, ei_classifier_anom_mean, EI_CLASSIFIER_ANOM_AXIS_SIZE); - float anomaly = get_min_distance_to_cluster( - input, EI_CLASSIFIER_ANOM_AXIS_SIZE, ei_classifier_anom_clusters, EI_CLASSIFIER_ANOM_CLUSTER_COUNT); - - uint64_t anomaly_end_ms = ei_read_timer_ms(); - - if (debug) { - ei_printf("Anomaly score (time: %d ms.): ", static_cast(anomaly_end_ms - anomaly_start_ms)); - ei_printf_float(anomaly); - ei_printf("\n"); - } - - result->timing.anomaly = anomaly_end_ms - anomaly_start_ms; - - result->anomaly = anomaly; - } - -#endif - - if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { - return EI_IMPULSE_CANCELED; - } - - return EI_IMPULSE_OK; -} - -extern "C" EI_IMPULSE_ERROR run_inference_i16( - ei::matrix_i32_t *fmatrix, - ei_impulse_result_t *result, - bool debug = false) +extern "C" void run_classifier_init(void) { -#if EI_CLASSIFIER_OBJECT_DETECTION - return EI_IMPULSE_NOT_SUPPORTED_WITH_I16; -#else -#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) - { - uint64_t ctx_start_ms; - TfLiteTensor* input; - TfLiteTensor* output; - uint8_t* tensor_arena; - -#if (EI_CLASSIFIER_COMPILED == 1) - EI_IMPULSE_ERROR init_res = inference_tflite_setup(&ctx_start_ms, &input, &output, - &tensor_arena); -#else - tflite::MicroInterpreter* interpreter; - EI_IMPULSE_ERROR init_res = inference_tflite_setup(&ctx_start_ms, &input, &output, - &interpreter, - &tensor_arena); -#endif - if (init_res != EI_IMPULSE_OK) { - return init_res; - } - - EIDSP_i16 scale; - numpy::float_to_int16(&input->params.scale, &scale, 1); - - // Place our calculated x value in the model's input tensor - bool int8_input = input->type == TfLiteType::kTfLiteInt8; - for (size_t ix = 0; ix < fmatrix->rows * fmatrix->cols; ix++) { - // Quantize the 
input if it is int8 - if (int8_input) { - int32_t calc = (int32_t)fmatrix->buffer[ix] << 8; // Shift for scaler - calc /= scale; - calc += 0x80; // Round by adding 0.5 - calc >>= 8; // Shift to int8_t domain - input->data.int8[ix] = static_cast(calc + input->params.zero_point); - } else { - numpy::int16_to_float((EIDSP_i16 *)&fmatrix->buffer[ix], &input->data.f[ix], 1); - } - } - -#if (EI_CLASSIFIER_COMPILED == 1) - EI_IMPULSE_ERROR run_res = inference_tflite_run(ctx_start_ms, output, - tensor_arena, result, debug); -#else - EI_IMPULSE_ERROR run_res = inference_tflite_run(ctx_start_ms, output, - interpreter, tensor_arena, result, debug); -#endif - - if (run_res != EI_IMPULSE_OK) { - return run_res; - } - } - -#elif EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL - - ei_printf("ERR: run_classifier_i16 is not supported with full TensorFlow Lite\n"); - return EI_IMPULSE_TFLITE_ERROR; - -#endif - -#if EI_CLASSIFIER_HAS_ANOMALY == 1 - - // Anomaly detection - { - uint64_t anomaly_start_ms = ei_read_timer_ms(); - - float input[EI_CLASSIFIER_ANOM_AXIS_SIZE]; - for (size_t ix = 0; ix < EI_CLASSIFIER_ANOM_AXIS_SIZE; ix++) { - // input[ix] = fmatrix->buffer[EI_CLASSIFIER_ANOM_AXIS[ix]]; - // numpy::int16_to_float(&fmatrix->buffer[EI_CLASSIFIER_ANOM_AXIS[ix]], &input[ix], 1); - input[ix] = (float)fmatrix->buffer[EI_CLASSIFIER_ANOM_AXIS[ix]] / 32768.f; - } - standard_scaler(input, ei_classifier_anom_scale, ei_classifier_anom_mean, EI_CLASSIFIER_ANOM_AXIS_SIZE); - float anomaly = get_min_distance_to_cluster( - input, EI_CLASSIFIER_ANOM_AXIS_SIZE, ei_classifier_anom_clusters, EI_CLASSIFIER_ANOM_CLUSTER_COUNT); - - uint64_t anomaly_end_ms = ei_read_timer_ms(); + classifier_continuous_features_written = 0; + ei_dsp_clear_continuous_audio_state(); + init_impulse(&ei_default_impulse); - if (debug) { - ei_printf("Anomaly score (time: %d ms.): ", static_cast(anomaly_end_ms - anomaly_start_ms)); - ei_printf_float(anomaly); - ei_printf("\n"); - } +#if EI_CLASSIFIER_CALIBRATION_ENABLED - result->timing.anomaly = anomaly_end_ms - anomaly_start_ms; + const auto impulse = ei_default_impulse.impulse; + const ei_model_performance_calibration_t *calibration = &impulse->calibration; - result->anomaly = anomaly; + if(calibration != NULL) { + avg_scores = new RecognizeEvents(calibration, + impulse->label_count, impulse->slice_size, impulse->interval_ms); } - #endif - - if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { - return EI_IMPULSE_CANCELED; - } - - return EI_IMPULSE_OK; -#endif // OBJECT_DETECTION } /** - * Run the classifier over a raw features array - * @param raw_features Raw features array - * @param raw_features_size Size of the features array - * @param result Object to store the results in - * @param debug Whether to show debug messages (default: false) + * @brief Initialize static variables for running preprocessing and inference + * continuously. + * + * Initializes and clears any internal static variables needed by `run_classifier_continuous()`. + * This includes the moving average filter (MAF). This function should be called prior to + * calling `run_classifier_continuous()`. 
+ * + * **Blocking**: yes + * + * **Example**: [nano_ble33_sense_microphone_continuous.ino](https://github.com/edgeimpulse/example-lacuna-ls200/blob/main/nano_ble33_sense_microphone_continous/nano_ble33_sense_microphone_continuous.ino) + * + * @param[in] handle struct with information about model and DSP */ -extern "C" EI_IMPULSE_ERROR run_classifier( - signal_t *signal, - ei_impulse_result_t *result, - bool debug = false) -{ -#if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 && EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE - // Shortcut for quantized image models - if (can_run_classifier_image_quantized() == EI_IMPULSE_OK) { - return run_classifier_image_quantized(signal, result, debug); - } -#endif - - // if (debug) { - // static float buf[1000]; - // printf("Raw data: "); - // for (size_t ix = 0; ix < 16000; ix += 1000) { - // int r = signal->get_data(ix, 1000, buf); - // for (size_t jx = 0; jx < 1000; jx++) { - // printf("%.0f, ", buf[jx]); - // } - // } - // printf("\n"); - // } - - memset(result, 0, sizeof(ei_impulse_result_t)); - - ei::matrix_t features_matrix(1, EI_CLASSIFIER_NN_INPUT_FRAME_SIZE); - - uint64_t dsp_start_ms = ei_read_timer_ms(); - - size_t out_features_index = 0; - - for (size_t ix = 0; ix < ei_dsp_blocks_size; ix++) { - ei_model_dsp_t block = ei_dsp_blocks[ix]; - - if (out_features_index + block.n_output_features > EI_CLASSIFIER_NN_INPUT_FRAME_SIZE) { - ei_printf("ERR: Would write outside feature buffer\n"); - return EI_IMPULSE_DSP_ERROR; - } - - ei::matrix_t fm(1, block.n_output_features, features_matrix.buffer + out_features_index); - -#if EIDSP_SIGNAL_C_FN_POINTER - if (block.axes_size != EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { - ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n"); - return EI_IMPULSE_DSP_ERROR; - } - int ret = block.extract_fn(signal, &fm, block.config, EI_CLASSIFIER_FREQUENCY); -#else - SignalWithAxes swa(signal, block.axes, block.axes_size); - int ret = block.extract_fn(swa.get_signal(), &fm, block.config, EI_CLASSIFIER_FREQUENCY); -#endif - - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to run DSP process (%d)\n", ret); - return EI_IMPULSE_DSP_ERROR; - } - - if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { - return EI_IMPULSE_CANCELED; - } - - out_features_index += block.n_output_features; - } - - result->timing.dsp = ei_read_timer_ms() - dsp_start_ms; - - if (debug) { - ei_printf("Features (%d ms.): ", result->timing.dsp); - for (size_t ix = 0; ix < features_matrix.cols; ix++) { - ei_printf_float(features_matrix.buffer[ix]); - ei_printf(" "); - } - ei_printf("\n"); - } - -#if EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE - if (debug) { - ei_printf("Running neural network...\n"); - } -#endif - - return run_inference(&features_matrix, result, debug); -} - -#if defined(EI_CLASSIFIER_USE_QUANTIZED_DSP_BLOCK) && EI_CLASSIFIER_USE_QUANTIZED_DSP_BLOCK == 1 - -extern "C" EI_IMPULSE_ERROR run_classifier_i16( - signal_i16_t *signal, - ei_impulse_result_t *result, - bool debug = false) +__attribute__((unused)) void run_classifier_init(ei_impulse_handle_t *handle) { + classifier_continuous_features_written = 0; + ei_dsp_clear_continuous_audio_state(); + init_impulse(handle); - memset(result, 0, sizeof(ei_impulse_result_t)); - - ei::matrix_i32_t features_matrix(1, EI_CLASSIFIER_NN_INPUT_FRAME_SIZE); - - uint64_t dsp_start_ms = ei_read_timer_ms(); - - size_t out_features_index = 0; - - for (size_t ix = 0; ix < ei_dsp_blocks_size; ix++) { - ei_model_dsp_i16_t block 
= ei_dsp_blocks_i16[ix]; - - if (out_features_index + block.n_output_features > EI_CLASSIFIER_NN_INPUT_FRAME_SIZE) { - ei_printf("ERR: Would write outside feature buffer\n"); - return EI_IMPULSE_DSP_ERROR; - } - - ei::matrix_i32_t fm(1, block.n_output_features, features_matrix.buffer + out_features_index); - -#if EIDSP_SIGNAL_C_FN_POINTER - if (block.axes_size != EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { - ei_printf("ERR: EIDSP_SIGNAL_C_FN_POINTER can only be used when all axes are selected for DSP blocks\n"); - return EI_IMPULSE_DSP_ERROR; - } - int ret = block.extract_fn(signal, &fm, block.config, EI_CLASSIFIER_FREQUENCY); -#else - SignalWithAxesI16 swa(signal, block.axes, block.axes_size); - int ret = block.extract_fn(swa.get_signal(), &fm, block.config, EI_CLASSIFIER_FREQUENCY); -#endif - - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to run DSP process (%d)\n", ret); - return EI_IMPULSE_DSP_ERROR; - } - - if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { - return EI_IMPULSE_CANCELED; - } - - out_features_index += block.n_output_features; - } - - result->timing.dsp = ei_read_timer_ms() - dsp_start_ms; +#if EI_CLASSIFIER_CALIBRATION_ENABLED + auto impulse = handle->impulse; + const ei_model_performance_calibration_t *calibration = &impulse->calibration; - if (debug) { - ei_printf("Features (%d ms.): ", result->timing.dsp); - for (size_t ix = 0; ix < features_matrix.cols; ix++) { - ei_printf_float((float)features_matrix.buffer[ix] / 32768.f); - ei_printf(" "); - } - ei_printf("\n"); - } - -#if EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE - if (debug) { - ei_printf("Running neural network...\n"); + if(calibration != NULL) { + avg_scores = new RecognizeEvents(calibration, + impulse->label_count, impulse->slice_size, impulse->interval_ms); } #endif - - return run_inference_i16(&features_matrix, result, debug); } -#endif //EI_CLASSIFIER_USE_QUANTIZED_DSP_BLOCK /** - * @brief Calculates the cepstral mean and variable normalization. - * - * @param matrix Source and destination matrix - * @param config_ptr ei_dsp_config_mfcc_t struct pointer + * @brief Deletes static variables when running preprocessing and inference continuously. + * + * Deletes internal static variables used by `run_classifier_continuous()`, which + * includes the moving average filter (MAF). This function should be called when you + * are done running continuous classification. + * + * **Blocking**: yes + * + * **Example**: [ei_run_audio_impulse.cpp](https://github.com/edgeimpulse/firmware-nordic-thingy53/blob/main/src/inference/ei_run_audio_impulse.cpp) */ -static void calc_cepstral_mean_and_var_normalization_mfcc(ei_matrix *matrix, void *config_ptr) +extern "C" void run_classifier_deinit(void) { - ei_dsp_config_mfcc_t *config = (ei_dsp_config_mfcc_t *)config_ptr; - - uint32_t original_matrix_size = matrix->rows * matrix->cols; - - /* Modify rows and colums ration for matrix normalization */ - matrix->rows = original_matrix_size / config->num_cepstral; - matrix->cols = config->num_cepstral; - - // cepstral mean and variance normalization - int ret = speechpy::processing::cmvnw(matrix, config->win_size, true, false); - if (ret != EIDSP_OK) { - ei_printf("ERR: cmvnw failed (%d)\n", ret); - return; + if((void *)avg_scores != NULL) { + delete avg_scores; } - - /* Reset rows and columns ratio */ - matrix->rows = 1; - matrix->cols = original_matrix_size; } /** - * @brief Calculates the cepstral mean and variable normalization. + * @brief Run preprocessing (DSP) on new slice of raw features. 
Add output features + * to rolling matrix and run inference on full sample. + * + * Accepts a new slice of features give by the callback defined in the `signal` parameter. + * It performs preprocessing (DSP) on this new slice of features and appends the output to + * a sliding window of pre-processed features (stored in a static features matrix). The matrix + * stores the new slice and as many old slices as necessary to make up one full sample for + * performing inference. + * + * `run_classifier_init()` must be called before making any calls to + * `run_classifier_continuous().` + * + * For example, if you are doing keyword spotting on 1-second slices of audio and you want to + * perform inference 4 times per second (given by `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW`), you + * would collect 0.25 seconds of audio and call run_classifier_continuous(). The function would + * compute the Mel-Frequency Cepstral Coefficients (MFCCs) for that 0.25 second slice of audio, + * drop the oldest 0.25 seconds' worth of MFCCs from its internal matrix, and append the newest + * slice of MFCCs. This process allows the library to keep track of the pre-processed features + * (e.g. MFCCs) in the window instead of the entire set of raw features (e.g. raw audio data), + * which can potentially save a lot of space in RAM. After updating the static matrix, + * inference is performed using the whole matrix, which acts as a sliding window of + * pre-processed features. + * + * Additionally, a moving average filter (MAF) can be enabled for `run_classifier_continuous()`, + * which averages (arithmetic mean) the last *n* inference results for each class. *n* is + * `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW / 2`. In our example above, if we enabled the MAF, the + * values in `result` would contain predictions averaged from the previous 2 inferences. + * + * To learn more about `run_classifier_continuous()`, see + * [this guide](https://docs.edgeimpulse.com/docs/tutorials/advanced-inferencing/continuous-audio-sampling) + * on continuous audio sampling. While the guide is written for audio signals, the concepts of continuous sampling and inference can be extrapolated to any time-series data. + * + * **Blocking**: yes + * + * **Example**: [nano_ble33_sense_microphone_continuous.ino](https://github.com/edgeimpulse/example-lacuna-ls200/blob/main/nano_ble33_sense_microphone_continous/nano_ble33_sense_microphone_continuous.ino) + * + * @param[in] signal Pointer to a signal_t struct that contains the number of elements in the + * slice of raw features (e.g. `EI_CLASSIFIER_SLICE_SIZE`) and a pointer to a callback that reads + * in the slice of raw features. + * @param[out] result Pointer to an `ei_impulse_result_t` struct that contains the various output + * results from inference after run_classifier() returns. + * @param[in] debug Print internal preprocessing and inference debugging information via + * `ei_printf()`. + * @param[in] enable_maf Enable the moving average filter (MAF) for the classifier. * - * @param matrix Source and destination matrix - * @param config_ptr ei_dsp_config_mfe_t struct pointer + * @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference + * completed successfully. 
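As a rough worked illustration of the filter: with `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW` set to 4, the MAF keeps n = 4 / 2 = 2 results per class, so if the last two raw scores for a class were 0.9 and 0.7, the value reported in `result` is (0.9 + 0.7) / 2 = 0.8.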
*/ -static void calc_cepstral_mean_and_var_normalization_mfe(ei_matrix *matrix, void *config_ptr) +extern "C" EI_IMPULSE_ERROR run_classifier_continuous( + signal_t *signal, + ei_impulse_result_t *result, + bool debug = false, + bool enable_maf = true) { - ei_dsp_config_mfe_t *config = (ei_dsp_config_mfe_t *)config_ptr; - - uint32_t original_matrix_size = matrix->rows * matrix->cols; - - /* Modify rows and colums ration for matrix normalization */ - matrix->rows = (original_matrix_size) / config->num_filters; - matrix->cols = config->num_filters; - - if (config->implementation_version < 3) { - // cepstral mean and variance normalization - int ret = speechpy::processing::cmvnw(matrix, config->win_size, false, true); - if (ret != EIDSP_OK) { - ei_printf("ERR: cmvnw failed (%d)\n", ret); - return; - } - } - else { - // normalization - int ret = speechpy::processing::mfe_normalization(matrix, config->noise_floor_db); - if (ret != EIDSP_OK) { - ei_printf("ERR: normalization failed (%d)\n", ret); - return; - } - } - - /* Reset rows and columns ratio */ - matrix->rows = 1; - matrix->cols = (original_matrix_size); + auto& impulse = ei_default_impulse; + return process_impulse_continuous(&impulse, signal, result, debug, enable_maf); } /** - * @brief Calculates the cepstral mean and variable normalization. + * @brief Run preprocessing (DSP) on new slice of raw features. Add output features + * to rolling matrix and run inference on full sample. + * + * Accepts a new slice of features give by the callback defined in the `signal` parameter. + * It performs preprocessing (DSP) on this new slice of features and appends the output to + * a sliding window of pre-processed features (stored in a static features matrix). The matrix + * stores the new slice and as many old slices as necessary to make up one full sample for + * performing inference. + * + * `run_classifier_init()` must be called before making any calls to + * `run_classifier_continuous().` + * + * For example, if you are doing keyword spotting on 1-second slices of audio and you want to + * perform inference 4 times per second (given by `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW`), you + * would collect 0.25 seconds of audio and call run_classifier_continuous(). The function would + * compute the Mel-Frequency Cepstral Coefficients (MFCCs) for that 0.25 second slice of audio, + * drop the oldest 0.25 seconds' worth of MFCCs from its internal matrix, and append the newest + * slice of MFCCs. This process allows the library to keep track of the pre-processed features + * (e.g. MFCCs) in the window instead of the entire set of raw features (e.g. raw audio data), + * which can potentially save a lot of space in RAM. After updating the static matrix, + * inference is performed using the whole matrix, which acts as a sliding window of + * pre-processed features. + * + * Additionally, a moving average filter (MAF) can be enabled for `run_classifier_continuous()`, + * which averages (arithmetic mean) the last *n* inference results for each class. *n* is + * `EI_CLASSIFIER_SLICES_PER_MODEL_WINDOW / 2`. In our example above, if we enabled the MAF, the + * values in `result` would contain predictions averaged from the previous 2 inferences. + * + * To learn more about `run_classifier_continuous()`, see + * [this guide](https://docs.edgeimpulse.com/docs/tutorials/advanced-inferencing/continuous-audio-sampling) + * on continuous audio sampling. 
While the guide is written for audio signals, the concepts of continuous sampling and inference can be extrapolated to any time-series data. + * + * **Blocking**: yes + * + * **Example**: [nano_ble33_sense_microphone_continuous.ino](https://github.com/edgeimpulse/example-lacuna-ls200/blob/main/nano_ble33_sense_microphone_continous/nano_ble33_sense_microphone_continuous.ino) + * + * @param[in] impulse `ei_impulse_handle_t` struct with information about preprocessing and model. + * @param[in] signal Pointer to a signal_t struct that contains the number of elements in the + * slice of raw features (e.g. `EI_CLASSIFIER_SLICE_SIZE`) and a pointer to a callback that reads + * in the slice of raw features. + * @param[out] result Pointer to an `ei_impulse_result_t` struct that contains the various output + * results from inference after run_classifier() returns. + * @param[in] debug Print internal preprocessing and inference debugging information via + * `ei_printf()`. + * @param[in] enable_maf Enable the moving average filter (MAF) for the classifier. * - * @param matrix Source and destination matrix - * @param config_ptr ei_dsp_config_spectrogram_t struct pointer + * @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference + * completed successfully. */ -static void calc_cepstral_mean_and_var_normalization_spectrogram(ei_matrix *matrix, void *config_ptr) +__attribute__((unused)) EI_IMPULSE_ERROR run_classifier_continuous( + ei_impulse_handle_t *impulse, + signal_t *signal, + ei_impulse_result_t *result, + bool debug = false, + bool enable_maf = true) { - ei_dsp_config_spectrogram_t *config = (ei_dsp_config_spectrogram_t *)config_ptr; - - uint32_t original_matrix_size = matrix->rows * matrix->cols; - - /* Modify rows and colums ration for matrix normalization */ - matrix->cols = config->fft_length / 2 + 1; - matrix->rows = (original_matrix_size) / matrix->cols; - - if (config->implementation_version < 3) { - int ret = numpy::normalize(matrix); - if (ret != EIDSP_OK) { - ei_printf("ERR: normalization failed (%d)\n", ret); - return; - } - } - else { - // normalization - int ret = speechpy::processing::spectrogram_normalization(matrix, config->noise_floor_db); - if (ret != EIDSP_OK) { - ei_printf("ERR: normalization failed (%d)\n", ret); - return; - } - } - - /* Reset rows and columns ratio */ - matrix->rows = 1; - matrix->cols = (original_matrix_size); + return process_impulse_continuous(impulse, signal, result, debug, enable_maf); } /** - * Check if the current impulse could be used by 'run_classifier_image_quantized' + * @brief Run the classifier over a raw features array. + * + * + * Overloaded function [run_classifier()](#run_classifier-1) that defaults to the single impulse. + * + * **Blocking**: yes + * + * @param[in] signal Pointer to a `signal_t` struct that contains the total length of the raw + * feature array, which must match EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, and a pointer to a callback + * that reads in the raw features. + * @param[out] result Pointer to an ei_impulse_result_t struct that will contain the various output + * results from inference after `run_classifier()` returns. + * @param[in] debug Print internal preprocessing and inference debugging information via `ei_printf()`. + * + * @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference + * completed successfully. 
*/ -__attribute__((unused)) static EI_IMPULSE_ERROR can_run_classifier_image_quantized() { -#if (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_TFLITE) - return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; -#endif - -#if EI_CLASSIFIER_HAS_ANOMALY == 1 - return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; -#endif - - // Check if we have a quantized NN -#if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED != 1 - return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; -#endif - - // And if we have one DSP block which operates on images... - if (ei_dsp_blocks_size != 1 || ei_dsp_blocks[0].extract_fn != extract_image_features) { - return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; - } - - return EI_IMPULSE_OK; +extern "C" EI_IMPULSE_ERROR run_classifier( + signal_t *signal, + ei_impulse_result_t *result, + bool debug = false) +{ + return process_impulse(&ei_default_impulse, signal, result, debug); } -#if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 && EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE /** - * Special function to run the classifier on images, only works on TFLite models (either interpreter or EON) - * that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized' - * returns EI_IMPULSE_OK. + * @brief Run the classifier over a raw features array. + * + * + * Accepts a `signal_t` input struct pointing to a callback that reads in pages of raw features. + * `run_classifier()` performs any necessary preprocessing on the raw features (e.g. DSP, cropping + * of images, etc.) before performing inference. Results from inference are stored in an + * `ei_impulse_result_t` struct. + * + * **Blocking**: yes + * + * **Example**: [standalone inferencing main.cpp](https://github.com/edgeimpulse/example-standalone-inferencing/blob/master/source/main.cpp) + * + * @param[in] impulse Pointer to an `ei_impulse_handle_t` struct that contains the model and + * preprocessing information. + * @param[in] signal Pointer to a `signal_t` struct that contains the total length of the raw + * feature array, which must match EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, and a pointer to a callback + * that reads in the raw features. + * @param[out] result Pointer to an ei_impulse_result_t struct that will contain the various output + * results from inference after `run_classifier()` returns. + * @param[in] debug Print internal preprocessing and inference debugging information via `ei_printf()`. + * + * @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference + * completed successfully. 
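For reference, the overload below is what the default wrapper above resolves to, so the two calls are interchangeable for the default impulse:

    run_classifier(&ei_default_impulse, &signal, &result, false);  // same as run_classifier(&signal, &result, false)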
*/ -extern "C" EI_IMPULSE_ERROR run_classifier_image_quantized( +__attribute__((unused)) EI_IMPULSE_ERROR run_classifier( + ei_impulse_handle_t *impulse, signal_t *signal, ei_impulse_result_t *result, bool debug = false) { - EI_IMPULSE_ERROR verify_res = can_run_classifier_image_quantized(); - if (verify_res != EI_IMPULSE_OK) { - return verify_res; - } - - memset(result, 0, sizeof(ei_impulse_result_t)); - -#if (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_TFLITE) - return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; -#else - uint64_t ctx_start_ms; - TfLiteTensor* input; - TfLiteTensor* output; -#if EI_CLASSIFIER_OBJECT_DETECTION - TfLiteTensor* output_scores; - TfLiteTensor* output_labels; -#endif - uint8_t* tensor_arena; - -#if (EI_CLASSIFIER_COMPILED == 1) - EI_IMPULSE_ERROR init_res = inference_tflite_setup(&ctx_start_ms, &input, &output, - #if EI_CLASSIFIER_OBJECT_DETECTION - &output_labels, - &output_scores, - #endif - &tensor_arena); -#else - tflite::MicroInterpreter* interpreter; - EI_IMPULSE_ERROR init_res = inference_tflite_setup(&ctx_start_ms, &input, &output, - #if EI_CLASSIFIER_OBJECT_DETECTION - &output_labels, - &output_scores, - #endif - &interpreter, - &tensor_arena); -#endif - if (init_res != EI_IMPULSE_OK) { - return init_res; - } - - if (input->type != TfLiteType::kTfLiteInt8) { - return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; - } - - uint64_t dsp_start_ms = ei_read_timer_ms(); - - // features matrix maps around the input tensor to not allocate any memory - ei::matrix_i8_t features_matrix(1, EI_CLASSIFIER_NN_INPUT_FRAME_SIZE, input->data.int8); - - // run DSP process and quantize automatically - int ret = extract_image_features_quantized(signal, &features_matrix, ei_dsp_blocks[0].config, EI_CLASSIFIER_FREQUENCY); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to run DSP process (%d)\n", ret); - return EI_IMPULSE_DSP_ERROR; - } - - if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { - return EI_IMPULSE_CANCELED; - } - - result->timing.dsp = ei_read_timer_ms() - dsp_start_ms; - - if (debug) { - ei_printf("Features (%d ms.): ", result->timing.dsp); - for (size_t ix = 0; ix < features_matrix.cols; ix++) { - ei_printf_float((features_matrix.buffer[ix] - EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT) * EI_CLASSIFIER_TFLITE_INPUT_SCALE); - ei_printf(" "); - } - ei_printf("\n"); - } + return process_impulse(impulse, signal, result, debug); +} - ctx_start_ms = ei_read_timer_ms(); +/** @} */ // end of ei_functions Doxygen group -#if (EI_CLASSIFIER_COMPILED == 1) - EI_IMPULSE_ERROR run_res = inference_tflite_run(ctx_start_ms, output, - #if EI_CLASSIFIER_OBJECT_DETECTION - output_labels, - output_scores, - #endif - tensor_arena, result, debug); -#else - EI_IMPULSE_ERROR run_res = inference_tflite_run(ctx_start_ms, output, - #if EI_CLASSIFIER_OBJECT_DETECTION - output_labels, - output_scores, - #endif - interpreter, tensor_arena, result, debug); -#endif +/* Deprecated functions ------------------------------------------------------- */ - if (run_res != EI_IMPULSE_OK) { - return run_res; - } - - return EI_IMPULSE_OK; -#endif // EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_TFLITE -} -#endif // #if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 && EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE +/* These functions are being deprecated and possibly will be removed or moved in future. +Do not use these - if possible, change your code to reflect the upcoming changes. 
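
One possible migration path (a sketch only; the sensor-reading code is application-specific and
the read_sensor_data() name below is illustrative): sample into your own buffer, wrap it with
numpy::signal_from_buffer() and call run_classifier(), which is what run_impulse() does internally:

    static float buf[EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE] = { 0 };
    read_sensor_data(buf, EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE);   // your own sampling code

    signal_t signal;
    int err = numpy::signal_from_buffer(buf, EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, &signal);
    if (err != 0) {
        ei_printf("ERR: signal_from_buffer failed (%d)\n", err);
        return;
    }

    ei_impulse_result_t result = { 0 };
    EI_IMPULSE_ERROR r = run_classifier(&signal, &result, false);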
*/ #if EIDSP_SIGNAL_C_FN_POINTER == 0 /** - * Run the impulse, if you provide an instance of sampler it will also persist the data for you - * @param sampler Instance to an **initialized** sampler - * @param result Object to store the results in - * @param data_fn Function to retrieve data from sensors - * @param debug Whether to log debug messages (default false) + * @brief Run the impulse, if you provide an instance of sampler it will also persist + * the data for you. + * + * @deprecated This function is deprecated and will be removed in future versions. Use + * `run_classifier()` instead. + * + * @param[in] sampler Instance to an **initialized** sampler + * @param[out] result Object to store the results in + * @param[in] data_fn Callback function to retrieve data from sensors + * @param[in] debug Whether to log debug messages (default false) + * + * @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference + * completed successfully. */ __attribute__((unused)) EI_IMPULSE_ERROR run_impulse( -#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1 +#if (defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1) || defined(__DOXYGEN__) EdgeSampler *sampler, #endif ei_impulse_result_t *result, @@ -1456,7 +1049,9 @@ __attribute__((unused)) EI_IMPULSE_ERROR run_impulse( #endif bool debug = false) { - float *x = (float*)calloc(EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, sizeof(float)); + auto& impulse = *(ei_default_impulse.impulse); + + float *x = (float*)calloc(impulse.dsp_input_frame_size, sizeof(float)); if (!x) { return EI_IMPULSE_OUT_OF_MEMORY; } @@ -1466,15 +1061,15 @@ __attribute__((unused)) EI_IMPULSE_ERROR run_impulse( uint64_t sampling_us_start = ei_read_timer_us(); // grab some data - for (int i = 0; i < EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE; i += EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { + for (int i = 0; i < (int)impulse.dsp_input_frame_size; i += impulse.raw_samples_per_frame) { uint64_t curr_us = ei_read_timer_us() - sampling_us_start; - next_tick = curr_us + (EI_CLASSIFIER_INTERVAL_MS * 1000); + next_tick = curr_us + (impulse.interval_ms * 1000); - data_fn(x + i, EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME); + data_fn(x + i, impulse.raw_samples_per_frame); #if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1 if (sampler != NULL) { - sampler->write_sensor_data(x + i, EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME); + sampler->write_sensor_data(x + i, impulse.raw_samples_per_frame); } #endif @@ -1489,7 +1084,7 @@ __attribute__((unused)) EI_IMPULSE_ERROR run_impulse( result->timing.sampling = (ei_read_timer_us() - sampling_us_start) / 1000; signal_t signal; - int err = numpy::signal_from_buffer(x, EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, &signal); + int err = numpy::signal_from_buffer(x, impulse.dsp_input_frame_size, &signal); if (err != 0) { free(x); ei_printf("ERR: signal_from_buffer failed (%d)\n", err); @@ -1501,65 +1096,19 @@ __attribute__((unused)) EI_IMPULSE_ERROR run_impulse( return r; } -#if defined(EI_CLASSIFIER_USE_QUANTIZED_DSP_BLOCK) && EI_CLASSIFIER_USE_QUANTIZED_DSP_BLOCK == 1 - -__attribute__((unused)) EI_IMPULSE_ERROR run_impulse_i16( -#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1 - EdgeSampler *sampler, -#endif - ei_impulse_result_t *result, -#ifdef __MBED__ - mbed::Callback data_fn, -#else - std::function data_fn, -#endif - bool debug = false) { - - int16_t x[EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE] = { 0 }; - - uint64_t next_tick = 0; - - uint64_t sampling_us_start = ei_read_timer_us(); - - 
// grab some data - for (int i = 0; i < EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE; i += EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { - uint64_t curr_us = ei_read_timer_us() - sampling_us_start; - - next_tick = curr_us + (EI_CLASSIFIER_INTERVAL_MS * 1000); - - data_fn(x + i, EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME); -#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1 - if (sampler != NULL) { - sampler->write_sensor_data(x + i, EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME); - } -#endif - - if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { - return EI_IMPULSE_CANCELED; - } - - while (next_tick > ei_read_timer_us() - sampling_us_start); - } - - result->timing.sampling = (ei_read_timer_us() - sampling_us_start) / 1000; - - signal_i16_t signal; - int err = numpy::signal_from_buffer_i16(x, EI_CLASSIFIER_DSP_INPUT_FRAME_SIZE, &signal); - if (err != 0) { - ei_printf("ERR: signal_from_buffer failed (%d)\n", err); - return EI_IMPULSE_DSP_ERROR; - } - - return run_classifier_i16(&signal, result, debug); -} -#endif //EI_CLASSIFIER_USE_QUANTIZED_DSP_BLOCK - -#if defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1 +#if (defined(EI_CLASSIFIER_HAS_SAMPLER) && EI_CLASSIFIER_HAS_SAMPLER == 1) || defined(__DOXYGEN__) /** - * Run the impulse, does not persist data - * @param result Object to store the results in - * @param data_fn Function to retrieve data from sensors - * @param debug Whether to log debug messages (default false) + * @brief Run the impulse, does not persist data. + * + * @deprecated This function is deprecated and will be removed in future versions. Use + * `run_classifier()` instead. + * + * @param[out] result Object to store the results in + * @param[in] data_fn Callback function to retrieve data from sensors + * @param[out] debug Whether to log debug messages (default false) + * + * @return Error code as defined by `EI_IMPULSE_ERROR` enum. Will be `EI_IMPULSE_OK` if inference + * completed successfully. */ __attribute__((unused)) EI_IMPULSE_ERROR run_impulse( ei_impulse_result_t *result, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_c.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_c.cpp new file mode 100644 index 0000000..4419384 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_c.cpp @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 + +#include "ei_run_classifier_c.h" + +/** + * This function definition is just there to make sure + * that the symbol is not removed from the library. 
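 *
 * A usage sketch under the stated build assumption: compile the C++ sources with EI_C_LINKAGE
 * defined to 1 (for example by passing -DEI_C_LINKAGE=1 to the compiler) so this wrapper is
 * built, then call the C-linkage symbol the same way as the C++ run_classifier():
 *
 *   EI_IMPULSE_ERROR r = ei_run_classifier(&signal, &result, false);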
+ */ +EI_IMPULSE_ERROR ei_run_classifier( + signal_t *signal, + ei_impulse_result_t *result, + bool debug) { + + return run_classifier(signal, result, debug); +} + +#endif // #if defined(__cplusplus) && EI_C_LINKAGE == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_c.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_c.h new file mode 100644 index 0000000..426958b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_c.h @@ -0,0 +1,39 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_C_H_ +#define _EDGE_IMPULSE_RUN_CLASSIFIER_C_H_ + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 + +#include "ei_run_classifier.h" + +/** + * Run the classifier over a raw features array + * @param raw_features Raw features array + * @param raw_features_size Size of the features array + * @param result Object to store the results in + * @param debug Whether to show debug messages (default: false) + */ +extern "C" EI_IMPULSE_ERROR ei_run_classifier( + signal_t *signal, + ei_impulse_result_t *result, + bool debug = false); + +#endif // #if defined(__cplusplus) && EI_C_LINKAGE == 1 + +#endif // _EDGE_IMPULSE_RUN_CLASSIFIER_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_image.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_image.h index 5ee9299..37ff775 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_image.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_classifier_image.h @@ -1,3 +1,20 @@ +/* + * Copyright (c) 2022 Edge Impulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + #ifndef _EDGE_IMPULSE_RUN_CLASSIFIER_IMAGE_H_ #define _EDGE_IMPULSE_RUN_CLASSIFIER_IMAGE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_dsp.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_dsp.h index 997702b..e46612c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_dsp.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_run_dsp.h @@ -1,32 +1,33 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
+ * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EDGE_IMPULSE_RUN_DSP_H_ #define _EDGE_IMPULSE_RUN_DSP_H_ -#include "model-parameters/model_metadata.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" #include "edge-impulse-sdk/dsp/spectral/spectral.hpp" #include "edge-impulse-sdk/dsp/speechpy/speechpy.hpp" #include "edge-impulse-sdk/classifier/ei_signal_with_range.h" +#include "edge-impulse-sdk/dsp/ei_flatten.h" +#include "model-parameters/model_metadata.h" + +#if EI_CLASSIFIER_HR_ENABLED +#include "edge-impulse-sdk/dsp/ei_hr.hpp" +#endif #if defined(__cplusplus) && EI_C_LINKAGE == 1 extern "C" { @@ -51,345 +52,116 @@ static float *ei_dsp_cont_current_frame = nullptr; static size_t ei_dsp_cont_current_frame_size = 0; static int ei_dsp_cont_current_frame_ix = 0; -__attribute__((unused)) int extract_spectral_analysis_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) { - ei_dsp_config_spectral_analysis_t config = *((ei_dsp_config_spectral_analysis_t*)config_ptr); - - int ret; - - const float sampling_freq = frequency; - - // input matrix from the raw signal - matrix_t input_matrix(signal->total_length / config.axes, config.axes); - if (!input_matrix.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - signal->get_data(0, signal->total_length, input_matrix.buffer); - - // scale the signal - ret = numpy::scale(&input_matrix, config.scale_axes); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to scale signal (%d)\n", ret); - EIDSP_ERR(ret); - } - - // transpose the matrix so we have one row per axis (nifty!) - ret = numpy::transpose(&input_matrix); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to transpose matrix (%d)\n", ret); - EIDSP_ERR(ret); - } - - // the spectral edges that we want to calculate - matrix_t edges_matrix_in(64, 1); - size_t edge_matrix_ix = 0; - - char spectral_str[128] = { 0 }; - if (strlen(config.spectral_power_edges) > sizeof(spectral_str) - 1) { - EIDSP_ERR(EIDSP_PARAMETER_INVALID); - } - memcpy(spectral_str, config.spectral_power_edges, strlen(config.spectral_power_edges)); - - // convert spectral_power_edges (string) into float array - char *spectral_ptr = spectral_str; - while (spectral_ptr != NULL) { - while((*spectral_ptr) == ' ') { - spectral_ptr++; - } - - edges_matrix_in.buffer[edge_matrix_ix++] = atof(spectral_ptr); - - // find next (spectral) delimiter (or '\0' character) - while((*spectral_ptr != ',')) { - spectral_ptr++; - if (*spectral_ptr == '\0') break; - } - - if (*spectral_ptr == '\0') { - spectral_ptr = NULL; - } - else { - spectral_ptr++; - } - } - edges_matrix_in.rows = edge_matrix_ix; - - // calculate how much room we need for the output matrix - size_t output_matrix_cols = spectral::feature::calculate_spectral_buffer_size( - true, config.spectral_peaks_count, edges_matrix_in.rows - ); - // ei_printf("output_matrix_size %hux%zu\n", input_matrix.rows, output_matrix_cols); - if (output_matrix->cols * output_matrix->rows != static_cast(output_matrix_cols * config.axes)) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - output_matrix->cols = output_matrix_cols; - output_matrix->rows = config.axes; - - spectral::filter_t filter_type; - if (strcmp(config.filter_type, "low") == 0) { - filter_type = spectral::filter_lowpass; - } - else if (strcmp(config.filter_type, "high") == 0) { - filter_type = spectral::filter_highpass; - } - else { - filter_type = spectral::filter_none; - } - - ret = spectral::feature::spectral_analysis(output_matrix, &input_matrix, - sampling_freq, filter_type, 
config.filter_cutoff, config.filter_order, - config.fft_length, config.spectral_peaks_count, config.spectral_peaks_threshold, &edges_matrix_in); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to calculate spectral features (%d)\n", ret); - EIDSP_ERR(ret); - } - - // flatten again - output_matrix->cols = config.axes * output_matrix_cols; - output_matrix->rows = 1; - - return EIDSP_OK; -} - -matrix_i16_t *create_edges_matrix(ei_dsp_config_spectral_analysis_t config, const float sampling_freq) +__attribute__((unused)) int extract_hr_features( + signal_t *signal, + matrix_t *output_matrix, + void *config_ptr, + const float frequency) { - // the spectral edges that we want to calculate - static matrix_i16_t edges_matrix_in(64, 1); - static bool matrix_created = false; - size_t edge_matrix_ix = 0; - - if(matrix_created == false) { - - char spectral_str[128] = { 0 }; - if (strlen(config.spectral_power_edges) > sizeof(spectral_str) - 1) { - return NULL; - } - memcpy(spectral_str, config.spectral_power_edges, strlen(config.spectral_power_edges)); - - // convert spectral_power_edges (string) into float array - char *spectral_ptr = spectral_str; - while (spectral_ptr != NULL) { - while((*spectral_ptr) == ' ') { - spectral_ptr++; - } - - float edge = (atof(spectral_ptr) / (float)(sampling_freq/2.f)); - numpy::float_to_int16(&edge, &edges_matrix_in.buffer[edge_matrix_ix++], 1); - - // find next (spectral) delimiter (or '\0' character) - while((*spectral_ptr != ',')) { - spectral_ptr++; - if (*spectral_ptr == '\0') break; - } - - if (*spectral_ptr == '\0') { - spectral_ptr = NULL; - } - else { - spectral_ptr++; - } - } - edges_matrix_in.rows = edge_matrix_ix; - matrix_created = true; - } - - return &edges_matrix_in; +#if EI_CLASSIFIER_HR_ENABLED + auto handle = hr_class::create(config_ptr, frequency); + auto ret = handle->extract(signal, output_matrix, config_ptr, frequency); + delete handle; + return ret; +#else + ei_printf("ERR: Please contact EI sales to enable heart rate processing in deployment"); + return EIDSP_NOT_SUPPORTED; +#endif } -__attribute__((unused)) int extract_spectral_analysis_features(signal_i16_t *signal, matrix_i32_t *output_matrix, void *config_ptr, const float frequency) { - ei_dsp_config_spectral_analysis_t config = *((ei_dsp_config_spectral_analysis_t*)config_ptr); - - int ret; - - const float sampling_freq = frequency; +__attribute__((unused)) int extract_spectral_analysis_features( + signal_t *signal, + matrix_t *output_matrix, + void *config_ptr, + const float frequency) +{ + ei_dsp_config_spectral_analysis_t *config = (ei_dsp_config_spectral_analysis_t *)config_ptr; // input matrix from the raw signal - matrix_i16_t input_matrix(signal->total_length / config.axes, config.axes); + matrix_t input_matrix(signal->total_length / config->axes, config->axes); if (!input_matrix.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - signal->get_data(0, signal->total_length, (EIDSP_i16 *)&input_matrix.buffer[0]); - - // scale the signal - ret = numpy::scale(&input_matrix, config.scale_axes); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to scale signal (%d)\n", ret); - EIDSP_ERR(ret); - } - - // transpose the matrix so we have one row per axis (nifty!) 
- ret = numpy::transpose(&input_matrix); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to transpose matrix (%d)\n", ret); - EIDSP_ERR(ret); - } - - matrix_i16_t *edges_matrix_in = create_edges_matrix(config, sampling_freq); - - if(edges_matrix_in == NULL) { - EIDSP_ERR(EIDSP_PARAMETER_INVALID); - } + signal->get_data(0, signal->total_length, input_matrix.buffer); - // calculate how much room we need for the output matrix - size_t output_matrix_cols = spectral::feature::calculate_spectral_buffer_size( - true, config.spectral_peaks_count, edges_matrix_in->rows - ); - // ei_printf("output_matrix_size %hux%zu\n", input_matrix.rows, output_matrix_cols); - if (output_matrix->cols * output_matrix->rows != static_cast(output_matrix_cols * config.axes)) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); +#if EI_DSP_PARAMS_SPECTRAL_ANALYSIS_ANALYSIS_TYPE_WAVELET || EI_DSP_PARAMS_ALL + if (strcmp(config->analysis_type, "Wavelet") == 0) { + return spectral::wavelet::extract_wavelet_features(&input_matrix, output_matrix, config, frequency); } +#endif - output_matrix->cols = output_matrix_cols; - output_matrix->rows = config.axes; - - spectral::filter_t filter_type; - if (strcmp(config.filter_type, "low") == 0) { - filter_type = spectral::filter_lowpass; - } - else if (strcmp(config.filter_type, "high") == 0) { - filter_type = spectral::filter_highpass; - } - else { - filter_type = spectral::filter_none; +#if EI_DSP_PARAMS_SPECTRAL_ANALYSIS_ANALYSIS_TYPE_FFT || EI_DSP_PARAMS_ALL + if (strcmp(config->analysis_type, "FFT") == 0) { + if (config->implementation_version == 1) { + return spectral::feature::extract_spectral_analysis_features_v1( + &input_matrix, + output_matrix, + config, + frequency); + } else if (config->implementation_version == 4) { + return spectral::feature::extract_spectral_analysis_features_v4( + &input_matrix, + output_matrix, + config, + frequency); + } else { + return spectral::feature::extract_spectral_analysis_features_v2( + &input_matrix, + output_matrix, + config, + frequency); + } } +#endif - ret = spectral::feature::spectral_analysis(output_matrix, &input_matrix, - sampling_freq, filter_type, config.filter_cutoff, config.filter_order, - config.fft_length, config.spectral_peaks_count, config.spectral_peaks_threshold, edges_matrix_in); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to calculate spectral features (%d)\n", ret); - EIDSP_ERR(ret); +#if !EI_DSP_PARAMS_GENERATED || EI_DSP_PARAMS_ALL || !(EI_DSP_PARAMS_SPECTRAL_ANALYSIS_ANALYSIS_TYPE_FFT || EI_DSP_PARAMS_SPECTRAL_ANALYSIS_ANALYSIS_TYPE_WAVELET) + if (config->implementation_version == 1) { + return spectral::feature::extract_spectral_analysis_features_v1( + &input_matrix, + output_matrix, + config, + frequency); + } + if (config->implementation_version == 2) { + return spectral::feature::extract_spectral_analysis_features_v2( + &input_matrix, + output_matrix, + config, + frequency); } - - // flatten again - output_matrix->cols = config.axes * output_matrix_cols; - output_matrix->rows = 1; - - return EIDSP_OK; +#endif + return EIDSP_NOT_SUPPORTED; } __attribute__((unused)) int extract_raw_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) { ei_dsp_config_raw_t config = *((ei_dsp_config_raw_t*)config_ptr); - // input matrix from the raw signal - matrix_t input_matrix(signal->total_length / config.axes, config.axes); - if (!input_matrix.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); + // Because of rounding errors during re-sampling the output size of the block might be + // smaller than 
the input of the block. Make sure we don't write outside of the bounds + // of the array: + // https://forum.edgeimpulse.com/t/using-custom-sensors-on-raspberry-pi-4/3506/7 + size_t els_to_copy = signal->total_length; + if (els_to_copy > output_matrix->rows * output_matrix->cols) { + els_to_copy = output_matrix->rows * output_matrix->cols; } - signal->get_data(0, signal->total_length, input_matrix.buffer); + + signal->get_data(0, els_to_copy, output_matrix->buffer); // scale the signal - int ret = numpy::scale(&input_matrix, config.scale_axes); + int ret = numpy::scale(output_matrix, config.scale_axes); if (ret != EIDSP_OK) { EIDSP_ERR(ret); } - memcpy(output_matrix->buffer, input_matrix.buffer, signal->total_length * sizeof(float)); - return EIDSP_OK; } __attribute__((unused)) int extract_flatten_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) { - ei_dsp_config_flatten_t config = *((ei_dsp_config_flatten_t*)config_ptr); - - uint32_t expected_matrix_size = 0; - if (config.average) expected_matrix_size += config.axes; - if (config.minimum) expected_matrix_size += config.axes; - if (config.maximum) expected_matrix_size += config.axes; - if (config.rms) expected_matrix_size += config.axes; - if (config.stdev) expected_matrix_size += config.axes; - if (config.skewness) expected_matrix_size += config.axes; - if (config.kurtosis) expected_matrix_size += config.axes; - - if (output_matrix->rows * output_matrix->cols != expected_matrix_size) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - int ret; - - // input matrix from the raw signal - matrix_t input_matrix(signal->total_length / config.axes, config.axes); - if (!input_matrix.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - signal->get_data(0, signal->total_length, input_matrix.buffer); - - // scale the signal - ret = numpy::scale(&input_matrix, config.scale_axes); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to scale signal (%d)\n", ret); - EIDSP_ERR(ret); - } - - // transpose the matrix so we have one row per axis (nifty!) 
- ret = numpy::transpose(&input_matrix); - if (ret != EIDSP_OK) { - ei_printf("ERR: Failed to transpose matrix (%d)\n", ret); - EIDSP_ERR(ret); - } - - size_t out_matrix_ix = 0; - - for (size_t row = 0; row < input_matrix.rows; row++) { - matrix_t row_matrix(1, input_matrix.cols, input_matrix.buffer + (row * input_matrix.cols)); - - if (config.average) { - float fbuffer; - matrix_t out_matrix(1, 1, &fbuffer); - numpy::mean(&row_matrix, &out_matrix); - output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; - } - - if (config.minimum) { - float fbuffer; - matrix_t out_matrix(1, 1, &fbuffer); - numpy::min(&row_matrix, &out_matrix); - output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; - } - - if (config.maximum) { - float fbuffer; - matrix_t out_matrix(1, 1, &fbuffer); - numpy::max(&row_matrix, &out_matrix); - output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; - } - - if (config.rms) { - float fbuffer; - matrix_t out_matrix(1, 1, &fbuffer); - numpy::rms(&row_matrix, &out_matrix); - output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; - } - - if (config.stdev) { - float fbuffer; - matrix_t out_matrix(1, 1, &fbuffer); - numpy::stdev(&row_matrix, &out_matrix); - output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; - } - - if (config.skewness) { - float fbuffer; - matrix_t out_matrix(1, 1, &fbuffer); - numpy::skew(&row_matrix, &out_matrix); - output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; - } - - if (config.kurtosis) { - float fbuffer; - matrix_t out_matrix(1, 1, &fbuffer); - numpy::kurtosis(&row_matrix, &out_matrix); - output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; - } - } - - // flatten again - output_matrix->cols = output_matrix->rows * output_matrix->cols; - output_matrix->rows = 1; - - return EIDSP_OK; + auto handle = flatten_class::create(config_ptr, frequency); + auto ret = handle->extract(signal, output_matrix, config_ptr, frequency); + delete handle; + return ret; } static class speechpy::processing::preemphasis *preemphasis; @@ -404,7 +176,7 @@ __attribute__((unused)) int extract_mfcc_features(signal_t *signal, matrix_t *ou EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } - if(config.implementation_version != 1 && config.implementation_version != 2) { + if((config.implementation_version == 0) || (config.implementation_version > 4)) { EIDSP_ERR(EIDSP_BLOCK_VERSION_INCORRECT); } @@ -436,7 +208,7 @@ __attribute__((unused)) int extract_mfcc_features(signal_t *signal, matrix_t *ou output_matrix->rows = out_matrix_size.rows; output_matrix->cols = out_matrix_size.cols; - // and run the MFCC extraction (using 32 rather than 40 filters here to optimize speed on embedded) + // and run the MFCC extraction int ret = speechpy::feature::mfcc(output_matrix, &preemphasized_audio_signal, frequency, config.frame_length, config.frame_stride, config.num_cepstral, config.num_filters, config.fft_length, config.low_frequency, config.high_frequency, true, config.implementation_version); @@ -459,7 +231,7 @@ __attribute__((unused)) int extract_mfcc_features(signal_t *signal, matrix_t *ou } -static int extract_mfcc_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_mfcc_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out, int implementation_version) { +__attribute__((unused)) static int extract_mfcc_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_mfcc_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out, int implementation_version) { uint32_t frequency = 
(uint32_t)sampling_frequency; int x; @@ -513,7 +285,7 @@ __attribute__((unused)) int extract_mfcc_per_slice_features(signal_t *signal, ma EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } - if(config.implementation_version != 1 && config.implementation_version != 2) { + if((config.implementation_version == 0) || (config.implementation_version > 4)) { EIDSP_ERR(EIDSP_BLOCK_VERSION_INCORRECT); } @@ -537,8 +309,11 @@ __attribute__((unused)) int extract_mfcc_per_slice_features(signal_t *signal, ma const int frame_overlap_values = static_cast(frame_length_values) - static_cast(frame_stride_values); if (frame_overlap_values < 0) { - ei_printf("ERR: frame_length (%f) cannot be lower than frame_stride (%f) for continuous classification\n", - config.frame_length, config.frame_stride); + ei_printf("ERR: frame_length ("); + ei_printf_float(config.frame_length); + ei_printf(") cannot be lower than frame_stride ("); + ei_printf_float(config.frame_stride); + ei_printf(") for continuous classification\n"); EIDSP_ERR(EIDSP_PARAMETER_INVALID); } @@ -695,14 +470,8 @@ __attribute__((unused)) int extract_spectrogram_features(signal_t *signal, matri output_matrix->rows = out_matrix_size.rows; output_matrix->cols = out_matrix_size.cols; - // and run the MFE extraction - EI_DSP_MATRIX(energy_matrix, output_matrix->rows, 1); - if (!energy_matrix.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - int ret = speechpy::feature::spectrogram(output_matrix, signal, - frequency, config.frame_length, config.frame_stride, config.fft_length, config.implementation_version); + sampling_frequency, config.frame_length, config.frame_stride, config.fft_length, config.implementation_version); if (ret != EIDSP_OK) { ei_printf("ERR: Spectrogram failed (%d)\n", ret); EIDSP_ERR(ret); @@ -716,7 +485,7 @@ __attribute__((unused)) int extract_spectrogram_features(signal_t *signal, matri } else { // normalization - ret = speechpy::processing::spectrogram_normalization(output_matrix, config.noise_floor_db); + ret = speechpy::processing::spectrogram_normalization(output_matrix, config.noise_floor_db, config.implementation_version == 3); if (ret != EIDSP_OK) { ei_printf("ERR: normalization failed (%d)\n", ret); EIDSP_ERR(ret); @@ -730,7 +499,7 @@ __attribute__((unused)) int extract_spectrogram_features(signal_t *signal, matri } -static int extract_spectrogram_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_spectrogram_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out) { +__attribute__((unused)) static int extract_spectrogram_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_spectrogram_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out) { uint32_t frequency = (uint32_t)sampling_frequency; int x; @@ -813,8 +582,11 @@ __attribute__((unused)) int extract_spectrogram_per_slice_features(signal_t *sig const int frame_overlap_values = static_cast(frame_length_values) - static_cast(frame_stride_values); if (frame_overlap_values < 0) { - ei_printf("ERR: frame_length (%f) cannot be lower than frame_stride (%f) for continuous classification\n", - config.frame_length, config.frame_stride); + ei_printf("ERR: frame_length ("); + ei_printf_float(config.frame_length); + ei_printf(") cannot be lower than frame_stride ("); + ei_printf_float(config.frame_stride); + ei_printf(") for continuous classification\n"); EIDSP_ERR(EIDSP_PARAMETER_INVALID); } @@ -950,6 +722,10 @@ __attribute__((unused)) int extract_mfe_features(signal_t *signal, matrix_t *out EIDSP_ERR(EIDSP_PARAMETER_INVALID); } + if 
((config.implementation_version == 0) || (config.implementation_version > 4)) { + EIDSP_ERR(EIDSP_BLOCK_VERSION_INCORRECT); + } + const uint32_t frequency = static_cast(sampling_frequency); signal_t preemphasized_audio_signal; @@ -988,18 +764,21 @@ __attribute__((unused)) int extract_mfe_features(signal_t *signal, matrix_t *out output_matrix->rows = out_matrix_size.rows; output_matrix->cols = out_matrix_size.cols; - // and run the MFE extraction - EI_DSP_MATRIX(energy_matrix, output_matrix->rows, 1); - if (!energy_matrix.buffer) { - if (preemphasis) { - delete preemphasis; - } - EIDSP_ERR(EIDSP_OUT_OF_MEM); + int ret; + // This probably seems incorrect, but the mfe func can actually handle all versions + // There's a subtle issue with cmvn and v2, not worth tracking down + // So for v2 and v1, we'll just use the old code + // (the new mfe does away with the intermediate filterbank matrix) + if (config.implementation_version > 2) { + ret = speechpy::feature::mfe(output_matrix, nullptr, &preemphasized_audio_signal, + frequency, config.frame_length, config.frame_stride, config.num_filters, config.fft_length, + config.low_frequency, config.high_frequency, config.implementation_version); + } else { + ret = speechpy::feature::mfe_v3(output_matrix, nullptr, &preemphasized_audio_signal, + frequency, config.frame_length, config.frame_stride, config.num_filters, config.fft_length, + config.low_frequency, config.high_frequency, config.implementation_version); } - int ret = speechpy::feature::mfe(output_matrix, &energy_matrix, &preemphasized_audio_signal, - frequency, config.frame_length, config.frame_stride, config.num_filters, config.fft_length, - config.low_frequency, config.high_frequency, config.implementation_version); if (preemphasis) { delete preemphasis; } @@ -1031,7 +810,7 @@ __attribute__((unused)) int extract_mfe_features(signal_t *signal, matrix_t *out return EIDSP_OK; } -static int extract_mfe_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_mfe_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out) { +__attribute__((unused)) static int extract_mfe_run_slice(signal_t *signal, matrix_t *output_matrix, ei_dsp_config_mfe_t *config, const float sampling_frequency, matrix_size_t *matrix_size_out) { uint32_t frequency = (uint32_t)sampling_frequency; int x; @@ -1056,16 +835,20 @@ static int extract_mfe_run_slice(signal_t *signal, matrix_t *output_matrix, ei_d matrix_t output_matrix_slice(out_matrix_size.rows, out_matrix_size.cols, output_matrix->buffer + output_matrix_offset); - // energy matrix - EI_DSP_MATRIX(energy_matrix, out_matrix_size.rows, 1); - if (!energy_matrix.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - // and run the MFE extraction - x = speechpy::feature::mfe(&output_matrix_slice, &energy_matrix, signal, - frequency, config->frame_length, config->frame_stride, config->num_filters, config->fft_length, - config->low_frequency, config->high_frequency, config->implementation_version); + // This probably seems incorrect, but the mfe func can actually handle all versions + // There's a subtle issue with cmvn and v2, not worth tracking down + // So for v2 and v1, we'll just use the old code + // (the new mfe does away with the intermediate filterbank matrix) + if (config->implementation_version > 2) { + x = speechpy::feature::mfe(&output_matrix_slice, nullptr, signal, + frequency, config->frame_length, config->frame_stride, config->num_filters, config->fft_length, + config->low_frequency, config->high_frequency, config->implementation_version); + 
} else { + x = speechpy::feature::mfe_v3(&output_matrix_slice, nullptr, signal, + frequency, config->frame_length, config->frame_stride, config->num_filters, config->fft_length, + config->low_frequency, config->high_frequency, config->implementation_version); + } if (x != EIDSP_OK) { ei_printf("ERR: MFE failed (%d)\n", x); EIDSP_ERR(x); @@ -1096,6 +879,10 @@ __attribute__((unused)) int extract_mfe_per_slice_features(signal_t *signal, mat EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } + if ((config.implementation_version == 0) || (config.implementation_version > 4)) { + EIDSP_ERR(EIDSP_BLOCK_VERSION_INCORRECT); + } + if (signal->total_length == 0) { EIDSP_ERR(EIDSP_PARAMETER_INVALID); } @@ -1136,8 +923,12 @@ __attribute__((unused)) int extract_mfe_per_slice_features(signal_t *signal, mat const int frame_overlap_values = static_cast(frame_length_values) - static_cast(frame_stride_values); if (frame_overlap_values < 0) { - ei_printf("ERR: frame_length (%f) cannot be lower than frame_stride (%f) for continuous classification\n", - config.frame_length, config.frame_stride); + ei_printf("ERR: frame_length ("); + ei_printf_float(config.frame_length); + ei_printf(") cannot be lower than frame_stride ("); + ei_printf_float(config.frame_stride); + ei_printf(") for continuous classification\n"); + if (preemphasis) { delete preemphasis; } @@ -1301,12 +1092,6 @@ __attribute__((unused)) int extract_image_features(signal_t *signal, matrix_t *o int16_t channel_count = strcmp(config.channels, "Grayscale") == 0 ? 1 : 3; - if (output_matrix->rows * output_matrix->cols != static_cast(EI_CLASSIFIER_INPUT_WIDTH * EI_CLASSIFIER_INPUT_HEIGHT * channel_count)) { - ei_printf("out_matrix = %hu items\n", output_matrix->rows, output_matrix->cols); - ei_printf("calculated size = %hu items\n", static_cast(EI_CLASSIFIER_INPUT_WIDTH * EI_CLASSIFIER_INPUT_HEIGHT * channel_count)); - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - size_t output_ix = 0; #if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE) @@ -1357,25 +1142,77 @@ __attribute__((unused)) int extract_image_features(signal_t *signal, matrix_t *o return EIDSP_OK; } -#if EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 +#if (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1) && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI) -__attribute__((unused)) int extract_image_features_quantized(signal_t *signal, matrix_i8_t *output_matrix, void *config_ptr, const float frequency) { +__attribute__((unused)) int extract_drpai_features_quantized(signal_t *signal, matrix_u8_t *output_matrix, void *config_ptr, const float frequency) { ei_dsp_config_image_t config = *((ei_dsp_config_image_t*)config_ptr); int16_t channel_count = strcmp(config.channels, "Grayscale") == 0 ? 1 : 3; - if (output_matrix->rows * output_matrix->cols != static_cast(EI_CLASSIFIER_INPUT_WIDTH * EI_CLASSIFIER_INPUT_HEIGHT * channel_count)) { - ei_printf("out_matrix = %hu items\n", output_matrix->rows, output_matrix->cols); - ei_printf("calculated size = %hu items\n", static_cast(EI_CLASSIFIER_INPUT_WIDTH * EI_CLASSIFIER_INPUT_HEIGHT * channel_count)); - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + size_t output_ix = 0; + +#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE) + const size_t page_size = EI_DSP_IMAGE_BUFFER_STATIC_SIZE; +#else + const size_t page_size = 1024; +#endif + + // buffered read from the signal + size_t bytes_left = signal->total_length; + for (size_t ix = 0; ix < signal->total_length; ix += page_size) { + size_t elements_to_read = bytes_left > page_size ? 
page_size : bytes_left; + +#if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE) + matrix_t input_matrix(elements_to_read, config.axes, ei_dsp_image_buffer); +#else + matrix_t input_matrix(elements_to_read, config.axes); +#endif + if (!input_matrix.buffer) { + EIDSP_ERR(EIDSP_OUT_OF_MEM); + } + signal->get_data(ix, elements_to_read, input_matrix.buffer); + + for (size_t jx = 0; jx < elements_to_read; jx++) { + uint32_t pixel = static_cast(input_matrix.buffer[jx]); + + if (channel_count == 3) { + uint8_t r = static_cast(pixel >> 16 & 0xff); + uint8_t g = static_cast(pixel >> 8 & 0xff); + uint8_t b = static_cast(pixel & 0xff); + + output_matrix->buffer[output_ix++] = r; + output_matrix->buffer[output_ix++] = g; + output_matrix->buffer[output_ix++] = b; + } + else { + //NOTE: not implementing greyscale yet + } + } + bytes_left -= elements_to_read; } + return EIDSP_OK; +} + +#endif //(EI_CLASSIFIER_QUANTIZATION_ENABLED == 1) && (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI) + +#if (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1) && (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_DRPAI) + +__attribute__((unused)) int extract_image_features_quantized(signal_t *signal, matrix_i8_t *output_matrix, void *config_ptr, float scale, float zero_point, const float frequency, + int image_scaling) { + ei_dsp_config_image_t config = *((ei_dsp_config_image_t*)config_ptr); + + int16_t channel_count = strcmp(config.channels, "Grayscale") == 0 ? 1 : 3; + size_t output_ix = 0; const int32_t iRedToGray = (int32_t)(0.299f * 65536.0f); const int32_t iGreenToGray = (int32_t)(0.587f * 65536.0f); const int32_t iBlueToGray = (int32_t)(0.114f * 65536.0f); + static const float torch_mean[] = { 0.485, 0.456, 0.406 }; + static const float torch_std[] = { 0.229, 0.224, 0.225 }; + #if defined(EI_DSP_IMAGE_BUFFER_STATIC_SIZE) const size_t page_size = EI_DSP_IMAGE_BUFFER_STATIC_SIZE; #else @@ -1402,29 +1239,49 @@ __attribute__((unused)) int extract_image_features_quantized(signal_t *signal, m if (channel_count == 3) { // fast code path - if (EI_CLASSIFIER_TFLITE_INPUT_SCALE == 0.003921568859368563f && EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT == -128) { + if (scale == 0.003921568859368563f && zero_point == -128 && image_scaling == EI_CLASSIFIER_IMAGE_SCALING_NONE) { int32_t r = static_cast(pixel >> 16 & 0xff); int32_t g = static_cast(pixel >> 8 & 0xff); int32_t b = static_cast(pixel & 0xff); - output_matrix->buffer[output_ix++] = static_cast(r + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); - output_matrix->buffer[output_ix++] = static_cast(g + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); - output_matrix->buffer[output_ix++] = static_cast(b + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); + output_matrix->buffer[output_ix++] = static_cast(r + zero_point); + output_matrix->buffer[output_ix++] = static_cast(g + zero_point); + output_matrix->buffer[output_ix++] = static_cast(b + zero_point); } // slow code path else { - float r = static_cast(pixel >> 16 & 0xff) / 255.0f; - float g = static_cast(pixel >> 8 & 0xff) / 255.0f; - float b = static_cast(pixel & 0xff) / 255.0f; - - output_matrix->buffer[output_ix++] = static_cast(round(r / EI_CLASSIFIER_TFLITE_INPUT_SCALE) + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); - output_matrix->buffer[output_ix++] = static_cast(round(g / EI_CLASSIFIER_TFLITE_INPUT_SCALE) + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); - output_matrix->buffer[output_ix++] = static_cast(round(b / EI_CLASSIFIER_TFLITE_INPUT_SCALE) + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); + float r = static_cast(pixel >> 16 & 0xff); + float g = static_cast(pixel >> 8 & 
0xff); + float b = static_cast(pixel & 0xff); + + if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_NONE) { + r /= 255.0f; + g /= 255.0f; + b /= 255.0f; + } + else if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) { + r /= 255.0f; + g /= 255.0f; + b /= 255.0f; + + r = (r - torch_mean[0]) / torch_std[0]; + g = (g - torch_mean[1]) / torch_std[1]; + b = (b - torch_mean[2]) / torch_std[2]; + } + else if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN128_127) { + r -= 128.0f; + g -= 128.0f; + b -= 128.0f; + } + + output_matrix->buffer[output_ix++] = static_cast(round(r / scale) + zero_point); + output_matrix->buffer[output_ix++] = static_cast(round(g / scale) + zero_point); + output_matrix->buffer[output_ix++] = static_cast(round(b / scale) + zero_point); } } else { // fast code path - if (EI_CLASSIFIER_TFLITE_INPUT_SCALE == 0.003921568859368563f && EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT == -128) { + if (scale == 0.003921568859368563f && zero_point == -128 && image_scaling == EI_CLASSIFIER_IMAGE_SCALING_NONE) { int32_t r = static_cast(pixel >> 16 & 0xff); int32_t g = static_cast(pixel >> 8 & 0xff); int32_t b = static_cast(pixel & 0xff); @@ -1433,31 +1290,51 @@ __attribute__((unused)) int extract_image_features_quantized(signal_t *signal, m // see: https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert int32_t gray = (iRedToGray * r) + (iGreenToGray * g) + (iBlueToGray * b); gray >>= 16; // scale down to int8_t - gray += EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT; + gray += zero_point; if (gray < - 128) gray = -128; else if (gray > 127) gray = 127; output_matrix->buffer[output_ix++] = static_cast(gray); } // slow code path else { - float r = static_cast(pixel >> 16 & 0xff) / 255.0f; - float g = static_cast(pixel >> 8 & 0xff) / 255.0f; - float b = static_cast(pixel & 0xff) / 255.0f; + float r = static_cast(pixel >> 16 & 0xff); + float g = static_cast(pixel >> 8 & 0xff); + float b = static_cast(pixel & 0xff); + + if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_NONE) { + r /= 255.0f; + g /= 255.0f; + b /= 255.0f; + } + else if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_TORCH) { + r /= 255.0f; + g /= 255.0f; + b /= 255.0f; + + r = (r - torch_mean[0]) / torch_std[0]; + g = (g - torch_mean[1]) / torch_std[1]; + b = (b - torch_mean[2]) / torch_std[2]; + } + else if (image_scaling == EI_CLASSIFIER_IMAGE_SCALING_MIN128_127) { + r -= 128.0f; + g -= 128.0f; + b -= 128.0f; + } // ITU-R 601-2 luma transform // see: https://pillow.readthedocs.io/en/stable/reference/Image.html#PIL.Image.Image.convert float v = (0.299f * r) + (0.587f * g) + (0.114f * b); - output_matrix->buffer[output_ix++] = static_cast(round(v / EI_CLASSIFIER_TFLITE_INPUT_SCALE) + EI_CLASSIFIER_TFLITE_INPUT_ZEROPOINT); + output_matrix->buffer[output_ix++] = static_cast(round(v / scale) + zero_point); } } } bytes_left -= elements_to_read; - } + } return EIDSP_OK; } -#endif // EI_CLASSIFIER_TFLITE_INPUT_QUANTIZED == 1 +#endif // (EI_CLASSIFIER_QUANTIZATION_ENABLED == 1) && (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_DRPAI) /** * Clear all state regarding continuous audio. Invoke this function after continuous audio loop ends. @@ -1474,6 +1351,109 @@ __attribute__((unused)) int ei_dsp_clear_continuous_audio_state() { return EIDSP_OK; } +/** + * @brief Calculates the cepstral mean and variable normalization. 
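 *
 * The matrix arrives flattened as a single row; it is temporarily reshaped to one row per frame
 * (num_cepstral coefficients per row), speechpy's sliding-window CMVN is applied, and the
 * original 1 x N shape is restored before returning. Roughly, for each coefficient the mean of
 * the surrounding `win_size` frames is subtracted (and, when variance normalization is enabled,
 * the value is divided by that window's standard deviation).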
+ * + * @param matrix Source and destination matrix + * @param config_ptr ei_dsp_config_mfcc_t struct pointer + */ +__attribute__((unused)) void calc_cepstral_mean_and_var_normalization_mfcc(ei_matrix *matrix, void *config_ptr) +{ + ei_dsp_config_mfcc_t *config = (ei_dsp_config_mfcc_t *)config_ptr; + + uint32_t original_matrix_size = matrix->rows * matrix->cols; + + /* Modify rows and colums ration for matrix normalization */ + matrix->rows = original_matrix_size / config->num_cepstral; + matrix->cols = config->num_cepstral; + + // cepstral mean and variance normalization + int ret = speechpy::processing::cmvnw(matrix, config->win_size, true, false); + if (ret != EIDSP_OK) { + ei_printf("ERR: cmvnw failed (%d)\n", ret); + return; + } + + /* Reset rows and columns ratio */ + matrix->rows = 1; + matrix->cols = original_matrix_size; +} + +/** + * @brief Calculates the cepstral mean and variable normalization. + * + * @param matrix Source and destination matrix + * @param config_ptr ei_dsp_config_mfe_t struct pointer + */ +__attribute__((unused)) void calc_cepstral_mean_and_var_normalization_mfe(ei_matrix *matrix, void *config_ptr) +{ + ei_dsp_config_mfe_t *config = (ei_dsp_config_mfe_t *)config_ptr; + + uint32_t original_matrix_size = matrix->rows * matrix->cols; + + /* Modify rows and colums ration for matrix normalization */ + matrix->rows = (original_matrix_size) / config->num_filters; + matrix->cols = config->num_filters; + + if (config->implementation_version < 3) { + // cepstral mean and variance normalization + int ret = speechpy::processing::cmvnw(matrix, config->win_size, false, true); + if (ret != EIDSP_OK) { + ei_printf("ERR: cmvnw failed (%d)\n", ret); + return; + } + } + else { + // normalization + int ret = speechpy::processing::mfe_normalization(matrix, config->noise_floor_db); + if (ret != EIDSP_OK) { + ei_printf("ERR: normalization failed (%d)\n", ret); + return; + } + } + + /* Reset rows and columns ratio */ + matrix->rows = 1; + matrix->cols = (original_matrix_size); +} + +/** + * @brief Calculates the cepstral mean and variable normalization. 
+ * + * @param matrix Source and destination matrix + * @param config_ptr ei_dsp_config_spectrogram_t struct pointer + */ +__attribute__((unused)) void calc_cepstral_mean_and_var_normalization_spectrogram(ei_matrix *matrix, void *config_ptr) +{ + ei_dsp_config_spectrogram_t *config = (ei_dsp_config_spectrogram_t *)config_ptr; + + uint32_t original_matrix_size = matrix->rows * matrix->cols; + + /* Modify rows and colums ration for matrix normalization */ + matrix->cols = config->fft_length / 2 + 1; + matrix->rows = (original_matrix_size) / matrix->cols; + + if (config->implementation_version < 3) { + int ret = numpy::normalize(matrix); + if (ret != EIDSP_OK) { + ei_printf("ERR: normalization failed (%d)\n", ret); + return; + } + } + else { + // normalization + int ret = speechpy::processing::spectrogram_normalization(matrix, config->noise_floor_db, config->implementation_version == 3); + if (ret != EIDSP_OK) { + ei_printf("ERR: normalization failed (%d)\n", ret); + return; + } + } + + /* Reset rows and columns ratio */ + matrix->rows = 1; + matrix->cols = (original_matrix_size); +} + #ifdef __cplusplus } #endif // __cplusplus diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_signal_with_axes.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_signal_with_axes.h index 7825729..ccf4291 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_signal_with_axes.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_signal_with_axes.h @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
+ * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EI_CLASSIFIER_SIGNAL_WITH_AXES_H_ @@ -25,6 +20,7 @@ #include "edge-impulse-sdk/dsp/numpy_types.h" #include "edge-impulse-sdk/dsp/returntypes.hpp" +#include "edge-impulse-sdk/classifier/ei_model_types.h" #if !EIDSP_SIGNAL_C_FN_POINTER @@ -32,18 +28,18 @@ using namespace ei; class SignalWithAxes { public: - SignalWithAxes(signal_t *original_signal, uint8_t *axes, size_t axes_count): - _original_signal(original_signal), _axes(axes), _axes_count(axes_count) + SignalWithAxes(signal_t *original_signal, uint8_t *axes, size_t axes_count, const ei_impulse_t *impulse): + _original_signal(original_signal), _axes(axes), _axes_count(axes_count), _impulse(impulse) { } signal_t * get_signal() { - if (this->_axes_count == EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { + if (this->_axes_count == _impulse->raw_samples_per_frame) { return this->_original_signal; } - wrapped_signal.total_length = _original_signal->total_length / EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME * _axes_count; + wrapped_signal.total_length = _original_signal->total_length / _impulse->raw_samples_per_frame * _axes_count; #ifdef __MBED__ wrapped_signal.get_data = mbed::callback(this, &SignalWithAxes::get_data); #else @@ -55,11 +51,12 @@ class SignalWithAxes { } int get_data(size_t offset, size_t length, float *out_ptr) { - size_t length_on_original_signal = length / _axes_count * EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME; + size_t offset_on_original_signal = offset / _axes_count * _impulse->raw_samples_per_frame; + size_t length_on_original_signal = length / _axes_count * _impulse->raw_samples_per_frame; size_t out_ptr_ix = 0; - for (size_t ix = 0; ix < length_on_original_signal; ix += EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { + for (size_t ix = offset_on_original_signal; ix < offset_on_original_signal + length_on_original_signal; ix += _impulse->raw_samples_per_frame) { for (size_t axis_ix = 0; axis_ix < this->_axes_count; axis_ix++) { int r = _original_signal->get_data(ix + _axes[axis_ix], 1, &out_ptr[out_ptr_ix++]); if (r != 0) { @@ -75,57 +72,10 @@ class SignalWithAxes { signal_t *_original_signal; uint8_t *_axes; size_t _axes_count; + const ei_impulse_t *_impulse; signal_t wrapped_signal; }; -class SignalWithAxesI16 { -public: - SignalWithAxesI16(signal_i16_t *original_signal, uint8_t *axes, size_t axes_count): - _original_signal(original_signal), _axes(axes), _axes_count(axes_count) - { - - } - - signal_i16_t * get_signal() { - if (this->_axes_count == EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { - return this->_original_signal; - } - - wrapped_signal.total_length = _original_signal->total_length / EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME * _axes_count; -#ifdef __MBED__ - wrapped_signal.get_data = mbed::callback(this, &SignalWithAxesI16::get_data); -#else - wrapped_signal.get_data = [this](size_t offset, size_t length, int16_t *out_ptr) { - return this->get_data(offset, length, out_ptr); - }; -#endif - return &wrapped_signal; - } - - int get_data(size_t offset, size_t length, int16_t *out_ptr) { - size_t length_on_original_signal = length / _axes_count * EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME; - - size_t out_ptr_ix = 0; - - for (size_t ix = 0; ix < length_on_original_signal; ix += EI_CLASSIFIER_RAW_SAMPLES_PER_FRAME) { - for (size_t axis_ix = 0; axis_ix < this->_axes_count; axis_ix++) { - int r = _original_signal->get_data(ix + _axes[axis_ix], 1, &out_ptr[out_ptr_ix++]); - if (r != 0) { - return r; - } - } - } - - return 0; - } - -private: - signal_i16_t *_original_signal; - uint8_t *_axes; - size_t _axes_count; - 
signal_i16_t wrapped_signal; -}; - #endif // #if !EIDSP_SIGNAL_C_FN_POINTER #endif // _EI_CLASSIFIER_SIGNAL_WITH_AXES_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_signal_with_range.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_signal_with_range.h index 8ddad22..7571c7e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_signal_with_range.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/ei_signal_with_range.h @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
+ * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EI_CLASSIFIER_SIGNAL_WITH_RANGE_H_ @@ -65,41 +60,6 @@ class SignalWithRange { signal_t wrapped_signal; }; -class SignalWithRangeI16 { -public: - SignalWithRangeI16(signal_i16_t *original_signal, uint32_t range_start, uint32_t range_end): - _original_signal(original_signal), _range_start(range_start), _range_end(range_end) - { - - } - - signal_i16_t * get_signal() { - if (this->_range_start == 0 && this->_range_end == this->_original_signal->total_length) { - return this->_original_signal; - } - - wrapped_signal.total_length = _range_end - _range_start; -#ifdef __MBED__ - wrapped_signal.get_data = mbed::callback(this, &SignalWithRangeI16::get_data); -#else - wrapped_signal.get_data = [this](size_t offset, size_t length, int16_t *out_ptr) { - return this->get_data(offset, length, out_ptr); - }; -#endif - return &wrapped_signal; - } - - int get_data(size_t offset, size_t length, int16_t *out_ptr) { - return _original_signal->get_data(offset + _range_start, length, out_ptr); - } - -private: - signal_i16_t *_original_signal; - uint32_t _range_start; - uint32_t _range_end; - signal_i16_t wrapped_signal; -}; - #endif // #if !EIDSP_SIGNAL_C_FN_POINTER #endif // _EI_CLASSIFIER_SIGNAL_WITH_RANGE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/akida.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/akida.h new file mode 100644 index 0000000..08f4fc8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/akida.h @@ -0,0 +1,578 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef EI_CLASSIFIER_INFERENCING_ENGINE_AKIDA_H +#define EI_CLASSIFIER_INFERENCING_ENGINE_AKIDA_H + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA) + +/** + * @brief if we are not forcing SOFTWARE inference (simulation) + * then make sure we will try to use hardware + * + */ +#ifndef EI_CLASSIFIER_USE_AKIDA_SOFTWARE +#define EI_CLASSIFIER_USE_AKIDA_HARDWARE 1 +#endif + +/** + * @brief If more than one device is present in system + * setting this to device index can select a proper device. 
+ * e.g.: set to 1 to selct /dev/akida1 + * + */ +#ifndef EI_CLASSIFIER_USE_AKIDA_HARDWARE_NO +#define EI_CLASSIFIER_USE_AKIDA_HARDWARE_NO 0 +#endif + +#include "model-parameters/model_metadata.h" +#include +#include "tensorflow-lite/tensorflow/lite/c/common.h" +#include "tensorflow-lite/tensorflow/lite/interpreter.h" +#include "tensorflow-lite/tensorflow/lite/kernels/register.h" +#include "tensorflow-lite/tensorflow/lite/model.h" +#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "tensorflow-lite/tensorflow/lite/kernels/internal/reference/softmax.h" +#undef EI_CLASSIFIER_INFERENCING_ENGINE +#define EI_CLASSIFIER_INFERENCING_ENGINE EI_CLASSIFIER_TFLITE_FULL +#include "tflite_helper.h" +#undef EI_CLASSIFIER_INFERENCING_ENGINE +#define EI_CLASSIFIER_INFERENCING_ENGINE EI_CLASSIFIER_AKIDA +#include +#include +#include +#include +#include +#include +#include +#include "pybind11/embed.h" +#include "pybind11/numpy.h" +#include "pybind11/stl.h" + +namespace py = pybind11; + +std::stringstream engine_info; + +static py::module_ akida; +static py::object model; +static py::object model_predict; +static py::object model_forward; +static py::object device; +static bool akida_initialized = false; +static std::vector input_shape; +static tflite::RuntimeShape softmax_shape; +static tflite::SoftmaxParams dummy_params; +static int model_input_bits = 0; +static float scale; +static int down_scale; +typedef struct { + std::unique_ptr model; + std::unique_ptr interpreter; +} ei_tflite_state_t; + +std::map ei_tflite_instances; + +bool init_akida(const uint8_t *model_arr, size_t model_arr_size, bool debug) +{ + py::module_ sys; + py::list path; + constexpr char model_file_path[] = "/tmp/akida_model.fbz"; + + if(debug) { + try { + sys = py::module_::import("sys"); + path = sys.attr("path"); + ei_printf("DEBUG: sys.path:"); + for (py::handle p: path) { + ei_printf("\t%s\n", p.cast().c_str()); + } + } + catch (py::error_already_set &e) { + ei_printf("ERR: Importing 'sys' library failed:\n%s\n", e.what()); + // as it is only for debug purposes, continue + } + } + + try { + // import Python's akida module + akida = py::module_::import("akida"); + } + catch (py::error_already_set &e) { + ei_printf("ERR: Importing 'akida' library failed:\n%s\n", e.what()); + return false; + } + + if(debug) { + std::string ver = akida.attr("__version__").cast(); + ei_printf("DEBUG: Akida version: %s\n", ver.c_str()); + } + + py::object Model = akida.attr("Model"); + + // deploy akida model file into temporary file + std::ofstream model_file(model_file_path, std::ios::out | std::ios::binary); + model_file.write(reinterpret_cast(model_arr), model_arr_size); + if(model_file.bad()) { + ei_printf("ERR: failed to unpack model ile into %s\n", model_file_path); + model_file.close(); + return false; + } + model_file.close(); + + // load model + try { + model = Model(model_file_path); + } + catch (py::error_already_set &e) { + ei_printf("ERR: Can't load model file from %s\n", model_file_path); + ei_printf("ERR: %s\n", e.what()); + return false; + } + + // get input shape from model + input_shape = model.attr("input_shape").cast>(); + //TODO: temporarily only 3D input data is supported (see note in run_nn_inference) + if(input_shape.size() != 3) { + 
ei_printf("ERR: Unsupported input data shape. Expected 3 dimensions got %d\n", (int)input_shape.size()); + return false; + } + // extend input by (N, ...) - hardcoded to (1, ...) + input_shape.insert(input_shape.begin(), (size_t)1); + + // get model input_bits + std::vector layers = model.attr("layers").cast>(); + auto input_layer = layers[0]; + model_input_bits = input_layer.attr("input_bits").cast(); + if((model_input_bits != 8) && (model_input_bits != 4)) { + ei_printf("ERR: Unsupported input_bits. Expected 4 or 8 got %d\n", model_input_bits); + return false; + } + + // initialize scale coefficients + if(model_input_bits == 8) { + scale = 255; + down_scale = 1; + } + else if(model_input_bits == 4) { + // these values are recommended by BrainChip + scale = 15; + down_scale = 16; + } + + if(debug) { + ei_printf("INFO: Model input_bits: %d\n", model_input_bits); + ei_printf("INFO: Scale: %f\n", scale); + ei_printf("INFO: Down scale: %d\n", down_scale); + } + +#if (defined(EI_CLASSIFIER_USE_AKIDA_HARDWARE) && (EI_CLASSIFIER_USE_AKIDA_HARDWARE == 1)) + // get list of available devices + py::list devices = akida.attr("devices")(); + if(devices.empty() == true) { + ei_printf("ERR: AKD1000 device not found!\n"); + return false; + } + + if(devices.size() > 1) { + ei_printf("More than one device found! Using /dev/akida%d\n", EI_CLASSIFIER_USE_AKIDA_HARDWARE_NO); + device = devices[EI_CLASSIFIER_USE_AKIDA_HARDWARE_NO]; + } + else { + device = devices[0]; + } + //TODO: check if selected device is correct (compare versions) + // enable power measurement + device.attr("soc").attr("power_measurement_enabled") = true; + + // map model to the device + try { + model.attr("map")(device); + } + catch (py::error_already_set &e) { + ei_printf("ERR: Can't load the ML model onto the AKD1000 SoC\n"); + ei_printf("ERR: %s\n", e.what()); + return false; + } +#elif (defined(EI_CLASSIFIER_USE_AKIDA_SOFTWARE) && (EI_CLASSIFIER_USE_AKIDA_SOFTWARE == 1)) +#warning "Akida model will be run in SIMULATION mode (not on real hardware)!" +#else +#error "Neither EI_CLASSIFIER_USE_AKIDA_HARDWARE or EI_CLASSIFIER_USE_AKIDA_SOFTWARE are defined or set to 1" +#endif + + // init softmax shape + std::vector tmp = model.attr("output_shape").cast>(); + softmax_shape.BuildFrom(tmp); + // dumy beta parameter for softmax purposes + dummy_params.beta = 1; + + // get reference to predict function + model_predict = model.attr("predict"); + model_forward = model.attr("forward"); + + // clear info stream + engine_info.str(""); + + return true; +} + +template +void debug_print(const std::vector vec, const int val_per_row = 3) +{ + int n = 0; + for(auto it = vec.begin(); it != vec.end(); it++) { + ei_printf("%f ", *it); + if(++n > val_per_row - 1) { + ei_printf("\n"); + n = 0; + } + } +} + +/** + * @brief Do neural network inferencing over the processed feature matrix + * + * @param impulse Struct describing impulse architecture + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. 
+ */ +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug) +{ + ei_learning_block_config_tflite_graph_t *block_config = ((ei_learning_block_config_tflite_graph_t*)config_ptr); + ei_config_tflite_graph_t *graph_config = ((ei_config_tflite_graph_t*)block_config->graph_config); + + EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK; + + // init Python embedded interpreter (should be called once!) + static py::scoped_interpreter guard{}; + + // check if we've initialized the interpreter and device? + if (akida_initialized == false) { + if(init_akida(graph_config->model, graph_config->model_size, debug) == false) { + return EI_IMPULSE_AKIDA_ERROR; + } + akida_initialized = true; + } + + // according to: + // https://doc.brainchipinc.com/api_reference/akida_apis.html#akida.Model.predict + // input type is always uint8 + py::array_t input_data(input_shape); + + /* + * convert data to uint8 and copy features into input tensor + * For images RGB shape is (width, height, colors) + * For images BW shape is (width, height, 1) + * For Audio shape is (width, height, 1) - spectrogram + * TODO: test with other ML models/data types + * For details see: + * https://pybind11.readthedocs.io/en/stable/advanced/pycpp/numpy.html#direct-access + */ + auto r = input_data.mutable_unchecked<4>(); + float temp; + + size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size; + for (size_t i = 0; i < input_block_ids_size; i++) { + uint16_t cur_mtx = input_block_ids[i]; +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + ei::matrix_t* matrix = NULL; + + if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, mtx_size)) { + ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx); + return EI_IMPULSE_INVALID_SIZE; + } +#else + ei::matrix_t* matrix = fmatrix[0].matrix; +#endif + for (py::ssize_t x = 0; x < r.shape(1); x++) { + for (py::ssize_t y = 0; y < r.shape(2); y++) { + for(py::ssize_t z = 0; z < r.shape(3); z++) { + temp = (matrix->buffer[x * r.shape(2) * r.shape(3) + y * r.shape(3) + z] * scale); + temp = std::max(0.0f, std::min(temp, 255.0f)); + r(0, x, y, z) = (uint8_t)(temp / down_scale); + } + } + } + } + + // Run inference on AKD1000 + uint64_t ctx_start_us = ei_read_timer_us(); + py::array_t potentials; + try { + potentials = model_predict(input_data); + } + catch (py::error_already_set &e) { + ei_printf("ERR: Inference error:\n%s\n", e.what()); + return EI_IMPULSE_AKIDA_ERROR; + } + // TODO: 'forward' is returning int8 or int32, but EI SDK supports int8 or float32 only + // py::array_t potentials = model_forward(input_data); + uint64_t ctx_end_us = ei_read_timer_us(); + + potentials = potentials.squeeze(); + + if(debug) { + std::string ret_str = py::str(potentials).cast(); + ei_printf("AKD1000 raw output:\n%s\n", ret_str.c_str()); + } + + // convert to vector of floats to make further processing much easier + std::vector potentials_v;// = potentials.cast>(); + + // TODO: output conversion depending on output shape? + if (block_config->object_detection == false) { + potentials_v = potentials.squeeze().cast>(); + } + else { + // TODO: output from AkidaNet/MobileNet is always N x M x P (3 dimensions)? 
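    // Editor's note (illustrative, not from the SDK sources): for detector models the
    // raw Akida output is a 3-D tensor (grid rows x grid cols x per-cell values); the
    // loop below flattens it row-major, in (x, y, z) order, into potentials_v so the
    // fill_result_struct_* helpers further down can consume it as a flat float array.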
+ auto q = potentials.unchecked<>(); + for (py::ssize_t x = 0; x < q.shape(0); x++) { + for (py::ssize_t y = 0; y < q.shape(1); y++) { + for(py::ssize_t z = 0; z < q.shape(2); z++) { + potentials_v.push_back(q(x, y, z)); + } + } + } + } + + if(block_config->object_detection_last_layer != EI_CLASSIFIER_LAST_LAYER_YOLOV2) { + // apply softmax, becuase Akida is not supporting this operation + tflite::reference_ops::Softmax(dummy_params, softmax_shape, potentials_v.data(), softmax_shape, potentials_v.data()); + } + + if(debug == true) { + ei_printf("After softmax:\n"); + debug_print(potentials_v); + } + + float active_power = 0; +#if (defined(EI_CLASSIFIER_USE_AKIDA_HARDWARE)) + // power measurement post-processing + float floor_power = device.attr("soc").attr("power_meter").attr("floor").cast(); + py::array pwr_events = device.attr("soc").attr("power_meter").attr("events")(); + auto events = pwr_events.mutable_unchecked(); + for (py::ssize_t i = 0; i < events.shape(0); i++) { + active_power += events(i).attr("power").cast(); + } + active_power = (active_power/pwr_events.size()) - floor_power; +#endif + + result->timing.classification_us = ctx_end_us - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + + // clear info + engine_info.str(""); + engine_info << "Power consumption: " << std::fixed << std::setprecision(2) << active_power << " mW\n"; + engine_info << "Inferences per second: " << (1000000 / result->timing.classification_us); + + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_FOMO: { + fill_res = fill_result_struct_f32_fomo( + impulse, + block_config, + result, + potentials_v.data(), + impulse->fomo_output_size, + impulse->fomo_output_size); + break; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOV2: { + fill_res = fill_result_struct_f32_yolov2( + impulse, + block_config, + result, + potentials_v.data(), + impulse->tflite_output_features_count); + break; + } + case EI_CLASSIFIER_LAST_LAYER_SSD: { + ei_printf("ERR: MobileNet SSD models are not implemented for Akida (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOV5: { + ei_printf("ERR: YOLO v5 models are not implemented for Akida (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + default: { + ei_printf("ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + } + } + else { + fill_res = fill_result_struct_f32(impulse, result, potentials_v.data(), debug); + } + + return fill_res; +} + +/** + * Construct a tflite interpreter (creates it if needed) + */ +static EI_IMPULSE_ERROR get_interpreter(ei_learning_block_config_tflite_graph_t *block_config, tflite::Interpreter **interpreter) { + // not in the map yet... 
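    // Editor's note (illustrative): ei_tflite_instances acts as a lazy cache keyed by
    // block_id. The first call for a given DSP block builds a FlatBufferModel and
    // Interpreter, allocates tensors and sets the thread count; later calls for the
    // same block reuse the cached interpreter instead of rebuilding it.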
+ if (!ei_tflite_instances.count(block_config->block_id)) { + ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config; + ei_tflite_state_t *new_state = new ei_tflite_state_t(); + + auto new_model = tflite::FlatBufferModel::BuildFromBuffer((const char*)graph_config->model, graph_config->model_size); + new_state->model = std::move(new_model); + if (!new_state->model) { + ei_printf("Failed to build TFLite model from buffer\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + tflite::ops::builtin::BuiltinOpResolver resolver; +#if EI_CLASSIFIER_HAS_TREE_ENSEMBLE_CLASSIFIER + resolver.AddCustom("TreeEnsembleClassifier", + tflite::ops::custom::Register_TREE_ENSEMBLE_CLASSIFIER()); +#endif + tflite::InterpreterBuilder builder(*new_state->model, resolver); + builder(&new_state->interpreter); + + if (!new_state->interpreter) { + ei_printf("Failed to construct interpreter\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + if (new_state->interpreter->AllocateTensors() != kTfLiteOk) { + ei_printf("AllocateTensors failed\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + int hw_thread_count = (int)std::thread::hardware_concurrency(); + hw_thread_count -= 1; // leave one thread free for the other application + if (hw_thread_count < 1) { + hw_thread_count = 1; + } + + if (new_state->interpreter->SetNumThreads(hw_thread_count) != kTfLiteOk) { + ei_printf("SetNumThreads failed\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + ei_tflite_instances.insert(std::make_pair(block_config->block_id, new_state)); + } + + auto tflite_state = ei_tflite_instances[block_config->block_id]; + *interpreter = tflite_state->interpreter.get(); + return EI_IMPULSE_OK; +} + + +extern "C" EI_IMPULSE_ERROR run_nn_inference_from_dsp( + ei_learning_block_config_tflite_graph_t *block_config, + signal_t *signal, + matrix_t *output_matrix) +{ + tflite::Interpreter *interpreter; + auto interpreter_ret = get_interpreter(block_config, &interpreter); + if (interpreter_ret != EI_IMPULSE_OK) { + return interpreter_ret; + } + + TfLiteTensor *input = interpreter->input_tensor(0); + TfLiteTensor *output = interpreter->output_tensor(0); + + if (!input) { + return EI_IMPULSE_INPUT_TENSOR_WAS_NULL; + } + if (!output) { + return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL; + } + + auto input_res = fill_input_tensor_from_signal(signal, input); + if (input_res != EI_IMPULSE_OK) { + return input_res; + } + + TfLiteStatus status = interpreter->Invoke(); + if (status != kTfLiteOk) { + ei_printf("ERR: interpreter->Invoke() failed with %d\n", status); + return EI_IMPULSE_TFLITE_ERROR; + } + + auto output_res = fill_output_matrix_from_tensor(output, output_matrix); + if (output_res != EI_IMPULSE_OK) { + return output_res; + } + + // on Linux we're not worried about free'ing (for now) + + return EI_IMPULSE_OK; +} + +__attribute__((unused)) int extract_tflite_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) { + + ei_dsp_config_tflite_t *dsp_config = (ei_dsp_config_tflite_t*)config_ptr; + + ei_config_tflite_graph_t ei_config_tflite_graph_0 = { + .implementation_version = 1, + .model = dsp_config->model, + .model_size = dsp_config->model_size, + .arena_size = dsp_config->arena_size + }; + + ei_learning_block_config_tflite_graph_t ei_learning_block_config = { + .implementation_version = 1, + .classification_mode = EI_CLASSIFIER_CLASSIFICATION_MODE_DSP, + .block_id = dsp_config->block_id, + .object_detection = false, + .object_detection_last_layer = EI_CLASSIFIER_LAST_LAYER_UNKNOWN, + .output_data_tensor = 
0, + .output_labels_tensor = 255, + .output_score_tensor = 255, + .threshold = 0, + .quantized = 0, + .compiled = 1, + .graph_config = &ei_config_tflite_graph_0 + }; + + auto x = run_nn_inference_from_dsp(&ei_learning_block_config, signal, output_matrix); + if (x != 0) { + return x; + } + + return EIDSP_OK; +} + +#endif // EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_AKIDA + +#endif /* EI_CLASSIFIER_INFERENCING_ENGINE_AKIDA_H */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/anomaly.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/anomaly.h new file mode 100644 index 0000000..5a800eb --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/anomaly.h @@ -0,0 +1,280 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EDGE_IMPULSE_INFERENCING_ANOMALY_H_ +#define _EDGE_IMPULSE_INFERENCING_ANOMALY_H_ + +#if (EI_CLASSIFIER_HAS_ANOMALY) + +#include +#include +#include +#include +#include + +#include "edge-impulse-sdk/classifier/ei_classifier_types.h" +#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h" +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" +#include "edge-impulse-sdk/classifier/inferencing_engines/engines.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" + +#ifdef __cplusplus +namespace { +#endif // __cplusplus + +/** + * Standard scaler, scales all values in the input vector + * Note that this *modifies* the array in place! + * @param input Array of input values + * @param scale Array of scale values (obtain from StandardScaler in Python) + * @param mean Array of mean values (obtain from StandardScaler in Python) + * @param input_size Size of input, scale and mean arrays + */ +void standard_scaler(float *input, const float *scale, const float *mean, size_t input_size) { + for (size_t ix = 0; ix < input_size; ix++) { + input[ix] = (input[ix] - mean[ix]) / scale[ix]; + } +} + +/** + * Calculate the distance between input vector and the cluster + * @param input Array of input values (already scaled by standard_scaler) + * @param input_size Size of the input array + * @param cluster A cluster (number of centroids should match input_size) + */ +float calculate_cluster_distance(float *input, size_t input_size, const ei_classifier_anom_cluster_t *cluster) { + // todo: check input_size and centroid size? 
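    // Editor's note (illustrative): the score computed below is the Euclidean distance
    // of the scaled input to the cluster centroid, minus the cluster's learned max_error:
    //   score = sqrt( sum_i (input[i] - centroid[i])^2 ) - max_error
    // e.g. a 2-D input {3, 4} against centroid {0, 0} with max_error 4 scores 5 - 4 = 1;
    // a score <= 0 means the point lies within the radius seen during training, and
    // get_min_distance_to_cluster() reports the smallest such score over all clusters.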
+ + float dist = 0.0f; + for (size_t ix = 0; ix < input_size; ix++) { + dist += pow(input[ix] - cluster->centroid[ix], 2); + } + return sqrt(dist) - cluster->max_error; +} + +/** + * Get minimum distance to a cluster + * @param input Array of input values (already scaled by standard_scaler) + * @param input_size Size of the input array + * @param clusters Array of clusters + * @param cluster_size Size of cluster array + */ +float get_min_distance_to_cluster(float *input, size_t input_size, const ei_classifier_anom_cluster_t *clusters, size_t cluster_size) { + float min = 1000.0f; + for (size_t ix = 0; ix < cluster_size; ix++) { + float dist = calculate_cluster_distance(input, input_size, &clusters[ix]); + if (dist < min) { + min = dist; + } + } + return min; +} + +#ifdef __cplusplus +} +#endif // __cplusplus + + +/** + * Extracts the input values from the feature matrix based on the anomaly axes. + * @param fmatrix Feature matrix + * @param input_block_ids Array of block IDs to extract from the feature matrix + * @param input_block_ids_size Size of input_block_ids array + * @param block_config Anomaly block configuration + * @param input Array to store the extracted input values + * @return EI_IMPULSE_OK if successful, otherwise an error code + */ +EI_IMPULSE_ERROR extract_anomaly_input_values( + ei_feature_t *fmatrix, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + uint32_t anom_axes_size, + const uint16_t *anom_axis, + float *input) +{ + if (input_block_ids_size == 1) { + for (size_t ix = 0; ix < anom_axes_size; ix++) { + input[ix] = fmatrix[0].matrix->buffer[anom_axis[ix]]; + } + } + else { +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + ei::matrix_t* matrix = NULL; +#endif + // tracks where we are now in the combined feature matrix + uint32_t global_buf_pos = 0; + // we add the size of passed matrix to it + uint32_t buf_offset = 0; + // current index of input feature + uint32_t input_pos = 0; + + for (size_t i = 0; i < input_block_ids_size; i++) { +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + size_t cur_mtx = input_block_ids[i]; + if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, anom_axes_size)) { + ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx); + return EI_IMPULSE_INVALID_SIZE; + } +#else + ei::matrix_t* matrix = fmatrix[0].matrix; +#endif + for (size_t ix = 0; ix < anom_axes_size; ix++) { + global_buf_pos = anom_axis[input_pos]; + if (global_buf_pos <= buf_offset + (matrix->rows * matrix->cols)) { + input[input_pos] = matrix->buffer[anom_axis[input_pos] - buf_offset]; + input_pos++; + if (input_pos >= anom_axes_size) { goto end; } + } + else { + break; + } + } + buf_offset += matrix->rows * matrix->cols; + } + end:; + } + return EI_IMPULSE_OK; +} + + +EI_IMPULSE_ERROR run_kmeans_anomaly( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_anomaly_kmeans_t *block_config = (ei_learning_block_config_anomaly_kmeans_t*)config_ptr; + + uint64_t anomaly_start_ms = ei_read_timer_ms(); + + float *input = (float*)ei_malloc(block_config->anom_axes_size * sizeof(float)); + if (!input) { + ei_printf("Failed to allocate memory for anomaly input buffer"); + return EI_IMPULSE_OUT_OF_MEMORY; + } + + extract_anomaly_input_values(fmatrix, input_block_ids, input_block_ids_size, block_config->anom_axes_size, block_config->anom_axis, input); + + standard_scaler(input, 
block_config->anom_scale, block_config->anom_mean, block_config->anom_axes_size); + float anomaly = get_min_distance_to_cluster( + input, block_config->anom_axes_size, block_config->anom_clusters, block_config->anom_cluster_count); + + uint64_t anomaly_end_ms = ei_read_timer_ms(); + + if (debug) { + ei_printf("Anomaly score (time: %d ms.): ", static_cast(anomaly_end_ms - anomaly_start_ms)); + ei_printf_float(anomaly); + ei_printf("\n"); + } + + result->timing.anomaly = anomaly_end_ms - anomaly_start_ms; + result->anomaly = anomaly; + ei_free(input); + + return EI_IMPULSE_OK; +} + +#if (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE) +EI_IMPULSE_ERROR run_gmm_anomaly( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_anomaly_gmm_t *block_config = (ei_learning_block_config_anomaly_gmm_t*)config_ptr; + + ei_learning_block_config_tflite_graph_t ei_learning_block_config_gmm = { + .implementation_version = 1, + .classification_mode = block_config->classification_mode, + .block_id = 0, + .object_detection = 0, + .object_detection_last_layer = EI_CLASSIFIER_LAST_LAYER_UNKNOWN, + .output_data_tensor = 0, + .output_labels_tensor = 0, + .output_score_tensor = 0, + .threshold = block_config->anomaly_threshold, + .quantized = 0, + .compiled = 0, + .graph_config = block_config->graph_config + }; + + ei_impulse_result_t anomaly_result = { 0 }; + + std::unique_ptr input_ptr(new ei_feature_t[1]); + ei_feature_t* input = input_ptr.get(); + + memset(&anomaly_result, 0, sizeof(ei_impulse_result_t)); + + std::unique_ptr matrix_ptr(new ei::matrix_t(1, block_config->anom_axes_size)); + + if (block_config->classification_mode == EI_CLASSIFIER_CLASSIFICATION_MODE_VISUAL_ANOMALY) { + // [JJ] Here we assume that the feature extractor block is always directly before the GMM block + // if that changes (which I assume it will at some point, e.g. if we have a shared backbone) + // this will break. Would it be better if `run_nn_inference` would get pointers to the input/output + // matrices instead? 
+ input[0].matrix = fmatrix[impulse->dsp_blocks_size + (learn_block_index - 1)].matrix; + input[0].blockId = fmatrix[impulse->dsp_blocks_size + (learn_block_index - 1)].blockId; + + input_block_ids_size = 1; + } + else { + input[0].matrix = matrix_ptr.get(); + input[0].blockId = 0; + + extract_anomaly_input_values(fmatrix, input_block_ids, input_block_ids_size, block_config->anom_axes_size, block_config->anom_axis, input[0].matrix->buffer); + input_block_ids_size = 1; + } + + EI_IMPULSE_ERROR res = run_nn_inference(impulse, input, learn_block_index, input_block_ids, input_block_ids_size, &anomaly_result, (void*)&ei_learning_block_config_gmm, debug); + if (res != EI_IMPULSE_OK) { + return res; + } + + if (debug) { + ei_printf("Anomaly score (time: %d ms.): ", anomaly_result.timing.classification); + ei_printf_float(anomaly_result.classification[0].value); + ei_printf("\n"); + } + + result->timing.anomaly = anomaly_result.timing.classification; + + if (block_config->classification_mode == EI_CLASSIFIER_CLASSIFICATION_MODE_VISUAL_ANOMALY) { +#if EI_CLASSIFIER_HAS_VISUAL_ANOMALY + result->visual_ad_grid_cells = anomaly_result.visual_ad_grid_cells; + result->visual_ad_count = anomaly_result.visual_ad_count; + result->visual_ad_result.mean_value = anomaly_result.visual_ad_result.mean_value; + result->visual_ad_result.max_value = anomaly_result.visual_ad_result.max_value; +#endif // EI_CLASSIFIER_HAS_VISUAL_ANOMALY + } + else { + result->anomaly = anomaly_result.classification[0].value; + } + + return EI_IMPULSE_OK; +} +#endif // (EI_CLASSIFIER_INFERENCING_ENGINE != EI_CLASSIFIER_NONE) + +#endif //#if (EI_CLASSIFIER_HAS_ANOMALY == 1) +#endif // _EDGE_IMPULSE_INFERENCING_ANOMALY_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/drpai.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/drpai.h new file mode 100644 index 0000000..6ecea7d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/drpai.h @@ -0,0 +1,758 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_DRPAI_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_DRPAI_H_ + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI) + +/***************************************** + * includes + ******************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#if ((EI_CLASSIFIER_OBJECT_DETECTION == 1) && (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI)) +// For a YOLOV5_V5_DRPAI model we ran the unsupported layers with TF +#include +#include "tensorflow-lite/tensorflow/lite/c/common.h" +#include "tensorflow-lite/tensorflow/lite/interpreter.h" +#include "tensorflow-lite/tensorflow/lite/kernels/register.h" +#include "tensorflow-lite/tensorflow/lite/model.h" +#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h" +#endif +#include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" +#include "edge-impulse-sdk/classifier/ei_run_dsp.h" +#include "edge-impulse-sdk/porting/ei_logging.h" + +#include +#include + + + +/***************************************** + * Macro + ******************************************/ +/*Maximum DRP-AI Timeout threshold*/ +#define DRPAI_TIMEOUT (5) + +/*Buffer size for writing data to memory via DRP-AI Driver.*/ +#define BUF_SIZE (1024) + +/*Index to access drpai_file_path[]*/ +#define INDEX_D (0) +#define INDEX_C (1) +#define INDEX_P (2) +#define INDEX_A (3) +#define INDEX_W (4) + +/***************************************** + * Public global vars + ******************************************/ +// input and output buffer pointers for memory mapped regions used by DRP-AI +uint8_t *drpai_input_buf = (uint8_t *)NULL; +float *drpai_output_buf = (float *)NULL; + +/***************************************** + * Typedef + ******************************************/ +/* For DRP-AI Address List */ +typedef struct { + unsigned long desc_aimac_addr; + unsigned long desc_aimac_size; + unsigned long desc_drp_addr; + unsigned long desc_drp_size; + unsigned long drp_param_addr; + unsigned long drp_param_size; + unsigned long data_in_addr; + unsigned long data_in_size; + unsigned long data_addr; + unsigned long data_size; + unsigned long work_addr; + unsigned long work_size; + unsigned long data_out_addr; + unsigned long data_out_size; + unsigned long drp_config_addr; + unsigned long drp_config_size; + unsigned long weight_addr; + unsigned long weight_size; +} st_addr_t; + +/***************************************** + * static vars + ******************************************/ +static st_addr_t drpai_address; +static uint64_t udmabuf_address = 0; + +static int drpai_fd = -1; + +drpai_data_t proc[DRPAI_INDEX_NUM]; + +void get_udmabuf_memory_start_addr() +{ /* Obtain udmabuf memory area starting address */ + + int8_t fd = 0; + char addr[1024]; + int32_t read_ret = 0; + errno = 0; + + fd = open("/sys/class/u-dma-buf/udmabuf0/phys_addr", O_RDONLY); + if (0 > fd) + { + fprintf(stderr, "[ERROR] Failed to open udmabuf0/phys_addr : errno=%d\n", errno); + } + + read_ret = read(fd, addr, 1024); + if (0 > read_ret) + { + fprintf(stderr, "[ERROR] Failed to read udmabuf0/phys_addr : errno=%d\n", errno); + close(fd); + } + + sscanf(addr, "%lx", &udmabuf_address); + close(fd); + + /* 
Filter the bit higher than 32 bit */ + udmabuf_address &=0xFFFFFFFF; +} + +uint8_t drpai_init_mem(uint32_t input_frame_size) { + int32_t i = 0; + + int udmabuf_fd0 = open("/dev/udmabuf0", O_RDWR); + if (udmabuf_fd0 < 0) { + return -1; + } + + // input_frame_size === data_in_size + uint8_t *addr = + (uint8_t *)mmap(NULL, input_frame_size, + PROT_READ | PROT_WRITE, MAP_SHARED, udmabuf_fd0, 0); + + drpai_input_buf = addr; + + /* Write once to allocate physical memory to u-dma-buf virtual space. + * Note: Do not use memset() for this. + * Because it does not work as expected. */ + for (i = 0; i < input_frame_size; i++) { + drpai_input_buf[i] = 0; + } + + + get_udmabuf_memory_start_addr(); + if (0 == udmabuf_address) { + return EI_IMPULSE_DRPAI_INIT_FAILED; + } + + return 0; +} + +/***************************************** + * Function Name : read_addrmap_txt + * Description : Loads address and size of DRP-AI Object files into struct + *addr. Arguments : addr_file = filename of addressmap file (from + *DRP-AI Object files) Return value : 0 if succeeded not 0 otherwise + ******************************************/ +static int8_t read_addrmap_txt() { + // create a stream from the DRP-AI model data without copying + std::istringstream ifs; + ifs.rdbuf()->pubsetbuf((char *)ei_ei_addrmap_intm_txt, ei_ei_addrmap_intm_txt_len); + + std::string str; + unsigned long l_addr; + unsigned long l_size; + std::string element, a, s; + + if (ifs.fail()) { + return -1; + } + + while (getline(ifs, str)) { + std::istringstream iss(str); + iss >> element >> a >> s; + l_addr = strtol(a.c_str(), NULL, 16); + l_size = strtol(s.c_str(), NULL, 16); + + if (element == "drp_config") { + drpai_address.drp_config_addr = l_addr; + drpai_address.drp_config_size = l_size; + } else if (element == "desc_aimac") { + drpai_address.desc_aimac_addr = l_addr; + drpai_address.desc_aimac_size = l_size; + } else if (element == "desc_drp") { + drpai_address.desc_drp_addr = l_addr; + drpai_address.desc_drp_size = l_size; + } else if (element == "drp_param") { + drpai_address.drp_param_addr = l_addr; + drpai_address.drp_param_size = l_size; + } else if (element == "weight") { + drpai_address.weight_addr = l_addr; + drpai_address.weight_size = l_size; + } else if (element == "data_in") { + drpai_address.data_in_addr = l_addr; + drpai_address.data_in_size = l_size; + } else if (element == "data") { + drpai_address.data_addr = l_addr; + drpai_address.data_size = l_size; + } else if (element == "data_out") { + drpai_address.data_out_addr = l_addr; + drpai_address.data_out_size = l_size; + } else if (element == "work") { + drpai_address.work_addr = l_addr; + drpai_address.work_size = l_size; + } + } + + return 0; +} + +/***************************************** + * Function Name : load_data_to_mem + * Description : Loads a binary blob DRP-AI Driver Memory + * Arguments : data_ptr = pointer to the bytes to write + * drpai_fd = file descriptor of DRP-AI Driver + * from = memory start address where the data is + *written size = data size to be written Return value : 0 if succeeded not 0 + *otherwise + ******************************************/ +static int8_t load_data_to_mem(unsigned char *data_ptr, int drpai_fd, + unsigned long from, unsigned long size) { + drpai_data_t drpai_data; + + drpai_data.address = from; + drpai_data.size = size; + + errno = 0; + if (-1 == ioctl(drpai_fd, DRPAI_ASSIGN, &drpai_data)) { + return -1; + } + + errno = 0; + if (-1 == write(drpai_fd, data_ptr, size)) { + return -1; + } + + return 0; +} + 
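// Editor's sketch (illustrative; not from the DRP-AI sources): read_addrmap_txt() above
// parses "<region> <hex address> <hex size>" lines from the bundled address map. A
// minimal standalone version of that parse is shown here; the entry value, the
// addrmap_entry type and parse_addrmap_line() are hypothetical names for illustration.
//
//   #include <cstdlib>
//   #include <sstream>
//   #include <string>
//
//   struct addrmap_entry { std::string name; unsigned long addr; unsigned long size; };
//
//   static addrmap_entry parse_addrmap_line(const std::string &line) {
//       std::istringstream iss(line);
//       std::string name, a, s;
//       iss >> name >> a >> s;                            // e.g. "data_in 80000000 00096000"
//       return { name, std::strtoul(a.c_str(), NULL, 16), // address parsed as hex
//                      std::strtoul(s.c_str(), NULL, 16) }; // size parsed as hex
//   }
//   // parse_addrmap_line("data_in 80000000 00096000") -> { "data_in", 0x80000000, 0x96000 }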
+/***************************************** + * Function Name : load_drpai_data + * Description : Loads DRP-AI Object files to memory via DRP-AI Driver. + * Arguments : drpai_fd = file descriptor of DRP-AI Driver + * Return value : 0 if succeeded + * : not 0 otherwise + ******************************************/ +static int load_drpai_data(int drpai_fd) { + unsigned long addr, size; + unsigned char *data_ptr; + for (int i = 0; i < 5; i++) { + switch (i) { + case (INDEX_W): + addr = drpai_address.weight_addr; + size = drpai_address.weight_size; + data_ptr = ei_ei_weight_dat; + break; + case (INDEX_C): + addr = drpai_address.drp_config_addr; + size = drpai_address.drp_config_size; + data_ptr = ei_ei_drpcfg_mem; + break; + case (INDEX_P): + addr = drpai_address.drp_param_addr; + size = drpai_address.drp_param_size; + data_ptr = ei_drp_param_bin; + break; + case (INDEX_A): + addr = drpai_address.desc_aimac_addr; + size = drpai_address.desc_aimac_size; + data_ptr = ei_aimac_desc_bin; + break; + case (INDEX_D): + addr = drpai_address.desc_drp_addr; + size = drpai_address.desc_drp_size; + data_ptr = ei_drp_desc_bin; + break; + default: + return -1; + break; + } + if (0 != load_data_to_mem(data_ptr, drpai_fd, addr, size)) { + return -1; + } + } + return 0; +} + +EI_IMPULSE_ERROR drpai_init_classifier() { + // retval for drpai status + int ret_drpai; + + // Read DRP-AI Object files address and size + if (0 != read_addrmap_txt()) { + ei_printf("ERR: read_addrmap_txt failed : %d\n", errno); + return EI_IMPULSE_DRPAI_INIT_FAILED; + } + + // DRP-AI Driver Open + drpai_fd = open("/dev/drpai0", O_RDWR); + if (drpai_fd < 0) { + ei_printf("ERR: Failed to Open DRP-AI Driver: errno=%d\n", errno); + return EI_IMPULSE_DRPAI_INIT_FAILED; + } + + // Load DRP-AI Data from Filesystem to Memory via DRP-AI Driver + ret_drpai = load_drpai_data(drpai_fd); + if (ret_drpai != 0) { + ei_printf("ERR: Failed to load DRPAI Data\n"); + if (0 != close(drpai_fd)) { + ei_printf("ERR: Failed to Close DRPAI Driver: errno=%d\n", errno); + } + return EI_IMPULSE_DRPAI_INIT_FAILED; + } + + // statically store DRP object file addresses and sizes + proc[DRPAI_INDEX_INPUT].address = (uint32_t)udmabuf_address; + proc[DRPAI_INDEX_INPUT].size = drpai_address.data_in_size; + proc[DRPAI_INDEX_DRP_CFG].address = drpai_address.drp_config_addr; + proc[DRPAI_INDEX_DRP_CFG].size = drpai_address.drp_config_size; + proc[DRPAI_INDEX_DRP_PARAM].address = drpai_address.drp_param_addr; + proc[DRPAI_INDEX_DRP_PARAM].size = drpai_address.drp_param_size; + proc[DRPAI_INDEX_AIMAC_DESC].address = drpai_address.desc_aimac_addr; + proc[DRPAI_INDEX_AIMAC_DESC].size = drpai_address.desc_aimac_size; + proc[DRPAI_INDEX_DRP_DESC].address = drpai_address.desc_drp_addr; + proc[DRPAI_INDEX_DRP_DESC].size = drpai_address.desc_drp_size; + proc[DRPAI_INDEX_WEIGHT].address = drpai_address.weight_addr; + proc[DRPAI_INDEX_WEIGHT].size = drpai_address.weight_size; + proc[DRPAI_INDEX_OUTPUT].address = drpai_address.data_out_addr; + proc[DRPAI_INDEX_OUTPUT].size = drpai_address.data_out_size; + + EI_LOGD("proc[DRPAI_INDEX_INPUT] addr: %p, size: %p\r\n", proc[DRPAI_INDEX_INPUT].address, proc[DRPAI_INDEX_INPUT].size); + EI_LOGD("proc[DRPAI_INDEX_DRP_CFG] addr: %p, size: %p\r\n", proc[DRPAI_INDEX_DRP_CFG].address, proc[DRPAI_INDEX_DRP_CFG].size); + EI_LOGD("proc[DRPAI_INDEX_DRP_PARAM] addr: %p, size: %p\r\n", proc[DRPAI_INDEX_DRP_PARAM].address, proc[DRPAI_INDEX_DRP_PARAM].size); + EI_LOGD("proc[DRPAI_INDEX_AIMAC_DESC] addr: %p, size: %p\r\n", 
proc[DRPAI_INDEX_AIMAC_DESC].address, proc[DRPAI_INDEX_AIMAC_DESC].size); + EI_LOGD("proc[DRPAI_INDEX_DRP_DESC] addr: %p, size: %p\r\n", proc[DRPAI_INDEX_DRP_DESC].address, proc[DRPAI_INDEX_DRP_DESC].size); + EI_LOGD("proc[DRPAI_INDEX_WEIGHT] addr: %p, size: %p\r\n", proc[DRPAI_INDEX_WEIGHT].address, proc[DRPAI_INDEX_WEIGHT].size); + EI_LOGD("proc[DRPAI_INDEX_OUTPUT] addr: %p, size: %p\r\n", proc[DRPAI_INDEX_OUTPUT].address, proc[DRPAI_INDEX_OUTPUT].size); + + drpai_output_buf = (float *)ei_malloc(drpai_address.data_out_size); + + return EI_IMPULSE_OK; +} + +EI_IMPULSE_ERROR drpai_run_classifier_image_quantized() { +#if EI_CLASSIFIER_COMPILED == 1 +#error "DRP-AI is not compatible with EON Compiler" +#endif + // output data from DRPAI model + drpai_data_t drpai_data; + // status used to query if any internal errors occured during inferencing + drpai_status_t drpai_status; + // descriptor used for checking if DRPAI is done inferencing + fd_set rfds; + // struct used to define DRPAI timeout + struct timespec tv; + // retval for drpai status + int ret_drpai; + // retval when querying drpai status + int inf_status = 0; + + // DRP-AI Output Memory Preparation + drpai_data.address = drpai_address.data_out_addr; + drpai_data.size = drpai_address.data_out_size; + + // Start DRP-AI driver + EI_LOGD("Start DRPAI inference\r\n"); + int ioret = ioctl(drpai_fd, DRPAI_START, &proc[0]); + if (0 != ioret) { + EI_LOGE("Failed to Start DRPAI Inference: %d\n", errno); + return EI_IMPULSE_DRPAI_RUNTIME_FAILED; + } + + // Settings For pselect - this is how DRPAI signals inferencing complete + FD_ZERO(&rfds); + FD_SET(drpai_fd, &rfds); + // Define a timeout for DRP-AI to complete + tv.tv_sec = DRPAI_TIMEOUT; + tv.tv_nsec = 0; + + // Wait until DRP-AI ends + EI_LOGD("Waiting on DRPAI inference results\r\n"); + ret_drpai = pselect(drpai_fd + 1, &rfds, NULL, NULL, &tv, NULL); + if (ret_drpai == 0) { + EI_LOGE("DRPAI Inference pselect() Timeout: %d\n", errno); + return EI_IMPULSE_DRPAI_RUNTIME_FAILED; + } else if (ret_drpai < 0) { + EI_LOGE("DRPAI Inference pselect() Error: %d\n", errno); + return EI_IMPULSE_DRPAI_RUNTIME_FAILED; + } + + // Checks for DRPAI inference status errors + EI_LOGD("Getting DRPAI Status\r\n"); + inf_status = ioctl(drpai_fd, DRPAI_GET_STATUS, &drpai_status); + if (inf_status != 0) { + EI_LOGE("DRPAI Internal Error: %d\n", errno); + return EI_IMPULSE_DRPAI_RUNTIME_FAILED; + } + + EI_LOGD("Getting inference results\r\n"); + if (ioctl(drpai_fd, DRPAI_ASSIGN, &drpai_data) != 0) { + EI_LOGE("Failed to Assign DRPAI data: %d\n", errno); + return EI_IMPULSE_DRPAI_RUNTIME_FAILED; + } + + if (read(drpai_fd, drpai_output_buf, drpai_data.size) < 0) { + EI_LOGE("Failed to read DRPAI output data: %d\n", errno); + return EI_IMPULSE_DRPAI_RUNTIME_FAILED; + } + return EI_IMPULSE_OK; +} + +// close the driver (reset file handles) +EI_IMPULSE_ERROR drpai_close(uint32_t input_frame_size) { + munmap(drpai_input_buf, input_frame_size); + free(drpai_output_buf); + if (drpai_fd > 0) { + if (0 != close(drpai_fd)) { + EI_LOGE("Failed to Close DRP-AI Driver: errno=%d\n", errno); + return EI_IMPULSE_DRPAI_RUNTIME_FAILED; + } + drpai_fd = -1; + } + return EI_IMPULSE_OK; +} + +#if ((EI_CLASSIFIER_OBJECT_DETECTION == 1) && (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI)) +EI_IMPULSE_ERROR drpai_run_yolov5_postprocessing( + const ei_impulse_t *impulse, + ei_learning_block_config_tflite_graph_t *block_config, + signal_t *signal, + ei_impulse_result_t *result, + bool debug = false) +{ 
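    // Editor's note (illustrative): this post-processing step feeds the raw DRP-AI
    // output back into a small TFLite model (yolov5_part2) covering the YOLOv5 layers
    // the DRP-AI cannot run. The flat output buffer is split into the three detection
    // grids by size (NUM_GRID_1/2/3 squared times the per-grid-cell element count),
    // and each slice is matched to the TFLite input tensor with the same element
    // count, since the tensor order is not known up front.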
+ + static std::unique_ptr model = nullptr; + static std::unique_ptr interpreter = nullptr; + + if (!model) { + model = tflite::FlatBufferModel::BuildFromBuffer((const char*)yolov5_part2, yolov5_part2_len); + if (!model) { + ei_printf("Failed to build TFLite model from buffer\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + tflite::ops::builtin::BuiltinOpResolver resolver; + tflite::InterpreterBuilder builder(*model, resolver); + builder(&interpreter); + + if (!interpreter) { + ei_printf("Failed to construct interpreter\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + if (interpreter->AllocateTensors() != kTfLiteOk) { + ei_printf("AllocateTensors failed\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + int hw_thread_count = (int)std::thread::hardware_concurrency(); + hw_thread_count -= 1; // leave one thread free for the other application + if (hw_thread_count < 1) { + hw_thread_count = 1; + } + + if (interpreter->SetNumThreads(hw_thread_count) != kTfLiteOk) { + ei_printf("SetNumThreads failed\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + } + + const size_t drpai_buff_size = drpai_address.data_out_size / sizeof(float); + const size_t drpai_features = drpai_buff_size; + + const size_t els_per_grid = drpai_features / ((NUM_GRID_1 * NUM_GRID_1) + (NUM_GRID_2 * NUM_GRID_2) + (NUM_GRID_3 * NUM_GRID_3)); + + const size_t grid_1_offset = 0; + const size_t grid_1_size = (NUM_GRID_1 * NUM_GRID_1) * els_per_grid; + + const size_t grid_2_offset = grid_1_offset + grid_1_size; + const size_t grid_2_size = (NUM_GRID_2 * NUM_GRID_2) * els_per_grid; + + const size_t grid_3_offset = grid_2_offset + grid_2_size; + const size_t grid_3_size = (NUM_GRID_3 * NUM_GRID_3) * els_per_grid; + + // Now we don't know the exact tensor order for some reason + // so let's do that dynamically + for (size_t ix = 0; ix < 3; ix++) { + TfLiteTensor * tensor = interpreter->input_tensor(ix); + size_t tensor_size = 1; + for (size_t ix = 0; ix < tensor->dims->size; ix++) { + tensor_size *= tensor->dims->data[ix]; + } + + EI_LOGD("input tensor %d, tensor_size=%d\n", (int)ix, (int)tensor_size); + + float *input = interpreter->typed_input_tensor(ix); + + if (tensor_size == grid_1_size) { + memcpy(input, drpai_output_buf + grid_1_offset, grid_1_size * sizeof(float)); + } + else if (tensor_size == grid_2_size) { + memcpy(input, drpai_output_buf + grid_2_offset, grid_2_size * sizeof(float)); + } + else if (tensor_size == grid_3_size) { + memcpy(input, drpai_output_buf + grid_3_offset, grid_3_size * sizeof(float)); + } + else { + ei_printf("ERR: Cannot determine which grid to use for input tensor %d with %d tensor size\n", + (int)ix, (int)tensor_size); + return EI_IMPULSE_TFLITE_ERROR; + } + } + + uint64_t ctx_start_us = ei_read_timer_us(); + + interpreter->Invoke(); + + uint64_t ctx_end_us = ei_read_timer_us(); + + EI_LOGD("Invoke took %d ms.\n", (int)((ctx_end_us - ctx_start_us) / 1000)); + + float* out_data = interpreter->typed_output_tensor(0); + + const size_t out_size = impulse->tflite_output_features_count; + + if (debug) { + printf("First 20 bytes: "); + for (size_t ix = 0; ix < 20; ix++) { + ei_printf("%f ", out_data[ix]); + } + ei_printf("\n"); + } + + // printf("Last 5 bytes: "); + // for (size_t ix = out_size - 5; ix < out_size; ix++) { + // printf("%f ", out_data[ix]); + // } + // printf("\n"); + + return fill_result_struct_f32_yolov5(impulse, block_config, result, 5, out_data, out_size); +} +#endif + +/** + * @brief Do neural network inferencing over the processed feature matrix + * + * @param fmatrix Processed matrix + * @param 
result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. + */ +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug) +{ + // dummy, not used for DRPAI +} + +/** + * Special function to run the classifier on images, only works on TFLite models (either interpreter or EON or for tensaiflow) + * that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized' + * returns EI_IMPULSE_OK. + */ +EI_IMPULSE_ERROR run_nn_inference_image_quantized( + const ei_impulse_t *impulse, + signal_t *signal, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + + // this needs to be changed for multi-model, multi-impulse + static bool first_run = true; + uint64_t ctx_start_us; + uint64_t dsp_start_us = ei_read_timer_us(); + + if (first_run) { + // map memory regions to the DRP-AI UDMA. This is required for passing data + // to and from DRP-AI + int t = drpai_init_mem(impulse->nn_input_frame_size); + if (t != 0) { + return EI_IMPULSE_DRPAI_INIT_FAILED; + } + + EI_IMPULSE_ERROR ret = drpai_init_classifier(); + if (ret != EI_IMPULSE_OK) { + drpai_close(impulse->nn_input_frame_size); + return EI_IMPULSE_DRPAI_INIT_FAILED; + } + + EI_LOGI("Initialized input and output buffers:\r\n"); + EI_LOGI("input buf (addr: %p, size: 0x%x)\r\n", drpai_input_buf, drpai_address.data_in_size); + EI_LOGI("output buf (addr: %p, size: 0x%x)\r\n", drpai_output_buf, drpai_address.data_out_size); + EI_LOGI("udmabuf_addr: %p\n", udmabuf_address); + } + + EI_LOGD("Starting DSP...\n"); + int ret; + + EI_LOGD("fmatrix size == Bpp * signal.total_length ( %p == %p * %p = %p )\r\n", proc[DRPAI_INDEX_INPUT].size, 3, signal->total_length, 3 * signal->total_length); + // Creates a features matrix mapped to the DRP-AI UDMA input region + ei::matrix_u8_t features_matrix(1, proc[DRPAI_INDEX_INPUT].size, drpai_input_buf); + + // Grabs the raw image buffer from the signal, DRP-AI will automatically + // extract features + ret = extract_drpai_features_quantized( + signal, + &features_matrix, + impulse->dsp_blocks[0].config, + impulse->frequency); + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to run DSP process (%d)\n", ret); + return EI_IMPULSE_DSP_ERROR; + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + return EI_IMPULSE_CANCELED; + } + + result->timing.dsp_us = ei_read_timer_us() - dsp_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); + if (debug) { + ei_printf("Features (%d ms.): ", result->timing.dsp); + for (size_t ix = 0; ix < EI_CLASSIFIER_NN_INPUT_FRAME_SIZE; ix++) { + ei_printf("0x%hhx, ", drpai_input_buf[ix]); + } + ei_printf("\n"); + } + + ctx_start_us = ei_read_timer_us(); + + // Run DRP-AI inference, a static buffer is used to store the raw output + // results + ret = drpai_run_classifier_image_quantized(); + + // close driver to reset memory, file pointer + if (ret != EI_IMPULSE_OK) { + drpai_close(impulse->nn_input_frame_size); + first_run = true; + } + else { + // drpai_reset(); + first_run = false; + } + + EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK; + + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case 
EI_CLASSIFIER_LAST_LAYER_FOMO: { + if (debug) { + ei_printf("DEBUG: raw drpai output"); + ei_printf("\n["); + for (uint32_t i = 0; i < impulse->tflite_output_features_count; i++) { + ei_printf_float(drpai_output_buf[i]); + ei_printf(" "); + } + ei_printf("]\n"); + } + + fill_res = fill_result_struct_f32_fomo( + impulse, + block_config, + result, + drpai_output_buf, + impulse->fomo_output_size, + impulse->fomo_output_size); + break; + } + case EI_CLASSIFIER_LAST_LAYER_SSD: { + ei_printf("ERR: MobileNet SSD models are not implemented for DRP-AI (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: { + if (block_config->quantized == 1) { + ei_printf("ERR: YOLOv5 does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + else { + if (debug) { + ei_printf("DEBUG: raw drpai output"); + ei_printf("\n["); + // impulse->tflite_output_features_count can't be used here as this is not the final output + // so print only the first 10 values. + for (uint32_t i = 0; i < 10; i++) { + ei_printf_float(drpai_output_buf[i]); + ei_printf(" "); + } + ei_printf("]\n"); + } + } + +#if ((EI_CLASSIFIER_OBJECT_DETECTION == 1) && (EI_CLASSIFIER_OBJECT_DETECTION_LAST_LAYER == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI)) + // do post processing + fill_res = drpai_run_yolov5_postprocessing(impulse, block_config, signal, result, debug); +#endif + break; + } + default: { + ei_printf("ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + } + } + else { + fill_res = fill_result_struct_f32(impulse, result, drpai_output_buf, debug); + } + + if (fill_res != EI_IMPULSE_OK) { + return fill_res; + } + + result->timing.classification_us = ei_read_timer_us() - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + return EI_IMPULSE_OK; +} + +#endif // #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_DRPAI) +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_DRPAI_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/engines.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/engines.h new file mode 100644 index 0000000..5fa4bd1 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/engines.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_ENGINES_H_ +#define _EI_CLASSIFIER_ENGINES_H_ + +#include "edge-impulse-sdk/classifier/ei_model_types.h" + +EI_IMPULSE_ERROR run_kmeans_anomaly( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug); + +EI_IMPULSE_ERROR run_gmm_anomaly( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug); + +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug); + +int extract_tflite_eon_features(signal_t *signal, matrix_t *output_matrix, + void *config_ptr, const float frequency); + +int extract_tflite_features(signal_t *signal, matrix_t *output_matrix, + void *config_ptr, const float frequency); + +#endif // _EI_CLASSIFIER_ENGINES_H_s \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/memryx.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/memryx.h new file mode 100644 index 0000000..5ce2516 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/memryx.h @@ -0,0 +1,476 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2023 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef EI_CLASSIFIER_INFERENCING_ENGINE_MEMRYX_H +#define EI_CLASSIFIER_INFERENCING_ENGINE_MEMRYX_H + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_MEMRYX) + +/** + * @brief we are forcing SOFTWARE inference (simulation), + * beacuse use of hardware is not ready + * + */ +#ifndef EI_CLASSIFIER_USE_MEMRYX_SOFTWARE +#define EI_CLASSIFIER_USE_MEMRYX_HARDWARE 1 +#endif + +/** + * @brief Memryx accelerator can leverage up to four MX3 chips for inference. + * Specify here the number of chips to be used for acceleration, + * e.g. set to 4 in order to use all four chips of the M3X board. 
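+ * Illustrative note: because of the #ifndef guard below, the default of 1 chip can be
+ * overridden from the build, e.g. with a compiler define such as
+ * -DEI_CLASSIFIER_USE_MEMRYX_CHIPS_COUNT=4.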
+ */ +#ifndef EI_CLASSIFIER_USE_MEMRYX_CHIPS_COUNT +#define EI_CLASSIFIER_USE_MEMRYX_CHIPS_COUNT 1 +#endif + +#include "model-parameters/model_metadata.h" +#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1 +#include "model-parameters/model_variables.h" +#endif + +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "tensorflow-lite/tensorflow/lite/kernels/internal/reference/softmax.h" +#include +#include +#include +#include +#include +#include +#ifdef EI_CLASSIFIER_USE_MEMRYX_SOFTWARE +#include "pybind11/embed.h" +#include "pybind11/numpy.h" +#include "pybind11/stl.h" +#else +#include "memx/memx.h" +#endif +/* Headers below help us bundle the DFP model with EIM in single binary */ +#include "memryx-model/memryx-model.h" +#include "utils/model_header_utils.h" + +/* Result delivered by memryx simulator contains 3 fields, indexes for print */ +#define MX_SIM_RES_OUTPUTS 0 +#define MX_SIM_RES_LATENCY 1 +#define MX_SIM_RES_FPS 2 + +std::stringstream engine_info; + +static bool memryx_initialized = false; + +#ifdef EI_CLASSIFIER_USE_MEMRYX_SOFTWARE +/* brings in the `_a` literals to set args to python API */ +using namespace pybind11::literals; +namespace py = pybind11; +/* PyBind variables for EIM with Simulator */ +static py::module_ memryx; +static py::module_ np; +static py::object zeroes; +static py::object Simulator; +static py::object model; +static py::object device; +static std::vector vec; +#endif + +#ifdef EI_CLASSIFIER_USE_MEMRYX_HARDWARE +/* Variables for EIM with Hardware */ +const uint8_t flow_id = 0; // flow port 0 +const uint8_t model_id = 0; // model 0 +const uint8_t group_id = 0; // MPU device group 0 +const int timeout = 0; // was 200 ms +int argmax = 0; // index with maximum score +#endif + +/* We need a workaround for softmax because + * the MX3+ is not coming out this year, and + * the MX3 does not support the SoftMax layer + */ +static tflite::RuntimeShape softmax_shape; +static tflite::SoftmaxParams dummy_params; + +static bool verbose_debug = 0; + +bool init_memryx(bool debug, const ei_impulse_t *impulse) +{ + /* Unpack DFP model to file system */ + std::string project_file_path = "/tmp/" + std::string(impulse->project_name) + "-" + std::to_string(impulse->project_id) + "-" + std::to_string(impulse->deploy_version); + create_project_if_not_exists(project_file_path, model_h_files, model_h_files_len); + + std::string proj_model_path = project_file_path + "/memryx_trained.dfp"; + const char * model_file_path = proj_model_path.c_str(); +#if (defined(EI_CLASSIFIER_USE_MEMRYX_HARDWARE) && (EI_CLASSIFIER_USE_MEMRYX_HARDWARE == 1)) +#warning "Building EIM for use with MemryX Hardware" + memx_status status = MEMX_STATUS_OK; + // 1. Bind MPU device group 0 as MX3:Cascade to model 0. + status = memx_open(model_id, group_id, MEMX_DEVICE_CASCADE); + if(memx_status_error(status)) { + return false; + } + ei_printf("Memryx device opened.\n"); + + // 2. Download model from a DFP file to MPU device group, input and + // output feature map shape is auto, configured after download complete. + status = memx_download_model(model_id, model_file_path, 0, // model_idx = 0 + MEMX_DOWNLOAD_TYPE_WTMEM_AND_MODEL); + if(memx_status_error(status)) { + return false; + } + ei_printf("Memryx model downloaded.\n"); + + // 3. Enable data transfer of this model to device. Set to no wait here + // since driver will go to data transfer state eventually. 
+ status = memx_set_stream_enable(model_id, 0); + if(memx_status_error(status)) { + return false; + } + ei_printf("Data streaming to and from the MX3 board is enabled\n"); +#elif (defined(EI_CLASSIFIER_USE_MEMRYX_SOFTWARE) && (EI_CLASSIFIER_USE_MEMRYX_SOFTWARE == 1)) +#warning "MEMRYX model will be run in SIMULATION mode (not on real hardware)!" + py::list path; + // import Python's memryx module + try { + memryx = py::module_::import("memryx"); + if(debug) printf("Memryx PyModule init\n"); + } + catch (py::error_already_set &e) { + ei_printf("ERR: Importing 'memryx' library failed:\n%s\n", e.what()); + return false; + } + + Simulator = memryx.attr("Simulator"); + if(debug) printf("Simulator API init\n"); + + // load model + try { + model = Simulator("dfp"_a = model_file_path); + if(debug) printf("Model API init\n"); + } + catch (py::error_already_set &e) { + ei_printf("ERR: Can't load model file from %s\n", model_file_path); + return false; + } +#else +#error "Neither EI_CLASSIFIER_USE_MEMRYX_HARDWARE or EI_CLASSIFIER_USE_MEMRYX_SOFTWARE are defined or set to 1" +#endif + + // clear info + engine_info.str(""); + + return true; +} + + +/** + * @brief Do neural network inferencing over the processed feature matrix + * + * @param impulse Struct describing impulse architecture + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. + */ +#if (defined(EI_CLASSIFIER_USE_MEMRYX_HARDWARE) && (EI_CLASSIFIER_USE_MEMRYX_HARDWARE == 1)) +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + + memx_status status = MEMX_STATUS_OK; + int32_t ifmap_height, ifmap_width, ifmap_channel_number, ifmap_format; + int32_t ofmap_height, ofmap_width, ofmap_channel_number, ofmap_format; + int32_t z; + uint64_t ctx_start_us = 0; + uint64_t ctx_end_us = 0; + + // check if we've initialized the interpreter and device? + if (memryx_initialized == false) { + if(init_memryx(debug, impulse) == false) { + return EI_IMPULSE_MEMRYX_ERROR; + } + memryx_initialized = true; + } + + /* 4. get input shape - Not needed during runtime, available only for debugging */ + if(verbose_debug) { + status = memx_get_ifmap_size(model_id, flow_id, &ifmap_height, &ifmap_width, &z, &ifmap_channel_number, &ifmap_format); + ei_printf("status = %d, ifmap shape = (%d, %d, %d), format = %d\n", + status, ifmap_height, ifmap_width, ifmap_channel_number, ifmap_format); + } + + // 5. get output shape + status = memx_get_ofmap_size(model_id, flow_id, &ofmap_height, &ofmap_width, &z, &ofmap_channel_number, &ofmap_format); + if(debug) { + ei_printf("status = %d, ofmap shape = (%d, %d, %d), format = %d\n", + status, ofmap_height, ofmap_width, ofmap_channel_number, ofmap_format); + } + if(memx_status_error(status)) { + return EI_IMPULSE_MEMRYX_ERROR; + } + + // 6. 
Prepare input and output buffers + float* ofmap = new float [ofmap_width * ofmap_height * ofmap_channel_number]; + +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size; + ei::matrix_t* matrix = NULL; + + ei::matrix_t combined_matrix(1, impulse->nn_input_frame_size); + uint32_t buf_pos = 0; + + for (size_t i = 0; i < input_block_ids_size; i++) { + size_t cur_mtx = input_block_ids[i]; + + if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, mtx_size)) { + ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx); + return EI_IMPULSE_INVALID_SIZE; + } + + for (size_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { + combined_matrix.buffer[buf_pos++] = matrix->buffer[ix]; + } + } + matrix = &combined_matrix; +#else + ei::matrix_t* matrix = fmatrix[0].matrix; +#endif + + float* ifmap = (float*)matrix->buffer; + + if(verbose_debug) { + for(int fidx = 0; fidx < (ofmap_width*ofmap_height); fidx++) { + ei_printf("%f\t", matrix->buffer[fidx]); + if(!(fidx % ofmap_width)) ei_printf("\n"); + } + } + + // TODO stream_ifmap only copies buffer to MX3 board, + // we need a different approach to measure latency + ctx_start_us = ei_read_timer_us(); + // 7. Stream inputs to device and start inference. + status = memx_stream_ifmap(model_id, 0, ifmap, timeout); + ctx_end_us = ei_read_timer_us(); + if(memx_status_error(status)) { + return EI_IMPULSE_MEMRYX_ERROR; + } + + result->timing.classification_us = ctx_end_us - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + + engine_info.str(""); + engine_info << "Inferences per second: " << (1000000 / result->timing.classification_us); + + // 6. Stream output results from device after inference + status = memx_stream_ofmap(model_id, 0, ofmap, timeout); + if(debug) { + ei_printf(" memx_stream_ofmap (status=%d)\n", status); + } + if(memx_status_error(status)) { + return EI_IMPULSE_MEMRYX_ERROR; + } + + // init softmax shape + std::vector output_shape = {static_cast(ofmap_height),static_cast(ofmap_width), + static_cast(ofmap_channel_number)}; + softmax_shape.BuildFrom(output_shape); + // dumy beta parameter for softmax purposes + dummy_params.beta = 1; + + // apply softmax, becuase MX3 does not support this operation + tflite::reference_ops::Softmax(dummy_params, softmax_shape, ofmap, softmax_shape, ofmap); + + // handle inference outputs + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_FOMO: { + ei_printf("FOMO executed on Memryx\n"); + fill_result_struct_f32_fomo( + impulse, + block_config, + result, + ofmap, + impulse->fomo_output_size, + impulse->fomo_output_size); + break; + } + case EI_CLASSIFIER_LAST_LAYER_SSD: { + ei_printf("Mobilenet SSD is not implemented for Edge Impulse MemryX engine, please contact Edge Impulse Support\n"); + break; + } + default: { + ei_printf("ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + } + } + else { + fill_result_struct_f32(impulse, result, ofmap, debug); + } + + delete ofmap; + // Device is closed only at EIM exit, therefore we do not use memx_close() + return EI_IMPULSE_OK; +} + +#elif (defined(EI_CLASSIFIER_USE_MEMRYX_SOFTWARE) && (EI_CLASSIFIER_USE_MEMRYX_SOFTWARE == 1)) +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* inputBlockIds, + 
ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + + // init Python embedded interpreter (should be called once!) + static py::scoped_interpreter guard{}; + + // check if we've initialized the interpreter and device? + if (memryx_initialized == false) { + if(init_memryx(debug, impulse) == false) { + return EI_IMPULSE_MEMRYX_ERROR; + } + memryx_initialized = true; + } + + std::vector input_shape = {1, impulse->input_width, impulse->input_height, 3}; + py::array_t input_data(input_shape); // = zeroes(input_shape, 0); + + printf("impulse->w=%d h=%d\n", impulse->input_width, impulse->input_height); + + /* + * convert features data to the expected shape (4dim) + * For images RGB shape is (width, height, colors) + * For images BW shape is (width, height, 1) + * For Audio shape is (width, height, 1) - spectrogram + */ + auto r = input_data.mutable_unchecked<4>(); + + for (size_t i = 0; i < input_block_ids_size; i++) { + uint16_t cur_mtx = input_block_ids[i]; +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + ei::matrix_t* matrix = NULL; + + if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, mtx_size)) { + ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx); + return EI_IMPULSE_INVALID_SIZE; + } +#else + ei::matrix_t* matrix = fmatrix[0].matrix; +#endif + for (py::ssize_t x = 0; x < r.shape(1); x++) { + for (py::ssize_t y = 0; y < r.shape(2); y++) { + for(py::ssize_t z = 0; z < r.shape(3); z++) { + r(0, x, y, z) = (float)(fmatrix.buffer[x * r.shape(2) * r.shape(3) + y * r.shape(3) + z]); + } + } + } + } + + py::object runmodel = model.attr("run"); + // result from mx_sim is {np array, float, float} + py::tuple args = py::make_tuple(py::none(), 0.00, 0.00); + // run inference in sumualtor + printf("start inference\n"); + uint64_t ctx_start_us = ei_read_timer_us(); + args = runmodel("inputs"_a=input_data,"frames"_a=1); + uint64_t ctx_end_us = ei_read_timer_us(); + printf("end of inference\n"); + + result->timing.classification_us = ctx_end_us - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + + engine_info.str(""); + engine_info << "Inferences per second: " << (1000000 / result->timing.classification_us); + + py::array outputs = py::list(args[0]); + py::array_t potentials; + std::vector potentials_v; + + potentials = outputs.squeeze().cast>(); + + if (block_config->object_detection == false) { + potentials_v = outputs.squeeze().cast>(); + } + else { + auto q = potentials.unchecked<>(); + for (py::ssize_t x = 0; x < q.shape(0); x++) { + for (py::ssize_t y = 0; y < q.shape(1); y++) { + for(py::ssize_t z = 0; z < q.shape(2); z++) { + potentials_v.push_back(q(x, y, z)); + } + } + } + } + + if(debug) { + std::string ret_str = py::str(potentials).cast(); + ei_printf("Memryx raw output:\n%s\n", ret_str.c_str()); + } + + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_FOMO: { + ei_printf("FOMO executed on Memryx\n"); + fill_result_struct_f32_fomo( + impulse, + block_config, + result, + potentials_v.data(), + impulse->fomo_output_size, + impulse->fomo_output_size); + break; + } + case EI_CLASSIFIER_LAST_LAYER_SSD: { + ei_printf("Mobilenet SSD executed on Memryx\n"); + break; + } + default: { + ei_printf("ERR: Unsupported object detection last layer (%d)\n", + impulse->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; 
+ } + } + } + else { + fill_result_struct_f32(impulse, result, potentials_v.data(), debug); + } + + return EI_IMPULSE_OK; +} +#else +#error "Neither EI_CLASSIFIER_USE_MEMRYX_HARDWARE or EI_CLASSIFIER_USE_MEMRYX_SOFTWARE are defined or set to 1" +#endif // USE_HARDWARE + +#endif // EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_MEMRYX + +#endif /* EI_CLASSIFIER_INFERENCING_ENGINE_MEMRYX_H */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h new file mode 100644 index 0000000..acc3e12 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/onnx_tidl.h @@ -0,0 +1,704 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_ONNX_TIDL_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_ONNX_TIDL_H_ + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL) && (EI_CLASSIFIER_COMPILED != 1) + +#include "model-parameters/model_metadata.h" +#if EI_CLASSIFIER_HAS_MODEL_VARIABLES == 1 +#include "model-parameters/model_variables.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "itidl_rt.h" +#include +#include +#include + +#include +#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" + +#include "onnx-model/tidl-model.h" +#include "utils/model_header_utils.h" + +#define TI_PREPROC_DEFAULT_WIDTH 320 +#define TI_PREPROC_DEFAULT_HEIGHT 240 + +using namespace std; + +/** + * \brief returns time in micro sec + * @returns void + */ +double getUs(struct timeval t) +{ + return(t.tv_sec * 1000000 + t.tv_usec); +} + +/** + * \brief print tensor info + * \param session onnx session + * \param input_node_names input array node names + * @returns int status + */ +int printTensorInfo(Ort::Session *session, std::vector *input_node_names, std::vector *output_node_names) +{ + size_t num_input_nodes = (*session).GetInputCount(); + size_t num_output_nodes = (*session).GetOutputCount(); + Ort::TypeInfo type_info = (*session).GetInputTypeInfo(0); + auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); + std::vector input_node_dims = tensor_info.GetShape(); + ei_printf("LOG_INFO: number of inputs:%d \n", num_input_nodes); + ei_printf("LOG_INFO: number of outputs: %d\n", num_output_nodes); + ei_printf("LOG_INFO: input(0) name: %s\n", (*input_node_names)[0]); + + Ort::TypeInfo type_info_out = (*session).GetOutputTypeInfo(0); + auto tensor_info_out = type_info_out.GetTensorTypeAndShapeInfo(); + std::vector output_node_dims = tensor_info_out.GetShape(); + /* iterate over all input nodes */ + for (int i = 0; i < num_input_nodes; i++) + { + /* print 
input node names */ + ei_printf("LOG_INFO: Input %d : name=%s\n", i, (*input_node_names)[i]); + + /* print input node types */ + Ort::TypeInfo type_info = (*session).GetInputTypeInfo(i); + auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); + + ONNXTensorElementDataType type = tensor_info.GetElementType(); + ei_printf("LOG_INFO: Input %d : type=%d\n", i, type); + /* print input shapes/dims */ + input_node_dims = tensor_info.GetShape(); + ei_printf("LOG_INFO: Input %d : num_dims=%zu\n", i, input_node_dims.size()); + for (int j = 0; j < input_node_dims.size(); j++) + { + ei_printf("LOG_INFO: Input %d : dim %d=%jd\n", i, j, input_node_dims[j]); + } + } + if (num_input_nodes != 1) + { + ei_printf("LOG_INFO: supports only single input model \n"); + return EI_IMPULSE_ONNX_ERROR; + } + + for (int i = 0; i < num_output_nodes; i++) + { + /* print output node names */ + ei_printf("LOG_INFO: Output %d : name=%s\n", i, (*output_node_names)[i]); + + /* print output node types */ + Ort::TypeInfo type_info = (*session).GetOutputTypeInfo(i); + auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); + + ONNXTensorElementDataType type = tensor_info.GetElementType(); + ei_printf("LOG_INFO: Output %d : type=%d\n", i, type); + /* print output shapes/dims */ + output_node_dims = tensor_info.GetShape(); + ei_printf("LOG_INFO: Output %d : num_dims=%zu\n", i, output_node_dims.size()); + for (int j = 0; j < output_node_dims.size(); j++) + { + ei_printf("LOG_INFO: Output %d : dim %d=%jd\n", i, j, output_node_dims[j]); + } + } + return EI_IMPULSE_OK; +} + +void * allocTensorMem(int size, int accel) +{ + void * ptr = NULL; + if (accel) + { + #ifdef DEVICE_AM62 + ei_printf("LOG_INFO: TIDL Delgate mode is not allowed on AM62 devices...\n"); + ei_printf("LOG_ERROR: Could not allocate memory for a Tensor of size %d \n ", size); + exit(0); + #else + ptr = TIDLRT_allocSharedMem(64, size); + #endif + } + else + { + ptr = malloc(size); + } + if (ptr == NULL) + { + ei_printf("LOG_ERROR: Could not allocate memory for a Tensor of size %d \n ", size); + exit(0); + } + return ptr; +} + +void freeTensorMem(void * ptr, int accel) +{ + if (accel) + { + #ifndef DEVICE_AM62 + TIDLRT_freeSharedMem(ptr); + #endif + } + else + { + free(ptr); + } +} + +/** + * Setup the ONNX runtime + * + * @param ctx_start_us Pointer to the start time + * @param input Pointer to input tensor + * @param output Pointer to output tensor + * @param micro_interpreter Pointer to interpreter (for non-compiled models) + * @param micro_tensor_arena Pointer to the arena that will be allocated + * + * @return EI_IMPULSE_OK if successful + */ +static EI_IMPULSE_ERROR inference_onnx_setup( + const ei_impulse_t *impulse, + uint64_t *ctx_start_us, + std::vector* input_tensors, + std::vector* output_tensors, + Ort::Session** session_ptr, + Ort::RunOptions** run_options_ptr, + Ort::IoBinding** binding_ptr) { + + static bool onnx_first_run = true; + // Nothing to do after first run + if (!onnx_first_run) { + return EI_IMPULSE_OK; + } + + std::string proj_artifacts_path = "/tmp/" + std::string(impulse->project_name) + "-" + std::to_string(impulse->project_id) + "-" + std::to_string(impulse->deploy_version); + + create_project_if_not_exists(proj_artifacts_path, model_h_files, model_h_files_len); + + std::string proj_model_path = proj_artifacts_path + "/model.onnx"; + + ei_printf("test onnx tidl: %s\n", __FUNCTION__); + #pragma message ( "test onnx tidl: run_nn_inference") + + /* Initialize enviroment, maintains thread pools and state info */ + Ort::Env 
env(ORT_LOGGING_LEVEL_WARNING, "test"); + /* Initialize session options */ + Ort::SessionOptions session_options; + //TODO: from where do we load number of threads? + session_options.SetIntraOpNumThreads(1); + + ei_printf("LOG_INFO: model accelerated \n"); + c_api_tidl_options *options = (c_api_tidl_options *)malloc(sizeof(c_api_tidl_options)); + OrtStatus *def_status = OrtSessionsOptionsSetDefault_Tidl(options); + ei_printf("LOG_INFO: artifacts: %s \n", proj_artifacts_path.c_str()); + strcpy(options->artifacts_folder, proj_artifacts_path.c_str()); + if(NULL == options){ + ei_printf("LOG_ERROR: faild to allocate c_api_tidl_options \n"); + return EI_IMPULSE_ONNX_ERROR; + } + OrtStatus *status = OrtSessionOptionsAppendExecutionProvider_Tidl(session_options, options); + + session_options.SetGraphOptimizationLevel(GraphOptimizationLevel::ORT_ENABLE_EXTENDED); + Ort::AllocatorWithDefaultOptions allocator; + + /* ORT Session */ + Ort::Session* session = new Ort::Session(env, proj_model_path.c_str(), session_options); + *session_ptr = session; + ei_printf("LOG_INFO: Loaded model %s\n", proj_model_path.c_str()); + + /* Input information */ + size_t num_input_nodes = session->GetInputCount(); + std::vector input_node_names(num_input_nodes); + Ort::TypeInfo type_info = session->GetInputTypeInfo(0); + auto tensor_info = type_info.GetTensorTypeAndShapeInfo(); + std::vector input_node_dims = tensor_info.GetShape(); + ONNXTensorElementDataType input_tensor_type = tensor_info.GetElementType(); + + /* output information */ + size_t num_output_nodes = session->GetOutputCount(); + std::vector output_node_names(num_output_nodes); + for (int i = 0; i < num_output_nodes; i++) + { + output_node_names[i] = session->GetOutputName(i, allocator); + } + for (int i = 0; i < num_input_nodes; i++) + { + input_node_names[i] = session->GetInputName(i, allocator); + } + + type_info = session->GetOutputTypeInfo(0); + auto output_tensor_info = type_info.GetTensorTypeAndShapeInfo(); + std::vector output_node_dims = output_tensor_info.GetShape(); + size_t output_tensor_size = output_node_dims[1]; + + if (EI_IMPULSE_ONNX_ERROR == printTensorInfo(session, &input_node_names, &output_node_names)) { + ei_printf("LOG_ERROR: print tensor information failed!\n"); + return EI_IMPULSE_ONNX_ERROR; + } + + ssize_t input_tensor_size_bytes; + /* simplify ... 
using known dim values to calculate size */ + size_t input_tensor_size = impulse->nn_input_frame_size; + void *inData; + if (input_tensor_type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT) + { + input_tensor_size_bytes = input_tensor_size * sizeof(float); + inData = allocTensorMem(input_tensor_size_bytes, true); + } + else if (input_tensor_type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8) + { + input_tensor_size_bytes = input_tensor_size * sizeof(uint8_t); + inData = allocTensorMem(input_tensor_size_bytes, true); + } + else + { + ei_printf("LOG_ERROR: indata type not supported yet \n "); + return EI_IMPULSE_ONNX_ERROR; + } + auto memory_info = Ort::MemoryInfo::CreateCpu(OrtArenaAllocator, OrtMemTypeDefault); + + Ort::Value input_tensor = Ort::Value::CreateTensor(memory_info, inData, input_tensor_size_bytes, input_node_dims.data(), 4, input_tensor_type); + input_tensors->push_back(std::move(input_tensor)); + + Ort::RunOptions* run_options = new Ort::RunOptions(); + *run_options_ptr = run_options; + run_options->SetRunLogVerbosityLevel(2); + auto output_tensors_warm_up = session->Run(*run_options, input_node_names.data(), input_tensors->data(), 1, output_node_names.data(), num_output_nodes); + + //void *outData = allocTensorMem(output_tensor_size * sizeof(float), true); + Ort::IoBinding* binding = new Ort::IoBinding(*session); + *binding_ptr = binding; + binding->BindInput(input_node_names[0], (*input_tensors)[0]); + + for(int idx=0; idx < num_output_nodes; idx++) + { + auto node_dims = output_tensors_warm_up[idx].GetTypeInfo().GetTensorTypeAndShapeInfo().GetShape(); + size_t tensor_size = 1; + for(int j = node_dims.size()-1; j >= 0; j--) + { + tensor_size *= node_dims[j]; + } + ONNXTensorElementDataType tensor_type = output_tensors_warm_up[idx].GetTypeInfo().GetTensorTypeAndShapeInfo().GetElementType(); + if(tensor_type == ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT) + { + tensor_size *= sizeof(float); + } + else if(tensor_type == ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8) + { + tensor_size *= sizeof(uint8_t); + } + else if(tensor_type == ONNX_TENSOR_ELEMENT_DATA_TYPE_INT64) + { + tensor_size *= sizeof(int64_t); + } + else + { + ei_printf("LOG_ERROR: Un Supported output tensor_type\n"); + return EI_IMPULSE_ONNX_ERROR; + } + + void * outData = allocTensorMem(tensor_size, true); + auto output_tensor = Ort::Value::CreateTensor(memory_info, (void *)outData, tensor_size, node_dims.data(), node_dims.size(),tensor_type); + output_tensors->push_back(std::move(output_tensor)); + binding->BindOutput(output_node_names[idx], (*output_tensors)[idx]); + } + + onnx_first_run = false; + + return EI_IMPULSE_OK; +} + +/** + * Run ONNX model + * + * @param ctx_start_us Start time of the setup function (see above) + * @param output_tensors Output tensors + * @param session ONNX session + * @param run_options ONNX run options + * @param binding IO bindings + * @param debug Whether to print debug info + * + * @return EI_IMPULSE_OK if successful + */ +static EI_IMPULSE_ERROR inference_onnx_run(const ei_impulse_t *impulse, + void *config_ptr, + uint64_t ctx_start_us, + std::vector* input_tensors, + std::vector* output_tensors, + Ort::Session* session, + Ort::RunOptions* run_options, + Ort::IoBinding* binding, + ei_impulse_result_t *result, + bool debug) { + + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + + session->Run(*run_options, *binding); + + uint64_t ctx_end_us = ei_read_timer_us(); + result->timing.classification_us = ctx_end_us - ctx_start_us; + 
result->timing.classification = (int)(result->timing.classification_us / 1000); + + ONNXTensorElementDataType output_tensor_type = (*output_tensors).at(0).GetTypeInfo().GetTensorTypeAndShapeInfo().GetElementType(); + void *out_data = output_tensors->front().GetTensorMutableData(); + + // get output features count from model + auto node_dims = (*output_tensors).at(0).GetTypeInfo().GetTensorTypeAndShapeInfo().GetShape(); + size_t output_tensor_features_count = 1; + for(int j = node_dims.size()-1; j >= 0; j--) + { + output_tensor_features_count *= node_dims[j]; + } + + // Read the predicted y value from the model's output tensor + if (debug) { + ei_printf("Predictions (time: %d ms.):\n", result->timing.classification); + } + + EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK; + + // NOTE: for now only yolox object detection supported + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_YOLOX: { + if (block_config->quantized == 1) { + ei_printf("ERR: YOLOX does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + else { + if (debug) { + ei_printf("YOLOX OUTPUT (%d ms.): ", result->timing.classification); + for (size_t ix = 0; ix < output_tensor_features_count; ix++) { + ei_printf_float(((float*)out_data)[ix]); + ei_printf(" "); + } + ei_printf("\n"); + } + fill_res = fill_result_struct_f32_yolox_detect( + impulse, + block_config, + result, + (float*)out_data, + output_tensor_features_count); + } + break; + } + default: { + ei_printf("ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + break; + } + } + } + else { +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + + switch (output_tensor_type) { + case ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8: { + fill_res = fill_result_struct_i8(impulse, result, (int8_t*)out_data, impulse->tflite_output_zeropoint, impulse->tflite_output_scale, debug); + break; + } + case ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8: { + fill_res = fill_result_struct_i8(impulse, result, (int8_t*)out_data, impulse->tflite_output_zeropoint, impulse->tflite_output_scale, debug); + break; + } + default: { + ei_printf("ERR: Cannot handle output type (%d)\n", output_tensor_type); + return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL; + } + } + +#else + switch (output_tensor_type) { + case ONNX_TENSOR_ELEMENT_DATA_TYPE_FLOAT: { + fill_res = fill_result_struct_f32(impulse, result, (float*)out_data, debug); + break; + } + default: { + ei_printf("ERR: Cannot handle output type (%d)\n", output_tensor_type); + return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL; + } + } +#endif + } + + ///* freeing shared mem*/ + //for (size_t i = 0; i < output_tensors->size(); i++) + //{ + // void *ptr = (*output_tensors)[i].GetTensorMutableData(); + // freeTensorMem(ptr, true); + //} + //for (size_t i = 0; i < input_tensors->size(); i++) + //{ + // void *ptr = (*input_tensors)[i].GetTensorMutableData(); + // freeTensorMem(ptr, true); + //} + + if (fill_res != EI_IMPULSE_OK) { + return fill_res; + } + + return EI_IMPULSE_OK; +} + +/** + * @brief Do neural network inferencing over the processed feature matrix + * + * @param fmatrix Processed matrix >> features [array of features] this is input + * @param result Output classifier results >> output + * @param[in] debug Debug output enable + * + * @return The ei impulse error. 
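+ *
+ * Illustrative note: the body converts the HWC feature buffer produced by the DSP block
+ * into the CHW layout expected by the model, using src_ix = channels * width * h + w * channels + c.
+ * For a hypothetical 2x2 RGB input (width = height = 2, channels = 3), the value at
+ * h = 1, w = 0, c = 2 sits at HWC index 3*2*1 + 0*3 + 2 = 8 and is copied to CHW index
+ * c*height*width + h*width + w = 2*4 + 1*2 + 0 = 10.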
+ */ +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *afmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + static std::vector input_tensors; + static std::vector output_tensors; + static Ort::Session* session; + static Ort::RunOptions* run_options; + static Ort::IoBinding* binding; + uint64_t ctx_start_us; + + ei_printf("test onnx tidl: %s\n", __FUNCTION__); + #pragma message ( "test onnx tidl: run_nn_inference") + + EI_IMPULSE_ERROR init_res = inference_onnx_setup(impulse, + &ctx_start_us, + &input_tensors, + &output_tensors, + &session, + &run_options, + &binding); + + if (init_res != EI_IMPULSE_OK || session == NULL || run_options == NULL || + binding == NULL) { + ei_printf("LOG_ERROR: ONNX inference setup failed!\n"); + return EI_IMPULSE_ONNX_ERROR; + } + + uint64_t dsp_chw_start_us; + dsp_chw_start_us = ei_read_timer_us(); + + /* + ** Convert to CHW from HWC + */ + // features matrix maps around the input tensor to not allocate any memory + float *input_buffer = input_tensors.front().GetTensorMutableData(); + ei::matrix_t fmatrix(1, impulse->nn_input_frame_size, input_buffer); + + ei_dsp_config_image_t *config = (ei_dsp_config_image_t *)impulse->dsp_blocks[0].config; + + size_t channels = strcmp(config->channels, "Grayscale") == 0 ? 1 : 3; + size_t height = impulse->input_height; + size_t width = impulse->input_width; + + ei::matrix_t* matrix = afmatrix[0].matrix; + + int dest_ix = 0; + for (size_t c=0; c < channels; c++) { + for (size_t h=0; h < height; h++) { + for (size_t w=0; w < width; w++) { + uint32_t src_ix = channels * width * h + w*channels + c; + fmatrix.buffer[dest_ix++] = matrix->buffer[src_ix]; + } + } + } + + uint64_t dsp_chw_end_us = ei_read_timer_us(); + result->timing.dsp_us += dsp_chw_end_us - dsp_chw_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); + + if (debug) { + ei_printf("After Features (%ld us.): ", result->timing.dsp_us); + for (size_t ix = 0; ix < fmatrix.cols; ix++) { + ei_printf_float(fmatrix.buffer[ix]); + ei_printf(" "); + } + ei_printf("\n"); + } + + ctx_start_us = ei_read_timer_us(); + EI_IMPULSE_ERROR run_res = inference_onnx_run(impulse, + config_ptr, + ctx_start_us, + &input_tensors, + &output_tensors, + session, + run_options, + binding, + result, debug); + + if (run_res != EI_IMPULSE_OK) { + return run_res; + } + + return EI_IMPULSE_OK; +} + +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 +/** + * Special function to run the classifier on images, only works on TFLite models (either interpreter or EON or for tensaiflow) + * that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized' + * returns EI_IMPULSE_OK. 
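+ *
+ * Illustrative note: the quantized input relates to the original value through the usual
+ * affine mapping real = (q - zero_point) * scale. With the scale of 1/255 and zero point
+ * of 0 assumed by the debug print in the body, a quantized byte of 128 maps to
+ * 128 * (1/255), i.e. roughly 0.502.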
+ */ +EI_IMPULSE_ERROR run_nn_inference_image_quantized( + const ei_impulse_t *impulse, + signal_t *signal, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + static std::vector input_tensors; + static std::vector output_tensors; + static Ort::Session* session; + static Ort::RunOptions* run_options; + static Ort::IoBinding* binding; + uint64_t ctx_start_us; + + ei_printf("test onnx tidl: %s\n", __FUNCTION__); + #pragma message ( "test onnx tidl: run_nn_inference_image_quantized") + + EI_IMPULSE_ERROR init_res = inference_onnx_setup(impulse, + &ctx_start_us, + &input_tensors, &output_tensors, + &session, + &run_options, + &binding); + + if (init_res != EI_IMPULSE_OK || session == NULL || run_options == NULL || + binding == NULL) { + ei_printf("LOG_ERROR: ONNX inference setup failed!\n"); + return EI_IMPULSE_ONNX_ERROR; + } + + ONNXTensorElementDataType input_tensor_type = input_tensors.at(0).GetTypeInfo().GetTensorTypeAndShapeInfo().GetElementType(); + if (input_tensor_type != ONNX_TENSOR_ELEMENT_DATA_TYPE_UINT8 && + input_tensor_type != ONNX_TENSOR_ELEMENT_DATA_TYPE_INT8) { + return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; + } + + uint64_t dsp_start_us = ei_read_timer_us(); + + ei::matrix_i8_t a_features_matrix(1, impulse->nn_input_frame_size); + + // run DSP process and quantize automatically + int ret = extract_image_features_quantized(impulse, signal, &a_features_matrix, impulse->dsp_blocks[0].config, impulse->frequency, + impulse->learning_blocks[0].image_scaling); + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to run DSP process (%d)\n", ret); + return EI_IMPULSE_DSP_ERROR; + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + return EI_IMPULSE_CANCELED; + } + + if (debug) { + ei_printf("Before Features: "); + for (size_t ix = 0; ix < a_features_matrix.cols; ix++) { + ei_printf("%d", (uint8_t)a_features_matrix.buffer[ix]); + ei_printf(" "); + } + ei_printf("\n"); + } + + /* + ** Convert to CHW from HWC + */ + // features matrix maps around the input tensor to not allocate any memory + uint8_t *input_buffer = input_tensors.front().GetTensorMutableData(); + ei::matrix_i8_t features_matrix(1, impulse->nn_input_frame_size, (int8_t*) input_buffer); + + ei_dsp_config_image_t *config = (ei_dsp_config_image_t *)impulse->dsp_blocks[0].config; + + size_t channels = strcmp(config->channels, "Grayscale") == 0 ? 
1 : 3; + size_t height = impulse->input_height; + size_t width = impulse->input_width; + + int dest_ix = 0; + for (size_t c=0; c < channels; c++) { + for (size_t h=0; h < height; h++) { + for (size_t w=0; w < width; w++) { + uint32_t src_ix = channels * width * h + w*channels + c; + features_matrix.buffer[dest_ix++] = a_features_matrix.buffer[src_ix]; + } + } + } + + if (debug) { + ei_printf("After Features: "); + for (size_t ix = 0; ix < features_matrix.cols; ix++) { + ei_printf("%d", (uint8_t)features_matrix.buffer[ix]); + ei_printf(" "); + } + ei_printf("\n"); + } + + result->timing.dsp_us = ei_read_timer_us() - dsp_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); + + if (debug) { + ei_printf("Features (%d ms.): ", result->timing.dsp); + for (size_t ix = 0; ix < features_matrix.cols; ix++) { + // expects scale of (1/255) and zeropoint of 0 + ei_printf_float(static_cast(((uint8_t)features_matrix.buffer[ix] - impulse->tflite_input_zeropoint) * impulse->tflite_input_scale)); + ei_printf(" "); + } + ei_printf("\n"); + } + + ctx_start_us = ei_read_timer_us(); + EI_IMPULSE_ERROR run_res = inference_onnx_run(impulse, + config_ptr, + ctx_start_us, + &input_tensors, + &output_tensors, + session, + run_options, + binding, + result, debug); + + if (run_res != EI_IMPULSE_OK) { + return run_res; + } + + return EI_IMPULSE_OK; +} +#endif // EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + +#endif // #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_ONNX_TIDL) && (EI_CLASSIFIER_COMPILED != 1) +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_ONNX_TIDL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h new file mode 100644 index 0000000..dd6caf6 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tensaiflow.h @@ -0,0 +1,235 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_TENSAILFOW_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_TENSAILFOW_H_ + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAIFLOW) + +#include "model-parameters/model_metadata.h" +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "edge-impulse-sdk/classifier/ei_run_dsp.h" + +#include "mcu.h" + +extern "C" void infer(const void *impulse_arg, uint32_t* time, uint32_t* cycles); +int8_t *processed_features; + +#ifdef EI_CLASSIFIER_NN_OUTPUT_COUNT +int8_t infer_result[EI_CLASSIFIER_NN_OUTPUT_COUNT]; +#else +int8_t infer_result[EI_CLASSIFIER_LABEL_COUNT]; +#endif + +extern "C" void get_data(const void *impulse_arg, int8_t *in_buf_0, uint16_t in_buf_0_dim_0, uint16_t in_buf_0_dim_1, uint16_t in_buf_0_dim_2) +{ + ei_impulse_t *impulse = (ei_impulse_t *) impulse_arg; + + if ((impulse->sensor == EI_CLASSIFIER_SENSOR_CAMERA) && + ((impulse->dsp_blocks_size == 1) || + (impulse->dsp_blocks[0].extract_fn == extract_image_features))) { + + memcpy(in_buf_0, processed_features, impulse->nn_input_frame_size); + } +} + +extern "C" void post_process(const void *impulse_arg, int8_t *out_buf_0, int8_t *out_buf_1) +{ + ei_impulse_t *impulse = (ei_impulse_t *) impulse_arg; + + #ifdef EI_CLASSIFIER_NN_OUTPUT_COUNT + memcpy(infer_result, out_buf_0, impulse->tflite_output_features_count); + #else + memcpy(infer_result, out_buf_0, impulse->label_count); + #endif +} + +/** + * @brief Do neural network inferencing over the processed feature matrix + * + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. + */ +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + ei_config_tensaiflow_graph_t *graph_config = (ei_config_tensaiflow_graph_t*)block_config->graph_config; + + if (block_config->object_detection) { + ei_printf("ERR: Object detection models are not supported with TensaiFlow\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + + uint64_t ctx_start_us = ei_read_timer_us(); + uint32_t time, cycles; + + /* Run tensaiflow inference */ + infer((const void *)impulse, &time, &cycles); + + // Inference results returned by post_process() and copied into infer_results + + result->timing.classification_us = ei_read_timer_us() - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + + for (uint32_t ix = 0; ix < impulse->label_count; ix++) { + float value; + // Dequantize the output if it is int8 + value = static_cast(infer_result[ix] - graph_config->output_zeropoint) * + graph_config->output_scale; + + if (debug) { + ei_printf("%s:\t", impulse->categories[ix]); + ei_printf_float(value); + ei_printf("\n"); + } + result->classification[ix].label = impulse->categories[ix]; + result->classification[ix].value = value; + } + + return EI_IMPULSE_OK; + +} + +/** + * Special function to run the classifier on images, only works on TFLite models (either interpreter or EON or for tensaiflow) + * that allocates a lot less memory by quantizing in place. 
This only works if 'can_run_classifier_image_quantized' + * returns EI_IMPULSE_OK. + */ +EI_IMPULSE_ERROR run_nn_inference_image_quantized( + const ei_impulse_t *impulse, + signal_t *signal, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + ei_config_tensaiflow_graph_t *graph_config = (ei_config_tensaiflow_graph_t*)block_config->graph_config; + + uint64_t ctx_start_us; + uint64_t dsp_start_us = ei_read_timer_us(); + + ei::matrix_i8_t features_matrix(1, impulse->nn_input_frame_size); + processed_features = (int8_t *) features_matrix.buffer; + + // run DSP process and quantize automatically + int ret = extract_image_features_quantized( + signal, + &features_matrix, + impulse->dsp_blocks[0].config, + graph_config->input_scale, + graph_config->input_zeropoint, + impulse->frequency, + impulse->learning_blocks[0].image_scaling); + + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to run DSP process (%d)\n", ret); + return EI_IMPULSE_DSP_ERROR; + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + return EI_IMPULSE_CANCELED; + } + + result->timing.dsp_us = ei_read_timer_us() - dsp_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); + + if (debug) { + ei_printf("Features (%d ms.): ", result->timing.dsp); + for (size_t ix = 0; ix < features_matrix.cols; ix++) { + ei_printf_float((features_matrix.buffer[ix] - graph_config->input_zeropoint) * graph_config->input_scale); + ei_printf(" "); + } + ei_printf("\n"); + } + + uint32_t time, cycles; + ctx_start_us = ei_read_timer_us(); + + /* Run tensaiflow inference */ + infer((const void *)impulse, &time, &cycles); + + // Inference results returned by post_process() and copied into infer_results + + EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK; + + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_FOMO: { + if (block_config->quantized == 1) { + fill_res = fill_result_struct_i8_fomo( + impulse, + block_config, + result, + infer_result, + graph_config->output_zeropoint, + graph_config->output_scale, + impulse->fomo_output_size, + impulse->fomo_output_size); + } + else { + ei_printf("ERR: TensaiFlow does not support float32 inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + break; + } + default: { + ei_printf("ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + } + } + else { + if (block_config->quantized == 1) { + fill_res = fill_result_struct_i8( + impulse, + result, + infer_result, + graph_config->output_zeropoint, + graph_config->output_scale, + debug); + } + else { + ei_printf("ERR: TensaiFlow does not support float32 inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + } + + if (fill_res != EI_IMPULSE_OK) { + return fill_res; + } + + result->timing.classification_us = ei_read_timer_us() - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + return EI_IMPULSE_OK; + +} + +#endif // #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSAILFOW) +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TENSAILFOW_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h 
new file mode 100644 index 0000000..d9fbb29 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tensorrt.h @@ -0,0 +1,319 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_TENSORRT_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_TENSORRT_H_ + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT) + +#include "model-parameters/model_metadata.h" + +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" + +#include +#include +#include +#include +#include +#include +#include "tflite/linux-jetson-nano/libeitrt.h" + +#if __APPLE__ +#include +#else +#include +#endif + +EiTrt *ei_trt_handle = NULL; + +inline bool file_exists(char *model_file_name) +{ + if (FILE *file = fopen(model_file_name, "r")) { + fclose(file); + return true; + } + else { + return false; + } +} + +/** + * @brief Do neural network inferencing over the processed feature matrix + * + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. + */ +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config; + + #if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + #error "TensorRT requires an unquantized network" + #endif + + static char current_exe_path[PATH_MAX] = { 0 }; + +#if __APPLE__ + uint32_t len = PATH_MAX; + if (_NSGetExecutablePath(current_exe_path, &len) != 0) { + current_exe_path[0] = '\0'; // buffer too small + } + else { + // resolve symlinks, ., .. 
if possible + char *canonical_path = realpath(current_exe_path, NULL); + if (canonical_path != NULL) + { + strncpy(current_exe_path, canonical_path, len); + free(canonical_path); + } + } +#else + int readlink_res = readlink("/proc/self/exe", current_exe_path, PATH_MAX); + if (readlink_res < 0) { + printf("readlink_res = %d\n", readlink_res); + current_exe_path[0] = '\0'; // failed to find location + } +#endif + + static char model_file_name[PATH_MAX]; + + if (strlen(current_exe_path) == 0) { + // could not determine current exe path, use /tmp for the engine file + snprintf( + model_file_name, + PATH_MAX, + "/tmp/ei-%d-%d.engine", + impulse->project_id, + impulse->deploy_version); + } + else { + std::filesystem::path p(current_exe_path); + snprintf( + model_file_name, + PATH_MAX, + "%s/%s-project%d-v%d.engine", + p.parent_path().c_str(), + p.stem().c_str(), + impulse->project_id, + impulse->deploy_version); + } + + static bool first_run = true; + + if (first_run) { + + bool fexists = file_exists(model_file_name); + if (!fexists) { + ei_printf("INFO: Model file '%s' does not exist, creating...\n", model_file_name); + + FILE *file = fopen(model_file_name, "w"); + if (!file) { + ei_printf("ERR: TensorRT init failed to open '%s'\n", model_file_name); + return EI_IMPULSE_TENSORRT_INIT_FAILED; + } + + if (fwrite(graph_config->model, graph_config->model_size, 1, file) != 1) { + ei_printf("ERR: TensorRT init fwrite failed.\n"); + return EI_IMPULSE_TENSORRT_INIT_FAILED; + } + + if (fclose(file) != 0) { + ei_printf("ERR: TensorRT init fclose failed.\n"); + return EI_IMPULSE_TENSORRT_INIT_FAILED; + } + } + + first_run = false; + } + + uint32_t out_data_size = 0; + + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_TAO_SSD: + case EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET: + case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3: + case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4: + case EI_CLASSIFIER_LAST_LAYER_FOMO: + case EI_CLASSIFIER_LAST_LAYER_YOLOV5: + case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: { + out_data_size = impulse->tflite_output_features_count; + break; + } + default: { + ei_printf( + "ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + } + } + else { + out_data_size = impulse->label_count; + } + + float *out_data = (float*)ei_malloc(out_data_size * sizeof(float)); + if (out_data == nullptr) { + ei_printf("ERR: Cannot allocate memory for output data \n"); + } + + // lazy initialize tensorRT context + if (ei_trt_handle == nullptr) { + ei_trt_handle = libeitrt::create_EiTrt(model_file_name, debug); + } + +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size; + ei::matrix_t* matrix = NULL; + + ei::matrix_t combined_matrix(1, impulse->nn_input_frame_size); + uint32_t buf_pos = 0; + + for (size_t i = 0; i < input_block_ids_size; i++) { + size_t cur_mtx = input_block_ids[i]; + + if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, mtx_size)) { + ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx); + return EI_IMPULSE_INVALID_SIZE; + } + + for (size_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { + combined_matrix.buffer[buf_pos++] = matrix->buffer[ix]; + } + } + matrix = &combined_matrix; +#else + ei::matrix_t* matrix = fmatrix[0].matrix; +#endif + + uint64_t ctx_start_us = ei_read_timer_us(); + + libeitrt::infer(ei_trt_handle, matrix->buffer, out_data, 
out_data_size); + + uint64_t ctx_end_us = ei_read_timer_us(); + + result->timing.classification_us = ctx_end_us - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + + EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK; + + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_FOMO: { + fill_res = fill_result_struct_f32_fomo( + impulse, + block_config, + result, + out_data, + impulse->fomo_output_size, + impulse->fomo_output_size); + break; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOV5: + case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: { + int version = block_config->object_detection_last_layer == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI ? + 5 : 6; + fill_res = fill_result_struct_f32_yolov5( + impulse, + block_config, + result, + version, + out_data, + impulse->tflite_output_features_count); + break; + } + case EI_CLASSIFIER_LAST_LAYER_TAO_SSD: + case EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET: { + fill_res = fill_result_struct_f32_tao_decode_detections( + impulse, + block_config, + result, + out_data, + impulse->tflite_output_features_count, + debug); + break; + } + case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3: + fill_res = fill_result_struct_f32_tao_yolov3( + impulse, + block_config, + result, + out_data, + impulse->tflite_output_features_count, + debug); + break; + case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4: { + fill_res = fill_result_struct_f32_tao_yolov4( + impulse, + block_config, + result, + out_data, + impulse->tflite_output_features_count, + debug); + break; + } + default: { + ei_printf( + "ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + } + } + else { + fill_res = fill_result_struct_f32(impulse, result, out_data, debug); + } + + ei_free(out_data); + + if (fill_res != EI_IMPULSE_OK) { + return fill_res; + } + + return EI_IMPULSE_OK; +} + +/** + * Special function to run the classifier on images for quantized models + * that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized' + * returns EI_IMPULSE_OK. + */ +EI_IMPULSE_ERROR run_nn_inference_image_quantized( + const ei_impulse_t *impulse, + signal_t *signal, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; +} + +#endif // #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TENSORRT) +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TENSORRT_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_eon.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_eon.h new file mode 100644 index 0000000..c1053e0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_eon.h @@ -0,0 +1,406 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_EON_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_EON_H_ + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED == 1) + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" +#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h" +#include "edge-impulse-sdk/classifier/ei_run_dsp.h" + +/** + * Setup the TFLite runtime + * + * @param ctx_start_us Pointer to the start time + * @param input Pointer to input tensor + * @param output Pointer to output tensor + * @param micro_tensor_arena Pointer to the arena that will be allocated + * + * @return EI_IMPULSE_OK if successful + */ +static EI_IMPULSE_ERROR inference_tflite_setup( + ei_learning_block_config_tflite_graph_t *block_config, + uint64_t *ctx_start_us, + TfLiteTensor* input, + TfLiteTensor* output, + TfLiteTensor* output_labels, + TfLiteTensor* output_scores, + ei_unique_ptr_t& p_tensor_arena) { + + ei_config_tflite_eon_graph_t *graph_config = (ei_config_tflite_eon_graph_t*)block_config->graph_config; + + *ctx_start_us = ei_read_timer_us(); + + TfLiteStatus init_status = graph_config->model_init(ei_aligned_calloc); + if (init_status != kTfLiteOk) { + ei_printf("Failed to initialize the model (error code %d)\n", init_status); + return EI_IMPULSE_TFLITE_ARENA_ALLOC_FAILED; + } + + TfLiteStatus status; + + status = graph_config->model_input(0, input); + if (status != kTfLiteOk) { + return EI_IMPULSE_TFLITE_ERROR; + } + status = graph_config->model_output(block_config->output_data_tensor, output); + if (status != kTfLiteOk) { + return EI_IMPULSE_TFLITE_ERROR; + } + + if (block_config->object_detection_last_layer == EI_CLASSIFIER_LAST_LAYER_SSD) { + status = graph_config->model_output(block_config->output_score_tensor, output_scores); + if (status != kTfLiteOk) { + return EI_IMPULSE_TFLITE_ERROR; + } + status = graph_config->model_output(block_config->output_labels_tensor, output_labels); + if (status != kTfLiteOk) { + return EI_IMPULSE_TFLITE_ERROR; + } + } + + return EI_IMPULSE_OK; +} + +/** + * Run TFLite model + * + * @param ctx_start_us Start time of the setup function (see above) + * @param output Output tensor + * @param interpreter TFLite interpreter (non-compiled models) + * @param tensor_arena Allocated arena (will be freed) + * @param result Struct for results + * @param debug Whether to print debug info + * + * @return EI_IMPULSE_OK if successful + */ +static EI_IMPULSE_ERROR inference_tflite_run( + const ei_impulse_t *impulse, + ei_learning_block_config_tflite_graph_t *block_config, + uint64_t ctx_start_us, + TfLiteTensor* output, + TfLiteTensor* labels_tensor, + TfLiteTensor* scores_tensor, + uint8_t* tensor_arena, + ei_impulse_result_t *result, + bool debug) { + + ei_config_tflite_eon_graph_t *graph_config = (ei_config_tflite_eon_graph_t*)block_config->graph_config; + + if (graph_config->model_invoke() != kTfLiteOk) { + return EI_IMPULSE_TFLITE_ERROR; + } + + uint64_t ctx_end_us = ei_read_timer_us(); + + result->timing.classification_us = ctx_end_us - ctx_start_us; + result->timing.classification = 
(int)(result->timing.classification_us / 1000); + + // Read the predicted y value from the model's output tensor + if (debug) { + ei_printf("Predictions (time: %d ms.):\n", result->timing.classification); + } + + EI_IMPULSE_ERROR fill_res = fill_result_struct_from_output_tensor_tflite( + impulse, block_config, output, labels_tensor, scores_tensor, result, debug); + + if (fill_res != EI_IMPULSE_OK) { + return fill_res; + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + return EI_IMPULSE_CANCELED; + } + + return EI_IMPULSE_OK; +} + +/** + * @brief Do neural network inferencing over a signal (from the DSP) + * + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. + */ +EI_IMPULSE_ERROR run_nn_inference_from_dsp( + ei_learning_block_config_tflite_graph_t *block_config, + signal_t *signal, + matrix_t *output_matrix) +{ + TfLiteTensor input; + TfLiteTensor output; + TfLiteTensor output_scores; + TfLiteTensor output_labels; + uint64_t ctx_start_us = ei_read_timer_us(); + ei_unique_ptr_t p_tensor_arena(nullptr, ei_aligned_free); + ei_config_tflite_eon_graph_t *graph_config = (ei_config_tflite_eon_graph_t*)block_config->graph_config; + + EI_IMPULSE_ERROR init_res = inference_tflite_setup( + block_config, + &ctx_start_us, + &input, + &output, + &output_labels, + &output_scores, + p_tensor_arena); + + if (init_res != EI_IMPULSE_OK) { + return init_res; + } + + auto input_res = fill_input_tensor_from_signal(signal, &input); + if (input_res != EI_IMPULSE_OK) { + return input_res; + } + + // invoke the model + if (graph_config->model_invoke() != kTfLiteOk) { + return EI_IMPULSE_TFLITE_ERROR; + } + + auto output_res = fill_output_matrix_from_tensor(&output, output_matrix); + if (output_res != EI_IMPULSE_OK) { + return output_res; + } + + if (graph_config->model_reset(ei_aligned_free) != kTfLiteOk) { + return EI_IMPULSE_TFLITE_ERROR; + } + + return EI_IMPULSE_OK; +} + +/** + * @brief Do neural network inferencing over a feature matrix + * + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. 
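+ *
+ * Rough flow, as a reading aid: inference_tflite_setup() binds the tensors of the
+ * EON-compiled graph, fill_input_tensor_from_matrix() copies (and, for quantized
+ * graphs, scales) the concatenated feature matrices into the input tensor,
+ * inference_tflite_run() invokes the graph and fills `result`, and model_reset()
+ * releases the arena when done.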
+ */ +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + ei_config_tflite_eon_graph_t *graph_config = (ei_config_tflite_eon_graph_t*)block_config->graph_config; + + TfLiteTensor input; + TfLiteTensor output; + TfLiteTensor output_scores; + TfLiteTensor output_labels; + + uint64_t ctx_start_us = ei_read_timer_us(); + ei_unique_ptr_t p_tensor_arena(nullptr, ei_aligned_free); + + EI_IMPULSE_ERROR init_res = inference_tflite_setup( + block_config, + &ctx_start_us, + &input, + &output, + &output_labels, + &output_scores, + p_tensor_arena); + + if (init_res != EI_IMPULSE_OK) { + return init_res; + } + + uint8_t* tensor_arena = static_cast(p_tensor_arena.get()); + + size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size; + auto input_res = fill_input_tensor_from_matrix(fmatrix, &input, input_block_ids, input_block_ids_size, mtx_size); + if (input_res != EI_IMPULSE_OK) { + return input_res; + } + + EI_IMPULSE_ERROR run_res = inference_tflite_run( + impulse, + block_config, + ctx_start_us, + &output, + &output_labels, + &output_scores, + tensor_arena, result, debug); + + if (result->copy_output) { + auto output_res = fill_output_matrix_from_tensor(&output, fmatrix[impulse->dsp_blocks_size + learn_block_index].matrix); + if (output_res != EI_IMPULSE_OK) { + return output_res; + } + } + + graph_config->model_reset(ei_aligned_free); + + result->timing.classification_us = ei_read_timer_us() - ctx_start_us; + + if (run_res != EI_IMPULSE_OK) { + return run_res; + } + + return EI_IMPULSE_OK; +} + +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 +/** + * Special function to run the classifier on images, only works on TFLite models (either interpreter or EON or for tensaiflow) + * that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized' + * returns EI_IMPULSE_OK. 
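+ *
+ * Note that the input tensor must be int8/uint8 here: the image DSP block
+ * quantizes straight into input.data.int8 via the ei::matrix_i8_t wrapper below,
+ * so no separate float feature buffer is allocated for the frame.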
+ */ +EI_IMPULSE_ERROR run_nn_inference_image_quantized( + const ei_impulse_t *impulse, + signal_t *signal, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) { + + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + ei_config_tflite_eon_graph_t *graph_config = (ei_config_tflite_eon_graph_t*)block_config->graph_config; + + memset(result, 0, sizeof(ei_impulse_result_t)); + + uint64_t ctx_start_us; + TfLiteTensor input; + TfLiteTensor output; + TfLiteTensor output_scores; + TfLiteTensor output_labels; + + ei_unique_ptr_t p_tensor_arena(nullptr, ei_aligned_free); + + EI_IMPULSE_ERROR init_res = inference_tflite_setup( + block_config, + &ctx_start_us, + &input, &output, + &output_labels, + &output_scores, + p_tensor_arena); + + if (init_res != EI_IMPULSE_OK) { + return init_res; + } + + if (input.type != TfLiteType::kTfLiteInt8 && input.type != TfLiteType::kTfLiteUInt8) { + return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; + } + + uint64_t dsp_start_us = ei_read_timer_us(); + + // features matrix maps around the input tensor to not allocate any memory + ei::matrix_i8_t features_matrix(1, impulse->nn_input_frame_size, input.data.int8); + + // run DSP process and quantize automatically + int ret = extract_image_features_quantized(signal, &features_matrix, impulse->dsp_blocks[0].config, input.params.scale, input.params.zero_point, + impulse->frequency, impulse->learning_blocks[0].image_scaling); + + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to run DSP process (%d)\n", ret); + return EI_IMPULSE_DSP_ERROR; + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + return EI_IMPULSE_CANCELED; + } + + result->timing.dsp_us = ei_read_timer_us() - dsp_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); + + if (debug) { + ei_printf("Features (%d ms.): ", result->timing.dsp); + for (size_t ix = 0; ix < features_matrix.cols; ix++) { + ei_printf_float((features_matrix.buffer[ix] - input.params.zero_point) * input.params.scale); + ei_printf(" "); + } + ei_printf("\n"); + } + + ctx_start_us = ei_read_timer_us(); + + EI_IMPULSE_ERROR run_res = inference_tflite_run( + impulse, + block_config, + ctx_start_us, + &output, + &output_labels, + &output_scores, + static_cast(p_tensor_arena.get()), + result, + debug); + + graph_config->model_reset(ei_aligned_free); + + if (run_res != EI_IMPULSE_OK) { + return run_res; + } + + result->timing.classification_us = ei_read_timer_us() - ctx_start_us; + + return EI_IMPULSE_OK; +} +#endif // EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + +__attribute__((unused)) int extract_tflite_eon_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) { + ei_dsp_config_tflite_eon_t *dsp_config = (ei_dsp_config_tflite_eon_t*)config_ptr; + + ei_config_tflite_eon_graph_t ei_config_tflite_graph_0 = { + .implementation_version = 1, + .model_init = dsp_config->init_fn, + .model_invoke = dsp_config->invoke_fn, + .model_reset = dsp_config->reset_fn, + .model_input = dsp_config->input_fn, + .model_output = dsp_config->output_fn, + }; + + ei_learning_block_config_tflite_graph_t ei_learning_block_config = { + .implementation_version = 1, + .classification_mode = EI_CLASSIFIER_CLASSIFICATION_MODE_DSP, + .block_id = dsp_config->block_id, + .object_detection = false, + .object_detection_last_layer = EI_CLASSIFIER_LAST_LAYER_UNKNOWN, + .output_data_tensor = 0, + .output_labels_tensor = 255, + .output_score_tensor = 255, + .threshold = 0, + .quantized = 0, + 
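+        // .compiled = 1 selects the EON-compiled graph path here; the
+        // interpreter-based engines (tflite_full / tflite_micro) set it to 0.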
.compiled = 1, + .graph_config = &ei_config_tflite_graph_0 + }; + + auto x = run_nn_inference_from_dsp(&ei_learning_block_config, signal, output_matrix); + if (x != 0) { + return x; + } + + return EIDSP_OK; +} + +#endif // (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED == 1) +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_EON_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_full.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_full.h new file mode 100644 index 0000000..e1f0d42 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_full.h @@ -0,0 +1,247 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_FULL_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_FULL_H_ + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL) + +#include "model-parameters/model_metadata.h" +#include "tflite-model/trained_model_ops_define.h" + +#include +#include "tensorflow-lite/tensorflow/lite/c/common.h" +#include "tensorflow-lite/tensorflow/lite/interpreter.h" +#include "tensorflow-lite/tensorflow/lite/kernels/register.h" +#include "tensorflow-lite/tensorflow/lite/model.h" +#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" +#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h" + +typedef struct { + std::unique_ptr model; + std::unique_ptr interpreter; +} ei_tflite_state_t; + +std::map ei_tflite_instances; + +/** + * Construct a tflite interpreter (creates it if needed) + */ +static EI_IMPULSE_ERROR get_interpreter(ei_learning_block_config_tflite_graph_t *block_config, tflite::Interpreter **interpreter) { + // not in the map yet... 
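+    // Interpreters are built lazily, once per learning-block id, and cached in
+    // ei_tflite_instances so repeated classifications reuse the same model,
+    // interpreter and allocated tensors.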
+ if (!ei_tflite_instances.count(block_config->block_id)) { + ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config; + ei_tflite_state_t *new_state = new ei_tflite_state_t(); + + auto new_model = tflite::FlatBufferModel::BuildFromBuffer((const char*)graph_config->model, graph_config->model_size); + new_state->model = std::move(new_model); + if (!new_state->model) { + ei_printf("Failed to build TFLite model from buffer\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + tflite::ops::builtin::BuiltinOpResolver resolver; +#if EI_CLASSIFIER_HAS_TREE_ENSEMBLE_CLASSIFIER + resolver.AddCustom("TreeEnsembleClassifier", + tflite::ops::custom::Register_TREE_ENSEMBLE_CLASSIFIER()); +#endif + tflite::InterpreterBuilder builder(*new_state->model, resolver); + builder(&new_state->interpreter); + + if (!new_state->interpreter) { + ei_printf("Failed to construct interpreter\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + if (new_state->interpreter->AllocateTensors() != kTfLiteOk) { + ei_printf("AllocateTensors failed\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + int hw_thread_count = (int)std::thread::hardware_concurrency(); + hw_thread_count -= 1; // leave one thread free for the other application + if (hw_thread_count < 1) { + hw_thread_count = 1; + } + + if (new_state->interpreter->SetNumThreads(hw_thread_count) != kTfLiteOk) { + ei_printf("SetNumThreads failed\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + ei_tflite_instances.insert(std::make_pair(block_config->block_id, new_state)); + } + + auto tflite_state = ei_tflite_instances[block_config->block_id]; + *interpreter = tflite_state->interpreter.get(); + return EI_IMPULSE_OK; +} + +extern "C" EI_IMPULSE_ERROR run_nn_inference_from_dsp( + ei_learning_block_config_tflite_graph_t *block_config, + signal_t *signal, + matrix_t *output_matrix) +{ + tflite::Interpreter *interpreter; + auto interpreter_ret = get_interpreter(block_config, &interpreter); + if (interpreter_ret != EI_IMPULSE_OK) { + return interpreter_ret; + } + + TfLiteTensor *input = interpreter->input_tensor(0); + TfLiteTensor *output = interpreter->output_tensor(0); + + if (!input) { + return EI_IMPULSE_INPUT_TENSOR_WAS_NULL; + } + if (!output) { + return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL; + } + + auto input_res = fill_input_tensor_from_signal(signal, input); + if (input_res != EI_IMPULSE_OK) { + return input_res; + } + + TfLiteStatus status = interpreter->Invoke(); + if (status != kTfLiteOk) { + ei_printf("ERR: interpreter->Invoke() failed with %d\n", status); + return EI_IMPULSE_TFLITE_ERROR; + } + + auto output_res = fill_output_matrix_from_tensor(output, output_matrix); + if (output_res != EI_IMPULSE_OK) { + return output_res; + } + + // on Linux we're not worried about free'ing (for now) + + return EI_IMPULSE_OK; +} + +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + + tflite::Interpreter *interpreter; + auto interpreter_ret = get_interpreter(block_config, &interpreter); + if (interpreter_ret != EI_IMPULSE_OK) { + return interpreter_ret; + } + + TfLiteTensor *input = interpreter->input_tensor(0); + TfLiteTensor *output = interpreter->output_tensor(block_config->output_data_tensor); + + if (!input) { + return 
EI_IMPULSE_INPUT_TENSOR_WAS_NULL; + } + if (!output) { + return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL; + } + + size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size; + auto input_res = fill_input_tensor_from_matrix(fmatrix, input, input_block_ids, input_block_ids_size, mtx_size); + if (input_res != EI_IMPULSE_OK) { + return input_res; + } + + uint64_t ctx_start_us = ei_read_timer_us(); + + TfLiteStatus status = interpreter->Invoke(); + if (status != kTfLiteOk) { + ei_printf("ERR: interpreter->Invoke() failed with %d\n", status); + return EI_IMPULSE_TFLITE_ERROR; + } + + uint64_t ctx_end_us = ei_read_timer_us(); + + result->timing.classification_us = ctx_end_us - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + + if (result->copy_output) { + auto output_res = fill_output_matrix_from_tensor(output, fmatrix[impulse->dsp_blocks_size + learn_block_index].matrix); + if (output_res != EI_IMPULSE_OK) { + return output_res; + } + } + + if (debug) { + ei_printf("Predictions (time: %d ms.):\n", result->timing.classification); + } + + TfLiteTensor *scores_tensor = interpreter->output_tensor(block_config->output_score_tensor); + TfLiteTensor *labels_tensor = interpreter->output_tensor(block_config->output_labels_tensor); + + EI_IMPULSE_ERROR fill_res = fill_result_struct_from_output_tensor_tflite( + impulse, block_config, output, labels_tensor, scores_tensor, result, debug); + + if (fill_res != EI_IMPULSE_OK) { + return fill_res; + } + + // on Linux we're not worried about free'ing (for now) + + return EI_IMPULSE_OK; +} + +__attribute__((unused)) int extract_tflite_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) { + + ei_dsp_config_tflite_t *dsp_config = (ei_dsp_config_tflite_t*)config_ptr; + + ei_config_tflite_graph_t ei_config_tflite_graph_0 = { + .implementation_version = 1, + .model = dsp_config->model, + .model_size = dsp_config->model_size, + .arena_size = dsp_config->arena_size + }; + + ei_learning_block_config_tflite_graph_t ei_learning_block_config = { + .implementation_version = 1, + .classification_mode = EI_CLASSIFIER_CLASSIFICATION_MODE_DSP, + .block_id = dsp_config->block_id, + .object_detection = false, + .object_detection_last_layer = EI_CLASSIFIER_LAST_LAYER_UNKNOWN, + .output_data_tensor = 0, + .output_labels_tensor = 255, + .output_score_tensor = 255, + .threshold = 0, + .quantized = 0, + .compiled = 0, + .graph_config = &ei_config_tflite_graph_0 + }; + + auto x = run_nn_inference_from_dsp(&ei_learning_block_config, signal, output_matrix); + if (x != 0) { + return x; + } + + return EIDSP_OK; +} + +#endif // (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL) +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_FULL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h new file mode 100644 index 0000000..c567805 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h @@ -0,0 +1,574 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_HELPER_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_HELPER_H_ + +#include "edge-impulse-sdk/classifier/ei_quantize.h" +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL) || (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) + +#if EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL +#include +#include "tensorflow-lite/tensorflow/lite/c/common.h" +#include "tensorflow-lite/tensorflow/lite/interpreter.h" +#include "tensorflow-lite/tensorflow/lite/kernels/register.h" +#include "tensorflow-lite/tensorflow/lite/model.h" +#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h" +#endif // EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL + +#if EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE +#include +#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" +#endif // EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE + +EI_IMPULSE_ERROR fill_input_tensor_from_matrix( + ei_feature_t *fmatrix, + TfLiteTensor *input, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + size_t mtx_size +) { + size_t matrix_els = 0; + uint32_t input_idx = 0; + + for (size_t i = 0; i < input_block_ids_size; i++) { +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + size_t cur_mtx = input_block_ids[i]; + ei::matrix_t* matrix = NULL; + + if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, mtx_size)) { + ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx); + return EI_IMPULSE_INVALID_SIZE; + } +#else + ei::matrix_t* matrix = fmatrix[0].matrix; +#endif + + matrix_els += matrix->rows * matrix->cols; + + switch (input->type) { + case kTfLiteFloat32: { + for (size_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { + input->data.f[input_idx++] = matrix->buffer[ix]; + } + break; + } + case kTfLiteInt8: { + for (size_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { + float val = (float)matrix->buffer[ix]; + input->data.int8[input_idx++] = static_cast( + pre_cast_quantize(val, input->params.scale, input->params.zero_point, true)); + } + break; + } + case kTfLiteUInt8: { + for (size_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { + float val = (float)matrix->buffer[ix]; + input->data.uint8[input_idx++] = static_cast( + pre_cast_quantize(val, input->params.scale, input->params.zero_point, false)); } + break; + } + default: { + ei_printf("ERR: Cannot handle input type (%d)\n", input->type); + return EI_IMPULSE_INPUT_TENSOR_WAS_NULL; + } + } + } + + if (input->bytes / 4 != matrix_els && input->bytes != matrix_els) { + ei_printf("ERR: input tensor has size %d bytes, but input matrix has has size %d bytes\n", + (int)input->bytes, (int)matrix_els); + return EI_IMPULSE_INVALID_SIZE; + } + + return EI_IMPULSE_OK; +} + +EI_IMPULSE_ERROR fill_input_tensor_from_signal( + signal_t *signal, + TfLiteTensor *input +) { + switch (input->type) { + case kTfLiteFloat32: { + if 
(input->bytes / 4 != signal->total_length) { + ei_printf("ERR: input tensor has size %d, but signal has size %d\n", + (int)input->bytes / 4, (int)signal->total_length); + return EI_IMPULSE_INVALID_SIZE; + } + + auto x = signal->get_data(0, signal->total_length, input->data.f); + if (x != EIDSP_OK) { + return EI_IMPULSE_DSP_ERROR; + } + break; + } + case kTfLiteInt8: + case kTfLiteUInt8: { + // we don't have a good signaling way here (this is DSP blocks where + // we don't understand the input very well; guess whether this is an RGB input) + bool is_rgb = input->bytes / 3 == signal->total_length; + + if (!is_rgb) { + // otherwise expect an exact match in length + if (input->bytes != signal->total_length) { + ei_printf("ERR: input tensor has size %d, but signal has size %d\n", + (int)input->bytes, (int)signal->total_length); + return EI_IMPULSE_INVALID_SIZE; + } + } + + float scale = input->params.scale; + int zero_point = input->params.zero_point; + if (scale == 0.0f) { // not quantized? + if (is_rgb) { + scale = 0.003921568859368563f; + } + else { + scale = 1.0f; + } + + if (input->type == kTfLiteInt8 && zero_point == 0) { + zero_point = -128; + } + } + + size_t output_ix = 0; + const size_t page_size = 1024; + + // buffered read from the signal + size_t bytes_left = signal->total_length; + for (size_t ix = 0; ix < signal->total_length; ix += page_size) { + size_t elements_to_read = bytes_left > page_size ? page_size : bytes_left; + + matrix_t input_matrix(elements_to_read, 1); + if (!input_matrix.buffer) { + return EI_IMPULSE_ALLOC_FAILED; + } + signal->get_data(ix, elements_to_read, input_matrix.buffer); + + for (size_t jx = 0; jx < elements_to_read; jx++) { + if (is_rgb) { + uint32_t value = static_cast(input_matrix.buffer[jx]); + + // fast code path + if (scale == 0.003921568859368563f && zero_point == -128) { + int32_t r = static_cast(value >> 16 & 0xff); + int32_t g = static_cast(value >> 8 & 0xff); + int32_t b = static_cast(value & 0xff); + + if (input->type == kTfLiteInt8) { + input->data.int8[output_ix++] = static_cast(r + zero_point); + input->data.int8[output_ix++] = static_cast(g + zero_point); + input->data.int8[output_ix++] = static_cast(b + zero_point); + } + else { + input->data.uint8[output_ix++] = static_cast(r + zero_point); + input->data.uint8[output_ix++] = static_cast(g + zero_point); + input->data.uint8[output_ix++] = static_cast(b + zero_point); + } + } + // slow code path + else { + float r = static_cast(value >> 16 & 0xff) / 255.0f; + float g = static_cast(value >> 8 & 0xff) / 255.0f; + float b = static_cast(value & 0xff) / 255.0f; + + if (input->type == kTfLiteInt8) { + input->data.int8[output_ix++] = static_cast(round(r / scale) + zero_point); + input->data.int8[output_ix++] = static_cast(round(g / scale) + zero_point); + input->data.int8[output_ix++] = static_cast(round(b / scale) + zero_point); + } + else { + input->data.uint8[output_ix++] = static_cast(round(r / scale) + zero_point); + input->data.uint8[output_ix++] = static_cast(round(g / scale) + zero_point); + input->data.uint8[output_ix++] = static_cast(round(b / scale) + zero_point); + } + } + } + else { + float value = input_matrix.buffer[jx]; + if (input->type == kTfLiteInt8) { + input->data.int8[output_ix++] = static_cast(round(value / scale) + zero_point); + } + else { // uint8 + input->data.uint8[output_ix++] = static_cast((value / scale) + zero_point); + } + } + } + } + break; + } + default: { + ei_printf("ERR: Cannot handle input type (%d)\n", input->type); + return 
EI_IMPULSE_INPUT_TENSOR_WAS_NULL; + } + } + + return EI_IMPULSE_OK; +} + +EI_IMPULSE_ERROR fill_output_matrix_from_tensor( + TfLiteTensor *output, + matrix_t *output_matrix +) { + const size_t matrix_els = output_matrix->rows * output_matrix->cols; + + switch (output->type) { + case kTfLiteFloat32: { + if (output->bytes / 4 != matrix_els) { + ei_printf("ERR: output tensor has size %d, but input matrix has has size %d\n", + (int)output->bytes / 4, (int)matrix_els); + return EI_IMPULSE_INVALID_SIZE; + } + + memcpy(output_matrix->buffer, output->data.f, output->bytes); + break; + } + case kTfLiteInt8: { + if (output->bytes != matrix_els) { + ei_printf("ERR: output tensor has size %d, but input matrix has has size %d\n", + (int)output->bytes, (int)matrix_els); + return EI_IMPULSE_INVALID_SIZE; + } + + for (size_t ix = 0; ix < output->bytes; ix++) { + float value = static_cast(output->data.int8[ix] - output->params.zero_point) * output->params.scale; + output_matrix->buffer[ix] = value; + } + break; + } + case kTfLiteUInt8: { + if (output->bytes != matrix_els) { + ei_printf("ERR: output tensor has size %d, but input matrix has has size %d\n", + (int)output->bytes, (int)matrix_els); + return EI_IMPULSE_INVALID_SIZE; + } + + for (size_t ix = 0; ix < output->bytes; ix++) { + float value = static_cast(output->data.uint8[ix] - output->params.zero_point) * output->params.scale; + output_matrix->buffer[ix] = value; + } + break; + } + default: { + ei_printf("ERR: Cannot handle output type (%d)\n", output->type); + return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL; + } + } + + return EI_IMPULSE_OK; +} + +EI_IMPULSE_ERROR fill_result_struct_from_output_tensor_tflite( + const ei_impulse_t *impulse, + ei_learning_block_config_tflite_graph_t *block_config, + TfLiteTensor* output, + TfLiteTensor* labels_tensor, + TfLiteTensor* scores_tensor, + ei_impulse_result_t *result, + bool debug +) { + EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK; + + if (block_config->classification_mode == EI_CLASSIFIER_CLASSIFICATION_MODE_OBJECT_DETECTION) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_FOMO: { + bool int8_output = output->type == TfLiteType::kTfLiteInt8; + if (int8_output) { + fill_res = fill_result_struct_i8_fomo( + impulse, + block_config, + result, + output->data.int8, + output->params.zero_point, + output->params.scale, + impulse->fomo_output_size, + impulse->fomo_output_size); + } + else { + fill_res = fill_result_struct_f32_fomo( + impulse, + block_config, + result, + output->data.f, + impulse->fomo_output_size, + impulse->fomo_output_size); + } + break; + } +#if EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL + case EI_CLASSIFIER_LAST_LAYER_SSD: { + if (!scores_tensor->data.f) { + return EI_IMPULSE_SCORE_TENSOR_WAS_NULL; + } + if (!labels_tensor->data.f) { + return EI_IMPULSE_LABEL_TENSOR_WAS_NULL; + } + if (output->type == kTfLiteFloat32) { + fill_res = fill_result_struct_f32_object_detection( + impulse, + block_config, + result, + output->data.f, + scores_tensor->data.f, + labels_tensor->data.f, + debug); + } + else { + ei_printf("ERR: MobileNet SSD does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + break; + } +#else + case EI_CLASSIFIER_LAST_LAYER_SSD: { + ei_printf("ERR: MobileNet SSD is not supported in EON or TensorFlow Lite Micro\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } +#endif // EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL + case EI_CLASSIFIER_LAST_LAYER_YOLOV5: + case 
EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: { + int version = block_config->object_detection_last_layer == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI ? + 5 : 6; + + if (output->type == kTfLiteInt8) { + fill_res = fill_result_struct_quantized_yolov5( + impulse, + block_config, + result, + version, + output->data.int8, + output->params.zero_point, + output->params.scale, + impulse->tflite_output_features_count, + debug); + } + else if (output->type == kTfLiteUInt8) { + fill_res = fill_result_struct_quantized_yolov5( + impulse, + block_config, + result, + version, + output->data.uint8, + output->params.zero_point, + output->params.scale, + impulse->tflite_output_features_count, + debug); + } + else if (output->type == kTfLiteFloat32) { + fill_res = fill_result_struct_f32_yolov5( + impulse, + block_config, + result, + version, + output->data.f, + impulse->tflite_output_features_count, + debug); + } + else { + ei_printf("ERR: Invalid output type (%d) for YOLOv5 last layer\n", output->type); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOX: { + if (block_config->quantized == 1) { + ei_printf("ERR: YOLOX does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + else { + fill_res = fill_result_struct_f32_yolox( + impulse, + block_config, + result, + output->data.f, + impulse->tflite_output_features_count, + debug); + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOV7: { + if (block_config->quantized == 1) { + ei_printf("ERR: YOLOV7 does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + else { + size_t output_feature_count = 1; + for (int ix = 0; ix < output->dims->size; ix++) { + output_feature_count *= output->dims->data[ix]; + } + fill_res = fill_result_struct_f32_yolov7( + impulse, + block_config, + result, + output->data.f, + output_feature_count); + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_TAO_SSD: + case EI_CLASSIFIER_LAST_LAYER_TAO_RETINANET: { + + if (output->type == kTfLiteInt8) { + fill_res = fill_result_struct_quantized_tao_decode_detections( + impulse, + block_config, + result, + output->data.int8, + output->params.zero_point, + output->params.scale, + impulse->tflite_output_features_count, + debug); + } + else if (output->type == kTfLiteUInt8) { + fill_res = fill_result_struct_quantized_tao_decode_detections( + impulse, + block_config, + result, + output->data.uint8, + output->params.zero_point, + output->params.scale, + impulse->tflite_output_features_count, + debug); + } + else if (output->type == kTfLiteFloat32) { + fill_res = fill_result_struct_f32_tao_decode_detections( + impulse, + block_config, + result, + output->data.f, + impulse->tflite_output_features_count, + debug); + } + else { + ei_printf("ERR: Invalid output type (%d) for TAO last layer\n", output->type); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV3: { + + if (output->type == kTfLiteInt8) { + fill_res = fill_result_struct_quantized_tao_yolov3( + impulse, + block_config, + result, + output->data.int8, + output->params.zero_point, + output->params.scale, + impulse->tflite_output_features_count, + debug); + } + else if (output->type == kTfLiteUInt8) { + fill_res = fill_result_struct_quantized_tao_yolov3( + impulse, + block_config, + result, + output->data.uint8, + output->params.zero_point, + output->params.scale, + impulse->tflite_output_features_count, + debug); + } + else if (output->type == 
kTfLiteFloat32) { + fill_res = fill_result_struct_f32_tao_yolov3( + impulse, + block_config, + result, + output->data.f, + impulse->tflite_output_features_count, + debug); + } + else { + ei_printf("ERR: Invalid output type (%d) for TAO YOLOv3 layer\n", output->type); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_TAO_YOLOV4: { + + if (output->type == kTfLiteInt8) { + fill_res = fill_result_struct_quantized_tao_yolov4( + impulse, + block_config, + result, + output->data.int8, + output->params.zero_point, + output->params.scale, + impulse->tflite_output_features_count, + debug); + } + else if (output->type == kTfLiteUInt8) { + fill_res = fill_result_struct_quantized_tao_yolov4( + impulse, + block_config, + result, + output->data.uint8, + output->params.zero_point, + output->params.scale, + impulse->tflite_output_features_count, + debug); + } + else if (output->type == kTfLiteFloat32) { + fill_res = fill_result_struct_f32_tao_yolov4( + impulse, + block_config, + result, + output->data.f, + impulse->tflite_output_features_count, + debug); + } + else { + ei_printf("ERR: Invalid output type (%d) for TAO YOLOv4 layer\n", output->type); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + break; + } + default: { + ei_printf("ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + } + } + else if (block_config->classification_mode == EI_CLASSIFIER_CLASSIFICATION_MODE_VISUAL_ANOMALY) + { + if (!result->copy_output) { + fill_res = fill_result_visual_ad_struct_f32(impulse, result, output->data.f, block_config->threshold, debug); + } + } + // if we copy the output, we don't need to process it as classification + else + { + if (!result->copy_output) { + bool int8_output = output->type == TfLiteType::kTfLiteInt8; + if (int8_output) { + fill_res = fill_result_struct_i8(impulse, result, output->data.int8, output->params.zero_point, output->params.scale, debug); + } + else { + fill_res = fill_result_struct_f32(impulse, result, output->data.f, debug); + } + } + } + + return fill_res; +} +#endif // #if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_FULL) || (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) + +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_HELPER_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_micro.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_micro.h new file mode 100644 index 0000000..fd3cb26 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_micro.h @@ -0,0 +1,470 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_MICRO_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_MICRO_H_ + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED != 1) + +#include "model-parameters/model_metadata.h" + +#include +#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h" +#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" +#include "edge-impulse-sdk/classifier/inferencing_engines/tflite_helper.h" + +#if defined(EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER) && EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER == 1 +#include "tflite-model/tflite-resolver.h" +#endif // EI_CLASSIFIER_HAS_TFLITE_OPS_RESOLVER + +#ifdef EI_CLASSIFIER_ALLOCATION_STATIC +#if defined __GNUC__ +#define ALIGN(X) __attribute__((aligned(X))) +#elif defined _MSC_VER +#define ALIGN(X) __declspec(align(X)) +#elif defined __TASKING__ +#define ALIGN(X) __align(X) +#endif +#endif + +/** + * Setup the TFLite runtime + * + * @param ctx_start_us Pointer to the start time + * @param input Pointer to input tensor + * @param output Pointer to output tensor + * @param micro_interpreter Pointer to interpreter (for non-compiled models) + * @param micro_tensor_arena Pointer to the arena that will be allocated + * + * @return EI_IMPULSE_OK if successful + */ +static EI_IMPULSE_ERROR inference_tflite_setup( + ei_learning_block_config_tflite_graph_t *block_config, + uint64_t *ctx_start_us, + TfLiteTensor** input, + TfLiteTensor** output, + TfLiteTensor** output_labels, + TfLiteTensor** output_scores, + tflite::MicroInterpreter** micro_interpreter, + ei_unique_ptr_t& p_tensor_arena) { + + *ctx_start_us = ei_read_timer_us(); + + ei_config_tflite_graph_t *graph_config = (ei_config_tflite_graph_t*)block_config->graph_config; + +#ifdef EI_CLASSIFIER_ALLOCATION_STATIC + // Assign a no-op lambda to the "free" function in case of static arena + static uint8_t tensor_arena[EI_CLASSIFIER_TFLITE_ARENA_SIZE] ALIGN(16); + p_tensor_arena = ei_unique_ptr_t(tensor_arena, [](void*){}); +#else + // Create an area of memory to use for input, output, and intermediate arrays. + uint8_t *tensor_arena = (uint8_t*)ei_aligned_calloc(16, graph_config->arena_size); + if (tensor_arena == NULL) { + ei_printf("Failed to allocate TFLite arena (%zu bytes)\n", graph_config->arena_size); + return EI_IMPULSE_TFLITE_ARENA_ALLOC_FAILED; + } + p_tensor_arena = ei_unique_ptr_t(tensor_arena, ei_aligned_free); +#endif + + static bool tflite_first_run = true; + static uint8_t *model_arr = NULL; + + if (model_arr != graph_config->model) { + tflite_first_run = true; + model_arr = (uint8_t*)graph_config->model; + } + + static const tflite::Model* model = nullptr; + + // ====== + // Initialization code start + // This part can be run once, but that would require the TFLite arena + // to be allocated at all times, which is not ideal (e.g. when doing MFCC) + // ====== + if (tflite_first_run) { + // Map the model into a usable data structure. This doesn't involve any + // copying or parsing, it's a very lightweight operation. 
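+    // The static model/model_arr pair above re-arms this mapping whenever a
+    // different model buffer is passed in, so switching graphs at runtime does
+    // not reuse a stale tflite::Model.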
+ model = tflite::GetModel(graph_config->model); + if (model->version() != TFLITE_SCHEMA_VERSION) { + ei_printf( + "Model provided is schema version %d not equal " + "to supported version %d.", + model->version(), TFLITE_SCHEMA_VERSION); + return EI_IMPULSE_TFLITE_ERROR; + } + tflite_first_run = false; + } + +#ifdef EI_TFLITE_RESOLVER + EI_TFLITE_RESOLVER +#else + static tflite::AllOpsResolver resolver; // needs static to match the life of the interpreter +#endif + + // Build an interpreter to run the model with. + tflite::MicroInterpreter *interpreter = new tflite::MicroInterpreter( + model, resolver, tensor_arena, graph_config->arena_size); + + *micro_interpreter = interpreter; + + // Allocate memory from the tensor_arena for the model's tensors. + TfLiteStatus allocate_status = interpreter->AllocateTensors(true); + if (allocate_status != kTfLiteOk) { + ei_printf("AllocateTensors() failed"); + return EI_IMPULSE_TFLITE_ERROR; + } + + // Obtain pointers to the model's input and output tensors. + *input = interpreter->input(0); + *output = interpreter->output(block_config->output_data_tensor); + + if (block_config->object_detection_last_layer == EI_CLASSIFIER_LAST_LAYER_SSD) { + *output_scores = interpreter->output(block_config->output_score_tensor); + *output_labels = interpreter->output(block_config->output_labels_tensor); + } + + if (tflite_first_run) { + tflite_first_run = false; + } + + return EI_IMPULSE_OK; +} + +/** + * Run TFLite model + * + * @param ctx_start_us Start time of the setup function (see above) + * @param output Output tensor + * @param interpreter TFLite interpreter (non-compiled models) + * @param tensor_arena Allocated arena (will be freed) + * @param result Struct for results + * @param debug Whether to print debug info + * + * @return EI_IMPULSE_OK if successful + */ +static EI_IMPULSE_ERROR inference_tflite_run( + const ei_impulse_t *impulse, + ei_learning_block_config_tflite_graph_t *block_config, + uint64_t ctx_start_us, + TfLiteTensor* output, + TfLiteTensor* labels_tensor, + TfLiteTensor* scores_tensor, + tflite::MicroInterpreter* interpreter, + uint8_t* tensor_arena, + ei_impulse_result_t *result, + bool debug) { + + + // Run inference, and report any error + TfLiteStatus invoke_status = interpreter->Invoke(); + if (invoke_status != kTfLiteOk) { + delete interpreter; + ei_printf("Invoke failed (%d)\n", invoke_status); + return EI_IMPULSE_TFLITE_ERROR; + } + + uint64_t ctx_end_us = ei_read_timer_us(); + + result->timing.classification_us = ctx_end_us - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + + // Read the predicted y value from the model's output tensor + if (debug) { + ei_printf("Predictions (time: %d ms.):\n", result->timing.classification); + } + + EI_IMPULSE_ERROR fill_res = fill_result_struct_from_output_tensor_tflite( + impulse, block_config, output, labels_tensor, scores_tensor, result, debug); + + delete interpreter; + + if (fill_res != EI_IMPULSE_OK) { + return fill_res; + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + return EI_IMPULSE_CANCELED; + } + + return EI_IMPULSE_OK; +} + + +/** + * @brief Do neural network inferencing over a signal (from the DSP) + * + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. 
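+ *
+ * This is the path used by extract_tflite_features() at the bottom of this file:
+ * the raw signal is copied into the input tensor, the MicroInterpreter is
+ * invoked, and the (possibly quantized) output tensor is converted back into
+ * output_matrix.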
+ */ +EI_IMPULSE_ERROR run_nn_inference_from_dsp( + ei_learning_block_config_tflite_graph_t *config, + signal_t *signal, + matrix_t *output_matrix) +{ + TfLiteTensor* input; + TfLiteTensor* output; + TfLiteTensor* output_scores; + TfLiteTensor* output_labels; + uint64_t ctx_start_us = ei_read_timer_us(); + ei_unique_ptr_t p_tensor_arena(nullptr, ei_aligned_free); + + tflite::MicroInterpreter* interpreter; + EI_IMPULSE_ERROR init_res = inference_tflite_setup( + config, + &ctx_start_us, + &input, &output, + &output_labels, + &output_scores, + &interpreter, p_tensor_arena); + + if (init_res != EI_IMPULSE_OK) { + return init_res; + } + + auto input_res = fill_input_tensor_from_signal(signal, input); + if (input_res != EI_IMPULSE_OK) { + return input_res; + } + + // Run inference, and report any error + TfLiteStatus invoke_status = interpreter->Invoke(); + if (invoke_status != kTfLiteOk) { + ei_printf("Invoke failed (%d)\n", invoke_status); + return EI_IMPULSE_TFLITE_ERROR; + } + + auto output_res = fill_output_matrix_from_tensor(output, output_matrix); + if (output_res != EI_IMPULSE_OK) { + return output_res; + } + + delete interpreter; + + return EI_IMPULSE_OK; +} + +/** + * @brief Do neural network inferencing over the processed feature matrix + * + * @param fmatrix Processed matrix + * @param result Output classifier results + * @param[in] debug Debug output enable + * + * @return The ei impulse error. + */ +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + + TfLiteTensor* input; + TfLiteTensor* output; + TfLiteTensor* output_scores; + TfLiteTensor* output_labels; + uint64_t ctx_start_us = ei_read_timer_us(); + ei_unique_ptr_t p_tensor_arena(nullptr, ei_aligned_free); + + tflite::MicroInterpreter* interpreter; + EI_IMPULSE_ERROR init_res = inference_tflite_setup( + block_config, + &ctx_start_us, + &input, &output, + &output_labels, + &output_scores, + &interpreter, + p_tensor_arena); + + if (init_res != EI_IMPULSE_OK) { + return init_res; + } + + uint8_t* tensor_arena = static_cast(p_tensor_arena.get()); + + size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size; + auto input_res = fill_input_tensor_from_matrix(fmatrix, input, input_block_ids, input_block_ids_size, mtx_size); + if (input_res != EI_IMPULSE_OK) { + return input_res; + } + + EI_IMPULSE_ERROR run_res = inference_tflite_run( + impulse, + block_config, + ctx_start_us, + output, + output_labels, + output_scores, + interpreter, tensor_arena, result, debug); + + if (result->copy_output) { + auto output_res = fill_output_matrix_from_tensor(output, fmatrix[impulse->dsp_blocks_size + learn_block_index].matrix); + if (output_res != EI_IMPULSE_OK) { + return output_res; + } + } + + result->timing.classification_us = ei_read_timer_us() - ctx_start_us; + + if (run_res != EI_IMPULSE_OK) { + return run_res; + } + + return EI_IMPULSE_OK; +} + +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 +/** + * Special function to run the classifier on images, only works on TFLite models (either interpreter or EON or for tensaiflow) + * that allocates a lot less memory by quantizing in place. This only works if 'can_run_classifier_image_quantized' + * returns EI_IMPULSE_OK. 
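+ *
+ * Timing is split below: result->timing.dsp_us covers the quantized image DSP
+ * step, while result->timing.classification_us covers the interpreter invoke.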
+ */ +EI_IMPULSE_ERROR run_nn_inference_image_quantized( + const ei_impulse_t *impulse, + signal_t *signal, + ei_impulse_result_t *result, + void *config_ptr, + bool debug = false) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + + memset(result, 0, sizeof(ei_impulse_result_t)); + + uint64_t ctx_start_us; + TfLiteTensor* input; + TfLiteTensor* output; + TfLiteTensor* output_scores; + TfLiteTensor* output_labels; + ei_unique_ptr_t p_tensor_arena(nullptr, ei_aligned_free); + + tflite::MicroInterpreter* interpreter; + EI_IMPULSE_ERROR init_res = inference_tflite_setup( + block_config, + &ctx_start_us, + &input, &output, + &output_labels, + &output_scores, + &interpreter, + p_tensor_arena); + + if (init_res != EI_IMPULSE_OK) { + return init_res; + } + + if (input->type != TfLiteType::kTfLiteInt8 && input->type != TfLiteType::kTfLiteUInt8) { + return EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES; + } + + uint64_t dsp_start_us = ei_read_timer_us(); + + // features matrix maps around the input tensor to not allocate any memory + ei::matrix_i8_t features_matrix(1, impulse->nn_input_frame_size, input->data.int8); + + // run DSP process and quantize automatically + int ret = extract_image_features_quantized(signal, &features_matrix, impulse->dsp_blocks[0].config, input->params.scale, input->params.zero_point, + impulse->frequency, impulse->learning_blocks[0].image_scaling); + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to run DSP process (%d)\n", ret); + return EI_IMPULSE_DSP_ERROR; + } + + if (ei_run_impulse_check_canceled() == EI_IMPULSE_CANCELED) { + return EI_IMPULSE_CANCELED; + } + + result->timing.dsp_us = ei_read_timer_us() - dsp_start_us; + result->timing.dsp = (int)(result->timing.dsp_us / 1000); + + if (debug) { + ei_printf("Features (%d ms.): ", result->timing.dsp); + for (size_t ix = 0; ix < features_matrix.cols; ix++) { + ei_printf_float((features_matrix.buffer[ix] - input->params.zero_point) * input->params.scale); + ei_printf(" "); + } + ei_printf("\n"); + } + + ctx_start_us = ei_read_timer_us(); + + EI_IMPULSE_ERROR run_res = inference_tflite_run(impulse, + block_config, + ctx_start_us, + output, + output_labels, + output_scores, + interpreter, + static_cast(p_tensor_arena.get()), + result, debug); + + if (run_res != EI_IMPULSE_OK) { + return run_res; + } + + result->timing.classification_us = ei_read_timer_us() - ctx_start_us; + + return EI_IMPULSE_OK; +} +#endif // EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + +__attribute__((unused)) int extract_tflite_features(signal_t *signal, matrix_t *output_matrix, void *config_ptr, const float frequency) { + ei_dsp_config_tflite_t *dsp_config = (ei_dsp_config_tflite_t*)config_ptr; + + ei_config_tflite_graph_t ei_config_tflite_graph_0 = { + .implementation_version = 1, + .model = dsp_config->model, + .model_size = dsp_config->model_size, + .arena_size = dsp_config->arena_size + }; + + ei_learning_block_config_tflite_graph_t ei_learning_block_config = { + .implementation_version = 1, + .classification_mode = EI_CLASSIFIER_CLASSIFICATION_MODE_DSP, + .block_id = dsp_config->block_id, + .object_detection = false, + .object_detection_last_layer = EI_CLASSIFIER_LAST_LAYER_UNKNOWN, + .output_data_tensor = 0, + .output_labels_tensor = 255, + .output_score_tensor = 255, + .threshold = 0, + .quantized = 0, + .compiled = 0, + .graph_config = &ei_config_tflite_graph_0 + }; + + auto x = run_nn_inference_from_dsp(&ei_learning_block_config, signal, output_matrix); + if (x != 0) { + return x; + } + + 
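+    // A zero return from the learn-block runner maps onto EIDSP_OK for the DSP
+    // pipeline; any non-zero EI_IMPULSE_ERROR is propagated as-is above.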
return EIDSP_OK; +} + +#endif // (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE) && (EI_CLASSIFIER_COMPILED != 1) +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_MICRO_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h new file mode 100644 index 0000000..5d78201 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/classifier/inferencing_engines/tflite_tidl.h @@ -0,0 +1,396 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_TIDL_H_ +#define _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_TIDL_H_ + +#if (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_TIDL) + +#include "model-parameters/model_metadata.h" + +#include +#include "tensorflow-lite/tensorflow/lite/c/common.h" +#include "tensorflow-lite/tensorflow/lite/interpreter.h" +#include "tensorflow-lite/tensorflow/lite/kernels/register.h" +#include "tensorflow-lite/tensorflow/lite/model.h" +#include "tensorflow-lite/tensorflow/lite/optional_debug_tools.h" +#include "edge-impulse-sdk/classifier/ei_fill_result_struct.h" +#include "edge-impulse-sdk/classifier/ei_model_types.h" + +#include "itidl_rt.h" +#if ARMNN_ENABLE +#include "DelegateOptions.hpp" +#include "armnn_delegate.hpp" +#endif + +#include + +// old models don't have this, add this here +#ifndef EI_CLASSIFIER_TFLITE_OUTPUT_DATA_TENSOR +#define EI_CLASSIFIER_TFLITE_OUTPUT_DATA_TENSOR 0 +#endif // not defined EI_CLASSIFIER_TFLITE_OUTPUT_DATA_TENSOR + +#include "tflite-model/tidl-model.h" +#include "utils/model_header_utils.h" + +void *in_ptrs[16] = {NULL}; +void *out_ptrs[16] = {NULL}; + +EI_IMPULSE_ERROR run_nn_inference( + const ei_impulse_t *impulse, + ei_feature_t *fmatrix, + uint32_t learn_block_index, + uint32_t* input_block_ids, + uint32_t input_block_ids_size, + ei_impulse_result_t *result, + void *config_ptr, + bool debug) +{ + ei_learning_block_config_tflite_graph_t *block_config = (ei_learning_block_config_tflite_graph_t*)config_ptr; + + static std::unique_ptr model = nullptr; + static std::unique_ptr interpreter = nullptr; + static std::vector inputs; + static std::vector outputs; + + if (!model) { + + std::string proj_artifacts_path = "/tmp/" + std::string(impulse->project_name) + "-" + std::to_string(impulse->project_id) + "-" + std::to_string(impulse->deploy_version); + + create_project_if_not_exists(proj_artifacts_path, model_h_files, model_h_files_len); + + std::string proj_model_path = proj_artifacts_path + "/trained.tflite"; + + model = tflite::FlatBufferModel::BuildFromFile(proj_model_path.c_str()); + if (!model) { + ei_printf("Failed to build TFLite model from buffer\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + tflite::ops::builtin::BuiltinOpResolver resolver; + tflite::InterpreterBuilder 
builder(*model, resolver); + builder(&interpreter); + + if (!interpreter) { + ei_printf("Failed to construct interpreter\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + /* This part creates the dlg_ptr */ + ei_printf("TIDL delegate mode\n"); + typedef TfLiteDelegate *(*tflite_plugin_create_delegate)(char **, char **, size_t, void (*report_error)(const char *)); + tflite_plugin_create_delegate tflite_plugin_dlg_create; + char *keys[] = {(char *)"artifacts_folder", (char *)"num_tidl_subgraphs", (char *)"debug_level"}; + char *values[] = {(char *)proj_artifacts_path.c_str(), (char *)"16", (char *)"0"}; + void *lib = dlopen("libtidl_tfl_delegate.so", RTLD_NOW); + assert(lib); + tflite_plugin_dlg_create = (tflite_plugin_create_delegate)dlsym(lib, "tflite_plugin_create_delegate"); + TfLiteDelegate *dlg_ptr = tflite_plugin_dlg_create(keys, values, 3, NULL); + interpreter->ModifyGraphWithDelegate(dlg_ptr); + ei_printf("ModifyGraphWithDelegate - Done \n"); + + + if (interpreter->AllocateTensors() != kTfLiteOk) { + ei_printf("AllocateTensors failed\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + + int hw_thread_count = (int)std::thread::hardware_concurrency(); + hw_thread_count -= 1; // leave one thread free for the other application + if (hw_thread_count < 1) { + hw_thread_count = 1; + } + + if (interpreter->SetNumThreads(hw_thread_count) != kTfLiteOk) { + ei_printf("SetNumThreads failed\n"); + return EI_IMPULSE_TFLITE_ERROR; + } + } + + inputs = interpreter->inputs(); + outputs = interpreter->outputs(); + + ei_printf("device mem enabled\n"); + for (uint32_t i = 0; i < inputs.size(); i++) + { + const TfLiteTensor *tensor = interpreter->input_tensor(i); + in_ptrs[i] = TIDLRT_allocSharedMem(tflite::kDefaultTensorAlignment, tensor->bytes); + if (in_ptrs[i] == NULL) + { + ei_printf("Could not allocate Memory for input: %s\n", tensor->name); + } + interpreter->SetCustomAllocationForTensor(inputs[i], {in_ptrs[i], tensor->bytes}); + } + for (uint32_t i = 0; i < outputs.size(); i++) + { + const TfLiteTensor *tensor = interpreter->output_tensor(i); + out_ptrs[i] = TIDLRT_allocSharedMem(tflite::kDefaultTensorAlignment, tensor->bytes); + if (out_ptrs[i] == NULL) + { + ei_printf("Could not allocate Memory for ouput: %s\n", tensor->name); + } + interpreter->SetCustomAllocationForTensor(outputs[i], {out_ptrs[i], tensor->bytes}); + } + + // Obtain pointers to the model's input and output tensors. 
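+    // With quantization enabled, the TIDL path reads and writes int8 tensors and
+    // rescales features using tflite_input_scale / tflite_input_zeropoint;
+    // otherwise the float features are copied into the tensor unchanged.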
+#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + int8_t* input = interpreter->typed_input_tensor(0); +#else + float* input = interpreter->typed_input_tensor(0); +#endif + + if (!input) { + return EI_IMPULSE_INPUT_TENSOR_WAS_NULL; + } + + size_t mtx_size = impulse->dsp_blocks_size + impulse->learning_blocks_size; + + for (size_t i = 0; i < input_block_ids_size; i++) { +#if EI_CLASSIFIER_SINGLE_FEATURE_INPUT == 0 + uint16_t cur_mtx = input_block_ids[i]; + ei::matrix_t* matrix = NULL; + + if (!find_mtx_by_idx(fmatrix, &matrix, cur_mtx, mtx_size)) { + ei_printf("ERR: Cannot find matrix with id %zu\n", cur_mtx); + return EI_IMPULSE_INVALID_SIZE; + } +#else + ei::matrix_t* matrix = fmatrix[0].matrix; +#endif + + for (uint32_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { + if (block_config->object_detection) { +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + float pixel = (float)matrix->buffer[ix]; + input[ix] = static_cast((pixel / input->tflite_input_scale) + input->tflite_input_zeropoint); +#else + input[ix] = matrix->buffer[ix]; +#endif + } + else { +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + input[ix] = static_cast(round(matrix->buffer[ix] / input->tflite_input_scale) + input->tflite_input_zeropoint); +#else + input[ix] = matrix->buffer[ix]; +#endif + } + } + } + + uint64_t ctx_start_us = ei_read_timer_us(); + + interpreter->Invoke(); + + uint64_t ctx_end_us = ei_read_timer_us(); + + result->timing.classification_us = ctx_end_us - ctx_start_us; + result->timing.classification = (int)(result->timing.classification_us / 1000); + +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + int8_t* out_data = interpreter->typed_output_tensor(block_config->output_data_tensor); +#else + float* out_data = interpreter->typed_output_tensor(block_config->output_data_tensor); +#endif + + if (debug) { + ei_printf("LOG_INFO tensors size: %ld \n", interpreter->tensors_size()); + ei_printf("LOG_INFO nodes size: %ld\n", interpreter->nodes_size()); + ei_printf("LOG_INFO number of inputs: %ld\n", inputs.size()); + ei_printf("LOG_INFO number of outputs: %ld\n", outputs.size()); + ei_printf("LOG_INFO input(0) name: %s\n", interpreter->GetInputName(0)); + + int t_size = interpreter->tensors_size(); + for (int i = 0; i < t_size; i++) + { + if (interpreter->tensor(i)->name) { + ei_printf("LOG_INFO %d: %s,%ld,%d,%f,%d,size(", i, interpreter->tensor(i)->name, + interpreter->tensor(i)->bytes, + interpreter->tensor(i)->type, + interpreter->tensor(i)->params.scale, + interpreter->tensor(i)->params.zero_point); + + for (int k=0; k < interpreter->tensor(i)->dims->size; k++) { + if (k == interpreter->tensor(i)->dims->size - 1) { + ei_printf("%d", interpreter->tensor(i)->dims->data[k]); + } else { + ei_printf("%d,", interpreter->tensor(i)->dims->data[k]); + } + } + ei_printf(")\n"); + } + } + } + + if (!out_data) { + return EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL; + } + + if (debug) { + ei_printf("Predictions (time: %d ms.):\n", result->timing.classification); + } + + EI_IMPULSE_ERROR fill_res = EI_IMPULSE_OK; + + if (block_config->object_detection) { + switch (block_config->object_detection_last_layer) { + case EI_CLASSIFIER_LAST_LAYER_FOMO: { + if (block_config->quantized == 1) { +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + fill_res = fill_result_struct_i8_fomo( + impulse, + block_config, + result, + out_data, + out_data->tflite_output_zeropoint, + out_data->tflite_output_scale, + impulse->fomo_output_size, + impulse->fomo_output_size); +#endif + } + else { + fill_res = fill_result_struct_f32_fomo( + impulse, + block_config, + result, 
+ out_data, + impulse->fomo_output_size, + impulse->fomo_output_size); + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_SSD: { + float *scores_tensor = interpreter->typed_output_tensor(block_config->output_score_tensor); + float *label_tensor = interpreter->typed_output_tensor(block_config->output_labels_tensor); + if (!scores_tensor) { + return EI_IMPULSE_SCORE_TENSOR_WAS_NULL; + } + if (!label_tensor) { + return EI_IMPULSE_LABEL_TENSOR_WAS_NULL; + } + if (block_config->quantized == 1) { + ei_printf("ERR: MobileNet SSD does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + else { + fill_res = fill_result_struct_f32_object_detection( + impulse, + block_config, + result, + out_data, + scores_tensor, + label_tensor, + debug); + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOV5: + case EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI: { + if (block_config->quantized == 1) { + ei_printf("ERR: YOLOv5 does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + else { + int version = block_config->object_detection_last_layer == EI_CLASSIFIER_LAST_LAYER_YOLOV5_V5_DRPAI ? + 5 : 6; + fill_res = fill_result_struct_f32_yolov5( + impulse, + block_config, + result, + version, + out_data, + impulse->tflite_output_features_count, + debug); + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOX: { + if (block_config->quantized == 1) { + ei_printf("ERR: YOLOX does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + else { + fill_res = fill_result_struct_f32_yolox( + impulse, + block_config, + result, + out_data, + impulse->tflite_output_features_count, + debug); + } + break; + } + case EI_CLASSIFIER_LAST_LAYER_YOLOV7: { + if (block_config->quantized == 1) { + ei_printf("ERR: YOLOV7 does not support quantized inference\n"); + return EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE; + } + else { + TfLiteTensor *output = interpreter->output_tensor(0); + size_t output_feature_count = 1; + for (int ix = 0; ix < output->dims->size; ix++) { + output_feature_count *= output->dims->data[ix]; + } + fill_res = fill_result_struct_f32_yolov7( + impulse, + block_config, + result, + output->data.f, + output_feature_count); + } + break; + } + default: { + ei_printf("ERR: Unsupported object detection last layer (%d)\n", + block_config->object_detection_last_layer); + break; + } + } + } + else { +#if EI_CLASSIFIER_QUANTIZATION_ENABLED == 1 + fill_res = fill_result_struct_i8(impulse, result, out_data, out_data->tflite_output_zeropoint, out_data->tflite_output_scale, debug); +#else + fill_res = fill_result_struct_f32(impulse, result, out_data, debug); +#endif + } + + for (uint32_t i = 0; i < inputs.size(); i++) + { + if (in_ptrs[i]) + { + TIDLRT_freeSharedMem(in_ptrs[i]); + } + } + for (uint32_t i = 0; i < outputs.size(); i++) + { + if (out_ptrs[i]) + { + TIDLRT_freeSharedMem(out_ptrs[i]); + } + } + + if (fill_res != EI_IMPULSE_OK) { + return fill_res; + } + + // on Linux we're not worried about free'ing (for now) + + return EI_IMPULSE_OK; +} + +#endif // (EI_CLASSIFIER_INFERENCING_ENGINE == EI_CLASSIFIER_TFLITE_TIDL) +#endif // _EI_CLASSIFIER_INFERENCING_ENGINE_TFLITE_TIDL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/cmake/utils.cmake b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/cmake/utils.cmake index 097aad8..02cf739 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/cmake/utils.cmake +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/cmake/utils.cmake @@ -19,6 +19,24 @@ MACRO(RECURSIVE_FIND_FILE return_list dir pattern) SET(${return_list} ${dir_list}) ENDMACRO() +MACRO(RECURSIVE_FIND_FILE_EXCLUDE_DIR return_list dir exclude_dir pattern) + FILE(GLOB_RECURSE new_list "${dir}/${pattern}") + SET(dir_list "") + FOREACH(file_path ${new_list}) + IF (file_path MATCHES ".*\/${exclude_dir}\/.*") + continue() + endif() + SET(dir_list ${dir_list} ${file_path}) + ENDFOREACH() + LIST(REMOVE_DUPLICATES dir_list) + SET(${return_list} ${dir_list}) +ENDMACRO() + +MACRO(RECURSIVE_FIND_FILE_APPEND return_list dir pattern) + RECURSIVE_FIND_FILE( append_list ${dir} ${pattern} ) + LIST(APPEND ${return_list} ${append_list}) +ENDMACRO() + MACRO(SOURCE_FILES return_list dir pattern) FILE(GLOB new_list "${dir}/${pattern}") SET(dir_list "") diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/cmake/zephyr/CMakeLists.txt b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/cmake/zephyr/CMakeLists.txt index fec6785..5a5e49e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/cmake/zephyr/CMakeLists.txt +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/cmake/zephyr/CMakeLists.txt @@ -1,5 +1,9 @@ cmake_minimum_required(VERSION 3.13.1) +if(NOT TARGET app) + message(FATAL_ERROR "Please create a target named 'app' (ex: add_executable(app)) before adding this file") +endif() + set(CMAKE_CXX_STANDARD 11) set(CMAKE_CXX_STANDARD_REQUIRED ON) @@ -11,12 +15,18 @@ target_include_directories(app PRIVATE ${EI_SDK_FOLDER} ) -RECURSIVE_FIND_FILE(SOURCE_FILES "${EI_SDK_FOLDER}" "*.cpp") -RECURSIVE_FIND_FILE(CC_FILES "${EI_SDK_FOLDER}" "*.cc") -RECURSIVE_FIND_FILE(S_FILES "${EI_SDK_FOLDER}" "*.s") -RECURSIVE_FIND_FILE(C_FILES "${EI_SDK_FOLDER}" "*.c") -list(APPEND SOURCE_FILES ${S_FILES}) -list(APPEND SOURCE_FILES ${C_FILES}) -list(APPEND SOURCE_FILES ${CC_FILES}) +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}" "*.cpp") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}" "*.cc") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}" "*.s") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/DSP/Source/TransformFunctions" "*.c") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/DSP/Source/CommonTables" "*.c") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/DSP/Source/BasicMathFunctions" "*.c") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/DSP/Source/ComplexMathFunctions" "*.c") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/DSP/Source/FastMathFunctions" "*.c") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/DSP/Source/SupportFunctions" "*.c") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/DSP/Source/MatrixFunctions" "*.c") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/DSP/Source/StatisticsFunctions" "*.c") +RECURSIVE_FIND_FILE_APPEND(EI_SOURCE_FILES "${EI_SDK_FOLDER}/CMSIS/NN/Source" "*.c") +LIST(APPEND EI_SOURCE_FILES "${EI_SDK_FOLDER}/tensorflow/lite/c/common.c") -target_sources(app PRIVATE ${SOURCE_FILES}) +target_sources(app PRIVATE ${EI_SOURCE_FILES}) \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/create-arduino-library.sh b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/create-arduino-library.sh deleted file mode 100644 index bb3ad56..0000000 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/create-arduino-library.sh +++ /dev/null @@ -1,60 +0,0 @@ -# Run this script to convert the edge-impulse-sdk folder into a library that can be consumed by the Arduino IDE -# it renames files (e.g. *.cpp to *.c), removes features (uTensor), and updates include paths - -# exit when any command fails -set -e - -cleanup() { - echo "" - echo "Terminated by user" - exit 1 -} -trap cleanup INT TERM - -SCRIPTPATH="$( cd "$(dirname "$0")" ; pwd -P )" - -if [[ "$OSTYPE" == "darwin"* ]]; then - SEDCMD="sed -i '' -e" - ECHOCMD="echo" - LC_CTYPE=C - LANG=C -else - SEDCMD="sed -i -e" - ECHOCMD="echo -e" -fi - -rm -rf $SCRIPTPATH/tensorflow/lite/micro/mbed/ -rm -rf $SCRIPTPATH/porting/ecm3532/ -rm -rf $SCRIPTPATH/porting/himax/ -rm -rf $SCRIPTPATH/porting/mbed/ -rm -rf $SCRIPTPATH/porting/mingw32/ -rm -rf $SCRIPTPATH/porting/posix/ -rm -rf $SCRIPTPATH/porting/silabs/ -rm -rf $SCRIPTPATH/porting/stm32-cubeai/ -rm -rf $SCRIPTPATH/porting/zephyr/ -rm -rf $SCRIPTPATH/porting/sony/ -rm -rf $SCRIPTPATH/classifier/ei_run_classifier_c* -rm -rf $SCRIPTPATH/CMSIS/DSP/Source/TransformFunctions/arm_bitreversal2.S -rm -rf $SCRIPTPATH/third_party/arc_mli_package/ - -# rename all .cc files to .cpp, and do an inplace change of the headers -find . -name '*.cc' -exec sh -c 'mv "$0" "${0%.cc}.cpp"' {} \; - -# make sure that abs is undefined on arduino -find $SCRIPTPATH/ -name 'compatibility.h' -exec bash -c "$SEDCMD 's/#include /#include \\ -#include \"edge-impulse-sdk\/tensorflow\/lite\/portable_type_to_tflitetype.h\"/' {}" {} \; -find $SCRIPTPATH/ -name 'micro_utils.h' -exec bash -c "$SEDCMD 's/#include /#include \\ -#include \"edge-impulse-sdk\/tensorflow\/lite\/portable_type_to_tflitetype.h\"/' {}" {} \; - -# wrap all CMSIS-DSP .c files in a guard (defined in config.hpp) -find $SCRIPTPATH/CMSIS/DSP/Source -name "*.c" -print0 | while read -d $'\0' file; do - $SEDCMD '1i\ -#include \"edge-impulse-sdk/dsp/config.hpp\"\ -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -' "$file" - - $ECHOCMD '\n#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES' >> "$file" -done - -# remove all the -e files -find $SCRIPTPATH/ -name "*-e" -exec rm -f {} \; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/config.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/config.hpp index 0f3bd5f..86f638c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/config.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/config.hpp @@ -1,31 +1,26 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_CPP_CONFIG_H_ #define _EIDSP_CPP_CONFIG_H_ // clang-format off -#ifndef EIDSP_USE_CMSIS_DSP -#if defined(__MBED__) || defined(__TARGET_CPU_CORTEX_M0) || defined(__TARGET_CPU_CORTEX_M0PLUS) || defined(__TARGET_CPU_CORTEX_M3) || defined(__TARGET_CPU_CORTEX_M4) || defined(__TARGET_CPU_CORTEX_M7) || defined(USE_HAL_DRIVER) || defined(ARDUINO_NRF52_ADAFRUIT) +#ifndef EIDSP_USE_CMSIS_DSP // __ARM_ARCH_PROFILE is a predefine of arm-gcc. __TARGET_* is armcc +#if defined(__MBED__) || __ARM_ARCH_PROFILE == 'M' || defined(__TARGET_CPU_CORTEX_M0) || defined(__TARGET_CPU_CORTEX_M0PLUS) || defined(__TARGET_CPU_CORTEX_M3) || defined(__TARGET_CPU_CORTEX_M4) || defined(__TARGET_CPU_CORTEX_M7) || defined(USE_HAL_DRIVER) || defined(ARDUINO_NRF52_ADAFRUIT) // Mbed OS versions before 5.7 are not based on CMSIS5, disable CMSIS-DSP and CMSIS-NN instructions #if defined(__MBED__) #include "mbed_version.h" @@ -50,10 +45,6 @@ #endif // Mbed / ARM Core check #endif // ifndef EIDSP_USE_CMSIS_DSP -//TODO when we have other fixed point libraries, change this -//even if we don't use cmsis, use their fixed point FFT -#define EIDSP_USE_CMSIS_FIXED 1 - #if EIDSP_USE_CMSIS_DSP == 1 #define EIDSP_i32 int32_t #define EIDSP_i16 int16_t diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/dct/fast-dct-fft.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/dct/fast-dct-fft.cpp index 31e06ac..27420ca 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/dct/fast-dct-fft.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/dct/fast-dct-fft.cpp @@ -1,24 +1,18 @@ /* - * Fast discrete cosine transform algorithms (C) + * Copyright (c) 2022 Project Nayuki. (MIT License) * - * Copyright (c) 2017 Project Nayuki. (MIT License) - * https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
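A note on the reworked guard in config.hpp above: it now also treats __ARM_ARCH_PROFILE == 'M' (an arm-gcc predefine) as a Cortex-M signal alongside the armcc __TARGET_CPU_CORTEX_M* macros, and because the whole block still sits inside #ifndef EIDSP_USE_CMSIS_DSP, the auto-detection can be overridden from the build system. Illustration only (the compiler invocations below are examples, not part of this project's build):

// Forcing the choice instead of relying on auto-detection:
//   g++ -DEIDSP_USE_CMSIS_DSP=1 ...   // always use the CMSIS-DSP kernels
//   g++ -DEIDSP_USE_CMSIS_DSP=0 ...   // always use the portable fallback
// Either define wins, because config.hpp only auto-detects when the macro is unset.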
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * Permission is hereby granted, free of charge, to any person obtaining a copy of - * this software and associated documentation files (the "Software"), to deal in - * the Software without restriction, including without limitation the rights to - * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of - * the Software, and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - The Software is provided "as is", without warranty of any kind, express or - * implied, including but not limited to the warranties of merchantability, - * fitness for a particular purpose and noninfringement. In no event shall the - * authors or copyright holders be liable for any claim, damages or other - * liability, whether in an action of contract, tort or otherwise, arising from, - * out of or in connection with the Software or the use or other dealings in the - * Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 */ #include @@ -35,106 +29,53 @@ // DCT type II, unscaled int ei::dct::transform(float vector[], size_t len) { - const size_t fft_data_out_size = (len / 2 + 1) * sizeof(ei::fft_complex_t); - const size_t fft_data_in_size = len * sizeof(float); + const size_t fft_data_out_size = (len / 2 + 1) * sizeof(ei::fft_complex_t); + const size_t fft_data_in_size = len * sizeof(float); - // Allocate KissFFT input / output buffer + // Allocate KissFFT input / output buffer fft_complex_t *fft_data_out = - (ei::fft_complex_t*)ei_dsp_calloc(fft_data_out_size, 1); - if (!fft_data_out) { - return ei::EIDSP_OUT_OF_MEM; - } + (ei::fft_complex_t*)ei_dsp_calloc(fft_data_out_size, 1); + if (!fft_data_out) { + return ei::EIDSP_OUT_OF_MEM; + } float *fft_data_in = (float*)ei_dsp_calloc(fft_data_in_size, 1); - if (!fft_data_in) { - ei_dsp_free(fft_data_out, fft_data_out_size); - return ei::EIDSP_OUT_OF_MEM; - } - - // Preprocess the input buffer with the data from the vector - size_t halfLen = len / 2; - for (size_t i = 0; i < halfLen; i++) { - fft_data_in[i] = vector[i * 2]; - fft_data_in[len - 1 - i] = vector[i * 2 + 1]; - } - if (len % 2 == 1) { - fft_data_in[halfLen] = vector[len - 1]; - } - - int r = ei::numpy::rfft(fft_data_in, len, fft_data_out, (len / 2 + 1), len); - if (r != 0) { - ei_dsp_free(fft_data_in, fft_data_in_size); - ei_dsp_free(fft_data_out, fft_data_out_size); - return r; - } - - for (size_t i = 0; i < len / 2 + 1; i++) { - float temp = i * M_PI / (len * 2); - vector[i] = fft_data_out[i].r * cos(temp) + fft_data_out[i].i * sin(temp); - } - - ei_dsp_free(fft_data_in, fft_data_in_size); - ei_dsp_free(fft_data_out, fft_data_out_size); - - return 0; -} - -// DCT type III, unscaled -int ei::dct::inverse_transform(float vector[], size_t len) { - const size_t fft_data_out_size = len * sizeof(kiss_fft_cpx); - const size_t fft_data_in_size = len * sizeof(kiss_fft_cpx); - - // Allocate KissFFT input / output buffer - kiss_fft_cpx *fft_data_out = 
(kiss_fft_cpx*)ei_dsp_calloc(fft_data_out_size, 1); - if (!fft_data_out) { - return ei::EIDSP_OUT_OF_MEM; - } - - kiss_fft_cpx *fft_data_in = (kiss_fft_cpx*)ei_dsp_calloc(fft_data_in_size, 1); - if (!fft_data_in) { - ei_dsp_free(fft_data_out, fft_data_out_size); - return ei::EIDSP_OUT_OF_MEM; - } - - size_t kiss_fftr_mem_length; - - // Allocate KissFFT configuration - kiss_fft_cfg cfg = kiss_fft_alloc(len, 0, NULL, NULL, &kiss_fftr_mem_length); - if (!cfg) { - ei_dsp_free(fft_data_in, fft_data_in_size); - ei_dsp_free(fft_data_out, fft_data_out_size); - return ei::EIDSP_OUT_OF_MEM; - } - - ei_dsp_register_alloc(kiss_fftr_mem_length, cfg); - - // Preprocess and transform - if (len > 0) { - vector[0] /= 2; - } - - for (size_t i = 0; i < len; i++) { - float temp = i * M_PI / (len * 2); - fft_data_in[i].r = vector[i] * cos(temp); - fft_data_in[i].i *= -sin(temp); - } - - kiss_fft(cfg, fft_data_in, fft_data_out); - - // Postprocess the vectors - size_t halfLen = len / 2; - for (size_t i = 0; i < halfLen; i++) { - vector[i * 2 + 0] = fft_data_out[i].r; - vector[i * 2 + 1] = fft_data_out[len - 1 - i].r; - } - - if (len % 2 == 1) { - vector[len - 1] = fft_data_out[halfLen].r; - } - - ei_dsp_free(cfg, kiss_fftr_mem_length); - ei_dsp_free(fft_data_in, fft_data_in_size); - ei_dsp_free(fft_data_out, fft_data_out_size); - - return ei::EIDSP_OK; -} + if (!fft_data_in) { + ei_dsp_free(fft_data_out, fft_data_out_size); + return ei::EIDSP_OUT_OF_MEM; + } + + // Preprocess the input buffer with the data from the vector + size_t halfLen = len / 2; + for (size_t i = 0; i < halfLen; i++) { + fft_data_in[i] = vector[i * 2]; + fft_data_in[len - 1 - i] = vector[i * 2 + 1]; + } + if (len % 2 == 1) { + fft_data_in[halfLen] = vector[len - 1]; + } + + int r = ei::numpy::rfft(fft_data_in, len, fft_data_out, (len / 2 + 1), len); + if (r != 0) { + ei_dsp_free(fft_data_in, fft_data_in_size); + ei_dsp_free(fft_data_out, fft_data_out_size); + return r; + } + + size_t i = 0; + for (; i < len / 2 + 1; i++) { + float temp = i * M_PI / (len * 2); + vector[i] = fft_data_out[i].r * cos(temp) + fft_data_out[i].i * sin(temp); + } + //take advantage of hermetian symmetry to calculate remainder of signal + for (; i < len; i++) { + float temp = i * M_PI / (len * 2); + int conj_idx = len-i; + // second half bins not calculated would have just been the conjugate of the first half (note minus of imag) + vector[i] = fft_data_out[conj_idx].r * cos(temp) - fft_data_out[conj_idx].i * sin(temp); + } + ei_dsp_free(fft_data_in, fft_data_in_size); + ei_dsp_free(fft_data_out, fft_data_out_size); + + return 0; +} \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/dct/fast-dct-fft.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/dct/fast-dct-fft.h index e74ce4f..e31efe1 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/dct/fast-dct-fft.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/dct/fast-dct-fft.h @@ -1,24 +1,18 @@ /* - * Fast discrete cosine transform algorithms (C) + * Copyright (c) 2022 Project Nayuki. (MIT License) * - * Copyright (c) 2018 Project Nayuki. (MIT License) - * https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
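The rewritten ei::dct::transform above is the FFT-based (Makhoul-style) DCT-II: the input is reordered so even-index samples come first in ascending order and odd-index samples follow in descending order, a length-N real FFT is taken, and each output bin is the real part of a twiddled FFT bin. The new tail loop avoids computing the upper bins directly by exploiting the conjugate (Hermitian) symmetry of a real-input FFT. As a reading of the code (unscaled DCT-II, N = len):

    v_n = x_{2n}, \quad v_{N-1-n} = x_{2n+1}, \quad V = \mathrm{FFT}(v)

    X_k = \Re\!\big(e^{-i\pi k/(2N)} V_k\big) = \Re(V_k)\cos\tfrac{\pi k}{2N} + \Im(V_k)\sin\tfrac{\pi k}{2N}, \qquad 0 \le k \le N/2

    X_k = \Re(V_{N-k})\cos\tfrac{\pi k}{2N} - \Im(V_{N-k})\sin\tfrac{\pi k}{2N}, \qquad N/2 < k < N \quad (\text{since } V_k = \overline{V_{N-k}})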
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * Permission is hereby granted, free of charge, to any person obtaining a copy of - * this software and associated documentation files (the "Software"), to deal in - * the Software without restriction, including without limitation the rights to - * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of - * the Software, and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - The Software is provided "as is", without warranty of any kind, express or - * implied, including but not limited to the warranties of merchantability, - * fitness for a particular purpose and noninfringement. In no event shall the - * authors or copyright holders be liable for any claim, damages or other - * liability, whether in an action of contract, tort or otherwise, arising from, - * out of or in connection with the Software or the use or other dealings in the - * Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 */ #ifndef __FAST_DCT_FFT__H__ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_alloc.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_alloc.h new file mode 100644 index 0000000..6690570 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_alloc.h @@ -0,0 +1,79 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __EI_ALLOC__H__ +#define __EI_ALLOC__H__ + +#include "memory.hpp" + +#if EIDSP_TRACK_ALLOCATIONS +#include +#endif + +namespace ei { + +template +struct EiAlloc +{ + typedef T value_type; + EiAlloc() = default; + template + constexpr EiAlloc(const EiAlloc &) noexcept {} + + T *allocate(size_t n) + { + auto bytes = n * sizeof(T); + auto ptr = ei_dsp_malloc(bytes); +#if EIDSP_TRACK_ALLOCATIONS + get_allocs()[ptr] = bytes; +#endif + return (T *)ptr; + } + + void deallocate(T *p, size_t n) noexcept + { +#if EIDSP_TRACK_ALLOCATIONS + auto size_p = get_allocs().find(p); + ei_dsp_free(p,size_p->second); + get_allocs().erase(size_p); +#else + ei_dsp_free(p,0); +#endif + } +#if EIDSP_TRACK_ALLOCATIONS + private: + // [address] -> size requested + typedef std::map map_t; + static map_t& get_allocs() { + static map_t allocs; + return allocs; + } +#endif +}; + +template +bool operator==(const EiAlloc &, const EiAlloc &) { return true; } +template +bool operator!=(const EiAlloc &, const EiAlloc &) { return false; } +} + +#endif //!__EI_ALLOC__H__ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_dsp_handle.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_dsp_handle.h new file mode 100644 index 0000000..462117a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_dsp_handle.h @@ -0,0 +1,58 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2023 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __EI_DSP_HANDLE__H__ +#define __EI_DSP_HANDLE__H__ + +#include "edge-impulse-sdk/dsp/config.hpp" +#include "edge-impulse-sdk/dsp/numpy_types.h" + +class DspHandle { +public: + /** + * @brief Override and call ei_printf to print debug information, especially the current state + * + * @return int + */ + virtual int print() = 0; + + /** + * @brief Override and convert raw data into processed features. Any state should live inside your custom class. + * Provide a constructor to initialize your state. 
+ * + * @param signal Callback object to get raw data from + * @param output_matrix Output matrix to write features to + * @param config Configuration object, generated by Studio based on your DSP block parameters + * @param frequency Sampling frequency, as set in your project + * @return int 0 on success, anything else for failure + */ + virtual int extract(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config, const float frequency) = 0; + + // Must declare so user can override + /** + * @brief If you call new or ei_malloc anywhere in your class, you must override this function and delete your objects + * + */ + virtual ~DspHandle() {}; +}; + +#endif //!__EI_DSP_HANDLE__H__ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_flatten.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_flatten.h new file mode 100644 index 0000000..d7586b2 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_flatten.h @@ -0,0 +1,198 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2023 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifndef __EI_FLATTEN__H__ +#define __EI_FLATTEN__H__ + +#include "edge-impulse-sdk/dsp/ei_vector.h" +#include "edge-impulse-sdk/dsp/returntypes.hpp" +#include "edge-impulse-sdk/dsp/ei_dsp_handle.h" +#include "model-parameters/model_metadata.h" +#include "edge-impulse-sdk/dsp/numpy.hpp" +#include "edge-impulse-sdk/dsp/config.hpp" + +class flatten_class : public DspHandle { +public: + int print() override { + ei_printf("means: "); + for(int axis = 0; (size_t)axis < this->means.size(); axis++) { + ei_printf("axis: %i\n", axis); + for (size_t i = 0; i < this->means.size(); i++) { + ei_printf("%f ", this->means[axis][i]); + } + } + ei_printf("\n"); + return ei::EIDSP_OK; + } + + int extract(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config_ptr, const float frequency) override { + using namespace ei; + + ei_dsp_config_flatten_t config = *((ei_dsp_config_flatten_t*)config_ptr); + + uint32_t expected_matrix_size = 0; + if (config.average) expected_matrix_size += config.axes; + if (config.minimum) expected_matrix_size += config.axes; + if (config.maximum) expected_matrix_size += config.axes; + if (config.rms) expected_matrix_size += config.axes; + if (config.stdev) expected_matrix_size += config.axes; + if (config.skewness) expected_matrix_size += config.axes; + if (config.kurtosis) expected_matrix_size += config.axes; + if (config.moving_avg_num_windows) expected_matrix_size += config.axes; + + if (output_matrix->rows * output_matrix->cols != expected_matrix_size) { + EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + } + + int ret; + + // input matrix from the raw signal + matrix_t input_matrix(signal->total_length / config.axes, config.axes); + if (!input_matrix.buffer) { + EIDSP_ERR(EIDSP_OUT_OF_MEM); + } + signal->get_data(0, signal->total_length, input_matrix.buffer); + + // scale the signal + ret = numpy::scale(&input_matrix, config.scale_axes); + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to scale signal (%d)\n", ret); + EIDSP_ERR(ret); + } + + // transpose the matrix so we have one row per axis + numpy::transpose_in_place(&input_matrix); + + size_t out_matrix_ix = 0; + + for (size_t row = 0; row < input_matrix.rows; row++) { + matrix_t row_matrix(1, input_matrix.cols, input_matrix.buffer + (row * input_matrix.cols)); + + float mean; // to use with moving average + + if (config.average || config.moving_avg_num_windows) { + float fbuffer; + matrix_t out_matrix(1, 1, &fbuffer); + numpy::mean(&row_matrix, &out_matrix); + mean = out_matrix.buffer[0]; + if (config.average) { + output_matrix->buffer[out_matrix_ix++] = mean; + } + } + + if (config.minimum) { + float fbuffer; + matrix_t out_matrix(1, 1, &fbuffer); + numpy::min(&row_matrix, &out_matrix); + output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; + } + + if (config.maximum) { + float fbuffer; + matrix_t out_matrix(1, 1, &fbuffer); + numpy::max(&row_matrix, &out_matrix); + output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; + } + + if (config.rms) { + float fbuffer; + matrix_t out_matrix(1, 1, &fbuffer); + numpy::rms(&row_matrix, &out_matrix); + output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; + } + + if (config.stdev) { + float fbuffer; + matrix_t out_matrix(1, 1, &fbuffer); + numpy::stdev(&row_matrix, &out_matrix); + output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; + } + + if (config.skewness) { + float fbuffer; + matrix_t out_matrix(1, 1, &fbuffer); + numpy::skew(&row_matrix, &out_matrix); + output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; + } + + if 
(config.kurtosis) { + float fbuffer; + matrix_t out_matrix(1, 1, &fbuffer); + numpy::kurtosis(&row_matrix, &out_matrix); + output_matrix->buffer[out_matrix_ix++] = out_matrix.buffer[0]; + } + + if (config.moving_avg_num_windows) { + push_mean(row, mean); + output_matrix->buffer[out_matrix_ix++] = numpy::mean(means[row].data(), means[row].size()); + } + } + + // flatten again + output_matrix->cols = output_matrix->rows * output_matrix->cols; + output_matrix->rows = 1; + + return EIDSP_OK; + } + + static DspHandle* create(void* config, float _sampling_frequency); + + void* operator new(size_t size) { + // Custom memory allocation logic here + return ei_malloc(size); + } + + void operator delete(void* ptr) { + // Custom memory deallocation logic here + ei_free(ptr); + } + +private: + ei_vector> means; + ei_vector head_indexes; + size_t moving_avg_num_windows; + + flatten_class(int moving_avg_num_windows, int axes_count) : means(axes_count), head_indexes(axes_count, 0) { + this->moving_avg_num_windows = moving_avg_num_windows; + } + + void push_mean(int axis, float mean) { + auto& head = head_indexes[axis]; + if (head_indexes[axis] >= means[axis].size()) { + means[axis].push_back(mean); + } else { + means[axis][head] = mean; + } + head = head + 1; + // This is a lot cheaper than mod (%) + if (head >= moving_avg_num_windows) { + head = 0; + } + } +}; + +DspHandle* flatten_class::create(void* config_in, float _sampling_frequency) { // NOLINT def in header is OK at EI + auto config = reinterpret_cast(config_in); + return new flatten_class(config->moving_avg_num_windows, config->axes); +}; + +#endif //!__EI_FLATTEN__H__ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_hr.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_hr.hpp new file mode 100644 index 0000000..6570f9b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_hr.hpp @@ -0,0 +1,96 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
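flatten_class above (and hr_class below) follow the same DspHandle pattern: a static create() factory, an extract() that turns a raw signal into one row of features, and operator new/delete overloads that route through ei_malloc/ei_free. The call site is not part of this hunk, so the driver below is a hedged sketch only; the config pointer is assumed to reference a populated ei_dsp_config_flatten_t:

#include "edge-impulse-sdk/dsp/ei_flatten.h"

// Sketch only: how a DspHandle-based block is typically driven.
static int run_flatten_block(ei::signal_t *signal, ei::matrix_t *features,
                             void *dsp_config, float frequency) {
    DspHandle *handle = flatten_class::create(dsp_config, frequency);     // factory from the hunk above
    int res = handle->extract(signal, features, dsp_config, frequency);   // raw samples -> statistics row
    if (res != ei::EIDSP_OK) {
        delete handle;                                                    // operator delete -> ei_free
        return res;
    }
    handle->print();                                                      // dumps the per-axis moving-average state
    delete handle;
    return ei::EIDSP_OK;
}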
+ */ + +#ifndef HR_PPG_HPP +#define HR_PPG_HPP + +#include "edge-impulse-sdk/dsp/numpy.hpp" +#include "edge-impulse-sdk/dsp/ei_dsp_handle.h" +#include "edge-impulse-enterprise/hr/hr_ppg.hpp" + +class hr_class : public DspHandle { +public: + int print() override { + ei_printf("Last HR: %f\n", ppg._res.hr); + return ei::EIDSP_OK; + } + + int extract(ei::signal_t *signal, ei::matrix_t *output_matrix, void *config_ptr, const float frequency) override { + using namespace ei; + + // Don't need just yet + // ei_dsp_config_hr_t config = *((ei_dsp_config_hr_t*)config_ptr); + + + // TODO fix for axes / accel + size_t samples_per_inc = ppg.win_inc_samples; + // TODO go in a loop for the full window size, once I can actually test this vs studio + if(signal->total_length != samples_per_inc) { + return EIDSP_BUFFER_SIZE_MISMATCH; + } + + // TODO ask for smaller increments and bp them into place + // Copy into the end of the buffer + matrix_t temp(ppg.axes, samples_per_inc); + signal->get_data(0, samples_per_inc, temp.buffer); + + + output_matrix->buffer[0] = ppg.stream(&temp); + + output_matrix->rows = 1; + output_matrix->cols = 1; + return EIDSP_OK; + } + + // TODO: actually read in config: axes too! + hr_class(float frequency) : ppg(frequency, 1, 8*50, 2*50, true) { + } + + // Boilerplate below here + static DspHandle* create(void* config, float frequency); + + void* operator new(size_t size) { + // Custom memory allocation logic here + return ei_malloc(size); + } + + void operator delete(void* ptr) { + // Custom memory deallocation logic here + ei_free(ptr); + } + // end boilerplate +private: + ei::hr_ppg ppg; +}; + +DspHandle* hr_class::create(void* config_in, float frequency) { // NOLINT def in header is OK at EI + // Don't need just yet + // auto config = reinterpret_cast(config_in); + // TODO: actually read in config + return new hr_class(frequency); +}; + +/* +NOTE, contact EI sales for license and source to use EI heart rate and heart rate variance functions in deployment +*/ + +#endif \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_profiler.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_profiler.h index 01256fc..365d9e0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_profiler.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_profiler.h @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2020 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef __EIPROFILER__H__ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_utils.h index 294a423..2a2c5e8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_utils.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_utils.h @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2020 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef __EI_UTILS__H__ #define __EI_UTILS__H__ @@ -25,8 +20,8 @@ #define ARRAY_LENGTH(array) (sizeof((array))/sizeof((array)[0])) // Stringify -#define xstr(a) str(a) -#define str(a) #a +#define ei_xstr(a) ei_str(a) +#define ei_str(a) #a // Bit manipulation @@ -54,4 +49,6 @@ // Test whether all the flag bits in word are set. 
#define TEST_BIT_MASK(y, flag) ( ((y)&(flag)) == (flag) ) +#define EI_TRY(x) { auto res = (x); if(res != EIDSP_OK) { return res; } } + #endif //!__EI_UTILS__H__ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_vector.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_vector.h new file mode 100644 index 0000000..750b434 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/ei_vector.h @@ -0,0 +1,32 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#ifndef __EI_VECTOR__H__ +#define __EI_VECTOR__H__ + +#include "ei_alloc.h" +#include + +template +using ei_vector = std::vector>; + +#endif //!__EI_VECTOR__H__ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/image.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/image.hpp index 8d935e1..12c0da4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/image.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/image.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2020 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
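The new ei_vector.h above is a thin alias that plugs EiAlloc into std::vector, so standard containers draw their storage from ei_dsp_malloc/ei_dsp_free (and show up in EIDSP_TRACK_ALLOCATIONS accounting). A small usage sketch, assuming the alias reads std::vector<T, EiAlloc<T>>, which is what the truncated template in the hunk implies:

#include "edge-impulse-sdk/dsp/ei_vector.h"

// Sketch only: ei_vector behaves like std::vector, but its storage comes from
// the SDK heap hooks instead of the global allocator.
static float window_mean(const ei_vector<float> &window) {
    if (window.empty()) {
        return 0.0f;
    }
    float sum = 0.0f;
    for (float v : window) {
        sum += v;                          // elements live in ei_dsp_malloc'd memory
    }
    return sum / (float)window.size();
}

static void ei_vector_demo() {
    ei_vector<float> window;               // every push_back allocates through EiAlloc::allocate
    window.push_back(0.5f);
    window.push_back(1.5f);
    (void)window_mean(window);             // 1.0f
}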
* - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_IMAGE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/processing.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/processing.cpp index 98f91dd..5ff30c6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/processing.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/processing.cpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2020 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef __EIDSP_IMAGE_PROCESSING__H__ @@ -38,10 +33,10 @@ enum YUV_OPTIONS /** * @brief Convert YUV to RGB - * + * * @param rgb_out Output buffer (can be the same as yuv_in if big enough) * @param yuv_in Input buffer - * @param in_size_B Size of input image in B + * @param in_size_B Size of input image in B * @param opts Note, only BIG_ENDIAN_ORDER supported presently */ int yuv422_to_rgb888( @@ -59,7 +54,7 @@ int yuv422_to_rgb888( #define EI_GET_G_FROM_YUV(y, u, v) ((298 * y - 100 * u - 208 * v + 128) >> 8) #define EI_GET_B_FROM_YUV(y, u, v) ((298 * y + 516 * u + 128) >> 8) - int in_size_pixels = in_size_B / 4; + unsigned int in_size_pixels = in_size_B / 4; yuv_in += in_size_B - 1; int rgb_end = TEST_BIT_MASK(opts, PAD_4B) ? 
2 * in_size_B : (6 * in_size_B) / 4; @@ -102,11 +97,11 @@ int yuv422_to_rgb888( /** * @brief Crops an image. Can be in-place. 4B alignment for best performance * (Alignment is tested, will fall back to B by B movement) - * + * * @param srcWidth X dimension in pixels * @param srcHeight Y dimension in pixels - * @param srcImage Input buffer - * @param startX X coord of first pixel to keep + * @param srcImage Input buffer + * @param startX X coord of first pixel to keep * @param startY Y coord of the first pixel to keep * @param dstWidth Desired X dimension in pixels (should be smaller than srcWidth) * @param dstHeight Desired Y dimension in pixels (should be smaller than srcHeight) @@ -234,7 +229,7 @@ int crop_image_rgb888_packed( * Can be used to resize the image smaller or larger * If resizing much smaller than 1/3 size, then a more rubust algorithm should average all of the pixels * This algorithm uses bilinear interpolation - averages a 2x2 region to generate each new pixel - * + * * @param srcWidth Input image width in pixels * @param srcHeight Input image height in pixels * @param srcImage Input buffer @@ -278,7 +273,7 @@ int resize_image( //dstWidth still needed as is //dstHeight shouldn't be scaled - const uint8_t *s; + const uint8_t *s; uint8_t *d; for (y = 0; y < dstHeight; y++) { @@ -324,7 +319,7 @@ int resize_image( * @brief Calculate new dims that match the aspect ratio of destination * This prevents a squashed look * The smallest axis is held constant - * + * * @param srcWidth Input width in pixels * @param srcHeight Input height in pixels * @param dstWidth Ultimate width in pixels @@ -373,11 +368,42 @@ int crop_and_interpolate_rgb888( dstImage, cropWidth, cropHeight); - + if( res != EIDSP_OK) { return res; } // Finally, interpolate down to desired dimensions, in place return resize_image(dstImage, cropWidth, cropHeight, dstImage, dstWidth, dstHeight, 3); } +int crop_and_interpolate_image( + const uint8_t *srcImage, + int srcWidth, + int srcHeight, + uint8_t *dstImage, + int dstWidth, + int dstHeight, + int pixel_size_B) +{ + int cropWidth, cropHeight; + // What are dimensions that maintain aspect ratio? + calculate_crop_dims(srcWidth, srcHeight, dstWidth, dstHeight, cropWidth, cropHeight); + + // Now crop to that dimension + int res = cropImage( + srcImage, + srcWidth * pixel_size_B, + srcHeight, + ((srcWidth - cropWidth) / 2) * pixel_size_B, + (srcHeight - cropHeight) / 2, + dstImage, + cropWidth * pixel_size_B, + cropHeight, + 8); + + if( res != EIDSP_OK) { return res; } + + // Finally, interpolate down to desired dimensions, in place + return resize_image(dstImage, cropWidth, cropHeight, dstImage, dstWidth, dstHeight, pixel_size_B); +} + }}} //namespaces #endif //!__EI_IMAGE_PROCESSING__H__ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/processing.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/processing.hpp index e0bba8a..de8a3be 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/processing.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/image/processing.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2020 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. 
* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef __EIDSP_IMAGE_PROCESSING__H__ @@ -37,10 +32,10 @@ enum YUV_OPTIONS /** * @brief Convert YUV to RGB - * + * * @param rgb_out Output buffer (can be the same as yuv_in if big enough) * @param yuv_in Input buffer - * @param in_size_B Size of input image in B + * @param in_size_B Size of input image in B * @param opts Note, only BIG_ENDIAN_ORDER supported presently */ int yuv422_to_rgb888( @@ -52,11 +47,11 @@ int yuv422_to_rgb888( /** * @brief Crops an image. Can be in-place. 
4B alignment for best performance * (Alignment is tested, will fall back to B by B movement) - * + * * @param srcWidth X dimension in pixels * @param srcHeight Y dimension in pixels - * @param srcImage Input buffer - * @param startX X coord of first pixel to keep + * @param srcImage Input buffer + * @param startX X coord of first pixel to keep * @param startY Y coord of the first pixel to keep * @param dstWidth Desired X dimension in pixels (should be smaller than srcWidth) * @param dstHeight Desired Y dimension in pixels (should be smaller than srcHeight) @@ -104,7 +99,7 @@ constexpr int MONO_B_SIZE = 1; * Can be used to resize the image smaller or larger * If resizing much smaller than 1/3 size, then a more rubust algorithm should average all of the pixels * This algorithm uses bilinear interpolation - averages a 2x2 region to generate each new pixel - * + * * @param srcWidth Input image width in pixels * @param srcHeight Input image height in pixels * @param srcImage Input buffer @@ -126,7 +121,7 @@ void resize_image( * @brief Calculate new dims that match the aspect ratio of destination * This prevents a squashed look * The smallest axis is held constant - * + * * @param srcWidth Input width in pixels * @param srcHeight Input height in pixels * @param dstWidth Ultimate width in pixels @@ -145,15 +140,15 @@ void calculate_crop_dims( /** * @brief Crops, then interpolates to a desired new image size * Can be done in place (set srcImage == dstImage) - * + * * @param srcImage Input image buffer * @param srcWidth Input width in pixels * @param srcHeight Input height in pixels - * @param dstImage Output image buffer, can be same as input buffer + * @param dstImage Output image buffer, can be same as input buffer * @param dstWidth Desired new width in pixels * @param dstHeight Desired new height in pixels */ -void crop_and_interpolate_rgb888( +int crop_and_interpolate_rgb888( const uint8_t *srcImage, int srcWidth, int srcHeight, @@ -161,5 +156,28 @@ void crop_and_interpolate_rgb888( int dstWidth, int dstHeight); +/** + * @brief Crops, then interpolates to a desired new image size + * Can be done in place (set srcImage == dstImage) + * A more beneric version of the previously used + * crop_and_interpolate_rgb888 + * + * @param srcImage Input image buffer + * @param srcWidth Input width in pixels + * @param srcHeight Input height in pixels + * @param dstImage Output image buffer, can be same as input buffer + * @param dstWidth Desired new width in pixels + * @param dstHeight Desired new height in pixels + * @param pixel_size_B Size of pixels in Bytes. 
3 for RGB, 1 for mono + */ +int crop_and_interpolate_image( + const uint8_t *srcImage, + int srcWidth, + int srcHeight, + uint8_t *dstImage, + int dstWidth, + int dstHeight, + int pixel_size_B); + }}} //namespaces #endif //!__EI_IMAGE_PROCESSING__H__ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/_kiss_fft_guts.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/_kiss_fft_guts.h index 7bf5762..754896a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/_kiss_fft_guts.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/_kiss_fft_guts.h @@ -10,6 +10,8 @@ defines kiss_fft_scalar as either short or a float type and defines typedef struct { kiss_fft_scalar r; kiss_fft_scalar i; }kiss_fft_cpx; */ +#pragma once + #include "kiss_fft.h" #include diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fft.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fft.cpp index 33b1ae6..9393357 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fft.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fft.cpp @@ -7,7 +7,7 @@ */ -#include "_kiss_fft_guts.h" +#include "edge-impulse-sdk/dsp/kissfft/_kiss_fft_guts.h" /* The guts header contains all the multiplication and addition macros that are defined for fixed or floating point complex numbers. It also delares the kf_ internal functions. */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fftr.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fftr.cpp index c4cee87..b448730 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fftr.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fftr.cpp @@ -1,13 +1,13 @@ /* - * Copyright (c) 2003-2004, Mark Borgerding. All rights reserved. + * Copyright (c) 2003-2010, Mark Borgerding. All rights reserved. * This file is part of KISS FFT - https://github.com/mborgerding/kissfft * * SPDX-License-Identifier: BSD-3-Clause * See COPYING file for more information. */ -#include "kiss_fftr.h" -#include "_kiss_fft_guts.h" +#include "edge-impulse-sdk/dsp/kissfft/kiss_fftr.h" +#include "edge-impulse-sdk/dsp/kissfft/_kiss_fft_guts.h" struct kiss_fftr_state{ kiss_fft_cfg substate; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fftr.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fftr.h index 749f56e..49f0fd9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fftr.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kiss_fftr.h @@ -1,5 +1,5 @@ /* - * Copyright (c) 2003-2004, Mark Borgerding. All rights reserved. + * Copyright (c) 2003-2010, Mark Borgerding. All rights reserved. 
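The hunks above introduce crop_and_interpolate_image(), a pixel-size-generic version of crop_and_interpolate_rgb888(). The following is a minimal usage sketch rather than SDK code: it assumes the edge-impulse-sdk include root is on the include path and that the function sits in the ei::image::processing namespace implied by the nested "}}} //namespaces" closers.

    // Hypothetical caller: shrink a 160x120 RGB888 camera frame to a 96x96
    // model input via the new generic entry point. pixel_size_B = 3 selects
    // RGB888; 1 would select the mono path.
    #include <cstdint>
    #include <vector>
    #include "edge-impulse-sdk/dsp/image/processing.hpp"

    int prepare_model_input() {
        const int src_w = 160, src_h = 120;                 // camera frame
        const int dst_w = 96,  dst_h = 96;                  // model input
        std::vector<uint8_t> frame(src_w * src_h * 3, 0);   // filled by the camera driver
        std::vector<uint8_t> model_in(dst_w * dst_h * 3);

        // Crops to the destination aspect ratio first, then interpolates down.
        return ei::image::processing::crop_and_interpolate_image(
            frame.data(), src_w, src_h,
            model_in.data(), dst_w, dst_h,
            /*pixel_size_B=*/3);                            // 0 (EIDSP_OK) on success
    }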
* This file is part of KISS FFT - https://github.com/mborgerding/kissfft * * SPDX-License-Identifier: BSD-3-Clause diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft.h index 0c7927b..3619c00 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft.h @@ -10,7 +10,7 @@ #define KISSFFT_CLASS_HH #include #include -#include +#include "edge-impulse-sdk/dsp/ei_vector.h" template @@ -69,7 +69,7 @@ class kissfft else if ( inverse != _inverse ) { // conjugate the twiddle factors. - for ( typename std::vector::iterator it = _twiddles.begin(); + for ( typename ei_vector::iterator it = _twiddles.begin(); it != _twiddles.end(); ++it ) it->imag( -it->imag() ); } @@ -353,9 +353,9 @@ class kissfft std::size_t _nfft; bool _inverse; - std::vector _twiddles; - std::vector _stageRadix; - std::vector _stageRemainder; - mutable std::vector _scratchbuf; + ei_vector _twiddles; + ei_vector _stageRadix; + ei_vector _stageRemainder; + mutable ei_vector _scratchbuf; }; #endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft.hh b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft.hh deleted file mode 100644 index 0c7927b..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft.hh +++ /dev/null @@ -1,361 +0,0 @@ -/* - * Copyright (c) 2003-2010, Mark Borgerding. All rights reserved. - * This file is part of KISS FFT - https://github.com/mborgerding/kissfft - * - * SPDX-License-Identifier: BSD-3-Clause - * See COPYING file for more information. - */ - -#ifndef KISSFFT_CLASS_HH -#define KISSFFT_CLASS_HH -#include -#include -#include - - -template -class kissfft -{ - public: - - typedef std::complex cpx_t; - - kissfft( const std::size_t nfft, - const bool inverse ) - :_nfft(nfft) - ,_inverse(inverse) - { - // fill twiddle factors - _twiddles.resize(_nfft); - const scalar_t phinc = (_inverse?2:-2)* std::acos( (scalar_t) -1) / _nfft; - for (std::size_t i=0;i<_nfft;++i) - _twiddles[i] = std::exp( cpx_t(0,i*phinc) ); - - //factorize - //start factoring out 4's, then 2's, then 3,5,7,9,... - std::size_t n= _nfft; - std::size_t p=4; - do { - while (n % p) { - switch (p) { - case 4: p = 2; break; - case 2: p = 3; break; - default: p += 2; break; - } - if (p*p>n) - p = n;// no more factors - } - n /= p; - _stageRadix.push_back(p); - _stageRemainder.push_back(n); - }while(n>1); - } - - - /// Changes the FFT-length and/or the transform direction. - /// - /// @post The @c kissfft object will be in the same state as if it - /// had been newly constructed with the passed arguments. - /// However, the implementation may be faster than constructing a - /// new fft object. - void assign( const std::size_t nfft, - const bool inverse ) - { - if ( nfft != _nfft ) - { - kissfft tmp( nfft, inverse ); // O(n) time. - std::swap( tmp, *this ); // this is O(1) in C++11, O(n) otherwise. - } - else if ( inverse != _inverse ) - { - // conjugate the twiddle factors. - for ( typename std::vector::iterator it = _twiddles.begin(); - it != _twiddles.end(); ++it ) - it->imag( -it->imag() ); - } - } - - /// Calculates the complex Discrete Fourier Transform. - /// - /// The size of the passed arrays must be passed in the constructor. 
- /// The sum of the squares of the absolute values in the @c dst - /// array will be @c N times the sum of the squares of the absolute - /// values in the @c src array, where @c N is the size of the array. - /// In other words, the l_2 norm of the resulting array will be - /// @c sqrt(N) times as big as the l_2 norm of the input array. - /// This is also the case when the inverse flag is set in the - /// constructor. Hence when applying the same transform twice, but with - /// the inverse flag changed the second time, then the result will - /// be equal to the original input times @c N. - void transform(const cpx_t * fft_in, cpx_t * fft_out, const std::size_t stage = 0, const std::size_t fstride = 1, const std::size_t in_stride = 1) const - { - const std::size_t p = _stageRadix[stage]; - const std::size_t m = _stageRemainder[stage]; - cpx_t * const Fout_beg = fft_out; - cpx_t * const Fout_end = fft_out + p*m; - - if (m==1) { - do{ - *fft_out = *fft_in; - fft_in += fstride*in_stride; - }while(++fft_out != Fout_end ); - }else{ - do{ - // recursive call: - // DFT of size m*p performed by doing - // p instances of smaller DFTs of size m, - // each one takes a decimated version of the input - transform(fft_in, fft_out, stage+1, fstride*p,in_stride); - fft_in += fstride*in_stride; - }while( (fft_out += m) != Fout_end ); - } - - fft_out=Fout_beg; - - // recombine the p smaller DFTs - switch (p) { - case 2: kf_bfly2(fft_out,fstride,m); break; - case 3: kf_bfly3(fft_out,fstride,m); break; - case 4: kf_bfly4(fft_out,fstride,m); break; - case 5: kf_bfly5(fft_out,fstride,m); break; - default: kf_bfly_generic(fft_out,fstride,m,p); break; - } - } - - /// Calculates the Discrete Fourier Transform (DFT) of a real input - /// of size @c 2*N. - /// - /// The 0-th and N-th value of the DFT are real numbers. These are - /// stored in @c dst[0].real() and @c dst[1].imag() respectively. - /// The remaining DFT values up to the index N-1 are stored in - /// @c dst[1] to @c dst[N-1]. - /// The other half of the DFT values can be calculated from the - /// symmetry relation - /// @code - /// DFT(src)[2*N-k] == conj( DFT(src)[k] ); - /// @endcode - /// The same scaling factors as in @c transform() apply. - /// - /// @note For this to work, the types @c scalar_t and @c cpx_t - /// must fulfill the following requirements: - /// - /// For any object @c z of type @c cpx_t, - /// @c reinterpret_cast(z)[0] is the real part of @c z and - /// @c reinterpret_cast(z)[1] is the imaginary part of @c z. - /// For any pointer to an element of an array of @c cpx_t named @c p - /// and any valid array index @c i, @c reinterpret_cast(p)[2*i] - /// is the real part of the complex number @c p[i], and - /// @c reinterpret_cast(p)[2*i+1] is the imaginary part of the - /// complex number @c p[i]. - /// - /// Since C++11, these requirements are guaranteed to be satisfied for - /// @c scalar_ts being @c float, @c double or @c long @c double - /// together with @c cpx_t being @c std::complex. - void transform_real( const scalar_t * const src, - cpx_t * const dst ) const - { - const std::size_t N = _nfft; - if ( N == 0 ) - return; - - // perform complex FFT - transform( reinterpret_cast(src), dst ); - - // post processing for k = 0 and k = N - dst[0] = cpx_t( dst[0].real() + dst[0].imag(), - dst[0].real() - dst[0].imag() ); - - // post processing for all the other k = 1, 2, ..., N-1 - const scalar_t pi = std::acos( (scalar_t) -1); - const scalar_t half_phi_inc = ( _inverse ? 
pi : -pi ) / N; - const cpx_t twiddle_mul = std::exp( cpx_t(0, half_phi_inc) ); - for ( std::size_t k = 1; 2*k < N; ++k ) - { - const cpx_t w = (scalar_t)0.5 * cpx_t( - dst[k].real() + dst[N-k].real(), - dst[k].imag() - dst[N-k].imag() ); - const cpx_t z = (scalar_t)0.5 * cpx_t( - dst[k].imag() + dst[N-k].imag(), - -dst[k].real() + dst[N-k].real() ); - const cpx_t twiddle = - k % 2 == 0 ? - _twiddles[k/2] : - _twiddles[k/2] * twiddle_mul; - dst[ k] = w + twiddle * z; - dst[N-k] = std::conj( w - twiddle * z ); - } - if ( N % 2 == 0 ) - dst[N/2] = std::conj( dst[N/2] ); - } - - private: - - void kf_bfly2( cpx_t * Fout, const size_t fstride, const std::size_t m) const - { - for (std::size_t k=0;k _scratchbuf.size()) _scratchbuf.resize(p); - - for ( std::size_t u=0; u=_nfft) - twidx-=_nfft; - Fout[ k ] += _scratchbuf[q] * twiddles[twidx]; - } - k += m; - } - } - } - - std::size_t _nfft; - bool _inverse; - std::vector _twiddles; - std::vector _stageRadix; - std::vector _stageRemainder; - mutable std::vector _scratchbuf; -}; -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft_i32.hh b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft_i32.hh deleted file mode 100644 index 5871e00..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/kissfft/kissfft_i32.hh +++ /dev/null @@ -1,304 +0,0 @@ -#ifndef KISSFFT_I32_CLASS_HH -#define KISSFFT_I32_CLASS_HH - -#include -#include -#include - -// TODO1: substitute complex (behaviour not defined for nonfloats), should be faster -// TODO2: use std:: namespace -// TODO3: make unittests for all ffts (c, cpp, i32) - -template -struct complex_s -{ - DType real; - DType imag; -}; - -class kissfft_i32 -{ -private: - - using scalar_type = int32_t; - using cpx_type = complex; - - scalar_type _scale_factor; - std::size_t _nfft; - bool _inverse; - std::vector _twiddles; - std::vector _stageRadix; - std::vector _stageRemainder; - -public: - - // scale_factor: upscale twiddle-factors otherwise they lie between 0..1 (out of range for integer) --> fixed point math - kissfft_i32(const std::size_t nfft, const bool inverse, const double scale_factor = 1024.0) - : _scale_factor(scalar_type(scale_factor)), _nfft(nfft), _inverse(inverse) - { - // fill twiddle factors - _twiddles.resize(_nfft); - const double phinc = (_inverse ? 2 : -2) * acos(-1.0) / _nfft; - for (std::size_t i = 0; i < _nfft; ++i) - { - _twiddles[i] = scale_factor * exp(complex(0, i * phinc)); - } - //factorize - //start factoring out 4's, then 2's, then 3,5,7,9,... - std::size_t n = _nfft; - std::size_t p = 4; - do - { - while (n % p) - { - switch (p) - { - case 4: - p = 2; - break; - case 2: - p = 3; - break; - default: - p += 2; - break; - } - if (p * p > n) p = n;// no more factors - } - n /= p; - _stageRadix.push_back(p); - _stageRemainder.push_back(n); - } while (n > 1); - } - - /// Calculates the complex Discrete Fourier Transform. - /// - /// The size of the passed arrays must be passed in the constructor. - /// The sum of the squares of the absolute values in the @c dst - /// array will be @c N times the sum of the squares of the absolute - /// values in the @c src array, where @c N is the size of the array. - /// In other words, the l_2 norm of the resulting array will be - /// @c sqrt(N) times as big as the l_2 norm of the input array. - /// This is also the case when the inverse flag is set in the - /// constructor. 
Hence when applying the same transform twice, but with - /// the inverse flag changed the second time, then the result will - /// be equal to the original input times @c N. - void transform(const cpx_type * FSrc, - cpx_type * FDst, - const std::size_t stage = 0, - const std::size_t fstride = 1, - const std::size_t in_stride = 1) const - { - const std::size_t p = _stageRadix[stage]; - const std::size_t m = _stageRemainder[stage]; - cpx_type *const Fout_beg = FDst; - cpx_type *const Fout_end = FDst + p * m; - - if (m == 1) - { - do - { - *FDst = *FSrc; - FSrc += fstride * in_stride; - } while (++FDst != Fout_end); - } - else - { - do - { - // recursive call: - // DFT of size m*p performed by doing - // p instances of smaller DFTs of size m, - // each one takes a decimated version of the input - transform(FSrc, FDst, stage + 1, fstride * p, in_stride); - FSrc += fstride * in_stride; - } while ((FDst += m) != Fout_end); - } - - FDst = Fout_beg; - - // recombine the p smaller DFTs - switch (p) - { - case 2: - kf_bfly2(FDst, fstride, m); - break; - case 3: - kf_bfly3(FDst, fstride, m); - break; - case 4: - kf_bfly4(FDst, fstride, m); - break; - case 5: - kf_bfly5(FDst, fstride, m); - break; - default: - kf_bfly_generic(FDst, fstride, m, p); - break; - } - } - -private: - - void kf_bfly2(cpx_type *const Fout, const size_t fstride, const std::size_t m) const - { - for (std::size_t k = 0; k < m; ++k) - { - const cpx_type t = (Fout[m + k] * _twiddles[k * fstride]) / _scale_factor; - Fout[m + k] = Fout[k] - t; - Fout[k] += t; - } - } - - void kf_bfly3(cpx_type *Fout, const std::size_t fstride, const std::size_t m) const - { - std::size_t k = m; - const std::size_t m2 = 2 * m; - const cpx_type *tw1, *tw2; - cpx_type scratch[5]; - const cpx_type epi3 = _twiddles[fstride * m]; - - tw1 = tw2 = &_twiddles[0]; - - do - { - scratch[1] = (Fout[m] * *tw1) / _scale_factor; - scratch[2] = (Fout[m2] * *tw2) / _scale_factor; - - scratch[3] = scratch[1] + scratch[2]; - scratch[0] = scratch[1] - scratch[2]; - tw1 += fstride; - tw2 += fstride * 2; - - Fout[m] = Fout[0] - (scratch[3] / 2); - scratch[0] *= epi3.imag(); - scratch[0] /= _scale_factor; - - Fout[0] += scratch[3]; - - Fout[m2] = cpx_type(Fout[m].real() + scratch[0].imag(), Fout[m].imag() - scratch[0].real()); - - Fout[m] += cpx_type(-scratch[0].imag(), scratch[0].real()); - ++Fout; - } while (--k); - } - - void kf_bfly4(cpx_type *const Fout, const std::size_t fstride, const std::size_t m) const - { - cpx_type scratch[7]; - const scalar_type negative_if_inverse = _inverse ? 
-1 : +1; - - for (std::size_t k = 0; k < m; ++k) - { - scratch[0] = (Fout[k + m] * _twiddles[k * fstride]) / _scale_factor; - scratch[1] = (Fout[k + 2 * m] * _twiddles[k * fstride * 2]) / _scale_factor; - scratch[2] = (Fout[k + 3 * m] * _twiddles[k * fstride * 3]) / _scale_factor; - scratch[5] = Fout[k] - scratch[1]; - - Fout[k] += scratch[1]; - scratch[3] = scratch[0] + scratch[2]; - scratch[4] = scratch[0] - scratch[2]; - scratch[4] = cpx_type(scratch[4].imag() * negative_if_inverse, - -scratch[4].real() * negative_if_inverse); - - Fout[k + 2 * m] = Fout[k] - scratch[3]; - Fout[k] += scratch[3]; - Fout[k + m] = scratch[5] + scratch[4]; - Fout[k + 3 * m] = scratch[5] - scratch[4]; - } - } - - void kf_bfly5(cpx_type *const Fout, const std::size_t fstride, const std::size_t m) const - { - cpx_type *Fout0, *Fout1, *Fout2, *Fout3, *Fout4; - cpx_type scratch[13]; - const cpx_type ya = _twiddles[fstride * m]; - const cpx_type yb = _twiddles[fstride * 2 * m]; - - Fout0 = Fout; - Fout1 = Fout0 + m; - Fout2 = Fout0 + 2 * m; - Fout3 = Fout0 + 3 * m; - Fout4 = Fout0 + 4 * m; - - for (std::size_t u = 0; u < m; ++u) - { - scratch[0] = *Fout0; - - scratch[1] = (*Fout1 * _twiddles[u * fstride]) / _scale_factor; - scratch[2] = (*Fout2 * _twiddles[2 * u * fstride]) / _scale_factor; - scratch[3] = (*Fout3 * _twiddles[3 * u * fstride]) / _scale_factor; - scratch[4] = (*Fout4 * _twiddles[4 * u * fstride]) / _scale_factor; - - scratch[7] = scratch[1] + scratch[4]; - scratch[10] = scratch[1] - scratch[4]; - scratch[8] = scratch[2] + scratch[3]; - scratch[9] = scratch[2] - scratch[3]; - - *Fout0 += scratch[7]; - *Fout0 += scratch[8]; - - scratch[5] = scratch[0] + (cpx_type( - scratch[7].real() * ya.real() + scratch[8].real() * yb.real(), - scratch[7].imag() * ya.real() + scratch[8].imag() * yb.real() ) / _scale_factor); - - scratch[6] = cpx_type( - scratch[10].imag() * ya.imag() + scratch[9].imag() * yb.imag(), - -scratch[10].real() * ya.imag() - scratch[9].real() * yb.imag() ) / _scale_factor; - - *Fout1 = scratch[5] - scratch[6]; - *Fout4 = scratch[5] + scratch[6]; - - scratch[11] = scratch[0] + (cpx_type( - scratch[7].real() * yb.real() + scratch[8].real() * ya.real(), - scratch[7].imag() * yb.real() + scratch[8].imag() * ya.real() ) / _scale_factor); - - scratch[12] = cpx_type( - -scratch[10].imag() * yb.imag() + scratch[9].imag() * ya.imag(), - scratch[10].real() * yb.imag() - scratch[9].real() * ya.imag() ) / _scale_factor; - - *Fout2 = scratch[11] + scratch[12]; - *Fout3 = scratch[11] - scratch[12]; - - ++Fout0; - ++Fout1; - ++Fout2; - ++Fout3; - ++Fout4; - } - } - - /* perform the butterfly for one stage of a mixed radix FFT */ - void kf_bfly_generic(cpx_type * const Fout, const size_t fstride, const std::size_t m, const std::size_t p) const - { - const cpx_type *twiddles = &_twiddles[0]; - cpx_type scratchbuf[p]; - - for (std::size_t u = 0; u < m; ++u) - { - std::size_t k = u; - for (std::size_t q1 = 0; q1 < p; ++q1) - { - scratchbuf[q1] = Fout[k]; - k += m; - } - - k = u; - for (std::size_t q1 = 0; q1 < p; ++q1) - { - std::size_t twidx = 0; - Fout[k] = scratchbuf[0]; - for (std::size_t q = 1; q < p; ++q) - { - twidx += fstride * k; - if (twidx >= _nfft) - twidx -= _nfft; - Fout[k] += (scratchbuf[q] * twiddles[twidx]) / _scale_factor; - } - k += m; - } - } - } -}; - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/memory.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/memory.cpp index 5fb4f0a..299694a 100644 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/memory.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/memory.cpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #include "memory.hpp" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/memory.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/memory.hpp index ee9c146..2ce95a7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/memory.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/memory.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_MEMORY_H_ @@ -25,7 +20,10 @@ // clang-format off #include +#include #include "../porting/ei_classifier_porting.h" +#include "edge-impulse-sdk/classifier/ei_aligned_malloc.h" +#include "config.hpp" extern size_t ei_memory_in_use; extern size_t ei_memory_peak_use; @@ -36,6 +34,17 @@ extern size_t ei_memory_peak_use; #define ei_dsp_printf (void) #endif +typedef std::unique_ptr ei_unique_ptr_t; +#define EI_ALLOCATE_AUTO_POINTER(ptr, size) \ + ptr = static_cast(ei_calloc(size,sizeof(*ptr))); \ + ei_unique_ptr_t __ptr__(ptr,ei_free); + +#define EI_ERR_AND_RETURN_ON_NULL(ptr,code) \ + if( ! (ptr) ) { \ + ei_printf("Null check failed\n"); \ + return code; \ + } + namespace ei { /** @@ -54,7 +63,7 @@ namespace ei { if (ei_memory_in_use > ei_memory_peak_use) { \ ei_memory_peak_use = ei_memory_in_use; \ } \ - ei_dsp_printf("alloc %lu bytes (in_use=%lu, peak=%lu) (%s@%s:%d) %p\n", \ + ei_dsp_printf("alloc %lu bytes (in_use=%lu, peak=%lu) (%s@ %s:%d) %p\n", \ (unsigned long)bytes, (unsigned long)ei_memory_in_use, (unsigned long)ei_memory_peak_use, fn, file, line, ptr); /** @@ -69,7 +78,7 @@ namespace ei { if (ei_memory_in_use > ei_memory_peak_use) { \ ei_memory_peak_use = ei_memory_in_use; \ } \ - ei_dsp_printf("alloc matrix %lu x %lu = %lu bytes (in_use=%lu, peak=%lu) (%s@%s:%d) %p\n", \ + ei_dsp_printf("alloc matrix %lu x %lu = %lu bytes (in_use=%lu, peak=%lu) (%s@ %s:%d) %p\n", \ (unsigned long)rows, (unsigned long)cols, (unsigned long)(rows * cols * type_size), (unsigned long)ei_memory_in_use, \ (unsigned long)ei_memory_peak_use, fn, file, line, ptr); @@ -79,7 +88,7 @@ namespace ei { */ #define ei_dsp_register_free_internal(fn, file, line, bytes, ptr) \ ei_memory_in_use -= bytes; \ - ei_dsp_printf("free %lu bytes (in_use=%lu, peak=%lu) (%s@%s:%d) %p\n", \ + ei_dsp_printf("free %lu bytes (in_use=%lu, peak=%lu) (%s@ %s:%d) %p\n", \ (unsigned long)bytes, (unsigned long)ei_memory_in_use, (unsigned long)ei_memory_peak_use, fn, file, line, ptr); /** @@ -91,7 +100,7 @@ namespace ei { */ #define ei_dsp_register_matrix_free_internal(fn, file, line, rows, cols, type_size, ptr) \ ei_memory_in_use -= (rows * cols * type_size); \ - ei_dsp_printf("free matrix %lu x %lu = %lu bytes (in_use=%lu, peak=%lu) (%s@%s:%d) %p\n", \ + ei_dsp_printf("free matrix %lu x %lu = %lu bytes (in_use=%lu, peak=%lu) (%s@ %s:%d) %p\n", \ (unsigned long)rows, (unsigned long)cols, (unsigned long)(rows * cols * type_size), \ (unsigned long)ei_memory_in_use, (unsigned long)ei_memory_peak_use, fn, file, line, ptr); @@ -106,10 +115,6 @@ namespace ei { #define EI_DSP_MATRIX_B(name, ...) matrix_t name(__VA_ARGS__, __func__, __FILE__, __LINE__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } #define EI_DSP_QUANTIZED_MATRIX(name, ...) 
quantized_matrix_t name(__VA_ARGS__, NULL, __func__, __FILE__, __LINE__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } #define EI_DSP_QUANTIZED_MATRIX_B(name, ...) quantized_matrix_t name(__VA_ARGS__, __func__, __FILE__, __LINE__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - #define EI_DSP_i16_MATRIX(name, rows, cols) matrix_i16_t name(rows, cols, NULL, __func__, __FILE__, __LINE__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - #define EI_DSP_i16_MATRIX_B(name, ...) matrix_i16_t name(__VA_ARGS__, __func__, __FILE__, __LINE__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - #define EI_DSP_i32_MATRIX(name, rows, cols) matrix_i32_t name(rows, cols, NULL, __func__, __FILE__, __LINE__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - #define EI_DSP_i32_MATRIX_B(name, ...) matrix_i32_t name(__VA_ARGS__, __func__, __FILE__, __LINE__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } #else #define ei_dsp_register_alloc(...) (void)0 #define ei_dsp_register_matrix_alloc(...) (void)0 @@ -122,10 +127,6 @@ namespace ei { #define EI_DSP_MATRIX_B(name, ...) matrix_t name(__VA_ARGS__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } #define EI_DSP_QUANTIZED_MATRIX(name, ...) quantized_matrix_t name(__VA_ARGS__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } #define EI_DSP_QUANTIZED_MATRIX_B(name, ...) quantized_matrix_t name(__VA_ARGS__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - #define EI_DSP_i16_MATRIX(name, ...) matrix_i16_t name(__VA_ARGS__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - #define EI_DSP_i16_MATRIX_B(name, ...) matrix_i16_t name(__VA_ARGS__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - #define EI_DSP_i32_MATRIX(name, ...) matrix_i32_t name(__VA_ARGS__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - #define EI_DSP_i32_MATRIX_B(name, ...) matrix_i32_t name(__VA_ARGS__); if (!name.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } #endif #if EIDSP_TRACK_ALLOCATIONS diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/numpy.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/numpy.hpp index 1d4fc06..1c92fe5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/numpy.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/numpy.hpp @@ -1,42 +1,55 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. 
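Regarding the scoped-allocation helpers added to memory.hpp above (ei_unique_ptr_t, EI_ALLOCATE_AUTO_POINTER, EI_ERR_AND_RETURN_ON_NULL): a short usage sketch, assuming the macro allocates with ei_calloc and binds the raw pointer to a scope-local unique_ptr that runs ei_free on exit, as the hunk suggests. The process_row() helper and the explicit include of returntypes.hpp for the EIDSP_* codes are this sketch's own additions.

    #include <cstddef>
    #include "edge-impulse-sdk/dsp/memory.hpp"
    #include "edge-impulse-sdk/dsp/returntypes.hpp"        // EIDSP_OK / EIDSP_OUT_OF_MEM

    // Hypothetical helper: zero-initialized scratch row, released automatically
    // on every return path by the scope guard the macro sets up.
    static int process_row(size_t cols) {
        float *row;
        EI_ALLOCATE_AUTO_POINTER(row, cols);               // row = ei_calloc(cols, sizeof(*row))
        EI_ERR_AND_RETURN_ON_NULL(row, EIDSP_OUT_OF_MEM);  // bail out if allocation failed

        for (size_t i = 0; i < cols; i++) {
            row[i] = 0.5f;                                 // real work would go here
        }
        return EIDSP_OK;                                   // row freed when the guard leaves scope
    }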
See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_NUMPY_H_ #define _EIDSP_NUMPY_H_ +// it's valid to include the SDK without a model, but there's information that we need +// in model_metadata.h (like the FFT tables used). +// if the compiler does not support the __has_include directive we'll assume that the +// file exists. +#ifndef __has_include +#define __has_include 1 +#endif // __has_include + #include #include #include #include +#include "ei_vector.h" +#include #include "numpy_types.h" #include "config.hpp" #include "returntypes.hpp" #include "memory.hpp" +#include "ei_utils.h" #include "dct/fast-dct-fft.h" #include "kissfft/kiss_fftr.h" -#if EIDSP_USE_CMSIS_FIXED +#if __has_include("model-parameters/model_metadata.h") +#include "model-parameters/model_metadata.h" +#endif +#if EIDSP_USE_CMSIS_DSP #include "edge-impulse-sdk/CMSIS/DSP/Include/arm_math.h" +#include "edge-impulse-sdk/CMSIS/DSP/Include/arm_const_structs.h" #endif +// For the following CMSIS includes, we want to use the C fallback, so include whether or not we set the CMSIS flag +#include "edge-impulse-sdk/CMSIS/DSP/Include/dsp/statistics_functions.h" + #ifdef __MBED__ #include "mbed.h" #else @@ -47,14 +60,28 @@ namespace ei { +using fvec = ei_vector; +using ivec = ei_vector; + // clang-format off // lookup table for quantized values between 0.0f and 1.0f -static const float quantized_values_one_zero[] = { (0.0f / 1.0f), (1.0f / 100.0f), (2.0f / 100.0f), (3.0f / 100.0f), (4.0f / 100.0f), (1.0f / 22.0f), (1.0f / 21.0f), (1.0f / 20.0f), (1.0f / 19.0f), (1.0f / 18.0f), (1.0f / 17.0f), (6.0f / 100.0f), (1.0f / 16.0f), (1.0f / 15.0f), (7.0f / 100.0f), (1.0f / 14.0f), (1.0f / 13.0f), (8.0f / 100.0f), (1.0f / 12.0f), (9.0f / 100.0f), (1.0f / 11.0f), (2.0f / 21.0f), (1.0f / 10.0f), (2.0f / 19.0f), (11.0f / 100.0f), (1.0f / 9.0f), (2.0f / 17.0f), (12.0f / 100.0f), (1.0f / 8.0f), (13.0f / 100.0f), (2.0f / 15.0f), (3.0f / 22.0f), (14.0f / 100.0f), (1.0f / 7.0f), (3.0f / 20.0f), (2.0f / 13.0f), (3.0f / 19.0f), (16.0f / 100.0f), (1.0f / 6.0f), (17.0f / 100.0f), (3.0f / 17.0f), (18.0f / 100.0f), (2.0f / 11.0f), (3.0f / 16.0f), (19.0f / 100.0f), (4.0f / 21.0f), (1.0f / 5.0f), (21.0f / 100.0f), (4.0f / 19.0f), (3.0f / 14.0f), (22.0f / 100.0f), (2.0f / 9.0f), (5.0f / 22.0f), (23.0f / 100.0f), (3.0f / 13.0f), (4.0f / 17.0f), (5.0f / 21.0f), (24.0f / 100.0f), (1.0f / 4.0f), (26.0f / 100.0f), (5.0f / 19.0f), (4.0f / 15.0f), (27.0f / 100.0f), (3.0f / 11.0f), (5.0f / 18.0f), (28.0f / 100.0f), (2.0f / 7.0f), (29.0f / 100.0f), (5.0f / 17.0f), (3.0f / 10.0f), (4.0f / 13.0f), (31.0f / 100.0f), (5.0f / 16.0f), (6.0f / 19.0f), (7.0f / 22.0f), (32.0f / 100.0f), (33.0f / 100.0f), (1.0f / 3.0f), (34.0f / 100.0f), (7.0f / 20.0f), (6.0f / 17.0f), (5.0f / 14.0f), (36.0f / 100.0f), (4.0f / 11.0f), (7.0f / 19.0f), (37.0f / 100.0f), (3.0f / 8.0f), (38.0f / 100.0f), (8.0f / 21.0f), (5.0f / 13.0f), (7.0f / 18.0f), (39.0f / 100.0f), (2.0f / 5.0f), (9.0f / 22.0f), (41.0f / 
100.0f), (7.0f / 17.0f), (5.0f / 12.0f), (42.0f / 100.0f), (8.0f / 19.0f), (3.0f / 7.0f), (43.0f / 100.0f), (7.0f / 16.0f), (44.0f / 100.0f), (4.0f / 9.0f), (9.0f / 20.0f), (5.0f / 11.0f), (46.0f / 100.0f), (6.0f / 13.0f), (7.0f / 15.0f), (47.0f / 100.0f), (8.0f / 17.0f), (9.0f / 19.0f), (10.0f / 21.0f), (48.0f / 100.0f), (49.0f / 100.0f), (1.0f / 2.0f), (51.0f / 100.0f), (52.0f / 100.0f), (11.0f / 21.0f), (10.0f / 19.0f), (9.0f / 17.0f), (53.0f / 100.0f), (8.0f / 15.0f), (7.0f / 13.0f), (54.0f / 100.0f), (6.0f / 11.0f), (11.0f / 20.0f), (5.0f / 9.0f), (56.0f / 100.0f), (9.0f / 16.0f), (57.0f / 100.0f), (4.0f / 7.0f), (11.0f / 19.0f), (58.0f / 100.0f), (7.0f / 12.0f), (10.0f / 17.0f), (59.0f / 100.0f), (13.0f / 22.0f), (3.0f / 5.0f), (61.0f / 100.0f), (11.0f / 18.0f), (8.0f / 13.0f), (13.0f / 21.0f), (62.0f / 100.0f), (5.0f / 8.0f), (63.0f / 100.0f), (12.0f / 19.0f), (7.0f / 11.0f), (64.0f / 100.0f), (9.0f / 14.0f), (11.0f / 17.0f), (13.0f / 20.0f), (66.0f / 100.0f), (2.0f / 3.0f), (67.0f / 100.0f), (68.0f / 100.0f), (15.0f / 22.0f), (13.0f / 19.0f), (11.0f / 16.0f), (69.0f / 100.0f), (9.0f / 13.0f), (7.0f / 10.0f), (12.0f / 17.0f), (71.0f / 100.0f), (5.0f / 7.0f), (72.0f / 100.0f), (13.0f / 18.0f), (8.0f / 11.0f), (73.0f / 100.0f), (11.0f / 15.0f), (14.0f / 19.0f), (74.0f / 100.0f), (3.0f / 4.0f), (76.0f / 100.0f), (16.0f / 21.0f), (13.0f / 17.0f), (10.0f / 13.0f), (77.0f / 100.0f), (17.0f / 22.0f), (7.0f / 9.0f), (78.0f / 100.0f), (11.0f / 14.0f), (15.0f / 19.0f), (79.0f / 100.0f), (4.0f / 5.0f), (17.0f / 21.0f), (81.0f / 100.0f), (13.0f / 16.0f), (9.0f / 11.0f), (82.0f / 100.0f), (14.0f / 17.0f), (83.0f / 100.0f), (5.0f / 6.0f), (84.0f / 100.0f), (16.0f / 19.0f), (11.0f / 13.0f), (17.0f / 20.0f), (6.0f / 7.0f), (86.0f / 100.0f), (19.0f / 22.0f), (13.0f / 15.0f), (87.0f / 100.0f), (7.0f / 8.0f), (88.0f / 100.0f), (15.0f / 17.0f), (8.0f / 9.0f), (89.0f / 100.0f), (17.0f / 19.0f), (9.0f / 10.0f), (19.0f / 21.0f), (10.0f / 11.0f), (91.0f / 100.0f), (11.0f / 12.0f), (92.0f / 100.0f), (12.0f / 13.0f), (13.0f / 14.0f), (93.0f / 100.0f), (14.0f / 15.0f), (15.0f / 16.0f), (94.0f / 100.0f), (16.0f / 17.0f), (17.0f / 18.0f), (18.0f / 19.0f), (19.0f / 20.0f), (20.0f / 21.0f), (21.0f / 22.0f), (96.0f / 100.0f), (97.0f / 100.0f), (98.0f / 100.0f), (99.0f / 100.0f), (1.0f / 1.0f) , +static constexpr float quantized_values_one_zero[] = { (0.0f / 1.0f), (1.0f / 100.0f), (2.0f / 100.0f), (3.0f / 100.0f), (4.0f / 100.0f), (1.0f / 22.0f), (1.0f / 21.0f), (1.0f / 20.0f), (1.0f / 19.0f), (1.0f / 18.0f), (1.0f / 17.0f), (6.0f / 100.0f), (1.0f / 16.0f), (1.0f / 15.0f), (7.0f / 100.0f), (1.0f / 14.0f), (1.0f / 13.0f), (8.0f / 100.0f), (1.0f / 12.0f), (9.0f / 100.0f), (1.0f / 11.0f), (2.0f / 21.0f), (1.0f / 10.0f), (2.0f / 19.0f), (11.0f / 100.0f), (1.0f / 9.0f), (2.0f / 17.0f), (12.0f / 100.0f), (1.0f / 8.0f), (13.0f / 100.0f), (2.0f / 15.0f), (3.0f / 22.0f), (14.0f / 100.0f), (1.0f / 7.0f), (3.0f / 20.0f), (2.0f / 13.0f), (3.0f / 19.0f), (16.0f / 100.0f), (1.0f / 6.0f), (17.0f / 100.0f), (3.0f / 17.0f), (18.0f / 100.0f), (2.0f / 11.0f), (3.0f / 16.0f), (19.0f / 100.0f), (4.0f / 21.0f), (1.0f / 5.0f), (21.0f / 100.0f), (4.0f / 19.0f), (3.0f / 14.0f), (22.0f / 100.0f), (2.0f / 9.0f), (5.0f / 22.0f), (23.0f / 100.0f), (3.0f / 13.0f), (4.0f / 17.0f), (5.0f / 21.0f), (24.0f / 100.0f), (1.0f / 4.0f), (26.0f / 100.0f), (5.0f / 19.0f), (4.0f / 15.0f), (27.0f / 100.0f), (3.0f / 11.0f), (5.0f / 18.0f), (28.0f / 100.0f), (2.0f / 7.0f), (29.0f / 100.0f), (5.0f / 17.0f), (3.0f / 10.0f), (4.0f / 13.0f), (31.0f / 100.0f), 
(5.0f / 16.0f), (6.0f / 19.0f), (7.0f / 22.0f), (32.0f / 100.0f), (33.0f / 100.0f), (1.0f / 3.0f), (34.0f / 100.0f), (7.0f / 20.0f), (6.0f / 17.0f), (5.0f / 14.0f), (36.0f / 100.0f), (4.0f / 11.0f), (7.0f / 19.0f), (37.0f / 100.0f), (3.0f / 8.0f), (38.0f / 100.0f), (8.0f / 21.0f), (5.0f / 13.0f), (7.0f / 18.0f), (39.0f / 100.0f), (2.0f / 5.0f), (9.0f / 22.0f), (41.0f / 100.0f), (7.0f / 17.0f), (5.0f / 12.0f), (42.0f / 100.0f), (8.0f / 19.0f), (3.0f / 7.0f), (43.0f / 100.0f), (7.0f / 16.0f), (44.0f / 100.0f), (4.0f / 9.0f), (9.0f / 20.0f), (5.0f / 11.0f), (46.0f / 100.0f), (6.0f / 13.0f), (7.0f / 15.0f), (47.0f / 100.0f), (8.0f / 17.0f), (9.0f / 19.0f), (10.0f / 21.0f), (48.0f / 100.0f), (49.0f / 100.0f), (1.0f / 2.0f), (51.0f / 100.0f), (52.0f / 100.0f), (11.0f / 21.0f), (10.0f / 19.0f), (9.0f / 17.0f), (53.0f / 100.0f), (8.0f / 15.0f), (7.0f / 13.0f), (54.0f / 100.0f), (6.0f / 11.0f), (11.0f / 20.0f), (5.0f / 9.0f), (56.0f / 100.0f), (9.0f / 16.0f), (57.0f / 100.0f), (4.0f / 7.0f), (11.0f / 19.0f), (58.0f / 100.0f), (7.0f / 12.0f), (10.0f / 17.0f), (59.0f / 100.0f), (13.0f / 22.0f), (3.0f / 5.0f), (61.0f / 100.0f), (11.0f / 18.0f), (8.0f / 13.0f), (13.0f / 21.0f), (62.0f / 100.0f), (5.0f / 8.0f), (63.0f / 100.0f), (12.0f / 19.0f), (7.0f / 11.0f), (64.0f / 100.0f), (9.0f / 14.0f), (11.0f / 17.0f), (13.0f / 20.0f), (66.0f / 100.0f), (2.0f / 3.0f), (67.0f / 100.0f), (68.0f / 100.0f), (15.0f / 22.0f), (13.0f / 19.0f), (11.0f / 16.0f), (69.0f / 100.0f), (9.0f / 13.0f), (7.0f / 10.0f), (12.0f / 17.0f), (71.0f / 100.0f), (5.0f / 7.0f), (72.0f / 100.0f), (13.0f / 18.0f), (8.0f / 11.0f), (73.0f / 100.0f), (11.0f / 15.0f), (14.0f / 19.0f), (74.0f / 100.0f), (3.0f / 4.0f), (76.0f / 100.0f), (16.0f / 21.0f), (13.0f / 17.0f), (10.0f / 13.0f), (77.0f / 100.0f), (17.0f / 22.0f), (7.0f / 9.0f), (78.0f / 100.0f), (11.0f / 14.0f), (15.0f / 19.0f), (79.0f / 100.0f), (4.0f / 5.0f), (17.0f / 21.0f), (81.0f / 100.0f), (13.0f / 16.0f), (9.0f / 11.0f), (82.0f / 100.0f), (14.0f / 17.0f), (83.0f / 100.0f), (5.0f / 6.0f), (84.0f / 100.0f), (16.0f / 19.0f), (11.0f / 13.0f), (17.0f / 20.0f), (6.0f / 7.0f), (86.0f / 100.0f), (19.0f / 22.0f), (13.0f / 15.0f), (87.0f / 100.0f), (7.0f / 8.0f), (88.0f / 100.0f), (15.0f / 17.0f), (8.0f / 9.0f), (89.0f / 100.0f), (17.0f / 19.0f), (9.0f / 10.0f), (19.0f / 21.0f), (10.0f / 11.0f), (91.0f / 100.0f), (11.0f / 12.0f), (92.0f / 100.0f), (12.0f / 13.0f), (13.0f / 14.0f), (93.0f / 100.0f), (14.0f / 15.0f), (15.0f / 16.0f), (94.0f / 100.0f), (16.0f / 17.0f), (17.0f / 18.0f), (18.0f / 19.0f), (19.0f / 20.0f), (20.0f / 21.0f), (21.0f / 22.0f), (96.0f / 100.0f), (97.0f / 100.0f), (98.0f / 100.0f), (99.0f / 100.0f), (1.0f / 1.0f) , 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 1.0f }; // clang-format on class numpy { public: + + static float sqrt(float x) { +#if EIDSP_USE_CMSIS_DSP + float temp; + arm_sqrt_f32(x, &temp); + return temp; +#else + return sqrtf(x); +#endif + } + /** * Roll array elements along a given axis. * Elements that roll beyond the last position are re-introduced at the first. 
@@ -246,7 +273,7 @@ class numpy { * @param out_matrix Pointer to out matrix (MxK) * @returns EIDSP_OK if OK */ - static inline int dot_by_row(int i, float *row, uint32_t matrix1_cols, matrix_t *matrix2, matrix_t *out_matrix) { + static int dot_by_row(int i, float *row, uint32_t matrix1_cols, matrix_t *matrix2, matrix_t *out_matrix) { if (matrix1_cols != matrix2->rows) { EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } @@ -286,7 +313,7 @@ class numpy { * @param out_matrix Pointer to out matrix (MxK) * @returns EIDSP_OK if OK */ - static inline int dot_by_row(int i, float *row, size_t matrix1_cols, + static int dot_by_row(int i, float *row, size_t matrix1_cols, quantized_matrix_t *matrix2, matrix_t *out_matrix) { if (matrix1_cols != matrix2->rows) { @@ -307,12 +334,55 @@ class numpy { return EIDSP_OK; } + static void transpose_in_place(matrix_t *matrix) { + // Don't bother if either dim is one, just need to swap the dimension sizes + if( matrix->rows != 1 && matrix->cols != 1) { + size_t size = matrix->cols * matrix->rows - 1; + float temp; // temp for swap + size_t next; // next item to swap + size_t cycleBegin; // index of start of cycle + size_t i; // location in matrix + size_t all_done_mark = 1; + ei_vector done(size+1,false); + + i = 1; // Note that matrix[0] and last element of matrix won't move + while (1) + { + cycleBegin = i; + temp = matrix->buffer[i]; + do + { + size_t col = i % matrix->cols; + size_t row = i / matrix->cols; + // swap row and col to make new idx, b/c we want to know where in the transposed matrix + next = col*matrix->rows + row; + float temp2 = matrix->buffer[next]; + matrix->buffer[next] = temp; + temp = temp2; + done[next] = true; + i = next; + } + while (i != cycleBegin); + + // start next cycle by find next not done + for (i = all_done_mark; done[i]; i++) { + all_done_mark++; // move the high water mark so we don't look again + if(i>=size) { goto LOOP_END; } + } + } + } + LOOP_END: + // finally, swap the row and column dimensions + std::swap(matrix->rows, matrix->cols); + } + /** - * Transpose an array in place (from MxN to NxM) + * Transpose an array, souce is destination (from MxN to NxM) * Note: this temporary allocates a copy of the matrix on the heap. 
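The new numpy::transpose_in_place() above replaces the heap copy used by the older transpose() with a permutation-cycle walk guarded by a small done bitmap. For readers who want to check the index arithmetic, here is a standalone sketch of the same idea; it illustrates the technique and is not the SDK implementation.

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    // Move each element of a row-major rows x cols matrix straight to its
    // transposed position by chasing permutation cycles.
    static void transpose_in_place(float *m, size_t rows, size_t cols) {
        const size_t n = rows * cols;
        if (rows <= 1 || cols <= 1 || n < 2) return;       // layout is already identical
        std::vector<bool> done(n, false);
        for (size_t start = 1; start + 1 < n; ++start) {   // first and last element never move
            if (done[start]) continue;
            size_t i = start;
            float carry = m[i];
            do {
                // destination of the element currently held in carry
                size_t next = (i % cols) * rows + (i / cols);
                float displaced = m[next];
                m[next] = carry;
                carry = displaced;
                done[next] = true;
                i = next;
            } while (i != start);                          // cycle closed; leftover carry is the stale start value
        }
    }

    int main() {
        float m[6] = {1, 2, 3, 4, 5, 6};                   // 2x3
        transpose_in_place(m, 2, 3);                       // expect 1 4 2 5 3 6 (3x2)
        for (float v : m) std::printf("%g ", v);
        std::printf("\n");
        return 0;
    }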
* @param matrix * @param rows * @param columns + * @deprecated You probably want to use transpose_in_place * @returns EIDSP_OK if OK */ static int transpose(matrix_t *matrix) { @@ -330,26 +400,12 @@ class numpy { return EIDSP_OK; } - static int transpose(matrix_i16_t *matrix) { - int r = transpose(matrix->buffer, matrix->cols, matrix->rows); - if (r != 0) { - return r; - } - - uint16_t old_rows = matrix->rows; - uint16_t old_cols = matrix->cols; - - matrix->rows = old_cols; - matrix->cols = old_rows; - - return EIDSP_OK; - } - /** - * Transpose an array in place (from MxN to NxM) + * Transpose an array, source is destination (from MxN to NxM) * @param matrix * @param rows * @param columns + * @deprecated You probably want to use transpose_in_place * @returns EIDSP_OK if OK */ static int transpose(float *matrix, int rows, int columns) { @@ -390,40 +446,6 @@ class numpy { return EIDSP_OK; } - static int transpose(EIDSP_i16 *matrix, int rows, int columns) { - EI_DSP_i16_MATRIX(temp_matrix, rows, columns); - if (!temp_matrix.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - -#if EIDSP_USE_CMSIS_FiXED - const arm_matrix_instance_q15 i_m = { - static_cast(columns), - static_cast(rows), - matrix - }; - arm_matrix_instance_q15 o_m = { - static_cast(rows), - static_cast(columns), - temp_matrix.buffer - }; - arm_status status = arm_mat_trans_q15(&i_m, &o_m); - if (status != ARM_MATH_SUCCESS) { - return status; - } -#else - for (int j = 0; j < rows; j++){ - for (int i = 0; i < columns; i++){ - temp_matrix.buffer[j * columns + i] = matrix[i * rows + j]; - } - } -#endif - - memcpy(matrix, temp_matrix.buffer, rows * columns * sizeof(EIDSP_i16)); - - return EIDSP_OK; - } - /** * Transpose an array in place (from MxN to NxM) * Note: this temporary allocates a copy of the matrix on the heap. @@ -670,66 +692,6 @@ class numpy { return EIDSP_OK; } - /** - * Scale a q15 matrix in place, per row - * @todo Now works for scale values between 0 and 1. Should also work for bigger values. - * @param matrix Input matrix (MxN) - * @param scale_matrix Scale matrix (Mx1) - * @returns 0 if OK - */ - static int scale(matrix_i16_t *matrix, float scale) { - if (scale == 1.0f) return EIDSP_OK; - else if(scale > 1.0f) return EIDSP_PARAMETER_INVALID; - - EIDSP_i16 scale_i16; - float_to_int16(&scale, &scale_i16, 1); - -#if EIDSP_USE_CMSIS_FiXED - const arm_matrix_instance_q15 mi = {(uint16_t)matrix->rows, (uint16_t)matrix->cols, matrix->buffer }; - arm_matrix_instance_q15 mo = { (uint16_t)matrix->rows, (uint16_t)matrix->cols, matrix->buffer }; - int status = arm_mat_scale_q15(&mi, scale_i16, 0, &mo); - if (status != ARM_MATH_SUCCESS) { - return status; - } -#else - for (size_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { - int32_t prod = (int32_t)matrix->buffer[ix] * scale_i16; - matrix->buffer[ix] = saturate((EIDSP_i16)(prod >> 15), 16); - } -#endif - return EIDSP_OK; - } - - /** - * Scale a q31 matrix in place, per row - * @todo Now works for scale values between 0 and 1. Should also work for bigger values. 
- * @param matrix Input matrix (MxN) - * @param scale_matrix Scale matrix (Mx1) - * @returns 0 if OK - */ - static int scale(matrix_i32_t *matrix, float scale) { - if (scale == 1.0f) return EIDSP_OK; - else if(scale > 1.0f) return EIDSP_PARAMETER_INVALID; - - EIDSP_i32 scale_i32; - float_to_int32(&scale, &scale_i32, 1); - -#if EIDSP_USE_CMSIS_FiXED - const arm_matrix_instance_q31 mi = { (uint16_t)matrix->rows, (uint16_t)matrix->cols, matrix->buffer }; - arm_matrix_instance_q31 mo = { (uint16_t)matrix->rows, (uint16_t)matrix->cols, matrix->buffer }; - int status = arm_mat_scale_q31(&mi, scale_i32, 0, &mo); - if (status != ARM_MATH_SUCCESS) { - return status; - } -#else - for (size_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { - int64_t prod = (int64_t)matrix->buffer[ix] * scale_i32; - matrix->buffer[ix] = saturate((EIDSP_i32)(prod >> 31), 32); - } -#endif - return EIDSP_OK; - } - /** * Scale a matrix in place, per row @@ -809,19 +771,6 @@ class numpy { return EIDSP_OK; } - /** - * Subtract q15 from matrix in place - * @param matrix - * @param subtraction - * @returns 0 if OK - */ - static int subtract(matrix_i16_t *matrix, EIDSP_i16 subtraction) { - for (uint32_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { - matrix->buffer[ix] -= subtraction; - } - return EIDSP_OK; - } - /** * Add on a matrix in place, per row * @param matrix Input matrix (MxN) @@ -848,32 +797,6 @@ class numpy { return EIDSP_OK; } - /** - * Subract subtract_matrix from matrix - * @param matrix Input matrix (MxN) - * @param add Scale matrix (Mx1) - * @returns 0 if OK - */ - static int subtract(matrix_i16_t *matrix, matrix_i16_t *subtract_matrix) { - if (matrix->rows != subtract_matrix->rows) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (subtract_matrix->cols != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - for (size_t row = 0; row < matrix->rows; row++) { - EI_DSP_i16_MATRIX_B(temp, 1, matrix->cols, matrix->buffer + (row * matrix->cols)); - int ret = subtract(&temp, subtract_matrix->buffer[row]); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } - } - - return EIDSP_OK; - } - /** * Calculate the root mean square of a matrix, one per row * @param matrix Matrix of size (MxN) @@ -907,39 +830,6 @@ class numpy { return EIDSP_OK; } - /** - * Calculate the root mean square of a q15 matrix, one per row - * @param matrix Matrix of size (MxN) - * @param output_matrix Matrix of size (Mx1) - * @returns 0 if OK - */ - static int rms(matrix_i16_t *matrix, matrix_i16_t *output_matrix) { - if (matrix->rows != output_matrix->rows) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (output_matrix->cols != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - for (size_t row = 0; row < matrix->rows; row++) { -#if EIDSP_USE_CMSIS_FiXED - EIDSP_i16 rms_result; - arm_rms_q15(matrix->buffer + (row * matrix->cols), matrix->cols, &rms_result); - output_matrix->buffer[row] = rms_result; -#else - int64_t sum = 0; - for(size_t ix = 0; ix < matrix->cols; ix++) { - int32_t v = matrix->buffer[(row * matrix->cols) + ix]; - sum += (int64_t)abs(v * v); - } - sqrt_q15(saturate((sum / matrix->cols)>>15, 16UL), &output_matrix->buffer[row]); -#endif - } - - return EIDSP_OK; - } - /** * Calculate the mean over a matrix per row * @param input_matrix Input matrix (MxN) @@ -972,38 +862,6 @@ class numpy { return EIDSP_OK; } - /** - * Calculate the mean over a q15 matrix per row - * @param input_matrix Input matrix (MxN) - * @param output_matrix Output matrix (Mx1) - */ - static int mean(matrix_i16_t *input_matrix, matrix_i16_t 
*output_matrix) { - if (input_matrix->rows != output_matrix->rows) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - if (output_matrix->cols != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - for (size_t row = 0; row < input_matrix->rows; row++) { -#if EIDSP_USE_CMSIS_FiXED - EIDSP_i16 mean; - arm_mean_q15(input_matrix->buffer + (row * input_matrix->cols), input_matrix->cols, &mean); - output_matrix->buffer[row] = mean; -#else - int32_t sum = 0.0f; - - for (size_t col = 0; col < input_matrix->cols; col++) { - sum += input_matrix->buffer[( row * input_matrix->cols ) + col]; - } - - output_matrix->buffer[row] = (EIDSP_i16)(sum / input_matrix->cols); -#endif - } - - return EIDSP_OK; - } - /** * Calculate the mean over a matrix on axis 0 * @param input_matrix Input matrix (MxN) @@ -1226,7 +1084,11 @@ class numpy { arm_sqrt_f32(var * var * var, &var); // Calculate skew = (m_3) / (variance)^(3/2) - output_matrix->buffer[row] = m_3 / var; + if (var == 0.0f) { + output_matrix->buffer[row] = 0.0f; + } else { + output_matrix->buffer[row] = m_3 / var; + } #else float sum = 0.0f; float mean; @@ -1254,7 +1116,11 @@ class numpy { m_2 = sqrt(m_2 * m_2 * m_2); // Calculate skew = (m_3) / (m_2)^(3/2) - output_matrix->buffer[row] = m_3 / m_2; + if (m_2 == 0.0f) { + output_matrix->buffer[row] = 0.0f; + } else { + output_matrix->buffer[row] = m_3 / m_2; + } #endif } @@ -1288,7 +1154,12 @@ class numpy { cmsis_arm_fourth_moment(&input_matrix->buffer[(row * input_matrix->cols)], input_matrix->cols, mean, &m_4); // Calculate Fisher kurtosis = (m_4 / variance^2) - 3 - output_matrix->buffer[row] = (m_4 / (var * var)) - 3; + var = var * var; + if (var == 0.0f) { + output_matrix->buffer[row] = -3.0f; + } else { + output_matrix->buffer[row] = (m_4 / var) - 3.0f; + } #else // Calculate the mean float mean = 0.0f; @@ -1316,13 +1187,18 @@ class numpy { // Square the variance variance = variance * variance; // Calculate Fisher kurtosis = (m_4 / variance^2) - 3 - output_matrix->buffer[row] = (m_4 / variance) - 3; + if (variance == 0.0f) { + output_matrix->buffer[row] = -3.0f; + } else { + output_matrix->buffer[row] = (m_4 / variance) - 3.0f; + } #endif } return EIDSP_OK; } + /** * Compute the one-dimensional discrete Fourier Transform for real input. * This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of @@ -1366,7 +1242,7 @@ class numpy { else { // hardware acceleration only works for the powers above... arm_rfft_fast_instance_f32 rfft_instance; - arm_status status = arm_rfft_fast_init_f32(&rfft_instance, n_fft); + int status = cmsis_rfft_init_f32(&rfft_instance, n_fft); if (status != ARM_MATH_SUCCESS) { return status; } @@ -1400,6 +1276,7 @@ class numpy { return EIDSP_OK; } + /** * Compute the one-dimensional discrete Fourier Transform for real input. 
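The two hunks above guard the skewness and kurtosis divisions against a zero variance, so a constant row now yields 0 and -3 instead of a division by zero. Below is a compact reference sketch of the guarded formulas (Fisher skewness m3 / m2^(3/2), Fisher kurtosis m4 / m2^2 - 3, on population central moments); it mirrors the intent of the change, not the SDK's CMSIS code path.

    #include <cmath>
    #include <cstdio>

    // Population central moments, with the same zero-variance fallbacks as the patch.
    static void skew_kurtosis(const float *x, int n, float *skew, float *kurt) {
        float mean = 0.0f;
        for (int i = 0; i < n; i++) mean += x[i];
        mean /= n;

        float m2 = 0.0f, m3 = 0.0f, m4 = 0.0f;
        for (int i = 0; i < n; i++) {
            float d = x[i] - mean;
            m2 += d * d;
            m3 += d * d * d;
            m4 += d * d * d * d;
        }
        m2 /= n; m3 /= n; m4 /= n;

        *skew = (m2 == 0.0f) ? 0.0f  : m3 / std::sqrt(m2 * m2 * m2);
        *kurt = (m2 == 0.0f) ? -3.0f : m4 / (m2 * m2) - 3.0f;
    }

    int main() {
        float constant[4] = {2, 2, 2, 2};
        float ramp[4]     = {1, 2, 3, 4};
        float s, k;
        skew_kurtosis(constant, 4, &s, &k);
        std::printf("constant: skew=%g kurtosis=%g\n", s, k);   // 0, -3
        skew_kurtosis(ramp, 4, &s, &k);
        std::printf("ramp:     skew=%g kurtosis=%g\n", s, k);   // 0, -1.36
        return 0;
    }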
* This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of @@ -1410,7 +1287,7 @@ class numpy { * @param output_size Size of the output buffer, should be n_fft / 2 + 1 * @returns 0 if OK */ - static int rfft(const EIDSP_i16 *src, size_t src_size, EIDSP_i16 *output, size_t output_size, size_t n_fft) { + static int rfft(const float *src, size_t src_size, fft_complex_t *output, size_t output_size, size_t n_fft) { size_t n_fft_out_features = (n_fft / 2) + 1; if (output_size != n_fft_out_features) { EIDSP_ERR(EIDSP_BUFFER_SIZE_MISMATCH); @@ -1422,47 +1299,64 @@ class numpy { } // declare input and output arrays - EI_DSP_i16_MATRIX(fft_input, 1, n_fft << 1); + float *fft_input_buffer = NULL; + if (src_size == n_fft) { + fft_input_buffer = (float*)src; + } - // copy from src to fft_input - memcpy(fft_input.buffer, src, src_size * sizeof(EIDSP_i16)); - // pad to the rigth with zeros - memset(fft_input.buffer + src_size, 0, (n_fft - src_size) * sizeof(EIDSP_i16)); + EI_DSP_MATRIX_B(fft_input, 1, n_fft, fft_input_buffer); + if (!fft_input.buffer) { + EIDSP_ERR(EIDSP_OUT_OF_MEM); + } + + if (!fft_input_buffer) { + // copy from src to fft_input + memcpy(fft_input.buffer, src, src_size * sizeof(float)); + // pad to the rigth with zeros + memset(fft_input.buffer + src_size, 0, (n_fft - src_size) * sizeof(float)); + } -#if EIDSP_USE_CMSIS_FIXED +#if EIDSP_USE_CMSIS_DSP if (n_fft != 32 && n_fft != 64 && n_fft != 128 && n_fft != 256 && n_fft != 512 && n_fft != 1024 && n_fft != 2048 && n_fft != 4096) { - EIDSP_ERR(EIDSP_PARAMETER_INVALID); //TODO zero pad so we can use anyway` - } else { + int ret = software_rfft(fft_input.buffer, output, n_fft, n_fft_out_features); + if (ret != EIDSP_OK) { + EIDSP_ERR(ret); + } + } + else { // hardware acceleration only works for the powers above... - arm_rfft_instance_q15 rfft_instance; - arm_status status = arm_rfft_init_q15(&rfft_instance, n_fft, 0, 1); + arm_rfft_fast_instance_f32 rfft_instance; + int status = cmsis_rfft_init_f32(&rfft_instance, n_fft); if (status != ARM_MATH_SUCCESS) { - return (int)status; + return status; } - EI_DSP_i16_MATRIX(fft_output, 1, n_fft << 1); + EI_DSP_MATRIX(fft_output, 1, n_fft); if (!fft_output.buffer) { EIDSP_ERR(EIDSP_OUT_OF_MEM); } - arm_rfft_q15(&rfft_instance, fft_input.buffer, fft_output.buffer); + arm_rfft_fast_f32(&rfft_instance, fft_input.buffer, fft_output.buffer, 0); - output[0] = fft_output.buffer[0]; - output[n_fft_out_features - 1] = fft_output.buffer[1]; + output[0].r = fft_output.buffer[0]; + output[0].i = 0.0f; + output[n_fft_out_features - 1].r = fft_output.buffer[1]; + output[n_fft_out_features - 1].i = 0.0f; size_t fft_output_buffer_ix = 2; for (size_t ix = 1; ix < n_fft_out_features - 1; ix += 1) { - EIDSP_i16 rms_result; - arm_rms_q15(&fft_output.buffer[fft_output_buffer_ix], 2, &rms_result); - output[ix] = (EIDSP_i16)saturate((int32_t)rms_result * ((int32_t)(1.414213562f * (1 << 15))) >> 15, 16); /* sqrt(2) */ + output[ix].r = fft_output.buffer[fft_output_buffer_ix]; + output[ix].i = fft_output.buffer[fft_output_buffer_ix + 1]; fft_output_buffer_ix += 2; } } #else - #error("No DSP lib defined! Use CMSIS-DSP for C implementation ( #define EIDSP_USE_CMSIS_DSP 1 )") -} + int ret = software_rfft(fft_input.buffer, output, n_fft, n_fft_out_features); + if (ret != EIDSP_OK) { + EIDSP_ERR(ret); + } #endif return EIDSP_OK; @@ -1470,253 +1364,23 @@ class numpy { /** - * Compute the one-dimensional discrete Fourier Transform for real input. 
- * This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of - * a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT). - * @param src Source buffer - * @param src_size Size of the source buffer - * @param output Output buffer - * @param output_size Size of the output buffer, should be n_fft / 2 + 1 + * Return evenly spaced numbers over a specified interval. + * Returns num evenly spaced samples, calculated over the interval [start, stop]. + * The endpoint of the interval can optionally be excluded. + * + * Based on https://github.com/ntessore/algo/blob/master/linspace.c + * Licensed in public domain (see LICENSE in repository above) + * + * @param start The starting value of the sequence. + * @param stop The end value of the sequence. + * @param number Number of samples to generate. + * @param out Out array, with size `number` * @returns 0 if OK */ - static int rfft(const EIDSP_i32 *src, size_t src_size, EIDSP_i32 *output, size_t output_size, size_t n_fft) { - size_t n_fft_out_features = (n_fft / 2) + 1; - if (output_size != n_fft_out_features) { - EIDSP_ERR(EIDSP_BUFFER_SIZE_MISMATCH); - } - - // truncate if needed - if (src_size > n_fft) { - src_size = n_fft; - } - - // declare input and output arrays - EI_DSP_i32_MATRIX(fft_input, 1, n_fft << 1); - - if (!fft_input.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - // copy from src to fft_input - memcpy(fft_input.buffer, src, src_size * sizeof(EIDSP_i32)); - // pad to the rigth with zeros - memset(fft_input.buffer + src_size, 0, (n_fft - src_size) * sizeof(EIDSP_i32)); - -#if EIDSP_USE_CMSIS_FIXED - if (n_fft != 32 && n_fft != 64 && n_fft != 128 && n_fft != 256 && - n_fft != 512 && n_fft != 1024 && n_fft != 2048 && n_fft != 4096) { - EIDSP_ERR(EIDSP_PARAMETER_INVALID); - } else { - // hardware acceleration only works for the powers above... - arm_rfft_instance_q31 rfft_instance; - arm_status status = arm_rfft_init_q31(&rfft_instance, n_fft, 0, 1); - if (status != ARM_MATH_SUCCESS) { - return status; - } - - // EI_DSP_i16_MATRIX(fft_output, 1, n_fft << 1); - matrix_i32_t fft_output(1, n_fft << 1); - - if (!fft_output.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - arm_rfft_q31(&rfft_instance, (EIDSP_i32 *)fft_input.buffer, (EIDSP_i32 *)fft_output.buffer); - - output[0] = fft_output.buffer[0]; - output[n_fft_out_features - 1] = fft_output.buffer[1]; - - size_t fft_output_buffer_ix = 2; - for (size_t ix = 1; ix < n_fft_out_features - 1; ix += 1) { - EIDSP_i32 rms_result; - arm_rms_q31((EIDSP_i32 *)&fft_output.buffer[fft_output_buffer_ix], 2, &rms_result); - output[ix] = (EIDSP_i32)saturate((int64_t)rms_result * ((int64_t)(1.414213562f * 2147483648.f)) >> 31, 32); /* sqrt(2) */ - - fft_output_buffer_ix += 2; - } - } -#else - #error("No DSP lib defined! Use CMSIS-DSP for C implementation ( #define EIDSP_USE_CMSIS_FIXED 1 )") -#endif - - return EIDSP_OK; - } - - /** - * Compute the one-dimensional discrete Fourier Transform for real input. - * This function computes the one-dimensional n-point discrete Fourier Transform (DFT) of - * a real-valued array by means of an efficient algorithm called the Fast Fourier Transform (FFT). 
- * @param src Source buffer - * @param src_size Size of the source buffer - * @param output Output buffer - * @param output_size Size of the output buffer, should be n_fft / 2 + 1 - * @returns 0 if OK - */ - static int rfft(const float *src, size_t src_size, fft_complex_t *output, size_t output_size, size_t n_fft) { - size_t n_fft_out_features = (n_fft / 2) + 1; - if (output_size != n_fft_out_features) { - EIDSP_ERR(EIDSP_BUFFER_SIZE_MISMATCH); - } - - // truncate if needed - if (src_size > n_fft) { - src_size = n_fft; - } - - // declare input and output arrays - float *fft_input_buffer = NULL; - if (src_size == n_fft) { - fft_input_buffer = (float*)src; - } - - EI_DSP_MATRIX_B(fft_input, 1, n_fft, fft_input_buffer); - if (!fft_input.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - if (!fft_input_buffer) { - // copy from src to fft_input - memcpy(fft_input.buffer, src, src_size * sizeof(float)); - // pad to the rigth with zeros - memset(fft_input.buffer + src_size, 0, (n_fft - src_size) * sizeof(float)); - } - -#if EIDSP_USE_CMSIS_DSP - if (n_fft != 32 && n_fft != 64 && n_fft != 128 && n_fft != 256 && - n_fft != 512 && n_fft != 1024 && n_fft != 2048 && n_fft != 4096) { - int ret = software_rfft(fft_input.buffer, output, n_fft, n_fft_out_features); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } - } - else { - // hardware acceleration only works for the powers above... - arm_rfft_fast_instance_f32 rfft_instance; - arm_status status = arm_rfft_fast_init_f32(&rfft_instance, n_fft); - if (status != ARM_MATH_SUCCESS) { - return status; - } - - EI_DSP_MATRIX(fft_output, 1, n_fft); - if (!fft_output.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - arm_rfft_fast_f32(&rfft_instance, fft_input.buffer, fft_output.buffer, 0); - - output[0].r = fft_output.buffer[0]; - output[0].i = 0.0f; - output[n_fft_out_features - 1].r = fft_output.buffer[1]; - output[n_fft_out_features - 1].i = 0.0f; - - size_t fft_output_buffer_ix = 2; - for (size_t ix = 1; ix < n_fft_out_features - 1; ix += 1) { - output[ix].r = fft_output.buffer[fft_output_buffer_ix]; - output[ix].i = fft_output.buffer[fft_output_buffer_ix + 1]; - - fft_output_buffer_ix += 2; - } - } -#else - int ret = software_rfft(fft_input.buffer, output, n_fft, n_fft_out_features); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } -#endif - - return EIDSP_OK; - } - - static int rfft(const EIDSP_i16 *src, size_t src_size, fft_complex_i16_t *output, size_t output_size, size_t n_fft) { - size_t n_fft_out_features = (n_fft / 2) + 1; - if (output_size != n_fft_out_features) { - EIDSP_ERR(EIDSP_BUFFER_SIZE_MISMATCH); - } - - // truncate if needed - if (src_size > n_fft) { - src_size = n_fft; - } - - // declare input and output arrays - EIDSP_i16 *fft_input_buffer = NULL; - if (src_size == n_fft) { - fft_input_buffer = (EIDSP_i16*)src; - } - - EI_DSP_i16_MATRIX_B(fft_input, 1, n_fft, fft_input_buffer); - if (!fft_input.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - if (!fft_input_buffer) { - // copy from src to fft_input - memcpy(fft_input.buffer, src, src_size * sizeof(EIDSP_i16)); - // pad to the rigth with zeros - memset(fft_input.buffer + src_size, 0, (n_fft - src_size) * sizeof(EIDSP_i16)); - } - -#if EIDSP_USE_CMSIS_FIXED - if (n_fft != 32 && n_fft != 64 && n_fft != 128 && n_fft != 256 && - n_fft != 512 && n_fft != 1024 && n_fft != 2048 && n_fft != 4096) { - EIDSP_ERR(EIDSP_PARAMETER_INVALID); // fixed fft lib does not support arbitrary input length - } - else { - // hardware acceleration only works for the powers above... 
- arm_rfft_instance_q15 rfft_instance; - arm_status status = arm_rfft_init_q15(&rfft_instance, n_fft, 0, 1); - if (status != ARM_MATH_SUCCESS) { - return status; - } - - EI_DSP_i16_MATRIX(fft_output, 1, n_fft << 1); - if (!fft_output.buffer) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - arm_rfft_q15(&rfft_instance, fft_input.buffer, fft_output.buffer); - - output[0].r = fft_output.buffer[0]; - output[0].i = 0.0f; - output[n_fft_out_features - 1].r = fft_output.buffer[1]; - output[n_fft_out_features - 1].i = 0.0f; - - size_t fft_output_buffer_ix = 2; - for (size_t ix = 1; ix < n_fft_out_features - 1; ix += 1) { - output[ix].r = fft_output.buffer[fft_output_buffer_ix]; - output[ix].i = fft_output.buffer[fft_output_buffer_ix + 1]; - - fft_output_buffer_ix += 2; - } - } -#else - int ret = software_rfft(fft_input.buffer, output, n_fft, n_fft_out_features); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } -#endif - - return EIDSP_OK; - } - - /** - * Return evenly spaced numbers over a specified interval. - * Returns num evenly spaced samples, calculated over the interval [start, stop]. - * The endpoint of the interval can optionally be excluded. - * - * Based on https://github.com/ntessore/algo/blob/master/linspace.c - * Licensed in public domain (see LICENSE in repository above) - * - * @param start The starting value of the sequence. - * @param stop The end value of the sequence. - * @param number Number of samples to generate. - * @param out Out array, with size `number` - * @returns 0 if OK - */ - static int linspace(float start, float stop, uint32_t number, float *out) - { - if (number < 1 || !out) { - EIDSP_ERR(EIDSP_PARAMETER_INVALID); + static int linspace(float start, float stop, uint32_t number, float *out) + { + if (number < 1 || !out) { + EIDSP_ERR(EIDSP_PARAMETER_INVALID); } if (number == 1) { @@ -1738,45 +1402,6 @@ class numpy { return EIDSP_OK; } - /** - * Return evenly spaced q15 numbers over a specified interval. - * Returns num evenly spaced samples, calculated over the interval [start, stop]. - * The endpoint of the interval can optionally be excluded. - * - * Based on https://github.com/ntessore/algo/blob/master/linspace.c - * Licensed in public domain (see LICENSE in repository above) - * - * @param start The starting value of the sequence. - * @param stop The end value of the sequence. - * @param number Number of samples to generate. - * @param out Out array, with size `number` - * @returns 0 if OK - */ - static int linspace(EIDSP_i16 start, EIDSP_i16 stop, uint32_t number, EIDSP_i16 *out) - { - if (number < 1 || !out) { - EIDSP_ERR(EIDSP_PARAMETER_INVALID); - } - - if (number == 1) { - out[0] = start; - return EIDSP_OK; - } - - // step size - EIDSP_i16 step = (stop - start) / (number - 1); - - // do steps - for (uint32_t ix = 0; ix < number - 1; ix++) { - out[ix] = start + ix * step; - } - - // last entry always stop - out[number - 1] = stop; - - return EIDSP_OK; - } - /** * Return evenly spaced q31 numbers over a specified interval. * Returns num evenly spaced samples, calculated over the interval [start, stop]. 
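// Usage sketch (illustrative, not part of the diff): with the fixed-point
// overload removed, the float linspace() above is what remains. A typical use
// in this file's context is building the frequency axis that matches an
// n_fft-point rfft. Assumes the SDK headers are on the include path.
#include "edge-impulse-sdk/dsp/numpy.hpp"

static int frequency_axis_sketch(float sampling_freq) {
    const size_t n_fft = 128;
    float freqs[n_fft / 2 + 1];
    // bins run from DC (0 Hz) up to the Nyquist frequency (sampling_freq / 2)
    return ei::numpy::linspace(0.0f, sampling_freq / 2.0f, n_fft / 2 + 1, freqs);
}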
@@ -1817,94 +1442,30 @@ class numpy { } /** - * Convert an int32_t buffer into a float buffer, maps to -1..1 - * @param input - * @param output - * @param length - * @returns 0 if OK - */ - static int int32_to_float(const EIDSP_i32 *input, float *output, size_t length) { -#if EIDSP_USE_CMSIS_FiXED - arm_q31_to_float((q31_t *)input, output, length); -#else - for (size_t ix = 0; ix < length; ix++) { - output[ix] = (float)(input[ix]) / 2147483648.f; - } -#endif - return EIDSP_OK; - } - - /** - * Convert an float buffer into a fixedpoint 32 bit buffer, input values are - * limited between -1 and 1 - * @param input - * @param output - * @param length - * @returns 0 if OK - */ - static int float_to_int32(const float *input, EIDSP_i32 *output, size_t length) { -#if EIDSP_USE_CMSIS_FiXED - arm_float_to_q31((float *)input, (q31_t *)output, length); -#else - for (size_t ix = 0; ix < length; ix++) { - output[ix] = (EIDSP_i32)saturate((int64_t)(input[ix] * 2147483648.f), 32); - } -#endif - return EIDSP_OK; - } - - /** - * Convert an int16_t buffer into a float buffer, maps to -1..1 + * Convert an int16_t buffer into a float buffer * @param input * @param output * @param length * @returns 0 if OK */ static int int16_to_float(const EIDSP_i16 *input, float *output, size_t length) { -#if EIDSP_USE_CMSIS_FiXED - arm_q15_to_float((q15_t *)input, output, length); -#else - for (size_t ix = 0; ix < length; ix++) { - output[ix] = (float)(input[ix]) / 32768.f; - } -#endif - return EIDSP_OK; - } - - /** - * Convert an float buffer into a fixedpoint 16 bit buffer, input values are - * limited between -1 and 1 - * @param input - * @param output - * @param length - * @returns 0 if OK - */ - static int float_to_int16(const float *input, EIDSP_i16 *output, size_t length) { -#if EIDSP_USE_CMSIS_FiXED - arm_float_to_q15((float *)input, output, length); -#else for (size_t ix = 0; ix < length; ix++) { - output[ix] = (EIDSP_i16)saturate((int32_t)(input[ix] * 32768.f), 16); + output[ix] = static_cast((input[ix])); } -#endif return EIDSP_OK; } /** - * Convert an int8_t buffer into a float buffer, maps to -1..1 + * Convert an int8_t buffer into a float buffer * @param input * @param output * @param length * @returns 0 if OK */ static int int8_to_float(const EIDSP_i8 *input, float *output, size_t length) { -#if EIDSP_USE_CMSIS_FiXED - arm_q7_to_float((q7_t *)input, output, length); -#else for (size_t ix = 0; ix < length; ix++) { - output[ix] = (float)(input[ix]) / 128; + output[ix] = static_cast((input[ix])); } -#endif return EIDSP_OK; } @@ -1918,7 +1479,7 @@ class numpy { * @param signal Output signal * @returns EIDSP_OK if ok */ - static int signal_from_buffer(float *data, size_t data_size, signal_t *signal) + static int signal_from_buffer(const float *data, size_t data_size, signal_t *signal) { signal->total_length = data_size; #ifdef __MBED__ @@ -1931,27 +1492,7 @@ class numpy { return EIDSP_OK; } - static int signal_from_buffer_i16(EIDSP_i16 *data_i16, size_t data_size, signal_i16_t *signal) - { - signal->total_length = data_size; -#ifdef __MBED__ - signal->get_data = mbed::callback(&numpy::signal_get_data_i16, data_i16); -#else - signal->get_data = [data_i16](size_t offset, size_t length, EIDSP_i16 *out_ptr) { - return numpy::signal_get_data_i16(data_i16, offset, length, out_ptr); - }; #endif - return EIDSP_OK; - } -#endif - - static int signal_from_buffer_pointer_function_q15(size_t data_size, signal_i16_t *signal, int (*data_i16)(size_t, size_t, EIDSP_i16 *)) - { - signal->total_length = data_size; - signal->get_data = 
data_i16; - - return EIDSP_OK; - } #if defined ( __GNUC__ ) #pragma GCC diagnostic push @@ -1967,20 +1508,17 @@ class numpy { */ __attribute__((always_inline)) static inline float log(float a) { - float m, r, s, t, i, f; - int32_t e, g; - - g = (int32_t) * ((int32_t *)&a); - e = (g - 0x3f2aaaab) & 0xff800000; + int32_t g = (int32_t) * ((int32_t *)&a); + int32_t e = (g - 0x3f2aaaab) & 0xff800000; g = g - e; - m = (float) * ((float *)&g); - i = (float)e * 1.19209290e-7f; // 0x1.0p-23 + float m = (float) * ((float *)&g); + float i = (float)e * 1.19209290e-7f; // 0x1.0p-23 /* m in [2/3, 4/3] */ - f = m - 1.0f; - s = f * f; + float f = m - 1.0f; + float s = f * f; /* Compute log1p(f) for f in [-1/3, 1/3] */ - r = fmaf(0.230836749f, f, -0.279208571f); // 0x1.d8c0f0p-3, -0x1.1de8dap-2 - t = fmaf(0.331826031f, f, -0.498910338f); // 0x1.53ca34p-2, -0x1.fee25ap-2 + float r = fmaf(0.230836749f, f, -0.279208571f); // 0x1.d8c0f0p-3, -0x1.1de8dap-2 + float t = fmaf(0.331826031f, f, -0.498910338f); // 0x1.53ca34p-2, -0x1.fee25ap-2 r = fmaf(r, s, t); r = fmaf(r, s, f); r = fmaf(i, 0.693147182f, r); // 0x1.62e430p-1 // log(2) @@ -2135,10 +1673,10 @@ class numpy { } for (size_t ix = 0; ix < matrix->rows * matrix->cols; ix++) { - if (min != DBL_MIN && matrix->buffer[ix] < min) { + if (matrix->buffer[ix] < min) { matrix->buffer[ix] = min; } - else if (max != DBL_MAX && matrix->buffer[ix] > max) { + else if (matrix->buffer[ix] > max) { matrix->buffer[ix] = max; } } @@ -2159,7 +1697,6 @@ class numpy { return EIDSP_OK; } -private: static int software_rfft(float *fft_input, float *output, size_t n_fft, size_t n_fft_out_features) { kiss_fft_cpx *fft_output = (kiss_fft_cpx*)ei_dsp_malloc(n_fft_out_features * sizeof(kiss_fft_cpx)); if (!fft_output) { @@ -2211,7 +1748,7 @@ class numpy { return EIDSP_OK; } - static int signal_get_data(float *in_buffer, size_t offset, size_t length, float *out_ptr) + static int signal_get_data(const float *in_buffer, size_t offset, size_t length, float *out_ptr) { memcpy(out_ptr, in_buffer + offset, length * sizeof(float)); return 0; @@ -2487,90 +2024,627 @@ class numpy { return count; } - static void sqrt_q15(int16_t in, int16_t *pOut) +#if EIDSP_USE_CMSIS_DSP + /** + * Initialize a CMSIS-DSP fast rfft structure + * We do it this way as this means we can compile out fast_init calls which hints the compiler + * to which tables can be removed + */ + static int cmsis_rfft_init_f32(arm_rfft_fast_instance_f32 *rfft_instance, const size_t n_fft) { - int32_t bits_val1; - int16_t number, temp1, var1, signBits1, half; - float temp_float1; - union { - int32_t fracval; - float floatval; - } tempconv; - - number = in; - - /* If the input is a positive number then compute the signBits. 
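// Note on the conversion helpers earlier in this hunk (illustrative sketch,
// not part of the diff): int16_to_float() / int8_to_float() no longer
// normalise to -1..1, they now cast the raw integer value. Standalone
// before/after comparison of the int16 case:
#include <cstdint>

static float int16_to_float_before(int16_t v) { return (float)v / 32768.0f; } // removed behaviour
static float int16_to_float_after(int16_t v)  { return (float)v; }            // behaviour after this patch

// int16_to_float_before(16384) == 0.5f, int16_to_float_after(16384) == 16384.0f,
// so callers that relied on normalised input must now apply the scaling themselves.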
*/ - if (number > 0) { - signBits1 = count_leading_zeros(number) - 17; - - /* Shift by the number of signBits1 */ - if ((signBits1 % 2) == 0) { - number = number << signBits1; - } else { - number = number << (signBits1 - 1); +// ARM cores (ex M55) with Helium extensions (MVEF) need special treatment (Issue 2843) +#if EI_CLASSIFIER_HAS_FFT_INFO == 1 && !defined(ARM_MATH_MVEF) && !defined(EI_CLASSIFIER_LOAD_ALL_FFTS) + arm_status status; + switch (n_fft) { +#if EI_CLASSIFIER_LOAD_FFT_32 == 1 + case 32: { + arm_cfft_instance_f32 *S = &(rfft_instance->Sint); + S->fftLen = 16U; + S->pTwiddle = NULL; + S->bitRevLength = arm_cfft_sR_f32_len16.bitRevLength; + S->pBitRevTable = arm_cfft_sR_f32_len16.pBitRevTable; + S->pTwiddle = arm_cfft_sR_f32_len16.pTwiddle; + rfft_instance->fftLenRFFT = 32U; + rfft_instance->pTwiddleRFFT = (float32_t *) twiddleCoef_rfft_32; + status = ARM_MATH_SUCCESS; + break; } - - /* Calculate half value of the number */ - half = number >> 1; - /* Store the number for later use */ - temp1 = number; - - /* Convert to float */ - temp_float1 = number * 3.051757812500000e-005f; - /* Store as integer */ - tempconv.floatval = temp_float1; - bits_val1 = tempconv.fracval; - /* Subtract the shifted value from the magic number to give intial guess */ - bits_val1 = 0x5f3759df - (bits_val1 >> 1); /* gives initial guess */ - /* Store as float */ - tempconv.fracval = bits_val1; - temp_float1 = tempconv.floatval; - /* Convert to integer format */ - var1 = (int32_t)(temp_float1 * 16384); - - /* 1st iteration */ - var1 = - ((int16_t)( - (int32_t)var1 * - (0x3000 - - ((int16_t)((((int16_t)(((int32_t)var1 * var1) >> 15)) * (int32_t)half) >> 15))) >> - 15)) - << 2; - /* 2nd iteration */ - var1 = - ((int16_t)( - (int32_t)var1 * - (0x3000 - - ((int16_t)((((int16_t)(((int32_t)var1 * var1) >> 15)) * (int32_t)half) >> 15))) >> - 15)) - << 2; - /* 3rd iteration */ - var1 = - ((int16_t)( - (int32_t)var1 * - (0x3000 - - ((int16_t)((((int16_t)(((int32_t)var1 * var1) >> 15)) * (int32_t)half) >> 15))) >> - 15)) - << 2; - - /* Multiply the inverse square root with the original value */ - var1 = ((int16_t)(((int32_t)temp1 * var1) >> 15)) << 1; - - /* Shift the output down accordingly */ - if ((signBits1 % 2) == 0) { - var1 = var1 >> (signBits1 / 2); - } else { - var1 = var1 >> ((signBits1 - 1) / 2); +#endif +#if EI_CLASSIFIER_LOAD_FFT_64 == 1 + case 64: { + arm_cfft_instance_f32 *S = &(rfft_instance->Sint); + S->fftLen = 32U; + S->pTwiddle = NULL; + S->bitRevLength = arm_cfft_sR_f32_len32.bitRevLength; + S->pBitRevTable = arm_cfft_sR_f32_len32.pBitRevTable; + S->pTwiddle = arm_cfft_sR_f32_len32.pTwiddle; + rfft_instance->fftLenRFFT = 64U; + rfft_instance->pTwiddleRFFT = (float32_t *) twiddleCoef_rfft_64; + status = ARM_MATH_SUCCESS; + break; + } +#endif +#if EI_CLASSIFIER_LOAD_FFT_128 == 1 + case 128: { + arm_cfft_instance_f32 *S = &(rfft_instance->Sint); + S->fftLen = 64U; + S->pTwiddle = NULL; + S->bitRevLength = arm_cfft_sR_f32_len64.bitRevLength; + S->pBitRevTable = arm_cfft_sR_f32_len64.pBitRevTable; + S->pTwiddle = arm_cfft_sR_f32_len64.pTwiddle; + rfft_instance->fftLenRFFT = 128U; + rfft_instance->pTwiddleRFFT = (float32_t *) twiddleCoef_rfft_128; + status = ARM_MATH_SUCCESS; + break; + } +#endif +#if EI_CLASSIFIER_LOAD_FFT_256 == 1 + case 256: { + arm_cfft_instance_f32 *S = &(rfft_instance->Sint); + S->fftLen = 128U; + S->pTwiddle = NULL; + S->bitRevLength = arm_cfft_sR_f32_len128.bitRevLength; + S->pBitRevTable = arm_cfft_sR_f32_len128.pBitRevTable; + S->pTwiddle = arm_cfft_sR_f32_len128.pTwiddle; + 
rfft_instance->fftLenRFFT = 256U; + rfft_instance->pTwiddleRFFT = (float32_t *) twiddleCoef_rfft_256; + status = ARM_MATH_SUCCESS; + break; + } +#endif +#if EI_CLASSIFIER_LOAD_FFT_512 == 1 + case 512: { + arm_cfft_instance_f32 *S = &(rfft_instance->Sint); + S->fftLen = 256U; + S->pTwiddle = NULL; + S->bitRevLength = arm_cfft_sR_f32_len256.bitRevLength; + S->pBitRevTable = arm_cfft_sR_f32_len256.pBitRevTable; + S->pTwiddle = arm_cfft_sR_f32_len256.pTwiddle; + rfft_instance->fftLenRFFT = 512U; + rfft_instance->pTwiddleRFFT = (float32_t *) twiddleCoef_rfft_512; + status = ARM_MATH_SUCCESS; + break; + } +#endif +#if EI_CLASSIFIER_LOAD_FFT_1024 == 1 + case 1024: { + arm_cfft_instance_f32 *S = &(rfft_instance->Sint); + S->fftLen = 512U; + S->pTwiddle = NULL; + S->bitRevLength = arm_cfft_sR_f32_len512.bitRevLength; + S->pBitRevTable = arm_cfft_sR_f32_len512.pBitRevTable; + S->pTwiddle = arm_cfft_sR_f32_len512.pTwiddle; + rfft_instance->fftLenRFFT = 1024U; + rfft_instance->pTwiddleRFFT = (float32_t *) twiddleCoef_rfft_1024; + status = ARM_MATH_SUCCESS; + break; + } +#endif +#if EI_CLASSIFIER_LOAD_FFT_2048 == 1 + case 2048: { + arm_cfft_instance_f32 *S = &(rfft_instance->Sint); + S->fftLen = 1024U; + S->pTwiddle = NULL; + S->bitRevLength = arm_cfft_sR_f32_len1024.bitRevLength; + S->pBitRevTable = arm_cfft_sR_f32_len1024.pBitRevTable; + S->pTwiddle = arm_cfft_sR_f32_len1024.pTwiddle; + rfft_instance->fftLenRFFT = 2048U; + rfft_instance->pTwiddleRFFT = (float32_t *) twiddleCoef_rfft_2048; + status = ARM_MATH_SUCCESS; + break; } - *pOut = var1; +#endif +#if EI_CLASSIFIER_LOAD_FFT_4096 == 1 + case 4096: { + arm_cfft_instance_f32 *S = &(rfft_instance->Sint); + S->fftLen = 2048U; + S->pTwiddle = NULL; + S->bitRevLength = arm_cfft_sR_f32_len2048.bitRevLength; + S->pBitRevTable = arm_cfft_sR_f32_len2048.pBitRevTable; + S->pTwiddle = arm_cfft_sR_f32_len2048.pTwiddle; + rfft_instance->fftLenRFFT = 4096U; + rfft_instance->pTwiddleRFFT = (float32_t *) twiddleCoef_rfft_4096; + status = ARM_MATH_SUCCESS; + break; + } +#endif + default: + return EIDSP_FFT_TABLE_NOT_LOADED; + } + + return status; +#else + return arm_rfft_fast_init_f32(rfft_instance, n_fft); +#endif + } +#endif // #if EIDSP_USE_CMSIS_DSP + + /** + * Power spectrum of a frame + * @param frame Row of a frame + * @param frame_size Size of the frame + * @param out_buffer Out buffer, size should be fft_points + * @param out_buffer_size Buffer size + * @param fft_points (int): The length of FFT. If fft_length is greater than frame_len, the frames will be zero-padded. 
+ * @returns EIDSP_OK if OK + */ + static int power_spectrum( + float *frame, + size_t frame_size, + float *out_buffer, + size_t out_buffer_size, + uint16_t fft_points) + { + if (out_buffer_size != static_cast(fft_points / 2 + 1)) { + EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + } + + int r = numpy::rfft(frame, frame_size, out_buffer, out_buffer_size, fft_points); + if (r != EIDSP_OK) { + return r; + } + + for (size_t ix = 0; ix < out_buffer_size; ix++) { + out_buffer[ix] = (1.0 / static_cast(fft_points)) * + (out_buffer[ix] * out_buffer[ix]); + } + + return EIDSP_OK; + } + + static int welch_max_hold( + float *input, + size_t input_size, + float *output, + size_t start_bin, + size_t stop_bin, + size_t fft_points, + bool do_overlap) + { + // save off one point to put back, b/c we're going to calculate in place + float saved_point = 0; + bool do_saved_point = false; + size_t fft_out_size = fft_points / 2 + 1; + float *fft_out; + ei_unique_ptr_t p_fft_out(nullptr, ei_free); + if (input_size < fft_points) { + fft_out = (float *)ei_calloc(fft_out_size, sizeof(float)); + p_fft_out.reset(fft_out); } - /* If the number is a negative number then store zero as its square root value */ else { - *pOut = 0; + // set input as output for in place operation + fft_out = input; + // save off one point to put back, b/c we're going to calculate in place + saved_point = input[fft_points / 2]; + do_saved_point = true; + } + + // init the output to zeros + memset(output, 0, sizeof(float) * (stop_bin - start_bin)); + int input_ix = 0; + while (input_ix < (int)input_size) { + // Figure out if we need any zero padding + size_t n_input_points = input_ix + fft_points <= input_size ? fft_points + : input_size - input_ix; + EI_TRY(power_spectrum( + input + input_ix, + n_input_points, + fft_out, + fft_points / 2 + 1, + fft_points)); + int j = 0; + // keep the max of the last frame and everything before + for (size_t i = start_bin; i < stop_bin; i++) { + output[j] = std::max(output[j], fft_out[i]); + j++; + } + if (do_overlap) { + if (do_saved_point) { + // This step only matters first time through + input[fft_points / 2] = saved_point; + do_saved_point = false; + } + input_ix += fft_points / 2; + } + else { + input_ix += fft_points; + } + } + + return EIDSP_OK; + } + + static float variance(float *input, size_t size) + { + // Use CMSIS either way. Will fall back to straight C when needed + float temp; +#if EIDSP_USE_CMSIS_DSP + arm_var_f32(input, size, &temp); +#else + float mean = 0.0f; + for (size_t i = 0; i < size; i++) { + mean += input[i]; + } + mean /= size; + + temp = 0.0f; + for (size_t i = 0; i < size; i++) { + temp += (input[i] - mean) * (input[i] - mean); + } + temp /= (size - 1); +#endif + return temp; + } + + /** + * This function handle the issue with zero values if the are exposed + * to become an argument for any log function. + * @param input Array + * @param input_size Size of array + * @returns void + */ + static void zero_handling(float *input, size_t input_size) + { + for (size_t ix = 0; ix < input_size; ix++) { + if (input[ix] == 0) { + input[ix] = 1e-10; + } + } + } + + /** + * This function handle the issue with zero values if the are exposed + * to become an argument for any log function. + * @param input Matrix + * @returns void + */ + static void zero_handling(matrix_t *input) + { + zero_handling(input->buffer, input->rows * input->cols); + } + + /** + * This function handle the underflow float values. 
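// Usage sketch for power_spectrum() above (illustrative, not part of the
// diff): it squares each bin of the float rfft() output and scales it by
// 1 / fft_points, producing fft_points / 2 + 1 values; welch_max_hold() then
// keeps the per-bin maximum across successive frames. Assumes the SDK headers
// are on the include path.
#include "edge-impulse-sdk/dsp/numpy.hpp"
#include <cstdint>
#include <cstddef>

static int power_spectrum_sketch(float *frame, size_t frame_size) {
    const uint16_t fft_points = 256;
    float spectrum[fft_points / 2 + 1];

    // a frame shorter than fft_points is zero-padded inside rfft()
    int ret = ei::numpy::power_spectrum(frame, frame_size,
                                        spectrum, fft_points / 2 + 1, fft_points);
    if (ret != 0) {
        return ret;                       // non-zero is an EIDSP_* error code
    }
    // spectrum[k] now holds the power in bin k (0 = DC, fft_points / 2 = Nyquist)
    return 0;
}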
+ * @param input Array + * @param input_size Size of array + * @param epsilon Smallest valid non-zero value + * @returns void + */ + static void underflow_handling(float* input, size_t input_size, float epsilon = 1e-07f) + { + for (size_t ix = 0; ix < input_size; ix++) { + if (fabs(input[ix]) < epsilon) { + input[ix] = 0.0f; + } + } + } + + __attribute__((unused)) static void scale(fvec& v, float scale) { + for (auto& x : v) { + x *= scale; } } + + __attribute__((unused)) static void sub(fvec& v, float b) { + for (auto& x : v) { + x -= b; + } + } + + __attribute__((unused)) static void mul(float* y, const float* x, float* b, size_t n) { + for (size_t i = 0; i < n; i++) { + y[i] = x[i] * b[i]; + } + } + + __attribute__((unused)) static fvec diff(const float* v, size_t n) { + fvec d(n - 1); + for (size_t i = 0; i < d.size(); i++) { + d[i] = v[i + 1] - v[i]; + } + return d; + } + + __attribute__((unused)) static float sum(const float* v, size_t n) { + float sum = 0; + for (size_t i = 0; i < n; i++) { + sum += v[i]; + } + return sum; + } + + static float mean(const fvec& v) { + float mean = 0; + for (auto x : v) { + mean += x; + } + mean /= v.size(); + return mean; + } + + static float mean(const float* v, size_t n) { + float mean = 0; + for (size_t i = 0; i < n; i++) { + mean += v[i]; + } + mean /= n; + return mean; + } + + static float median(const float* v, size_t n) { + fvec vc(n); + std::copy(v, v + n, vc.begin()); + std::sort(vc.begin(), vc.end()); + if (vc.size() % 2 == 0) { + return (vc[vc.size() / 2 - 1] + vc[vc.size() / 2]) / 2; + } + return vc[vc.size() / 2]; + } + + __attribute__((unused)) static float median(const fvec& v) { + return median(v.data(), v.size()); + } + + static float stddev(const float* v, size_t n, float m /* mean */, int ddof = 0) { + float var = 0; + for (size_t i = 0; i < n; i++) { + var += (v[i] - m) * (v[i] - m); + } + var /= n - ddof; + return sqrt(var); + } + + __attribute__((unused)) static float stddev(const float* v, size_t n) { + return stddev(v, n, mean(v, n), 0); + } + + __attribute__((unused)) static float stddev(const float* v, size_t n, int ddof) { + return stddev(v, n, mean(v, n), ddof); + } + + __attribute__((unused)) static float stddev(const fvec& v, int ddof = 0) { + return stddev(v.data(), v.size(), mean(v), ddof); + } + + static float rms(const float* v, size_t n) { + float rms = 0; + for (size_t i = 0; i < n; i++) { + rms += v[i] * v[i]; + } + rms /= n; + return sqrt(rms); + } + + __attribute__((unused)) static float rms(const fvec& v) { + return rms(v.data(), v.size()); + } + + template + static float max(const ei_vector& v) { + return *std::max_element(v.begin(), v.end()); + } + + __attribute__((unused)) static float max(const float* v, size_t n) { + return *std::max_element(v, v + n); + } + + template + static float min(const ei_vector& v) { + return *std::min_element(v.begin(), v.end()); + } + + __attribute__((unused)) static float min(const float* v, size_t n) { + return *std::min_element(v, v + n); + } + + __attribute__((unused)) static int argmax(const fvec& v, int start, int end) { + return std::max_element(v.begin() + start, v.begin() + end) - v.begin(); + } + + __attribute__((unused)) static fvec divide(float num, const float* den, size_t n) { + fvec v(n); + for (size_t i = 0; i < n; i++) { + v[i] = num / den[i]; + } + return v; + } + + __attribute__((unused)) static ivec histogram(const float* x, size_t n, int a, int b, int inc) { + int num_bins = (b - a) / inc; + ivec bins(num_bins, 0); + for (size_t i = 0; i < n; i++) { + int 
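// Note on the stddev() helpers above (illustrative sketch, not part of the
// diff): the ddof argument is the "delta degrees of freedom", following the
// NumPy convention; ddof = 0 divides by n (population), ddof = 1 by n - 1
// (sample estimate with Bessel's correction).
#include <cmath>
#include <cstddef>

static float stddev_sketch(const float *v, size_t n, int ddof) {
    float mean = 0.0f;
    for (size_t i = 0; i < n; i++) mean += v[i];
    mean /= n;

    float var = 0.0f;
    for (size_t i = 0; i < n; i++) var += (v[i] - mean) * (v[i] - mean);
    var /= (n - ddof);                     // n for ddof = 0, n - 1 for ddof = 1
    return std::sqrt(var);
}
// For {1, 2, 3, 4}: ddof = 0 gives sqrt(1.25) ~= 1.118, ddof = 1 gives
// sqrt(5 / 3) ~= 1.291.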
bin = (int)((x[i] - a) / inc); + if (bin >= 0 && bin < num_bins) { + bins[bin]++; + } + } + return bins; + } + + __attribute__((unused)) static fvec cumsum(const float* v, size_t n) { + fvec c(n); + c[0] = v[0]; + for (size_t i = 1; i < n; i++) { + c[i] = c[i - 1] + v[i]; + } + return c; + } + + __attribute__((unused)) static fvec arange(float start, float end, float step) { + assert(start < end); + assert(step > 0); + fvec v(::round((end - start) / step)); + for (size_t i = 0; i < v.size(); i++) { + v[i] = start + i * step; + } + return v; + } + + __attribute__((unused)) static void add(fvec& v, fvec& b) { + for (size_t i = 0; i < v.size(); i++) { + v[i] += b[i]; + } + } + + __attribute__((unused)) static float trapz(const fvec& x, const fvec& y, size_t lo, size_t hi) { + float area = 0; + for (size_t i = lo; i < hi; i++) { + area += (x[i + 1] - x[i]) * (y[i + 1] + y[i]) / 2; + } + return area; + } + + __attribute__((unused)) static fvec quantile(const fvec& v, size_t start, size_t end, const fvec& q) { + end = std::min(end, v.size()); + fvec vc(end - start); + std::copy(v.begin() + start, v.begin() + end, vc.begin()); + std::sort(vc.begin(), vc.end()); + fvec res(q.size()); + for (size_t i = 0; i < q.size(); i++) { + res[i] = vc[q[i] * vc.size()]; + } + return res; + } + + __attribute__((unused)) static fvec quantile(const float* v, size_t n, const fvec& q) { + fvec vc(n); + std::copy(v, v + n, vc.begin()); + std::sort(vc.begin(), vc.end()); + fvec res(q.size()); + for (size_t i = 0; i < q.size(); i++) { + res[i] = vc[q[i] * vc.size()]; + } + return res; + } + + static float dot(const float* x, const float* y, size_t n) { + float res = 0; + for (size_t i = 0; i < n; i++) { + res += x[i] * y[i]; + } + return res; + } + + + __attribute__((unused)) static float cosine_similarity(const fvec& x, const fvec& y) { + float xy = dot(x.data(), y.data(), x.size()); + float magx = dot(x.data(), x.data(), x.size()); + float magy = dot(y.data(), y.data(), y.size()); + xy /= sqrt(magx * magy); + return xy; + } + + __attribute__((unused)) static void ln(fvec& v) { + for (auto& x : v) { + x = log(x); + } + } + + static size_t next_power_of_2(size_t x) { + size_t res = 1; + while (res < x) { + res *= 2; + } + return res; + } + + static void detrend(float* data, size_t n) { + // Calculate the mean of the data points + float mean = 0.0; + for (size_t i = 0; i < n; i++) { + mean += data[i]; + } + mean /= n; + + // Calculate the slope of the best-fit line + float x_mean = (n + 1) / 2.0; + float y_mean = mean; + float numerator = 0.0; + float denominator = 0.0; + for (size_t i = 0; i < n; i++) { + numerator += (i + 1 - x_mean) * (data[i] - y_mean); + denominator += (i + 1 - x_mean) * (i + 1 - x_mean); + } + float slope = numerator / denominator; + + // Subtract the best-fit line from the data points to get the detrended data + for (size_t i = 0; i < n; i++) { + data[i] = data[i] - (slope * (i + 1)); + } + + // Calculate the mean of the detrended data + float detrended_mean = 0.0; + for (size_t i = 0; i < n; i++) { + detrended_mean += data[i]; + } + detrended_mean /= n; + + // Subtract the mean of the detrended data from each element + for (size_t i = 0; i < n; i++) { + data[i] -= detrended_mean; + } + } + + static fvec detrend(const fvec& data) { + auto ret = data; + detrend(ret.data(), ret.size()); + return ret; + } + }; +struct fmat { + ei_matrix* mat = nullptr; + fmat(size_t rows, size_t cols) { + mat = new ei_matrix(rows, cols); + assert(mat); + } + + ~fmat() { + delete mat; + } + + void resize(size_t 
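// Usage sketch for the new detrend() above (illustrative, not part of the
// diff): it fits a least-squares line through the samples, subtracts it, then
// removes the residual mean, so a pure linear ramp collapses to ~zeros.
// Assumes the SDK headers are on the include path.
#include "edge-impulse-sdk/dsp/numpy.hpp"
#include <cstdio>

static void detrend_sketch() {
    float data[8];
    for (int i = 0; i < 8; i++) {
        data[i] = 2.0f * i + 1.0f;         // linear ramp: slope 2, offset 1
    }

    ei::numpy::detrend(data, 8);           // remove the fitted trend and the mean

    for (int i = 0; i < 8; i++) {
        printf("%0.3f ", data[i]);         // every value is now close to 0
    }
    printf("\n");
}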
rows, size_t cols) { + delete mat; + mat = new ei_matrix(rows, cols); + } + + float* operator[](size_t i) { + if (mat == nullptr || i >= mat->rows) { + return nullptr; + } + return mat->get_row_ptr(i); + } + + void fill(float x) { + if (mat == nullptr) { + return; + } + for (size_t i = 0; i < mat->rows; i++) { + for (size_t j = 0; j < mat->cols; j++) { + (*this)[i][j] = x; + } + } + } + + void fill_col(size_t col, float x) { + if (mat == nullptr) { + return; + } + for (size_t i = 0; i < mat->rows; i++) { + (*this)[i][col] = x; + } + } + + void fill_row(size_t row, float x) { + if (mat == nullptr) { + return; + } + for (size_t i = 0; i < mat->cols; i++) { + (*this)[row][i] = x; + } + } +}; } // namespace ei #endif // _EIDSP_NUMPY_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/numpy_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/numpy_types.h index 19e47be..98b9c78 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/numpy_types.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/numpy_types.h @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
+ * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_NUMPY_TYPES_H_ @@ -29,13 +24,13 @@ #include #ifdef __cplusplus #include +#include "edge-impulse-sdk/dsp/ei_vector.h" #ifdef __MBED__ #include "mbed.h" #endif // __MBED__ #endif // __cplusplus #include "config.hpp" - -#include "../porting/ei_classifier_porting.h" +#include "edge-impulse-sdk/dsp/returntypes.h" #if EIDSP_TRACK_ALLOCATIONS #include "memory.hpp" @@ -50,11 +45,6 @@ typedef struct { float i; } fft_complex_t; -typedef struct { - int16_t r; - int16_t i; -} fft_complex_i16_t; - typedef struct { int32_t r; int32_t i; @@ -140,11 +130,30 @@ typedef struct ei_matrix { #endif } } + + /** + * @brief Get a pointer to the buffer advanced by n rows + * + * @param row Numer of rows to advance the returned buffer pointer + * @return float* Pointer to the buffer at the start of row n + */ + float *get_row_ptr(size_t row) + { + return buffer + row * cols; + } + + ei_matrix(ei_vector &in) : ei_matrix(1, in.size(), in.data()) { + } #endif // #ifdef __cplusplus } matrix_t; -typedef struct ei_matrix_i16 { - EIDSP_i16 *buffer; + +/** + * A matrix structure that allocates a matrix on the **heap**. + * Freeing happens by calling `delete` on the object or letting the object go out of scope. + */ +typedef struct ei_matrix_i8 { + int8_t *buffer; uint32_t rows; uint32_t cols; bool buffer_managed_by_me; @@ -164,10 +173,10 @@ typedef struct ei_matrix_i16 { * @param n_cols Number of columns * @param a_buffer Buffer, if not provided we'll alloc on the heap */ - ei_matrix_i16( + ei_matrix_i8( uint32_t n_rows, uint32_t n_cols, - EIDSP_i16 *a_buffer = NULL + int8_t *a_buffer = NULL #if EIDSP_TRACK_ALLOCATIONS , const char *fn = NULL, @@ -181,7 +190,7 @@ typedef struct ei_matrix_i16 { buffer_managed_by_me = false; } else { - buffer = (EIDSP_i16*)ei_calloc(n_rows * n_cols * sizeof(EIDSP_i16), 1); + buffer = (int8_t*)ei_calloc(n_rows * n_cols * sizeof(int8_t), 1); buffer_managed_by_me = true; } rows = n_rows; @@ -195,36 +204,52 @@ typedef struct ei_matrix_i16 { _originally_allocated_rows = rows; _originally_allocated_cols = cols; if (_fn) { - ei_dsp_register_matrix_alloc_internal(fn, file, line, rows, cols, sizeof(EIDSP_i16), buffer); + ei_dsp_register_matrix_alloc_internal(fn, file, line, rows, cols, sizeof(int8_t), buffer); } else { - ei_dsp_register_matrix_alloc(rows, cols, sizeof(EIDSP_i16), buffer); + ei_dsp_register_matrix_alloc(rows, cols, sizeof(int8_t), buffer); } #endif } } - ~ei_matrix_i16() { + ~ei_matrix_i8() { if (buffer && buffer_managed_by_me) { ei_free(buffer); #if EIDSP_TRACK_ALLOCATIONS if (_fn) { ei_dsp_register_matrix_free_internal(_fn, _file, _line, _originally_allocated_rows, - _originally_allocated_cols, sizeof(EIDSP_i16), buffer); + _originally_allocated_cols, sizeof(int8_t), buffer); } else { ei_dsp_register_matrix_free(_originally_allocated_rows, _originally_allocated_cols, - sizeof(EIDSP_i16), buffer); + sizeof(int8_t), buffer); } #endif } } + + /** + * @brief Get a pointer to the buffer advanced by n rows + * + * @param row Numer of rows to advance the returned buffer pointer + * @return float* Pointer to the buffer at the start of row n + */ + int8_t *get_row_ptr(size_t row) + { + return buffer + row * cols; + } + #endif // #ifdef __cplusplus -} matrix_i16_t; +} matrix_i8_t; +/** + * A matrix structure that allocates a matrix on the **heap**. + * Freeing happens by calling `delete` on the object or letting the object go out of scope. 
+ */ typedef struct ei_matrix_i32 { - EIDSP_i32 *buffer; + int32_t *buffer; uint32_t rows; uint32_t cols; bool buffer_managed_by_me; @@ -247,7 +272,7 @@ typedef struct ei_matrix_i32 { ei_matrix_i32( uint32_t n_rows, uint32_t n_cols, - EIDSP_i32 *a_buffer = NULL + int32_t *a_buffer = NULL #if EIDSP_TRACK_ALLOCATIONS , const char *fn = NULL, @@ -261,7 +286,7 @@ typedef struct ei_matrix_i32 { buffer_managed_by_me = false; } else { - buffer = (EIDSP_i32*)ei_calloc(n_rows * n_cols * sizeof(EIDSP_i32), 1); + buffer = (int32_t*)ei_calloc(n_rows * n_cols * sizeof(int32_t), 1); buffer_managed_by_me = true; } rows = n_rows; @@ -275,10 +300,10 @@ typedef struct ei_matrix_i32 { _originally_allocated_rows = rows; _originally_allocated_cols = cols; if (_fn) { - ei_dsp_register_matrix_alloc_internal(fn, file, line, rows, cols, sizeof(EIDSP_i32), buffer); + ei_dsp_register_matrix_alloc_internal(fn, file, line, rows, cols, sizeof(int32_t), buffer); } else { - ei_dsp_register_matrix_alloc(rows, cols, sizeof(EIDSP_i32), buffer); + ei_dsp_register_matrix_alloc(rows, cols, sizeof(int32_t), buffer); } #endif } @@ -291,27 +316,45 @@ typedef struct ei_matrix_i32 { #if EIDSP_TRACK_ALLOCATIONS if (_fn) { ei_dsp_register_matrix_free_internal(_fn, _file, _line, _originally_allocated_rows, - _originally_allocated_cols, sizeof(EIDSP_i32), buffer); + _originally_allocated_cols, sizeof(int32_t), buffer); } else { ei_dsp_register_matrix_free(_originally_allocated_rows, _originally_allocated_cols, - sizeof(EIDSP_i32), buffer); + sizeof(int32_t), buffer); } #endif } } + + /** + * @brief Get a pointer to the buffer advanced by n rows + * + * @param row Numer of rows to advance the returned buffer pointer + * @return float* Pointer to the buffer at the start of row n + */ + int32_t *get_row_ptr(size_t row) + { + return buffer + row * cols; + } + #endif // #ifdef __cplusplus } matrix_i32_t; /** - * A matrix structure that allocates a matrix on the **heap**. + * Another matrix structure that allocates a matrix on the **heap**. * Freeing happens by calling `delete` on the object or letting the object go out of scope. + * We use this for the filterbanks, as we quantize these operations to save memory. 
*/ -typedef struct ei_matrix_i8 { - int8_t *buffer; +typedef struct ei_quantized_matrix { + uint8_t *buffer; uint32_t rows; uint32_t cols; bool buffer_managed_by_me; +#ifdef __MBED__ + mbed::Callback dequantization_fn; +#else + float (*dequantization_fn)(uint8_t); +#endif #if EIDSP_TRACK_ALLOCATIONS const char *_fn; @@ -323,34 +366,39 @@ typedef struct ei_matrix_i8 { #ifdef __cplusplus /** - * Create a new matrix + * Create a quantized matrix * @param n_rows Number of rows * @param n_cols Number of columns - * @param a_buffer Buffer, if not provided we'll alloc on the heap + * @param a_dequantization_fn How to dequantize the values in this matrix + * @param a_buffer Optional: a buffer, if set we won't allocate memory ourselves */ - ei_matrix_i8( - uint32_t n_rows, - uint32_t n_cols, - int8_t *a_buffer = NULL + ei_quantized_matrix(uint32_t n_rows, + uint32_t n_cols, +#ifdef __MBED__ + mbed::Callback a_dequantization_fn, +#else + float (*a_dequantization_fn)(uint8_t), +#endif + uint8_t *a_buffer = NULL #if EIDSP_TRACK_ALLOCATIONS - , - const char *fn = NULL, - const char *file = NULL, - int line = 0 + , + const char *fn = NULL, + const char *file = NULL, + int line = 0 #endif - ) + ) { if (a_buffer) { buffer = a_buffer; buffer_managed_by_me = false; } else { - buffer = (int8_t*)ei_calloc(n_rows * n_cols * sizeof(int8_t), 1); + buffer = (uint8_t*)ei_calloc(n_rows * n_cols * sizeof(uint8_t), 1); buffer_managed_by_me = true; } rows = n_rows; cols = n_cols; - + dequantization_fn = a_dequantization_fn; if (!a_buffer) { #if EIDSP_TRACK_ALLOCATIONS _fn = fn; @@ -359,49 +407,55 @@ typedef struct ei_matrix_i8 { _originally_allocated_rows = rows; _originally_allocated_cols = cols; if (_fn) { - ei_dsp_register_matrix_alloc_internal(fn, file, line, rows, cols, sizeof(int8_t), buffer); + ei_dsp_register_matrix_alloc_internal(fn, file, line, rows, cols, sizeof(uint8_t), buffer); } else { - ei_dsp_register_matrix_alloc(rows, cols, sizeof(int8_t), buffer); + ei_dsp_register_matrix_alloc(rows, cols, sizeof(uint8_t), buffer); } #endif } } - ~ei_matrix_i8() { + ~ei_quantized_matrix() { if (buffer && buffer_managed_by_me) { ei_free(buffer); #if EIDSP_TRACK_ALLOCATIONS if (_fn) { ei_dsp_register_matrix_free_internal(_fn, _file, _line, _originally_allocated_rows, - _originally_allocated_cols, sizeof(int8_t), buffer); + _originally_allocated_cols, sizeof(uint8_t), buffer); } else { ei_dsp_register_matrix_free(_originally_allocated_rows, _originally_allocated_cols, - sizeof(int8_t), buffer); + sizeof(uint8_t), buffer); } #endif } } + + /** + * @brief Get a pointer to the buffer advanced by n rows + * + * @param row Numer of rows to advance the returned buffer pointer + * @return float* Pointer to the buffer at the start of row n + */ + uint8_t *get_row_ptr(size_t row) + { + return buffer + row * cols; + } + #endif // #ifdef __cplusplus -} matrix_i8_t; +} quantized_matrix_t; /** - * Another matrix structure that allocates a matrix on the **heap**. + * A matrix structure that allocates a matrix on the **heap**. * Freeing happens by calling `delete` on the object or letting the object go out of scope. - * We use this for the filterbanks, as we quantize these operations to save memory. 
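// Usage sketch for ei_quantized_matrix above (illustrative, not part of the
// diff): the struct stores uint8_t values plus a dequantization_fn that maps
// them back to float; the comment above notes it is used for the filterbanks.
// The linear 0..1 mapping below is a hypothetical example only, the real
// filterbank quantization scheme is defined elsewhere in the SDK. Assumes the
// SDK headers are on the include path.
#include "edge-impulse-sdk/dsp/numpy_types.h"
#include <cstdint>

static float linear_dequantize(uint8_t v) {
    return static_cast<float>(v) / 255.0f;   // assumption: plain 0..1 scaling
}

static void quantized_matrix_sketch() {
    // 4 filters x 16 coefficients; the constructor allocates the buffer on the heap
    ei::quantized_matrix_t fb(4, 16, &linear_dequantize);

    uint8_t *row0 = fb.get_row_ptr(0);       // row accessor added in this patch
    row0[0] = 128;
    float coeff = fb.dequantization_fn(row0[0]);   // ~0.502f with the mapping above
    (void)coeff;
}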
*/ -typedef struct ei_quantized_matrix { +typedef struct ei_matrix_u8 { uint8_t *buffer; uint32_t rows; uint32_t cols; bool buffer_managed_by_me; -#ifdef __MBED__ - mbed::Callback dequantization_fn; -#else - float (*dequantization_fn)(uint8_t); -#endif #if EIDSP_TRACK_ALLOCATIONS const char *_fn; @@ -413,27 +467,22 @@ typedef struct ei_quantized_matrix { #ifdef __cplusplus /** - * Create a quantized matrix + * Create a new matrix * @param n_rows Number of rows * @param n_cols Number of columns - * @param a_dequantization_fn How to dequantize the values in this matrix - * @param a_buffer Optional: a buffer, if set we won't allocate memory ourselves + * @param a_buffer Buffer, if not provided we'll alloc on the heap */ - ei_quantized_matrix(uint32_t n_rows, - uint32_t n_cols, -#ifdef __MBED__ - mbed::Callback a_dequantization_fn, -#else - float (*a_dequantization_fn)(uint8_t), -#endif - uint8_t *a_buffer = NULL + ei_matrix_u8( + uint32_t n_rows, + uint32_t n_cols, + uint8_t *a_buffer = NULL #if EIDSP_TRACK_ALLOCATIONS - , - const char *fn = NULL, - const char *file = NULL, - int line = 0 + , + const char *fn = NULL, + const char *file = NULL, + int line = 0 #endif - ) + ) { if (a_buffer) { buffer = a_buffer; @@ -445,7 +494,7 @@ typedef struct ei_quantized_matrix { } rows = n_rows; cols = n_cols; - dequantization_fn = a_dequantization_fn; + if (!a_buffer) { #if EIDSP_TRACK_ALLOCATIONS _fn = fn; @@ -463,7 +512,7 @@ typedef struct ei_quantized_matrix { } } - ~ei_quantized_matrix() { + ~ei_matrix_u8() { if (buffer && buffer_managed_by_me) { ei_free(buffer); @@ -479,8 +528,20 @@ typedef struct ei_quantized_matrix { #endif } } + + /** + * @brief Get a pointer to the buffer advanced by n rows + * + * @param row Numer of rows to advance the returned buffer pointer + * @return float* Pointer to the buffer at the start of row n + */ + uint8_t *get_row_ptr(size_t row) + { + return buffer + row * cols; + } + #endif // #ifdef __cplusplus -} quantized_matrix_t; +} matrix_u8_t; /** * Size of a matrix @@ -496,16 +557,47 @@ typedef enum { } DCT_NORMALIZATION_MODE; /** - * Sensor signal structure + * @addtogroup ei_structs + * @{ + */ + +/** + * @brief Holds the callback pointer for retrieving raw data and the length + * of data to be retrieved. + * + * Holds the callback function, `get_data(size_t offset, size_t length, float + * *out_ptr)`. This callback should be implemented by the user and fills the memory + * location given by `*out_ptr` with raw features. Features must be flattened to a + * 1-dimensional vector, as described in + * [this guide](https://docs.edgeimpulse.com/docs/deploy-your-model-as-a-c-library#signal-structure). + * + * `get_data()` may be called multiple times during preprocessing or inference (e.g. + * during execution of + * [run_classifier()](https://docs.edgeimpulse.com/reference/run_classifier) or + * [run_classifier_continuous()](https://docs.edgeimpulse.com/reference/run_classifier_continuous)). + * The `offset` argument will update to point to new data, and `length` data must + * be copied into the location specified by `out_ptr`. This scheme allows raw features + * to be stored in RAM or flash memory and paged in as necessary. + * + * Note that `get_data()` (even after multiple calls during a single execution of + * `run_classifier()` or `run_classifier_continuous()`) will never request more than a + * total number of features as given by `total_length`. 
+ * + * **Source**: [dsp/numpy_types.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/dsp/numpy_types.h) + * + * **Example**: [standalone inferencing main.cpp](https://github.com/edgeimpulse/example-standalone-inferencing/blob/master/source/main.cpp) */ typedef struct ei_signal_t { /** - * A function to retrieve part of the sensor signal - * No bytes will be requested outside of the `total_length`. - * @param offset The offset in the signal - * @param length The total length of the signal - * @param out_ptr An out buffer to set the signal data - */ + * Callback function to be implemented by the user. Parameters are given as + * `get_data(size_t offset, size_t length, float *out_ptr)` and should return an + * int (e.g. `EIDSP_OK` if copying completed successfully). No bytes will be + * requested outside of the `total_length`. + * Callback parameters: + * `offset`: The offset in the signal + * `length`: The number of samples to write into `out_ptr` + * `out_ptr`: An out buffer to set the signal data + */ #if EIDSP_SIGNAL_C_FN_POINTER == 1 int (*get_data)(size_t, size_t, float *); #else @@ -516,36 +608,22 @@ typedef struct ei_signal_t { #endif // __MBED__ #endif // EIDSP_SIGNAL_C_FN_POINTER == 1 + /** + * Total number of samples the user will provide (via get_data). This value should match either the total number of raw features required for a full window (ie, the window size in Studio, but in samples), OR, if using run_classifier_continuous(), the number of samples in a single slice) + * for a new slice (`run_classifier_continuous()`) in order to perform + * preprocessing and inference. + */ size_t total_length; } signal_t; -typedef struct ei_signal_i16_t { - /** - * A function to retrieve part of the sensor signal - * No bytes will be requested outside of the `total_length`. - * @param offset The offset in the signal - * @param length The total length of the signal - * @param out_ptr An out buffer to set the signal data - */ -#if EIDSP_SIGNAL_C_FN_POINTER == 1 - int (*get_data)(size_t, size_t, EIDSP_i16 *); -#else -#ifdef __MBED__ - mbed::Callback get_data; -#else - std::function get_data; -#endif // __MBED__ -#endif // EIDSP_SIGNAL_C_FN_POINTER == 1 - - size_t total_length; -} signal_i16_t; +/** @} */ #ifdef __cplusplus } // namespace ei { #endif // __cplusplus -// required on Adafruit nRF52, it seems not to matter too much on other targets... -#ifdef __cplusplus +// required on Adafruit nRF52 +#if defined(__cplusplus) && defined(ARDUINO_NRF52_ADAFRUIT) namespace std { __attribute__((weak)) void __throw_bad_function_call() { while(1); }; __attribute__((weak)) void __throw_length_error(char const*) { while(1); }; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/returntypes.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/returntypes.h new file mode 100644 index 0000000..a7e7191 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/returntypes.h @@ -0,0 +1,48 @@ +#ifndef _EIDSP_RETURN_TYPES_H_ +#define _EIDSP_RETURN_TYPES_H_ + +#include + +/** + * @defgroup ei_returntypes Return codes + * + * Return codes for Edge Impulse functions. 
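// Usage sketch for the documented signal_t above (illustrative, not part of
// the diff): wrapping a flat feature buffer in a signal_t by hand; the
// numpy::signal_from_buffer() helper kept earlier in this patch does the same
// thing. Assumes the SDK headers are on the include path.
#include "edge-impulse-sdk/dsp/numpy_types.h"
#include <cstddef>
#include <cstring>

// hypothetical flattened window of raw features
static const float features[] = { 0.1f, 0.2f, 0.3f, 0.4f };

static int get_feature_data(size_t offset, size_t length, float *out_ptr) {
    // copy `length` floats starting at `offset` into the buffer provided by the SDK
    memcpy(out_ptr, features + offset, length * sizeof(float));
    return 0;                               // 0 signals success
}

static void build_signal_sketch(ei::signal_t *signal) {
    signal->total_length = sizeof(features) / sizeof(features[0]);
    signal->get_data = &get_feature_data;   // called repeatedly, never past total_length
}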
+ * + * **Source**: [dsp/returntypes.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/dsp/returntypes.h) + * + * @addtogroup ei_returntypes + * @{ + */ + +// outside of namespace for backwards compat +typedef enum { + EI_IMPULSE_OK = 0, /**< Success */ + EI_IMPULSE_ERROR_SHAPES_DONT_MATCH = -1, /**< The shape of data does not match the shape of input layer. */ + EI_IMPULSE_CANCELED = -2, /**< Impulse execution is cancelled by user. */ + EI_IMPULSE_TFLITE_ERROR = -3, /**< Error in TesnorFlow Lite inference engine */ + EI_IMPULSE_DSP_ERROR = -5, /**< Error in processing portion of impulse */ + EI_IMPULSE_TFLITE_ARENA_ALLOC_FAILED = -6, /**< Failed to allocate memory in TensorFlow Lite arena, often caused by a lack of available heap memory. */ + EI_IMPULSE_CUBEAI_ERROR = -7, /**< Error in CubeAI inference engine (STM32) */ + EI_IMPULSE_ALLOC_FAILED = -8, /**< Memory allocation failed. Could be caused by a fragmented heap. Try to increase heap size. */ + EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES = -9, /**< This function is only supported for impulses with an image input. */ + EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE = -10, /**< The chosen inference engine (e.g. in Studio) is incapable of running this impulse. */ + EI_IMPULSE_OUT_OF_MEMORY = -11, /**< Out of memory. Could be caused by a fragmented heap. Try to increase heap size. */ + EI_IMPULSE_INPUT_TENSOR_WAS_NULL = -13, /**< Input tensor was null */ + EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL = -14, /**< Output tensor was null */ + EI_IMPULSE_SCORE_TENSOR_WAS_NULL = -15, /**< Score tensor is null (for SSD Object Detection models). */ + EI_IMPULSE_LABEL_TENSOR_WAS_NULL = -16, /**< Label tensor is null (for SSD Object Detection models). */ + EI_IMPULSE_TENSORRT_INIT_FAILED = -17, /**< TensorRT (NVIDIA) initialization failed. */ + EI_IMPULSE_DRPAI_INIT_FAILED = -18, /**< DRP-AI (Renesas) initialization failed. */ + EI_IMPULSE_DRPAI_RUNTIME_FAILED = -19, /**< DRP-AI (Renesas) runtime failed. */ + EI_IMPULSE_DEPRECATED_MODEL = -20, /**< The model is deprecated and cannot be used. You should re-export the impulse from Studio. */ + EI_IMPULSE_LAST_LAYER_NOT_AVAILABLE = -21, /**< The last layer is not available in the model. */ + EI_IMPULSE_INFERENCE_ERROR = -22, /**< Error during inference. */ + EI_IMPULSE_AKIDA_ERROR = -23, /**< Error in Akida inference engine (BrainChip) */ + EI_IMPULSE_INVALID_SIZE = -24, /** +#include "returntypes.h" namespace ei { @@ -41,7 +37,10 @@ typedef enum { EIDSP_UNSUPPORTED_FILTER_CONFIG = -1011, EIDSP_NARROWING = -1012, EIDSP_BLOCK_VERSION_INCORRECT = -1013, - EIDSP_NOT_SUPPORTED = -1014 + EIDSP_NOT_SUPPORTED = -1014, + EIDSP_REQUIRES_CMSIS_DSP = -1015, + EIDSP_FFT_TABLE_NOT_LOADED = -1016, + EIDSP_INFERENCE_ERROR = -1017 } EIDSP_RETURN_T; } // namespace ei diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/feature.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/feature.hpp index 8f0ee75..aada87e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/feature.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/feature.hpp @@ -1,31 +1,29 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. 
* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_SPECTRAL_FEATURE_H_ #define _EIDSP_SPECTRAL_FEATURE_H_ -#include #include #include "processing.hpp" +#include "wavelet.hpp" +#include "signal.hpp" +#include "edge-impulse-sdk/dsp/ei_utils.h" +#include "model-parameters/model_metadata.h" namespace ei { namespace spectral { @@ -38,6 +36,7 @@ typedef enum { class feature { public: + /** * Calculate the spectral features over a signal. * @param out_features Output matrix. 
Use `calculate_spectral_buffer_size` to calculate @@ -81,18 +80,7 @@ class feature { size_t axes = input_matrix->rows; - // calculate the mean - EI_DSP_MATRIX(mean_matrix, axes, 1); - ret = numpy::mean(input_matrix, &mean_matrix); - if (ret != EIDSP_OK) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - // scale by the mean - ret = numpy::subtract(input_matrix, &mean_matrix); - if (ret != EIDSP_OK) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } + EI_TRY(processing::subtract_mean(input_matrix) ); // apply filter if (filter_type == filter_lowpass) { @@ -181,163 +169,505 @@ class feature { return EIDSP_OK; } - static int spectral_analysis( - matrix_i32_t *out_features, - matrix_i16_t *input_matrix, - float sampling_freq, - filter_t filter_type, - float filter_cutoff, - uint8_t filter_order, - uint16_t fft_length, - uint8_t fft_peaks, - float fft_peaks_threshold, - matrix_i16_t *edges_matrix_in - ) { - if (out_features->rows != input_matrix->rows) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - if (out_features->cols != calculate_spectral_buffer_size(true, fft_peaks, edges_matrix_in->rows)) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + /** + * Calculate the buffer size for Spectral Analysis + * @param rms: Whether to calculate the RMS as part of the features + * @param peaks_count: Number of FFT peaks + * @param spectral_edges_count: Number of spectral edges + */ + static size_t calculate_spectral_buffer_size( + bool rms, size_t peaks_count, size_t spectral_edges_count) + { + size_t count = 0; + if (rms) count++; + count += (peaks_count * 2); + if (spectral_edges_count > 0) { + count += (spectral_edges_count - 1); } + return count; + } - if (edges_matrix_in->cols != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + static int extract_spectral_analysis_features_v1( + matrix_t *input_matrix, + matrix_t *output_matrix, + ei_dsp_config_spectral_analysis_t *config_ptr, + const float sampling_freq) + { + // scale the signal + int ret = numpy::scale(input_matrix, config_ptr->scale_axes); + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to scale signal (%d)\n", ret); + EIDSP_ERR(ret); } - int ret; - - size_t axes = input_matrix->rows; - - // calculate the mean - EI_DSP_i16_MATRIX(mean_matrix, axes, 1); - ret = numpy::mean(input_matrix, &mean_matrix); + // transpose the matrix so we have one row per axis (nifty!) 
+ ret = numpy::transpose(input_matrix); if (ret != EIDSP_OK) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + ei_printf("ERR: Failed to transpose matrix (%d)\n", ret); + EIDSP_ERR(ret); } - // scale by the mean - ret = numpy::subtract(input_matrix, &mean_matrix); - if (ret != EIDSP_OK) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + // the spectral edges that we want to calculate + matrix_t edges_matrix_in(64, 1); + size_t edge_matrix_ix = 0; + + char spectral_str[128] = { 0 }; + if (strlen(config_ptr->spectral_power_edges) > sizeof(spectral_str) - 1) { + EIDSP_ERR(EIDSP_PARAMETER_INVALID); } + memcpy( + spectral_str, + config_ptr->spectral_power_edges, + strlen(config_ptr->spectral_power_edges)); + + // convert spectral_power_edges (string) into float array + char *spectral_ptr = spectral_str; + while (spectral_ptr != NULL) { + while ((*spectral_ptr) == ' ') { + spectral_ptr++; + } - // apply filter - if (filter_type == filter_lowpass) { - ret = spectral::processing::i16_filter( - input_matrix, sampling_freq, filter_order, filter_cutoff, 0); - if (ret != EIDSP_OK) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + edges_matrix_in.buffer[edge_matrix_ix++] = atof(spectral_ptr); + + // find next (spectral) delimiter (or '\0' character) + while ((*spectral_ptr != ',')) { + spectral_ptr++; + if (*spectral_ptr == '\0') + break; } - } - else if (filter_type == filter_highpass) { - ret = spectral::processing::i16_filter( - input_matrix, sampling_freq, filter_order, 0, filter_cutoff); - if (ret != EIDSP_OK) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + + if (*spectral_ptr == '\0') { + spectral_ptr = NULL; + } + else { + spectral_ptr++; } } - - // calculate RMS - EI_DSP_i16_MATRIX(rms_matrix, axes, 1); - ret = numpy::rms(input_matrix, &rms_matrix); - if (ret != EIDSP_OK) { + edges_matrix_in.rows = edge_matrix_ix; + + // calculate how much room we need for the output matrix + size_t output_matrix_cols = spectral::feature::calculate_spectral_buffer_size( + true, + config_ptr->spectral_peaks_count, + edges_matrix_in.rows); + // ei_printf("output_matrix_size %hux%zu\n", input_matrix.rows, output_matrix_cols); + if (output_matrix->cols * output_matrix->rows != + static_cast(output_matrix_cols * config_ptr->axes)) { EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } - // calculate FFT - EI_DSP_i32_MATRIX(fft_matrix, 1, fft_length / 2 + 1); - ei_matrix_i32 axis_matrix_i32(1, input_matrix->cols); + output_matrix->cols = output_matrix_cols; + output_matrix->rows = config_ptr->axes; - // find peaks in FFT - EI_DSP_i16_MATRIX(peaks_matrix, axes, fft_peaks * 2); + spectral::filter_t filter_type; + if (strcmp(config_ptr->filter_type, "low") == 0) { + filter_type = spectral::filter_lowpass; + } + else if (strcmp(config_ptr->filter_type, "high") == 0) { + filter_type = spectral::filter_highpass; + } + else { + filter_type = spectral::filter_none; + } - // EIDSP_i16 fft_scaled = fft_length / 10; + ret = spectral::feature::spectral_analysis( + output_matrix, + input_matrix, + sampling_freq, + filter_type, + config_ptr->filter_cutoff, + config_ptr->filter_order, + config_ptr->fft_length, + config_ptr->spectral_peaks_count, + config_ptr->spectral_peaks_threshold, + &edges_matrix_in); + if (ret != EIDSP_OK) { + ei_printf("ERR: Failed to calculate spectral features (%d)\n", ret); + EIDSP_ERR(ret); + } - EI_DSP_i16_MATRIX(period_fft_matrix, 1, fft_length / 2 + 1); - EI_DSP_i16_MATRIX(period_freq_matrix, 1, fft_length / 2 + 1); - EI_DSP_i16_MATRIX(edges_matrix_out, edges_matrix_in->rows - 1, 1); + // flatten again + output_matrix->cols = 
config_ptr->axes * output_matrix_cols; + output_matrix->rows = 1; - for (size_t row = 0; row < input_matrix->rows; row++) { - // per axis code + return EIDSP_OK; + } - // get a slice of the current axis - EI_DSP_i16_MATRIX_B(axis_matrix, 1, input_matrix->cols, input_matrix->buffer + (row * input_matrix->cols)); + static void get_start_stop_bin( + float sampling_freq, + size_t fft_length, + float filter_cutoff, + size_t *start_bin, + size_t *stop_bin, + bool is_high_pass) + { + // we want to find n such that fcutoff < sample_f / fft * n ( or > for high pass ) + // also, + - half bin width (sample_f/(fft*2)) for high / low pass + if (filter_cutoff > sampling_freq / 2) { + filter_cutoff = sampling_freq / 2; + } + float bin = filter_cutoff * fft_length / sampling_freq; + if (is_high_pass) { + *start_bin = static_cast(bin - 0.5) + 1; // add one b/c we want to always round up + // don't use the DC bin b/c it's zero + *start_bin = *start_bin == 0 ? 1 : *start_bin; + *stop_bin = fft_length / 2 + 1; // go one past + } + else { + *start_bin = 1; + *stop_bin = static_cast(bin + 0.5) + 1; // go one past + } + } - // Convert to i32 for accuracy - for(uint32_t i = 0; i < input_matrix->cols; i++) { - axis_matrix_i32.buffer[i] = ((EIDSP_i32)axis_matrix.buffer[i]) << 16; - } + /** + * @brief Calculates the spectral analysis features. + * + * @return the number of features calculated + */ + static size_t extract_spec_features( + matrix_t *input_matrix, + matrix_t *output_matrix, + ei_dsp_config_spectral_analysis_t *config, + const float sampling_freq, + const bool remove_mean = true, + const bool transpose_and_scale_input = true) + { + if (transpose_and_scale_input) { + // transpose the matrix so we have one row per axis + numpy::transpose_in_place(input_matrix); - ret = numpy::rfft(axis_matrix_i32.buffer, axis_matrix.cols, fft_matrix.buffer, fft_matrix.cols, fft_length); - if (ret != EIDSP_OK) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + // func tests for scale of 1 and does a no op in that case + EI_TRY(numpy::scale(input_matrix, config->scale_axes)); + } + + bool do_filter = false; + bool is_high_pass; + + // apply filter, if enabled + // "zero" order filter allowed. will still remove unwanted fft bins later + if (strcmp(config->filter_type, "low") == 0) { + if( config->filter_order ) { + EI_TRY(spectral::processing::butterworth_lowpass_filter( + input_matrix, + sampling_freq, + config->filter_cutoff, + config->filter_order)); } + do_filter = true; + is_high_pass = false; + } + else if (strcmp(config->filter_type, "high") == 0) { + if( config->filter_order ) { + EI_TRY(spectral::processing::butterworth_highpass_filter( + input_matrix, + sampling_freq, + config->filter_cutoff, + config->filter_order)); + } + do_filter = true; + is_high_pass = true; + } - // multiply by 2/N - numpy::scale(&fft_matrix, (2.0f / static_cast(fft_length))); + if (remove_mean){ + EI_TRY(processing::subtract_mean(input_matrix)); + } - // we're now using the FFT matrix to calculate peaks etc. 
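A minimal, self-contained sketch (not part of the patch) of the bin selection performed by get_start_stop_bin above, using assumed example values (100 Hz sampling rate, 64-point FFT, 10 Hz cutoff); it only mirrors the logic shown in the hunk:

#include <cstddef>
#include <cstdio>

// Mirrors get_start_stop_bin(): keep the FFT bins on the pass-band side of the cutoff.
static void start_stop_bin(float fs, std::size_t fft_len, float fc,
                           std::size_t *start, std::size_t *stop, bool high_pass) {
    if (fc > fs / 2) fc = fs / 2;
    float bin = fc * fft_len / fs;                      // cutoff expressed in bins
    if (high_pass) {
        *start = static_cast<std::size_t>(bin - 0.5f) + 1;
        if (*start == 0) *start = 1;                    // never keep the DC bin
        *stop = fft_len / 2 + 1;
    } else {
        *start = 1;
        *stop = static_cast<std::size_t>(bin + 0.5f) + 1;
    }
}

int main() {
    std::size_t start, stop;
    start_stop_bin(100.0f, 64, 10.0f, &start, &stop, false);
    // bin = 10 * 64 / 100 = 6.4 -> start = 1, stop = 7, so 6 low-pass bins survive
    std::printf("low-pass:  bins [%zu, %zu)\n", start, stop);
    start_stop_bin(100.0f, 64, 10.0f, &start, &stop, true);
    // start = (size_t)(6.4 - 0.5) + 1 = 6, stop = 64/2 + 1 = 33, so 27 high-pass bins survive
    std::printf("high-pass: bins [%zu, %zu)\n", start, stop);
    return 0;
}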
- EI_DSP_i32_MATRIX(peaks_matrix, fft_peaks, 2); - ret = spectral::processing::find_fft_peaks(&fft_matrix, &peaks_matrix, sampling_freq, fft_peaks_threshold, fft_length); - if (ret != EIDSP_OK) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } + // Figure bins we remove based on filter cutoff + size_t start_bin, stop_bin; + if (do_filter) { + get_start_stop_bin( + sampling_freq, + config->fft_length, + config->filter_cutoff, + &start_bin, + &stop_bin, + is_high_pass); + } + else { + start_bin = 1; + stop_bin = config->fft_length / 2 + 1; + } + size_t num_bins = stop_bin - start_bin; - // calculate periodogram for spectral power buckets - ret = spectral::processing::periodogram(&axis_matrix, - &period_fft_matrix, &period_freq_matrix, sampling_freq, fft_length); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } + float *feature_out = output_matrix->buffer; + const float *feature_out_ori = feature_out; + for (size_t row = 0; row < input_matrix->rows; row++) { + float *data_window = input_matrix->get_row_ptr(row); + size_t data_size = input_matrix->cols; - // EI_DSP_i16_MATRIX(edges_matrix_out, edges_matrix_in->rows - 1, 1); - ret = spectral::processing::spectral_power_edges( - &period_fft_matrix, - &period_freq_matrix, - edges_matrix_in, - &edges_matrix_out, - sampling_freq); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); + matrix_t rms_in_matrix(1, data_size, data_window); + matrix_t rms_out_matrix(1, 1, feature_out); + EI_TRY(numpy::rms(&rms_in_matrix, &rms_out_matrix)); + + feature_out++; + + // Standard Deviation + float stddev = *(feature_out-1); //= sqrt(numpy::variance(data_window, data_size)); + if (stddev == 0.0f) { + stddev = 1e-10f; + } + // Don't add std dev as a feature b/c it's the same as RMS + // Skew and Kurtosis w/ shortcut: + // See definition at https://en.wikipedia.org/wiki/Skewness + // See definition at https://en.wikipedia.org/wiki/Kurtosis + // Substitute 0 for mean (b/c it is subtracted out above) + // Skew becomes: mean(X^3) / stddev^3 + // Kurtosis becomes: mean(X^4) / stddev^4 + // Note, this is the Fisher definition of Kurtosis, so subtract 3 + // (see https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.kurtosis.html) + float s_sum = 0; + float k_sum = 0; + float temp; + for (size_t i = 0; i < data_size; i++) { + temp = data_window[i] * data_window[i] * data_window[i]; + s_sum += temp; + k_sum += temp * data_window[i]; } + // Skewness out + temp = stddev * stddev * stddev; + *feature_out++ = (s_sum / data_size) / temp; + // Kurtosis out + *feature_out++ = ((k_sum / data_size) / (temp * stddev)) - 3; + + if (config->implementation_version == 4) { + + size_t fft_out_size = config->fft_length / 2 + 1; + ei_vector fft_out(fft_out_size); + EI_TRY(numpy::welch_max_hold( + data_window, + data_size, + fft_out.data(), + 0, + fft_out_size, + config->fft_length, + config->do_fft_overlap)); + + matrix_t x(1, fft_out.size(), const_cast(fft_out.data())); + matrix_t out(1, 1); + + *feature_out++ = (numpy::skew(&x, &out) == EIDSP_OK) ? (out.get_row_ptr(0)[0]) : 0.0f; + *feature_out++ = (numpy::kurtosis(&x, &out) == EIDSP_OK) ? 
(out.get_row_ptr(0)[0]) : 0.0f; + + for (size_t i = start_bin; i < stop_bin; i++) { + feature_out[i - start_bin] = fft_out[i]; + } + } else { + EI_TRY(numpy::welch_max_hold( + data_window, + data_size, + feature_out, + start_bin, + stop_bin, + config->fft_length, + config->do_fft_overlap)); + } + if (config->do_log) { + numpy::zero_handling(feature_out, num_bins); + ei_matrix temp(num_bins, 1, feature_out); + numpy::log10(&temp); + } + feature_out += num_bins; + } + size_t num_features = feature_out - feature_out_ori; + return num_features; + } - EIDSP_i32 *features_row = out_features->buffer + (row * out_features->cols); + static int extract_spectral_analysis_features_v2( + matrix_t *input_matrix, + matrix_t *output_matrix, + ei_dsp_config_spectral_analysis_t *config, + const float sampling_freq) + { + size_t n_features = + extract_spec_features(input_matrix, output_matrix, config, sampling_freq); + return n_features == output_matrix->cols ? EIDSP_OK : EIDSP_MATRIX_SIZE_MISMATCH; + } - size_t fx = 0; - - features_row[fx++] = rms_matrix.buffer[row]; + static int extract_spectral_analysis_features_v3( + matrix_t *input_matrix, + matrix_t *output_matrix, + ei_dsp_config_spectral_analysis_t *config, + const float sampling_freq) + { + if (strcmp(config->analysis_type, "Wavelet") == 0) { + return wavelet::extract_wavelet_features(input_matrix, output_matrix, config, sampling_freq); + } else { + return extract_spectral_analysis_features_v2(input_matrix, output_matrix, config, sampling_freq); + } + } - for (size_t peak_row = 0; peak_row < peaks_matrix.rows; peak_row++) { + static ei_vector get_ratio_combo(int r) + { + if (r == 1 || r == 3 || r == 10) { + return {r}; + } else if (r == 30) { + return {3, 10}; + } else if (r == 100) { + return {10, 10}; + } else if (r == 1000) { + return {10, 10, 10}; + } else { + assert(0); + } + return {0}; // to make linter happy + } - features_row[fx++] = (EIDSP_i16)(peaks_matrix.buffer[peak_row * peaks_matrix.cols + 0] >> 16) * fft_length; - features_row[fx++] = (EIDSP_i16)(peaks_matrix.buffer[peak_row * peaks_matrix.cols + 1] >> 16) * fft_length; - } + // can do in-place or out-of-place + static size_t _decimate(matrix_t *input_matrix, matrix_t *output_matrix, size_t ratio) + { + // generated by build_sav4_header in prepare.py + static float sos_deci_3[] = { + 3.4799547399084973e-05f, 6.959909479816995e-05f, 3.4799547399084973e-05f, 1.0f, -1.416907422639627f, 0.5204552955670066f, 1.0f, 2.0f, 1.0f, 1.0f, -1.3342748248687593f, 0.594631953081447f, 1.0f, 2.0f, 1.0f, 1.0f, -1.237675162600336f, 0.7259326611233617f, 1.0f, 2.0f, 1.0f, 1.0f, -1.2180861262950025f, 0.8987833581253264}; + static float sos_zi_deci_3[] = { 0.0013094887094341828f, -0.0006648423946383296f, + 0.0193087012128479f, -0.010936639208493802f, + 0.1485445305451165f, -0.10217301649013415f, + 0.8250625539381586f, -0.7244268881025758 }; + static float sos_deci_10[] = { 3.5863243209995215e-09f, + 7.172648641999043e-09f, + 3.5863243209995215e-09f, + 1.0f, + -1.8204968644767618f, + 0.8308597403796137f, + 1.0f, + 2.0f, + 1.0f, + 1.0f, + -1.8289505620176847f, + 0.8553173710387741f, + 1.0f, + 2.0f, + 1.0f, + 1.0f, + -1.8517334482627625f, + 0.9015161055713813f, + 1.0f, + 2.0f, + 1.0f, + 1.0f, + -1.8965395961864169f, + 0.9644245584642932 }; + static float sos_zi_deci_10[] = { 1.38071060429997e-06f, -1.146570262401316e-06f, + 0.00020862168862901534f, -0.0001782374705409433f, + 0.016663820918116152f, -0.015002020730727955f, + 0.9773862470492868f, -0.9420150059170858 }; + + assert(ratio == 3 || ratio == 10); + + 
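A short sketch (not part of the patch) of the skew/kurtosis shortcut used by extract_spec_features above: once the mean has been subtracted, the standard deviation equals the RMS, skewness reduces to mean(x^3)/stddev^3, and Fisher kurtosis to mean(x^4)/stddev^4 - 3. The sample window below is an assumed example:

#include <cmath>
#include <cstddef>
#include <cstdio>

int main() {
    // Assumed example window; in the patch this is one row of the mean-removed input matrix.
    const float x[] = { 0.5f, -1.0f, 1.5f, -1.0f };
    const std::size_t n = sizeof(x) / sizeof(x[0]);

    float sq = 0, s_sum = 0, k_sum = 0;
    for (std::size_t i = 0; i < n; i++) {
        sq += x[i] * x[i];                 // RMS doubles as stddev (mean is already zero)
        float c = x[i] * x[i] * x[i];
        s_sum += c;                        // sum of x^3 for skewness
        k_sum += c * x[i];                 // sum of x^4 for kurtosis
    }
    float stddev   = std::sqrt(sq / n);
    float skew     = (s_sum / n) / (stddev * stddev * stddev);
    float kurtosis = (k_sum / n) / (stddev * stddev * stddev * stddev) - 3.0f; // Fisher definition
    std::printf("rms=%f skew=%f kurtosis=%f\n", stddev, skew, kurtosis);
    return 0;
}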
float* sos = ratio == 3 ? sos_deci_3 : sos_deci_10; + float* sos_zi = ratio == 3 ? sos_zi_deci_3 : sos_zi_deci_10; + + const size_t out_size = signal::get_decimated_size(input_matrix->cols, ratio); - for (size_t edge_row = 0; edge_row < edges_matrix_out.rows; edge_row++) { - features_row[fx] = (EIDSP_i16)(edges_matrix_out.buffer[edge_row * edges_matrix_out.cols] >> 16); - } + for (size_t row = 0; row < input_matrix->rows; row++) { + const float *x = input_matrix->get_row_ptr(row); + float *y = output_matrix->get_row_ptr(row); + signal::sosfilt sosfilt(sos, sos_zi, 4); + signal::decimate_simple( + x, + input_matrix->cols, + y, + output_matrix->cols, + ratio, + sosfilt); } - return EIDSP_OK; + return out_size; } - /** - * Calculate the buffer size for Spectral Analysis - * @param rms: Whether to calculate the RMS as part of the features - * @param peaks_count: Number of FFT peaks - * @param spectral_edges_count: Number of spectral edges - */ - static size_t calculate_spectral_buffer_size( - bool rms, size_t peaks_count, size_t spectral_edges_count) + static int extract_spectral_analysis_features_v4( + matrix_t *input_matrix, + matrix_t *output_matrix, + ei_dsp_config_spectral_analysis_t *config_p, + const float sampling_freq) { - size_t count = 0; - if (rms) count++; - count += (peaks_count * 2); - if (spectral_edges_count > 0) { - count += (spectral_edges_count - 1); + auto config_copy = *config_p; + auto config = &config_copy; + if (strcmp(config->analysis_type, "Wavelet") == 0) { + return wavelet::extract_wavelet_features(input_matrix, output_matrix, config, sampling_freq); + } + else if (config->extra_low_freq == false && config->input_decimation_ratio == 1) { + size_t n_features = + extract_spec_features(input_matrix, output_matrix, config, sampling_freq); + return n_features == output_matrix->cols ? 
EIDSP_OK : EIDSP_MATRIX_SIZE_MISMATCH; + } + else { + numpy::transpose_in_place(input_matrix); + EI_TRY(numpy::scale(input_matrix, config->scale_axes)); + + if (config->input_decimation_ratio > 1) { + ei_vector ratio_combo = get_ratio_combo(config->input_decimation_ratio); + size_t out_size = input_matrix->cols; + for (int r : ratio_combo) { + out_size = _decimate(input_matrix, input_matrix, r); + } + + // rearrange input matrix to be in the right shape after decimation + float* out = input_matrix->get_row_ptr(0) + out_size; + for(uint32_t r = 1; r < input_matrix->rows; r++) { + float *row = input_matrix->get_row_ptr(r); + for(size_t c = 0; c < out_size; c++) { + *out++ = row[c]; + } + } + input_matrix->cols = out_size; + } + + float new_sampling_freq = sampling_freq / config->input_decimation_ratio; + + // filter here, before decimating, instead of inside extract_spec_features + if (strcmp(config->filter_type, "low") == 0) { + if( config->filter_order ) { + EI_TRY(spectral::processing::butterworth_lowpass_filter( + input_matrix, + new_sampling_freq, + config->filter_cutoff, + config->filter_order)); + } + } + else if (strcmp(config->filter_type, "high") == 0) { + if( config->filter_order ) { + EI_TRY(spectral::processing::butterworth_highpass_filter( + input_matrix, + new_sampling_freq, + config->filter_cutoff, + config->filter_order)); + } + } + + // set the filter order to 0, so that we won't double filter + config->filter_order = 0; + + // do this before extract_spec_features because extract_spec_features modifies the matrix + constexpr size_t decimation = 10; + const size_t decimated_size = + signal::get_decimated_size(input_matrix->cols, decimation); + matrix_t lf_signal(input_matrix->rows, decimated_size); + _decimate(input_matrix, &lf_signal, decimation); + + size_t n_features = extract_spec_features( + input_matrix, + output_matrix, + config, + new_sampling_freq, + true, + false); + + if (n_features > 0 && config->extra_low_freq) { + // disable filtering post decimation + matrix_t lf_features(1, output_matrix->rows * output_matrix->cols - n_features, + output_matrix->buffer + n_features); + + n_features += extract_spec_features( + &lf_signal, + &lf_features, + config, + new_sampling_freq / decimation, + true, + false); + } + return n_features == output_matrix->cols ? EIDSP_OK : EIDSP_MATRIX_SIZE_MISMATCH; } - return count; } }; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/filters.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/filters.hpp index ed8cdb7..c400fcf 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/filters.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/filters.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_SPECTRAL_FILTERS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/fir_filter.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/fir_filter.hpp index a3a9b94..52c5874 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/fir_filter.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/fir_filter.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2020 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. 
+ * SPDX-License-Identifier: Apache-2.0 */ #ifndef __FIR_FILTER__H__ #define __FIR_FILTER__H__ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/processing.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/processing.hpp index 82444fe..c70d516 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/processing.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/processing.hpp @@ -1,33 +1,27 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_SPECTRAL_PROCESSING_H_ #define _EIDSP_SPECTRAL_PROCESSING_H_ -#include +#include "edge-impulse-sdk/dsp/ei_vector.h" #include #include "../numpy.hpp" #include "filters.hpp" -#include "fir_filter.hpp" namespace ei { namespace spectral { @@ -77,11 +71,6 @@ namespace processing { float amplitude; } freq_peak_t; - typedef struct { - EIDSP_i16 freq; - EIDSP_i16 amplitude; - } freq_peak_i16_t; - typedef struct { EIDSP_i32 freq; EIDSP_i32 amplitude; @@ -160,43 +149,6 @@ namespace processing { return EIDSP_OK; } - /** - * @brief Perform in place filtering on the input matrix - * @param matrix Input matrix, and output matrix (in place operation) - * @param sampling_frequency Sampling freqency of data - * @param filter_order Filter size -1 - * @param lowpass_cutoff Lowpass cutoff freqency. If 0, will be a high pass filter - * @param highpass_cutoff Highpass cutoff. If 0, will just be a lowpass. If both, bandpass - * @param decimation_ratio To downsample, ratio of samples to get rid of. - * For example, 4 to go from sample rate of 40k to 10k. 
LOWPASS CUTOFF MUST MATCH THIS - * If you don't filter the high frequencies, they WILL alias into the passband - * So in the above example, you would want to cutoff at 5K (so you have some buffer) - * TODO (will the cutoff be the start of rolloff, or the -20 dB level?) - * @return int always EIDSP_OK (for now) - */ - static int i16_filter( - matrix_i16_t *matrix, - float sampling_frequency, - uint8_t filter_order, - float lowpass_cutoff, - float highpass_cutoff = 0, - int decimation_ratio = 1 ) - { - - //per convention, filter length is filter order +1 - fir_filter filter(sampling_frequency, filter_order + 1, lowpass_cutoff, highpass_cutoff, decimation_ratio); - for (size_t row = 0; row < matrix->rows; row++) - { - filter.reset(); - filter.apply_filter( - matrix->buffer + (row * matrix->cols), - matrix->buffer + (row * matrix->cols), - matrix->cols); - } - - return EIDSP_OK; - } - /** * Find peaks in a FFT spectrum * threshold is *normalized* threshold @@ -263,92 +215,6 @@ namespace processing { return EIDSP_OK; } - static int find_peak_indexes( - matrix_i16_t *input_matrix, - matrix_i16_t *output_matrix, - int16_t threshold, - uint16_t *peaks_found) - { - if (input_matrix->rows != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (output_matrix->cols != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - uint16_t out_ix = 0; - size_t in_size = input_matrix->cols; - int16_t *in = input_matrix->buffer; - size_t out_size = output_matrix->rows; - int16_t *out = output_matrix->buffer; - - signed short prev = in[0]; - - // so.... - for (size_t ix = 1; ix < in_size - 1; ix++) { - // first make sure it's actually a peak... - if (in[ix] > prev && in[ix] > in[ix+1]) { - // then make sure the threshold is met (on both?) - EIDSP_i16 height = ((in[ix] - prev) + (in[ix] - in[ix + 1])); - if (height > threshold) { - out[out_ix] = ix; - out_ix++; - if (out_ix == out_size) break; - } - } - - prev = in[ix]; - } - - *peaks_found = out_ix; - - return EIDSP_OK; - } - - static int find_peak_indexes( - matrix_i32_t *input_matrix, - matrix_i32_t *output_matrix, - int16_t threshold, - uint16_t *peaks_found) - { - if (input_matrix->rows != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (output_matrix->cols != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - uint16_t out_ix = 0; - size_t in_size = input_matrix->cols; - int32_t *in = input_matrix->buffer; - size_t out_size = output_matrix->rows; - int32_t *out = output_matrix->buffer; - - EIDSP_i32 prev = in[0]; - - // so.... - for (size_t ix = 1; ix < in_size - 1; ix++) { - // first make sure it's actually a peak... - if (in[ix] > prev && in[ix] > in[ix+1]) { - // then make sure the threshold is met (on both?) 
- EIDSP_i32 height = ((in[ix] - prev) + (in[ix] - in[ix + 1])); - if (height > threshold) { - out[out_ix] = ix; - out_ix++; - if (out_ix == out_size) break; - } - } - - prev = in[ix]; - } - - *peaks_found = out_ix; - - return EIDSP_OK; - } - /** * Find peaks in FFT * @param fft_matrix Matrix of FFT numbers (1xN) @@ -372,6 +238,10 @@ namespace processing { EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } + if (output_matrix->rows == 0) { + return EIDSP_OK; + } + int ret; int N = static_cast(fft_length); @@ -392,7 +262,7 @@ namespace processing { } // turn this into C++ vector and sort it based on amplitude - std::vector peaks; + ei_vector peaks; for (uint8_t ix = 0; ix < peak_count; ix++) { freq_peak_t d; @@ -428,166 +298,6 @@ namespace processing { return EIDSP_OK; } - __attribute__((unused)) static int find_fft_peaks( - matrix_i16_t *fft_matrix, - matrix_i16_t *output_matrix, - float sampling_freq, - float threshold, - uint16_t fft_length) - { - if (fft_matrix->rows != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (output_matrix->cols != 2) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - int ret; - - int N = static_cast(fft_length); - float T = 1.0f / sampling_freq; - - EIDSP_i16 stop_point; - float stop = (((1.0f / (2.0f * T)))/N); - numpy::float_to_int16(&stop, &stop_point, 1); - - EI_DSP_i16_MATRIX(freq_space, 1, fft_matrix->cols); - ret = numpy::linspace(0, stop_point, (N >> 1), freq_space.buffer); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } - - EI_DSP_i16_MATRIX(peaks_matrix, output_matrix->rows * 4, 1); - - uint16_t peak_count; - ret = find_peak_indexes(fft_matrix, &peaks_matrix, 0, &peak_count); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } - - EIDSP_i16 i16_threshold; - threshold /= fft_length; - numpy::float_to_int16(&threshold, &i16_threshold, 1); - - // turn this into C++ vector and sort it based on amplitude - std::vector peaks; - for (uint8_t ix = 0; ix < peak_count; ix++) { - freq_peak_i16_t d; - // @todo: something somewhere does not go OK... and these numbers are dependent on - // the FFT length I think... But they are an OK approximation for now. 
- d.freq = freq_space.buffer[static_cast(peaks_matrix.buffer[ix])]; - d.amplitude = fft_matrix->buffer[peaks_matrix.buffer[ix]]; - - if (d.amplitude < i16_threshold) { - d.freq = 0.0f; - d.amplitude = 0.0f; - } - peaks.push_back(d); - } - sort(peaks.begin(), peaks.end(), - [](const freq_peak_i16_t & a, const freq_peak_i16_t & b) -> bool - { - return a.amplitude > b.amplitude; - }); - - // fill with zeros at the end (if needed) - for (size_t ix = peaks.size(); ix < output_matrix->rows; ix++) { - freq_peak_i16_t d; - d.freq = 0; - d.amplitude = 0; - peaks.push_back(d); - } - - for (size_t row = 0; row < output_matrix->rows; row++) { - // col 0 is freq, col 1 is ampl - output_matrix->buffer[row * output_matrix->cols + 0] = (peaks[row].freq); - output_matrix->buffer[row * output_matrix->cols + 1] = (peaks[row].amplitude); - } - - return EIDSP_OK; - } - - - static int find_fft_peaks( - matrix_i32_t *fft_matrix, - matrix_i32_t *output_matrix, - float sampling_freq, - float threshold, - uint16_t fft_length) - { - if (fft_matrix->rows != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (output_matrix->cols != 2) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - int ret; - - int N = static_cast(fft_length); - float T = 1.0f / sampling_freq; - - EIDSP_i32 stop_point; - float stop = (((1.0f / (2.0f * T)))/N); - numpy::float_to_int32(&stop, &stop_point, 1); - - EI_DSP_i32_MATRIX(freq_space, 1, fft_matrix->cols); - ret = numpy::linspace(0, stop_point, (N >> 1), freq_space.buffer); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } - - EI_DSP_i32_MATRIX(peaks_matrix, output_matrix->rows * 4, 1); - - uint16_t peak_count; - ret = find_peak_indexes(fft_matrix, &peaks_matrix, 0, &peak_count); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } - - EIDSP_i32 i32_threshold; - threshold /= fft_length; - numpy::float_to_int32(&threshold, &i32_threshold, 1); - - // turn this into C++ vector and sort it based on amplitude - std::vector peaks; - for (uint8_t ix = 0; ix < peak_count; ix++) { - freq_peak_i32_t d; - // @todo: something somewhere does not go OK... and these numbers are dependent on - // the FFT length I think... But they are an OK approximation for now. 
- d.freq = freq_space.buffer[static_cast(peaks_matrix.buffer[ix])]; - d.amplitude = fft_matrix->buffer[peaks_matrix.buffer[ix]]; - - if (d.amplitude < i32_threshold) { - d.freq = 0.0f; - d.amplitude = 0.0f; - } - peaks.push_back(d); - } - sort(peaks.begin(), peaks.end(), - [](const freq_peak_i32_t & a, const freq_peak_i32_t & b) -> bool - { - return a.amplitude > b.amplitude; - }); - - // fill with zeros at the end (if needed) - for (size_t ix = peaks.size(); ix < output_matrix->rows; ix++) { - freq_peak_i32_t d; - d.freq = 0; - d.amplitude = 0; - peaks.push_back(d); - } - - for (size_t row = 0; row < output_matrix->rows; row++) { - // col 0 is freq, col 1 is ampl - output_matrix->buffer[row * output_matrix->cols + 0] = (peaks[row].freq); - output_matrix->buffer[row * output_matrix->cols + 1] = (peaks[row].amplitude); - } - - return EIDSP_OK; - } /** * Calculate spectral power edges in a singal @@ -652,68 +362,6 @@ namespace processing { return EIDSP_OK; } - int spectral_power_edges( - matrix_i16_t *fft_matrix, - matrix_i16_t *freq_matrix, - matrix_i16_t *edges_matrix, - matrix_i16_t *output_matrix, - float sampling_freq - ) { - if (fft_matrix->rows != 1 || freq_matrix->rows != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (edges_matrix->cols != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (output_matrix->rows != edges_matrix->rows - 1 || output_matrix->cols != edges_matrix->cols) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (fft_matrix->cols != freq_matrix->cols) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - EI_DSP_i16_MATRIX(buckets, 1, edges_matrix->rows - 1); - EI_DSP_i16_MATRIX(bucket_count, 1, edges_matrix->rows - 1); - - for (uint16_t ix = 0; ix < freq_matrix->cols; ix++) { - int16_t t = freq_matrix->buffer[ix]; - int16_t v = fft_matrix->buffer[ix]; - - // does this fit between any edges? - for (uint16_t ex = 0; ex < edges_matrix->rows - 1; ex++) { - if (t >= edges_matrix->buffer[ex] && t < edges_matrix->buffer[ex + 1]) { - buckets.buffer[ex] += v; - bucket_count.buffer[ex]++; - break; - } - } - } - - // average out and push to vector - for (uint16_t ex = 0; ex < edges_matrix->rows - 1; ex++) { - if (bucket_count.buffer[ex] == 0) { - output_matrix->buffer[ex] = 0; - } - else { - int neg = 0; - - if(buckets.buffer[ex] & 0x8000) { - buckets.buffer[ex] &= ~0x8000; - neg = 1; - } - output_matrix->buffer[ex] = buckets.buffer[ex] / bucket_count.buffer[ex]; - - if(neg) { - output_matrix->buffer[ex] |= 0x8000; - } - } - } - - return EIDSP_OK; - } /** * Estimate power spectral density using a periodogram using Welch's method. 
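A small self-contained sketch (not part of the patch) of the feature-count bookkeeping in the v1 path above: calculate_spectral_buffer_size() reserves one slot for the RMS, two per FFT peak, and one per adjacent pair of spectral edges, and extract_spectral_analysis_features_v1 expects the output matrix to hold exactly that many values per axis. The counts below (3 peaks, 4 edges, 3 axes) are assumed example values:

#include <cstddef>
#include <cstdio>

// Mirrors calculate_spectral_buffer_size() from the hunk above.
static std::size_t spectral_buffer_size(bool rms, std::size_t peaks, std::size_t edges) {
    std::size_t count = rms ? 1 : 0;       // RMS slot
    count += peaks * 2;                    // frequency + amplitude per FFT peak
    if (edges > 0) count += edges - 1;     // one power bucket per adjacent pair of edges
    return count;
}

int main() {
    std::size_t per_axis = spectral_buffer_size(true, 3, 4);   // 1 + 6 + 3 = 10 features
    std::printf("features: %zu per axis, %zu total\n", per_axis, per_axis * 3);
    return 0;
}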
@@ -816,103 +464,22 @@ namespace processing { return EIDSP_OK; } - int periodogram(matrix_i16_t *input_matrix, matrix_i16_t *out_fft_matrix, matrix_i16_t *out_freq_matrix, float sampling_freq, uint16_t n_fft) - { - if (input_matrix->rows != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (out_fft_matrix->rows != 1 || out_fft_matrix->cols != (uint32_t)n_fft / 2 + 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (out_freq_matrix->rows != 1 || out_freq_matrix->cols != (uint32_t)n_fft / 2 + 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - if (input_matrix->buffer == NULL) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - if (out_fft_matrix->buffer == NULL) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - if (out_freq_matrix->buffer == NULL) { - EIDSP_ERR(EIDSP_OUT_OF_MEM); - } - - // map over the input buffer, so we can manipulate the number of columns - EI_DSP_i16_MATRIX_B(welch_matrix, input_matrix->rows, input_matrix->cols, input_matrix->buffer); - - uint16_t nperseg = n_fft; - - if (n_fft > input_matrix->cols) { - nperseg = input_matrix->cols; - } - // make the column align to nperseg in this case - else if (n_fft < input_matrix->cols) { - welch_matrix.cols = n_fft; - } - - EI_DSP_i16_MATRIX(triage_segments, 1, nperseg); - for (uint16_t ix = 0; ix < nperseg; ix++) { - triage_segments.buffer[ix] = 1.0f; - } - - int16_t scale = static_cast((1.0f / (sampling_freq)) * (1<<15)); - float freq_in_fft = (1.0f / (n_fft * (1.0f / sampling_freq))); - - /* Create frequency buffer, scale to 0 - 1 for q15 */ - for (uint16_t ix = 0; ix < n_fft / 2 + 1; ix++) { - float scaled_freq_in_fft = freq_in_fft / (sampling_freq / 2.f); - out_freq_matrix->buffer[ix] = static_cast((static_cast(ix) * scaled_freq_in_fft) * (1<<15)); - } - - int ret; - - // now we need to detrend... which is done constant so just subtract the mean - EI_DSP_i16_MATRIX(mean_matrix, 1, 1); - ret = numpy::mean(&welch_matrix, &mean_matrix); - if (ret != EIDSP_OK) { - EIDSP_ERR(ret); - } - - ret = numpy::subtract(&welch_matrix, &mean_matrix); + static int subtract_mean(matrix_t* input_matrix) { + // calculate the mean + EI_DSP_MATRIX(mean_matrix, input_matrix->rows, 1); + int ret = numpy::mean(input_matrix, &mean_matrix); if (ret != EIDSP_OK) { - EIDSP_ERR(ret); + EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } - fft_complex_i16_t *fft_output = (fft_complex_i16_t*)ei_dsp_calloc((n_fft / 2 + 1) * sizeof(fft_complex_i16_t), 1); - ret = numpy::rfft(welch_matrix.buffer, welch_matrix.cols, fft_output, n_fft / 2 + 1, n_fft); + // scale by the mean + ret = numpy::subtract(input_matrix, &mean_matrix); if (ret != EIDSP_OK) { - ei_dsp_free(fft_output, (n_fft / 2 + 1) * sizeof(fft_complex_i16_t)); - EIDSP_ERR(ret); - } - - // conjugate and then multiply with itself and scale - for (uint16_t ix = 0; ix < n_fft / 2 + 1; ix++) { - int16_t i_squared = numpy::saturate(((int32_t)fft_output[ix].i * fft_output[ix].i) >> 15, 16) & 0x7FFF; - - fft_output[ix].r = numpy::saturate(((int32_t)fft_output[ix].r * fft_output[ix].r) >> 15, 16) + i_squared; - - fft_output[ix].i = 0.0f; - - fft_output[ix].r = numpy::saturate(((int32_t)fft_output[ix].r * scale) >> 15, 16); - - if (ix != n_fft / 2) { - fft_output[ix].r = numpy::saturate((int32_t)fft_output[ix].r * 2, 16); - } - - // then multiply by itself... 
- out_fft_matrix->buffer[ix] = fft_output[ix].r; + EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } - ei_dsp_free(fft_output, (n_fft / 2 + 1) * sizeof(fft_complex_i16_t)); - return EIDSP_OK; } - } // namespace processing } // namespace spectral } // namespace ei diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/signal.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/signal.hpp new file mode 100644 index 0000000..37fb0e9 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/signal.hpp @@ -0,0 +1,356 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#pragma once + +#include "edge-impulse-sdk/dsp/ei_vector.h" +#include +#include + +namespace ei { + +/** + * @brief Class for signal processing. + * tries to mimic scipy.signal + * + * @todo: call CMSIS DSP functions if available + */ +class signal { +public: + using fvec = ei_vector; + + static void scale(fvec &x, float a) + { + for (size_t ix = 0; ix < x.size(); ix++) { + x[ix] *= a; + } + } + + /** + * @brief Decimate a signal using a IIR filter + * This is the counterpart of scipy.signal.decimate with zero-phase=false. This function + * is not recommended for larger decimation factors, as it will have stability issues. + * Use the SOS version instead. 
+ * @param input Input signal + * @param output Output signal + * @param factor Decimation factor + * @param b Numerator coefficients + * @param a Denominator coefficients + * @param zi Initial conditions + */ + static void decimate_simple( + const fvec &input, + fvec &output, + size_t factor, + const fvec &b, + const fvec &a, + const fvec &zi) + { + fvec d = zi; + scale(d, input[0]); + + fvec filtered(input.size()); + lfilter(b, a, input, filtered, d); + + size_t output_size = input.size() / factor; + output.resize(output_size); + + for (size_t ix = 0; ix < output_size; ix++) { + output[ix] = filtered[ix * factor]; + } + } + + static size_t get_decimated_size(size_t input_size, size_t factor) + { + return (input_size + factor - 1) / factor; + } + + struct sosfilt { + const float *coeff; // 6 * num_sections coefficients + float* zi; + fvec zi_vec; // 2 * num_sections initial conditions + size_t num_sections; + + sosfilt(const float *coeff_, const float *zi_, size_t num_sections_) + : coeff(coeff_), + zi_vec(zi_, zi_ + (num_sections_ * 2)), + num_sections(num_sections_) + { + } + + void update(const float *coeff_, const float *zi_) + { + coeff = coeff_; + zi_vec.assign(zi_, zi_ + (num_sections * 2)); + } + + /** + * @brief IIR filters in second-order sections. + * This is the counterpart of scipy.signal.sosfilt . + * @param input Input signal + * @param output Output signal. Can be the same as input for in place + * @param x_size Minimum size of input and output signal + */ + void run(const float *input, const size_t size, float* output) + { + assert(num_sections > 0); + + iir2(input, output, size, coeff, coeff + 3, zi_vec.data()); + + for (size_t sect = 1; sect < num_sections; sect++) { + iir2( + output, + output, + size, + coeff + sect * 6, + coeff + sect * 6 + 3, + zi_vec.data() + sect * 2); + } + } + + void init(float x0) + { + for (size_t sect = 0; sect < num_sections; sect++) { + zi_vec.data()[sect * 2] *= x0; + zi_vec.data()[sect * 2 + 1] *= x0; + } + } + }; + + /** + * @brief Decimate a signal using a IIR filter with second-order sections + * This is the counterpart of scipy.signal.decimate with zero-phase=false. + * @param input Input signal + * @param output Output signal + * @param factor Decimation factor + * @param sos Second-order section + */ + static void decimate_simple( + const float *input, + const size_t input_size, + float *output, + const size_t output_size, + size_t factor, + sosfilt &sos) + { + sos.init(input[0]); + + fvec filtered(input_size); + sos.run(input, input_size, filtered.data()); + + size_t expected_size = get_decimated_size(input_size, factor); + assert(output_size >= expected_size); + + for (size_t ix = 0; ix < expected_size; ix++) { + output[ix] = filtered[ix * factor]; + } + } + + /** + * @brief Linear filter. + * This is the counterpart of scipy.signal.lfilter with zero-phase=false. This function + * is not recommended for high order filters or cutoff close to boundaries, as it will + * have stability issues. Use the sosfilt instead. + * @param input Input signal + * @param output Output signal + * @param b Numerator coefficients + * @param a Denominator coefficients + * @param zi Initial conditions + */ + static void lfilter(const fvec &b, const fvec &a, const fvec &x, fvec &y, fvec &d) + { + /* + a[0]*y[n] = b[0] * x[n] + d[0][n-1] + d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1] + d[1][n] = b[2] * x[n] - a[2] * y[n] + d[2][n-1] + ... 
+ d[N-2][n] = b[N-1]*x[n] - a[N-1]*y[n] + d[N-1][n-1] + d[N-1][n] = b[N] * x[n] - a[N] * y[n] + */ + + assert(b.size() == a.size() && b.size() == d.size() + 1); + assert(d.size() > 0); + assert(y.size() >= x.size()); + assert(a[0] != 0.0f); + + const float one_over_a0 = 1.0f / a[0]; + for (size_t ix = 0; ix < x.size(); ix++) { + const float xx = x[ix]; + y[ix] = b[0] * xx + d[0]; + y[ix] *= one_over_a0; + size_t jx; + for (jx = 1; jx < b.size() - 1; jx++) { + d[jx - 1] = b[jx] * xx - a[jx] * y[ix] + d[jx]; + } + d[jx - 1] = b[jx] * xx - a[jx] * y[ix]; + } + } + + static void iir2(const float *x, float *y, size_t n, const float *b, const float *a, float *d) + { + /* + a[0]*y[n] = b[0] * x[n] + d[0][n-1] + d[0][n] = b[1] * x[n] - a[1] * y[n] + d[1][n-1] + d[1][n] = b[2] * x[n] - a[2] * y[n] + */ + const float one_over_a0 = 1.0f / a[0]; + for (size_t ix = 0; ix < n; ix++) { + const float xx = x[ix]; + y[ix] = b[0] * xx + d[0]; + y[ix] *= one_over_a0; + d[0] = b[1] * xx - a[1] * y[ix] + d[1]; + d[1] = b[2] * xx - a[2] * y[ix]; + } + } + + static int gcd(int a, int b) + { + if (b == 0) + return a; + return gcd(b, a % b); + } + + /** + * @brief Upsample, FIR and downsample. + * This is the counterpart of scipy.signal.upfirdn without the padding. + * @param y Input signal + * @param y Output signal + * @param h FIR coefficients + */ + static void upfirdn(const float * x, size_t x_size, fvec &y, int up, int down, const fvec &h) + { + assert(up > 0); + assert(down > 0); + assert(h.size() > 0); + +#if 0 // bug in optimized version + const int N = (h.size() - 1) / 2; + + for (size_t n = 0; n < y.size(); n++) { + float acc = 0.0f; + for (size_t k = 0; k < h.size(); k += up) { + const size_t x_ind = n * down + k - N; + if (x_ind >= 0 && x_ind < x.size()) { + acc += h[k] * x[x_ind]; + } + } + y[n] = acc; + } +#else + int nx = x_size; + int nh = h.size(); + + // Upsample the input signal by inserting zeros + fvec r(up * nx); + for (int i = 0; i < nx; i++) + { + r[i * up] = x[i]; + } + + // Filter the upsampled signal using the given filter coefficients + fvec z(nh + up * nx - 1); + for (int i = 0; i < up * nx; i++) + { + for (int j = 0; j < nh; j++) + { + if (i - j >= 0 && i - j < up * nx) + { + z[i] += r[i - j] * h[j]; + } + } + } + + // Downsample the filtered signal by skipping samples + int skip = (nh - 1) / 2; + for (size_t i = 0; i < y.size(); i++) + { + y[i] = z[i * down + skip]; + } +#endif + + } + + /** + * @brief Resample using a polyphase FIR. + * This is the counterpart of scipy.signal.resample_poly. + * @param input Input signal + * @param output Output signal, will be moved from an internal vector sized correctly. + * @param window FIR coefficients. e.g. signal.firwin(2 * half_len + 1, f_c, window=('kaiser', 5.0)) + */ + static void resample_poly(const float* input, size_t input_size, fvec &output, int up, int down, const fvec &window) + { + assert(up > 0); + assert(down > 0); + assert(window.size() > 0 && (window.size() % 2) == 1); + + int gcd_up_down = gcd(up, down); + up /= gcd_up_down; + down /= gcd_up_down; + + if (up == 1 && down == 1) { + // output = std::move(fvec(input, input + input_size)); + output = fvec(input, input + input_size); + return; + } + + int n_out = (input_size * up); + n_out = n_out / down + (n_out % down == 0 ? 
0 : 1); + + fvec h = window; + scale(h, float(up)); + + output.resize(n_out); + upfirdn(input, input_size, output, up, down, h); + } + + static void calc_decimation_ratios( + const char *filter_type, + float filter_cutoff, + float sample_rate, + std::vector &ratios) + { + if (strcmp(filter_type, "low") == 0) { + ratios = {1}; + return; + } + + static const std::vector supported = {1000, 100, 30, 10, 3}; + for (size_t i = 0; i < supported.size(); i++) { + const int r = supported[i]; + if (sample_rate * 0.5f / r > filter_cutoff) { + if (r == 3 || r == 10) { + ratios = {r}; + } else if (r == 30) { + ratios = {3, 10}; + } else if (r == 100) { + ratios = {10, 10}; + } else if (r == 1000) { + ratios = {10, 10, 10}; + } + return; + } + } + + } +}; + +} // namespace ei diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/spectral.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/spectral.hpp index 70e595e..0c8b876 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/spectral.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/spectral.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_SPECTRAL_SPECTRAL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/wavelet.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/wavelet.hpp new file mode 100644 index 0000000..ba19b29 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/wavelet.hpp @@ -0,0 +1,354 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2022 EdgeImpulse Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. + */ + +#pragma once + +#include "edge-impulse-sdk/dsp/ei_vector.h" + +#include "processing.hpp" +#include "wavelet_coeff.hpp" + +namespace ei { +namespace spectral { + +using fvec = ei_vector; + +inline float dot(const float *x, const float *y, size_t sz) +{ + float sum = 0.0f; + for (size_t i = 0; i < sz; i++) { + sum += x[i] * y[i]; + } + return sum; +} + +inline void histo(const fvec &x, size_t nbins, fvec &h, bool normalize = false) +{ + float min = *std::min_element(x.begin(), x.end()); + float max = *std::max_element(x.begin(), x.end()); + float step = (max - min) / nbins; + h.resize(nbins); + for (size_t i = 0; i < x.size(); i++) { + size_t bin = (x[i] - min) / step; + if (bin >= nbins) + bin = nbins - 1; + h[bin]++; + } + if (normalize) { + float s = numpy::sum(h.data(), h.size()); + for (size_t i = 0; i < nbins; i++) { + h[i] /= s; + } + } +} + +class wavelet { + + static constexpr size_t NUM_FEATHERS_PER_COMP = 14; + + template + static void get_filter(const std::array, 2> wav, fvec &h, fvec &g) + { + size_t n = wav[0].size(); + h.resize(n); + g.resize(n); + for (size_t i = 0; i < n; i++) { + h[i] = wav[0][n - i - 1]; + g[i] = wav[1][n - i - 1]; + } + } + + static void find_filter(const char *wav, fvec &h, fvec &g) + { + if (strcmp(wav, "bior1.3") == 0) get_filter<6>(bior1p3, h, g); + else if (strcmp(wav, "bior1.5") == 0) get_filter<10>(bior1p5, h, g); + else if (strcmp(wav, "bior2.2") == 0) get_filter<6>(bior2p2, h, g); + else if (strcmp(wav, "bior2.4") == 0) get_filter<10>(bior2p4, h, g); + else if (strcmp(wav, "bior2.6") == 0) get_filter<14>(bior2p6, h, g); + else if (strcmp(wav, "bior2.8") == 0) get_filter<18>(bior2p8, h, g); + else if (strcmp(wav, "bior3.1") == 0) get_filter<4>(bior3p1, h, g); + else if (strcmp(wav, "bior3.3") == 0) get_filter<8>(bior3p3, h, g); + else if (strcmp(wav, "bior3.5") == 0) get_filter<12>(bior3p5, h, g); + else if (strcmp(wav, "bior3.7") == 0) get_filter<16>(bior3p7, h, g); + else if (strcmp(wav, "bior3.9") == 0) get_filter<20>(bior3p9, h, g); + else if (strcmp(wav, "bior4.4") == 0) get_filter<10>(bior4p4, h, g); + else if (strcmp(wav, "bior5.5") == 0) get_filter<12>(bior5p5, h, g); + else if (strcmp(wav, "bior6.8") == 0) get_filter<18>(bior6p8, h, g); + else if (strcmp(wav, "coif1") == 0) get_filter<6>(coif1, h, g); + else if (strcmp(wav, "coif2") == 0) get_filter<12>(coif2, h, g); + else if (strcmp(wav, "coif3") == 0) get_filter<18>(coif3, h, g); + else if (strcmp(wav, 
"db2") == 0) get_filter<4>(db2, h, g); + else if (strcmp(wav, "db3") == 0) get_filter<6>(db3, h, g); + else if (strcmp(wav, "db4") == 0) get_filter<8>(db4, h, g); + else if (strcmp(wav, "db5") == 0) get_filter<10>(db5, h, g); + else if (strcmp(wav, "db6") == 0) get_filter<12>(db6, h, g); + else if (strcmp(wav, "db7") == 0) get_filter<14>(db7, h, g); + else if (strcmp(wav, "db8") == 0) get_filter<16>(db8, h, g); + else if (strcmp(wav, "db9") == 0) get_filter<18>(db9, h, g); + else if (strcmp(wav, "db10") == 0) get_filter<20>(db10, h, g); + else if (strcmp(wav, "haar") == 0) get_filter<2>(haar, h, g); + else if (strcmp(wav, "rbio1.3") == 0) get_filter<6>(rbio1p3, h, g); + else if (strcmp(wav, "rbio1.5") == 0) get_filter<10>(rbio1p5, h, g); + else if (strcmp(wav, "rbio2.2") == 0) get_filter<6>(rbio2p2, h, g); + else if (strcmp(wav, "rbio2.4") == 0) get_filter<10>(rbio2p4, h, g); + else if (strcmp(wav, "rbio2.6") == 0) get_filter<14>(rbio2p6, h, g); + else if (strcmp(wav, "rbio2.8") == 0) get_filter<18>(rbio2p8, h, g); + else if (strcmp(wav, "rbio3.1") == 0) get_filter<4>(rbio3p1, h, g); + else if (strcmp(wav, "rbio3.3") == 0) get_filter<8>(rbio3p3, h, g); + else if (strcmp(wav, "rbio3.5") == 0) get_filter<12>(rbio3p5, h, g); + else if (strcmp(wav, "rbio3.7") == 0) get_filter<16>(rbio3p7, h, g); + else if (strcmp(wav, "rbio3.9") == 0) get_filter<20>(rbio3p9, h, g); + else if (strcmp(wav, "rbio4.4") == 0) get_filter<10>(rbio4p4, h, g); + else if (strcmp(wav, "rbio5.5") == 0) get_filter<12>(rbio5p5, h, g); + else if (strcmp(wav, "rbio6.8") == 0) get_filter<18>(rbio6p8, h, g); + else if (strcmp(wav, "sym2") == 0) get_filter<4>(sym2, h, g); + else if (strcmp(wav, "sym3") == 0) get_filter<6>(sym3, h, g); + else if (strcmp(wav, "sym4") == 0) get_filter<8>(sym4, h, g); + else if (strcmp(wav, "sym5") == 0) get_filter<10>(sym5, h, g); + else if (strcmp(wav, "sym6") == 0) get_filter<12>(sym6, h, g); + else if (strcmp(wav, "sym7") == 0) get_filter<14>(sym7, h, g); + else if (strcmp(wav, "sym8") == 0) get_filter<16>(sym8, h, g); + else if (strcmp(wav, "sym9") == 0) get_filter<18>(sym9, h, g); + else if (strcmp(wav, "sym10") == 0) get_filter<20>(sym10, h, g); + else assert(0); // wavelet not in the list + } + + static void calculate_entropy(const fvec &y, fvec &features) + { + fvec h; + histo(y, 100, h, true); + // entropy = -sum(prob * log(prob) + float entropy = 0.0f; + for (size_t i = 0; i < h.size(); i++) { + if (h[i] > 0.0f) { + entropy -= h[i] * log(h[i]); + } + } + features.push_back(entropy); + } + + static float get_percentile_from_sorted(const fvec &sorted, float percentile) + { + // adding 0.5 is a trick to get rounding out of C flooring behavior during cast + size_t index = (size_t) ((percentile * (sorted.size()-1)) + 0.5); + return sorted[index]; + } + + static void calculate_statistics(const fvec &y, fvec &features, float mean) + { + fvec sorted = y; + std::sort(sorted.begin(), sorted.end()); + features.push_back(get_percentile_from_sorted(sorted,0.05)); + features.push_back(get_percentile_from_sorted(sorted,0.25)); + features.push_back(get_percentile_from_sorted(sorted,0.75)); + features.push_back(get_percentile_from_sorted(sorted,0.95)); + features.push_back(get_percentile_from_sorted(sorted,0.5)); + + matrix_t x(1, y.size(), const_cast(y.data())); + matrix_t out(1, 1); + + features.push_back(mean); + if (numpy::stdev(&x, &out) == EIDSP_OK) + features.push_back(out.get_row_ptr(0)[0]); + features.push_back(numpy::variance(const_cast(y.data()), y.size())); + if (numpy::rms(&x, &out) == 
EIDSP_OK) + features.push_back(out.get_row_ptr(0)[0]); + if (numpy::skew(&x, &out) == EIDSP_OK) + features.push_back(out.get_row_ptr(0)[0]); + if (numpy::kurtosis(&x, &out) == EIDSP_OK) + features.push_back(out.get_row_ptr(0)[0]); + } + + static void calculate_crossings(const fvec &y, fvec &features, float mean) + { + size_t zc = 0; + for (size_t i = 1; i < y.size(); i++) { + if (y[i] * y[i - 1] < 0) { + zc++; + } + } + features.push_back(zc / (float)y.size()); + + size_t mc = 0; + for (size_t i = 1; i < y.size(); i++) { + if ((y[i] - mean) * (y[i - 1] - mean) < 0) { + mc++; + } + } + features.push_back(mc / (float)y.size()); + } + + static void + dwt(const float *x, size_t nx, const float *h, const float *g, size_t nh, fvec &a, fvec &d) + { + assert(nh <= 20 && nh > 0 && nx > 0); + size_t nx_padded = nx + nh * 2 - 2; + fvec x_padded(nx_padded); + + // symmetric padding (default in PyWavelet) + for (size_t i = 0; i < nh - 2; i++) + x_padded[i] = x[nh - 3 - i]; + for (size_t i = 0; i < nx; i++) + x_padded[i + nh - 2] = x[i]; + for (size_t i = 0; i < nh; i++) + x_padded[i + nx + nh - 2] = x[nx - 1 - i]; + + size_t ny = (nx + nh - 1) / 2; + a.resize(ny); + d.resize(ny); + + // decimate and filter + const float *xx = x_padded.data(); + for (size_t i = 0; i < ny; i++) { + a[i] = dot(xx + 2 * i, h, nh); + d[i] = dot(xx + 2 * i, g, nh); + } + + numpy::underflow_handling(d.data(), d.size()); + numpy::underflow_handling(a.data(), a.size()); + } + + static void extract_features(fvec& y, fvec &features) + { + matrix_t x(1, y.size(), const_cast(y.data())); + matrix_t out(1, 1); + if (numpy::mean(&x, &out) != EIDSP_OK) + assert(0); + float mean = out.get_row_ptr(0)[0]; + + calculate_entropy(y, features); + calculate_crossings(y, features, mean); + calculate_statistics(y, features, mean); + } + + static void + wavedec_features(const float *x, int len, const char *wav, int level, fvec &features) + { + assert(level > 0 && level < 8); + + fvec h; + fvec g; + find_filter(wav, h, g); + + features.clear(); + fvec a; + fvec d; + dwt(x, len, h.data(), g.data(), h.size(), a, d); + extract_features(d, features); + + for (int l = 1; l < level; l++) { + dwt(a.data(), a.size(), h.data(), g.data(), h.size(), a, d); + extract_features(d, features); + } + + extract_features(a, features); + + for (int l = 0; l <= level / 2; l++) { // reverse order to match python results. + for (int i = 0; i < (int)NUM_FEATHERS_PER_COMP; i++) { + std::swap( + features[l * NUM_FEATHERS_PER_COMP + i], + features[(level - l) * NUM_FEATHERS_PER_COMP + i]); + } + } + } + + static int dwt_features(const float *x, int len, const char *wav, int level, fvec &features) + { + assert(level <= 7); + + assert(features.size() == 0); // make sure features is empty + features.reserve((level + 1) * NUM_FEATHERS_PER_COMP); + + wavedec_features(x, len, wav, level, features); + + return features.size(); + } + + static bool check_min_size(int len, int level) + { + int min_size = 32 * (1 << level); + return (len >= min_size); + } + +public: + static int extract_wavelet_features( + matrix_t *input_matrix, + matrix_t *output_matrix, + ei_dsp_config_spectral_analysis_t *config, + const float sampling_freq) + { + // transpose the matrix so we have one row per axis + numpy::transpose_in_place(input_matrix); + + // func tests for scale of 1 and does a no op in that case + EI_TRY(numpy::scale(input_matrix, config->scale_axes)); + + // apply filter, if enabled + // "zero" order filter allowed. 
will still remove unwanted fft bins later + if (strcmp(config->filter_type, "low") == 0) { + if (config->filter_order) { + EI_TRY(spectral::processing::butterworth_lowpass_filter( + input_matrix, + sampling_freq, + config->filter_cutoff, + config->filter_order)); + } + } + else if (strcmp(config->filter_type, "high") == 0) { + if (config->filter_order) { + EI_TRY(spectral::processing::butterworth_highpass_filter( + input_matrix, + sampling_freq, + config->filter_cutoff, + config->filter_order)); + } + } + + EI_TRY(processing::subtract_mean(input_matrix)); + + int out_idx = 0; + for (size_t row = 0; row < input_matrix->rows; row++) { + float *data_window = input_matrix->get_row_ptr(row); + size_t data_size = input_matrix->cols; + + if (!check_min_size(data_size, config->wavelet_level)) + EIDSP_ERR(EIDSP_BUFFER_SIZE_MISMATCH); + + fvec features; + size_t num_features = dwt_features( + data_window, + data_size, + config->wavelet, + config->wavelet_level, + features); + + assert(num_features == output_matrix->cols / input_matrix->rows); + for (size_t i = 0; i < num_features; i++) { + output_matrix->buffer[out_idx++] = features[i]; + } + } + return EIDSP_OK; + } +}; + +} +} diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/wavelet_coeff.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/wavelet_coeff.hpp new file mode 100644 index 0000000..63616fc --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/spectral/wavelet_coeff.hpp @@ -0,0 +1,282 @@ +/* Edge Impulse inferencing library + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
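The dwt() helper above symmetrically pads the input (PyWavelets' default mode), then filters with the reversed low-pass/high-pass pair and decimates by two, producing one approximation band a and one detail band d; wavedec_features() repeats this on a for every additional level and pushes 14 features per band. A minimal standalone sketch of a single level with the haar pair, using std::vector in place of the SDK's ei_vector/fvec types and skipping the underflow handling, might look like this:

// Illustrative only: one DWT level with the haar pair, done with std::vector
// instead of the SDK's ei_vector / fvec types, and without underflow handling.
#include <cstdio>
#include <cstddef>
#include <vector>

int main()
{
    // haar filters in the reversed order that get_filter<2>(haar, h, g) produces
    const std::vector<float> h = { 0.7071067811865476f,  0.7071067811865476f }; // low-pass
    const std::vector<float> g = { 0.7071067811865476f, -0.7071067811865476f }; // high-pass

    const std::vector<float> x = { 1.f, 2.f, 3.f, 4.f, 5.f, 6.f, 7.f, 8.f };
    const size_t nh = h.size();
    const size_t nx = x.size();

    // symmetric padding as in dwt(): nh - 2 leading samples, nh trailing samples
    // (for haar, nh - 2 == 0, so only the tail pad is added)
    std::vector<float> xp;
    for (size_t i = 0; i < nh - 2; i++) xp.push_back(x[nh - 3 - i]);
    for (size_t i = 0; i < nx; i++)     xp.push_back(x[i]);
    for (size_t i = 0; i < nh; i++)     xp.push_back(x[nx - 1 - i]);

    // filter and decimate by two: a = approximation band, d = detail band
    const size_t ny = (nx + nh - 1) / 2;
    std::vector<float> a(ny), d(ny);
    for (size_t i = 0; i < ny; i++) {
        float sa = 0.0f, sd = 0.0f;
        for (size_t k = 0; k < nh; k++) {
            sa += xp[2 * i + k] * h[k];
            sd += xp[2 * i + k] * g[k];
        }
        a[i] = sa;
        d[i] = sd;
    }

    for (size_t i = 0; i < ny; i++)
        std::printf("a[%zu] = %.4f   d[%zu] = %.4f\n", i, a[i], i, d[i]);
    return 0;
}

For this input, a[0] comes out as (1 + 2)/sqrt(2) ≈ 2.12 and d[0] as (1 - 2)/sqrt(2) ≈ -0.71, which is what dwt() would produce for the same samples before underflow handling.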
+ */ + +#pragma once + +#include + +namespace ei{ +namespace spectral{ +//generated by autotune.export + +static const std::array, 2> bior1p3 = {{ + {{-0.08838834764831845, 0.08838834764831845, 0.7071067811865476, 0.7071067811865476, 0.08838834764831845, -0.08838834764831845}}, + {{-0.0, 0.0, -0.7071067811865476, 0.7071067811865476, -0.0, 0.0}} +}}; + +static const std::array, 2> bior1p5 = {{ + {{0.016572815184059706, -0.016572815184059706, -0.12153397801643785, 0.12153397801643785, 0.7071067811865476, 0.7071067811865476, 0.12153397801643785, -0.12153397801643785, -0.016572815184059706, 0.016572815184059706}}, + {{-0.0, 0.0, -0.0, 0.0, -0.7071067811865476, 0.7071067811865476, -0.0, 0.0, -0.0, 0.0}} +}}; + +static const std::array, 2> bior2p2 = {{ + {{0.0, -0.1767766952966369, 0.3535533905932738, 1.0606601717798212, 0.3535533905932738, -0.1767766952966369}}, + {{-0.0, 0.3535533905932738, -0.7071067811865476, 0.3535533905932738, -0.0, 0.0}} +}}; + +static const std::array, 2> bior2p4 = {{ + {{0.0, 0.03314563036811941, -0.06629126073623882, -0.1767766952966369, 0.4198446513295126, 0.9943689110435825, 0.4198446513295126, -0.1767766952966369, -0.06629126073623882, 0.03314563036811941}}, + {{-0.0, 0.0, -0.0, 0.3535533905932738, -0.7071067811865476, 0.3535533905932738, -0.0, 0.0, -0.0, 0.0}} +}}; + +static const std::array, 2> bior2p6 = {{ + {{0.0, -0.006905339660024878, 0.013810679320049757, 0.04695630968816917, -0.1077232986963881, -0.16987135563661201, 0.4474660099696121, 0.966747552403483, 0.4474660099696121, -0.16987135563661201, -0.1077232986963881, 0.04695630968816917, 0.013810679320049757, -0.006905339660024878}}, + {{-0.0, 0.0, -0.0, 0.0, -0.0, 0.3535533905932738, -0.7071067811865476, 0.3535533905932738, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0}} +}}; + +static const std::array, 2> bior2p8 = {{ + {{0.0, 0.0015105430506304422, -0.0030210861012608843, -0.012947511862546647, 0.02891610982635418, 0.05299848189069094, -0.13491307360773605, -0.16382918343409023, 0.46257144047591653, 0.9516421218971786, 0.46257144047591653, -0.16382918343409023, -0.13491307360773605, 0.05299848189069094, 0.02891610982635418, -0.012947511862546647, -0.0030210861012608843, 0.0015105430506304422}}, + {{-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.3535533905932738, -0.7071067811865476, 0.3535533905932738, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0}} +}}; + +static const std::array, 2> bior3p1 = {{ + {{-0.3535533905932738, 1.0606601717798212, 1.0606601717798212, -0.3535533905932738}}, + {{-0.1767766952966369, 0.5303300858899106, -0.5303300858899106, 0.1767766952966369}} +}}; + +static const std::array, 2> bior3p3 = {{ + {{0.06629126073623882, -0.1988737822087165, -0.15467960838455727, 0.9943689110435825, 0.9943689110435825, -0.15467960838455727, -0.1988737822087165, 0.06629126073623882}}, + {{-0.0, 0.0, -0.1767766952966369, 0.5303300858899106, -0.5303300858899106, 0.1767766952966369, -0.0, 0.0}} +}}; + +static const std::array, 2> bior3p5 = {{ + {{-0.013810679320049757, 0.04143203796014927, 0.052480581416189075, -0.26792717880896527, -0.07181553246425873, 0.966747552403483, 0.966747552403483, -0.07181553246425873, -0.26792717880896527, 0.052480581416189075, 0.04143203796014927, -0.013810679320049757}}, + {{-0.0, 0.0, -0.0, 0.0, -0.1767766952966369, 0.5303300858899106, -0.5303300858899106, 0.1767766952966369, -0.0, 0.0, -0.0, 0.0}} +}}; + +static const std::array, 2> bior3p7 = {{ + {{0.0030210861012608843, -0.009063258303782653, -0.01683176542131064, 0.074663985074019, 0.03133297870736289, -0.301159125922835, 
-0.02649924094534547, 0.9516421218971786, 0.9516421218971786, -0.02649924094534547, -0.301159125922835, 0.03133297870736289, 0.074663985074019, -0.01683176542131064, -0.009063258303782653, 0.0030210861012608843}}, + {{-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.1767766952966369, 0.5303300858899106, -0.5303300858899106, 0.1767766952966369, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0}} +}}; + +static const std::array, 2> bior3p9 = {{ + {{-0.0006797443727836989, 0.002039233118351097, 0.005060319219611981, -0.020618912641105536, -0.014112787930175844, 0.09913478249423216, 0.012300136269419315, -0.32019196836077857, 0.0020500227115698858, 0.9421257006782068, 0.9421257006782068, 0.0020500227115698858, -0.32019196836077857, 0.012300136269419315, 0.09913478249423216, -0.014112787930175844, -0.020618912641105536, 0.005060319219611981, 0.002039233118351097, -0.0006797443727836989}}, + {{-0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.1767766952966369, 0.5303300858899106, -0.5303300858899106, 0.1767766952966369, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0, -0.0, 0.0}} +}}; + +static const std::array, 2> bior4p4 = {{ + {{0.0, 0.03782845550726404, -0.023849465019556843, -0.11062440441843718, 0.37740285561283066, 0.8526986790088938, 0.37740285561283066, -0.11062440441843718, -0.023849465019556843, 0.03782845550726404}}, + {{-0.0, -0.06453888262869706, 0.04068941760916406, 0.41809227322161724, -0.7884856164055829, 0.41809227322161724, 0.04068941760916406, -0.06453888262869706, -0.0, 0.0}} +}}; + +static const std::array, 2> bior5p5 = {{ + {{0.0, 0.0, 0.03968708834740544, 0.007948108637240322, -0.05446378846823691, 0.34560528195603346, 0.7366601814282105, 0.34560528195603346, -0.05446378846823691, 0.007948108637240322, 0.03968708834740544, 0.0}}, + {{-0.013456709459118716, -0.002694966880111507, 0.13670658466432914, -0.09350469740093886, -0.47680326579848425, 0.8995061097486484, -0.47680326579848425, -0.09350469740093886, 0.13670658466432914, -0.002694966880111507, -0.013456709459118716, 0.0}} +}}; + +static const std::array, 2> bior6p8 = {{ + {{0.0, 0.0019088317364812906, -0.0019142861290887667, -0.016990639867602342, 0.01193456527972926, 0.04973290349094079, -0.07726317316720414, -0.09405920349573646, 0.4207962846098268, 0.8259229974584023, 0.4207962846098268, -0.09405920349573646, -0.07726317316720414, 0.04973290349094079, 0.01193456527972926, -0.016990639867602342, -0.0019142861290887667, 0.0019088317364812906}}, + {{-0.0, 0.0, -0.0, 0.014426282505624435, -0.014467504896790148, -0.07872200106262882, 0.04036797903033992, 0.41784910915027457, -0.7589077294536541, 0.41784910915027457, 0.04036797903033992, -0.07872200106262882, -0.014467504896790148, 0.014426282505624435, -0.0, 0.0, -0.0, 0.0}} +}}; + +static const std::array, 2> coif1 = {{ + {{-0.015655728135791993, -0.07273261951252645, 0.3848648468648578, 0.8525720202116004, 0.3378976624574818, -0.07273261951252645}}, + {{0.07273261951252645, 0.3378976624574818, -0.8525720202116004, 0.3848648468648578, 0.07273261951252645, -0.015655728135791993}} +}}; + +static const std::array, 2> coif2 = {{ + {{-0.000720549445520347, -0.0018232088709110323, 0.005611434819368834, 0.02368017194684777, -0.05943441864643109, -0.07648859907828076, 0.4170051844232391, 0.8127236354494135, 0.3861100668227629, -0.0673725547237256, -0.04146493678687178, 0.01638733646320364}}, + {{-0.01638733646320364, -0.04146493678687178, 0.0673725547237256, 0.3861100668227629, -0.8127236354494135, 0.4170051844232391, 0.07648859907828076, -0.05943441864643109, -0.02368017194684777, 0.005611434819368834, 
0.0018232088709110323, -0.000720549445520347}} +}}; + +static const std::array, 2> coif3 = {{ + {{-3.459977319727278e-05, -7.0983302506379e-05, 0.0004662169598204029, 0.0011175187708306303, -0.0025745176881367972, -0.009007976136730624, 0.015880544863669452, 0.03455502757329774, -0.08230192710629983, -0.07179982161915484, 0.42848347637737, 0.7937772226260872, 0.40517690240911824, -0.06112339000297255, -0.06577191128146936, 0.023452696142077168, 0.007782596425672746, -0.003793512864380802}}, + {{0.003793512864380802, 0.007782596425672746, -0.023452696142077168, -0.06577191128146936, 0.06112339000297255, 0.40517690240911824, -0.7937772226260872, 0.42848347637737, 0.07179982161915484, -0.08230192710629983, -0.03455502757329774, 0.015880544863669452, 0.009007976136730624, -0.0025745176881367972, -0.0011175187708306303, 0.0004662169598204029, 7.0983302506379e-05, -3.459977319727278e-05}} +}}; + +static const std::array, 2> db2 = {{ + {{-0.12940952255126037, 0.2241438680420134, 0.8365163037378079, 0.48296291314453416}}, + {{-0.48296291314453416, 0.8365163037378079, -0.2241438680420134, -0.12940952255126037}} +}}; + +static const std::array, 2> db3 = {{ + {{0.03522629188570953, -0.08544127388202666, -0.13501102001025458, 0.45987750211849154, 0.8068915093110925, 0.33267055295008263}}, + {{-0.33267055295008263, 0.8068915093110925, -0.45987750211849154, -0.13501102001025458, 0.08544127388202666, 0.03522629188570953}} +}}; + +static const std::array, 2> db4 = {{ + {{-0.010597401785069032, 0.0328830116668852, 0.030841381835560764, -0.18703481171909309, -0.027983769416859854, 0.6308807679298589, 0.7148465705529157, 0.2303778133088965}}, + {{-0.2303778133088965, 0.7148465705529157, -0.6308807679298589, -0.027983769416859854, 0.18703481171909309, 0.030841381835560764, -0.0328830116668852, -0.010597401785069032}} +}}; + +static const std::array, 2> db5 = {{ + {{0.0033357252854737712, -0.012580751999081999, -0.006241490212798274, 0.07757149384004572, -0.032244869584638375, -0.24229488706638203, 0.13842814590132074, 0.7243085284377729, 0.6038292697971896, 0.16010239797419293}}, + {{-0.16010239797419293, 0.6038292697971896, -0.7243085284377729, 0.13842814590132074, 0.24229488706638203, -0.032244869584638375, -0.07757149384004572, -0.006241490212798274, 0.012580751999081999, 0.0033357252854737712}} +}}; + +static const std::array, 2> db6 = {{ + {{-0.0010773010853084796, 0.004777257510945511, 0.0005538422011614961, -0.03158203931748603, 0.027522865530305727, 0.09750160558732304, -0.12976686756726194, -0.22626469396543983, 0.31525035170919763, 0.7511339080210954, 0.49462389039845306, 0.11154074335010947}}, + {{-0.11154074335010947, 0.49462389039845306, -0.7511339080210954, 0.31525035170919763, 0.22626469396543983, -0.12976686756726194, -0.09750160558732304, 0.027522865530305727, 0.03158203931748603, 0.0005538422011614961, -0.004777257510945511, -0.0010773010853084796}} +}}; + +static const std::array, 2> db7 = {{ + {{0.00035371379997452024, -0.0018016407040474908, 0.0004295779729213665, 0.01255099855609984, -0.01657454163066688, -0.03802993693501441, 0.08061260915108308, 0.07130921926683026, -0.22403618499387498, -0.14390600392856498, 0.4697822874051931, 0.7291320908462351, 0.3965393194819173, 0.07785205408500918}}, + {{-0.07785205408500918, 0.3965393194819173, -0.7291320908462351, 0.4697822874051931, 0.14390600392856498, -0.22403618499387498, -0.07130921926683026, 0.08061260915108308, 0.03802993693501441, -0.01657454163066688, -0.01255099855609984, 0.0004295779729213665, 0.0018016407040474908, 
0.00035371379997452024}} +}}; + +static const std::array, 2> db8 = {{ + {{-0.00011747678412476953, 0.0006754494064505693, -0.00039174037337694705, -0.004870352993451574, 0.008746094047405777, 0.013981027917398282, -0.044088253930794755, -0.017369301001807547, 0.12874742662047847, 0.0004724845739132828, -0.2840155429615469, -0.015829105256349306, 0.5853546836542067, 0.6756307362972898, 0.31287159091429995, 0.05441584224310401}}, + {{-0.05441584224310401, 0.31287159091429995, -0.6756307362972898, 0.5853546836542067, 0.015829105256349306, -0.2840155429615469, -0.0004724845739132828, 0.12874742662047847, 0.017369301001807547, -0.044088253930794755, -0.013981027917398282, 0.008746094047405777, 0.004870352993451574, -0.00039174037337694705, -0.0006754494064505693, -0.00011747678412476953}} +}}; + +static const std::array, 2> db9 = {{ + {{3.93473203162716e-05, -0.0002519631889427101, 0.00023038576352319597, 0.0018476468830562265, -0.00428150368246343, -0.004723204757751397, 0.022361662123679096, 0.00025094711483145197, -0.06763282906132997, 0.03072568147933338, 0.14854074933810638, -0.09684078322297646, -0.2932737832791749, 0.13319738582500756, 0.6572880780513005, 0.6048231236901112, 0.24383467461259034, 0.038077947363878345}}, + {{-0.038077947363878345, 0.24383467461259034, -0.6048231236901112, 0.6572880780513005, -0.13319738582500756, -0.2932737832791749, 0.09684078322297646, 0.14854074933810638, -0.03072568147933338, -0.06763282906132997, -0.00025094711483145197, 0.022361662123679096, 0.004723204757751397, -0.00428150368246343, -0.0018476468830562265, 0.00023038576352319597, 0.0002519631889427101, 3.93473203162716e-05}} +}}; + +static const std::array, 2> db10 = {{ + {{-1.3264202894521244e-05, 9.358867032006959e-05, -0.00011646685512928545, -0.0006858566949597116, 0.001992405295185056, 0.001395351747052901, -0.010733175483330575, 0.0036065535669561697, 0.033212674059341, -0.029457536821875813, -0.07139414716639708, 0.09305736460357235, 0.12736934033579325, -0.19594627437737705, -0.24984642432731538, 0.2811723436605775, 0.6884590394536035, 0.5272011889317256, 0.1881768000776915, 0.026670057900555554}}, + {{-0.026670057900555554, 0.1881768000776915, -0.5272011889317256, 0.6884590394536035, -0.2811723436605775, -0.24984642432731538, 0.19594627437737705, 0.12736934033579325, -0.09305736460357235, -0.07139414716639708, 0.029457536821875813, 0.033212674059341, -0.0036065535669561697, -0.010733175483330575, -0.001395351747052901, 0.001992405295185056, 0.0006858566949597116, -0.00011646685512928545, -9.358867032006959e-05, -1.3264202894521244e-05}} +}}; + +static const std::array, 2> haar = {{ + {{0.7071067811865476, 0.7071067811865476}}, + {{-0.7071067811865476, 0.7071067811865476}} +}}; + +static const std::array, 2> rbio1p3 = {{ + {{0.0, 0.0, 0.7071067811865476, 0.7071067811865476, 0.0, 0.0}}, + {{0.08838834764831845, 0.08838834764831845, -0.7071067811865476, 0.7071067811865476, -0.08838834764831845, -0.08838834764831845}} +}}; + +static const std::array, 2> rbio1p5 = {{ + {{0.0, 0.0, 0.0, 0.0, 0.7071067811865476, 0.7071067811865476, 0.0, 0.0, 0.0, 0.0}}, + {{-0.016572815184059706, -0.016572815184059706, 0.12153397801643785, 0.12153397801643785, -0.7071067811865476, 0.7071067811865476, -0.12153397801643785, -0.12153397801643785, 0.016572815184059706, 0.016572815184059706}} +}}; + +static const std::array, 2> rbio2p2 = {{ + {{0.0, 0.0, 0.3535533905932738, 0.7071067811865476, 0.3535533905932738, 0.0}}, + {{0.1767766952966369, 0.3535533905932738, -1.0606601717798212, 0.3535533905932738, 
0.1767766952966369, 0.0}} +}}; + +static const std::array, 2> rbio2p4 = {{ + {{0.0, 0.0, 0.0, 0.0, 0.3535533905932738, 0.7071067811865476, 0.3535533905932738, 0.0, 0.0, 0.0}}, + {{-0.03314563036811941, -0.06629126073623882, 0.1767766952966369, 0.4198446513295126, -0.9943689110435825, 0.4198446513295126, 0.1767766952966369, -0.06629126073623882, -0.03314563036811941, 0.0}} +}}; + +static const std::array, 2> rbio2p6 = {{ + {{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3535533905932738, 0.7071067811865476, 0.3535533905932738, 0.0, 0.0, 0.0, 0.0, 0.0}}, + {{0.006905339660024878, 0.013810679320049757, -0.04695630968816917, -0.1077232986963881, 0.16987135563661201, 0.4474660099696121, -0.966747552403483, 0.4474660099696121, 0.16987135563661201, -0.1077232986963881, -0.04695630968816917, 0.013810679320049757, 0.006905339660024878, 0.0}} +}}; + +static const std::array, 2> rbio2p8 = {{ + {{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.3535533905932738, 0.7071067811865476, 0.3535533905932738, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}}, + {{-0.0015105430506304422, -0.0030210861012608843, 0.012947511862546647, 0.02891610982635418, -0.05299848189069094, -0.13491307360773605, 0.16382918343409023, 0.46257144047591653, -0.9516421218971786, 0.46257144047591653, 0.16382918343409023, -0.13491307360773605, -0.05299848189069094, 0.02891610982635418, 0.012947511862546647, -0.0030210861012608843, -0.0015105430506304422, 0.0}} +}}; + +static const std::array, 2> rbio3p1 = {{ + {{0.1767766952966369, 0.5303300858899106, 0.5303300858899106, 0.1767766952966369}}, + {{0.3535533905932738, 1.0606601717798212, -1.0606601717798212, -0.3535533905932738}} +}}; + +static const std::array, 2> rbio3p3 = {{ + {{0.0, 0.0, 0.1767766952966369, 0.5303300858899106, 0.5303300858899106, 0.1767766952966369, 0.0, 0.0}}, + {{-0.06629126073623882, -0.1988737822087165, 0.15467960838455727, 0.9943689110435825, -0.9943689110435825, -0.15467960838455727, 0.1988737822087165, 0.06629126073623882}} +}}; + +static const std::array, 2> rbio3p5 = {{ + {{0.0, 0.0, 0.0, 0.0, 0.1767766952966369, 0.5303300858899106, 0.5303300858899106, 0.1767766952966369, 0.0, 0.0, 0.0, 0.0}}, + {{0.013810679320049757, 0.04143203796014927, -0.052480581416189075, -0.26792717880896527, 0.07181553246425873, 0.966747552403483, -0.966747552403483, -0.07181553246425873, 0.26792717880896527, 0.052480581416189075, -0.04143203796014927, -0.013810679320049757}} +}}; + +static const std::array, 2> rbio3p7 = {{ + {{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1767766952966369, 0.5303300858899106, 0.5303300858899106, 0.1767766952966369, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}}, + {{-0.0030210861012608843, -0.009063258303782653, 0.01683176542131064, 0.074663985074019, -0.03133297870736289, -0.301159125922835, 0.02649924094534547, 0.9516421218971786, -0.9516421218971786, -0.02649924094534547, 0.301159125922835, 0.03133297870736289, -0.074663985074019, -0.01683176542131064, 0.009063258303782653, 0.0030210861012608843}} +}}; + +static const std::array, 2> rbio3p9 = {{ + {{0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.1767766952966369, 0.5303300858899106, 0.5303300858899106, 0.1767766952966369, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0}}, + {{0.0006797443727836989, 0.002039233118351097, -0.005060319219611981, -0.020618912641105536, 0.014112787930175844, 0.09913478249423216, -0.012300136269419315, -0.32019196836077857, -0.0020500227115698858, 0.9421257006782068, -0.9421257006782068, 0.0020500227115698858, 0.32019196836077857, 0.012300136269419315, -0.09913478249423216, -0.014112787930175844, 0.020618912641105536, 0.005060319219611981, 
-0.002039233118351097, -0.0006797443727836989}} +}}; + +static const std::array, 2> rbio4p4 = {{ + {{0.0, 0.0, -0.06453888262869706, -0.04068941760916406, 0.41809227322161724, 0.7884856164055829, 0.41809227322161724, -0.04068941760916406, -0.06453888262869706, 0.0}}, + {{-0.03782845550726404, -0.023849465019556843, 0.11062440441843718, 0.37740285561283066, -0.8526986790088938, 0.37740285561283066, 0.11062440441843718, -0.023849465019556843, -0.03782845550726404, 0.0}} +}}; + +static const std::array, 2> rbio5p5 = {{ + {{0.0, 0.013456709459118716, -0.002694966880111507, -0.13670658466432914, -0.09350469740093886, 0.47680326579848425, 0.8995061097486484, 0.47680326579848425, -0.09350469740093886, -0.13670658466432914, -0.002694966880111507, 0.013456709459118716}}, + {{-0.0, 0.03968708834740544, -0.007948108637240322, -0.05446378846823691, -0.34560528195603346, 0.7366601814282105, -0.34560528195603346, -0.05446378846823691, -0.007948108637240322, 0.03968708834740544, -0.0, 0.0}} +}}; + +static const std::array, 2> rbio6p8 = {{ + {{0.0, 0.0, 0.0, 0.0, 0.014426282505624435, 0.014467504896790148, -0.07872200106262882, -0.04036797903033992, 0.41784910915027457, 0.7589077294536541, 0.41784910915027457, -0.04036797903033992, -0.07872200106262882, 0.014467504896790148, 0.014426282505624435, 0.0, 0.0, 0.0}}, + {{-0.0019088317364812906, -0.0019142861290887667, 0.016990639867602342, 0.01193456527972926, -0.04973290349094079, -0.07726317316720414, 0.09405920349573646, 0.4207962846098268, -0.8259229974584023, 0.4207962846098268, 0.09405920349573646, -0.07726317316720414, -0.04973290349094079, 0.01193456527972926, 0.016990639867602342, -0.0019142861290887667, -0.0019088317364812906, 0.0}} +}}; + +static const std::array, 2> sym2 = {{ + {{-0.12940952255092145, 0.22414386804185735, 0.836516303737469, 0.48296291314469025}}, + {{-0.48296291314469025, 0.836516303737469, -0.22414386804185735, -0.12940952255092145}} +}}; + +static const std::array, 2> sym3 = {{ + {{0.035226291882100656, -0.08544127388224149, -0.13501102001039084, 0.4598775021193313, 0.8068915093133388, 0.3326705529509569}}, + {{-0.3326705529509569, 0.8068915093133388, -0.4598775021193313, -0.13501102001039084, 0.08544127388224149, 0.035226291882100656}} +}}; + +static const std::array, 2> sym4 = {{ + {{-0.07576571478927333, -0.02963552764599851, 0.49761866763201545, 0.8037387518059161, 0.29785779560527736, -0.09921954357684722, -0.012603967262037833, 0.0322231006040427}}, + {{-0.0322231006040427, -0.012603967262037833, 0.09921954357684722, 0.29785779560527736, -0.8037387518059161, 0.49761866763201545, 0.02963552764599851, -0.07576571478927333}} +}}; + +static const std::array, 2> sym5 = {{ + {{0.027333068345077982, 0.029519490925774643, -0.039134249302383094, 0.1993975339773936, 0.7234076904024206, 0.6339789634582119, 0.01660210576452232, -0.17532808990845047, -0.021101834024758855, 0.019538882735286728}}, + {{-0.019538882735286728, -0.021101834024758855, 0.17532808990845047, 0.01660210576452232, -0.6339789634582119, 0.7234076904024206, -0.1993975339773936, -0.039134249302383094, -0.029519490925774643, 0.027333068345077982}} +}}; + +static const std::array, 2> sym6 = {{ + {{0.015404109327027373, 0.0034907120842174702, -0.11799011114819057, -0.048311742585633, 0.4910559419267466, 0.787641141030194, 0.3379294217276218, -0.07263752278646252, -0.021060292512300564, 0.04472490177066578, 0.0017677118642428036, -0.007800708325034148}}, + {{0.007800708325034148, 0.0017677118642428036, -0.04472490177066578, -0.021060292512300564, 0.07263752278646252, 
0.3379294217276218, -0.787641141030194, 0.4910559419267466, 0.048311742585633, -0.11799011114819057, -0.0034907120842174702, 0.015404109327027373}} +}}; + +static const std::array, 2> sym7 = {{ + {{0.002681814568257878, -0.0010473848886829163, -0.01263630340325193, 0.03051551316596357, 0.0678926935013727, -0.049552834937127255, 0.017441255086855827, 0.5361019170917628, 0.767764317003164, 0.2886296317515146, -0.14004724044296152, -0.10780823770381774, 0.004010244871533663, 0.010268176708511255}}, + {{-0.010268176708511255, 0.004010244871533663, 0.10780823770381774, -0.14004724044296152, -0.2886296317515146, 0.767764317003164, -0.5361019170917628, 0.017441255086855827, 0.049552834937127255, 0.0678926935013727, -0.03051551316596357, -0.01263630340325193, 0.0010473848886829163, 0.002681814568257878}} +}}; + +static const std::array, 2> sym8 = {{ + {{-0.0033824159510061256, -0.0005421323317911481, 0.03169508781149298, 0.007607487324917605, -0.1432942383508097, -0.061273359067658524, 0.4813596512583722, 0.7771857517005235, 0.3644418948353314, -0.05194583810770904, -0.027219029917056003, 0.049137179673607506, 0.003808752013890615, -0.01495225833704823, -0.0003029205147213668, 0.0018899503327594609}}, + {{-0.0018899503327594609, -0.0003029205147213668, 0.01495225833704823, 0.003808752013890615, -0.049137179673607506, -0.027219029917056003, 0.05194583810770904, 0.3644418948353314, -0.7771857517005235, 0.4813596512583722, 0.061273359067658524, -0.1432942383508097, -0.007607487324917605, 0.03169508781149298, 0.0005421323317911481, -0.0033824159510061256}} +}}; + +static const std::array, 2> sym9 = {{ + {{0.0014009155259146807, 0.0006197808889855868, -0.013271967781817119, -0.01152821020767923, 0.03022487885827568, 0.0005834627461258068, -0.05456895843083407, 0.238760914607303, 0.717897082764412, 0.6173384491409358, 0.035272488035271894, -0.19155083129728512, -0.018233770779395985, 0.06207778930288603, 0.008859267493400484, -0.010264064027633142, -0.0004731544986800831, 0.0010694900329086053}}, + {{-0.0010694900329086053, -0.0004731544986800831, 0.010264064027633142, 0.008859267493400484, -0.06207778930288603, -0.018233770779395985, 0.19155083129728512, 0.035272488035271894, -0.6173384491409358, 0.717897082764412, -0.238760914607303, -0.05456895843083407, -0.0005834627461258068, 0.03022487885827568, 0.01152821020767923, -0.013271967781817119, -0.0006197808889855868, 0.0014009155259146807}} +}}; + +static const std::array, 2> sym10 = {{ + {{0.0007701598091144901, 9.563267072289475e-05, -0.008641299277022422, -0.0014653825813050513, 0.0459272392310922, 0.011609893903711381, -0.15949427888491757, -0.07088053578324385, 0.47169066693843925, 0.7695100370211071, 0.38382676106708546, -0.03553674047381755, -0.0319900568824278, 0.04999497207737669, 0.005764912033581909, -0.02035493981231129, -0.0008043589320165449, 0.004593173585311828, 5.7036083618494284e-05, -0.0004593294210046588}}, + {{0.0004593294210046588, 5.7036083618494284e-05, -0.004593173585311828, -0.0008043589320165449, 0.02035493981231129, 0.005764912033581909, -0.04999497207737669, -0.0319900568824278, 0.03553674047381755, 0.38382676106708546, -0.7695100370211071, 0.47169066693843925, 0.07088053578324385, -0.15949427888491757, -0.011609893903711381, 0.0459272392310922, 0.0014653825813050513, -0.008641299277022422, -9.563267072289475e-05, 0.0007701598091144901}} +}}; + +} +} diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/feature.hpp 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/feature.hpp index f6442cf..89765b2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/feature.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/feature.hpp @@ -1,33 +1,31 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_SPEECHPY_FEATURE_H_ #define _EIDSP_SPEECHPY_FEATURE_H_ -#include #include +#include "../../porting/ei_classifier_porting.h" +#include "../ei_utils.h" #include "functions.hpp" #include "processing.hpp" #include "../memory.hpp" +#include "../returntypes.hpp" +#include "../ei_vector.h" namespace ei { namespace speechpy { @@ -169,6 +167,19 @@ class feature { return EIDSP_OK; } + /** + * @brief Get the fft bin index from hertz + * + * @param fft_size Size of fft + * @param hertz Desired hertz + * @param sampling_freq In Hz + * @return int the index of the bin closest to the hertz + */ + static int get_fft_bin_from_hertz(uint16_t fft_size, float hertz, uint32_t sampling_freq) + { + return static_cast(floor((fft_size + 1) * hertz / sampling_freq)); + } + /** * Compute Mel-filterbank energy features from an audio signal. * @param out_features Use `calculate_mfe_buffer_size` to allocate the right matrix. 
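get_fft_bin_from_hertz() maps a frequency to a power-spectrum bin as floor((fft_size + 1) * hertz / sampling_freq), and the mfe() implementation that follows applies it to num_filters + 2 edge frequencies spaced evenly on the mel scale. The sketch below is illustrative only: it inlines the frequency_to_mel / mel_to_frequency formulas from speechpy/functions.hpp, uses the version >= 4 convention of passing fft_length as the fft_size argument, and skips the clamping to [low_frequency, high_frequency] and the last-edge adjustment that mfe() performs.

// Illustrative only: mel-spaced edge frequencies and their FFT bins, as
// computed before mfe() builds the triangular filterbank.
#include <cmath>
#include <cstdio>

static float frequency_to_mel(float f)   { return 1127.0f * std::log(1.0f + f / 700.0f); }
static float mel_to_frequency(float mel) { return 700.0f * (std::exp(mel / 1127.0f) - 1.0f); }

static int bin_from_hertz(unsigned fft_size, float hertz, unsigned sampling_freq)
{
    // same formula as get_fft_bin_from_hertz()
    return (int)std::floor((fft_size + 1) * hertz / sampling_freq);
}

int main()
{
    const unsigned sampling_freq = 16000;  // Hz
    const unsigned fft_length    = 256;
    const unsigned num_filters   = 8;

    // num_filters + 2 points, evenly spaced on the mel scale (cf. numpy::linspace in mfe())
    const float mel_lo = frequency_to_mel(0.0f);
    const float mel_hi = frequency_to_mel(sampling_freq / 2.0f);
    for (unsigned i = 0; i < num_filters + 2; i++) {
        const float mel = mel_lo + (mel_hi - mel_lo) * i / (num_filters + 1);
        const float hz  = mel_to_frequency(mel);
        std::printf("edge %2u: %8.1f Hz -> bin %d\n",
                    i, hz, bin_from_hertz(fft_length, hz, sampling_freq));
    }
    return 0;
}

Each consecutive triple of these bins then becomes the (left, middle, right) support of one triangular mel filter in the loop inside mfe().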
@@ -203,6 +214,207 @@ class feature { high_frequency = sampling_frequency / 2; } + if (version<4) { + if (low_frequency == 0) { + low_frequency = 300; + } + } + + stack_frames_info_t stack_frame_info = { 0 }; + stack_frame_info.signal = signal; + + ret = processing::stack_frames( + &stack_frame_info, + sampling_frequency, + frame_length, + frame_stride, + false, + version + ); + if (ret != 0) { + EIDSP_ERR(ret); + } + + if (stack_frame_info.frame_ixs.size() != out_features->rows) { + EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + } + + if (num_filters != out_features->cols) { + EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + } + + if (out_energies) { + if (stack_frame_info.frame_ixs.size() != out_energies->rows || out_energies->cols != 1) { + EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + } + } + + for (uint32_t i = 0; i < out_features->rows * out_features->cols; i++) { + *(out_features->buffer + i) = 0; + } + + const size_t power_spectrum_frame_size = (fft_length / 2 + 1); + // Computing the Mel filterbank + // converting the upper and lower frequencies to Mels. + // num_filter + 2 is because for num_filter filterbanks we need + // num_filter+2 point. + float *mels; + const int MELS_SIZE = num_filters + 2; + mels = (float*)ei_calloc(MELS_SIZE, sizeof(float)); + EI_ERR_AND_RETURN_ON_NULL(mels, EIDSP_OUT_OF_MEM); + ei_unique_ptr_t __ptr__(mels,ei_free); + uint16_t* bins = reinterpret_cast(mels); // alias the mels array so we can reuse the space + + numpy::linspace( + functions::frequency_to_mel(static_cast(low_frequency)), + functions::frequency_to_mel(static_cast(high_frequency)), + num_filters + 2, + mels); + + uint16_t max_bin = version >= 4 ? fft_length : power_spectrum_frame_size; // preserve a bug in v<4 + // go to -1 size b/c special handling, see after + for (uint16_t ix = 0; ix < MELS_SIZE-1; ix++) { + mels[ix] = functions::mel_to_frequency(mels[ix]); + if (mels[ix] < low_frequency) { + mels[ix] = low_frequency; + } + if (mels[ix] > high_frequency) { + mels[ix] = high_frequency; + } + bins[ix] = get_fft_bin_from_hertz(max_bin, mels[ix], sampling_frequency); + } + + // here is a really annoying bug in Speechpy which calculates the frequency index wrong for the last bucket + // the last 'hertz' value is not 8,000 (with sampling rate 16,000) but 7,999.999999 + // thus calculating the bucket to 64, not 65. + // we're adjusting this here a tiny bit to ensure we have the same result + mels[MELS_SIZE-1] = functions::mel_to_frequency(mels[MELS_SIZE-1]); + if (mels[MELS_SIZE-1] > high_frequency) { + mels[MELS_SIZE-1] = high_frequency; + } + mels[MELS_SIZE-1] -= 0.001; + bins[MELS_SIZE-1] = get_fft_bin_from_hertz(max_bin, mels[MELS_SIZE-1], sampling_frequency); + + EI_DSP_MATRIX(power_spectrum_frame, 1, power_spectrum_frame_size); + if (!power_spectrum_frame.buffer) { + EIDSP_ERR(EIDSP_OUT_OF_MEM); + } + + // get signal data from the audio file + EI_DSP_MATRIX(signal_frame, 1, stack_frame_info.frame_length); + + for (size_t ix = 0; ix < stack_frame_info.frame_ixs.size(); ix++) { + // don't read outside of the audio buffer... 
we'll automatically zero pad then + size_t signal_offset = stack_frame_info.frame_ixs.at(ix); + size_t signal_length = stack_frame_info.frame_length; + if (signal_offset + signal_length > stack_frame_info.signal->total_length) { + signal_length = signal_length - + (stack_frame_info.signal->total_length - (signal_offset + signal_length)); + } + + ret = stack_frame_info.signal->get_data( + signal_offset, + signal_length, + signal_frame.buffer + ); + if (ret != 0) { + EIDSP_ERR(ret); + } + + ret = numpy::power_spectrum( + signal_frame.buffer, + stack_frame_info.frame_length, + power_spectrum_frame.buffer, + power_spectrum_frame_size, + fft_length + ); + + if (ret != 0) { + EIDSP_ERR(ret); + } + + float energy = numpy::sum(power_spectrum_frame.buffer, power_spectrum_frame_size); + if (energy == 0) { + energy = 1e-10; + } + + if (out_energies) { + out_energies->buffer[ix] = energy; + } + + auto row_ptr = out_features->get_row_ptr(ix); + for (size_t i = 0; i < num_filters; i++) { + size_t left = bins[i]; + size_t middle = bins[i+1]; + size_t right = bins[i+2]; + + assert(right < power_spectrum_frame_size); + // now we have weights and locations to move from fft to mel sgram + // both left and right become zero weights, so skip them + + // middle always has weight of 1.0 + // since we skip left and right, if left = middle we need to handle that + row_ptr[i] = power_spectrum_frame.buffer[middle]; + + for (size_t bin = left+1; bin < right; bin++) { + if (bin < middle) { + row_ptr[i] += + ((static_cast(bin) - left) / (middle - left)) * // weight * + power_spectrum_frame.buffer[bin]; + } + // intentionally skip middle, handled above + if (bin > middle) { + row_ptr[i] += + ((right - static_cast(bin)) / (right - middle)) * // weight * + power_spectrum_frame.buffer[bin]; + } + } + } + + if (ret != 0) { + EIDSP_ERR(ret); + } + } + + numpy::zero_handling(out_features); + + return EIDSP_OK; + } + + /** + * Compute Mel-filterbank energy features from an audio signal. + * @param out_features Use `calculate_mfe_buffer_size` to allocate the right matrix. + * @param out_energies A matrix in the form of Mx1 where M is the rows from `calculate_mfe_buffer_size` + * @param signal: audio signal structure with functions to retrieve data from a signal + * @param sampling_frequency (int): the sampling frequency of the signal + * we are working with. + * @param frame_length (float): the length of each frame in seconds. + * Default is 0.020s + * @param frame_stride (float): the step between successive frames in seconds. + * Default is 0.02s (means no overlap) + * @param num_filters (int): the number of filters in the filterbank, + * default 40. + * @param fft_length (int): number of FFT points. Default is 512. + * @param low_frequency (int): lowest band edge of mel filters. + * In Hz, default is 0. + * @param high_frequency (int): highest band edge of mel filters. 
+ * In Hz, default is samplerate/2 + * @EIDSP_OK if OK + */ + static int mfe_v3(matrix_t *out_features, matrix_t *out_energies, + signal_t *signal, + uint32_t sampling_frequency, + float frame_length, float frame_stride, uint16_t num_filters, + uint16_t fft_length, uint32_t low_frequency, uint32_t high_frequency, + uint16_t version + ) + { + int ret = 0; + + if (high_frequency == 0) { + high_frequency = sampling_frequency / 2; + } + if (low_frequency == 0) { low_frequency = 300; } @@ -222,7 +434,7 @@ class feature { EIDSP_ERR(ret); } - if (stack_frame_info.frame_ixs->size() != out_features->rows) { + if (stack_frame_info.frame_ixs.size() != out_features->rows) { EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } @@ -230,8 +442,10 @@ class feature { EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } - if (stack_frame_info.frame_ixs->size() != out_energies->rows || out_energies->cols != 1) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + if (out_energies) { + if (stack_frame_info.frame_ixs.size() != out_energies->rows || out_energies->cols != 1) { + EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); + } } for (uint32_t i = 0; i < out_features->rows * out_features->cols; i++) { @@ -256,7 +470,7 @@ class feature { if (ret != 0) { EIDSP_ERR(ret); } - for (size_t ix = 0; ix < stack_frame_info.frame_ixs->size(); ix++) { + for (size_t ix = 0; ix < stack_frame_info.frame_ixs.size(); ix++) { size_t power_spectrum_frame_size = (fft_length / 2 + 1); EI_DSP_MATRIX(power_spectrum_frame, 1, power_spectrum_frame_size); @@ -268,7 +482,7 @@ class feature { EI_DSP_MATRIX(signal_frame, 1, stack_frame_info.frame_length); // don't read outside of the audio buffer... we'll automatically zero pad then - size_t signal_offset = stack_frame_info.frame_ixs->at(ix); + size_t signal_offset = stack_frame_info.frame_ixs.at(ix); size_t signal_length = stack_frame_info.frame_length; if (signal_offset + signal_length > stack_frame_info.signal->total_length) { signal_length = signal_length - @@ -284,7 +498,7 @@ class feature { EIDSP_ERR(ret); } - ret = processing::power_spectrum( + ret = numpy::power_spectrum( signal_frame.buffer, stack_frame_info.frame_length, power_spectrum_frame.buffer, @@ -298,10 +512,12 @@ class feature { float energy = numpy::sum(power_spectrum_frame.buffer, power_spectrum_frame_size); if (energy == 0) { - energy = FLT_EPSILON; + energy = 1e-10; } - out_energies->buffer[ix] = energy; + if (out_energies) { + out_energies->buffer[ix] = energy; + } // calculate the out_features directly here ret = numpy::dot_by_row( @@ -317,7 +533,7 @@ class feature { } } - functions::zero_handling(out_features); + numpy::zero_handling(out_features); return EIDSP_OK; } @@ -336,7 +552,7 @@ class feature { * @EIDSP_OK if OK */ static int spectrogram(matrix_t *out_features, - signal_t *signal, uint32_t sampling_frequency, + signal_t *signal, float sampling_frequency, float frame_length, float frame_stride, uint16_t fft_length, uint16_t version ) @@ -358,7 +574,7 @@ class feature { EIDSP_ERR(ret); } - if (stack_frame_info.frame_ixs->size() != out_features->rows) { + if (stack_frame_info.frame_ixs.size() != out_features->rows) { EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); } @@ -372,12 +588,12 @@ class feature { *(out_features->buffer + i) = 0; } - for (size_t ix = 0; ix < stack_frame_info.frame_ixs->size(); ix++) { + for (size_t ix = 0; ix < stack_frame_info.frame_ixs.size(); ix++) { // get signal data from the audio file EI_DSP_MATRIX(signal_frame, 1, stack_frame_info.frame_length); // don't read outside of the audio buffer... 
we'll automatically zero pad then - size_t signal_offset = stack_frame_info.frame_ixs->at(ix); + size_t signal_offset = stack_frame_info.frame_ixs.at(ix); size_t signal_length = stack_frame_info.frame_length; if (signal_offset + signal_length > stack_frame_info.signal->total_length) { signal_length = signal_length - @@ -393,8 +609,8 @@ class feature { EIDSP_ERR(ret); } - // normalize data (only when version is above 3) - if (version >= 3) { + // normalize data (only when version is 3) + if (version == 3) { // it might be that everything is already normalized here... bool all_between_min_1_and_1 = true; for (size_t ix = 0; ix < signal_frame.rows * signal_frame.cols; ix++) { @@ -412,7 +628,7 @@ class feature { } } - ret = processing::power_spectrum( + ret = numpy::power_spectrum( signal_frame.buffer, stack_frame_info.frame_length, out_features->buffer + (ix * coefficients), @@ -425,7 +641,7 @@ class feature { } } - functions::zero_handling(out_features); + numpy::zero_handling(out_features); return EIDSP_OK; } @@ -444,18 +660,18 @@ class feature { float frame_length, float frame_stride, uint16_t num_filters, uint16_t version) { - uint16_t rows = processing::calculate_no_of_stack_frames( + int32_t rows = processing::calculate_no_of_stack_frames( signal_length, sampling_frequency, frame_length, frame_stride, false, version); - uint16_t cols = num_filters; + int32_t cols = num_filters; matrix_size_t size_matrix; - size_matrix.rows = rows; - size_matrix.cols = cols; + size_matrix.rows = (uint32_t)rows; + size_matrix.cols = (uint32_t)cols; return size_matrix; } @@ -569,18 +785,18 @@ class feature { float frame_length, float frame_stride, uint16_t num_cepstral, uint16_t version) { - uint16_t rows = processing::calculate_no_of_stack_frames( + int32_t rows = processing::calculate_no_of_stack_frames( signal_length, sampling_frequency, frame_length, frame_stride, false, version); - uint16_t cols = num_cepstral; + int32_t cols = num_cepstral; matrix_size_t size_matrix; - size_matrix.rows = rows; - size_matrix.cols = cols; + size_matrix.rows = (uint32_t)rows; + size_matrix.cols = (uint32_t)cols; return size_matrix; } }; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/functions.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/functions.hpp index 54d11af..733c4ac 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/functions.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/functions.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_SPEECHPY_FUNCTIONS_H_ @@ -39,7 +34,11 @@ class functions { * @returns The mel scale values(or a single mel). */ static float frequency_to_mel(float f) { - return 1127.0 * numpy::log(1 + f / 700.0f); +#if EI_PORTING_RENESASRA65 == 1 + return 1127.0 * log(1.0 + f / 700.0f); +#else + return 1127.0 * numpy::log((1.0 + f / 700.0f)); +#endif } /** @@ -52,39 +51,16 @@ class functions { return 700.0f * (exp(mel / 1127.0f) - 1.0f); } - /** - * This function handle the issue with zero values if the are exposed - * to become an argument for any log function. - * @param input Array - * @param input_size Size of array - * @returns void - */ - static void zero_handling(float *input, size_t input_size) { - for (size_t ix = 0; ix < input_size; ix++) { - if (input[ix] == 0) { - input[ix] = FLT_EPSILON; - } - } - } - /** - * This function handle the issue with zero values if the are exposed - * to become an argument for any log function. - * @param input Matrix - * @returns void - */ - static void zero_handling(matrix_t *input) { - zero_handling(input->buffer, input->rows * input->cols); - } /** - * Triangle, I'm not really sure what this does + * Triangle, linear scale from left up to middle, then down to right * @param x Linspace output, will be overwritten! * @param x_size Size of the linspace output - * @param left - * @param middle - * @param right + * @param left Starting index (assigned 0) + * @param middle Index where 1.0 will be placed + * @param right Ending index (assigned 0) */ static int triangle(float *x, size_t x_size, int left, int middle, int right) { EI_DSP_MATRIX(out, 1, x_size); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/processing.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/processing.hpp index 136e583..5b34b1b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/processing.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/processing.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. 
* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_SPEECHPY_PROCESSING_H_ @@ -31,15 +26,8 @@ namespace speechpy { // one stack frame returned by stack_frames typedef struct ei_stack_frames_info { signal_t *signal; - std::vector *frame_ixs; + ei_vector frame_ixs; int frame_length; - - // start_ixs is owned by us - ~ei_stack_frames_info() { - if (frame_ixs) { - delete frame_ixs; - } - } } stack_frames_info_t; namespace processing { @@ -96,9 +84,6 @@ namespace processing { EIDSP_ERR(ret); } - // it might be that everything is already normalized here... - bool all_between_min_1_and_1 = true; - // now we have the signal and we can preemphasize for (size_t ix = 0; ix < length; ix++) { float now = out_buffer[ix]; @@ -112,12 +97,6 @@ namespace processing { out_buffer[ix] = now - (_cof * _prev_buffer[0]); } - if (_rescale && all_between_min_1_and_1) { - if (out_buffer[ix] < -1.0f || out_buffer[ix] > 1.0f) { - all_between_min_1_and_1 = false; - } - } - // roll through and overwrite last element if (_shift != 1) { numpy::roll(_prev_buffer, _shift, -1); @@ -128,7 +107,7 @@ namespace processing { _next_offset_should_be += length; // rescale from [-1 .. 1] ? - if (_rescale && !all_between_min_1_and_1) { + if (_rescale) { matrix_t scale_matrix(length, 1, out_buffer); ret = numpy::scale(&scale_matrix, 1.0f / 32768.0f); if (ret != 0) { @@ -224,7 +203,7 @@ namespace processing { * @param frame_stride (float): The stride between frames. 
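stack_frames_info_t now holds its frame start offsets by value in frame_ixs (an ei_vector instead of a heap-allocated std::vector pointer), and stack_frames() fills it with one start offset per analysis frame. A rough standalone sketch of that indexing follows; std::vector stands in for ei_vector, and the nearest-integer rounding of the sample counts plus the "no zero padding" frame count of (len - frame_len) / stride + 1 are assumptions of this sketch rather than details taken from the patch.

// Illustrative only: the frame-start indexing recorded in info->frame_ixs.
#include <cstdio>
#include <cstddef>
#include <vector>

int main()
{
    const float  sampling_frequency = 100.0f; // Hz (small numbers, for illustration)
    const float  frame_length       = 0.08f;  // seconds -> 8 samples
    const float  frame_stride       = 0.02f;  // seconds -> 2 samples
    const size_t len_sig            = 24;     // samples in the signal

    const size_t frame_sample_length = (size_t)(sampling_frequency * frame_length + 0.5f);
    const size_t stride_samples      = (size_t)(sampling_frequency * frame_stride + 0.5f);
    const size_t numframes = (len_sig - frame_sample_length) / stride_samples + 1;

    std::vector<size_t> frame_ixs;   // stands in for info->frame_ixs
    size_t frame_count = 0;
    for (size_t ix = 0; ix < len_sig; ix += stride_samples) {
        if (++frame_count > numframes)
            break;
        frame_ixs.push_back(ix);     // start offset of one analysis frame
    }

    for (size_t i = 0; i < frame_ixs.size(); i++)
        std::printf("frame %zu starts at sample %zu\n", i, frame_ixs[i]);
    return 0;
}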
* @returns Number of frames required, or a negative number if an error occured */ - static int calculate_signal_used( + __attribute__((unused)) static int calculate_signal_used( size_t signal_size, uint32_t sampling_frequency, float frame_length, @@ -279,7 +258,7 @@ namespace processing { * @returns EIDSP_OK if OK */ static int stack_frames(stack_frames_info_t *info, - uint32_t sampling_frequency, + float sampling_frequency, float frame_length, float frame_stride, bool zero_padding, @@ -326,18 +305,16 @@ namespace processing { info->signal->total_length = static_cast(len_sig); } - // alloc the vector on the heap, will be owned by the info struct - std::vector *frame_indices = new std::vector(); + info->frame_ixs.clear(); int frame_count = 0; for (size_t ix = 0; ix < static_cast(len_sig); ix += static_cast(frame_stride)) { if (++frame_count > numframes) break; - frame_indices->push_back(ix); + info->frame_ixs.push_back(ix); } - info->frame_ixs = frame_indices; info->frame_length = frame_sample_length; return EIDSP_OK; @@ -393,34 +370,6 @@ namespace processing { return numframes; } - /** - * Power spectrum of a frame - * @param frame Row of a frame - * @param frame_size Size of the frame - * @param out_buffer Out buffer, size should be fft_points - * @param out_buffer_size Buffer size - * @param fft_points (int): The length of FFT. If fft_length is greater than frame_len, the frames will be zero-padded. - * @returns EIDSP_OK if OK - */ - static int power_spectrum(float *frame, size_t frame_size, float *out_buffer, size_t out_buffer_size, uint16_t fft_points) - { - if (out_buffer_size != static_cast(fft_points / 2 + 1)) { - EIDSP_ERR(EIDSP_MATRIX_SIZE_MISMATCH); - } - - int r = numpy::rfft(frame, frame_size, out_buffer, out_buffer_size, fft_points); - if (r != EIDSP_OK) { - return r; - } - - for (size_t ix = 0; ix < out_buffer_size; ix++) { - out_buffer[ix] = (1.0 / static_cast(fft_points)) * - (out_buffer[ix] * out_buffer[ix]); - } - - return EIDSP_OK; - } - /** * This function performs local cepstral mean and * variance normalization on a sliding window. 
The code assumes that @@ -437,6 +386,10 @@ namespace processing { static int cmvnw(matrix_t *features_matrix, uint16_t win_size = 301, bool variance_normalization = false, bool scale = false) { + if (win_size == 0) { + return EIDSP_OK; + } + uint16_t pad_size = (win_size - 1) / 2; int ret; @@ -475,6 +428,25 @@ namespace processing { EIDSP_ERR(ret); } + // subtract the mean for the features + for (size_t fm_col = 0; fm_col < features_matrix->cols; fm_col++) { + features_matrix->buffer[(ix * features_matrix->cols) + fm_col] = + features_matrix->buffer[(ix * features_matrix->cols) + fm_col] - mean_matrix.buffer[fm_col]; + } + } + + ret = numpy::pad_1d_symmetric(features_matrix, &vec_pad, pad_size, pad_size); + if (ret != EIDSP_OK) { + EIDSP_ERR(ret); + } + + for (size_t ix = 0; ix < features_matrix->rows; ix++) { + // create a slice on the vec_pad + EI_DSP_MATRIX_B(window, win_size, vec_pad.cols, vec_pad.buffer + (ix * vec_pad.cols)); + if (!window.buffer) { + EIDSP_ERR(EIDSP_OUT_OF_MEM); + } + if (variance_normalization == true) { ret = numpy::std_axis0(&window, &window_variance); if (ret != EIDSP_OK) { @@ -483,15 +455,8 @@ namespace processing { features_buffer_ptr = &features_matrix->buffer[ix * vec_pad.cols]; for (size_t col = 0; col < vec_pad.cols; col++) { - *(features_buffer_ptr) = (*(features_buffer_ptr)-mean_matrix.buffer[col]) / - (window_variance.buffer[col] + FLT_EPSILON); - features_buffer_ptr++; - } - } - else { - features_buffer_ptr = &features_matrix->buffer[ix * vec_pad.cols]; - for (size_t col = 0; col < vec_pad.cols; col++) { - *(features_buffer_ptr) = *(features_buffer_ptr)-mean_matrix.buffer[col]; + *(features_buffer_ptr) = (*(features_buffer_ptr)) / + (window_variance.buffer[col] + 1e-10); features_buffer_ptr++; } } @@ -526,6 +491,17 @@ namespace processing { f += noise; f *= noise_scale; // clip again + + /* Here is the python code we're duplicating: + # Quantize to 8 bits and dequantize back to float32 + mfe = np.uint8(np.around(mfe * 2**8)) + # clip to 2**8 + mfe = np.clip(mfe, 0, 255) + mfe = np.float32(mfe / 2**8) + */ + + f = roundf(f*256)/256; + if (f < 0.0f) f = 0.0f; else if (f > 1.0f) f = 1.0f; features_matrix->buffer[ix] = f; @@ -539,7 +515,7 @@ namespace processing { * then add a hard filter * @param features_matrix input feature matrix, will be modified in place */ - static int spectrogram_normalization(matrix_t *features_matrix, int noise_floor_db) { + static int spectrogram_normalization(matrix_t *features_matrix, int noise_floor_db, bool clip_at_one) { const float noise = static_cast(noise_floor_db * -1); const float noise_scale = 1.0f / (static_cast(noise_floor_db * -1) + 12.0f); @@ -554,7 +530,7 @@ namespace processing { f *= noise_scale; // clip again if (f < 0.0f) f = 0.0f; - else if (f > 1.0f) f = 1.0f; + else if (f > 1.0f && clip_at_one) f = 1.0f; features_matrix->buffer[ix] = f; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/speechpy.hpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/speechpy.hpp index 845261d..c2ca9b6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/speechpy.hpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/dsp/speechpy/speechpy.hpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. 
* - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EIDSP_SPEECHPY_SPEECHPY_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/arduino/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/arduino/debug_log.cpp index c8bc9dc..794710e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/arduino/debug_log.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/arduino/debug_log.cpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
* - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #include "../ei_classifier_porting.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/arduino/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/arduino/ei_classifier_porting.cpp index e6f5cd4..af64f97 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/arduino/ei_classifier_porting.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/arduino/ei_classifier_porting.cpp @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #include "../ei_classifier_porting.h" @@ -46,6 +41,25 @@ uint64_t ei_read_timer_us() { return micros(); } +void ei_serial_set_baudrate(int baudrate) +{ + +} + +EI_WEAK_FN void ei_putchar(char c) +{ + Serial.write(c); +} + +EI_WEAK_FN char ei_getchar() +{ + char ch = 0; + if (Serial.available() > 0) { + ch = Serial.read(); + } + return ch; +} + /** * Printf function uses vsnprintf and output using Arduino Serial */ @@ -63,7 +77,7 @@ __attribute__((weak)) void ei_printf(const char *format, ...) 
{ } __attribute__((weak)) void ei_printf_float(float f) { - ei_printf("%f", f); + Serial.print(f, 6); } __attribute__((weak)) void *ei_malloc(size_t size) { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/brickml/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/brickml/debug_log.cpp new file mode 100644 index 0000000..d0da510 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/brickml/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_BRICKML == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_RENESASRA65 == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/brickml/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/brickml/ei_classifier_porting.cpp new file mode 100644 index 0000000..09b7485 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/brickml/ei_classifier_porting.cpp @@ -0,0 +1,178 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* Includes */ +#include "../ei_classifier_porting.h" +#if (EI_PORTING_BRICKML == 1) + +#include +#include +#include +#include "unistd.h" +#include "trace_use.h" +#include "comms.h" +#include + +#include "FreeRTOS.h" +#include "task.h" + +static void *pvPortCalloc(size_t sNb, size_t sSize); + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + + vTaskDelay(time_ms / portTICK_PERIOD_MS); + + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + + return xTaskGetTickCount(); +} + +uint64_t ei_read_timer_us() { + + return xTaskGetTickCount()*1000; +} + +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + + char buffer[256] = {0}; + int length; + va_list myargs; + va_start(myargs, format); + length = vsnprintf(buffer, sizeof(buffer), format, myargs); + va_end(myargs); + + if (length > 0){ + comms_send((uint8_t*)buffer, length, 100); + } + +} + +__attribute__((weak)) void ei_printf_float(float f) { + float n = f; + + static double PRECISION = 0.00001; + static int MAX_NUMBER_STRING_SIZE = 32; + + char s[MAX_NUMBER_STRING_SIZE]; + + if (n == 0.0) { + strcpy(s, "0"); + } + else { + int digit, m; + char *c = s; + int neg = (n < 0); + if (neg) { + n = -n; + } + // calculate magnitude + m = log10(n); + if (neg) { + *(c++) = '-'; + } + if (m < 1.0) { + m = 0; + } + // convert the number + while (n > PRECISION || m >= 0) { + double weight = pow(10.0, m); + if (weight > 0 && !isinf(weight)) { + digit = floor(n / weight); + n -= (digit * weight); + *(c++) = '0' + digit; + } + if (m == 0 && n > 0) { + *(c++) = '.'; + } + m--; + } + *(c) = '\0'; + } + + ei_printf("%s", s); +} + +/** + * + * @param c + */ +void ei_putchar(char c) +{ + ei_printf("%c", c); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return pvPortMalloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return pvPortCalloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + vPortFree(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +static void *pvPortCalloc(size_t sNb, size_t sSize) +{ + void *vPtr = NULL; + if (sSize > 0) { + vPtr = pvPortMalloc(sSize * sNb); // Call FreeRTOS or other standard API + if(vPtr) + memset(vPtr, 0, (sSize * sNb)); // Must required + } + return vPtr; +} + +void * operator new( size_t size ) +{ + return pvPortMalloc( size ); +} + +void * operator new[]( size_t size ) +{ + return pvPortMalloc(size); +} + +void operator delete( void * ptr ) +{ + vPortFree ( ptr ); +} + +void operator delete[]( void * ptr ) +{ + vPortFree ( ptr ); +} + +#endif // EI_PORTING_BRICKML == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ei_classifier_porting.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ei_classifier_porting.h index 1108fdf..c0558e8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ei_classifier_porting.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ei_classifier_porting.h @@ -1,23 +1,18 @@ -/* Edge Impulse inferencing library - * Copyright (c) 2021 EdgeImpulse Inc. +/* + * Copyright (c) 2022 EdgeImpulse Inc. * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
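Illustrative use of the serial hooks introduced in the ports above (`ei_putchar()` / `ei_getchar()`): a small sketch that echoes any pending bytes back over the same port, assuming a port such as the Arduino one that implements both hooks. The function name is hypothetical.

```cpp
#include "edge-impulse-sdk/porting/ei_classifier_porting.h"

// Echo any bytes currently waiting on the serial port back to the sender.
void echo_pending_bytes() {
    char c = ei_getchar();      // the Arduino port returns 0 when nothing is available
    while (c != 0) {
        ei_putchar(c);          // send the byte straight back
        c = ei_getchar();
    }
}
```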
+ * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE - * SOFTWARE. + * SPDX-License-Identifier: Apache-2.0 */ #ifndef _EI_CLASSIFIER_PORTING_H_ @@ -25,110 +20,248 @@ #include #include -#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include "edge-impulse-sdk/dsp/returntypes.h" #if defined(__cplusplus) && EI_C_LINKAGE == 1 extern "C" { #endif // defined(__cplusplus) -typedef enum { - EI_IMPULSE_OK = 0, - EI_IMPULSE_ERROR_SHAPES_DONT_MATCH = -1, - EI_IMPULSE_CANCELED = -2, - EI_IMPULSE_TFLITE_ERROR = -3, - EI_IMPULSE_DSP_ERROR = -5, - EI_IMPULSE_TFLITE_ARENA_ALLOC_FAILED = -6, - EI_IMPULSE_CUBEAI_ERROR = -7, - EI_IMPULSE_ALLOC_FAILED = -8, - EI_IMPULSE_ONLY_SUPPORTED_FOR_IMAGES = -9, - EI_IMPULSE_UNSUPPORTED_INFERENCING_ENGINE = -10, - EI_IMPULSE_OUT_OF_MEMORY = -11, - EI_IMPULSE_NOT_SUPPORTED_WITH_I16 = -12, - EI_IMPULSE_INPUT_TENSOR_WAS_NULL = -13, - EI_IMPULSE_OUTPUT_TENSOR_WAS_NULL = -14, - EI_IMPULSE_SCORE_TENSOR_WAS_NULL = -15, - EI_IMPULSE_LABEL_TENSOR_WAS_NULL = -16, - EI_IMPULSE_TENSORRT_INIT_FAILED = -17 -} EI_IMPULSE_ERROR; +/* Private functions ------------------------------------------------------- */ + +EI_IMPULSE_ERROR ei_run_impulse_check_canceled(); +void ei_serial_set_baudrate(int baudrate); + +/* Public functions -------------------------------------------------------- */ /** - * Cancelable sleep, can be triggered with signal from other thread + * @defgroup ei_user_functions User-defined functions + * + * These functions are required to be implemented by the user for the target platform. + * See [this porting guide](https://docs.edgeimpulse.com/docs/edge-ai-hardware/porting-guide) for more information. They are declared internally in the Edge Impulse + * C++ SDK library, and they must be defined by the user. + * + * **Source**: [porting/ei_classifier_porting.h](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/porting/ei_classifier_porting.h) + * + * **Examples**: + * The following examples demonstrate possible implementations of this function for + * various platforms. 
Note the `__attribute__((weak))` in most of the definitions, which + * means that a user could override the implementation elsewhere in the program: + * * [Arduino classifier porting](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/porting/arduino/ei_classifier_porting.cpp) + * * [mbed classifier porting](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/porting/mbed/ei_classifier_porting.cpp) + * * [POSIX classifier porting](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/porting/posix/ei_classifier_porting.cpp) + * * [Silicon Labs classifier porting](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/porting/silabs/ei_classifier_porting.cpp) + * * [STM32 classifier porting](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/porting/stm32-cubeai/ei_classifier_porting.cpp) + * * [TI classifier porting](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/porting/ti/debug_log.cpp) + * * [Zephyr classifier porting](https://github.com/edgeimpulse/inferencing-sdk-cpp/blob/master/porting/zephyr/ei_classifier_porting.cpp) + * + * @addtogroup ei_user_functions + * @{ */ -EI_IMPULSE_ERROR ei_sleep(int32_t time_ms); /** - * Check if the sampler thread was canceled, use this in conjunction with - * the same signaling mechanism as ei_sleep + * Cancelable sleep, can be triggered with signal from other thread + */ +/** + * @brief Cancellable sleep, can be triggered with signal from other thread + * + * Allow the processor or thread to sleep or block for the given time. + * + * @param[in] time_ms Time in milliseconds to sleep + * + * @return `EI_IMPULSE_OK` if successful, error code otherwise */ -EI_IMPULSE_ERROR ei_run_impulse_check_canceled(); +EI_IMPULSE_ERROR ei_sleep(int32_t time_ms); /** * Read the millisecond timer */ +/** + * @brief Read the millisecond timer + * + * This function should return the number of milliseconds that have passed since the + * start of the program. If you do not need to determine the run times for DSP and + * inference blocks, you can simply return 0 from this function. Your impulse will still + * work correctly without timing information. + * + * @return The number of milliseconds that have passed since the start of the program + */ uint64_t ei_read_timer_ms(); /** - * Read the microsecond timer + * @brief Read the microsecond timer + * + * This function should return the number of milliseconds that have passed since the + * start of the program. If you do not need to determine the run times for DSP and + * inference blocks, you can simply return 0 from this function. Your impulse will still + * work correctly without timing information. + * + * @return The number of microseconds that have passed since the start of the program */ uint64_t ei_read_timer_us(); /** - * Set Serial baudrate + * @brief Send a single character to the serial port + * + * @param[in] c The chararater to send */ -void ei_serial_set_baudrate(int baudrate); +void ei_putchar(char c); /** - * @brief Connect to putchar of target - * - * @param[in] c The chararater + * @brief Read a single character from the serial port + * + * @return The character read from the serial port */ -void ei_putchar(char c); +char ei_getchar(void); /** - * Print wrapper around printf() - * This is used internally to print debug information. 
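To make the porting contract documented in this header concrete, here is a minimal hosted (POSIX-style) sketch of the core user-defined hooks. Because the SDK defaults are declared weak, strong definitions like these simply take precedence; the sketch assumes a hosted C/C++ library and is not meant for a real microcontroller target.

```cpp
#include <chrono>
#include <cstdarg>
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <thread>

#include "edge-impulse-sdk/porting/ei_classifier_porting.h"

EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) {
    std::this_thread::sleep_for(std::chrono::milliseconds(time_ms));
    return EI_IMPULSE_OK;
}

uint64_t ei_read_timer_ms() {
    using namespace std::chrono;
    return duration_cast<milliseconds>(steady_clock::now().time_since_epoch()).count();
}

uint64_t ei_read_timer_us() {
    using namespace std::chrono;
    return duration_cast<microseconds>(steady_clock::now().time_since_epoch()).count();
}

void ei_printf(const char *format, ...) {
    va_list args;
    va_start(args, format);
    vprintf(format, args);      // stdout is fine on a hosted target
    va_end(args);
}

void ei_printf_float(float f) {
    printf("%f", f);            // hosted printf handles floats directly
}

void *ei_malloc(size_t size)                { return malloc(size); }
void *ei_calloc(size_t nitems, size_t size) { return calloc(nitems, size); }
void  ei_free(void *ptr)                    { free(ptr); }
```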
+ * @brief Print wrapper around printf() + * + * `ei_printf()` is declared internally to the Edge Impulse SDK library so that debugging + * information (e.g. during inference) can be printed out. However, the function must be + * defined by the user, as printing methods can change depending on the platform and use + * case. For example, you may want to print debugging information to stdout in Linux or + * over a UART serial port on a microcontroller. + * + * @param[in] format Pointer to a character array or string that should be printed + * @param[in] ... Other optional arguments may be passed as necessary (e.g. handle to a + * UART object). Note that any calls to `ei_printf()` from within the + * *edge-impulse-sdk* library do not pass anything other than the `format` argument. */ +__attribute__ ((format (printf, 1, 2))) void ei_printf(const char *format, ...); /** - * Override this function if your target cannot properly print floating points - * If not overriden, this will be sent through `ei_printf()`. + * @brief Used to print floating point numbers + * + * Some platforms cannot handle directly printing floating point numbers (e.g. to a + * console or over a serial port). If your platform cannot directly print floats, + * provide an implementation of this function to print them as needed (for example, + * construct a string containing scientific notation with integers and call + * `ei_printf()`). + * + * If your platform can print floating point values, the easiest implementation of this + * function is as follows: + * + * ``` + * __attribute__((weak)) void ei_printf_float(float f) { + * printf("%f", f); + * } + * ``` + * + * @param[in] f The floating point number to print */ void ei_printf_float(float f); /** - * Wrapper around malloc + * @brief Wrapper around malloc + * + * This function should allocate `size` bytes and return a pointer to the allocated + * memory. In bare-metal implementations, it can simply be a wrapper for `malloc()`. For + * example: + * + * ``` + * __attribute__((weak)) void *ei_malloc(size_t size) { + * return malloc(size); + * } + * ``` + * + * If you intend to run your impulse in a multi-threaded environment, you will need to + * ensure that your implementation of `ei_malloc()` is thread-safe. For example, if you + * are using FreeRTOS, here is one possible implementation: + * + * ``` + * __attribute__((weak)) void *ei_malloc(size_t size) { + * return pvPortMalloc(size); + * } + * ``` + * + * @param[in] size The number of bytes to allocate */ void *ei_malloc(size_t size); /** - * Wrapper around calloc + * @brief Wrapper around calloc + * + * This function should allocate `nitems * size` bytes and initialize all bytes in this + * allocated memory to 0. It should return a pointer to the allocated memory. In + * bare-metal implementations, it can simply be a wrapper for `calloc()`. For example: + * + * ``` + * __attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + * return calloc(nitems, size); + * } + * ``` + * + * If you intend to run your impulse in a multi-threaded environment, you will need to + * ensure that your implementation of `ei_calloc()` is thread-safe. 
For example, if you + * are using FreeRTOS, here is one possible implementation: + * + * ``` + * __attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + * void *ptr = NULL; + * if (size > 0) { + * ptr = pvPortMalloc(nitems * size); + * if(ptr) + * memset(ptr, 0, (nitems * size)); + * } + * return ptr; + * } + * ``` + * + * @param[in] nitems Number of blocks to allocate and clear + * @param[in] size Size (in bytes) of each block */ void *ei_calloc(size_t nitems, size_t size); /** - * Wrapper around free + * @brief Wrapper around free + * + * This function should free the memory space pointed to by `ptr`. If `ptr` is `NULL`, + * no operation should be performed. In bare-metal implementations, it can simply be a + * wrapper for `free()`. For example: + * + * ``` + * __attribute__((weak)) void ei_free(void *ptr) { + * free(ptr); + * } + * ``` + * + * If you intend to run your impulse in a multi-threaded environment, you will need to + * ensure that your implementation of `ei_free()` is thread-safe. For example, if you + * are using FreeRTOS, here is one possible implementation: + * + * ``` + * __attribute__((weak)) void ei_free(void *ptr) { + * pvPortFree(ptr); + * } + * ``` + * + * @param[in] ptr Pointer to the memory to free */ void ei_free(void *ptr); +/** @} */ + #if defined(__cplusplus) && EI_C_LINKAGE == 1 } #endif // defined(__cplusplus) && EI_C_LINKAGE == 1 // Load porting layer depending on target -#ifndef EI_PORTING_ARDUINO -#ifdef ARDUINO -#define EI_PORTING_ARDUINO 1 + +// First check if any of the general frameworks or operating systems are supported/enabled +#ifndef EI_PORTING_ZEPHYR +#if defined(__ZEPHYR__) +#define EI_PORTING_ZEPHYR 1 #else -#define EI_PORTING_ARDUINO 0 +#define EI_PORTING_ZEPHYR 0 #endif #endif -#ifndef EI_PORTING_ECM3532 -#ifdef ECM3532 -#define EI_PORTING_ECM3532 1 +#ifndef EI_PORTING_ARDUINO +#ifdef ARDUINO +#define EI_PORTING_ARDUINO 1 #else -#define EI_PORTING_ECM3532 0 +#define EI_PORTING_ARDUINO 0 #endif #endif @@ -140,6 +273,21 @@ void ei_free(void *ptr); #endif #endif +// Then check for target spcific build systems + +#ifndef EI_PORTING_ESPRESSIF +#if ((defined(CONFIG_IDF_TARGET_ESP32) || defined(CONFIG_IDF_TARGET_ESP32S3)) && EI_PORTING_ZEPHYR == 0) +#include "esp_idf_version.h" +#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 0, 0) +#define portTICK_RATE_MS portTICK_PERIOD_MS +#endif +#define EI_PORTING_ESPRESSIF 1 +#define EI_PORTING_ARDUINO 0 +#else +#define EI_PORTING_ESPRESSIF 0 +#endif +#endif + #ifndef EI_PORTING_POSIX #if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) #define EI_PORTING_POSIX 1 @@ -156,14 +304,15 @@ void ei_free(void *ptr); #endif #endif -#ifndef EI_PORTING_ZEPHYR -#if defined(__ZEPHYR__) -#define EI_PORTING_ZEPHYR 1 +#ifndef EI_PORTING_RASPBERRY +#ifdef PICO_BOARD +#define EI_PORTING_RASPBERRY 1 #else -#define EI_PORTING_ZEPHYR 0 +#define EI_PORTING_RASPBERRY 0 #endif #endif + #ifndef EI_PORTING_STM32_CUBEAI #if defined(USE_HAL_DRIVER) && !defined(__MBED__) && EI_PORTING_ZEPHYR == 0 #define EI_PORTING_STM32_CUBEAI 1 @@ -189,4 +338,23 @@ void ei_free(void *ptr); #endif // End load porting layer depending on target +// Additional configuration for specific architecture +#if defined(__CORTEX_M) + +#if (__CORTEX_M == 55U) +#define EI_MAX_OVERFLOW_BUFFER_COUNT 15 +#endif + +#if (__CORTEX_M == 85U) +#define EI_MAX_OVERFLOW_BUFFER_COUNT 50 +#endif + +#endif + +#if defined(CONFIG_IDF_TARGET_ESP32S3) +#define EI_MAX_OVERFLOW_BUFFER_COUNT 30 +#endif + +// End additional configuration + #endif // 
_EI_CLASSIFIER_PORTING_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ei_logging.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ei_logging.h new file mode 100644 index 0000000..b69604b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ei_logging.h @@ -0,0 +1,82 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#ifndef _EI_LOGGING_H_ +#define _EI_LOGGING_H_ + +#include +#include + +#include "ei_classifier_porting.h" + +#define EI_LOG_LEVEL_NONE 0 /*!< No log output */ +#define EI_LOG_LEVEL_ERROR 1 /*!< Critical errors, software module can not recover on its own */ +#define EI_LOG_LEVEL_WARNING 2 /*!< Error conditions from which recovery measures have been taken */ +#define EI_LOG_LEVEL_INFO 3 /*!< Information messages which describe normal flow of events */ +#define EI_LOG_LEVEL_DEBUG 4 /*!< Extra information which is not necessary for normal use (values, pointers, sizes, etc). */ + +// if we do not want ANY logging, setting EI_LOG_LEVEL to EI_LOG_LEVEL_NONE +// will not generate any code according to +// https://stackoverflow.com/a/25021889 + +#define EI_LOGE(format, ...) (void)0 +#define EI_LOGW(format, ...) (void)0 +#define EI_LOGI(format, ...) (void)0 +#define EI_LOGD(format, ...) (void)0 + +#ifndef EI_LOG_LEVEL + #define EI_LOG_LEVEL EI_LOG_LEVEL_INFO +#endif + +__attribute__((unused)) static const char *debug_msgs[] = +{ + "NONE", // this one will never show + "ERR", + "WARNING", + "INFO", + "DEBUG" +}; + +#if EI_LOG_LEVEL >= EI_LOG_LEVEL_ERROR + #ifdef EI_LOGE + #undef EI_LOGE + #endif // EI_LOGE + #define EI_LOGE(format, ...) ei_printf("%s: ",debug_msgs[EI_LOG_LEVEL_ERROR]); ei_printf(format, ##__VA_ARGS__); +#endif + +#if EI_LOG_LEVEL >= EI_LOG_LEVEL_WARNING + #ifdef EI_LOGW + #undef EI_LOGW + #endif // EI_LOGW + #define EI_LOGW(format, ...) ei_printf("%s: ",debug_msgs[EI_LOG_LEVEL_WARNING]); ei_printf(format, ##__VA_ARGS__); +#endif + +#if EI_LOG_LEVEL >= EI_LOG_LEVEL_INFO + #ifdef EI_LOGI + #undef EI_LOGI + #endif // EI_LOGI + #define EI_LOGI(format, ...) ei_printf("%s: ",debug_msgs[EI_LOG_LEVEL_INFO]); ei_printf(format, ##__VA_ARGS__); +#endif + +#if EI_LOG_LEVEL >= EI_LOG_LEVEL_DEBUG + #ifdef EI_LOGD + #undef EI_LOGD + #endif // EI_LOGD + #define EI_LOGD(format, ...) 
ei_printf("%s: ",debug_msgs[EI_LOG_LEVEL_DEBUG]); ei_printf(format, ##__VA_ARGS__); +#endif + +#endif // _EI_LOGGING_H_ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/CMakeLists.txt b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/CMakeLists.txt new file mode 100644 index 0000000..736eaf9 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/CMakeLists.txt @@ -0,0 +1,51 @@ +cmake_minimum_required(VERSION 3.5) + +set(c_srcs + "src/activation_functions/esp_nn_relu_ansi.c" + "src/basic_math/esp_nn_add_ansi.c" + "src/basic_math/esp_nn_mul_ansi.c" + "src/convolution/esp_nn_conv_ansi.c" + "src/convolution/esp_nn_conv_opt.c" + "src/convolution/esp_nn_depthwise_conv_ansi.c" + "src/convolution/esp_nn_depthwise_conv_opt.c" + "src/fully_connected/esp_nn_fully_connected_ansi.c" + "src/softmax/esp_nn_softmax_ansi.c" + "src/softmax/esp_nn_softmax_opt.c" + "src/pooling/esp_nn_avg_pool_ansi.c" + "src/pooling/esp_nn_max_pool_ansi.c") + +if(CONFIG_IDF_TARGET_ESP32S3) + set(s3_srcs + "src/common/esp_nn_common_functions_esp32s3.S" + "src/common/esp_nn_multiply_by_quantized_mult_esp32s3.S" + "src/common/esp_nn_multiply_by_quantized_mult_ver1_esp32s3.S" + "src/activation_functions/esp_nn_relu_s8_esp32s3.S" + "src/basic_math/esp_nn_add_s8_esp32s3.S" + "src/basic_math/esp_nn_mul_s8_esp32s3.S" + "src/convolution/esp_nn_conv_esp32s3.c" + "src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c" + "src/convolution/esp_nn_conv_s16_mult8_esp32s3.S" + "src/convolution/esp_nn_conv_s8_mult8_1x1_esp32s3.S" + "src/convolution/esp_nn_conv_s16_mult4_1x1_esp32s3.S" + "src/convolution/esp_nn_conv_s8_filter_aligned_input_padded_esp32s3.S" + "src/convolution/esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3.S" + "src/convolution/esp_nn_depthwise_conv_s16_mult1_esp32s3.S" + "src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3.S" + "src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3.S" + "src/convolution/esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3.S" + "src/convolution/esp_nn_depthwise_conv_s16_mult4_esp32s3.S" + "src/convolution/esp_nn_depthwise_conv_s16_mult8_esp32s3.S" + "src/fully_connected/esp_nn_fully_connected_s8_esp32s3.S" + "src/pooling/esp_nn_max_pool_s8_esp32s3.S" + "src/pooling/esp_nn_avg_pool_s8_esp32s3.S") +endif() + +idf_component_register(SRCS "${c_srcs}" + "${s3_srcs}" + INCLUDE_DIRS "include" "src/common") + +if(CONFIG_IDF_TARGET_ESP32S3) + target_compile_options(${COMPONENT_LIB} PRIVATE -mlongcalls -fno-unroll-loops -O2 -Wno-unused-function) +else() + target_compile_options(${COMPONENT_LIB} PRIVATE -O2 -Wno-unused-function) +endif() diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/CONTRIBUTING.md b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/CONTRIBUTING.md new file mode 100644 index 0000000..b541db7 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/CONTRIBUTING.md @@ -0,0 +1,38 @@ +# Contributing + +Contributions to ESP-NN project in the form of pull requests, bug reports, and feature requests are welcome! + +This document covers various topics related to contributions to the ESP-NN projects. Please read it if you plan to submit a PR! + +## CLA + +We require accepting the contributor's license agreement for all pull requests. 
When opening a pull request the first time you will be prompted to sign the CLA by the [CLA Assistant](https://cla-assistant.io/) service. + +## Large-scale Changes + +If you'd like to propose a change to the existing APIs or a large-scale refactoring of the implementation, we recommend opening an issue first to discuss this. + +## Updating the Benchmarks Table + +The benchmarks table in [README.md](README.md) contains benchmarks for ESP32-S3. The benchmarks are collected by running the app in [test_app](test_app/) directory. Please update this table if you have changed the implementations of some of the functions or added the new ones. + +## Releasing a new version + +Maintainers should follow the steps below to release a new version of ESP-NN component. Assuming the new version is `vX.Y.Z`: + +1. Ensure you are on the latest `master` branch: + ```bash + git checkout master + git pull --ff-only origin master + ``` +1. Create the new tag: + ```bash + git tag -s -a -m "vX.Y.Z" vX.Y.Z + ``` +1. Push the tag and the branch to the internal repository: + ```bash + git push origin vX.Y.Z + ``` +1. CI will automatically push the tag to Github and will upload the new version to the IDF Component Registry. +1. Go to https://github.com/espressif/esp-nn/releases and create a release from the tag vX.Y.Z. +1. Write the release notes and publish the release. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/Kconfig.projbuild b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/Kconfig.projbuild new file mode 100644 index 0000000..a146305 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/Kconfig.projbuild @@ -0,0 +1,29 @@ +menu "ESP-NN" + +choice NN_OPTIMIZATIONS + bool "Optimization for nn functions" + default NN_OPTIMIZED + help + Use ANSI-C versions for verification and debug purpose. + Optimisations are automatically picked up for a chipset. + For ESP32-S3, assembly optimisations are selected. + For other platforms(viz., ESP32, ESP32-C3), generic optimisations are used. + +config NN_ANSI_C + bool "ANSI C" + help + ANSI C versions for verification and debug purposes. +config NN_OPTIMIZED + bool "Optimized versions" + help + Optimisations are automatically picked up for a chipset. + For ESP32-S3, assembly optimisations are selected. + For other platforms(viz., ESP32, ESP32-C3), generic optimisations are used. +endchoice + +config NN_OPTIMIZATIONS + int + default 0 if NN_ANSI_C + default 1 if NN_OPTIMIZED + +endmenu diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/LICENSE b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/README.md b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/README.md new file mode 100644 index 0000000..2efde15 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/README.md @@ -0,0 +1 @@ +Internal Edge Impulse fork of ESP-NN. Derived from https://github.com/edgeimpulse/esp-nn/commit/6b3ef8e226a05554a6d874f6456f5ca1771c01c2. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/idf_component.yml b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/idf_component.yml new file mode 100644 index 0000000..b90ac5e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/idf_component.yml @@ -0,0 +1,11 @@ +description: Optimized NN (Neural Network) functions for Espressif chips +url: https://github.com/espressif/esp-nn +repository: https://github.com/espressif/esp-nn.git +issues: https://github.com/espressif/esp-nn/issues +dependencies: + idf: + version: ">=4.2" +files: + exclude: + - test_app + - tests diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h new file mode 100644 index 0000000..3d2ef30 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h @@ -0,0 +1,46 @@ +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#if defined(EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN) +// select apt optimisations +#ifdef CONFIG_IDF_TARGET_ESP32S3 +#define ARCH_ESP32_S3 1 +#endif +#ifdef CONFIG_IDF_TARGET_ESP32 +#define ARCH_ESP32 1 +#endif +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +/* reference kernels included by default */ +#include "esp_nn_ansi_headers.h" + +#if defined(EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN) +#if defined(ARCH_ESP32_S3) +#include "esp_nn_esp32s3.h" +#else // for other platforms use generic optimisations +#include "esp_nn_generic_opt.h" +#endif // #if defined(ARCH_ESP32_S3) +#else +#include "esp_nn_ansi_c.h" +#endif + +#ifdef __cplusplus +} +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_ansi_c.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_ansi_c.h new file mode 100644 index 0000000..8279ebe --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_ansi_c.h @@ -0,0 +1,47 @@ +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @file Header definitions to include for ANSI C versions. + * These are just typedefs to pick up ANSI versions. 
+ */ + +#pragma once + +#include "esp_nn_defs.h" +#include "esp_nn_ansi_headers.h" + +#define esp_nn_add_elementwise_s8 esp_nn_add_elementwise_s8_ansi +#define esp_nn_mul_elementwise_s8 esp_nn_mul_elementwise_s8_ansi + +#define esp_nn_depthwise_conv_s8 esp_nn_depthwise_conv_s8_ansi + +#define esp_nn_conv_s8 esp_nn_conv_s8_ansi + +#define esp_nn_get_conv_scratch_size esp_nn_get_conv_scratch_size_ansi +#define esp_nn_set_conv_scratch_buf esp_nn_set_conv_scratch_buf_ansi + +#define esp_nn_get_depthwise_conv_scratch_size esp_nn_get_depthwise_conv_scratch_size_ansi +#define esp_nn_set_depthwise_conv_scratch_buf esp_nn_set_depthwise_conv_scratch_buf_ansi + +#define esp_nn_relu6_s8 esp_nn_relu6_s8_ansi + +#define esp_nn_avg_pool_s8 esp_nn_avg_pool_s8_ansi +#define esp_nn_max_pool_s8 esp_nn_max_pool_s8_ansi + +#define esp_nn_fully_connected_s8 esp_nn_fully_connected_s8_ansi + +#define esp_nn_get_softmax_scratch_size esp_nn_get_softmax_scratch_size_ansi +#define esp_nn_set_softmax_scratch_buf esp_nn_set_softmax_scratch_buf_ansi +#define esp_nn_softmax_s8 esp_nn_softmax_s8_ansi diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_ansi_headers.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_ansi_headers.h new file mode 100644 index 0000000..52ebb68 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_ansi_headers.h @@ -0,0 +1,309 @@ +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
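A tiny illustration of the alias scheme in `esp_nn_ansi_c.h` above: portable code calls the generic `esp_nn_*` name and, in a build without platform optimisations enabled, the preprocessor maps it onto the ANSI C kernel. The wrapper function below is purely illustrative.

```cpp
#include <cstdint>
#include "esp_nn.h"   // selects esp_nn_ansi_c.h when no optimised backend is enabled

// In the ANSI build, esp_nn_relu6_s8 is #defined to esp_nn_relu6_s8_ansi.
void clamp_activations(int8_t *activations, uint16_t count) {
    esp_nn_relu6_s8(activations, count);   // in-place ReLU6 on int8 data
}
```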
+ +#pragma once + +/** + * @file Header definitions to include for esp_nn reference functions + */ + +#include "esp_nn_defs.h" +/************************** Basic math functions ****************************/ + +/** + * @brief elementwise addition + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + * + * shift values are expected to be <= 0 + */ +void esp_nn_add_elementwise_s8_ansi(const int8_t *input1_data, + const int8_t *input2_data, + const int32_t input1_offset, + const int32_t input2_offset, + const int32_t input1_mult, + const int32_t input2_mult, + const int32_t input1_shift, + const int32_t input2_shift, + const int32_t left_shift, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t activation_min, + const int32_t activation_max, + const int32_t size); +/** + * @brief elementwise multiplication + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + * + * output shift is expected to be <= 0 + */ +void esp_nn_mul_elementwise_s8_ansi(const int8_t *input1_data, + const int8_t *input2_data, + const int32_t input1_offset, + const int32_t input2_offset, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t activation_min, + const int32_t activation_max, + const int32_t size); + + +/************************** Convolution functions *****************************/ + +/** + * @brief depthwise convolution per channel + * + * @note inputs type: int8_t, output: int8_t + * Version used in tflite is per channel. + * This version follows the same footsprints. + * Meaning, it has per out_channel shift and multiplier for + * requantization + * + * optimization notes: Though input_offset is int32 type, + * offset values are contained in 8 bits [-128, 127] + */ +void esp_nn_depthwise_conv_s8_ansi(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const dw_conv_params_t *conv_params, + const quant_data_t *quant_data); + +/** + * @brief 2d-convolution channelwise + * + * @note operation: result += (input + offset) * filter + * + * inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + */ +void esp_nn_conv_s8_ansi(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const conv_params_t *conv_params, + const quant_data_t *quant_data); + +int esp_nn_get_conv_scratch_size_ansi(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const conv_params_t *conv_params); +void esp_nn_set_conv_scratch_buf_ansi(const void *buf); + +int esp_nn_get_depthwise_conv_scratch_size_ansi(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const dw_conv_params_t *conv_params); +void esp_nn_set_depthwise_conv_scratch_buf_ansi(const void *buf); + +/************************** Activation functions *****************************/ + +/** + * @brief relu6 + * + * @note inout: int8_t + */ +void esp_nn_relu6_s8_ansi(int8_t *data, uint16_t size); + +/************************** Pooling functions 
*****************************/ + + +/** + * @brief max_pool + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + */ +void esp_nn_max_pool_s8_ansi(const int8_t *input, + const uint16_t input_wd, + const uint16_t input_ht, + int8_t *output, + const uint16_t output_wd, + const uint16_t output_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t filter_wd, + const uint16_t filter_ht, + const uint16_t pad_wd, + const uint16_t pad_ht, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t channels); + +/** + * @brief avg_pool + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + */ +void esp_nn_avg_pool_s8_ansi(const int8_t *input, + const uint16_t input_wd, + const uint16_t input_ht, + int8_t *output, + const uint16_t output_wd, + const uint16_t output_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t filter_wd, + const uint16_t filter_ht, + const uint16_t pad_wd, + const uint16_t pad_ht, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t channels); + + +/************************** Fully connected functions ***********************/ + +/** + * @brief fully connected + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + */ +void esp_nn_fully_connected_s8_ansi(const int8_t *input_data, + const int32_t input_offset, + const uint16_t row_len, + const int8_t *filter_data, + const int32_t filter_offset, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_channels, + const int32_t out_offset, + const int32_t out_shift, + const int32_t out_mult, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief Get scratch buffer size needed by softmax function + * + * @param width + * @param height + * @return size in bytes + * + * @note buffer must be 4 byte aligned + */ +int32_t esp_nn_get_softmax_scratch_size_ansi(const int32_t width, const int32_t height); + +/* ANSI C function to be hooked up when optimised version needed */ +int32_t esp_nn_get_softmax_scratch_size_opt(const int32_t width, const int32_t height); + +/** + * @brief Set scratch buffer to be used by softmax function + * + * @param buffer this can be NULL if one needs to unset it + * must be aligned to 4 bytes + */ +void esp_nn_set_softmax_scratch_buf_ansi(void *buffer); + +/** + * @brief reference softmax function + * + * @note inputs type: int8_t, output: int8_t + */ +void esp_nn_softmax_s8_ansi(const int8_t *input_data, + const int32_t height, + const int32_t width, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int8_t *output_data); + + +//////////////////////////// Generic optimisations ///////////////////////////// + +/************************** Convolution functions *****************************/ + +/** + * @brief 2d-convolution channelwise optimized version + * + * @note operation: result += (input + offset) * filter + * + * inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + */ +void esp_nn_conv_s8_opt(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const conv_params_t *conv_params, + const quant_data_t *quant_data); + +/** + * 
@brief depthwise convolution per channel optimized version + * + * @note inputs type: int8_t, output: int8_t + * Version used in tflite is per channel. + * This version follows the same footsprints. + * Meaning, it has per out_channel shift and multiplier for + * requantization + * + * optimization notes: Though input_offset is int32 type, + * offset values are contained in 8 bits [-128, 127] + */ +void esp_nn_depthwise_conv_s8_opt(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const dw_conv_params_t *conv_params, + const quant_data_t *quant_data); + +int esp_nn_get_conv_scratch_size_opt(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const conv_params_t *conv_params); +void esp_nn_set_conv_scratch_buf_opt(const void *buf); + +int esp_nn_get_depthwise_conv_scratch_size_opt(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const dw_conv_params_t *conv_params); +void esp_nn_set_depthwise_conv_scratch_buf_opt(const void *buf); + +/* ANSI C function to be hooked up when optimised version needed */ +void esp_nn_set_softmax_scratch_buf_opt(void *buffer); + +/** + * @brief optimised version of softmax function + * + * @note the function uses extra buffer (4 * width bytes) + * hence, scratch buffers must be set before calling this. + */ +void esp_nn_softmax_s8_opt(const int8_t *input_data, + const int32_t height, + const int32_t width, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int8_t *output_data); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_defs.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_defs.h new file mode 100644 index 0000000..756d8e6 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_defs.h @@ -0,0 +1,83 @@ +// Copyright 2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#pragma once + +#include + +/** + * @brief structure to club data dims + * this structure can be used for input, output and filter + */ +typedef struct data_dims { + int32_t width; + int32_t height; + int32_t channels; + + int32_t extra; // can be used as batch or any other param +} data_dims_t; + +/** + * @brief 2d data structure (width, height) + * + */ +typedef struct data_2d { + int32_t width; + int32_t height; +} data_2d_t; + +/** + * @brief min/max activation + */ +typedef struct act_params { + int32_t min; + int32_t max; +} act_params_t; + +/** + * @brief per channel quant data + * + * @note number of shift and mult elements are equal to output channels + */ +typedef struct quant_data { + int32_t *shift; + int32_t *mult; +} quant_data_t; + +/** + * @brief params specific to convolution 2d + * + */ +typedef struct conv_params { + int32_t in_offset; + int32_t out_offset; + data_2d_t stride; + data_2d_t padding; + data_2d_t dilation; + act_params_t activation; +} conv_params_t; + +/** + * @brief params specific to depthwise convolution 2d + * + */ +typedef struct dw_conv_params { + int32_t in_offset; + int32_t out_offset; + int32_t ch_mult; // channel multiplier. (in_ch * ch_mult = out_ch) + data_2d_t stride; + data_2d_t padding; + data_2d_t dilation; + act_params_t activation; +} dw_conv_params_t; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_esp32s3.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_esp32s3.h new file mode 100644 index 0000000..0f52c94 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_esp32s3.h @@ -0,0 +1,231 @@ +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
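For context, a minimal caller-side sketch of how the structures from esp_nn_defs.h feed the convolution entry points declared in esp_nn_ansi_headers.h. It is not part of the patch: the shapes, offsets and quantization values are placeholders, and the interpretation of the `extra` field and of `filter_dims` (input channels in `channels`, filter count taken from `output_dims.channels`) is an assumption about the API rather than something the headers spell out.

#include <stdint.h>
#include <stdlib.h>
#include "esp_nn_defs.h"
#include "esp_nn_ansi_headers.h"

/* 8x8x4 int8 input, 3x3 filters, 8 output channels -- values are illustrative only. */
void conv_example(const int8_t *input, const int8_t *filter,
                  const int32_t *bias, int8_t *output)
{
    data_dims_t input_dims  = { .width = 8, .height = 8, .channels = 4, .extra = 1 };
    data_dims_t filter_dims = { .width = 3, .height = 3, .channels = 4, .extra = 1 };
    data_dims_t output_dims = { .width = 8, .height = 8, .channels = 8, .extra = 1 };

    /* In practice these per-output-channel values come from the quantized model. */
    static int32_t out_mult[8];   /* requantization multipliers */
    static int32_t out_shift[8];  /* requantization shifts (<= 0) */
    quant_data_t quant = { .shift = out_shift, .mult = out_mult };

    conv_params_t params = {
        .in_offset  = 3,              /* zero-point corrections, documented range [-128, 127] */
        .out_offset = -128,
        .stride     = { 1, 1 },
        .padding    = { 1, 1 },
        .dilation   = { 1, 1 },
        .activation = { -128, 127 },  /* clamp results to the int8 range */
    };

    /* The kernels may need scratch memory: query, allocate and install it first. */
    int scratch_size = esp_nn_get_conv_scratch_size_ansi(&input_dims, &filter_dims,
                                                         &output_dims, &params);
    void *scratch = (scratch_size > 0) ? malloc(scratch_size) : NULL;
    esp_nn_set_conv_scratch_buf_ansi(scratch);

    esp_nn_conv_s8_ansi(&input_dims, input, &filter_dims, filter, bias,
                        &output_dims, output, &params, &quant);

    free(scratch);
}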
+ +/** + * @file Header definitions to include for esp_nn optimized functions for + * the ESP32-S3 platform + */ + +#pragma once + +#include "esp_nn_defs.h" +#include "esp_nn_ansi_headers.h" + +/************************** Basic math functions *****************************/ + + +/** + * @brief elementwise addition + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + * + * shift values are expected to be <= 0 + */ +void esp_nn_add_elementwise_s8_esp32s3(const int8_t *input1_data, + const int8_t *input2_data, + const int32_t input1_offset, + const int32_t input2_offset, + const int32_t input1_mult, + const int32_t input2_mult, + const int32_t input1_shift, + const int32_t input2_shift, + const int32_t left_shift, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t activation_min, + const int32_t activation_max, + const int32_t size); + +/** + * @brief elementwise multiplication + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + * + * output shift is expected to be <= 0 + */ +void esp_nn_mul_elementwise_s8_esp32s3(const int8_t *input1_data, + const int8_t *input2_data, + const int32_t input1_offset, + const int32_t input2_offset, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t activation_min, + const int32_t activation_max, + const int32_t size); + + +/************************** Convolution functions *****************************/ + +/** + * @brief depthwise convolution per channel + * + * @note inputs type: int8_t, output: int8_t + * Version used in tflite is per channel. + * This version follows the same footsprints. 
+ * Meaning, it has per out_channel shift and multiplier for + * requantization + * + * optimization notes: Though input_offset is int32 type, + * offset values are contained in 8 bits [-128, 127] + */ +void esp_nn_depthwise_conv_s8_esp32s3(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *output_data, + const dw_conv_params_t *conv_params, + const quant_data_t *quant_data); + +/** + * @brief 2d - convolution channelwise + * + * @note operation: result += (input + offset) * filter + * + * inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + */ +void esp_nn_conv_s8_esp32s3(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *output_data, + const conv_params_t *conv_params, + const quant_data_t *quant_data); + +int esp_nn_get_conv_scratch_size_esp32s3(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const conv_params_t *conv_params); +void esp_nn_set_conv_scratch_buf_esp32s3(const void *buf); + +int esp_nn_get_depthwise_conv_scratch_size_esp32s3(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const dw_conv_params_t *conv_params); +void esp_nn_set_depthwise_conv_scratch_buf_esp32s3(const void *buf); + +/************************** Pooling functions *****************************/ + +/** + * @brief max_pool + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + */ +void esp_nn_max_pool_s8_esp32s3(const int8_t *input, + const uint16_t input_wd, + const uint16_t input_ht, + int8_t *output, + const uint16_t output_wd, + const uint16_t output_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t filter_wd, + const uint16_t filter_ht, + const uint16_t pad_wd, + const uint16_t pad_ht, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t channels); + +/** + * @brief avg_pool + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + */ +void esp_nn_avg_pool_s8_esp32s3(const int8_t *input, + const uint16_t input_wd, + const uint16_t input_ht, + int8_t *output, + const uint16_t output_wd, + const uint16_t output_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t filter_wd, + const uint16_t filter_ht, + const uint16_t pad_wd, + const uint16_t pad_ht, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t channels); + + +/************************** Fully connected functions *****************************/ + +/** + * @brief fully connected + * + * @note inputs type: int8_t, output: int8_t + * input offsets: although int32_t, they are contained in 8 bits [-128, 127] + * + * Current version works only on aligned input. + * row_len and channels should both be multiple of 8. 
+ */ +void esp_nn_fully_connected_s8_esp32s3(const int8_t *input_data, + const int32_t input_offset, + const uint16_t row_len, + const int8_t *filter_data, + const int32_t filter_offset, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_channels, + const int32_t out_offset, + const int32_t out_shift, + const int32_t out_mult, + const int32_t activation_min, + const int32_t activation_max); + +/** + * @brief relu6 + * + * @note inout: int8_t + */ +void esp_nn_relu6_s8_esp32s3(int8_t *data, uint16_t size); + +/********************** function defines ***************************/ + +#define esp_nn_add_elementwise_s8 esp_nn_add_elementwise_s8_esp32s3 +#define esp_nn_mul_elementwise_s8 esp_nn_mul_elementwise_s8_esp32s3 + +#define esp_nn_depthwise_conv_s8 esp_nn_depthwise_conv_s8_esp32s3 + +#define esp_nn_get_conv_scratch_size esp_nn_get_conv_scratch_size_esp32s3 +#define esp_nn_set_conv_scratch_buf esp_nn_set_conv_scratch_buf_esp32s3 + +#define esp_nn_get_depthwise_conv_scratch_size esp_nn_get_depthwise_conv_scratch_size_esp32s3 +#define esp_nn_set_depthwise_conv_scratch_buf esp_nn_set_depthwise_conv_scratch_buf_esp32s3 + +#define esp_nn_conv_s8 esp_nn_conv_s8_esp32s3 + +#define esp_nn_relu6_s8 esp_nn_relu6_s8_esp32s3 + +#define esp_nn_avg_pool_s8 esp_nn_avg_pool_s8_esp32s3 +#define esp_nn_max_pool_s8 esp_nn_max_pool_s8_esp32s3 + +#define esp_nn_fully_connected_s8 esp_nn_fully_connected_s8_esp32s3 + +#define esp_nn_get_softmax_scratch_size esp_nn_get_softmax_scratch_size_opt +#define esp_nn_set_softmax_scratch_buf esp_nn_set_softmax_scratch_buf_opt +#define esp_nn_softmax_s8 esp_nn_softmax_s8_opt diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_generic_opt.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_generic_opt.h new file mode 100644 index 0000000..136cba5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn_generic_opt.h @@ -0,0 +1,47 @@ +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/** + * @file Header definitions to include for esp_nn generic optimisations + * For functions which not having optimisations, _ansi versions are picked. 
+ */ + +#pragma once + +#include "esp_nn_defs.h" +#include "esp_nn_ansi_headers.h" + +#define esp_nn_add_elementwise_s8 esp_nn_add_elementwise_s8_ansi +#define esp_nn_mul_elementwise_s8 esp_nn_mul_elementwise_s8_ansi + +#define esp_nn_depthwise_conv_s8 esp_nn_depthwise_conv_s8_opt + +#define esp_nn_conv_s8 esp_nn_conv_s8_opt + +#define esp_nn_get_conv_scratch_size esp_nn_get_conv_scratch_size_opt +#define esp_nn_set_conv_scratch_buf esp_nn_set_conv_scratch_buf_opt + +#define esp_nn_get_depthwise_conv_scratch_size esp_nn_get_depthwise_conv_scratch_size_opt +#define esp_nn_set_depthwise_conv_scratch_buf esp_nn_set_depthwise_conv_scratch_buf_opt + +#define esp_nn_relu6_s8 esp_nn_relu6_s8_ansi + +#define esp_nn_avg_pool_s8 esp_nn_avg_pool_s8_ansi +#define esp_nn_max_pool_s8 esp_nn_max_pool_s8_ansi + +#define esp_nn_fully_connected_s8 esp_nn_fully_connected_s8_ansi + +#define esp_nn_get_softmax_scratch_size esp_nn_get_softmax_scratch_size_opt +#define esp_nn_set_softmax_scratch_buf esp_nn_set_softmax_scratch_buf_opt +#define esp_nn_softmax_s8 esp_nn_softmax_s8_opt diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/activation_functions/esp_nn_relu_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/activation_functions/esp_nn_relu_ansi.c new file mode 100644 index 0000000..2ac260f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/activation_functions/esp_nn_relu_ansi.c @@ -0,0 +1,34 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include + +void esp_nn_relu6_s8_ansi(int8_t *data, uint16_t size) +{ + int32_t i; + + for (i = 0; i < size; i++) { + int32_t ip = data[i]; + + ip = max(ip, 0); + data[i] = min(ip, 6); + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/activation_functions/esp_nn_relu_s8_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/activation_functions/esp_nn_relu_s8_esp32s3.S new file mode 100644 index 0000000..b020920 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/activation_functions/esp_nn_relu_s8_esp32s3.S @@ -0,0 +1,118 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + + .text + .align 4 + .literal_position + +# in place relu6 function. a2: data, a3: size + # Program Unit: esp_nn_relu6_s8_esp32s3 + .type esp_nn_relu6_s8_esp32s3, @function + .align 4 + .global esp_nn_relu6_s8_esp32s3 + +esp_nn_relu6_s8_esp32s3: + entry a1,48 # + mov.n a9,a2 # [0], data + mov.n a7,a3 # [1], size + + // process multiple of 16 + movi.n a4,6 # [4] + s8i a4,a1,0 # [5] six + addi a10,a3,-7 # [2] + ee.vldbc.8 q1,a1 # [6] id:72 six+0x0 + blti a3,16,.Lt_0_5634 # [7] + + srai a8,a3,4 # [0] + ee.zero.q q2 # [1] + loopgtz a8,.LBB37_esp_nn_relu6_s8_esp32s3 # [3] + + ee.vld.128.ip q0,a2,0 # [0*II+0] id:73 + ee.vmax.s8 q0,q0,q2 # [0*II+2] + ee.vmin.s8 q0,q0,q1 # [0*II+3] + ee.vst.128.ip q0,a2,16 # [0*II+4] id:74 +.LBB37_esp_nn_relu6_s8_esp32s3: # 0x34 + + slli a8,a8,4 # [0] + + // remaining multiple of 8 data + bge a8,a10,.Lt_0_3586 # [1] + +.Lt_0_3842: # 0x3a + sub a6,a7,a8 # [0] + srai a6,a6,3 # [1] + loopgtz a6,.LBB52_esp_nn_relu6_s8_esp32s3 # [2] + + ee.vld.l.64.ip q0,a2,0 # [0*II+0] id:75 + ee.vmax.s8 q0,q0,q2 # [0*II+2] + ee.vmin.s8 q0,q0,q1 # [0*II+3] + ee.vst.l.64.ip q0,a2,8 # [0*II+4] id:76 + +.LBB52_esp_nn_relu6_s8_esp32s3: # 0x4f + addx8 a8,a6,a8 # [0] + +.Lt_0_3586: # 0x52 + // process leftover + bge a8,a7,.Lt_0_6402 # [0] + +.Lt_0_4866: # 0x55 + movi.n a5,0 # [0] + sub a3,a7,a8 # [1] + add.n a2,a8,a9 # [2] + l8ui a6,a2,0 # [3] id:78 + addi.n a3,a3,-1 # [4] + sext a6,a6,7 + max a6,a5,a6 # [6] + min a6,a4,a6 # [7] + s8i a6,a2,0 # [8] id:79 + + loopgtz a3,.LBB67_esp_nn_relu6_s8_esp32s3 # [9] + + l8ui a3,a2,1 # [0*II+0] id:78 + addi.n a2,a2,1 # [1*II+1] + sext a3,a3,7 + max a3,a5,a3 # [0*II+3] + min a3,a4,a3 # [0*II+4] + s8i a3,a2,0 # [0*II+5] id:79 +.LBB67_esp_nn_relu6_s8_esp32s3: # 0x81 + +.Lt_0_6402: # 0x83 + retw.n # [0] + +.Lt_0_5634: # 0x85 + blti a10,1,.Lt_0_5890 # [0] + + movi.n a8,0 # [0] + ee.zero.q q2 # [1] + j .Lt_0_3842 # [2] + +.Lt_0_5890: # 0x90 + beqz.n a3,.Lt_0_6402 # [0] + + movi.n a8,0 # [0] + j .Lt_0_4866 # [1] + + .size esp_nn_relu6_s8_esp32s3, . - esp_nn_relu6_s8_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_add_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_add_ansi.c new file mode 100644 index 0000000..b123d62 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_add_ansi.c @@ -0,0 +1,101 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include + +void esp_nn_add_elementwise_u8_ansi(const uint8_t *input1_data, + const uint8_t *input2_data, + const int32_t input1_offset, + const int32_t input2_offset, + const int32_t input1_mult, + const int32_t input2_mult, + const int32_t input1_shift, + const int32_t input2_shift, + const int32_t left_shift, + uint8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t activation_min, + const int32_t activation_max, + const int32_t size) +{ + for (int i = 0; i < size; i++) { + int32_t tmp1 = input1_data[i] + input1_offset; + int32_t tmp2 = input2_data[i] + input2_offset; + + tmp1 <<= left_shift; + tmp2 <<= left_shift; + + tmp1 = esp_nn_sat_round_doubling_high_mul(tmp1, input1_mult); + tmp2 = esp_nn_sat_round_doubling_high_mul(tmp2, input2_mult); + + tmp1 = esp_nn_div_by_power_of_two(tmp1, -input1_shift); + tmp2 = esp_nn_div_by_power_of_two(tmp2, -input2_shift); + + int32_t out = tmp1 + tmp2; + out = esp_nn_sat_round_doubling_high_mul(out, out_mult); + out = esp_nn_div_by_power_of_two(out, -out_shift); + out = out + out_offset; + + out = max(activation_min, min(out, activation_max)); + output[i] = (uint8_t) out; + } +} + +void esp_nn_add_elementwise_s8_ansi(const int8_t *input1_data, + const int8_t *input2_data, + const int32_t input1_offset, + const int32_t input2_offset, + const int32_t input1_mult, + const int32_t input2_mult, + const int32_t input1_shift, + const int32_t input2_shift, + const int32_t left_shift, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t activation_min, + const int32_t activation_max, + const int32_t size) +{ + for (int i = 0; i < size; i++) { + int32_t tmp1 = input1_data[i] + input1_offset; + int32_t tmp2 = input2_data[i] + input2_offset; + + tmp1 <<= left_shift; + tmp2 <<= left_shift; + + tmp1 = esp_nn_sat_round_doubling_high_mul(tmp1, input1_mult); + tmp2 = esp_nn_sat_round_doubling_high_mul(tmp2, input2_mult); + + tmp1 = esp_nn_div_by_power_of_two(tmp1, -input1_shift); + tmp2 = esp_nn_div_by_power_of_two(tmp2, -input2_shift); + + int32_t out = tmp1 + tmp2; + out = esp_nn_sat_round_doubling_high_mul(out, out_mult); + out = esp_nn_div_by_power_of_two(out, -out_shift); + out = out + out_offset; + + out = max(activation_min, min(out, activation_max)); + output[i] = (int8_t) out; + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_add_s8_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_add_s8_esp32s3.S new file mode 100644 index 0000000..492254c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_add_s8_esp32s3.S @@ -0,0 +1,638 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2023 Espressif Systems (Shanghai) PTE LTD +// +// 
Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + .text + .align 4 + .literal_position + .literal .nudge_val, 1073741824 + + # Program Unit: esp_nn_add_elementwise_s8_esp32s3 + .type esp_nn_add_elementwise_s8_esp32s3, @function + .align 4 + .global esp_nn_add_elementwise_s8_esp32s3 + +esp_nn_add_elementwise_s8_esp32s3: # 0x4 + # temp_neg_out_shift = 0 + # temp_neg_input2_shift = 4 + # temp_neg_input1_shift = 8 + # gra_spill_temp_2 = 12 + # gra_spill_temp_3 = 16 + # gra_spill_temp_4 = 20 + # gra_spill_temp_5 = 24 + # gra_spill_temp_6 = 28 + # gra_spill_temp_7 = 32 + # gra_spill_temp_8 = 36 + # gra_spill_temp_9 = 40 + # gra_spill_temp_10 = 44 + # gra_spill_temp_11 = 48 + # gra_spill_temp_12 = 52 + # gra_spill_temp_13 = 56 + + // a2 : *input1_data + // a3 : *input2_data + // a4 : input1_offset + // a5 : input2_offset + // a6 : input1_mult + // a7 : input2_mult + // On stack: + // 80: input1_shift + // 84: input2_shift + // 88: left_shift + // 92: *output + // 96: out_offset + // 100: out_mult, loaded in `a8` + // 104: out_shift + // 108: activation_min + // 112: activation_max + // 116: size + + entry a1,80 # + s32i.n a4,a1,48 # [10] gra_spill_temp_11, input1_offset + s32i.n a5,a1,52 # [0] gra_spill_temp_12, input2_offset + s32i.n a2,a1,32 # [5] gra_spill_temp_7, input1_data + s32i.n a3,a1,12 # [3] gra_spill_temp_2, input2_data + + l32i a12,a1,116 # [11] id:720 size+0x0 + mov.n a14,a2 # [6] + mov.n a10,a3 # [8] + blti a12,1,.exit # [1] // exit + + l32i a3,a1,80 # [0] id:721 input1_shift+0x0 + l32i a13,a1,84 # [1] id:722 input2_shift+0x0 + l32i a2,a1,104 # [8] id:723 out_shift+0x0 + l32i a8,a1,100 # [1] out_mult + + neg a3,a3 # [12] + neg a13,a13 # [7] + neg a2,a2 # [11] + + s32i.n a3,a1,8 # [12] temp_neg_input1_shift, -input1_shift + s32i.n a13,a1,4 # [7] temp_neg_input2_shift, -input2_shift + s32i.n a2,a1,0 # [16] temp_neg_out_shift, -out_shift + + movi.n a5,1 + addi a9,a3,-1 + ssl a9 + sll a15,a5 + s32i.n a15,a1,16 # gra_spill_temp_3, 1 << (exponent - 1) for input1 + + addi a9,a13,-1 + ssl a9 + sll a15,a5 + s32i.n a15,a1,20 # gra_spill_temp_4, 1 << (exponent - 1) for input2 + + addi a9,a2,-1 + ssl a9 + sll a15,a5 + s32i.n a15,a1,24 # gra_spill_temp_5, 1 << (exponent - 1) for out + + movi.n a2,0 + blti a12,12,.process_leftover # [23] + + // skip to leftover routine if inputs are unaligned + or a9,a14,a10 + extui a9,a9,0,4 + bnez a9,.process_leftover + + l32i a9,a1,92 # [17] id:1279 output+0x0 + + l32i a13,a1,116 # [20] + srai a13,a13,3 # [21] + s32i.n a13,a1,56 # [22] gra_spill_temp_13 + + movi.n a13,8 + s32i.n a13,a1,28 # gra_spill_temp_6, mult_of8 counter + + ee.zero.q q6 # [8] + +.vector_loop: // process 8 values in one go + l32i a15,a1,88 # [6] left_shift + ee.vld.l.64.ip q0,a14,8 # [9] id:729 + s32i.n a9,a1,44 # [10] gra_spill_temp_10, out_ptr + s32i.n a14,a1,40 # [20] gra_spill_temp_9 + wsr.sar a15 # [21] load left shift + + addi.n a15,a1,48 # [14] + ee.vldbc.16 q7,a15 # [21] id:1277 input1_offset + ee.vcmp.lt.s8 q5,q0,q6 # [29] + ee.vzip.8 q0,q5 # [31], 20 bits + ee.vadds.s16 
q0,q0,q7 # [34], add offset + ee.vcmp.lt.s16 q2,q0,q6 # [36] + ee.vzip.16 q0,q2 # [39], 32 bits + ee.vsl.32 q0,q0 # [41] left_shift + ee.vsl.32 q2,q2 # [42] left_shift + + l32r a9,.nudge_val # [15], nudge + +// mulhi32 for q0 + ee.movi.32.a q0,a3,2 # [44] + ee.movi.32.a q0,a4,3 # [45] + ee.movi.32.a q0,a14,1 # [46] + ee.movi.32.a q0,a5,0 # [62] + + mulsh a13,a6,a3 # [51] + mull a3,a6,a3 # [53] + + mulsh a12,a6,a4 # [50] + mull a4,a6,a4 # [55] + + mulsh a15,a6,a14 # [48] + mull a14,a6,a14 # [49] + + ssai 31 # [47] + + add a3,a3,a9 + saltu a2,a3,a9 + add.n a13,a13,a2 + src a13,a13,a3 + + add a4,a4,a9 + saltu a2,a4,a9 + add.n a12,a12,a2 + src a12,a12,a4 + ee.movi.32.q q0,a13,2 # [62] + + add a14,a14,a9 + saltu a2,a14,a9 + add.n a15,a15,a2 + src a15,a15,a14 + ee.movi.32.q q0,a12,3 # [62] + + mulsh a13,a6,a5 # [51] + mull a5,a6,a5 # [53] + ee.movi.32.q q0,a15,1 # [62] + + add a5,a5,a9 + saltu a2,a5,a9 + add.n a13,a13,a2 + src a13,a13,a5 + ee.movi.32.q q0,a13,0 # [62] + + +// mulhi32 for q2 + ee.movi.32.a q2,a3,2 # [44] + ee.movi.32.a q2,a4,3 # [45] + ee.movi.32.a q2,a14,1 # [46] + ee.movi.32.a q2,a5,0 # [62] + + mulsh a13,a6,a3 # [51] + mull a3,a6,a3 # [53] + + mulsh a12,a6,a4 # [50] + mull a4,a6,a4 # [55] + + mulsh a15,a6,a14 # [48] + mull a14,a6,a14 # [49] + + ssai 31 # [47] + + add a3,a3,a9 + saltu a2,a3,a9 + add.n a13,a13,a2 + src a13,a13,a3 + + add a4,a4,a9 + saltu a2,a4,a9 + add.n a12,a12,a2 + src a12,a12,a4 + ee.movi.32.q q2,a13,2 # [62] + + add a14,a14,a9 + saltu a2,a14,a9 + add.n a15,a15,a2 + src a15,a15,a14 + ee.movi.32.q q2,a12,3 # [62] + + mulsh a13,a6,a5 # [51] + mull a5,a6,a5 # [53] + ee.movi.32.q q2,a15,1 # [62] + + l32i a3,a1,8 # [12] temp_neg_input1_shift, -input1_shift + add a5,a5,a9 + saltu a2,a5,a9 + add.n a13,a13,a2 + src a13,a13,a5 + ee.movi.32.q q2,a13,0 # [62] + + + blti a3,1, .skip_div_by2_in0 + + addi.n a13,a1,16 + ee.vcmp.lt.s32 q1,q0,q6 + ee.vcmp.lt.s32 q3,q2,q6 + ee.vldbc.32 q5,a13 // 1 << (exponent - 1) + wsr.sar a3 // load right_shift + ee.vadds.s32 q0,q0,q1 // subtract 1 `if (val < 0)` + ee.vadds.s32 q2,q2,q3 // subtract 1 `if (val < 0)` + ee.vadds.s32 q0,q0,q5 + ee.vadds.s32 q2,q2,q5 + ee.vsr.32 q0,q0 + ee.vsr.32 q2,q2 + +.skip_div_by2_in0: + + + ee.vld.l.64.ip q1,a10,8 # [11] id:1290 + addi.n a15,a1,52 # [12] + ee.vldbc.16 q7,a15 # [19] id:1278 input2_offset + l32i a15,a1,88 # [6] left_shift + s32i a10,a1,36 # [14] gra_spill_temp_8 + ee.vcmp.lt.s8 q3,q1,q6 # [271] + wsr.sar a15 # [21], load shift for left shift + ee.vzip.8 q1,q3 # [274], 20 bits + ee.vadds.s16 q1,q1,q7 # [281] + ee.vcmp.lt.s16 q3,q1,q6 # [282] + ee.vzip.16 q1,q3 # [283], 32 bits + ee.vsl.32 q1,q1 # [284] + ee.vsl.32 q3,q3 # [285] + + +// mulhi32 for q1 + ee.movi.32.a q1,a3,2 # [44] + ee.movi.32.a q1,a4,3 # [45] + ee.movi.32.a q1,a14,1 # [46] + ee.movi.32.a q1,a5,0 # [62] + + mulsh a13,a7,a3 # [51] + mull a3,a7,a3 # [53] + + mulsh a12,a7,a4 # [50] + mull a4,a7,a4 # [55] + + mulsh a15,a7,a14 # [48] + mull a14,a7,a14 # [49] + + ssai 31 # [47] + + add a3,a3,a9 + saltu a2,a3,a9 + add.n a13,a13,a2 + src a13,a13,a3 + + add a4,a4,a9 + saltu a2,a4,a9 + add.n a12,a12,a2 + src a12,a12,a4 + ee.movi.32.q q1,a13,2 # [62] + + add a14,a14,a9 + saltu a2,a14,a9 + add.n a15,a15,a2 + src a15,a15,a14 + ee.movi.32.q q1,a12,3 # [62] + + mulsh a13,a7,a5 # [51] + mull a5,a7,a5 # [53] + ee.movi.32.q q1,a15,1 # [62] + + add a5,a5,a9 + saltu a2,a5,a9 + add.n a13,a13,a2 + src a13,a13,a5 + ee.movi.32.q q1,a13,0 # [62] + + +// mulhi32 for q3 + ee.movi.32.a q3,a3,2 # [44] + ee.movi.32.a q3,a4,3 # [45] + ee.movi.32.a q3,a14,1 
# [46] + ee.movi.32.a q3,a5,0 # [62] + + mulsh a13,a7,a3 # [51] + mull a3,a7,a3 # [53] + + mulsh a12,a7,a4 # [50] + mull a4,a7,a4 # [55] + + mulsh a15,a7,a14 # [48] + mull a14,a7,a14 # [49] + + ssai 31 # [47] + + add a3,a3,a9 + saltu a2,a3,a9 + add.n a13,a13,a2 + src a13,a13,a3 + + add a4,a4,a9 + saltu a2,a4,a9 + add.n a12,a12,a2 + src a12,a12,a4 + ee.movi.32.q q3,a13,2 # [62] + + add a14,a14,a9 + saltu a2,a14,a9 + add.n a15,a15,a2 + src a15,a15,a14 + ee.movi.32.q q3,a12,3 # [62] + + mulsh a13,a7,a5 # [51] + mull a5,a7,a5 # [53] + ee.movi.32.q q3,a15,1 # [62] + l32i a14,a1,4 # [7] temp_neg_input2_shift, -input2_shift + + add a5,a5,a9 + saltu a2,a5,a9 + add.n a13,a13,a2 + src a13,a13,a5 + ee.movi.32.q q3,a13,0 # [62] + + // multiplication results: q0-q2 & q1-q3 + + + blti a14,1, .skip_div_by2_in1 + + addi.n a5,a1,20 + ee.vcmp.lt.s32 q4,q1,q6 + ee.vcmp.lt.s32 q5,q3,q6 + ee.vldbc.32 q7,a5 // 1 << (exponent - 1) + wsr.sar a14 // load right_shift + ee.vadds.s32 q4,q4,q7 // subtract 1 `if (val < 0)` + ee.vadds.s32 q5,q5,q7 // subtract 1 `if (val < 0)` + ee.vadds.s32 q1,q1,q4 + ee.vadds.s32 q3,q3,q5 + ee.vsr.32 q1,q1 + ee.vsr.32 q3,q3 + +.skip_div_by2_in1: + + ee.vadds.s32 q0,q0,q1 + ee.vadds.s32 q1,q2,q3 + +// mulhi32 for q0 + ee.movi.32.a q0,a3,2 # [44] + ee.movi.32.a q0,a4,3 # [45] + ee.movi.32.a q0,a14,1 # [46] + ee.movi.32.a q0,a5,0 # [62] + + mulsh a13,a8,a3 # [51] + mull a3,a8,a3 # [53] + + mulsh a12,a8,a4 # [50] + mull a4,a8,a4 # [55] + + mulsh a15,a8,a14 # [48] + mull a14,a8,a14 # [49] + + ssai 31 # [47] + + add a3,a3,a9 + saltu a2,a3,a9 + add.n a13,a13,a2 + src a13,a13,a3 + + add a4,a4,a9 + saltu a2,a4,a9 + add.n a12,a12,a2 + src a12,a12,a4 + ee.movi.32.q q0,a13,2 # [62] + + add a14,a14,a9 + saltu a2,a14,a9 + add.n a15,a15,a2 + src a15,a15,a14 + ee.movi.32.q q0,a12,3 # [62] + + mulsh a13,a8,a5 # [51] + mull a5,a8,a5 # [53] + ee.movi.32.q q0,a15,1 # [62] + + add a5,a5,a9 + saltu a2,a5,a9 + add.n a13,a13,a2 + src a13,a13,a5 + ee.movi.32.q q0,a13,0 # [62] + + +// mulhi32 for q1 + ee.movi.32.a q1,a3,2 # [44] + ee.movi.32.a q1,a4,3 # [45] + ee.movi.32.a q1,a14,1 # [46] + ee.movi.32.a q1,a5,0 # [62] + + mulsh a13,a8,a3 # [51] + mull a3,a8,a3 # [53] + + mulsh a12,a8,a4 # [50] + mull a4,a8,a4 # [55] + + mulsh a15,a8,a14 # [48] + mull a14,a8,a14 # [49] + + ssai 31 # [47] + + add a3,a3,a9 + saltu a2,a3,a9 + add.n a13,a13,a2 + src a13,a13,a3 + + add a4,a4,a9 + saltu a2,a4,a9 + add.n a12,a12,a2 + src a12,a12,a4 + ee.movi.32.q q1,a13,2 # [62] + + add a14,a14,a9 + saltu a2,a14,a9 + add.n a15,a15,a2 + src a15,a15,a14 + ee.movi.32.q q1,a12,3 # [62] + + mulsh a13,a8,a5 # [51] + mull a5,a8,a5 # [53] + ee.movi.32.q q1,a15,1 # [62] + l32i a14,a1,0 # [738] temp_neg_out_shift, -out_shift + + add a5,a5,a9 + saltu a2,a5,a9 + add.n a13,a13,a2 + src a13,a13,a5 + ee.movi.32.q q1,a13,0 # [62] + + + //q0-q1 has output + + blti a14,1,.skip_div_by2_out + addi.n a5,a1,24 + ee.vcmp.lt.s32 q2,q0,q6 + ee.vcmp.lt.s32 q3,q1,q6 + ee.vldbc.32 q5,a5 // 1 << (exponent - 1) + wsr.sar a14 // load right shift + ee.vadds.s32 q0,q0,q2 // subtract 1 `if (val < 0)` + ee.vadds.s32 q1,q1,q3 // subtract 1 `if (val < 0)` + ee.vadds.s32 q0,q0,q5 + ee.vadds.s32 q1,q1,q5 + ee.vsr.32 q0,q0 + ee.vsr.32 q1,q1 + +.skip_div_by2_out: + +// add offset and apply activation + addi a15,a1,96 + ee.vldbc.32 q3,a15 # [809] id:802 out_offset + ee.vadds.s32 q0,q0,q3 # [811] + ee.vadds.s32 q1,q1,q3 # [812] + addi a13,a1,108 + addi a14,a1,112 + ee.vldbc.32 q3,a14 # [813] id:803 activation_max + ee.vmin.s32 q0,q0,q3 # [815] + ee.vmin.s32 q1,q1,q3 # [816] + 
ee.vldbc.32 q3,a13 # [817] id:804 activation_min + l32i a13,a1,4 # [818] temp_neg_input2_shift + ee.vmax.s32 q1,q1,q3 # [819] + ee.vmax.s32 q0,q0,q3 # [820] + +//pack the data and store + l32i.n a9,a1,44 # [784] gra_spill_temp_10 + ee.vunzip.16 q0,q1 # [821] + ee.vunzip.8 q0,q1 # [822] + l32i.n a13,a1,28 # gra_spill_temp_6, multiple of 12 index + ee.vst.l.64.ip q0,a9,8 # [823] id:805 + l32i a15,a1,116 # [1], size + l32i.n a14,a1,40 # [20] gra_spill_temp_9 + l32i.n a10,a1,36 # [14] gra_spill_temp_8 + addi a13,a13,8 + s32i.n a13,a1,28 # gra_spill_temp_6 + bge a15,a13,.vector_loop + + l32i.n a2,a1,56 # [0] gra_spill_temp_13 + +// check for leftover + l32i a10,a1,116 # [1] + slli a2,a2,3 # [2] + bge a2,a10,.exit # [3] // done, exit + +.process_leftover: + l32i.n a3,a1,48 # [1] gra_spill_temp_11 + l32i.n a12,a1,52 # [2] gra_spill_temp_12 + + l32i.n a10,a1,12 # [3] gra_spill_temp_2 + l32i.n a14,a1,32 # [8] gra_spill_temp_7 + add.n a10,a2,a10 # [5] + add.n a14,a2,a14 # [6] + l8ui a14,a14,0 # [7] id:809, input1 + l8ui a10,a10,0 # [12] id:1370, input2 + + sext a14,a14,7 # [9] + sext a10,a10,7 # [10] + add.n a10,a10,a12 # [11] // add offset2 + add.n a14,a14,a3 # [16] // add offset1 + l32i a12,a1,88 # [13] left_shift + + // sat_round_doubling_high_mul step for input1 and input2 + ssl a12 # [15] + sll a10,a10 # [20] + sll a14,a14 # [17] + + l32r a12,.nudge_val # [0], nudge + + // a13,a3 are free, a12: nudge, a6:mult1 + mulsh a13,a14,a6 + mull a9,a14,a6 + ssai 31 + + add a9,a9,a12 + saltu a3,a9,a12 + add.n a13,a13,a3 + src a14,a13,a9 //result in a14 + + mulsh a13,a10,a7 + mull a9,a10,a7 + ssai 31 + + add a9,a9,a12 + saltu a3,a9,a12 + add.n a13,a13,a3 + src a10,a13,a9 //result in a10 + +// divide_by_power_of2_step for input1 (a14), input2 (a10) +// free registers: a13, a12, a9, a3 + + l32i.n a12,a1,8 // -input1_shift + l32i.n a13,a1,4 // -input2_shift + + blti a12,1,.skip_div_by2_in0_remain + l32i.n a3,a1,16 // 1 << (exponent - 1) + extui a9,a14,31,1 + ssr a12 // load right_shift + sub a3,a3,a9 // 1 << (exponent - 1) - (val < 0) + add a14,a14,a3 + sra a14,a14 +.skip_div_by2_in0_remain: + + blti a13,1,.skip_div_by2_in1_remain + l32i.n a3,a1,20 // 1 << (exponent - 1) + extui a9,a10,31,1 + ssr a13 // load right_shift + sub a3,a3,a9 // 1 << (exponent - 1) - (val < 0) + add a10,a10,a3 + sra a10,a10 +.skip_div_by2_in1_remain: + +// process output + l32r a12,.nudge_val # [0], nudge + l32i a13,a1,0 // -out_shift + add.n a10,a10,a14 # [45] + +// multiply and pick high32 + mulsh a3,a10,a8 + mull a10,a10,a8 + ssai 31 # [0] + add a10,a10,a12 + saltu a9,a10,a12 + add a12,a3,a9 + src a12,a12,a10 + +// div by power of 2 for output + + l32i a9,a1,96 # [31] out_offset + blti a13,1,.skip_div_by2_out_remain + l32i.n a3,a1,24 // 1 << (exponent - 1) + extui a14,a12,31,1 + ssr a13 // load right_shift + sub a3,a3,a14 // 1 << (exponent - 1) - (val < 0) + add a12,a12,a3 + sra a12,a12 +.skip_div_by2_out_remain: + +// add offset + add.n a9,a9,a12 # [33] + +// apply activation + l32i a13,a1,112 # [34] activation_max + l32i a12,a1,108 # [35] activation_min + min a13,a13,a9 # [36] + l32i a9,a1,92 # [37] output + max a13,a13,a12 # [38] + add.n a9,a2,a9 # [39] + s8i a13,a9,0 # [40] id:1371 + l32i a12,a1,116 + addi.n a2,a2,1 # [41] + blt a2,a12,.process_leftover + +.exit: + retw.n # [0] + + .size esp_nn_add_elementwise_s8_esp32s3, . 
- esp_nn_add_elementwise_s8_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_mul_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_mul_ansi.c new file mode 100644 index 0000000..477d5c6 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_mul_ansi.c @@ -0,0 +1,46 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include + +void esp_nn_mul_elementwise_s8_ansi(const int8_t *input1_data, + const int8_t *input2_data, + const int32_t input1_offset, + const int32_t input2_offset, + int8_t *output, + const int32_t out_offset, + const int32_t out_mult, + const int32_t out_shift, + const int32_t activation_min, + const int32_t activation_max, + const int32_t size) +{ + for (int i = 0; i < size; i++) { + int32_t tmp1 = input1_data[i] + input1_offset; + int32_t tmp2 = input2_data[i] + input2_offset; + + int32_t out = tmp1 * tmp2; + out = esp_nn_multiply_by_quantized_mult(out, out_mult, out_shift); + out = out + out_offset; + + out = max(activation_min, min(out, activation_max)); + output[i] = (int8_t) out; + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_mul_s8_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_mul_s8_esp32s3.S new file mode 100644 index 0000000..ca28573 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/basic_math/esp_nn_mul_s8_esp32s3.S @@ -0,0 +1,323 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2023 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
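The reference kernel above reduces each output element to a fixed-point requantization of (input1 + offset1) * (input2 + offset2). The following standalone sketch, which is not part of the patch, models that arithmetic with plain 64-bit math so the scaling is easy to follow; requant_model is a simplified stand-in for esp_nn_multiply_by_quantized_mult (it ignores saturation and the exact rounding of negative products), and all numeric values are made up.

#include <stdint.h>
#include <stdio.h>

/* Approximate model of the requantization step: out ~= acc * (mult / 2^31) * 2^shift,
 * rounded to nearest. Saturation and negative-rounding details are omitted. */
static int32_t requant_model(int32_t acc, int32_t mult, int32_t shift)
{
    int64_t prod = (int64_t) acc * mult;          /* Q0 * Q31 product             */
    int32_t total_shift = 31 - shift;             /* fold 2^shift into the >> 31  */
    int64_t round = (int64_t) 1 << (total_shift - 1);
    return (int32_t) ((prod + round) >> total_shift);
}

int main(void)
{
    /* Two int8 inputs with their zero-point offsets folded in, as the kernel does. */
    int32_t acc = (-7 + 10) * (5 + (-2));             /* = 9                        */
    int32_t out = requant_model(acc, 1 << 30, -1);    /* scale 0.5 * 0.5 = 0.25 -> 2 */
    out += -1;                                        /* out_offset                 */
    if (out < -128) out = -128;                       /* activation clamp           */
    if (out > 127)  out = 127;
    printf("quantized product = %d\n", out);          /* prints 1                   */
    return 0;
}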
+ + .text + .align 4 + .literal_position + .literal .LC0_26_123, 1073741824 // `1 << 30` + + # Program Unit: esp_nn_mul_elementwise_s8_esp32s3 + .type esp_nn_mul_elementwise_s8_esp32s3, @function + .align 4 + .global esp_nn_mul_elementwise_s8_esp32s3 + +esp_nn_mul_elementwise_s8_esp32s3: # 0x4 + # to_add = 0 + # gra_spill_temp_0 = 4 + # gra_spill_temp_1 = 8 + # gra_spill_temp_2 = 12 + # gra_spill_temp_3 = 16 + # gra_spill_temp_4 = 20 + # gra_spill_temp_5 = 24 + # gra_spill_temp_6 = 28 + # gra_spill_temp_7 = 32 + # gra_spill_temp_8 = 36 + # gra_spill_temp_<> = 40 + # gra_spill_temp_<> = 44 + # gra_spill_temp_<> = 48 + # gra_spill_temp_13 = 64 + + // registers: + // a2: const int8_t *input1_data + // a3: const int8_t *input2_data + // a4: const int32_t input1_offset + // a5: const int32_t input2_offset + // a6: int8_t *output + // a7: const int32_t out_offset + + // on stack: + // 120: const int32_t out_mult + // 124: const int32_t out_shift + // 128: const int32_t activation_min + // 132: const int32_t activation_max + // 136: const int32_t size + + entry a1,120 # + s32i.n a4,a1,24 # [0] gra_spill_temp_5, input1_offset + s32i.n a5,a1,28 # [1] gra_spill_temp_12, input2_offset + + s32i.n a3,a1,4 # [5] gra_spill_temp_0, input2 + mov.n a10,a3 # [6] + l32i a3,a1,136 # [18] id:361 size+0x0 + mov.n a9,a6 # [2] // out_addr + blti a3,1,.exit # [0] // exit + + s32i.n a2,a1,16 # [9] gra_spill_temp_3, input1 + s32i a7,a1,40 # [4] id:358 out_offset+0x0 + movi.n a11,0 # [3] + mov.n a12,a2 # [10] + s32i a4,a1,44 # [13] id:356 input1_offset+0x0 + s32i a5,a1,48 # [14] id:357 input2_offset+0x0 + movi.n a2,1 # [15] + + l32i a15,a1,124 # [3] id:362 out_shift+0x0 + l32i a13,a1,120 # [4] id:363 out_mult+0x0 + s32i.n a6,a1,8 # [1] gra_spill_temp_1, out_addr + max a14,a15,a11 # [11] left_shift + sub a4,a14,a15 # right_shift + s32i.n a4,a1,20 # [9] gra_spill_temp_4 + + blti a3,8,.process_leftover # [20] + + // skip to leftover routine if inputs are unaligned + or a6,a12,a10 + extui a6,a6,0,4 + bnez a6,.process_leftover + + // `size > 8`, s3 optimisation path... 
+ ee.zero.q q1 # [0] + addi a4,a1,44 # [7] + addi a8,a1,48 # [8] + ee.vldbc.16 q0,a4 # [17] id:359 input1_offset + ee.vldbc.16 q7,a8 # [16] id:360 input2_offset + l32r a4,.LC0_26_123 # [12] + movi a8, 8 + st.qr q0,a1,64 # [19] gra_spill_temp_13 + s32i.n a8,a1,12 # [6] gra_spill_temp_2 + +.Lt_0_7682: # 0x60 + s32i a9,a1,36 # [1] gra_spill_temp_8, out_addr + ld.qr q4,a1,64 # [2] gra_spill_temp_13, input1_offset + ee.vld.l.64.ip q2,a12,8 # [4] id:367, input1_ptr + movi.n a7,16 # [3] + ee.vld.h.64.ip q2,a10,8 # [5] id:368, input2_ptr + wsr.sar a7 # [6] + ee.vcmp.lt.s8 q5,q2,q1 # [7] + ee.vzip.8 q2,q5 # [8] + ee.vadds.s16 q5,q5,q7 # [9] input2_offset + ee.vadds.s16 q4,q2,q4 # [10] input1_offset + ee.vmul.s16 q3,q4,q5 # [11] + wsr.sar a11 # [12] + ee.vmul.s16 q2,q4,q5 # [13] + + wsr.sar a14 # [14] left_shift + ee.vzip.16 q2,q3 # [15] + ee.vsl.32 q6,q2 # [16] left_shift + ssai 31 # [17] + + ee.movi.32.a q6,a3,2 # [18] + ee.movi.32.a q6,a8,3 # [26] + + mulsh a6,a13,a3 # [19] + mull a3,a13,a3 # [20] + mulsh a7,a13,a8 # [27] + add.n a3,a4,a3 # [22] + saltu a2,a3,a4 # [23] + add.n a2,a2,a6 # [24] + src a2,a2,a3 # [25] + + mull a6,a13,a8 # [28] + add.n a6,a4,a6 # [30] + saltu a9,a6,a4 # [31] + add.n a9,a9,a7 # [32] + src a9,a9,a6 # [33] + ee.movi.32.q q2,a2,2 # [53] + ee.movi.32.q q2,a9,3 # [54] + + ee.movi.32.a q6,a6,1 # [34] + mulsh a7,a13,a6 # [35] + mull a6,a13,a6 # [36] + add.n a6,a4,a6 # [38] + saltu a3,a6,a4 # [39] + add.n a3,a3,a7 # [16] + src a3,a3,a6 # [41] + ee.movi.32.a q6,a2,0 # [42] + mulsh a8,a13,a2 # [43] + mull a7,a13,a2 # [4] + add.n a7,a4,a7 # [46] + saltu a6,a7,a4 # [47] + add.n a6,a6,a8 # [24] + src a6,a6,a7 # [49] + ee.movi.32.q q2,a3,1 # [28] + ee.movi.32.q q2,a6,0 # [50] + + wsr.sar a14 # [10] + ee.vsl.32 q4,q3 # [11] + ee.movi.32.a q4,a2,2 # [13] + mulsh a3,a13,a2 # [14] + mull a2,a13,a2 # [15] + ssai 31 # [12] + add.n a2,a4,a2 # [17] + saltu a5,a2,a4 # [18] + add.n a5,a5,a3 # [19] + src a5,a5,a2 # [20] + ee.movi.32.a q4,a3,3 # [21] + mulsh a6,a13,a3 # [22] + mull a3,a13,a3 # [23] + add.n a3,a4,a3 # [25] + saltu a8,a3,a4 # [26] + add.n a8,a8,a6 # [27] + src a8,a8,a3 # [28] + ee.movi.32.q q0,a5,2 # [24] + ee.movi.32.q q0,a8,3 # [51] + + ee.movi.32.a q4,a7,1 # [29] + mulsh a6,a13,a7 # [30] + mull a3,a13,a7 # [31] + add.n a3,a4,a3 # [33] + saltu a2,a3,a4 # [34] + add.n a2,a2,a6 # [35] + src a2,a2,a3 # [36] + ee.movi.32.a q4,a6,0 # [37] + mulsh a7,a13,a6 # [38] + mull a6,a13,a6 # [39] + add.n a6,a4,a6 # [41] + saltu a3,a6,a4 # [42] + add.n a3,a3,a7 # [43] + src a3,a3,a6 # [4] + ee.movi.32.q q0,a2,1 # [47] + ee.movi.32.q q0,a3,0 # [46] + + l32i.n a5,a1,20 # [0] gra_spill_temp_4, right_shift + movi.n a7,1 # [51] + + blti a5,1,.skip_div_by_pow_of_2 +// divide by power of 2 + ee.vcmp.lt.s32 q5,q2,q1 # [56] + ee.vcmp.lt.s32 q6,q0,q1 # [28] + + addi.n a8,a5,-1 # [1] + ssl a8 # [2] + sll a7,a7 # [3] + s32i.n a7,a1,0 # [4] to_add + ee.vldbc.32 q4,a1 # [5] id:376 to_add + + wsr.sar a5 # [6] + ee.vadds.s32 q5,q4,q5 # [7] + ee.vadds.s32 q5,q2,q5 # [8] + ee.vsr.32 q2,q5 # [9] + + wsr.sar a5 # [5] + ee.vadds.s32 q5,q4,q6 # [9] + ee.vadds.s32 q5,q0,q5 # [11] + ee.vsr.32 q0,q5 # [12] +.skip_div_by_pow_of_2: + +// add offset, apply activation + addi a8,a1,132 # [54] + ee.vldbc.32 q4,a8 # [55] id:385 activation_max + addi a5,a1,40 # [8] + ee.vldbc.32 q6,a5 # [10] id:384 out_offset + addi a7,a1,128 # [4] + ee.vadds.s32 q0,q0,q6 # [13] // add out_offset + ee.vadds.s32 q2,q2,q6 # [14] // add out_offset + ee.vldbc.32 q6,a7 # [16] id:386 activation_min + ee.vmin.s32 q0,q0,q4 # [17] + ee.vmin.s32 
q2,q2,q4 # [15] + ee.vmax.s32 q0,q0,q6 # [18] + ee.vmax.s32 q2,q2,q6 # [19] + +// pack and store + ee.vunzip.16 q2,q0 # [20] + ee.vunzip.8 q2,q0 # [21] + l32i.n a7,a1,12 // count + l32i a9,a1,36 # [55] gra_spill_temp_8 + l32i.n a3,a1,136 # [1] , size + ee.vst.l.64.ip q2,a9,8 # [22] id:387 + addi a7,a7,8 + s32i.n a7,a1,12 // increment count + bge a3,a7,.Lt_0_7682 + + addi a11,a7,-8 + bge a11,a3,.exit # [3] // exit + +.process_leftover: + sub a8,a3,a11 # [1] + loopgtz a8,.LBB33_esp_nn_mul_elementwise_s8_esp32s3 # [9] + + ssl a14 # [0] left_shift + l32i.n a8,a1,24 # [1] gra_spill_temp_5, input1_offset + l32i.n a10,a1,4 # [2] gra_spill_temp_0, input2 + l32i.n a12,a1,16 # [3] gra_spill_temp_3, input1 + add.n a10,a11,a10 # [4], input2 + add.n a12,a11,a12 # [5], input1 + l8ui a12,a12,0 # [6] id:390 + l8ui a10,a10,0 # [7] id:391 + sext a12,a12,7 # [8] + add.n a12,a12,a8 # [9] + l32i.n a8,a1,28 # [10] gra_spill_temp_12, input2_offset + sext a10,a10,7 # [11] + add.n a10,a10,a8 # [12] + mull a10,a12,a10 # [13] // multiplication result + +// multiply by quantised mult + l32i.n a9,a1,20 # [0] gra_spill_temp_4, load right_shift + + sll a10,a10 # [15] // left shift + + mulsh a3,a10,a13 # [1] + mull a8,a10,a13 # [6] + ssai 31 # [0] + add.n a6,a8,a4 # [8] + saltu a8,a6,a8 # [9] + add.n a8,a8,a3 # [10] + src a3,a8,a6 # [19] // result + + blti a9, 1, .skip_div_by_pow_of_2_remains +// divide by power of 2 + // calculate to_add = `1 << (exponent - 1)` + addi a6,a9,-1 + ssl a6 # [23] + movi a7,1 + sll a7,a7 // to_add + + extui a8,a3,31,1 # [24], sign + add a3,a3,a8 // add sign + add a3,a3,a7 // add to_add + + ssr a9 # [20] load right_shift + sra a3,a3 // right shift +.skip_div_by_pow_of_2_remains: + + l32i.n a6,a1,40 # [32], out_offset + l32i.n a8,a1,132 # [35], act_max + l32i.n a7,a1,128 # [36], act_min + +// add offset and apply activation + add.n a3,a3,a6 # [34], offset added + min a8,a8,a3 # [37] + l32i.n a3,a1,8 # [38] gra_spill_temp_1, load base out_addr + max a8,a8,a7 # [39] + +// store + add.n a3,a11,a3 # [16], add index from `a11` + s8i a8,a3,0 # [41] id:392 // store + addi.n a11,a11,1 # [42] // inc index + +.LBB33_esp_nn_mul_elementwise_s8_esp32s3: # 0x2ed +.exit: + retw.n # [0] + + .size esp_nn_mul_elementwise_s8_esp32s3, . - esp_nn_mul_elementwise_s8_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/common_functions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/common_functions.h new file mode 100644 index 0000000..1158e9b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/common_functions.h @@ -0,0 +1,255 @@ +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#pragma once + +#include +#include +#include + +/** + * c99 standard still doesn't strictly inline functions + * We need to use attribute as well to do this. + */ +#define __NN_FORCE_INLINE__ __attribute((always_inline)) static inline + +/* min/max macros */ +#ifndef max +#define max(a, b) ({ \ + __typeof__ (a) _a = (a); \ + __typeof__ (b) _b = (b); \ + _a > _b ? _a : _b; \ +}) + +#define min(a, b) ({ \ + __typeof__ (a) _a = (a); \ + __typeof__ (b) _b = (b); \ + _a < _b ? _a : _b; \ +}) +#endif + +__NN_FORCE_INLINE__ int32_t esp_nn_clz32(uint32_t in) +{ +#if CONFIG_IDF_TARGET_ARCH_XTENSA + __asm__ volatile("nsau %0, %0" : "+r" (in)); + return in; +#elif defined(__GNUC__) + return __builtin_clz(in); +#else + int32_t count = 32; + uint32_t x = in, y = in >> 16; + if (y != 0) { + count -= 16; + x = y; + } + y = x >> 8; + if (y != 0) { + count -= 8; + x = y; + } + y = x >> 4; + if (y != 0) { + count -= 4; + x = y; + } + y = x >> 2; + if (y != 0) { + count -= 2; + x = y; + } + y = x >> 1; + if (y != 0) { + return count - 2; + } + return count - x; +#endif +} + +/** + * Signed saturate a 32 bit value to 8 bits keeping output in 32 bit variable. + */ +__NN_FORCE_INLINE__ int32_t esp_nn_saturate8(int32_t in) +{ +#if CONFIG_IDF_TARGET_ARCH_XTENSA + __asm__ volatile("clamps %0, %0, 7" : "+a"(in)); + return in; +#else + return max(INT8_MIN, min(in, INT8_MAX)); +#endif +} + +__NN_FORCE_INLINE__ int32_t esp_nn_pick_sat_high32_of64(int64_t val64) +{ + int32_t sign = (int32_t) (val64 >> 63); + int32_t to_add = sign & ((1ul << 31) - 1); + return (int32_t) ((int64_t) (val64 + to_add) >> 31); +} + +__NN_FORCE_INLINE__ int32_t esp_nn_sat_round_doubling_high_mul(int32_t in0, int32_t in1) +{ + int32_t result; + int64_t in0_64 = (int64_t) in0; + bool overflow = (in0 == in1) && (in0 == (int32_t) INT32_MIN); + + /* Nudge value */ + int64_t nudge_val = 1 << 30; + if ((in0 < 0) ^ (in1 < 0)) { + nudge_val = 1 - nudge_val; + } + + /* Multiply and add nudge */ + int64_t mult = in0_64 * in1 + nudge_val; + + /* Round and pickup 32 bits */ + result = esp_nn_pick_sat_high32_of64(mult); + + return overflow ? INT32_MAX : result; +} + +/** + * fast version + * this will fail for values closer to INT32_MAX and INT32_MIN by `1 << (exponent - 1)`. + * We can afford to do this because we are at the very last stage of filter. + * Also it is pretty rare condition as our output is going to be 8 bit. + */ +__NN_FORCE_INLINE__ int32_t esp_nn_div_by_power_of_two_fast(int32_t val, int32_t exponent) +{ + int32_t to_add = (1 << (exponent - 1)) - (val < 0); + return (int32_t) ((val + to_add) >> exponent); +} + +__NN_FORCE_INLINE__ int32_t esp_nn_div_by_power_of_two(int32_t val, int32_t exponent) +{ + int32_t result; + + const int32_t mask = (1 << exponent) - 1; + const int32_t remainder = val & mask; + + result = val >> exponent; + int32_t threshold = (mask >> 1) + (result < 0); + + if (remainder > threshold) { + result += 1; + } + return result; +} + +__NN_FORCE_INLINE__ int32_t esp_nn_multiply_by_quantized_mult(int32_t x, int32_t mult, int32_t shift) +{ + int32_t left_shift = shift > 0 ? shift : 0; + int32_t right_shift = shift > 0 ? 
0 : -shift; + int32_t result = esp_nn_sat_round_doubling_high_mul(x * (1 << left_shift), mult); + return esp_nn_div_by_power_of_two(result, right_shift); +} + +__NN_FORCE_INLINE__ int32_t esp_nn_multiply_by_quantized_mult_fast(int32_t x, int32_t mult, int32_t shift) +{ + int32_t left_shift = max(shift, 0); + int32_t right_shift = left_shift - shift; + + int64_t nudge_val = 1 << 30; + int64_t in0_64 = (int64_t) (x << left_shift); + + /* Multiply and add nudge */ + int64_t mult_64 = in0_64 * mult + nudge_val; + int32_t result = (int32_t) (mult_64 >> 31); + if (right_shift) { + result = esp_nn_div_by_power_of_two_fast(result, right_shift); + } + return result; +} + +static void esp_nn_aligned_s8_pad_with_value(const int8_t *src, int8_t *dst, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const int32_t pad_val, + const uint16_t pad_wd, + const uint16_t pad_ht) +{ + /* memset with pad_val */ + memset(dst, pad_val, ((input_wd + 2 * pad_wd) * (input_ht + 2 * pad_ht)) * channels); + dst += (pad_wd + input_wd + pad_wd) * pad_ht * channels; + + for (int i = 0; i < input_ht; i++) { + dst += pad_wd * channels; + for (int j = 0; j < input_wd * channels; j++) { + *dst++ = *src++; + } + dst += pad_wd * channels; + } +} + +static void esp_nn_aligned_s8_pad_end_with_value(const int8_t *src, int8_t *dst, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const int32_t pad_val, + const uint16_t pad_wd, + const uint16_t pad_ht) +{ + for (int i = 0; i < input_ht; i++) { + for (int j = 0; j < input_wd * channels; j++) { + *dst++ = *src++; + } + if (pad_wd) { + memset(dst, pad_val, pad_wd * channels); + dst += pad_wd * channels; + } + } + /* pad end `pad_ht` lines at end */ + if (pad_ht) { + memset(dst, pad_val, (input_wd + pad_wd) * pad_ht * channels); + } +} + +/** + * @brief convert 8 bit input data to 16 bit + * + * @param src int8_t source data + * @param dst int16_t dst data + * @param size length of data + * @param offset offset to be added to src data. 
Range: [-128, 127] + */ +__NN_FORCE_INLINE__ void esp_nn_s8_to_s16_with_offset(const int8_t *src, int16_t *dst, + const int size, const int32_t offset) +{ + int i = 0; + for (; i < size; i += 2) { + dst[i + 0] = src[i + 0] + offset; + dst[i + 1] = src[i + 1] + offset; + } + if(i < size) { + dst[i] = src[i] + offset; + } +} + +/** + * @brief convert 8 bit input data to 16 bit + * + * @param src int8_t source data + * @param dst int16_t dst data + * @param size length of data + */ +__NN_FORCE_INLINE__ void esp_nn_s8_to_s16(const int8_t *src, int16_t *dst, const int size) +{ + int i = 0; + for (; i < size; i += 2) { + dst[i + 0] = src[i + 0]; + dst[i + 1] = src[i + 1]; + } + if(i < size) { + dst[i] = src[i]; + } +} diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_common_functions_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_common_functions_esp32s3.S new file mode 100644 index 0000000..68d1086 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_common_functions_esp32s3.S @@ -0,0 +1,266 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
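As a quick sanity check on the fixed-point helpers defined above: esp_nn_multiply_by_quantized_mult(x, mult, shift) applies an effective scale of (mult / 2^31) * 2^shift with round-to-nearest, and esp_nn_div_by_power_of_two is a rounding right shift. The snippet below exercises both with hand-checked values; the standalone main and the bare include of this internal header are assumptions made only for illustration, not part of the patch.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include "common_functions.h"   /* the header added above; include path is illustrative */

int main(void)
{
    /* mult = 1 << 30 encodes 0.5 in Q31; shift = -1 halves it again, so scale = 0.25. */
    int32_t y = esp_nn_multiply_by_quantized_mult(1000, 1 << 30, -1);
    assert(y == 250);

    /* Rounding divide: 7 / 4 rounds to 2, -7 / 4 rounds to -2. */
    assert(esp_nn_div_by_power_of_two(7, 2) == 2);
    assert(esp_nn_div_by_power_of_two(-7, 2) == -2);

    printf("requantized: %d\n", y);
    return 0;
}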
+ + .text + + # Program Unit: esp_nn_aligned_s8_to_s16_with_offset_esp32s3 + .type esp_nn_aligned_s8_to_s16_with_offset_esp32s3, @function + .align 4 + .global esp_nn_aligned_s8_to_s16_with_offset_esp32s3 + +esp_nn_aligned_s8_to_s16_with_offset_esp32s3: # 0x30d + + entry a1,48 # + mov.n a10,a2 # // src + mov.n a9,a3 # // dst + mov.n a8,a4 # // size + s32i.n a5,a1,12 # [3] // offset + addi.n a2,a1,12 # [4] + + blti a4,32,.Lt_2_6402 # [5] if (size < 32) goto unopt + + addi.n a6,a8,-1 # [0] + ee.zero.q q5 # [1] + ee.vldbc.16 q4,a2 # [2] id:136 offset + mov.n a3,a10 # [3] + mov.n a2,a9 # [4] + ee.vld.128.ip q0,a3,16 # [5] id:137 + ee.vld.128.ip q1,a3,16 # [6] id:138 + ee.vcmp.lt.s8 q2,q0,q5 # [7] + ee.vzip.8 q0,q2 # [8] + ee.vadds.s16 q0,q0,q4 # [9] + ee.vadds.s16.st.incp q0,a2,q0,q2,q4 # [10] id:139 + blti a4,64,.Lt_2_7170 # [11] + + addi a5,a4,-32 # [0] + srai a5,a5,5 # [1] + slli a4,a5,5 # [2] + loopgtz a5,.LBB37_esp_nn_aligned_s8_to_s16_with_offset_esp32s3 # [3] + + ee.vst.128.ip q0,a2,16 # [0*II+0] id:140 + ee.vcmp.lt.s8 q0,q1,q5 # [0*II+1] + ee.vzip.8 q1,q0 # [0*II+2] + ee.vadds.s16.ld.incp q2,a3,q3,q1,q4 # [0*II+3] id:141 + ee.vadds.s16.st.incp q3,a2,q0,q0,q4 # [0*II+4] id:142 + ee.vcmp.lt.s8 q3,q2,q5 # [0*II+5] + ee.vst.128.ip q0,a2,16 # [0*II+6] id:143 + ee.vzip.8 q2,q3 # [0*II+7] + ee.vadds.s16.ld.incp q1,a3,q0,q2,q4 # [0*II+8] id:144 + ee.vadds.s16.st.incp q0,a2,q0,q3,q4 # [0*II+9] id:145 + +.LBB37_esp_nn_aligned_s8_to_s16_with_offset_esp32s3: # 0x36d + addi a4,a4,32 # [0] + +.Lt_2_3842: # 0x370 + ee.vst.128.ip q0,a2,16 # [0] id:146 + ee.vcmp.lt.s8 q2,q1,q5 # [1] + ee.vzip.8 q1,q2 # [2] + ee.vadds.s16 q2,q2,q4 # [3] + ee.vadds.s16 q3,q1,q4 # [4] + ee.vst.128.ip q3,a2,16 # [5] id:147 + ee.vst.128.ip q2,a2,16 # [6] id:148 + bge a4,a6,.Lt_2_4866 # [7] + + l32i.n a5,a1,12 # [0] id:135 offset+0x0 + +.Lt_2_5122: # 0x38a + mov.n a11,a4 # [0] + add.n a2,a4,a10 # [1] + # 576 dst[i + 0] = src[i + 0] + offset; + l8ui a7,a2,0 # [2] id:149 + addx2 a6,a4,a9 # [3] + sext a7,a7,7 # [4] + add.n a7,a7,a5 # [5] + s16i a7,a6,0 # [6] id:150 + # 577 dst[i + 1] = src[i + 1] + offset; + l8ui a3,a2,1 # [7] id:151 + sub a7,a8,a4 # [8] + addi.n a2,a2,2 # [9] + srai a7,a7,1 # [10] + sext a3,a3,7 # [11] + add.n a3,a3,a5 # [12] + s16i a3,a6,2 # [13] id:152 + addi.n a3,a7,-1 # [14] + loopgtz a3,.LBB52_esp_nn_aligned_s8_to_s16_with_offset_esp32s3 # [15] + + l8ui a3,a2,0 # [0*II+0] id:149 + addi.n a6,a6,4 # [1*II+1] + sext a3,a3,7 # [0*II+2] + add.n a3,a3,a5 # [0*II+3] + s16i a3,a6,0 # [0*II+4] id:150 + l8ui a3,a2,1 # [0*II+5] id:151 + addi.n a2,a2,2 # [0*II+6] + sext a3,a3,7 # [0*II+7] + add.n a3,a3,a5 # [0*II+8] + s16i a3,a6,2 # [0*II+9] id:152 + +.LBB52_esp_nn_aligned_s8_to_s16_with_offset_esp32s3: # 0x3ce + addx2 a4,a7,a11 # [0] + +.Lt_2_4866: # 0x3d1 + bge a4,a8,.Lt_2_7682 # [0] + + # 580 dst[i] = src[i] + offset; + addx2 a11,a4,a9 # [0] + add.n a8,a4,a10 # [1] + l8ui a8,a8,0 # [2] id:153 + l32i.n a12,a1,12 # [3] id:135 offset+0x0 + sext a8,a8,7 # [4] + add.n a8,a8,a12 # [5] + s16i a8,a11,0 # [6] id:154 + retw.n # [7] + +.Lt_2_6402: # 0x3e8 + blti a4,2,.Lt_2_6658 # [0] + + movi.n a4,0 # [0] + j .Lt_2_5122 # [1] + +.Lt_2_7682: # 0x3f0 + retw.n # [0] + +.Lt_2_6658: # 0x3f2 + blti a4,1,.Lt_2_7682 # [0] + + l8ui a11,a10,0 # [0] id:153 + sext a11,a11,7 # [2] + add.n a11,a11,a5 # [3] + s16i a11,a3,0 # [4] id:154 + retw.n # [5] + +.Lt_2_7170: # 0x402 + movi.n a4,32 # [0] + j .Lt_2_3842 # [1] + + .size esp_nn_aligned_s8_to_s16_with_offset_esp32s3, . 
- esp_nn_aligned_s8_to_s16_with_offset_esp32s3 + + + .literal_position + + # Program Unit: esp_nn_s8_to_s16_esp32s3 + .type esp_nn_s8_to_s16_esp32s3, @function + .align 4 + .global esp_nn_s8_to_s16_esp32s3 + +esp_nn_s8_to_s16_esp32s3: # 0x40b + entry a1,32 # + mov.n a9,a2 // src + mov.n a8,a3 // dst + mov.n a7,a4 // size + blti a4,1,.Lt_3_4866 // size == 0 + blti a4,16,.Lt_3_4610 // if (size < 16) jump to unopt path + + // load align_len to sar_byte + extui a2,a2,0,4 # [0] + wur.sar_byte a2 # [1] + mov.n a2,a9 # [2] + + // preload + ee.vld.128.ip q0,a2,16 + ee.vld.128.ip q1,a2,16 + ee.zero.q q4 + # 672 + # 673 for (i = 16; i < size - 15; i += 16) { + blti a4,32,.Lt_3_5378 # [5] + addi a6,a4,-16 # [1] + srai a6,a6,4 # [2] + slli a4,a6,4 # [3] + loopgtz a6,.LBB35_esp_nn_s8_to_s16_esp32s3 # [4] + + ee.src.q.qup q2,q0,q1 # [0*II+0] + ee.vcmp.lt.s8 q3,q2,q4 # [0*II+1] // sign + ee.vld.128.ip q1,a2,16 # [0*II+2] // for next iteration + ee.vzip.8 q2,q3 # [0*II+3] + ee.vst.128.ip q2,a3,16 # [0*II+4] id:93 + ee.vst.128.ip q3,a3,16 # [0*II+5] id:94 + +.LBB35_esp_nn_s8_to_s16_esp32s3: # 0x449 + addi a4,a4,16 # [0] + +.Lt_3_2050: # 0x44c + ee.src.q.qup q5,q0,q1 # [0] + ee.vcmp.lt.s8 q3,q5,q4 # [1] + ee.vzip.8 q5,q3 # [2] + ee.vst.128.ip q5,a3,16 # [3] id:96 + ee.vst.128.ip q3,a3,16 # [4] id:97 + # 687 + # 688 skip_to_remains_s8_to_s16: + # 689 for (; i < size; i += 2) { + bge a4,a7,.Lt_3_4866 # [5] + +.Lt_3_3330: # 0x45e + mov.n a11,a4 # [0] + add.n a2,a4,a9 # [1] + # 690 dst[i + 0] = src[i + 0]; + l8ui a10,a2,0 # [2] id:98 + addx2 a5,a4,a8 # [3] + sext a10,a10,7 # [4] + s16i a10,a5,0 # [5] id:99 + # 691 dst[i + 1] = src[i + 1]; + l8ui a3,a2,1 # [6] id:100 + sub a10,a7,a4 # [7] + addi.n a2,a2,2 # [8] + addi.n a10,a10,1 # [9] + srai a10,a10,1 # [10] + sext a3,a3,7 # [11] + s16i a3,a5,2 # [12] id:101 + addi.n a3,a10,-1 # [13] + loopgtz a3,.LBB50_esp_nn_s8_to_s16_esp32s3 # [14] + + l8ui a3,a2,0 # [0*II+0] id:98 + addi.n a5,a5,4 # [1*II+1] + sext a3,a3,7 # [0*II+2] + s16i a3,a5,0 # [0*II+3] id:99 + l8ui a3,a2,1 # [0*II+4] id:100 + addi.n a2,a2,2 # [0*II+5] + sext a3,a3,7 # [0*II+6] + s16i a3,a5,2 # [0*II+7] id:101 + +.LBB50_esp_nn_s8_to_s16_esp32s3: # 0x49c + addx2 a4,a10,a11 # [0] + # 692 } + # 693 if(i < size) { + bge a4,a7,.Lt_3_4866 # [1] + + # 694 dst[i] = src[i]; + add.n a11,a4,a9 # [0] + l8ui a11,a11,0 # [1] id:102 + addx2 a12,a4,a8 # [2] + sext a11,a11,7 # [3] + s16i a11,a12,0 # [4] id:103 + retw.n # [5] + +.Lt_3_4610: # 0x4b2 + movi.n a4,0 # [0] + j .Lt_3_3330 # [1] + +.Lt_3_4866: # 0x4ba + retw.n # [0] + +.Lt_3_5378: # 0x4bc + movi.n a4,16 # [1] + j .Lt_3_2050 # [2] + + .size esp_nn_s8_to_s16_esp32s3, . 
- esp_nn_s8_to_s16_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_multiply_by_quantized_mult_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_multiply_by_quantized_mult_esp32s3.S new file mode 100644 index 0000000..08ff1b8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_multiply_by_quantized_mult_esp32s3.S @@ -0,0 +1,127 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// the macro `use_nudge` enables adding rounding factor similar to tflite implementation +// this barely changes any accuracy +// keep this disabled for better performance + +#ifndef SKIP_NUDGE + # set SKIP_NUDGE flag for ~20% faster (but not bit-exact) quantisation + .set use_nudge, 1 +#endif + + .text + .literal_position + .literal .nudge_val, 1073741824 # 1 << 30 + + .type esp_nn_multiply_by_quantized_mult_asm_esp32s3, @function + .align 4 + .global esp_nn_multiply_by_quantized_mult_asm_esp32s3 + +esp_nn_multiply_by_quantized_mult_asm_esp32s3: # 0x4 + # to_add = 4 + + entry a1,32 + wsr.sar a3 + ee.zero.q q2 + + bltz a3, .skip_left_shift + ee.vsl.32 q0,q0 # [13] +.skip_left_shift: + + ssai 31 # [15] + +# move data to general purpose registers + ee.movi.32.a q0,a12,0 # [17] + ee.movi.32.a q0,a13,1 # [16] + ee.movi.32.a q0,a14,2 # [18] + ee.movi.32.a q0,a15,3 # [19] + +.ifdef use_nudge + l32r a6,.nudge_val +.endif + +# perform 64 bit mult + mulsh a4,a2,a12 # [22] + mulsh a11,a2,a13 # [23] + mulsh a10,a2,a14 # [21] + mulsh a8,a2,a15 # [20] + mull a12,a2,a12 # [24] + mull a13,a2,a13 # [25] + mull a14,a2,a14 # [26] + mull a15,a2,a15 # [27] + +# add nudge_val and discard low31 + +.ifdef use_nudge + add.n a14,a6,a14 # [41] + saltu a2,a14,a6 # [44] + add.n a10,a10,a2 # [45] + + add.n a13,a6,a13 # [47] + saltu a9,a13,a6 # [50] + add.n a11,a11,a9 # [51] +.endif + + src a10,a10,a14 # [88] + src a11,a11,a13 # [78] + ee.movi.32.q q0,a10,2 + ee.movi.32.q q0,a11,1 + +.ifdef use_nudge + add.n a15,a6,a15 # [36] + saltu a2,a15,a6 # [39] + add.n a8,a8,a2 # [40] + + add.n a12,a6,a12 # [54] + saltu a10,a12,a6 # [57] + add.n a4,a4,a10 # [58] +.endif + + src a8,a8,a15 # [95] + src a4,a4,a12 # [69] # discard lower 31 bits + ee.movi.32.q q0,a8,3 + ee.movi.32.q q0,a4,0 + + bgez a3, .skip_div_by_power_of_2 + + neg a5,a3 # [0] right_shift/exponent = -shift + ee.vcmp.lt.s32 q2,q0,q2 # [97] + addi.n a7,a5,-1 # [0] exponent - 1 + ssl a7 # [1] + movi.n a6,1 # [92] + sll a6,a6 # [2] + s32i.n a6,a1,4 # [3] 
to_add + addi.n a4,a1,4 # [94] to_add_addr + ee.vldbc.32 q1,a4 # [4] id:148 to_add + wsr.sar a5 + ee.vadds.s32 q1,q1,q2 + ee.vadds.s32 q0,q0,q1 + ee.vsr.32 q0,q0 + +.skip_div_by_power_of_2: + retw.n # [9] + + .size esp_nn_multiply_by_quantized_mult_asm_esp32s3, . - esp_nn_multiply_by_quantized_mult_asm_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_multiply_by_quantized_mult_ver1_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_multiply_by_quantized_mult_ver1_esp32s3.S new file mode 100644 index 0000000..ed83816 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/common/esp_nn_multiply_by_quantized_mult_ver1_esp32s3.S @@ -0,0 +1,163 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// quantisation version where we deal with different shifts and mults. 
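
Editorial note: the "ver1" routine below requantizes four accumulator lanes at once, each lane with its own multiplier/shift pair (per-output-channel quantization). A scalar sketch of the same arithmetic, mirroring esp_nn_multiply_by_quantized_mult_fast() from the C helpers earlier in this patch, is shown here for reference; the sample accumulators and quantization parameters are made up, and the tie-breaking in the final rounding shift is simplified relative to esp_nn_div_by_power_of_two_fast().

/* scalar per-channel requantization sketch (not part of the patch) */
#include <stdint.h>
#include <stdio.h>

static int32_t requant_scalar(int32_t x, int32_t mult, int32_t shift)
{
    int32_t left_shift  = shift > 0 ? shift : 0;
    int32_t right_shift = left_shift - shift;              /* >= 0 */

    /* high 31 bits of the 64-bit product, with a 1<<30 rounding nudge */
    int64_t prod   = (int64_t)(x << left_shift) * mult + (1LL << 30);
    int32_t result = (int32_t)(prod >> 31);

    if (right_shift) {                                      /* rounding divide by 2^right_shift */
        int32_t round = 1 << (right_shift - 1);
        result = (result + round) >> right_shift;
    }
    return result;
}

int main(void)
{
    /* hypothetical per-channel accumulators and quantization parameters */
    int32_t acc[4]   = { 1000, -1000, 37, 123456 };
    int32_t mult[4]  = { 1073741824, 1073741824, 2147483647, 1200000000 };
    int32_t shift[4] = { 0, -1, -2, -3 };

    for (int ch = 0; ch < 4; ch++) {
        printf("ch %d -> %ld\n", ch, (long)requant_scalar(acc[ch], mult[ch], shift[ch]));
    }
    return 0;
}
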
+ + .set use_nudge, 1 + + .text + .literal_position + .literal .LC3_19_48, 1073741824 + + # Program Unit: esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + .type esp_nn_multiply_by_quantized_mult_ver1_esp32s3, @function + .align 4 + .global esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + +esp_nn_multiply_by_quantized_mult_ver1_esp32s3: # 0x1ee + entry a1,32 # + ee.zero.q q3 # [0] + l32i.n a8,a3,0 # [5] id:200 // shift0 + l32i.n a7,a3,4 # [2] id:201 // shift1 + l32i.n a12,a2,0 # [3] id:204 // mult0 + l32i.n a15,a2,4 # [1] id:205 // mult1 + movi.n a10,0 # [7] + + max a6,a10,a8 # [1] // left_shift0 + max a5,a10,a7 # [7] // left_shift1 + sub a8,a6,a8 # [2] // right_shift0 + sub a7,a5,a7 # [8] // right_shift1 + + ee.movi.32.a q0,a9,0 # [4] + ee.movi.32.a q0,a11,1 # [11] + ssl a6 # [3] + sll a9,a9 # [4] + mulsh a4,a12,a9 # [6] + mull a12,a12,a9 # [9] + ssl a5 # [10] + sll a11,a11 # [12] + mulsh a14,a15,a11 # [14] + mull a15,a15,a11 # [16] + l32r a13,.LC3_19_48 # [23] + + ee.movi.32.q q0,a9,0 # [5] + ee.movi.32.q q0,a11,1 # [15] + + + l32i.n a6,a3,8 # [6] id:202 // shift2 + l32i.n a9,a2,8 # [19] id:206 // mult2 + max a5,a10,a6 # [0] // left_shift2 + sub a6,a5,a6 # [24] // right_shift2 + + + ee.movi.32.a q0,a11,2 # [17] + ssl a5 # [13] + sll a11,a11 # [18] + ee.movi.32.q q0,a11,2 # [20] + mulsh a5,a9,a11 # [21] + mull a9,a9,a11 # [22] + mov a11, a5 + +// add nudge to result0 & result1 + add.n a12,a13,a12 # [25] + saltu a5,a12,a13 # [26] + add.n a15,a13,a15 # [27] + add.n a5,a5,a4 # [28] + saltu a4,a15,a13 # [29] + add.n a4,a4,a14 # [30] + + + l32i.n a14,a3,12 # [31] id:203 // shift3 + add.n a9,a13,a9 # [32] // add nudge low2 + max a10,a10,a14 # [33] // left_shift3 + sub a14,a10,a14 # [34] // right_shift3 + ssl a10 # [35] + ee.movi.32.a q0,a10,3 # [36] + sll a10,a10 # [37] + +// select high32 from result0 and resul1 + ssai 31 # [39] + src a5,a5,a12 # [40] + src a4,a4,a15 # [41] + movi.n a12,1 # [42] + ee.movi.32.q q0,a5,0 # [43] + saltu a15,a9,a13 # [44] + add.n a15,a15,a11 # [45] + ee.movi.32.q q0,a4,1 # [46] + l32i.n a11,a2,12 # [47] id:207 // mult3 + src a15,a15,a9 # [48] + ee.movi.32.q q0,a15,2 # [49] + mull a9,a11,a10 # [50] + mulsh a11,a11,a10 # [51] + add.n a9,a13,a9 # [52] + saltu a13,a9,a13 # [53] + add.n a13,a13,a11 # [54] + src a13,a13,a9 # [55] + ee.movi.32.q q0,a13,3 # [57] + +// divide_by_power_of2_step + ssl a8 # [56] + sll a9,a12 # [58] + ssl a7 # [59] + addi.n a9,a9,-1 # [60] + ee.movi.32.q q2,a9,0 # [61] + sll a11,a12 # [62] + addi.n a11,a11,-1 # [63] + ssl a6 # [64] + sll a10,a12 # [65] + ee.movi.32.q q2,a11,1 # [66] + ssl a14 # [67] + addi.n a10,a10,-1 # [68] + ee.movi.32.q q2,a10,2 # [69] + sll a9,a12 # [70] + addi.n a9,a9,-1 # [71] + ee.movi.32.q q2,a9,3 # [74] + ee.andq q1,q0,q2 # [75] + + ssr a8 # [72] + sra a5,a5 # [73] + ssr a7 # [76] + sra a4,a4 # [78] + ssr a6 # [79] + sra a15,a15 # [81] + ssr a14 # [82] + sra a13,a13 # [84] + wsr.sar a12 # [85] + + ee.movi.32.q q7,a5,0 # [77] + ee.movi.32.q q7,a4,1 # [80] + ee.movi.32.q q7,a15,2 # [83] + ee.movi.32.q q7,a13,3 # [86] + + ee.vcmp.lt.s32 q3,q7,q3 # [87] + ee.vsr.32 q2,q2 # [88] + ee.vsubs.s32 q2,q2,q3 # [89] + ee.vcmp.gt.s32 q1,q1,q2 # [90] + ee.vsubs.s32 q0,q7,q1 # [91] + +// return + retw.n # [92] + + .size esp_nn_multiply_by_quantized_mult_ver1_esp32s3, . 
- esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_ansi.c new file mode 100644 index 0000000..60b6b41 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_ansi.c @@ -0,0 +1,183 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include + +int esp_nn_get_conv_scratch_size_ansi(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const conv_params_t *conv_params) +{ + return 0; +} + +void esp_nn_set_conv_scratch_buf_ansi(const void *buf) +{ + +} + +/** + * Assumption 1: i/p channels == o/p channels + * Assumption 2: Pointers are valid + * Assumption 3: dialation width = 1 + */ +void esp_nn_conv_u8_ansi(const uint8_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t in_channels, + const int32_t input_offset, + const uint16_t pad_wd, + const uint16_t pad_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint8_t *filter_data, + const uint16_t filter_wd, + const uint16_t filter_ht, + const int32_t filter_offset, + const int32_t *bias, + uint8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const uint16_t out_channels, + const int32_t out_offset, + const int32_t out_shift, + const int32_t out_mult, + const int32_t activation_min, + const int32_t activation_max) +{ + for (int out_y = 0; out_y < out_ht; out_y++) { //height loop + const int16_t base_y = (out_y * stride_ht) - pad_ht; + for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + const int16_t base_x = (out_x * stride_wd) - pad_wd; + for (int out_ch_idx = 0; out_ch_idx < out_channels; out_ch_idx++) {//channel_loop + int32_t result = 0; + + /* Select filter so as the point doesn't lie outside block */ + int filter_y_start = max(0, -base_y); + int filter_x_start = max(0, -base_x); + int filter_y_end = min(filter_ht, input_ht - base_y); + int filter_x_end = min(filter_wd, input_wd - base_x); + + for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + for (int in_ch_idx = 0; in_ch_idx < in_channels; in_ch_idx++) { + int32_t input_index = (idx_y * input_wd + idx_x) * in_channels + 
in_ch_idx; + int32_t filter_index = ((out_ch_idx * filter_ht + filter_y_idx) + * filter_wd + filter_x_idx) * in_channels + + in_ch_idx; + int32_t input_val = input_data[input_index] + input_offset; + int32_t filter_val = filter_data[filter_index] + filter_offset; + result += input_val * filter_val; + } + } + } + if (bias) { + result += bias[out_ch_idx]; + } + result = esp_nn_multiply_by_quantized_mult(result, out_mult, out_shift); + result += out_offset; + result = max(result, activation_min); + result = min(result, activation_max); + + int out_index = (out_y * out_wd + out_x) * out_channels + out_ch_idx; + out_data[out_index] = (uint8_t) result; + } + } + } +} + +/** + * Assumption 1: i/p channels == o/p channels + * Assumption 2: Pointers are valid + * Assumption 3: dialation width = 1 + */ +void esp_nn_conv_s8_ansi(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const conv_params_t *conv_params, + const quant_data_t *quant_data) +{ + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t in_channels = input_dims->channels; + const int32_t input_offset = conv_params->in_offset; + const int32_t out_offset = conv_params->out_offset; + const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const uint16_t out_channels = output_dims->channels; + const int32_t *out_shift = quant_data->shift; + const int32_t *out_mult = quant_data->mult; + const int32_t activation_min = conv_params->activation.min; + const int32_t activation_max = conv_params->activation.max; + + int32_t out_ch_idx, out_y, out_x, in_ch_idx, filter_y_idx, filter_x_idx; + + for (out_y = 0; out_y < out_ht; out_y++) { + for (out_x = 0; out_x < out_wd; out_x++) { + for (out_ch_idx = 0; out_ch_idx < out_channels; out_ch_idx++) { + int32_t conv_out = 0; + + const int32_t base_y = stride_ht * out_y - pad_ht; + const int32_t base_x = stride_wd * out_x - pad_wd; + + const int32_t filter_y_start = max(0, -base_y); + const int32_t filter_x_start = max(0, -base_x); + + const int32_t filter_y_end = min(filter_ht, input_ht - base_y); + const int32_t filter_x_end = min(filter_wd, input_wd - base_x); + + for (filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + for (filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t in_row = base_y + filter_y_idx; + const int32_t in_col = base_x + filter_x_idx; + int32_t input_base_offset = (in_row * input_wd + in_col) * in_channels; + int32_t filter_base_offset = out_ch_idx * in_channels * filter_ht * filter_wd + + (filter_y_idx * filter_wd + filter_x_idx) * in_channels; + for (in_ch_idx = 0; in_ch_idx < in_channels; in_ch_idx++) { + conv_out += + (input_data[input_base_offset + in_ch_idx] + input_offset) * + filter_data[filter_base_offset + in_ch_idx]; + } + } + } + if (bias) { + conv_out += bias[out_ch_idx]; + } + conv_out = esp_nn_multiply_by_quantized_mult(conv_out, out_mult[out_ch_idx], out_shift[out_ch_idx]); + conv_out += out_offset; + conv_out = max(conv_out, activation_min); + 
conv_out = min(conv_out, activation_max); + *out_data++ = (int8_t) conv_out; + } + } + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_esp32s3.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_esp32s3.c new file mode 100644 index 0000000..1ddf4ba --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_esp32s3.c @@ -0,0 +1,273 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +/* + * SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/** + * Optimizations strategies used: + * Below optimizations are capable of any size of input/filter: + * + * 1. For filter wdxht = 1x1 (Refer esp_nn_conv_s8_mult8_1x1_esp32s3 function) + * - For this specific version, the strategy we employ: + * > This particular filter has only the channel + * dimension and we have `out_ch` number of such filters. + * > We take 8 input lines at a time and transpose those. + * > Keep loading and multiplying filter values one by one, + * to produce 8 outputs in parallel + * + * 2. General version: (Refer esp_nn_conv_s8_filter_aligned_input_padded_esp32s3) + * - For all other cases: + * > Consider `filter_wd * in_ch` as a single row. These many values can + * be continuosly loaded from inputs as well. + * > multiply accumulate into a single filter output. + * > To speed things up further, we pre-calculate + * (filter * in_offset + bias term) earlier and add it at the end of filter + * + * About ((filter * in_offset + bias term)) accumulate term: + * > The conv operation before requantization is as follows: + * for i in filter_size: + * conv_out += (input + input_offset) * filter; + * conv_out += bias + * + * > where input_offset is constant term hence, we can see that + * this term can be precalculated as: + * for i in filter_size: + * acc_term += input_offset * filter[i]; + * acc_term += bias + * OR + * for i in filter_size: + * acc_term += filter[i]; // accumulate filter values + * acc_term = acc_term * input_offset + bias + * + * + * In both the above versions we align the filter if needed, pad the input with + * -input_offset if needed and extend the channels to make those multiple + * of 8/16 as per function needs + */ + +#include +#include + +#include + +static int16_t *scratch_buffer = NULL; + +extern void esp_nn_conv_s8_mult8_1x1_esp32s3( + const int8_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t in_channels, + const int32_t input_offset, + const int8_t *filter_aligned, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const uint16_t out_channels, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max, + void *buffer /* scratch buffer */); + +extern void esp_nn_conv_s8_filter_aligned_input_padded_esp32s3( + const int8_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t in_channels, + const int32_t input_offset, + const uint16_t stride_wd, + const uint16_t stride_ht, + const int8_t *filter_data, + const uint16_t filter_wd, + const uint16_t filter_ht, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, 
+ const uint16_t out_ht, + const uint16_t out_channels, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max, + void *scratch_buffer); + +int esp_nn_get_conv_scratch_size_esp32s3(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const conv_params_t *conv_params) +{ + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t in_ch = input_dims->channels; + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + const uint16_t out_ch = output_dims->channels; + const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + + int new_channels = (in_ch + 7) & ~7; + + int input_scratch = input_wd * input_ht * in_ch; + int filter_scratch = filter_wd * filter_ht * in_ch * out_ch; + + int align_buf_size = 32; /* extra buffer for alignment */ + if ((filter_wd == 1 && filter_ht == 1 && pad_wd == 0 && pad_ht == 0) && + (stride_wd == 1 && stride_ht == 1)) { + int transpose_buf_size = 2 * (8 * new_channels); /* to store intermediate data */ + if (input_wd * input_ht < 8) { + transpose_buf_size = 0; // not using this for leftover + } + if (in_ch % 8) { + input_scratch = input_wd * input_ht * new_channels; + } else { + input_scratch = 0; + } + filter_scratch = new_channels * out_ch; + return input_scratch + filter_scratch + transpose_buf_size + align_buf_size; + } else { + new_channels = (in_ch + 15) & ~15; + if (pad_wd == 0 && pad_ht == 0) { + input_scratch = 0; + } else { + input_scratch = (input_wd + 2 * pad_wd) * (input_ht + 2 * pad_ht) * in_ch; + } + filter_scratch = filter_wd * filter_ht * new_channels * out_ch; + int offset_acc_scratch = out_ch * 4; + return input_scratch + filter_scratch + align_buf_size + offset_acc_scratch; + } + return align_buf_size; +} + +void esp_nn_set_conv_scratch_buf_esp32s3(void *buf) +{ + scratch_buffer = (int16_t *) buf; +} + +void esp_nn_conv_s8_esp32s3(const data_dims_t *input_dims, + const int8_t *input, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const conv_params_t *conv_params, + const quant_data_t *quant_data) +{ + if (scratch_buffer == NULL) { + printf("esp_nn_conv error! 
scratch_buffer not set!\n"); + return; + } + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t channels = input_dims->channels; + const int32_t input_offset = conv_params->in_offset; + const int32_t out_offset = conv_params->out_offset; + const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const uint16_t out_channels = output_dims->channels; + const int32_t *out_shift = quant_data->shift; + const int32_t *out_mult = quant_data->mult; + const int32_t activation_min = conv_params->activation.min; + const int32_t activation_max = conv_params->activation.max; + + int filter_size = filter_wd * filter_ht * channels * out_channels; + + if (filter_wd == 1 && filter_ht == 1 && pad_wd == 0 && pad_ht == 0 && + stride_wd == 1 && stride_ht == 1) { + + int8_t *input_aligned = (int8_t *) input; + int8_t *scratch_buf = (int8_t *) scratch_buffer; + int8_t *filter_aligned = (int8_t *) scratch_buffer; + int new_channels = channels; + if (channels % 8 == 0) { + if ((int) filter_data & 7) { // if the filter_data is not aligned to 8 bytes + int scratch_offset = (int) (filter_aligned + filter_size); + scratch_buf = (int8_t *) (scratch_offset + 16 - (scratch_offset & 15)); + memcpy(filter_aligned, filter_data, filter_size); // copy to aligned address + } else { + filter_aligned = (int8_t *) filter_data; + } + } else { + // pad extra channel to make it multiple of 8. Both input and filter + new_channels = (channels + 7) & ~7; + for (int out_ch_idx = 0; out_ch_idx < out_channels; out_ch_idx++) { + memcpy(filter_aligned, filter_data, channels); + memset(filter_aligned + channels, 0, new_channels - channels); + filter_aligned += new_channels; + filter_data += channels; + } + filter_aligned = (int8_t *) scratch_buffer; + int filter_data_size = new_channels * out_channels; + input_aligned = filter_aligned + filter_data_size; + for (int input_idx = 0; input_idx < input_ht * input_wd; input_idx++) { + memcpy(input_aligned, input, channels); + memset(input_aligned + channels, 0, new_channels - channels); + input_aligned += new_channels; + input += channels; + } + input_aligned = filter_aligned + filter_data_size; + scratch_buf = input_aligned + input_ht * input_wd * new_channels; + } + esp_nn_conv_s8_mult8_1x1_esp32s3( + input_aligned, input_wd, input_ht, new_channels, input_offset, + filter_aligned, bias, out_data, out_wd, out_ht, out_channels, out_offset, + out_shift, out_mult, activation_min, activation_max, scratch_buf); + } else { + // align the `filter width * channels` to 16 bytes. 
Do zero padding for the same + int32_t filter_row_size = filter_wd * channels; + int32_t filter_alignment_padding = 16 - (filter_row_size & 15); + int8_t *filter_data_aligned = (int8_t *) filter_data; + int8_t *input_padded = (int8_t *) input; + int8_t *scratch_data = (int8_t *) scratch_buffer; + int new_input_wd = input_wd, new_input_ht = input_ht; + if (filter_alignment_padding != 16) { + // pad filter_data + int32_t new_row_size = filter_wd * channels + filter_alignment_padding; + filter_data_aligned = scratch_data; + int8_t *row_ptr = filter_data_aligned; + for (int32_t ch_idx = 0; ch_idx < out_channels; ch_idx++) { + for (int32_t row_idx = 0; row_idx < filter_ht; row_idx++) { + memcpy(row_ptr, filter_data, filter_row_size); + memset(row_ptr + filter_row_size, 0, new_row_size - filter_row_size); + filter_data += filter_row_size; + row_ptr += new_row_size; + } + } + scratch_data += new_row_size * filter_ht * out_channels; + filter_row_size = new_row_size; + } else if ( (int) filter_data & 15) { + filter_data_aligned = scratch_data; + memcpy(filter_data_aligned, filter_data, filter_size); + scratch_data += filter_size; + } + if (pad_wd != 0 || pad_ht != 0) { // need padding + input_padded = (int8_t *) scratch_data; + esp_nn_aligned_s8_pad_with_value(input, input_padded, input_wd, input_ht, channels, + -input_offset, pad_wd, pad_ht); + new_input_wd = input_wd + 2 * pad_wd; + new_input_ht = input_ht + 2 * pad_ht; + scratch_data += new_input_wd * new_input_ht * channels; + } + esp_nn_conv_s8_filter_aligned_input_padded_esp32s3( + input_padded, new_input_wd, new_input_ht, channels, input_offset, + stride_wd, stride_ht, filter_data_aligned, filter_wd, filter_ht, + bias, out_data, out_wd, out_ht, out_channels, out_offset, + out_shift, out_mult, activation_min, activation_max, scratch_data); + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_opt.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_opt.c new file mode 100644 index 0000000..c1478ba --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_opt.c @@ -0,0 +1,183 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
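
Editorial note: the "Optimizations strategies used" comment in esp_nn_conv_esp32s3.c above relies on folding the constant input offset out of the inner loop: sum_i (x[i] + in_offset) * w[i] + bias equals sum_i x[i] * w[i] plus a per-channel term (in_offset * sum_i w[i] + bias) that depends only on the filter and can be precomputed once. A tiny numeric check of that identity, with made-up values, not part of the patch; the ANSI and opt C paths below instead keep the addition inside the inner loop.

/* numeric check of the offset-folding identity (not part of the patch) */
#include <assert.h>
#include <stdint.h>

int main(void)
{
    const int8_t  x[4]      = { 12, -7, 100, -128 };   /* hypothetical inputs */
    const int8_t  w[4]      = {  3, -1,   5,    2 };   /* hypothetical filter */
    const int32_t in_offset = 128;
    const int32_t bias      = -42;

    int32_t direct = bias, dot = 0, filter_sum = 0;
    for (int i = 0; i < 4; i++) {
        direct     += (x[i] + in_offset) * w[i];  /* what the reference conv computes */
        dot        += x[i] * w[i];                /* pure input*filter dot product    */
        filter_sum += w[i];                       /* accumulated filter values        */
    }
    int32_t acc_term = in_offset * filter_sum + bias;  /* precomputable per channel */
    assert(direct == dot + acc_term);
    return 0;
}
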
+ +#include + +#include + +int esp_nn_get_conv_scratch_size_opt(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const conv_params_t *conv_params) +{ + return 0; +} + +void esp_nn_set_conv_scratch_buf_opt(const void *buf) +{ + +} + +__attribute__ ((noinline)) +static void esp_nn_conv_s8_1x1(const data_dims_t *input_dims, + const int8_t *input_data, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const conv_params_t *conv_params, + const quant_data_t *quant_data) +{ + const uint16_t input_wd = input_dims->width; + const uint16_t in_channels = input_dims->channels; + const int32_t input_offset = conv_params->in_offset; + const int32_t out_offset = conv_params->out_offset; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const uint16_t out_channels = output_dims->channels; + const int32_t activation_min = conv_params->activation.min; + const int32_t activation_max = conv_params->activation.max; + + for (int32_t in_row = 0; in_row < out_ht * stride_ht; in_row += stride_ht) { + for (int32_t in_col = 0; in_col < out_wd * stride_wd; in_col += stride_wd) { + const int32_t *out_mult = quant_data->mult; + const int32_t *out_shift = quant_data->shift; + const int8_t *filter_ptr = filter_data; + const int8_t *input_base_ptr = input_data + (in_row * input_wd + in_col) * in_channels; + int32_t out_ch_idx = 0; + for (; out_ch_idx < out_channels; out_ch_idx++) { + int32_t conv_out = 0; + + const int8_t *input_ptr = input_base_ptr; + + int32_t in_ch_idx = 0; + for (; in_ch_idx < in_channels - 3; in_ch_idx += 4) { + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + } + for (; in_ch_idx < in_channels; in_ch_idx ++) { + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + } + if (bias) { + conv_out += bias[out_ch_idx]; + } + conv_out = esp_nn_multiply_by_quantized_mult_fast(conv_out, *out_mult++, *out_shift++); + conv_out += out_offset; + conv_out = max(conv_out, activation_min); + conv_out = min(conv_out, activation_max); + *out_data++ = (int8_t) conv_out; + } + } + } +} + +/** + * Assumption 1: i/p channels == o/p channels + * Assumption 2: Pointers are valid + * Assumption 3: dialation width = 1 + */ +void esp_nn_conv_s8_opt(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const conv_params_t *conv_params, + const quant_data_t *quant_data) +{ + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + + if (filter_wd == 1 && filter_ht == 1) { + esp_nn_conv_s8_1x1(input_dims, input_data, filter_data, bias, + output_dims, out_data, conv_params, quant_data); + return; + } + + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t in_channels = input_dims->channels; + const int32_t input_offset = conv_params->in_offset; + const int32_t out_offset = conv_params->out_offset; + const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t 
stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const uint16_t out_channels = output_dims->channels; + const int32_t activation_min = conv_params->activation.min; + const int32_t activation_max = conv_params->activation.max; + + int32_t out_ch_idx, out_y, out_x, filter_y_idx, filter_x_idx; + + for (out_y = 0; out_y < out_ht; out_y++) { + for (out_x = 0; out_x < out_wd; out_x++) { + const int32_t *out_shift = quant_data->shift; + const int32_t *out_mult = quant_data->mult; + for (out_ch_idx = 0; out_ch_idx < out_channels; out_ch_idx++) { + int32_t conv_out = 0; + + const int32_t base_y = stride_ht * out_y - pad_ht; + const int32_t base_x = stride_wd * out_x - pad_wd; + + const int32_t filter_y_start = max(0, -base_y); + const int32_t filter_x_start = max(0, -base_x); + + const int32_t filter_y_end = min(filter_ht, input_ht - base_y); + const int32_t filter_x_end = min(filter_wd, input_wd - base_x); + + for (filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + for (filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t in_row = base_y + filter_y_idx; + const int32_t in_col = base_x + filter_x_idx; + + const int8_t *input_ptr = input_data + + (in_row * input_wd + in_col) * in_channels; + const int8_t *filter_ptr = filter_data + + out_ch_idx * in_channels * filter_ht * filter_wd + + (filter_y_idx * filter_wd + filter_x_idx) * in_channels; + int32_t in_ch_idx = 0; + for (; in_ch_idx < in_channels - 3; in_ch_idx += 4) { + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + } + for (; in_ch_idx < in_channels; in_ch_idx ++) { + conv_out += (*input_ptr++ + input_offset) * *filter_ptr++; + } + } + } + if (bias) { + conv_out += bias[out_ch_idx]; + } + conv_out = esp_nn_multiply_by_quantized_mult_fast(conv_out, *out_mult++, *out_shift++); + conv_out += out_offset; + conv_out = max(conv_out, activation_min); + conv_out = min(conv_out, activation_max); + *out_data++ = (int8_t) conv_out; + } + } + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s16_mult4_1x1_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s16_mult4_1x1_esp32s3.S new file mode 100644 index 0000000..50c00cc --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s16_mult4_1x1_esp32s3.S @@ -0,0 +1,358 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + + .text + .literal_position + .literal .nudge_val, 1073741824 + + # Program Unit: esp_nn_conv_s16_mult4_1x1_esp32s3 + .type esp_nn_conv_s16_mult4_1x1_esp32s3, @function + .align 4 + .global esp_nn_conv_s16_mult4_1x1_esp32s3 +esp_nn_conv_s16_mult4_1x1_esp32s3: # 0xa62 + # scratch_buf = 0 + # to_add = 32 + # gra_spill_temp_139 = 36 + # gra_spill_temp_140 = 40 + # gra_spill_temp_141 = 44 + # gra_spill_temp_155 = 48 + # gra_spill_temp_156 = 52 + # gra_spill_temp_144 = 56 + # gra_spill_temp_145 = 60 + # gra_spill_temp_146 = 64 + # gra_spill_temp_147 = 68 + # gra_spill_temp_148 = 72 + # gra_spill_temp_149 = 76 + # gra_spill_temp_150 = 80 + # gra_spill_temp_151 = 84 + # gra_spill_temp_152 = 88 + # gra_spill_temp_153 = 92 + # lgra_spill_temp_165 = 96 + # lgra_spill_temp_166 = 100 + # lgra_spill_temp_167 = 104 + # lgra_spill_temp_168 = 108 + # gra_spill_temp_158 = 112 + # gra_spill_temp_159 = 116 + # gra_spill_temp_160 = 120 + + + // registers: + // a2: int16_t *input_data + // a3: uint16_t input_wd + // a4: uint16_t input_ht + // a5: uint16_t in_channels + // a6: int16_t *filter_data + // a7: int32_t *bias + + // on stack: + // 160: int8_t *out_data + // 164: uint16_t out_wd + // 168: uint16_t out_ht + // 172: uint16_t out_channels + // 176: int32_t out_offset + // 180: int32_t *out_shift + // 184: int32_t *out_mult + // 188: int32_t activation_min + // 192: int32_t activation_max + // 196: *buffer /* scratch buffer */ + + + entry a1,160 # + s32i.n a2,a1,40 # [0] gra_spill_temp_140 + s32i a6,a1,68 # [1] gra_spill_temp_147 + s32i a7,a1,116 # [2] gra_spill_temp_159 + + mul16u a3,a3,a4 # [3] + addi a10,a1,112 # [4] + addmi a11,a1,176 # [5] + addmi a8,a1,176 # [6] + addmi a9,a1,176 # [7] + addi.n a9,a9,12 # [8] + addi a8,a8,16 # [9] + ee.vldbc.32 q5,a11 # [10] id:188 out_offset + ee.vldbc.32 q7,a8 # [12] id:270 activation_max + ee.vldbc.32 q6,a9 # [13] id:269 activation_min + blti a3,4,.Lt_3_6402 # [14] + +.LBB3_esp_nn_conv_s16_mult4_1x1_esp32s3: # 0xa90 + l32i a13,a1,160 # [0] id:280 out_data+0x0 + srai a8,a5,2 # [1] + addi a10,a3,-3 # [2] + addi a9,a5,-3 # [3] + movi.n a12,0 # [4] + slli a11,a5,2 # [5] + slli a15,a5,1 # [6] + l16ui a14,a1,172 # [7] id:271 out_channels+0x0 + s32i.n a15,a1,36 # [9] gra_spill_temp_139 + s32i.n a11,a1,56 # [10] gra_spill_temp_144 + s32i a12,a1,84 # [11] gra_spill_temp_151 + s32i a9,a1,52 # [12] gra_spill_temp_156 + s32i.n a10,a1,60 # [13] gra_spill_temp_145 + s32i a8,a1,88 # [14] gra_spill_temp_152 + movi.n a10,0 # [15] + l32i a8,a1,196 # [16] id:281 buffer+0x0 + slli a11,a11,1 # [19] + l32i a15,a1,184 # [20] id:192 out_mult+0x0 + s32i a11,a1,64 # [22] gra_spill_temp_146 + s32i a8,a1,112 # [25] gra_spill_temp_158 + s32i a10,a1,92 # [26] gra_spill_temp_153 + movi.n a8,0 # [27] + s32i a10,a1,80 # [31] gra_spill_temp_150 + s32i a8,a1,76 # [32] gra_spill_temp_149 + slli a8,a14,1 # [34] + addx2 a9,a14,a14 # [35] + s32i a9,a1,72 # [36] gra_spill_temp_148 + s32i.n a8,a1,44 # [37] gra_spill_temp_141 + addx4 a14,a14,a15 # [38] + s32i a14,a1,48 # [39] gra_spill_temp_155 + j .Lt_3_6914 # [40] + +.Lt_3_8194: # 0xb00 +# Part of loop body line 305, head labeled .Lt_3_6914 + l32i.n a12,a1,60 # [0] gra_spill_temp_145 + l32i.n a9,a1,56 # [1] gra_spill_temp_144 + l32i a8,a1,76 # [2] gra_spill_temp_149 + l32i a15,a1,64 # [3] gra_spill_temp_146 + l32i a11,a1,72 # [4] gra_spill_temp_148 + l32i a14,a1,84 # [5] gra_spill_temp_151 + add.n a13,a13,a11 # [6] + l32i a11,a1,80 # [7] 
gra_spill_temp_150 + add.n a14,a14,a15 # [8] + add.n a8,a8,a9 # [9] + s32i a8,a1,76 # [10] gra_spill_temp_149 + s32i a14,a1,84 # [11] gra_spill_temp_151 + addi.n a11,a11,4 # [12] + s32i a11,a1,80 # [13] gra_spill_temp_150 + bge a11,a12,.Lt_3_6402 # [14] + +.Lt_3_6914: # 0xb27 + l32i a12,a1,52 # [0] gra_spill_temp_156 + l32i a4,a1,112 # [1] gra_spill_temp_158 + blti a12,1,.Lt_3_7170 # [2] + +.LBB6_esp_nn_conv_s16_mult4_1x1_esp32s3: # 0xb30 + l32i a3,a1,88 # [0] gra_spill_temp_152 + l32i.n a5,a1,40 # [1] gra_spill_temp_140 + l32i a2,a1,84 # [3] gra_spill_temp_151 + add.n a2,a2,a5 # [7] + l32i.n a5,a1,36 # [9] gra_spill_temp_139 + + // load and transose 4 lines of input 4xchannels, + loopgtz a3,.transpose_loop_end + mov.n a3,a2 # [0*II+0] + ee.vld.l.64.xp q0,a3,a5 # [0*II+2] id:282 + ee.vld.l.64.xp q1,a3,a5 # [0*II+3] id:283 + ee.vld.l.64.xp q2,a3,a5 # [0*II+4] id:284 + ee.vld.l.64.xp q3,a3,a5 # [0*II+5] id:285 + ee.vzip.16 q0,q1 # [0*II+6] + ee.vzip.16 q2,q3 # [0*II+7] + ee.vzip.32 q0,q2 # [0*II+8] + ee.vst.128.ip q0,a4,16 # [0*II+9] id:286 + ee.vst.128.ip q2,a4,16 # [0*II+10] id:287 + addi.n a2,a2,8 # [0*II+1] +.transpose_loop_end: + +.Lt_3_7170: # 0xb7c + l32i a2,a1,68 # [0] gra_spill_temp_147 + l32i a9,a1,116 # [1] gra_spill_temp_159 + l16ui a8,a1,172 # [2] out_channels + s32i a9,a1,120 # [3] gra_spill_temp_160 + beqz.n a8,.Lt_3_8194 # [4] + + l32i a9,a1,180 # [0] out_shift + l32i a11,a1,184 # [1] out_mult + l32i a15,a1,72 # [2] gra_spill_temp_148 + l32i.n a14,a1,44 # [3] gra_spill_temp_141 + add.n a15,a15,a13 # [4] + add.n a14,a14,a13 # [5] + j .Lt_3_8706 # [6] + +.Lt_3_10754: # 0xb9a + + movi.n a3,0 # [0] + +.Lt_3_10498: # 0xb9c + +// esp_nn_multiply_by_quantized_mult_esp32s3 + ee.zero.q q0 # [0] + l32i a5,a1,92 # [1] gra_spill_temp_153 + s32i a2,a1,96 # [2] lgra_spill_temp_165 + s32i a11,a1,104 # [3] lgra_spill_temp_167 + s32i a13,a1,108 # [4] lgra_spill_temp_168 + s32i a9,a1,100 # [5] lgra_spill_temp_166 + + movi.n a13,0 # [6] + max a12,a12,a13 # [7] + wsr.sar a12 # [8] + ee.vsl.32 q1,q1 # [9] + ssai 31 # [10] + ee.movi.32.a q1,a7,0 # [11] + ee.movi.32.a q1,a8,1 # [12] + ee.movi.32.a q1,a6,3 # [13] + ee.movi.32.a q1,a9,2 # [14] + mulsh a12,a4,a9 # [15] + mulsh a11,a4,a6 # [16] + mulsh a2,a4,a8 # [17] + mulsh a13,a7,a4 # [18] + mull a8,a4,a8 # [19] + mull a7,a7,a4 # [20] + mull a6,a4,a6 # [24] + + add.n a11,a5,a11 # [21] + add.n a12,a5,a12 # [22] + add.n a2,a5,a2 # [23] + add.n a5,a5,a13 # [25] + + l32r a13,.nudge_val + mull a9,a4,a9 # [27] + + add.n a6,a13,a6 # [28] + add.n a9,a13,a9 # [29] + add.n a10,a13,a7 # [30] + add.n a8,a13,a8 # [32] + + saltu a7,a10,a13 # [33] + add.n a7,a7,a5 # [34] + saltu a5,a8,a13 # [35] + add.n a5,a5,a2 # [36] + src a5,a5,a8 # [37] + saltu a2,a9,a13 # [38] + add.n a2,a2,a12 # [40] + saltu a13,a6,a13 # [41] + addi.n a12,a3,-1 # [42] + src a2,a2,a9 # [43] + ee.movi.32.q q3,a5,1 # [51] + ee.movi.32.q q3,a2,2 # [54] + + add.n a13,a13,a11 # [44] + addi a9,a1,32 # [45] to_add + movi.n a11,1 # [46] + src a7,a7,a10 # [47] + src a13,a13,a6 # [48] + ee.movi.32.q q3,a7,0 # [50] + ee.movi.32.q q3,a13,3 # [57] + + addi a8,a1,112 # [49] + + l32i a7,a1,48 # [52] gra_spill_temp_155 + l16ui a5,a1,172 # [53] out_channels + ssl a12 # [55] + sll a11,a11 # [56] + wsr.sar a3 # [58] + ee.vcmp.lt.s32 q0,q3,q0 # [59] + l32i a13,a1,108 # [60] lgra_spill_temp_168 + s32i.n a11,a1,32 # [61] to_add + ee.vldbc.32 q1,a9 # [62] id:317 to_add + add.n a5,a5,a13 # [63] + l32i a9,a1,100 # [64] lgra_spill_temp_166 + ee.vadds.s32 q1,q1,q0 # [65] + addi.n a9,a9,4 # [66] + ee.vadds.s32 q1,q3,q1 
# [67] + ee.vsr.32 q1,q1 # [69] + +# add offset, apply activation and store + ee.vadds.s32 q1,q1,q5 # [70] + ee.vmin.s32 q1,q1,q7 # [72] + ee.vmax.s32 q1,q1,q6 # [73] + ee.vst.128.ip q1,a1,0 # [74] id:320 + l8ui a6,a1,0 # [75] scratch_buf + s8i a6,a13,0 # [76] + addi.n a13,a13,1 # [77] + l8ui a2,a1,4 # [78] scratch_buf+4 + s8i a2,a5,0 # [79] + l8ui a12,a1,8 # [80] scratch_buf+8 + l32i a2,a1,96 # [81] lgra_spill_temp_165 + s8i a12,a14,0 # [82] + addi.n a14,a14,1 # [83] + l8ui a11,a1,12 # [84] scratch_buf+12 + s8i a11,a15,0 # [85] + l32i a11,a1,104 # [86] lgra_spill_temp_167 + addi.n a15,a15,1 # [87] + addi.n a11,a11,4 # [88] + sub a7,a11,a7 # [89] + beqz a7,.Lt_3_8194 # [90] + +.Lt_3_8706: # 0xc97 + ee.zero.qacc # [0] + l32i a8,a1,52 # [1] gra_spill_temp_156 + l32i a3,a1,112 # [2] gra_spill_temp_158 + blti a8,1,.Lt_3_8962 # [3] + + l32i a4,a1,88 # [0] gra_spill_temp_152 + loopgtz a4,.LBB53_esp_nn_conv_s16_mult4_1x1_esp32s3 # [2] + + ee.vld.l.64.ip q0,a2,8 # [0*II+0] id:289 + ee.vld.l.64.ip q1,a3,8 # [0*II+1] id:290 + ee.vld.l.64.ip q2,a3,8 # [0*II+2] id:291 + ee.vsmulas.s16.qacc q1,q0,0 # [0*II+3] + ee.vld.l.64.ip q3,a3,8 # [0*II+4] id:292 + ee.vsmulas.s16.qacc q2,q0,1 # [0*II+5] + ee.vld.l.64.ip q4,a3,8 # [0*II+6] id:293 + ee.vsmulas.s16.qacc q3,q0,2 # [0*II+7] + ee.vsmulas.s16.qacc q4,q0,3 # [0*II+8] + +.LBB53_esp_nn_conv_s16_mult4_1x1_esp32s3: # 0xcc4 + +.Lt_3_8962: # 0xcc4 + +// extract data: + mov a10,a1 + ee.st.qacc_l.l.128.ip a10,16 # [0] id:298 + ee.st.qacc_l.h.32.ip a10,-16 # [1] id:299 + l8ui a12,a1,16 # [2] scratch_buf+16 + l8ui a8,a1,6 # [3] scratch_buf+6 + s8i a8,a1,3 # [4] scratch_buf+3 + s8i a12,a1,7 # [5] scratch_buf+7 + l8ui a8,a1,15 # [6] scratch_buf+15 + l8ui a12,a1,5 # [7] scratch_buf+5 + s8i a12,a1,2 # [8] scratch_buf+2 + s8i a8,a1,6 # [9] scratch_buf+6 + l16ui a12,a1,10 # [10] scratch_buf+10 + movi.n a8,16 # [11] + ee.srcmb.s16.qacc q2,a8,0 # [12] + s16i a12,a1,4 # [13] scratch_buf+4 + ee.vld.l.64.ip q1,a10,0 # [14] id:309 + l32i a12,a1,116 # [15] gra_spill_temp_159, bias + ee.vzip.16 q1,q2 # [16] + + beqz.n a12,.Lt_3_9986 # [17] // skip bias + // add bias: + l32i a8,a1,120 # [0] gra_spill_temp_160 + ee.vldbc.32.ip q0,a8,4 # [2] id:311 + s32i a8,a1,120 # [3] gra_spill_temp_160 + ee.vadds.s32 q1,q1,q0 # [4] +.Lt_3_9986: # 0xd04 + + l32i.n a12,a9,0 # [0] id:313 + l32i.n a4,a11,0 # [1] id:312 + bgei a12,1,.Lt_3_10754 # [2] + + neg a3,a12 # [0] + j .Lt_3_10498 # [1] + +.Lt_3_6402: # 0xd11 + retw.n # [0] + + .size esp_nn_conv_s16_mult4_1x1_esp32s3, . 
- esp_nn_conv_s16_mult4_1x1_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s16_mult8_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s16_mult8_esp32s3.S new file mode 100644 index 0000000..4c49f80 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s16_mult8_esp32s3.S @@ -0,0 +1,489 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + .text + .literal_position + .literal .LC10_28_153, -2147483648 + .literal .LC11_28_154, -1073741823 + .literal .LC12_28_155, 2147483647 + .literal .LC13_28_156, 1073741824 + + # Program Unit: esp_nn_conv_s16_mult8_esp32s3 + .type esp_nn_conv_s16_mult8_esp32s3, @function + .align 4 + .global esp_nn_conv_s16_mult8_esp32s3 +esp_nn_conv_s16_mult8_esp32s3: # 0x6e2 + # qacc_scratch = 0 + # gra_spill_temp_96 = 48 + # gra_spill_temp_97 = 52 + # gra_spill_temp_98 = 56 + # gra_spill_temp_99 = 60 + # gra_spill_temp_100 = 64 + # gra_spill_temp_101 = 68 + # gra_spill_temp_102 = 72 + # gra_spill_temp_103 = 76 + # gra_spill_temp_104 = 80 + # gra_spill_temp_105 = 84 + # gra_spill_temp_106 = 88 + # gra_spill_temp_107 = 92 + # gra_spill_temp_108 = 96 + # gra_spill_temp_109 = 100 + # gra_spill_temp_110 = 104 + # gra_spill_temp_111 = 108 + # gra_spill_temp_112 = 112 + # gra_spill_temp_113 = 116 + # gra_spill_temp_114 = 120 + # gra_spill_temp_115 = 124 + # gra_spill_temp_116 = 128 + # gra_spill_temp_117 = 132 + # gra_spill_temp_118 = 136 + # gra_spill_temp_119 = 140 + # gra_spill_temp_120 = 144 + # gra_spill_temp_121 = 148 + # gra_spill_temp_122 = 152 + # gra_spill_temp_123 = 156 + # gra_spill_temp_124 = 160 + # gra_spill_temp_125 = 164 + # gra_spill_temp_126 = 168 + # gra_spill_temp_127 = 172 + # gra_spill_temp_128 = 176 + # gra_spill_temp_129 = 180 + # gra_spill_temp_130 = 184 + # gra_spill_temp_131 = 188 + # gra_spill_temp_132 = 192 + # gra_spill_temp_133 = 196 + # gra_spill_temp_134 = 200 + # gra_spill_temp_135 = 204 + # gra_spill_temp_136 = 208 + # gra_spill_temp_137 = 212 + + // registers: + // a2: const int16_t *input_data + // a3: const uint16_t input_wd + // a4: const uint16_t input_ht + // a5: const uint16_t in_channels + // a6: const uint16_t pad_wd + // a7: const uint16_t pad_ht + + // on stack: + // const uint16_t stride_wd + // const uint16_t stride_ht + // const int16_t *filter_data + // const uint16_t filter_wd + // const uint16_t filter_ht + // const int32_t *bias + // int8_t *out_data + // 
const uint16_t out_wd + // const uint16_t out_ht + // const uint16_t out_channels + // const int32_t out_offset + // const int32_t *out_shift + // const int32_t *out_mult + // const int32_t activation_min + // const int32_t activation_max + + + entry a1,256 # + s32i a2,a1,176 # [0] gra_spill_temp_128 + s32i a3,a1,192 # [1] gra_spill_temp_132 + s32i.n a6,a1,60 # [2] gra_spill_temp_99 + l16ui a8,a1,288 # [3] id:282 out_ht+0x0 + s32i a8,a1,68 # [4] gra_spill_temp_101 + beqz.n a8,.Lt_2_11778 # [5] + + s32i a7,a1,76 # [0] gra_spill_temp_103 + s32i a1,a1,156 # [1] gra_spill_temp_123 + l16ui a8,a1,272 # [2] id:285 filter_ht+0x0 + neg a11,a7 # [3] + movi.n a12,0 # [4] + neg a14,a6 # [5] + l16ui a15,a1,268 # [6] id:286 filter_wd+0x0 + l16ui a9,a1,292 # [7] id:283 out_channels+0x0 + l32i a10,a1,304 # [8] id:284 out_mult+0x0 + s32i a10,a1,88 # [9] gra_spill_temp_106 + s32i a9,a1,96 # [10] gra_spill_temp_108 + s32i a15,a1,196 # [11] gra_spill_temp_133 + s32i.n a14,a1,48 # [12] gra_spill_temp_96 + s32i a12,a1,72 # [13] gra_spill_temp_102 + s32i a11,a1,80 # [14] gra_spill_temp_104 + s32i.n a8,a1,52 # [15] gra_spill_temp_97 + sub a13,a3,a14 # [16] + mul16u a8,a5,a8 # [17] + s32i.n a13,a1,56 # [18] gra_spill_temp_98 + sub a11,a4,a11 # [19] + l32i a12,a1,276 # [20] id:292 bias+0x0 + s32i a12,a1,152 # [21] gra_spill_temp_122 + s32i a11,a1,84 # [22] gra_spill_temp_105 + l32i a14,a1,308 # [23] id:290 activation_min+0x0 + l32i a13,a1,312 # [24] id:291 activation_max+0x0 + s32i a13,a1,144 # [25] gra_spill_temp_120 + mull a15,a15,a8 # [26] + addx4 a9,a9,a10 # [27] + s32i a14,a1,140 # [28] gra_spill_temp_119 + l32i a11,a1,300 # [29] id:293 out_shift+0x0 + s32i a11,a1,92 # [30] gra_spill_temp_107 + slli a14,a5,1 # [31] + s32i a9,a1,124 # [32] gra_spill_temp_115 + s32i a15,a1,128 # [33] gra_spill_temp_116 + l32i a8,a1,280 # [34] id:288 out_data+0x0 + movi.n a10,0 # [35] + s32i a10,a1,160 # [36] gra_spill_temp_124 + s32i a8,a1,132 # [37] gra_spill_temp_117 + l32i a15,a1,296 # [38] id:289 out_offset+0x0 + l32i a9,a1,264 # [39] id:287 filter_data+0x0 + s32i a9,a1,180 # [40] gra_spill_temp_129 + s32i a15,a1,136 # [41] gra_spill_temp_118 + l16ui a8,a1,284 # [42] id:296 out_wd+0x0 + l16ui a10,a1,256 # [43] id:294 stride_wd+0x0 + s32i a10,a1,100 # [44] gra_spill_temp_109 + s32i a8,a1,104 # [45] gra_spill_temp_110 + addi.n a15,a5,-1 # [46] + l16ui a9,a1,260 # [47] id:295 stride_ht+0x0 + s32i a9,a1,64 # [48] gra_spill_temp_100 + srai a15,a15,3 # [49] + j .Lt_2_12290 # [50] + +.Lt_2_12546: # 0x788 + l32i a8,a1,68 # [0] gra_spill_temp_101 + l32i a12,a1,80 # [1] gra_spill_temp_104 + l32i a11,a1,84 # [2] gra_spill_temp_105 + l32i a10,a1,64 # [3] gra_spill_temp_100 + l32i a13,a1,72 # [4] gra_spill_temp_102 + l32i a9,a1,76 # [5] gra_spill_temp_103 + addi.n a13,a13,1 # [6] + s32i a13,a1,72 # [7] gra_spill_temp_102 + sub a9,a9,a10 # [8] + sub a11,a11,a10 # [9] + add.n a12,a12,a10 # [10] + s32i a12,a1,80 # [11] gra_spill_temp_104 + s32i a11,a1,84 # [12] gra_spill_temp_105 + s32i a9,a1,76 # [13] gra_spill_temp_103 + sub a13,a13,a8 # [14] + beqz a13,.Lt_2_11778 # [15] + +.Lt_2_12290: # 0x7b6 // width loop + l32i a13,a1,104 # [0] gra_spill_temp_110 + beqz.n a13,.Lt_2_12546 # [2] + + l32i a8,a1,192 # [0] gra_spill_temp_132 + l32i a9,a1,80 # [1] gra_spill_temp_104 + movi.n a11,0 # [2] + l32i a10,a1,76 # [3] gra_spill_temp_103 + l32i.n a12,a1,60 # [4] gra_spill_temp_99 + l32i.n a13,a1,56 # [5] gra_spill_temp_98 + s32i a13,a1,116 # [6] gra_spill_temp_113 + s32i a12,a1,112 # [7] gra_spill_temp_112 + max a10,a10,a11 # [8] + s32i a10,a1,148 # 
[9] gra_spill_temp_121 + add.n a9,a9,a10 # [10] + l32i.n a11,a1,48 # [11] gra_spill_temp_96 + s32i a11,a1,184 # [12] gra_spill_temp_130 + mull a8,a8,a9 # [13] + l32i a10,a1,84 # [14] gra_spill_temp_105 + s32i a8,a1,120 # [15] gra_spill_temp_114 + l32i.n a9,a1,52 # [16] gra_spill_temp_97 + movi.n a8,0 # [17] + s32i a8,a1,108 # [18] gra_spill_temp_111 + min a9,a9,a10 # [19] + s32i a9,a1,204 # [20] gra_spill_temp_135 + j .Lt_2_13058 # [21] + +.Lt_2_13314: # 0x7f6 +# Part of loop body line 186, head labeled .Lt_2_13058 + l32i a13,a1,104 # [0] gra_spill_temp_110 + l32i a11,a1,112 # [1] gra_spill_temp_112 + l32i a10,a1,184 # [2] gra_spill_temp_130 + l32i a9,a1,100 # [3] gra_spill_temp_109 + l32i a12,a1,108 # [4] gra_spill_temp_111 + l32i a8,a1,116 # [5] gra_spill_temp_113 + addi.n a12,a12,1 # [6] + s32i a12,a1,108 # [7] gra_spill_temp_111 + sub a8,a8,a9 # [8] + add.n a10,a10,a9 # [9] + sub a11,a11,a9 # [10] + s32i a11,a1,112 # [11] gra_spill_temp_112 + s32i a10,a1,184 # [12] gra_spill_temp_130 + s32i a8,a1,116 # [13] gra_spill_temp_113 + beq a12,a13,.Lt_2_12546 # [14] + +.Lt_2_13058: # 0x821 // channel loop + l32i a12,a1,96 # [0] gra_spill_temp_108 + beqz.n a12,.Lt_2_13314 # [2] + + movi.n a11,0 # [0] + l32i a10,a1,112 # [1] gra_spill_temp_112 + l32i a13,a1,92 # [2] gra_spill_temp_107 + l32i a8,a1,152 # [3] gra_spill_temp_122 + movi.n a9,0 # [4] + l32i a12,a1,88 # [5] gra_spill_temp_106 + s32i a12,a1,168 # [6] gra_spill_temp_126 + s32i a9,a1,188 # [7] gra_spill_temp_131 + s32i a8,a1,164 # [8] gra_spill_temp_125 + s32i a13,a1,172 # [9] gra_spill_temp_127 + l32i a8,a1,116 # [10] gra_spill_temp_113 + l32i a13,a1,196 # [11] gra_spill_temp_133 + max a10,a10,a11 # [12] + s32i a10,a1,208 # [13] gra_spill_temp_136 + min a13,a13,a8 # [14] + s32i a13,a1,200 # [15] gra_spill_temp_134 + j .Lt_2_13826 # [16] + +.Lt_2_14082: # 0x857 + +// extract data + l32i a4,a1,156 # [0] gra_spill_temp_123 + ee.st.qacc_l.l.128.ip a4,16 # [2] id:303 + ee.st.qacc_l.h.32.ip a4,0 # [3] id:304 + l8ui a9,a1,15 # [4] qacc_scratch+15 + l16ui a8,a1,10 # [5] qacc_scratch+10 + l8ui a12,a1,16 # [6] qacc_scratch+16 + l8ui a11,a1,6 # [7] qacc_scratch+6 + l8ui a10,a1,5 # [8] qacc_scratch+5 + s8i a10,a1,2 # [9] qacc_scratch+2 + s8i a11,a1,3 # [10] qacc_scratch+3 + s8i a12,a1,7 # [11] qacc_scratch+7 + s16i a8,a1,4 # [12] qacc_scratch+4 + s8i a9,a1,6 # [13] qacc_scratch+6 + + ee.st.qacc_h.l.128.ip a4,16 # [14] id:314 + ee.st.qacc_h.h.32.ip a4,-32 # [15] id:315 + l8ui a13,a1,32 # [16] qacc_scratch+32 + l8ui a9,a1,21 # [17] qacc_scratch+21 + l8ui a12,a1,31 # [18] qacc_scratch+31 + l16ui a11,a1,26 # [19] qacc_scratch+26 + l8ui a10,a1,22 # [20] qacc_scratch+22 + l16ui a8,a1,16 # [21] qacc_scratch+16 + s16i a8,a1,8 # [22] qacc_scratch+8 + s8i a10,a1,11 # [23] qacc_scratch+11 + s16i a11,a1,12 # [24] qacc_scratch+12 + s8i a12,a1,14 # [25] qacc_scratch+14 + s8i a9,a1,10 # [26] qacc_scratch+10 + s8i a13,a1,15 # [27] qacc_scratch+15 + + l32i a9,a1,152 # [28] gra_spill_temp_122, bias + movi.n a13,16 # [29] + ee.srcmb.s16.qacc q1,a13,0 # [30] + ee.vld.128.ip q0,a4,0 # [31] id:327 + s32i a4,a1,156 # [32] gra_spill_temp_123 + ee.vzip.16 q0,q1 # [33] + ee.vadds.s32 q0,q0,q1 # [34] + ee.movi.32.a q0,a12,3 # [35] + ee.movi.32.a q0,a11,2 # [36] + ee.movi.32.a q0,a10,0 # [37] + add.n a11,a11,a12 # [38] + ee.movi.32.a q0,a12,1 # [39] + add.n a10,a10,a12 # [40] + add.n a10,a10,a11 # [41] + + beqz.n a9,.Lt_2_17154 # [42] // skip bias + + l32i a13,a1,164 # [0] gra_spill_temp_125 + l32i.n a13,a13,0 # [2] id:329 + add.n a10,a10,a13 # [4] +.Lt_2_17154: # 0x8d7 + + # 
259 conv_out = esp_nn_multiply_by_quantized_mult(conv_out, out_mult[out_ch_idx], out_shift[out_ch_idx]); + l32i a11,a1,172 # [0] gra_spill_temp_127 + l32i a4,a1,168 # [1] gra_spill_temp_126 + l32i.n a11,a11,0 # [2] id:331 + l32i.n a4,a4,0 # [3] id:330 + + blti a11,1,.LBB26_esp_nn_conv_s16_mult8_esp32s3 # [4] + movi.n a13,0 # [0] + j .Lt_2_17666 # [1] +.LBB26_esp_nn_conv_s16_mult8_esp32s3: # 0xa4e + neg a13,a11 # [0] +.Lt_2_17666: # 0x8e6 + + movi.n a12,0 # [0] + max a12,a11,a12 # [1] + movi.n a11,0 # [2] + ssl a12 # [3] + sll a10,a10 # [4] + bne a10,a4,.Lt_2_20994 # [5] + + l32r a9,.LC10_28_153 # [0] + movi.n a8,1 # [1] + sub a9,a10,a9 # [2] + moveqz a11,a8,a9 # [3] + +.Lt_2_20994: # 0x901 + extui a8,a4,31,1 # [0] + extui a12,a10,31,1 # [1] + xor a12,a12,a8 # [2] + extui a12,a12,0,8 # [3] + + beqz.n a12,.Lt_2_18434 # [4] + movi.n a12,-1 # [0] + l32r a9,.LC11_28_154 # [1] + j .Lt_2_18178 # [2] + +.Lt_2_18434: # 0xa54 + movi.n a12,0 # [0] + l32r a9,.LC13_28_156 # [1] +.Lt_2_18178: # 0x914 + + ssai 31 # [0] + l32r a8,.LC12_28_155 # [1] + mulsh a6,a4,a10 # [2] + mull a4,a4,a10 # [3] + add.n a6,a6,a12 # [4] + add.n a7,a4,a9 # [5] + saltu a4,a7,a4 # [6] + add.n a4,a4,a6 # [7] + srai a6,a4,31 # [8] + and a6,a6,a8 # [9] + add.n a7,a6,a7 # [10] + srai a3,a6,31 # [11] + add.n a3,a3,a4 # [12] + saltu a6,a7,a6 # [13] + add.n a6,a6,a3 # [14] + src a6,a6,a7 # [15] + extui a3,a11,0,8 # [16] + movi.n a7,1 # [17] + ssr a13 # [18] + movnez a6,a8,a3 # [19] + sra a8,a6 # [20] + + addi.n a3,a8,1 # [21] + ssl a13 # [22] + sll a7,a7 # [23] + extui a4,a8,31,1 # [24] + addi.n a7,a7,-1 # [25] + and a6,a6,a7 # [26] + srai a7,a7,1 # [27] + add.n a4,a4,a7 # [28] + l32i a7,a1,164 # [29] gra_spill_temp_125 + salt a4,a4,a6 # [30] + movnez a8,a3,a4 # [31] + l32i a6,a1,172 # [32] gra_spill_temp_127 + l32i a4,a1,132 # [33] gra_spill_temp_117 + l32i a3,a1,160 # [34] gra_spill_temp_124 + addi.n a7,a7,4 # [35] + s32i a7,a1,164 # [36] gra_spill_temp_125 + addi.n a6,a6,4 # [37] + s32i a6,a1,172 # [38] gra_spill_temp_127 + l32i a7,a1,136 # [39] gra_spill_temp_118 + l32i a6,a1,140 # [40] gra_spill_temp_119 + add.n a4,a3,a4 # [41] + add.n a7,a7,a8 # [42] + addi.n a3,a3,1 # [43] + l32i a8,a1,128 # [44] gra_spill_temp_116 + max a6,a6,a7 # [45] + s32i a3,a1,160 # [46] gra_spill_temp_124 + l32i a7,a1,188 # [47] gra_spill_temp_131 + l32i a3,a1,144 # [48] gra_spill_temp_120 + add.n a7,a7,a8 # [49] + min a3,a3,a6 # [50] + s8i a3,a4,0 # [51] id:332 + s32i a7,a1,188 # [52] gra_spill_temp_131 + l32i a4,a1,168 # [53] gra_spill_temp_126 + l32i a6,a1,124 # [54] gra_spill_temp_115 + addi.n a4,a4,4 # [55] + s32i a4,a1,168 # [56] gra_spill_temp_126 + sub a4,a4,a6 # [57] + beqz a4,.Lt_2_13314 # [58] + +.Lt_2_13826: # 0x9b4 + ee.zero.qacc # [0] + l32i a9,a1,204 # [1] gra_spill_temp_135 + l32i a8,a1,148 # [2] gra_spill_temp_121 + s32i a8,a1,212 # [3] gra_spill_temp_137 + bge a8,a9,.Lt_2_14082 # [4] + +.LBB12_esp_nn_conv_s16_mult8_esp32s3: # 0x9c3 +# Part of loop body line 187, head labeled .Lt_2_13826 + l32i a8,a1,196 # [0] gra_spill_temp_133 + l32i a7,a1,212 # [1] gra_spill_temp_137 + l32i a13,a1,200 # [2] gra_spill_temp_134 + mull a7,a7,a8 # [3] + l32i a6,a1,120 # [4] gra_spill_temp_114 + add.n a13,a7,a13 # [5] + j .Lt_2_14594 # [6] + +.Lt_2_14850: # 0x9d7 +# Part of loop body line 201, head labeled .Lt_2_14594 + l32i a9,a1,204 # [0] gra_spill_temp_135 + l32i a10,a1,212 # [1] gra_spill_temp_137 + l32i a12,a1,192 # [2] gra_spill_temp_132 + l32i a11,a1,196 # [3] gra_spill_temp_133 + add.n a6,a6,a12 # [4] + add.n a7,a7,a11 # [5] + add.n a13,a13,a11 # 
[6] + addi.n a10,a10,1 # [7] + s32i a10,a1,212 # [8] gra_spill_temp_137 + sub a9,a9,a10 # [9] + beqz a9,.Lt_2_14082 # [10] + +.Lt_2_14594: # 0x9f4 + l32i a9,a1,200 # [0] gra_spill_temp_134 + l32i a8,a1,208 # [1] gra_spill_temp_136 + bge a8,a9,.Lt_2_14850 # [3] + + l32i a11,a1,176 # [0] gra_spill_temp_128 + l32i a10,a1,184 # [1] gra_spill_temp_130 + add.n a12,a7,a8 # [2] + add.n a10,a10,a8 # [3] + add.n a10,a6,a10 # [4] + mull a10,a5,a10 # [5] + mull a8,a12,a5 # [6] + addx2 a10,a10,a11 # [7] + l32i a11,a1,188 # [8] gra_spill_temp_131 + add.n a11,a11,a8 # [10] + l32i a8,a1,180 # [11] gra_spill_temp_129 + mov.n a2,a10 # [12] + addx2 a11,a11,a8 # [13] + movi.n a8,8 # [14] + mov.n a3,a11 # [15] + j .Lt_2_15362 # [16] + +.LBB18_esp_nn_conv_s16_mult8_esp32s3: # 0xa26 + loopgtz a15,.LBB54_esp_nn_conv_s16_mult8_esp32s3 # [0] + + ee.vmulas.s16.qacc.ld.ip q0,a2,16,q0,q1 # [0*II+0] id:300 + ee.vld.128.ip q1,a3,16 # [0*II+1] id:301 +.LBB54_esp_nn_conv_s16_mult8_esp32s3: # 0xa30 + +.Lt_2_15618: # 0xa30 + ee.vmulas.s16.qacc q0,q1 # [0] + movi.n a8,8 # [1] + add.n a10,a10,a14 # [2] + add.n a11,a11,a14 # [3] + mov.n a3,a11 # [4] + mov.n a2,a10 # [5] + beq a12,a13,.Lt_2_14850 # [6] + +.Lt_2_15362: # 0xa40 + ee.vld.128.ip q1,a3,16 # [0] id:299 + ee.vld.128.ip q0,a2,16 # [1] id:298 + addi.n a12,a12,1 # [2] + bltu a8,a5,.LBB18_esp_nn_conv_s16_mult8_esp32s3 # [3] + + j .Lt_2_15618 # [0] + +.Lt_2_11778: # 0xa5c + retw.n # [0] + + .size esp_nn_conv_s16_mult8_esp32s3, . - esp_nn_conv_s16_mult8_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s8_filter_aligned_input_padded_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s8_filter_aligned_input_padded_esp32s3.S new file mode 100644 index 0000000..5545b27 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s8_filter_aligned_input_padded_esp32s3.S @@ -0,0 +1,271 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// +// SPDX-FileCopyrightText: 2023 Espressif Systems (Shanghai) CO LTD +// +// SPDX-License-Identifier: Apache-2.0 +// + + +// +// Constraints used by this function are: +// 1. pad_wd and pad_ht are 0. For versions needing padding we do this +// explicitly +// 2. All the filter rows are aligned to a 16-byte boundary. To make sure +// this is indeed the case, for filter rows (filter_wd * channels) not a +// multiple of 16, we add zeros to pad them to the 16-byte boundary. +// +// The optimized kernel assumes this and skips filter rows with the following +// size: ((filter_wd * input_ch) + 15) & ~15.
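For reference, a minimal C sketch (not part of the diff) of the padding rule described in the comment above: each filter row of filter_wd * in_ch int8 values is zero-padded up to the next 16-byte boundary, and the optimized kernel then walks filter memory in steps of ((filter_wd * in_ch) + 15) & ~15 bytes. Only that size formula comes from the source; the helper name and the caller-provided destination buffer are assumptions.

#include <stdint.h>
#include <string.h>

/* Zero-pad each filter row (for one output channel) to a 16-byte multiple,
 * matching the layout the kernel above expects. */
static void pad_filter_rows_to_16(const int8_t *filter,        /* filter_ht rows of (filter_wd * in_ch) bytes */
                                  int8_t *filter_aligned,      /* filter_ht rows of aligned_row bytes, 16-byte aligned */
                                  int filter_wd, int filter_ht, int in_ch)
{
    const int row         = filter_wd * in_ch;
    const int aligned_row = (row + 15) & ~15;    /* row size the optimized kernel skips by */

    for (int y = 0; y < filter_ht; y++) {
        memcpy(filter_aligned + y * aligned_row, filter + y * row, row);
        memset(filter_aligned + y * aligned_row + row, 0, aligned_row - row);
    }
}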
+ + .text + +.literal_position + .literal .LC1, 1073741824 + + # Program Unit: esp_nn_conv_s8_filter_aligned_input_padded_esp32s3 + .type esp_nn_conv_s8_filter_aligned_input_padded_esp32s3, @function + .align 4 + .global esp_nn_conv_s8_filter_aligned_input_padded_esp32s3 + // registers: + // a2: const int16_t *input_data + // a3: const uint16_t input_wd + // a4: const uint16_t input_ht + // a5: const uint16_t in_ch + // a6: const uint16_t input_offset + // a7: const uint16_t stride_wd + + // on stack: + // const uint16_t stride_ht : 80 + // const int8_t *filter_data : 84 + // const uint16_t filter_wd : 88 + // const uint16_t filter_ht : 92 + // const int32_t *bias : 96 + // int8_t *out_data : 100 + // const uint16_t out_wd : 104 + // const uint16_t out_ht : 108 + // const uint16_t out_channels : 112 + // const int32_t out_offset : 116 + // const int32_t *out_shift : 120 + // const int32_t *out_mult : 124 + // const int32_t activation_min: 128 + // const int32_t activation_max: 132 + // void *scratch_buffer: 136 + +esp_nn_conv_s8_filter_aligned_input_padded_esp32s3: + entry sp, 80 + s32i.n a2, sp, 40 # input_data + mov a11, a6 # input_offset + l16ui a2, sp, 88 # filter_wd + l32i a8, sp, 100 # out_data + l16ui a6, sp, 80 # stride_ht + mov.n a15, a5 + + mull a4, a2, a15 # filter_row_sz + s32i.n a8, sp, 24 # out_data_ptr + movi.n a9, 0 + s32i.n a9, sp, 36 # out_y + + addi.n a4, a4, 15 # to round the size up + srli a2, a4, 4 # (filter_row_sz) >> 4 + slli a12, a2, 4 # ((filter_row_sz) >> 4) << 4 + + mull a4, a6, a3 # stride_ht * input_wd + mull a5, a3, a15 # input_wd * in_ch + l32i.n a10, sp, 112 # out_ch + + mull a9, a7, a15 # stride_wd * in_ch + mull a4, a4, a15 # (stride_ht * input_wd) * in_ch + + slli a3, a10, 2 # out_ch * 4 + + s32i.n a3, sp, 32 # out_ch * 4 + s32i.n a5, sp, 12 # input_wd * in_ch + s32i.n a9, sp, 52 # stride_wd * in_ch + s32i a4, sp, 56 # (stride_ht * input_wd) * in_ch + + l32i.n a3, sp, 92 # filter_ht + l32i a13, sp, 136 # scratch_buf + l32i a5, sp, 84 # filter_data + mull a4, a12, a3 # (filter_wd * filter_ht * in_ch) + srai a4, a4, 1 + addx4 a10, a10, a13 # scratch_buf + 4 * out_ch + l32i a3, sp, 96 + // accumulate filter values per channel into scratch buffer +.L_acc_out_channel_loop: + movi.n a9, 0 // acc + loop a4, .L_acc_filter_size_loop + l8ui a14, a5, 0 + l8ui a7, a5, 1 + addi.n a5, a5, 2 + sext a14, a14, 7 + sext a7, a7, 7 + add a9, a9, a14 + add a9, a9, a7 + .L_acc_filter_size_loop: + + // multiply by offset, add bias and store the acc value per channel + mull a9, a9, a11 + beqz.n a3, .L_skip_bias + l32i a8, a3, 0 + addi a3, a3, 4 // this will remain 0 if bias not present + add a9, a9, a8 +.L_skip_bias: + s32i a9, a13, 0 + addi.n a13, a13, 4 + blt a13, a10, .L_acc_out_channel_loop + + movi.n a4, 0 # 0 + +.L_height_loop: + l32i.n a8, sp, 40 # in_row_ptr + movi.n a9, 0 + l32i.n a10, sp, 104 # out_wd + s32i.n a8, sp, 28 # input_ptr + s32i.n a9, sp, 44 # out_x + +.L_width_loop: + movi.n a9, 0 + l32i a5, sp, 84 # filter_data + s32i.n a9, sp, 20 + l32i a3, sp, 136 # scratch_buf + +.L_out_ch_loop: + movi.n a6, 0 + l32i.n a9, sp, 28 # input_ptr + mov.n a10, a6 + +.L_filter_ht_loop: + add.n a8, a5, a12 + mov.n a13, a9 + + ee.zero.accx + ee.ld.128.usar.ip q0, a13, 16 + ee.vld.128.ip q4, a13, 16 + ee.vld.128.ip q1, a5, 16 + + sub a15, a8, a5 // row_len - 16 + extui a14, a15, 4, 1 // if multiple of 16 and not 32 + srai a15, a15, 5 // multiples of 32 + ee.src.q.qup q2, q0, q4 + beqz a15, .L_vector_32_loop_end + + loop a15, .L_vector_32_loop_end + + ee.vld.128.ip q4, a13, 16 + 
ee.vmulas.s8.accx.ld.ip.qup q3, a5, 16, q2, q1, q0, q4 + ee.vld.128.ip q2, a13, 16 + ee.vmulas.s8.accx.ld.ip.qup q1, a5, 16, q0, q3, q4, q2 + ee.orq q0, q2, q2 + ee.orq q2, q4, q4 + +.L_vector_32_loop_end: + beqz a14, .L_vector_loop_end + ee.vmulas.s8.accx.ld.ip q4, a13, 16, q2, q1 + ee.src.q.ld.ip q1, a5, 16, q0, q4 + ee.orq q2, q0, q0 + +.L_vector_loop_end: + ee.vmulas.s8.accx q2, q1 + addi a13, a13, -16 // since we incremented by 16 too much + movi a15, 0 + ee.srs.accx a14, a15, 0 + + mov.n a5, a8 + add.n a6, a6, a14 +.L7: + l32i.n a8, sp, 12 # input_wd * in_ch + l32i.n a2, sp, 92 # filter_ht + addi.n a10, a10, 1 # filter_y_idx + add.n a9, a9, a8 + blt a10, a2, .L_filter_ht_loop +.L9: + l32i a7, a3, 0 # load input_offset acc + addi a3, a3, 4 # increment offset acc ptr + l32i.n a8, sp, 20 + add.n a6, a6, a7 # add input_offset accumulation + +.L_multiply_by_quant_mult: + l32i a10, sp, 120 + l32i a9, sp, 124 + add.n a2, a10, a8 + l32i.n a2, a2, 0 + add.n a7, a9, a8 + l32i.n a7, a7, 0 + max a8, a2, a4 + ssl a8 + sll a6, a6 + mull a9, a6, a7 + l32r a10, .LC1 + sub a2, a8, a2 + add.n a8, a9, a10 + mulsh a6, a6, a7 + movi.n a7, 1 + bltu a8, a9, .L13 + movi.n a7, 0 + +.L13: + add.n a6, a7, a6 + slli a6, a6, 1 + extui a8, a8, 31, 1 + or a6, a6, a8 + beqz.n a2, .L_skip_div_by_pow_of_2 + addi.n a7, a2, -1 + movi.n a9, 1 + extui a8, a6, 31, 1 + ssl a7 + sll a7, a9 + sub a7, a7, a8 + add.n a6, a7, a6 + ssr a2 + sra a6, a6 +.L_skip_div_by_pow_of_2: + l32i a10, sp, 116 + l32i a8, sp, 128 + add.n a2, a10, a6 + l32i a9, sp, 132 + l32i.n a10, sp, 24 # out_data_ptr + max a2, a2, a8 + min a2, a2, a9 + s8i a2, a10, 0 + l32i.n a2, sp, 20 + addi.n a10, a10, 1 + addi.n a2, a2, 4 + l32i.n a6, sp, 32 + s32i.n a2, sp, 20 + s32i.n a10, sp, 24 # out_data_ptr + bne a6, a2, .L_out_ch_loop + +.L4: + l32i.n a5, sp, 44 # out_x + l32i.n a6, sp, 28 # input_ptr (was stored by height loop) + l32i.n a8, sp, 52 # stride_wd * in_ch + addi.n a5, a5, 1 + add.n a6, a6, a8 # input_ptr + stride_wd * in_ch + l32i.n a9, sp, 104 # out_wd + s32i.n a5, sp, 44 # out_x + s32i.n a6, sp, 28 # input_ptr + bne a9, a5, .L_width_loop + + l32i.n a10, sp, 36 # out_y + l32i.n a2, sp, 40 # in_row_ptr + l32i a5, sp, 56 # (stride_ht * input_wd) * in_ch + l32i.n a6, sp, 108 # out_ht + addi.n a10, a10, 1 + add.n a2, a2, a5 # in_row_ptr + s32i.n a10, sp, 36 # out_y + s32i.n a2, sp, 40 # in_row_ptr + blt a10, a6, .L_height_loop + // end outer (height) loop + retw.n + + .size esp_nn_conv_s8_filter_aligned_input_padded_esp32s3, .-esp_nn_conv_s8_filter_aligned_input_padded_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s8_mult8_1x1_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s8_mult8_1x1_esp32s3.S new file mode 100644 index 0000000..111fd08 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_conv_s8_mult8_1x1_esp32s3.S @@ -0,0 +1,497 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + .text + .literal_position + .literal .nudge_val, 1073741824 + + # Program Unit: esp_nn_conv_s8_mult8_1x1_esp32s3 + .type esp_nn_conv_s8_mult8_1x1_esp32s3, @function + .align 4 + .global esp_nn_conv_s8_mult8_1x1_esp32s3 + +esp_nn_conv_s8_mult8_1x1_esp32s3: # 0xdbc + # scratch_buf = 0 // to store qacc regs need 36 bytes + # gra_spill_temp_164 = 36, channel itr, (in_channels - 1) >> 3 + # gra_spill_temp_165 = 40, i_out + # gra_spill_temp_166 = 44, in_channels + # gra_spill_temp_167 = 48, in_channels/8 - 1 + # gra_spill_temp_168 = 52, in_channels-7 + # gra_spill_temp_169 = 56, input + # gra_spill_temp_170 = 60, filter_data + # gra_spill_temp_171 = 64, input_offset + # gra_spill_temp_172 = 68, input_ptr + # gra_spill_temp_173 = 72, bias + # gra_spill_temp_174 = 76, in_channels*8 + # gra_spill_temp_175 = 80, size-7 + # gra_spill_temp_176 = 84, size + + // registers: + // a2: int8_t *input_data + // a3: uint16_t input_wd + // a4: uint16_t input_ht + // a5: uint16_t in_channels + // a6: int32_t input_offset + // a7: int16_t *filter_data + + // on stack: + // int32_t *bias // 160 + // int8_t *out_data // 164 + // uint16_t out_wd // 168 + // uint16_t out_ht // 172 + // uint16_t out_channels // 176 + // int32_t out_offset // 180 + // int32_t *out_shift // 184 + // int32_t *out_mult // 188 + // int32_t activation_min // 192 + // int32_t activation_max // 196 + // void *buffer // tmp buf // 200 + + entry a1,160 # + s32i a5,a1,44 # [0] gra_spill_temp_166, in_channels + s32i a6,a1,64 # [2] id:619 input_offset+0x0 + s32i a7,a1,60 # [1] gra_spill_temp_170, filter_data + mul16u a8,a3,a4 # [3] size = input_wd * input_ht; + s32i a2,a1,56 # [0] gra_spill_temp_169, input + l32i a4,a1,164 # [1] id:624 out_data+0x0 + mov.n a3,a1 # [52] scratch_buf + + s32i a8,a1,84 # [4] gra_spill_temp_176, size + blti a8,8,.prepare_leftover # [5] // process remaining lines one by one + addi a9,a8,-7 # [32] + s32i a9,a1,80 # [33] gra_spill_temp_175, size-7 + + s32i a2,a1,68 # [2] gra_spill_temp_172 , input_ptr + srai a15,a5,3 # [7] `in_ch/8` loop_cnt + movi.n a11,0 # [10] + s32i a11,a1,40 # [11] gra_spill_temp_165 + addi a15,a15,-1 # [17] `in_ch/8` loop_cnt - 1 + s32i a15,a1,48 # [18] gra_spill_temp_167 + slli a9,a5,3 # [19] in_channels*8 + s32i a9,a1,76 # [20] gra_spill_temp_174 + addi a15,a5,-7 # [31] + s32i a15,a1,52 # [34] gra_spill_temp_168 + +.outer_loop: // for (; i_out < size - 7; i_out += 8) { + + l32i a10,a1,200 # [1] gra_spill_temp_165, buffer + l32i.n a11,a1,44 # [1] gra_spill_temp_166, input_channels + l32i.n a8,a1,68 # [2] gra_spill_temp_172, input_ptr + srai a9,a11,3 # [7] `in_ch/8` loop_cnt for transpose loop + + ee.zero.q q7 # [0] + addi a12,a1,64 # [6] + ee.vldbc.16 q5,a12 # [0*II+16] id:638 input_offset + + // load and transose 8 lines of input 8xchannels, + // add input offset and store 16 bit data to tmp buffer + loopgtz a9,.transpose_loop_end # [10] + mov.n a9,a8 + ee.vld.l.64.xp q0,a9,a11 + ee.vld.l.64.xp q1,a9,a11 + ee.vld.h.64.xp q0,a9,a11 + ee.vld.h.64.xp q1,a9,a11 + ee.vld.l.64.xp q2,a9,a11 + ee.vzip.8 
q0,q1 + ee.vld.l.64.xp q3,a9,a11 + ee.vld.h.64.xp q2,a9,a11 + ee.vld.h.64.ip q3,a9,0 + ee.vzip.16 q0,q1 + ee.vzip.8 q2,q3 + ee.vzip.16 q2,q3 + ee.vzip.32 q0,q2 + ee.vcmp.lt.s8 q4,q2,q7 + ee.vzip.8 q2,q4 + ee.vcmp.lt.s8 q6,q0,q7 + ee.vzip.8 q0,q6 + ee.vadds.s16 q0,q0,q5 + ee.vadds.s16.st.incp q0,a10,q6,q6,q5 + ee.vadds.s16.st.incp q6,a10,q2,q2,q5 + ee.vadds.s16.st.incp q2,a10,q4,q4,q5 + ee.vst.128.ip q4,a10,16 + ee.vzip.32 q1,q3 + ee.vcmp.lt.s8 q4,q3,q7 + ee.vzip.8 q3,q4 + ee.vcmp.lt.s8 q6,q1,q7 + ee.vzip.8 q1,q6 + ee.vadds.s16 q1,q1,q5 + ee.vadds.s16.st.incp q1,a10,q6,q6,q5 + ee.vadds.s16.st.incp q6,a10,q3,q3,q5 + ee.vadds.s16.st.incp q3,a10,q4,q4,q5 + ee.vst.128.ip q4,a10,16 + addi.n a8,a8,8 +.transpose_loop_end: # 0xeeb + + # 468 uint32_t bias_ptr = (uint32_t) bias; + # 469 uint32_t filter_ptr = (uint32_t) (filter_data); + # 470 const int32_t *out_mult_ptr = out_mult; + # 471 const int32_t *out_shift_ptr = out_shift; + l32i a6,a1,184 # [0] out_shift + l32i a2,a1,188 # [1] out_mult + l32i a5,a1,60 # [2] gra_spill_temp_170, filter + l32i a9,a1,160 # [3] gra_spill_temp_170, bias + # 472 for (int32_t out_ch_idx = 0; out_ch_idx < out_channels; out_ch_idx++) { + l16ui a8,a1,176 # [5] id:620 out_channels+0x0 + s32i a9,a1,72 # [5] gra_spill_temp_173 + blti a8,1,.outer_ch_loop_end + + movi.n a7,0 + +.out_ch_loop: # 0xf3e + l32i a8,a1,200 # [4] gra_spill_temp_165, buffer_ptr + ee.zero.qacc # [3] + ee.zero.q q5 # + l32i a10,a1,52 # [1] gra_spill_temp_168, in_channels-7 + l32i a9,a1,48 # [1] gra_spill_temp_167, in_channels/8 - 1 + ee.vld.l.64.ip q7,a5,8 # load filter 8 values + ee.vld.128.ip q0,a8,16 + ee.vld.128.ip q1,a8,16 + ee.vcmp.lt.s8 q6,q7,q5 + ee.vzip.8 q7,q6 + + ee.vsmulas.s16.qacc.ld.incp q2,a8,q0,q7,0 + ee.vsmulas.s16.qacc.ld.incp q3,a8,q1,q7,1 + ee.vsmulas.s16.qacc.ld.incp q0,a8,q2,q7,2 + ee.vsmulas.s16.qacc.ld.incp q1,a8,q3,q7,3 + ee.vsmulas.s16.qacc.ld.incp q2,a8,q0,q7,4 + ee.vsmulas.s16.qacc.ld.incp q3,a8,q1,q7,5 + blti a10,8,.inner_loop_end # [16] + + loopgtz a9,.inner_loop_end # [3] + + ee.vsmulas.s16.qacc.ld.incp q0,a8,q2,q7,6 # [0*II+0] id:657 + ee.vsmulas.s16.qacc.ld.incp q1,a8,q3,q7,7 # [0*II+1] id:658 + ee.vld.l.64.ip q7,a5,8 # [0*II+2] id:659, filter + ee.vcmp.lt.s8 q6,q7,q5 + ee.vzip.8 q7,q6 + ee.vsmulas.s16.qacc.ld.incp q2,a8,q0,q7,0 # [0*II+4] id:660 + ee.vsmulas.s16.qacc.ld.incp q3,a8,q1,q7,1 # [0*II+5] id:661 + ee.vsmulas.s16.qacc.ld.incp q0,a8,q2,q7,2 # [0*II+6] id:662 + ee.vsmulas.s16.qacc.ld.incp q1,a8,q3,q7,3 # [0*II+7] id:663 + ee.vsmulas.s16.qacc.ld.incp q2,a8,q0,q7,4 # [0*II+8] id:664 + ee.vsmulas.s16.qacc.ld.incp q3,a8,q1,q7,5 # [0*II+9] id:665 +.inner_loop_end: # 0xfaf + + ee.vsmulas.s16.qacc q2,q7,6 # [2] + ee.vsmulas.s16.qacc q3,q7,7 # [3] + + # store qacc registers and re-arrange data for low 16 bits + + ee.st.qacc_l.l.128.ip a3,16 # [5] id:668 + ee.st.qacc_l.h.32.ip a3,-16 # [6] id:669 + l32i.n a10, a1, 0 + l32i.n a11, a1, 5 + l32i.n a12, a1, 10 + l32i.n a13, a1, 15 + ee.movi.32.q q0, a10, 0 + ee.movi.32.q q0, a11, 1 + ee.movi.32.q q0, a12, 2 + ee.movi.32.q q0, a13, 3 + + ee.st.qacc_h.l.128.ip a3,16 # [5] id:668 + ee.st.qacc_h.h.32.ip a3,-16 # [6] id:669 + l32i.n a10, a1, 0 + l32i.n a11, a1, 5 + l32i.n a12, a1, 10 + l32i.n a13, a1, 15 + ee.movi.32.q q4, a10, 0 + ee.movi.32.q q4, a11, 1 + ee.movi.32.q q4, a12, 2 + ee.movi.32.q q4, a13, 3 + + l32i a9,a1,160 # [17] gra_spill_temp_170, bias + l32i a10,a1,72 # [0] gra_spill_temp_173, bias_ptr + + # add bias + beqz.n a9,.no_bias + ee.vldbc.32.ip q6,a10,4 + s32i a10,a1,72 # [3] gra_spill_temp_173, bias_ptr + 
ee.vadds.s32 q0,q0,q6 # [4] + ee.vadds.s32 q4,q4,q6 # [5] +.no_bias: # 0x102e + + l32i.n a11,a6,0 # [1] id:696 + l32i.n a10,a2,0 # [3] id:695 + .global esp_nn_multiply_by_quantized_mult_asm_esp32s3 + call8 esp_nn_multiply_by_quantized_mult_asm_esp32s3 # [4] esp_nn_multiply_by_quantized_mult_asm_esp32s3 + + l32i.n a10,a2,0 # [0] id:697, mult + l32i.n a11,a6,0 # [2] id:698, shift + mv.qr q5,q0 + mv.qr q0,q4 + call8 esp_nn_multiply_by_quantized_mult_asm_esp32s3 # [5] esp_nn_multiply_by_quantized_mult_asm_esp32s3 + + addi.n a6,a6,4 # out_shift_ptr++ + addi.n a2,a2,4 # out_mult_ptr++ + addi a9,a1,180 # [7] + addi a10,a1,192 # [5] + addi a8,a1,196 # [6] + +# load broadcast, activation and out_offset + ee.vldbc.32 q4,a9 # [14] id:699 out_offset + ee.vldbc.32 q2,a10 # [11] id:700 activation_min + ee.vldbc.32 q3,a8 # [12] id:701 activation_max + +# add offset + ee.vadds.s32 q1,q0,q4 # [17] + ee.vadds.s32 q0,q5,q4 # [22] + + # activation + ee.vmin.s32 q1,q1,q3 # [19] + ee.vmax.s32 q1,q1,q2 # [21] + ee.vmin.s32 q0,q0,q3 # [23] + ee.vmax.s32 q0,q0,q2 # [24] + + l16ui a9,a1,176 # [33] out_channels + +# unzip and store + ee.vunzip.16 q0,q1 # [25] + ee.vst.128.ip q0,a3,0 # [26] id:702, scratch_buf + + # a4 = out_data, out_channels = a1+176 + + l8ui a14,a1,0 # [27] + l8ui a11,a1,2 # [30] scratch_buf+2 + add a10,a4,a9 + s8i a14,a4,0 # [28], out_data + s8i a11,a10,0 # [31], out_data + out_channels + + l8ui a14,a1,4 # [32] scratch_buf+4 + l8ui a11,a1,6 # [37] scratch_buf+6 + add a12,a10,a9 + add a10,a12,a9 + s8i a14,a12,0 # [28] + s8i a11,a10,0 # [31] + + l8ui a14,a1,8 # [41] scratch_buf+8 + l8ui a11,a1,10 # [47] scratch_buf+10 + add a12,a10,a9 + add a10,a12,a9 + s8i a14,a12,0 # [28] + s8i a11,a10,0 # [31] + + l8ui a14,a1,12 # [51] scratch_buf+12 + l8ui a11,a1,14 # [55] scratch_buf+14 + add a12,a10,a9 + add a10,a12,a9 + s8i a14,a12,0 # [28] + s8i a11,a10,0 # [31] + + addi.n a4,a4,1 # [29] out_data++; + addi.n a7,a7,1 + bne a7,a9,.out_ch_loop + +.outer_ch_loop_end: + + subx8 a11,a9,a9 # (7 * out_channels); + l32i a10,a1,76 # [1] gra_spill_temp_174, in_channels * 8 + l32i a15,a1,40 # [4] gra_spill_temp_165 + l32i a9,a1,68 # [2] gra_spill_temp_172 + l32i a8,a1,80 # [0] gra_spill_temp_175, size-7 + add.n a4,a4,a11 # [5] out_data += (7 * out_channels); + addi.n a15,a15,8 + s32i a15,a1,40 # [7] gra_spill_temp_165 + add.n a9,a9,a10 # [8] + s32i a9,a1,68 # [9] gra_spill_temp_172 + blt a15,a8,.outer_loop # [10] + + # check if leftover + l32i a15,a1,40 + l32i a13,a1,84 # [1] gra_spill_temp_176, size + l32i a8,a1,44 # [0] gra_spill_temp_166, in_channels + bge a15, a13, .return_function # no leftover + +// This block below processes one input channel line at a time. 
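As the comment above says, everything from .process_leftover onward handles the remaining output positions (fewer than 8) one at a time: for each position it accumulates (input + input_offset) * filter over all input channels, adds the bias if present, requantizes with the per-channel multiplier and shift, applies the output offset and the activation clamp, and stores one int8 per output channel. A minimal C sketch of that computation (not part of the diff), using the parameter names from the register/stack comments above; the [out_channels][in_channels] filter layout and the prototype of the requantization helper (approximated from its use in the ANSI reference elsewhere in this patch) are assumptions.

#include <stdint.h>

/* Requantization helper used throughout these kernels; declared in the ESP-NN headers. */
int32_t esp_nn_multiply_by_quantized_mult(int32_t x, int32_t mult, int32_t shift);

static void conv_s8_1x1_leftover_ref(const int8_t *input_data, int32_t in_channels,
                                     const int8_t *filter_data, const int32_t *bias,
                                     int8_t *out_data, int32_t out_channels,
                                     int32_t input_offset, int32_t out_offset,
                                     const int32_t *out_shift, const int32_t *out_mult,
                                     int32_t activation_min, int32_t activation_max,
                                     int32_t i_out, int32_t size)
{
    out_data += i_out * out_channels;            /* positions before i_out were written by the vector path */
    for (int32_t i = i_out; i < size; i++) {
        const int8_t *in = input_data + i * in_channels;
        for (int32_t out_ch = 0; out_ch < out_channels; out_ch++) {
            const int8_t *flt = filter_data + out_ch * in_channels;   /* assumed [out_ch][in_ch] layout */
            int32_t acc = 0;
            for (int32_t c = 0; c < in_channels; c++) {
                acc += (in[c] + input_offset) * flt[c];
            }
            if (bias) {
                acc += bias[out_ch];
            }
            acc = esp_nn_multiply_by_quantized_mult(acc, out_mult[out_ch], out_shift[out_ch]);
            acc += out_offset;
            if (acc < activation_min) acc = activation_min;
            if (acc > activation_max) acc = activation_max;
            *out_data++ = (int8_t)acc;
        }
    }
}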
+.process_leftover: + l32i a15,a1,40 # [1] gra_spill_temp_165, i_out + l32i a14,a1,56 # [2] gra_spill_temp_169, input + mull a15,a15,a8 # [3] in_channels * i_out + addi.n a8,a8,-1 # [4] in_channels - 1 + add.n a14,a14,a15 # [5] input_ptr = in_channels * i_out + input + srai a8,a8,3 # [6] iterations, (in_channels - 1) >> 3 + s32i a8,a1,36 # [7] gra_spill_temp_164, iterations + s32i a14,a1,68 # [8] gra_spill_temp_172, in_channels * i_out + input + addi a12,a1,64 + ee.vldbc.16 q4,a12 # [8] id:716 input_offset + +.leftover_outer_loop: + + l32i a15,a1,184 # [0] out_shift + l32i a2,a1,188 # [1] out_mult + l32i a8,a1,60 # [3] gra_spill_temp_170, filter_data + l32i a5,a1,160 # [0] gra_spill_temp_170, bias + movi.n a11,0 # [2] + +.leftover_out_ch_loop: + + ee.zero.qacc # [0] + ee.zero.q q3 # [1] + l32i.n a9,a1,68 # [4] gra_spill_temp_172, input_ptr + l32i a10,a1,36 # [1] gra_spill_temp_164, iterations, (in_channels - 1) >> 3 + ee.vld.l.64.ip q0,a9,8 # [7] id:717, input + ee.vld.l.64.ip q1,a8,8 # [7] filter + ee.vcmp.lt.s8 q6,q0,q3 + ee.vcmp.lt.s8 q7,q1,q3 + ee.vzip.8 q0,q6 + ee.vzip.8 q1,q7 + ee.vadds.s16 q0,q0,q4 # [11] id:718, add offset + + loopgtz a10,.leftover_inner_loop_end # [3] + + ee.vmulas.s16.qacc q0,q1 # mula(q0,q1) + ee.vld.l.64.ip q0,a9,8 # load 8 input values + ee.vld.l.64.ip q1,a8,8 # [7] load filter + ee.vcmp.lt.s8 q2,q0,q3 # sign + ee.vcmp.lt.s8 q7,q1,q3 + ee.vzip.8 q0,q2 # 16 bit input + ee.vzip.8 q1,q7 # 16 bit filter + ee.vadds.s16 q0,q0,q4 # add offset +.leftover_inner_loop_end: # 0x1262 + +# re-arrange data from qacc in 32 bit q registers + ee.vmulas.s16.qacc q0,q1 # [3] + ee.st.qacc_l.l.128.ip a3,16 # [5] id:722 + ee.st.qacc_l.h.32.ip a3,0 # [6] id:723 + l8ui a10,a1,5 # [11] scratch_buf+5 + l8ui a12,a1,6 # [10] scratch_buf+6 + l16ui a14,a1,10 # [8] scratch_buf+10 + l8ui a9,a1,15 # [7] scratch_buf+15 + l8ui a13,a1,16 # [9] scratch_buf+16 + s8i a10,a1,2 # [12] scratch_buf+2 + s8i a12,a1,3 # [13] scratch_buf+3 + s16i a14,a1,4 # [15] scratch_buf+4 + s8i a9,a1,6 # [16] scratch_buf+6 + s8i a13,a1,7 # [14] scratch_buf+7 + + ee.st.qacc_h.l.128.ip a3,16 # [17] id:724 + ee.st.qacc_h.h.32.ip a3,-32 # [18] id:725 + l16ui a13,a1,16 # [30] scratch_buf+16 + l8ui a14,a1,21 # [23] scratch_buf+21 + l8ui a9,a1,22 # [22] scratch_buf+22 + l16ui a10,a1,26 # [21] scratch_buf+26 + s16i a13,a1,8 # [31] scratch_buf+8 + l8ui a12,a1,31 # [20] scratch_buf+31 + l8ui a13,a1,32 # [19] scratch_buf+32 + s8i a14,a1,10 # [24] scratch_buf+10 + s8i a9,a1,11 # [25] scratch_buf+11 + s16i a10,a1,12 # [26] scratch_buf+12 + s8i a12,a1,14 # [27] scratch_buf+14 + s8i a13,a1,15 # [28] scratch_buf+15 + movi.n a12,16 + +# get data now + ee.vld.128.ip q0,a3,0 + ee.srcmb.s16.qacc q1,a12,0 + ee.vzip.16 q0,q1 + + ee.vadds.s32 q0,q0,q1 + ee.movi.32.a q0,a10,3 + ee.movi.32.a q0,a9,2 + ee.movi.32.a q0,a14,0 + add a9,a9,a10 + ee.movi.32.a q0,a10,1 + add a14,a14,a10 + add a14,a14,a9 + +# a14 contains conv_out + l32i a9,a1,160 # [43] gra_spill_temp_170, bias ptr + l32i.n a6,a15,0 # [44] id:730, shift + beqz.n a9,.leftover_multiply_by_quant_mult # [45] + +# load and add bias + l32i.n a9,a5,0 + add.n a14,a14,a9 + +.leftover_multiply_by_quant_mult: # 0x12e7 + l32i.n a9,a2,0 # [0] id:729, mult + movi.n a10,0 # [1] + max a10,a6,a10 # [2] left_shift + ssl a10 # [3] + sll a14,a14 # [4] (value << left_shift) + + sub a7,a10,a6 # right_shift + + l32r a13,.nudge_val + mulsh a12,a9,a14 + mull a14,a9,a14 + ssai 31 + + addi.n a2,a2,4 # [0] mult + addi.n a15,a15,4 # [1] shift + addi.n a5,a5,4 # [2] bias + addi.n a11,a11,1 # [3] + + add 
a13,a14,a13 # low part + saltu a14,a13,a14 + add a9,a12,a14 # high part + src a12,a9,a13 + + blti a7,1,.leftover_skip_div_by2 + + addi.n a14,a7,-1 + ssl a14 + movi.n a10,1 + sll a10,a10 # 1 << (exponent - 1) + extui a14,a12,31,1 + ssr a7 + sub a10,a10,a14 # 1 << (exponent - 1) - (val < 0) + add a12,a12,a10 # val += to_add + sra a12,a12 + +.leftover_skip_div_by2: + l32i a10,a1,180 # [26] id:733 out_offset+0x0 + l32i a9,a1,192 # [29] id:732 activation_min+0x0 + l16ui a13,a1,176 # [5] id:620 out_channels+0x0 + l32i a14,a1,196 # [31] id:731 activation_max+0x0 + +// add offset, apply activation and store + add.n a10,a10,a12 + max a9,a9,a10 + min a14,a14,a9 + s8i a14,a4,0 + addi.n a4,a4,1 + + bne a11,a13,.leftover_out_ch_loop + + l32i a15,a1,44 # [0] gra_spill_temp_166, in_channels + l32i a14,a1,68 # [1] gra_spill_temp_172, input_ptr + l32i a13,a1,40 # [2] gra_spill_temp_165, i_out + l32i a12,a1,84 # [3] gra_spill_temp_176, size + addi.n a13,a13,1 # [4] + s32i a13,a1,40 # [5] gra_spill_temp_165, i_out + add a14,a14,a15 # [7] input_ptr += in_channels + s32i a14,a1,68 # [8] gra_spill_temp_172, input_ptr + blt a13,a12,.leftover_outer_loop + +.return_function: + retw.n # [9] + +.prepare_leftover: + l32i a8,a1,44 # [0] gra_spill_temp_166, in_channels + movi.n a15,0 + s32i a15,a1,40 # [7] gra_spill_temp_165, i_out + j .process_leftover + + .size esp_nn_conv_s8_mult8_1x1_esp32s3, . - esp_nn_conv_s8_mult8_1x1_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_ansi.c new file mode 100644 index 0000000..10c31b9 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_ansi.c @@ -0,0 +1,104 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
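The ANSI reference that follows, like the assembly kernels above, pushes every accumulator through esp_nn_multiply_by_quantized_mult; in the assembly this appears as the 1073741824 (2^30) rounding literal and the "1 << (exponent - 1) - (val < 0)" sequence. For orientation, a minimal C sketch (not part of the diff) of that conventional TFLite-style requantization, a rounding doubling-high multiply by the per-channel multiplier followed by a rounding right shift; it illustrates the math and is not ESP-NN's exact source.

#include <stdint.h>

/* High 32 bits of (a * b * 2), rounded; the 2^30 nudge matches the .nudge_val/.LC1 literals above.
 * Saturation of the a == b == INT32_MIN corner case is omitted. */
static int32_t rounding_doubling_high_mult(int32_t a, int32_t b)
{
    int64_t prod  = (int64_t)a * (int64_t)b;
    int64_t nudge = (prod >= 0) ? (1LL << 30) : (1 - (1LL << 30));
    return (int32_t)((prod + nudge) >> 31);
}

/* Rounding arithmetic shift right; same "1 << (exponent - 1) - (val < 0)" trick as the assembly above. */
static int32_t rounding_shift_right(int32_t val, int32_t exponent)
{
    if (exponent <= 0) {
        return val;
    }
    const int32_t to_add = (1 << (exponent - 1)) - (val < 0);
    return (val + to_add) >> exponent;
}

/* value scaled by mult/2^31 and by 2^shift (left shift if positive, rounding right shift if negative). */
static int32_t requantize_ref(int32_t value, int32_t mult, int32_t shift)
{
    const int32_t left_shift  = shift > 0 ? shift : 0;
    const int32_t right_shift = shift > 0 ? 0 : -shift;
    const int32_t scaled = rounding_doubling_high_mult(value << left_shift, mult);  /* mirrors the "sll" above */
    return rounding_shift_right(scaled, right_shift);
}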
+ +#include +#include + +int esp_nn_get_depthwise_conv_scratch_size_ansi(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const dw_conv_params_t *conv_params) +{ + return 0; +} + +void esp_nn_set_depthwise_conv_scratch_buf_ansi(const void *buf) +{ + +} + +void esp_nn_depthwise_conv_s8_ansi(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const dw_conv_params_t *conv_params, + const quant_data_t *quant_data) +{ + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t channels = input_dims->channels; + const int32_t input_offset = conv_params->in_offset; + const int32_t out_offset = conv_params->out_offset; + const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const int32_t *out_shift = quant_data->shift; + const int32_t *out_mult = quant_data->mult; + const int32_t activation_min = conv_params->activation.min; + const int32_t activation_max = conv_params->activation.max; + const uint16_t ch_mult = conv_params->ch_mult; + + int out_idx = 0; + for (int out_y = 0; out_y < out_ht; out_y++) { //height loop + const int16_t base_y = (out_y * stride_ht) - pad_ht; + for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + const int16_t base_x = (out_x * stride_wd) - pad_wd; + for (int ch_idx = 0; ch_idx < channels; ch_idx++) {//channel_loop + for (int ch_mult_idx = 0; ch_mult_idx < ch_mult; ch_mult_idx++) { + int32_t result = 0; + const int out_ch_idx = ch_mult_idx + ch_idx * ch_mult; + + /* Select filter so as the point doesn't lie outside block */ + int filter_y_start = max(0, -base_y); + int filter_x_start = max(0, -base_x); + int filter_y_end = min(filter_ht, input_ht - base_y); + int filter_x_end = min(filter_wd, input_wd - base_x); + + for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx; + int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels * ch_mult) + out_ch_idx; + int32_t input_val = input_data[input_index] + input_offset; + int32_t filter_val = filter_data[filter_index]; + result += input_val * filter_val; + } + } + if (bias) { + result += bias[out_ch_idx]; + } + result = esp_nn_multiply_by_quantized_mult(result, out_mult[out_ch_idx], out_shift[out_ch_idx]); + result += out_offset; + result = max(result, activation_min); + result = min(result, activation_max); + + out_data[out_idx++] = result; + } + } + } + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_opt.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_opt.c new file mode 100644 index 
0000000..e0cc29d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_opt.c @@ -0,0 +1,295 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +int esp_nn_get_depthwise_conv_scratch_size_opt(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const dw_conv_params_t *conv_params) +{ + return 0; +} + +void esp_nn_set_depthwise_conv_scratch_buf_opt(const void *buf) +{ + +} + +/* common channel multiplier == 1 case */ +__attribute__ ((noinline)) +static void esp_nn_depthwise_conv_s8_ch_mult_1(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const dw_conv_params_t *conv_params, + const quant_data_t *quant_data) +{ + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t channels = input_dims->channels; + const int32_t input_offset = conv_params->in_offset; + const int32_t out_offset = conv_params->out_offset; + const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const int32_t activation_min = conv_params->activation.min; + const int32_t activation_max = conv_params->activation.max; + + int out_idx = 0; + for (int out_y = 0; out_y < out_ht; out_y++) { //height loop + const int16_t base_y = (out_y * stride_ht) - pad_ht; + for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + const int16_t base_x = (out_x * stride_wd) - pad_wd; + + const int32_t *out_shift = quant_data->shift; + const int32_t *out_mult = quant_data->mult; + + /* Select filter so as the point doesn't lie outside block */ + int filter_y_start = max(0, -base_y); + int filter_x_start = max(0, -base_x); + int filter_y_end = min(filter_ht, input_ht - base_y); + int filter_x_end = min(filter_wd, input_wd - base_x); + + int ch_idx = 0; + for (; ch_idx < channels - 3; ch_idx += 4) {//channel_loop + int32_t result0 = 0; + int32_t result1 = 0; + int32_t result2 = 0; + int32_t result3 = 0; + + for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx; + int32_t 
filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels) + ch_idx; + int32_t input_val0 = input_data[input_index + 0] + input_offset; + int32_t input_val1 = input_data[input_index + 1] + input_offset; + int32_t input_val2 = input_data[input_index + 2] + input_offset; + int32_t input_val3 = input_data[input_index + 3] + input_offset; + int32_t filter_val0 = filter_data[filter_index + 0]; + int32_t filter_val1 = filter_data[filter_index + 1]; + int32_t filter_val2 = filter_data[filter_index + 2]; + int32_t filter_val3 = filter_data[filter_index + 3]; + result0 += input_val0 * filter_val0; + result1 += input_val1 * filter_val1; + result2 += input_val2 * filter_val2; + result3 += input_val3 * filter_val3; + } + } + if (bias) { + result0 += bias[ch_idx + 0]; + result1 += bias[ch_idx + 1]; + result2 += bias[ch_idx + 2]; + result3 += bias[ch_idx + 3]; + } + result0 = esp_nn_multiply_by_quantized_mult_fast(result0, *out_mult++, *out_shift++); + result1 = esp_nn_multiply_by_quantized_mult_fast(result1, *out_mult++, *out_shift++); + result2 = esp_nn_multiply_by_quantized_mult_fast(result2, *out_mult++, *out_shift++); + result3 = esp_nn_multiply_by_quantized_mult_fast(result3, *out_mult++, *out_shift++); + + result0 += out_offset; + result1 += out_offset; + result2 += out_offset; + result3 += out_offset; + + result0 = max(result0, activation_min); + result1 = max(result1, activation_min); + result2 = max(result2, activation_min); + result3 = max(result3, activation_min); + + result0 = min(result0, activation_max); + result1 = min(result1, activation_max); + result2 = min(result2, activation_max); + result3 = min(result3, activation_max); + + out_data[out_idx++] = result0; + out_data[out_idx++] = result1; + out_data[out_idx++] = result2; + out_data[out_idx++] = result3; + } + for (; ch_idx < channels; ch_idx++) {//channel_loop + int32_t result = 0; + + for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx; + int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels) + ch_idx; + int32_t input_val = input_data[input_index] + input_offset; + int32_t filter_val = filter_data[filter_index]; + result += input_val * filter_val; + } + } + if (bias) { + result += bias[ch_idx]; + } + result = esp_nn_multiply_by_quantized_mult_fast(result, *out_mult++, *out_shift++); + result += out_offset; + result = max(result, activation_min); + result = min(result, activation_max); + + out_data[out_idx++] = result; + } + } + } +} + +void esp_nn_depthwise_conv_s8_opt(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const dw_conv_params_t *conv_params, + const quant_data_t *quant_data) +{ + const uint16_t ch_mult = conv_params->ch_mult; + if (ch_mult == 1) { + esp_nn_depthwise_conv_s8_ch_mult_1(input_dims, input_data, filter_dims, filter_data, + bias, output_dims, out_data, conv_params, quant_data); + return; + } + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t channels = input_dims->channels; + const int32_t input_offset = conv_params->in_offset; + const int32_t out_offset = conv_params->out_offset; + 
const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const int32_t activation_min = conv_params->activation.min; + const int32_t activation_max = conv_params->activation.max; + + int out_idx = 0; + for (int out_y = 0; out_y < out_ht; out_y++) { //height loop + const int16_t base_y = (out_y * stride_ht) - pad_ht; + for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + const int16_t base_x = (out_x * stride_wd) - pad_wd; + + const int32_t *out_shift = quant_data->shift; + const int32_t *out_mult = quant_data->mult; + + /* Select filter so as the point doesn't lie outside block */ + int filter_y_start = max(0, -base_y); + int filter_x_start = max(0, -base_x); + int filter_y_end = min(filter_ht, input_ht - base_y); + int filter_x_end = min(filter_wd, input_wd - base_x); + + for (int ch_idx = 0; ch_idx < channels; ch_idx++) {//channel_loop + int ch_mult_idx = 0; + for (; ch_mult_idx < ch_mult - 3; ch_mult_idx += 4) { + int32_t result0 = 0; + int32_t result1 = 0; + int32_t result2 = 0; + int32_t result3 = 0; + const int out_ch_idx = ch_idx * ch_mult + ch_mult_idx; + + for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx; + int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels * ch_mult) + out_ch_idx; + int32_t input_val = input_data[input_index] + input_offset; + int32_t filter_val0 = filter_data[filter_index + 0]; + int32_t filter_val1 = filter_data[filter_index + 1]; + int32_t filter_val2 = filter_data[filter_index + 2]; + int32_t filter_val3 = filter_data[filter_index + 3]; + result0 += input_val * filter_val0; + result1 += input_val * filter_val1; + result2 += input_val * filter_val2; + result3 += input_val * filter_val3; + } + } + if (bias) { + result0 += bias[out_ch_idx + 0]; + result1 += bias[out_ch_idx + 1]; + result2 += bias[out_ch_idx + 2]; + result3 += bias[out_ch_idx + 3]; + } + result0 = esp_nn_multiply_by_quantized_mult_fast(result0, *out_mult++, *out_shift++); + result1 = esp_nn_multiply_by_quantized_mult_fast(result1, *out_mult++, *out_shift++); + result2 = esp_nn_multiply_by_quantized_mult_fast(result2, *out_mult++, *out_shift++); + result3 = esp_nn_multiply_by_quantized_mult_fast(result3, *out_mult++, *out_shift++); + + result0 += out_offset; + result1 += out_offset; + result2 += out_offset; + result3 += out_offset; + + result0 = max(result0, activation_min); + result1 = max(result1, activation_min); + result2 = max(result2, activation_min); + result3 = max(result3, activation_min); + result0 = min(result0, activation_max); + result1 = min(result1, activation_max); + result2 = min(result2, activation_max); + result3 = min(result3, activation_max); + + out_data[out_idx++] = result0; + out_data[out_idx++] = result1; + out_data[out_idx++] = result2; + out_data[out_idx++] = result3; + } + for (; ch_mult_idx < ch_mult; ch_mult_idx++) { + int32_t result = 0; + const int out_ch_idx = ch_idx * ch_mult + ch_mult_idx; + 
+ for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx; + int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels * ch_mult) + out_ch_idx; + int32_t input_val = input_data[input_index] + input_offset; + int32_t filter_val = filter_data[filter_index]; + result += input_val * filter_val; + } + } + if (bias) { + result += bias[out_ch_idx]; + } + result = esp_nn_multiply_by_quantized_mult_fast(result, *out_mult++, *out_shift++); + result += out_offset; + result = max(result, activation_min); + result = min(result, activation_max); + + out_data[out_idx++] = result; + } + } + } + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3.S new file mode 100644 index 0000000..2042573 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3.S @@ -0,0 +1,403 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
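Before the depthwise ESP32-S3 assembly below, a minimal usage sketch (not part of the diff) for the C depthwise kernels added just above in esp_nn_depthwise_conv_ansi.c and esp_nn_depthwise_conv_opt.c. The struct field names come from that code; the header that defines data_dims_t, dw_conv_params_t and quant_data_t, and all numeric values, are assumptions made for illustration.

#include <stdint.h>
#include "esp_nn.h"   /* assumed: the ESP-NN header that defines data_dims_t, dw_conv_params_t, quant_data_t */

/* 3x3, stride 1, ch_mult == 1 depthwise convolution over a 16x16x8 int8 tensor;
 * per_ch_mult/per_ch_shift hold one entry per output channel (8 here). */
void depthwise_example(const int8_t *input, const int8_t *filter, const int32_t *bias,
                       int8_t *output, int32_t *per_ch_mult, int32_t *per_ch_shift)
{
    data_dims_t input_dims  = { .width = 16, .height = 16, .channels = 8 };
    data_dims_t filter_dims = { .width = 3,  .height = 3 };
    data_dims_t output_dims = { .width = 16, .height = 16 };

    dw_conv_params_t conv_params = {
        .in_offset  = 128,                          /* example zero-point corrections */
        .out_offset = -128,
        .ch_mult    = 1,                            /* routes esp_nn_depthwise_conv_s8_opt to its unrolled ch_mult == 1 path */
        .padding    = { .width = 1, .height = 1 },
        .stride     = { .width = 1, .height = 1 },
        .activation = { .min = -128, .max = 127 },
    };
    quant_data_t quant_data = { .mult = per_ch_mult, .shift = per_ch_shift };

    esp_nn_depthwise_conv_s8_opt(&input_dims, input, &filter_dims, filter,
                                 bias, &output_dims, output, &conv_params, &quant_data);
}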
+ + .text + .literal_position + + # Program Unit: esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3 + .type esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3, @function + .align 4 + .global esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3 + +esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3: # 0x776 + # qacc_scratch = 0 + # gra_spill_temp_35 = 48 + # gra_spill_temp_36 = 52 + # gra_spill_temp_37 = 56 + # gra_spill_temp_38 = 60 + # gra_spill_temp_39 = 64 + # gra_spill_temp_40 = 68 + # gra_spill_temp_41 = 72 + # gra_spill_temp_42 = 76 + # gra_spill_temp_43 = 80 + # gra_spill_temp_44 = 84 + # gra_spill_temp_45 = 88 + # gra_spill_temp_46 = 92 + # gra_spill_temp_47 = 96 + # gra_spill_temp_48 = 100 + # gra_spill_temp_49 = 104 + # gra_spill_temp_50 = 108 + # gra_spill_temp_51 = 112 + # gra_spill_temp_52 = 116 + # gra_spill_temp_53 = 120 + # gra_spill_temp_54 = 124 + # gra_spill_temp_55 = 128 + # gra_spill_temp_56 = 132 + # gra_spill_temp_57 = 136 + # gra_spill_temp_58 = 140 + # gra_spill_temp_59 = 144 + # gra_spill_temp_60 = 148 + # gra_spill_temp_61 = 152 + # gra_spill_temp_62 = 156 + # gra_spill_temp_63 = 160 + # gra_spill_temp_64 = 164 + # gra_spill_temp_65 = 168 + # gra_spill_temp_66 = 176 + # gra_spill_temp_67 = 192 + # gra_spill_temp_68 = 208 + # gra_spill_temp_69 = 224 + # gra_spill_temp_70 = 240 + + // registers: + // a2: const int16_t *input_data + // a3: const uint16_t input_wd + // a4: const uint16_t input_ht + // a5: const uint16_t channels + // a6: const uint16_t pad_wd + // a7: const uint16_t pad_ht + + // on stack + // const uint16_t stride_wd + // const uint16_t stride_ht + // const int16_t *filter_data + // const int32_t *bias + // int8_t *out_data + // const uint16_t out_wd + // const uint16_t out_ht + // const int32_t out_offset + // const int32_t *out_shift + // const int32_t *out_mult + // const int32_t activation_min + // const int32_t activation_max + + entry a1,288 # + s32i a2,a1,104 # [0] gra_spill_temp_49 + s32i a3,a1,112 # [1] gra_spill_temp_51 + s32i a5,a1,116 # [2] gra_spill_temp_52 + s32i.n a6,a1,56 # [3] gra_spill_temp_37 + addi a14,a1,112 # [4] + addmi a11,a1,256 # [5] + addmi a13,a1,256 # [6] + addmi a15,a1,256 # [7] + l32i a9,a1,304 # [8] id:251 out_data+0x0 + l16ui a8,a1,312 # [9] id:252 out_ht+0x0 + s32i a8,a1,64 # [10] gra_spill_temp_39 + s32i a9,a1,156 # [11] gra_spill_temp_62 + addi a15,a15,60 # [12] + addi a13,a13,72 # [13] + addi a11,a11,76 # [14] + ee.vldbc.32 q0,a11 # [15] id:250 activation_max + ee.vldbc.32 q1,a13 # [16] id:249 activation_min + ee.vldbc.32 q2,a15 # [17] id:248 out_offset + st.qr q2,a14,80 # [18] gra_spill_temp_67-112 + st.qr q1,a14,96 # [19] gra_spill_temp_68-112 + st.qr q0,a14,112 # [20] gra_spill_temp_69-112 + beqz.n a8,.Lt_5_7426 # [21] + +.LBB3_esp_nn_depthwise_conv_s16_mult1_3x3: # 0x7b9 + s32i a1,a1,160 # [0] gra_spill_temp_63 + s32i a7,a1,72 # [1] gra_spill_temp_41 + mul16u a6,a3,a5 # [2] + l32i a14,a1,296 # [3] id:254 filter_data+0x0 + l32i a15,a1,300 # [4] id:253 bias+0x0 + l16ui a9,a1,308 # [5] id:259 out_wd+0x0 + l16ui a13,a1,288 # [6] id:255 stride_wd+0x0 + neg a8,a7 # [7] + l16ui a10,a1,292 # [8] id:258 stride_ht+0x0 + l32i a11,a1,324 # [9] id:257 out_mult+0x0 + l32i a12,a1,320 # [10] id:256 out_shift+0x0 + s32i a12,a1,84 # [11] gra_spill_temp_44 + s32i a11,a1,88 # [12] gra_spill_temp_45 + s32i.n a10,a1,60 # [13] gra_spill_temp_38 + s32i a8,a1,124 # [14] gra_spill_temp_54 + s32i a13,a1,80 # [15] gra_spill_temp_43 + s32i a9,a1,92 # [16] gra_spill_temp_46 + s32i a15,a1,140 # [17] gra_spill_temp_58 + s32i a14,a1,108 # [18] gra_spill_temp_50 + slli 
a6,a6,1 # [19] + movi.n a14,16 # [20] + extui a15,a15,0,4 # [21] + addi a9,a5,-7 # [22] + movi.n a13,0 # [23] + sub a8,a4,a8 # [24] + addx2 a7,a5,a5 # [25] + slli a7,a7,1 # [26] + slli a4,a5,1 # [27] + s32i a13,a1,68 # [28] gra_spill_temp_40 + s32i a9,a1,144 # [29] gra_spill_temp_59 + s32i a15,a1,132 # [30] gra_spill_temp_56 + l32i.n a9,a1,56 # [31] gra_spill_temp_37 + s32i a8,a1,76 # [32] gra_spill_temp_42 + neg a9,a9 # [33] + s32i.n a9,a1,48 # [34] gra_spill_temp_35 + sub a8,a3,a9 # [35] + s32i.n a8,a1,52 # [36] gra_spill_temp_36 + +.Lt_5_7938: # 0x822 + l32i a10,a1,92 # [0] gra_spill_temp_46 + beqz.n a10,.Lt_5_8194 # [2] + +.LBB6_esp_nn_depthwise_conv_s16_mult1_3x3: # 0x827 + l32i.n a5,a1,52 # [0] gra_spill_temp_36 + l32i a11,a1,76 # [1] gra_spill_temp_42 + movi.n a13,0 # [2] + l32i a12,a1,72 # [3] gra_spill_temp_41 + movi.n a15,0 # [4] + l32i.n a8,a1,48 # [5] gra_spill_temp_35 + l32i.n a9,a1,56 # [6] gra_spill_temp_37 + s32i a9,a1,100 # [7] gra_spill_temp_48 + s32i a8,a1,128 # [8] gra_spill_temp_55 + s32i a15,a1,96 # [9] gra_spill_temp_47 + max a12,a12,a13 # [10] + s32i a12,a1,152 # [11] gra_spill_temp_61 + movi.n a13,3 # [12] + min a11,a11,a13 # [13] + s32i a11,a1,136 # [14] gra_spill_temp_57 + sub a11,a11,a12 # [15] + s32i a11,a1,120 # [16] gra_spill_temp_53 + +.Lt_5_8706: # 0x854 + l32i a2,a1,84 # [0] gra_spill_temp_44 + l32i a10,a1,144 # [1] gra_spill_temp_59 + l32i a11,a1,140 # [2] gra_spill_temp_58 + l32i a12,a1,88 # [3] gra_spill_temp_45 + s32i a12,a1,168 # [4] gra_spill_temp_65 + s32i a11,a1,148 # [5] gra_spill_temp_60 + blti a10,1,.Lt_5_8962 # [6] + + movi.n a8,0 # [0] + movi.n a13,0 # [1] + l32i a3,a1,100 # [2] gra_spill_temp_48 + s32i a13,a1,164 # [3] gra_spill_temp_64 + max a3,a3,a8 # [4] + +.Lt_5_9474: # 0x876 + l32i a10,a1,136 # [0] gra_spill_temp_57 + l32i a9,a1,152 # [1] gra_spill_temp_61 + ee.zero.qacc # [2] + bge a9,a10,.Lt_5_9730 # [3] + +.LBB12_esp_nn_depthwise_conv_s16_mult1_3x3: # 0x882 + l32i a12,a1,128 # [0] gra_spill_temp_55 + l32i a15,a1,112 # [1] gra_spill_temp_51 + l32i a10,a1,116 # [2] gra_spill_temp_52 + l32i a13,a1,124 # [3] gra_spill_temp_54 + mull a11,a9,a10 # [4] + add.n a13,a13,a9 # [5] + mull a13,a13,a15 # [6] + addx2 a11,a11,a11 # [7] + l32i a9,a1,164 # [8] gra_spill_temp_64 + add.n a12,a12,a13 # [9] + mull a10,a10,a12 # [10] + add.n a11,a9,a11 # [11] + l32i a12,a1,108 # [12] gra_spill_temp_50 + add.n a9,a9,a10 # [13] + l32i a10,a1,104 # [14] gra_spill_temp_49 + addx2 a11,a11,a12 # [15] + l32i a12,a1,120 # [16] gra_spill_temp_53 + addx2 a9,a9,a10 # [17] + loopgtz a12,.LBB32_esp_nn_depthwise_conv_s16_mult1_3x3 # [18] + + mov.n a13,a9 # [0] + mov.n a12,a11 # [1] + mov.n a9,a11 # [2] + mov.n a11,a13 # [3] + + beqz.n a3,.Lt_5_10498 # [4] if (filter_x_start) + + add.n a11,a4,a13 # [0] + add.n a9,a4,a12 # [1] +.Lt_5_10498: # 0x8c5 + + ee.vld.128.xp q0,a11,a4 # [0] id:261 + ee.vld.128.xp q1,a9,a4 # [1] id:262 + + bnez.n a3,.Lt_5_11010 # [2] if (filter_x_start) + + ee.vmulas.s16.qacc q0,q1 # [0] + ee.vld.128.xp q0,a11,a4 # [1] id:264 + ee.vld.128.xp q1,a9,a4 # [2] id:265 +.Lt_5_11010: # 0x8d6 + + ee.vmulas.s16.qacc q0,q1 # [0] + ee.vld.128.xp q0,a11,a4 # [1] id:267 + ee.vld.128.xp q1,a9,a4 # [2] id:268 + add.n a9,a6,a13 # [3] + + blti a5,3,.Lt_5_11522 # [4] if (filter_x_end) + ee.vmulas.s16.qacc q0,q1 # [0] +.Lt_5_11522: # 0x8e7 + + add.n a11,a7,a12 # [0] + +.LBB32_esp_nn_depthwise_conv_s16_mult1_3x3: # 0x8eb + +.Lt_5_9730: # 0x8eb + // extract data + l32i a9,a1,160 # [0] gra_spill_temp_63 + ee.st.qacc_l.l.128.ip a9,16 # [2] id:270 + ee.st.qacc_l.h.32.ip 
a9,0 # [3] id:271 + l8ui a11,a1,15 # [4] qacc_scratch+15 + l16ui a10,a1,10 # [5] qacc_scratch+10 + l8ui a15,a1,16 # [6] qacc_scratch+16 + l8ui a13,a1,6 # [7] qacc_scratch+6 + l8ui a12,a1,5 # [8] qacc_scratch+5 + s8i a12,a1,2 # [9] qacc_scratch+2 + s8i a13,a1,3 # [10] qacc_scratch+3 + s8i a15,a1,7 # [11] qacc_scratch+7 + s16i a10,a1,4 # [12] qacc_scratch+4 + s8i a11,a1,6 # [13] qacc_scratch+6 + + ee.st.qacc_h.l.128.ip a9,16 # [14] id:281 + ee.st.qacc_h.h.32.ip a9,-32 # [15] id:282 + ee.srcmb.s16.qacc q1,a14,0 # [16] + l8ui a15,a1,31 # [17] qacc_scratch+31 + l8ui a8,a1,32 # [18] qacc_scratch+32 + l16ui a13,a1,26 # [19] qacc_scratch+26 + l8ui a12,a1,22 # [20] qacc_scratch+22 + l8ui a11,a1,21 # [21] qacc_scratch+21 + l16ui a10,a1,16 # [22] qacc_scratch+16 + s16i a10,a1,8 # [23] qacc_scratch+8 + s8i a11,a1,10 # [24] qacc_scratch+10 + s8i a12,a1,11 # [25] qacc_scratch+11 + s16i a13,a1,12 # [26] qacc_scratch+12 + s8i a8,a1,15 # [27] qacc_scratch+15 + s8i a15,a1,14 # [28] qacc_scratch+14 + + + l32i a8,a1,140 # [29] gra_spill_temp_58 , bias + ee.vld.128.ip q0,a9,0 # [30] id:294 + s32i a9,a1,160 # [31] gra_spill_temp_63 + ee.vzip.16 q0,q1 # [32] + beqz.n a8,.Lt_5_12290 # [33] // skip bias + + addi a8,a1,112 # [0] + l32i a10,a1,132 # [1] gra_spill_temp_56 + l32i a9,a1,148 # [2] gra_spill_temp_60 + wur.sar_byte a10 # [3] + ee.vld.128.ip q4,a9,16 # [4] id:297 + ee.vld.128.ip q7,a9,16 # [5] id:298 + ee.vld.128.ip q5,a9,0 # [6] id:299 + s32i a9,a1,148 # [7] gra_spill_temp_60 + ee.src.q.qup q6,q4,q7 # [8] + ee.vadds.s32 q0,q0,q6 # [9] + ee.src.q.qup q3,q4,q5 # [10] + ee.vadds.s32 q1,q1,q3 # [11] + st.qr q1,a8,64 # [12] gra_spill_temp_66-112 + +.Lt_5_12290: # 0x974 + addi a11,a1,112 # [0] + + # 287 q0 = esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q0, out_mult_ptr, out_shift_ptr); + l32i a10,a1,168 # [1] gra_spill_temp_65 + st.qr q1,a11,64 # [2] gra_spill_temp_66-112 + mov.n a11,a2 # [3] + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [4] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + # 288 out_mult_ptr += 4; + # 289 out_shift_ptr += 4; + # 290 + # 291 q1 = esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q1, out_mult_ptr, out_shift_ptr); + l32i a10,a1,168 # [0] gra_spill_temp_65 + addmi a12,a1,256 # [1] + addi a11,a1,112 # [2] + st.qr q0,a12,-16 # [3] gra_spill_temp_70-256 + ld.qr q0,a11,64 # [4] gra_spill_temp_66-112 + addi a10,a10,16 # [5] + addi a11,a2,16 # [6] + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [7] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + +.LBB25_esp_nn_depthwise_conv_s16_mult1_3x3: # 0x99a +# Part of loop body line 216, head labeled .Lt_5_9474 + movi.n a14,16 # [0] + # 292 out_mult_ptr += 4; + # 293 out_shift_ptr += 4; + addi a2,a2,32 # [1] + l32i a15,a1,144 # [2] gra_spill_temp_59 + l32i a9,a1,156 # [3] gra_spill_temp_62 + l32i a8,a1,168 # [4] gra_spill_temp_65 + addmi a12,a1,256 # [5] + addi a13,a1,112 # [6] + ld.qr q3,a13,112 # [7] gra_spill_temp_69-112 + ld.qr q1,a13,80 # [8] gra_spill_temp_67-112 + ld.qr q2,a12,-16 # [9] gra_spill_temp_70-256 + addi a8,a8,32 # [10] + s32i a8,a1,168 # [11] gra_spill_temp_65 + ee.vadds.s32 q2,q2,q1 # [12] + ee.vadds.s32 q1,q0,q1 # [13] + ee.vmin.s32 q0,q2,q3 # [14] + ee.vmin.s32 q1,q1,q3 # [15] + ld.qr q2,a13,96 # [16] gra_spill_temp_68-112 + l32i a13,a1,164 # [17] gra_spill_temp_64 + ee.vmax.s32 q1,q1,q2 # [18] + ee.vmax.s32 q0,q0,q2 # [19] + addi.n a13,a13,8 # [20] + s32i a13,a1,164 # [21] gra_spill_temp_64 + ee.vunzip.16 q0,q1 # [22] + ee.vunzip.8 q0,q1 # [23] + ee.vst.l.64.ip q0,a9,8 # [24] id:302 + s32i a9,a1,156 # 
[25] gra_spill_temp_62 + blt a13,a15,.Lt_5_9474 # [26] + +.Lt_5_8962: # 0x9e9 +# Part of loop body line 203, head labeled .Lt_5_8706 + l32i a8,a1,92 # [0] gra_spill_temp_46 + l32i a11,a1,100 # [1] gra_spill_temp_48 + l32i a10,a1,128 # [2] gra_spill_temp_55 + l32i a9,a1,80 # [3] gra_spill_temp_43 + l32i a15,a1,96 # [4] gra_spill_temp_47 + sub a5,a5,a9 # [5] + addi.n a15,a15,1 # [6] + s32i a15,a1,96 # [7] gra_spill_temp_47 + add.n a10,a10,a9 # [8] + sub a11,a11,a9 # [9] + s32i a11,a1,100 # [10] gra_spill_temp_48 + s32i a10,a1,128 # [11] gra_spill_temp_55 + sub a15,a15,a8 # [12] + bnez a15,.Lt_5_8706 # [13] + +.Lt_5_8194: # 0xa11 +# Part of loop body line 201, head labeled .Lt_5_7938 + l32i a13,a1,64 # [0] gra_spill_temp_39 + l32i a10,a1,72 # [1] gra_spill_temp_41 + l32i a9,a1,124 # [2] gra_spill_temp_54 + l32i.n a8,a1,60 # [3] gra_spill_temp_38 + l32i a12,a1,68 # [4] gra_spill_temp_40 + l32i a15,a1,76 # [5] gra_spill_temp_42 + addi.n a12,a12,1 # [6] + s32i a12,a1,68 # [7] gra_spill_temp_40 + sub a15,a15,a8 # [8] + add.n a9,a9,a8 # [9] + sub a10,a10,a8 # [10] + s32i a10,a1,72 # [11] gra_spill_temp_41 + s32i a9,a1,124 # [12] gra_spill_temp_54 + s32i a15,a1,76 # [13] gra_spill_temp_42 + sub a12,a12,a13 # [14] + bnez a12,.Lt_5_7938 # [15] + +.Lt_5_7426: # 0xa3e + retw.n # [0] + + .size esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3, . - esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3.S new file mode 100644 index 0000000..06f9307 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3.S @@ -0,0 +1,367 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
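+
+// Reviewer sketch (not part of the generated kernel): a plain-C reference of what this
+// routine computes, assuming the argument order in the register/stack comments below and
+// the TFLite depthwise filter layout [ky][kx][channel]. requantize_and_clamp() is a
+// hypothetical helper standing in for the per-channel multiply/shift, out_offset add and
+// activation clamp done below through esp_nn_multiply_by_quantized_mult_ver1_esp32s3
+// (a commented C version of that helper is sketched before the mult8 kernel further down).
+// Needs <stdint.h>.
+//
+//   static int8_t requantize_and_clamp(int32_t acc, int32_t mult, int32_t shift,
+//                                      int32_t out_offset, int32_t act_min, int32_t act_max);
+//
+//   static void depthwise_conv_s16_mult1_3x3_no_pad_ref(
+//           const int16_t *input_data, uint16_t input_wd, uint16_t input_ht,
+//           uint16_t channels, uint16_t stride_wd, uint16_t stride_ht,
+//           const int16_t *filter_data, const int32_t *bias, int8_t *out_data,
+//           uint16_t out_wd, uint16_t out_ht, int32_t out_offset,
+//           const int32_t *out_shift, const int32_t *out_mult,
+//           int32_t activation_min, int32_t activation_max)
+//   {
+//       (void) input_ht;                                  /* implied by out_ht when there is no padding */
+//       for (int out_y = 0; out_y < out_ht; out_y++) {
+//           for (int out_x = 0; out_x < out_wd; out_x++) {
+//               for (int ch = 0; ch < channels; ch++) {   /* ch_mult == 1 */
+//                   int32_t acc = bias ? bias[ch] : 0;
+//                   for (int ky = 0; ky < 3; ky++) {      /* no padding: all 9 taps are in range */
+//                       for (int kx = 0; kx < 3; kx++) {
+//                           int in_y = out_y * stride_ht + ky;
+//                           int in_x = out_x * stride_wd + kx;
+//                           acc += (int32_t) input_data[(in_y * input_wd + in_x) * channels + ch]
+//                                * filter_data[(ky * 3 + kx) * channels + ch];
+//                       }
+//                   }
+//                   *out_data++ = requantize_and_clamp(acc, out_mult[ch], out_shift[ch],
+//                                                      out_offset, activation_min, activation_max);
+//               }
+//           }
+//       }
+//   }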
+ + .text + .literal_position + + # Program Unit: esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3 + .type esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3, @function + .align 4 + .global esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3 + +esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3: # 0xa42 + # qacc_scratch = 0 + # gra_spill_temp_71 = 48 + # gra_spill_temp_72 = 52 + # gra_spill_temp_73 = 56 + # gra_spill_temp_74 = 60 + # gra_spill_temp_75 = 64 + # gra_spill_temp_76 = 68 + # gra_spill_temp_77 = 72 + # gra_spill_temp_78 = 76 + # gra_spill_temp_79 = 80 + # gra_spill_temp_80 = 84 + # gra_spill_temp_81 = 88 + # gra_spill_temp_82 = 92 + # gra_spill_temp_83 = 96 + # gra_spill_temp_84 = 100 + # gra_spill_temp_85 = 104 + # gra_spill_temp_86 = 108 + # gra_spill_temp_87 = 112 + # gra_spill_temp_88 = 116 + # gra_spill_temp_89 = 120 + # gra_spill_temp_90 = 124 + # gra_spill_temp_91 = 128 + # gra_spill_temp_92 = 132 + # gra_spill_temp_93 = 136 + # gra_spill_temp_94 = 140 + # gra_spill_temp_95 = 144 + # gra_spill_temp_96 = 160 + # gra_spill_temp_97 = 176 + # gra_spill_temp_98 = 192 + # gra_spill_temp_99 = 208 + # gra_spill_temp_100 = 224 + # gra_spill_temp_101 = 240 + # gra_spill_temp_102 = 244 + # gra_spill_temp_103 = 248 + + // registers: + // a2: const int16_t *input_data + // a3: const uint16_t input_wd + // a4: const uint16_t input_ht + // a5: const uint16_t channels + // a6: const uint16_t stride_wd + // a7: const uint16_t stride_ht + + // on stack: + // const int16_t *filter_data + // const int32_t *bias + // int8_t *out_data + // const uint16_t out_wd + // const uint16_t out_ht + // const int32_t out_offset + // const int32_t *out_shift + // const int32_t *out_mult + // const int32_t activation_min + // const int32_t activation_max + + entry a1,288 # + s32i a2,a1,120 # [0] gra_spill_temp_89 + s32i.n a3,a1,48 # [1] gra_spill_temp_71 + s32i a5,a1,76 # [2] gra_spill_temp_78 + s32i a6,a1,84 # [3] gra_spill_temp_80 + s32i.n a7,a1,60 # [4] gra_spill_temp_74 + l32i a12,a1,296 # [5] id:241 out_data+0x0 + addi a14,a1,112 # [6] + addmi a10,a1,256 # [7] + addmi a13,a1,256 # [8] + addmi a15,a1,256 # [9] + + // height loop + l16ui a8,a1,304 # [10] id:242 out_ht+0x0 + s32i.n a8,a1,56 # [11] gra_spill_temp_73 + addi a15,a15,52 # [12] + addi a13,a13,64 # [13] + addi a10,a10,68 # [14] + ee.vldbc.32 q0,a10 # [15] id:240 activation_max + ee.vldbc.32 q1,a13 # [16] id:239 activation_min + ee.vldbc.32 q2,a15 # [17] id:238 out_offset + st.qr q2,a14,64 # [18] gra_spill_temp_97-112 + st.qr q1,a14,80 # [19] gra_spill_temp_98-112 + st.qr q0,a14,96 # [20] gra_spill_temp_99-112 + beqz.n a8,.Lt_6_6914 # [21] + +.LBB3_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad: # 0xa83 + s32i a1,a1,144 # [0] gra_spill_temp_95 + mul16u a7,a3,a5 # [1] + s32i a4,a1,72 # [2] gra_spill_temp_77 + addi a9,a5,-7 # [3] + l16ui a11,a1,300 # [4] id:247 out_wd+0x0 + l32i a10,a1,292 # [5] id:243 bias+0x0 + l32i a15,a1,288 # [6] id:244 filter_data+0x0 + l32i a13,a1,316 # [7] id:246 out_mult+0x0 + l32i a14,a1,312 # [8] id:245 out_shift+0x0 + s32i a14,a1,88 # [9] gra_spill_temp_81 + s32i a13,a1,92 # [10] gra_spill_temp_82 + s32i a15,a1,124 # [11] gra_spill_temp_90 + s32i a10,a1,116 # [12] gra_spill_temp_88 + s32i a11,a1,96 # [13] gra_spill_temp_83 + s32i a9,a1,136 # [14] gra_spill_temp_93 + addx2 a4,a5,a5 # [15] + slli a4,a4,1 # [16] + slli a7,a7,1 # [17] + l32i.n a9,a1,60 # [18] gra_spill_temp_74 + movi.n a11,0 # [19] + extui a10,a10,0,4 # [20] + movi.n a15,0 # [21] + slli a5,a5,1 # [22] + s32i a15,a1,68 # [23] gra_spill_temp_76 + s32i 
a10,a1,112 # [24] gra_spill_temp_87 + s32i a11,a1,64 # [25] gra_spill_temp_75 + mul16u a8,a3,a9 # [26] + movi.n a11,0 # [27] + s32i a11,a1,80 # [28] gra_spill_temp_79 + s32i.n a8,a1,52 # [29] gra_spill_temp_72 + +.Lt_6_7426: # 0xad8 // width_loop + l32i a8,a1,96 # [0] gra_spill_temp_83 + beqz.n a8,.Lt_6_7682 # [2] + + movi.n a11,3 # [0] + l32i a10,a1,72 # [1] gra_spill_temp_77 + movi.n a9,0 # [2] + movi.n a13,0 # [3] + l32i.n a14,a1,48 # [4] gra_spill_temp_71 + s32i a14,a1,108 # [5] gra_spill_temp_86 + s32i a13,a1,104 # [6] gra_spill_temp_85 + s32i a9,a1,100 # [7] gra_spill_temp_84 + min a10,a10,a11 # [8] + s32i a10,a1,128 # [9] gra_spill_temp_91 + +.Lt_6_8194: # 0xaf7 + l32i a2,a1,88 # [0] gra_spill_temp_81 + l32i a6,a1,92 # [1] gra_spill_temp_82 + l32i a8,a1,116 # [2] gra_spill_temp_88 + +// channel loop + l32i a15,a1,136 # [3] gra_spill_temp_93 + s32i a8,a1,140 # [4] gra_spill_temp_94 + blti a15,1,.Lt_6_8450 # [5] + + movi.n a11,0 # [0] + movi.n a10,0 # [1] + l32i a9,a1,76 # [2] gra_spill_temp_78 + l32i a14,a1,80 # [3] gra_spill_temp_79 + movi.n a8,3 # [4] + l32i a3,a1,108 # [5] gra_spill_temp_86 + l32i a13,a1,104 # [6] gra_spill_temp_85 + min a3,a3,a8 # [7] + add.n a13,a13,a14 # [8] + mull a9,a9,a13 # [9] + s32i a9,a1,132 # [10] gra_spill_temp_92 + +.Lt_6_8962: # 0xb26 + ee.zero.qacc # [0] + l32i a9,a1,132 # [1] gra_spill_temp_92 + l32i a13,a1,120 # [2] gra_spill_temp_89 + add.n a9,a9,a10 # [3] + addx2 a9,a9,a13 # [4] + l32i a13,a1,124 # [5] gra_spill_temp_90 + l32i a14,a1,128 # [6] gra_spill_temp_91 + add.n a13,a11,a13 # [7] + loopgtz a14,.LBB30_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad # [8] + +.Lt_6_9730: # 0xb3f +# Loop body line 360, nesting depth: 4, estimated iterations: 100 + mov.n a14,a13 # [0] + mov.n a15,a9 # [1] + ee.vld.128.xp q0,a15,a5 # [2] id:249 + ee.vld.128.xp q1,a14,a5 # [3] id:250 + add.n a9,a9,a7 # [4] + beqi a3,2,.LBB15_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad # [5] + +.Lt_6_9986: # 0xb4e + beqi a3,3,.LBB17_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad # [0] + +.Lt_6_10498: # 0xb51 + add.n a13,a13,a4 # [0] + ee.vmulas.s16.qacc q0,q1 # [1] + +.LBB30_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad: # 0xb58 + + // extract data + l32i a15,a1,144 # [0] gra_spill_temp_95 + ee.st.qacc_l.l.128.ip a15,16 # [2] id:258 + ee.st.qacc_l.h.32.ip a15,0 # [3] id:259 + l8ui a14,a1,15 # [4] qacc_scratch+15 + l8ui a13,a1,16 # [5] qacc_scratch+16 + l8ui a8,a1,5 # [6] qacc_scratch+5 + l8ui a9,a1,6 # [7] qacc_scratch+6 + s8i a9,a1,3 # [8] qacc_scratch+3 + s8i a8,a1,2 # [9] qacc_scratch+2 + s8i a13,a1,7 # [10] qacc_scratch+7 + s8i a14,a1,6 # [11] qacc_scratch+6 + l16ui a13,a1,10 # [12] qacc_scratch+10 + s16i a13,a1,4 # [13] qacc_scratch+4 + ee.st.qacc_h.l.128.ip a15,16 # [14] id:269 + ee.st.qacc_h.h.32.ip a15,-32 # [15] id:270 + l8ui a9,a1,32 # [16] qacc_scratch+32 + l8ui a13,a1,22 # [17] qacc_scratch+22 + l8ui a8,a1,31 # [18] qacc_scratch+31 + l16ui a14,a1,26 # [19] qacc_scratch+26 + s16i a14,a1,12 # [20] qacc_scratch+12 + s8i a8,a1,14 # [21] qacc_scratch+14 + s8i a13,a1,11 # [22] qacc_scratch+11 + s8i a9,a1,15 # [23] qacc_scratch+15 + + l32i a13,a1,116 # [24] gra_spill_temp_88 + l8ui a9,a1,21 # [25] qacc_scratch+21 + l16ui a8,a1,16 # [26] qacc_scratch+16 + movi.n a14,16 # [27] + ee.srcmb.s16.qacc q1,a14,0 # [28] + s16i a8,a1,8 # [29] qacc_scratch+8 + s8i a9,a1,10 # [30] qacc_scratch+10 + ee.vld.128.ip q0,a15,0 # [31] id:282 + s32i a15,a1,144 # [32] gra_spill_temp_95 + ee.vzip.16 q0,q1 # [33] + + bnez.n a13,.LBB20_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad # [34] + + s32i a12,a1,240 # [0] 
gra_spill_temp_101 + s32i a11,a1,244 # [1] gra_spill_temp_102 + s32i a10,a1,248 # [2] gra_spill_temp_103 + addi a14,a1,112 # [3] + st.qr q1,a14,48 # [4] gra_spill_temp_96-112 + j .Lt_6_11266 # [5] + +.LBB15_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad: # 0xbce +# Part of loop body line 360, head labeled .Lt_6_9730 + ee.vmulas.s16.qacc.ld.xp q0,a15,a5,q0,q1 # [0] id:251 + ee.vld.128.xp q1,a14,a5 # [1] id:252 + bnei a3,3,.Lt_6_10498 # [2] + +.LBB17_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad: # 0xbd8 + ee.vmulas.s16.qacc.ld.xp q3,a15,a5,q0,q1 # [0] id:253 + ee.vld.128.xp q4,a14,a5 # [1] id:254 + ee.vld.128.xp q1,a14,a5 # [2] id:256 + ee.vmulas.s16.qacc.ld.xp q0,a15,a5,q3,q4 # [3] id:255 + j .Lt_6_10498 # [4] + +.LBB20_esp_nn_depthwise_conv_s16_mult1_3x3_no_pad: # 0xbe9 +# Part of loop body line 358, head labeled .Lt_6_8962 + s32i a12,a1,240 # [0] gra_spill_temp_101 + s32i a11,a1,244 # [1] gra_spill_temp_102 + s32i a10,a1,248 # [2] gra_spill_temp_103 + addi a15,a1,112 # [3] + l32i a9,a1,112 # [4] gra_spill_temp_87 + l32i a8,a1,140 # [5] gra_spill_temp_94 + wur.sar_byte a9 # [6] + ee.vld.128.ip q6,a8,16 # [7] id:285 + ee.vld.128.ip q3,a8,16 # [8] id:286 + ee.vld.128.ip q7,a8,0 # [9] id:287 + s32i a8,a1,140 # [10] gra_spill_temp_94 + ee.src.q.qup q2,q6,q3 # [11] + ee.vadds.s32 q0,q0,q2 # [12] + ee.src.q.qup q5,q6,q7 # [13] + ee.vadds.s32 q1,q1,q5 # [14] + st.qr q1,a15,48 # [15] gra_spill_temp_96-112 + +.Lt_6_11266: # 0xc19 + # 423 q0 = esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q0, out_mult_ptr, out_shift_ptr); + mov.n a10,a6 # [0] + mov.n a11,a2 # [1] + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [2] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + addi a11,a1,112 # [0] + addi a10,a6,16 # [1] + st.qr q0,a11,112 # [2] gra_spill_temp_100-112 + ld.qr q0,a11,48 # [3] gra_spill_temp_96-112 + addi a11,a2,16 # [4] + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [5] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + addi a6,a6,32 # [0] + addi a2,a2,32 # [1] + + l32i a13,a1,136 # [2] gra_spill_temp_93 + l32i a12,a1,240 # [3] gra_spill_temp_101 + l32i a10,a1,248 # [4] gra_spill_temp_103 + l32i a11,a1,244 # [5] gra_spill_temp_102 + addi a9,a1,112 # [6] + ld.qr q6,a9,80 # [7] gra_spill_temp_98-112 + ld.qr q7,a9,96 # [8] gra_spill_temp_99-112 + ld.qr q5,a9,64 # [9] gra_spill_temp_97-112 + ld.qr q4,a9,112 # [10] gra_spill_temp_100-112 + addi a11,a11,16 # [11] + addi.n a10,a10,8 # [12] + ee.vadds.s32 q4,q4,q5 # [13] + ee.vadds.s32 q5,q0,q5 # [14] + ee.vmin.s32 q4,q4,q7 # [15] + ee.vmax.s32 q4,q4,q6 # [16] + ee.vmin.s32 q5,q5,q7 # [17] + ee.vmax.s32 q5,q5,q6 # [18] + ee.vunzip.16 q4,q5 # [19] + ee.vunzip.8 q4,q5 # [20] + ee.vst.l.64.ip q4,a12,8 # [21] id:290 + blt a10,a13,.Lt_6_8962 # [22] + +.Lt_6_8450: # 0xc76 +# Part of loop body line 348, head labeled .Lt_6_8194 + l32i a11,a1,96 # [0] gra_spill_temp_83 + l32i a15,a1,104 # [1] gra_spill_temp_85 + l32i a14,a1,84 # [2] gra_spill_temp_80 + l32i a10,a1,100 # [3] gra_spill_temp_84 + l32i a13,a1,108 # [4] gra_spill_temp_86 + addi.n a10,a10,1 # [5] + s32i a10,a1,100 # [6] gra_spill_temp_84 + sub a13,a13,a14 # [7] + add.n a15,a15,a14 # [8] + s32i a15,a1,104 # [9] gra_spill_temp_85 + s32i a13,a1,108 # [10] gra_spill_temp_86 + sub a10,a10,a11 # [11] + bnez a10,.Lt_6_8194 # [12] + +.Lt_6_7682: # 0xc9b + l32i.n a9,a1,56 # [0] gra_spill_temp_73 + l32i a15,a1,64 # [1] gra_spill_temp_75 + l32i.n a14,a1,52 # [2] gra_spill_temp_72 + l32i a13,a1,80 # [3] gra_spill_temp_79 + l32i.n a11,a1,60 # [4] gra_spill_temp_74 + l32i a8,a1,68 # [5] 
gra_spill_temp_76 + l32i a10,a1,72 # [6] gra_spill_temp_77 + addi.n a8,a8,1 # [7] + s32i a8,a1,68 # [8] gra_spill_temp_76 + sub a10,a10,a11 # [9] + add.n a13,a13,a14 # [10] + add.n a15,a15,a11 # [11] + s32i a15,a1,64 # [12] gra_spill_temp_75 + s32i a13,a1,80 # [13] gra_spill_temp_79 + s32i a10,a1,72 # [14] gra_spill_temp_77 + sub a8,a8,a9 # [15] + bnez a8,.Lt_6_7426 # [16] + +.Lt_6_6914: # 0xcc8 + retw.n # [0] + + .size esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3, . - esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_esp32s3.S new file mode 100644 index 0000000..8568df5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult1_esp32s3.S @@ -0,0 +1,345 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
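+
+// Reviewer sketch (assumption-labelled, as in the sibling kernels): this padded variant
+// only visits the filter taps that fall inside the input. The helper below is a plain-C
+// rendering of the max()/min() bound computation visible in the assembly
+// ("base_y = (out_y * stride_ht) - pad_ht", then the clamped filter start/end); the name
+// valid_filter_range() is illustrative, not part of ESP-NN.
+// Usage: base_y = out_y * stride_ht - pad_ht; then loop ky from *start to *end - 1.
+//
+//   static inline void valid_filter_range(int base, int input_size, int filter_size,
+//                                         int *start, int *end)
+//   {
+//       *start = (base < 0) ? -base : 0;             /* skip taps that fall before the input */
+//       int remain = input_size - base;              /* taps that still fit inside the input */
+//       *end = (remain < filter_size) ? remain : filter_size;
+//   }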
+ + .text + .literal_position + + # Program Unit: esp_nn_depthwise_conv_s16_mult1_esp32s3 + .type esp_nn_depthwise_conv_s16_mult1_esp32s3, @function + .align 4 + .global esp_nn_depthwise_conv_s16_mult1_esp32s3 + +esp_nn_depthwise_conv_s16_mult1_esp32s3: # 0x4c8 + # scratch_buf = 0 + # gra_spill_temp_2 = 48 + # gra_spill_temp_22 = 52 + # gra_spill_temp_4 = 56 + # gra_spill_temp_23 = 60 + # gra_spill_temp_24 = 64 + # gra_spill_temp_7 = 68 + # gra_spill_temp_26 = 72 + # gra_spill_temp_27 = 76 + # gra_spill_temp_28 = 80 + # gra_spill_temp_29 = 84 + # gra_spill_temp_12 = 88 + # gra_spill_temp_13 = 92 + # gra_spill_temp_14 = 96 + # gra_spill_temp_15 = 100 + # gra_spill_temp_21 = 104 + # gra_spill_temp_17 = 108 + # gra_spill_temp_18 = 112 + # gra_spill_temp_20 = 116 + # gra_spill_temp_30 = 0 + # gra_spill_temp_34 = 16 + + // in registers: + // a2: *input_data + // a3: input_wd + // a4: input_ht + // a5: channels + // a6: pad_wd + // a7: pad_ht + + // on stack: + // stride_wd + // stride_ht + // *filter_data + // filter_wd + // filter_ht + // *bias + // *out_data + // out_wd + // out_ht + // out_offset + // *out_shift + // *out_mult + // activation_min + // activation_max + + entry a1,160 # + l32i a9,a1,184 # [7] id:237 out_data+0x0 + l16ui a8,a1,192 # [8] id:238 out_ht+0x0 + s32i a2,a1,52 # [0] gra_spill_temp_22 + s32i.n a4,a1,56 # [1] gra_spill_temp_4 + s32i a5,a1,60 # [2] gra_spill_temp_23 + s32i a9,a1,112 # [10] gra_spill_temp_18 + beqz.n a8,.Lt_4_7170 # [20] + +.LBB3_esp_nn_depthwise_conv_s16_mult1: # 0x508 + l16ui a4,a1,172 # [0] id:240 filter_wd+0x0 + neg a13,a7 # [2] + neg a12,a6 # [3] + sext a12,a12,15 # [16] + sext a13,a13,15 # [17] + s32i a13,a1,92 # [18] gra_spill_temp_13 + s32i.n a12,a1,48 # [19] gra_spill_temp_2 + movi.n a8,0 # [20] + slli a9,a5,1 # [21] + addi a10,a5,-7 # [22] + s32i a10,a1,100 # [23] gra_spill_temp_15 + s32i a9,a1,64 # [24] gra_spill_temp_24 + s32i a8,a1,68 # [25] gra_spill_temp_7 + j .Lt_4_7682 # [30] + +.Lt_4_7938: # 0x561 + l32i a15,a1,192 # [0] out_ht + l32i.n a9,a1,164 # [1] stride_ht + l32i a14,a1,68 # [2] gra_spill_temp_7 + l32i a8,a1,92 # [3] gra_spill_temp_13 + addi.n a14,a14,1 # [4] + s32i a14,a1,68 # [5] gra_spill_temp_7 + add.n a9,a8,a9 # [6] + sub a14,a14,a15 # [7] + sext a8,a9,15 # [8] + s32i a8,a1,92 # [9] gra_spill_temp_13 + beqz a14,.Lt_4_7170 # [10] + +.Lt_4_7682: # 0x57f +# Loop body line 59, nesting depth: 1, estimated iterations: 100 + # 60 const int16_t base_y = (out_y * stride_ht) - pad_ht; + # 61 for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + l32i a10,a1,188 # [0] out_width + beqz.n a10,.Lt_4_7938 # [2] + +.LBB6_esp_nn_depthwise_conv_s16_mult1: # 0x584 +# Part of loop body line 59, head labeled .Lt_4_7682 + movi.n a14,0 # [0] + l32i.n a7,a1,176 # [1] filter_ht + l32i a13,a1,92 # [2] gra_spill_temp_13 + l32i.n a8,a1,56 # [3] gra_spill_temp_4 + movi.n a11,0 # [4] + l32i.n a12,a1,48 # [5] gra_spill_temp_2 + s32i a12,a1,84 # [6] gra_spill_temp_29 + s32i a11,a1,88 # [7] gra_spill_temp_12 + sub a8,a8,a13 # [8] + min a7,a7,a8 # [9] + neg a13,a13 # [10] + max a13,a13,a14 # [11] + s32i a13,a1,96 # [12] gra_spill_temp_14 + j .Lt_4_8450 # [13] + +.Lt_4_8706: # 0x5a9 +# Part of loop body line 61, head labeled .Lt_4_8450 + l32i a10,a1,188 # [0] out_width + l32i a12,a1,160 # [1] stride_wd + l32i a9,a1,88 # [2] gra_spill_temp_12 + l32i a11,a1,84 # [3] gra_spill_temp_29 + addi.n a9,a9,1 # [4] + s32i a9,a1,88 # [5] gra_spill_temp_12 + add.n a12,a11,a12 # [6] + sext a11,a12,15 # [7] + s32i a11,a1,84 # [8] gra_spill_temp_29 + beq 
a9,a10,.Lt_4_7938 # [9] + +.Lt_4_8450: # 0x5c5 +# Loop body line 61, nesting depth: 2, estimated iterations: 100 + # 69 uint32_t bias_ptr = (uint32_t) bias; + # 70 const int32_t *out_mult_ptr = out_mult; + # 71 const int32_t *out_shift_ptr = out_shift; + # 72 + # 73 for (int ch_idx = 0; ch_idx < channels - 7; ch_idx += 8) {//channel_loop + l32i a13,a1,100 # [0] gra_spill_temp_15 + l32i a14,a1,180 # [1] bias + l32i a15,a1,204 # [2] out_mult + l32i a8,a1,200 # [3] out_shift + s32i a8,a1,104 # [4] gra_spill_temp_21 + s32i a15,a1,116 # [5] gra_spill_temp_20 + s32i a14,a1,108 # [6] gra_spill_temp_17 + blti a13,1,.Lt_4_8706 # [7] + +.LBB9_esp_nn_depthwise_conv_s16_mult1: # 0x5dd +# Part of loop body line 61, head labeled .Lt_4_8450 + movi.n a2,0 # [0] + l32i a5,a1,84 # [1] gra_spill_temp_29 + movi.n a8,0 # [2] + neg a6,a5 # [3] + max a6,a6,a8 # [4] + sub a5,a3,a5 # [5] + min a5,a4,a5 # [6] + sub a9,a5,a6 # [7] + s32i a9,a1,72 # [8] gra_spill_temp_26 + j .Lt_4_9218 # [9] + +.Lt_4_9474: # 0x5f9 + +// extract data + mov a11,a1 + ee.st.qacc_l.l.128.ip a11,16 # [2] id:252 + ee.st.qacc_l.h.32.ip a11,0 # [3] id:253 + l8ui a12,a1,15 # [4] scratch_buf+15 + l16ui a10,a1,10 # [5] scratch_buf+10 + l8ui a13,a1,5 # [6] scratch_buf+5 + l8ui a14,a1,6 # [7] scratch_buf+6 + l8ui a15,a1,16 # [8] scratch_buf+16 + s8i a13,a1,2 # [11] scratch_buf+2 + s8i a14,a1,3 # [10] scratch_buf+3 + s8i a15,a1,7 # [9] scratch_buf+7 + s16i a10,a1,4 # [12] scratch_buf+4 + s8i a12,a1,6 # [13] scratch_buf+6 + + movi.n a10,16 # [14] + ee.st.qacc_h.l.128.ip a11,16 # [15] id:263 + ee.st.qacc_h.h.32.ip a11,-32 # [16] id:264 + ee.srcmb.s16.qacc q1,a10,0 # [17] + l8ui a8,a1,31 # [18] scratch_buf+31 + l8ui a9,a1,32 # [19] scratch_buf+32 + l16ui a12,a1,16 # [20] scratch_buf+16 + l8ui a13,a1,21 # [21] scratch_buf+21 + l8ui a14,a1,22 # [22] scratch_buf+22 + l16ui a15,a1,26 # [23] scratch_buf+26 + s8i a13,a1,10 # [26] scratch_buf+10 + s8i a14,a1,11 # [25] scratch_buf+11 + s16i a15,a1,12 # [24] scratch_buf+12 + s16i a12,a1,8 # [27] scratch_buf+8 + s8i a9,a1,15 # [28] scratch_buf+15 + s8i a8,a1,14 # [29] scratch_buf+14 + + l32i a9,a1,180 # [30] bias + ee.vld.128.ip q0,a11,0 # [31] id:164 + ee.vzip.16 q0,q1 # [33] + beqz.n a9,.Lt_4_11522 # [34] // skip bias + +// add bias + l32i a9,a1,108 # [0] gra_spill_temp_17 + addi a8,a1,112 # [1] + extui a10,a9,0,4 # [2] + wur.sar_byte a10 # [3] + ee.vld.128.ip q4,a9,16 # [4] id:279 + ee.vld.128.ip q7,a9,16 # [5] id:168 + ee.vld.128.ip q5,a9,0 # [6] id:281 + s32i a9,a1,108 # [7] gra_spill_temp_17 + ee.src.q q4,q4,q7 # [8] + ee.src.q q7,q7,q5 # [10] + ee.vadds.s32 q0,q0,q4 # [9] + ee.vadds.s32 q1,q1,q7 # [11] + st.qr q1,a1,0 # [12] gra_spill_temp_30-112 + +.Lt_4_11522: # 0x684 + +// apply quantisation: esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q0, out_mult_ptr, out_shift_ptr); + + l32i a10,a1,116 # [1] gra_spill_temp_20 + l32i a11,a1,104 # [3] gra_spill_temp_21 + st.qr q1,a1,0 # [2] gra_spill_temp_30-112 + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [4] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + l32i a10,a1,116 # [2] gra_spill_temp_20 + l32i a11,a1,104 # [0] gra_spill_temp_21 + st.qr q0,a1,16 # [3] gra_spill_temp_34-112 + ld.qr q0,a1,0 # [4] gra_spill_temp_30-112 + addi a10,a10,16 # [5] // out_mult_ptr += 4 + addi a11,a11,16 # [6] // out_shift_ptr += 4 + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [7] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + +// add offset, apply activation and store + l32i a13,a1,100 # [0] gra_spill_temp_15 + addi.n a2,a2,8 # [1] + l32i a8,a1,112 # 
[2] gra_spill_temp_18 + l32i a15,a1,116 # [3] gra_spill_temp_20 + l32i a14,a1,104 # [4] gra_spill_temp_21 + + addi a12,a1,212 + ee.vldbc.32 q3,a12 # [14] id:236 activation_max + addi a12,a1,196 + ee.vldbc.32 q1,a12 # [16] id:234 out_offset + addi a12,a1,208 + + ld.qr q2,a1,16 # [8] gra_spill_temp_34-112 + + addi a14,a14,32 # [9] + addi a15,a15,32 # [10] + s32i a15,a1,116 # [11] gra_spill_temp_20 + ee.vadds.s32 q2,q2,q1 # [12] + s32i a14,a1,104 # [13] gra_spill_temp_21 + ee.vadds.s32 q1,q0,q1 # [14] + ee.vmin.s32 q0,q2,q3 # [15] + ee.vldbc.32 q2,a12 # [16] id:234 out_offset + ee.vmin.s32 q1,q1,q3 # [17] + ee.vmax.s32 q1,q1,q2 # [18] + ee.vmax.s32 q0,q0,q2 # [19] + ee.vunzip.16 q0,q1 # [20] + ee.vunzip.8 q0,q1 # [21] + ee.vst.l.64.ip q0,a8,8 # [22] id:172 + s32i a8,a1,112 # [23] gra_spill_temp_18 + bge a2,a13,.Lt_4_8706 # [24] + +.Lt_4_9218: # 0x6f5 + ee.zero.qacc # [0] + l32i a13,a1,96 # [1] gra_spill_temp_14 + s32i a13,a1,80 # [2] gra_spill_temp_28 + bge a13,a7,.Lt_4_9474 # [3] + +.LBB12_esp_nn_depthwise_conv_s16_mult1: # 0x701 // channel_loop + mull a15,a13,a4 # [0] + l32i a14,a1,92 # [1] gra_spill_temp_13 + add.n a8,a15,a5 # [2] + add.n a14,a14,a13 # [3] + mull a14,a3,a14 # [4] + s32i a8,a1,76 # [5] gra_spill_temp_27 + bge a6,a5,.Lt_4_10242 # [6] + +.LBB15_esp_nn_depthwise_conv_s16_mult1: # 0x714 + l32i a12,a1,64 # [0] gra_spill_temp_24 + l32i a9,a1,168 # [1] filter_data + l32i a10,a1,60 # [2] gra_spill_temp_23 + l32i a11,a1,84 # [3] gra_spill_temp_29 + add.n a8,a15,a6 # [4] + add.n a11,a11,a6 # [5] + mull a8,a8,a10 # [6] + add.n a11,a14,a11 # [7] + mull a10,a10,a11 # [8] + add.n a8,a2,a8 # [9] + l32i a11,a1,52 # [10] gra_spill_temp_22 + addx2 a8,a8,a9 # [11] + add.n a10,a2,a10 # [12] + l32i a9,a1,72 # [13] gra_spill_temp_26 + addx2 a10,a10,a11 # [14] + loopgtz a9,.LBB41_esp_nn_depthwise_conv_s16_mult1 # [15] +// innermost loop + ee.vld.128.xp q0,a10,a12 # [0*II+3] id:249 + ee.vld.128.xp q1,a8,a12 # [0*II+4] id:250 + ee.vmulas.s16.qacc q0,q1 # [0*II+6] +.LBB41_esp_nn_depthwise_conv_s16_mult1: # 0x750 + +.Lt_4_10242: # 0x750 + add.n a14,a14,a3 # [0] + add.n a15,a15,a4 # [1] + l32i a9,a1,80 # [2] gra_spill_temp_28 + l32i a10,a1,76 # [3] gra_spill_temp_27 + addi.n a9,a9,1 # [4] + add.n a10,a10,a4 # [5] + s32i a10,a1,76 # [6] gra_spill_temp_27 + s32i a9,a1,80 # [7] gra_spill_temp_28 + sub a9,a7,a9 # [8] + beqz a9,.Lt_4_9474 # [9] + + blt a6,a5,.LBB15_esp_nn_depthwise_conv_s16_mult1 # [0] + + j .Lt_4_10242 # [0] + +.Lt_4_7170: # 0x770 + retw.n # [0] + + .size esp_nn_depthwise_conv_s16_mult1_esp32s3, . 
- esp_nn_depthwise_conv_s16_mult1_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult4_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult4_esp32s3.S new file mode 100644 index 0000000..792d137 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult4_esp32s3.S @@ -0,0 +1,416 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + .text + .literal_position + + # Program Unit: esp_nn_depthwise_conv_s16_mult4_esp32s3 + .type esp_nn_depthwise_conv_s16_mult4_esp32s3, @function + .align 4 + .global esp_nn_depthwise_conv_s16_mult4_esp32s3 + +esp_nn_depthwise_conv_s16_mult4_esp32s3: # 0x17c8 + # qacc_scratch = 0 + # gra_spill_temp_220 = 32 + # gra_spill_temp_221 = 36 + # gra_spill_temp_222 = 40 + # gra_spill_temp_223 = 44 + # gra_spill_temp_224 = 48 + # gra_spill_temp_225 = 52 + # gra_spill_temp_226 = 56 + # gra_spill_temp_227 = 60 + # gra_spill_temp_228 = 64 + # gra_spill_temp_229 = 68 + # gra_spill_temp_230 = 72 + # gra_spill_temp_231 = 76 + # gra_spill_temp_232 = 80 + # gra_spill_temp_233 = 84 + # gra_spill_temp_234 = 88 + # gra_spill_temp_235 = 92 + # gra_spill_temp_236 = 96 + # gra_spill_temp_237 = 100 + # gra_spill_temp_238 = 104 + # gra_spill_temp_239 = 108 + # gra_spill_temp_240 = 112 + # gra_spill_temp_241 = 116 + # gra_spill_temp_242 = 120 + # gra_spill_temp_243 = 124 + # gra_spill_temp_244 = 128 + # gra_spill_temp_245 = 132 + # gra_spill_temp_246 = 136 + # gra_spill_temp_247 = 140 + # gra_spill_temp_248 = 144 + # gra_spill_temp_249 = 148 + # gra_spill_temp_250 = 152 + # gra_spill_temp_251 = 156 + # gra_spill_temp_252 = 160 + # gra_spill_temp_253 = 164 + # gra_spill_temp_254 = 168 + # gra_spill_temp_255 = 172 + # gra_spill_temp_256 = 176 + # gra_spill_temp_257 = 192 + # gra_spill_temp_258 = 208 + # gra_spill_temp_259 = 224 + # gra_spill_temp_260 = 240 + + // registers: + // a2: const int16_t *input_data + // a3: const uint16_t input_wd + // a4: const uint16_t input_ht + // a5: const uint16_t channels + // a6: const uint16_t pad_wd + // a7: const uint16_t pad_ht + + // on stack: + // const uint16_t stride_wd + // const uint16_t stride_ht + // const uint16_t ch_mult + // const int16_t *filter_data + // const uint16_t filter_wd + // const uint16_t filter_ht + // const int32_t *bias + // int8_t *out_data + // const uint16_t out_wd + // const uint16_t out_ht + // const int32_t 
out_offset + // const int32_t *out_shift + // const int32_t *out_mult + // const int32_t activation_min + // const int32_t activation_max + + + entry a1,288 # + s32i a2,a1,136 # [0] gra_spill_temp_246 + s32i.n a4,a1,40 # [1] gra_spill_temp_222 + s32i a5,a1,164 # [2] gra_spill_temp_253 + addi a12,a1,112 # [3] + addmi a10,a1,256 # [4] + addmi a11,a1,256 # [5] + addmi a13,a1,256 # [6] + l16ui a8,a1,324 # [7] id:216 out_ht+0x0 + s32i.n a8,a1,48 # [8] gra_spill_temp_224 + addi a13,a13,72 # [9] + addi a11,a11,88 # [10] + addi a10,a10,84 # [11] + ee.vldbc.32 q0,a10 # [12] id:215 activation_min + ee.vldbc.32 q1,a11 # [13] id:214 activation_max + ee.vldbc.32 q2,a13 # [14] id:213 out_offset + st.qr q2,a12,80 # [15] gra_spill_temp_257-112 + st.qr q1,a12,96 # [16] gra_spill_temp_258-112 + st.qr q0,a12,112 # [17] gra_spill_temp_259-112 + beqz.n a8,.Lt_10_8450 # [18] + + s32i a1,a1,112 # [0] gra_spill_temp_240 + neg a15,a6 # [1] + neg a4,a7 # [2] + addmi a8,a1,256 # [3] + movi.n a9,0 # [4] + movi.n a11,0 # [5] + slli a14,a5,1 # [6] + l16ui a13,a1,296 # [7] id:217 ch_mult+0x0 + l16ui a10,a1,308 # [8] id:227 filter_ht+0x0 + s32i.n a10,a1,36 # [9] gra_spill_temp_221 + s32i a13,a1,76 # [10] gra_spill_temp_231 + s32i a14,a1,148 # [11] gra_spill_temp_249 + s32i.n a11,a1,52 # [12] gra_spill_temp_225 + s32i a9,a1,116 # [13] gra_spill_temp_241 + st.qr q4,a8,-16 # [14] gra_spill_temp_260-256 + sext a4,a4,15 # [15] + sext a15,a15,15 # [16] + s32i.n a15,a1,32 # [17] gra_spill_temp_220 + mul16u a12,a5,a13 # [18] + s32i a4,a1,92 # [19] gra_spill_temp_235 + l16ui a8,a1,320 # [20] id:229 out_wd+0x0 + l16ui a9,a1,292 # [21] id:228 stride_ht+0x0 + l32i a11,a1,336 # [22] id:226 out_mult+0x0 + s32i a11,a1,64 # [23] gra_spill_temp_228 + s32i.n a9,a1,44 # [24] gra_spill_temp_223 + s32i a8,a1,68 # [25] gra_spill_temp_229 + l32i a4,a1,300 # [26] id:218 filter_data+0x0 + s32i a12,a1,140 # [27] gra_spill_temp_247 + l32i a15,a1,316 # [28] id:219 out_data+0x0 + s32i a15,a1,96 # [29] gra_spill_temp_236 + slli a12,a12,1 # [30] + s32i a4,a1,152 # [31] gra_spill_temp_250 + addi a14,a13,-3 # [32] + l16ui a4,a1,304 # [33] id:223 filter_wd+0x0 + s32i a14,a1,108 # [34] gra_spill_temp_239 + s32i a12,a1,144 # [35] gra_spill_temp_248 + slli a13,a13,2 # [36] + s32i a13,a1,80 # [37] gra_spill_temp_232 + l32i a12,a1,332 # [38] id:225 out_shift+0x0 + l32i a14,a1,312 # [39] id:222 bias+0x0 + s32i a14,a1,104 # [40] gra_spill_temp_238 + s32i.n a12,a1,60 # [41] gra_spill_temp_227 + l16ui a13,a1,288 # [42] id:224 stride_wd+0x0 + s32i.n a13,a1,56 # [43] gra_spill_temp_226 + j .Lt_10_8962 # [44] + +.Lt_10_9218: # 0x1880 + l32i.n a9,a1,48 # [0] gra_spill_temp_224 + l32i.n a11,a1,44 # [1] gra_spill_temp_223 + l32i.n a8,a1,52 # [2] gra_spill_temp_225 + l32i a10,a1,92 # [3] gra_spill_temp_235 + addi.n a8,a8,1 # [4] + s32i.n a8,a1,52 # [5] gra_spill_temp_225 + add.n a11,a10,a11 # [6] + sub a8,a8,a9 # [7] + sext a10,a11,15 # [8] + s32i a10,a1,92 # [9] gra_spill_temp_235 + beqz a8,.Lt_10_8450 # [10] + +.Lt_10_8962: # 0x189b +# Loop body line 1223, nesting depth: 1, estimated iterations: 100 + #1224 const int16_t base_y = (out_y * stride_ht) - pad_ht; + #1225 for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + l32i a12,a1,68 # [0] gra_spill_temp_229 + beqz.n a12,.Lt_10_9218 # [2] + +.LBB6_esp_nn_depthwise_conv_s16_mult4: # 0x18a0 + l32i.n a7,a1,36 # [0] gra_spill_temp_221 + movi.n a11,0 # [1] + l32i.n a8,a1,40 # [2] gra_spill_temp_222 + l32i a9,a1,92 # [3] gra_spill_temp_235 + movi.n a13,0 # [4] + l32i.n a14,a1,32 # [5] gra_spill_temp_220 + s32i 
a14,a1,160 # [6] gra_spill_temp_252 + s32i a13,a1,72 # [7] gra_spill_temp_230 + neg a10,a9 # [8] + sub a8,a8,a9 # [9] + max a10,a10,a11 # [10] + s32i a10,a1,100 # [11] gra_spill_temp_237 + min a7,a7,a8 # [12] + j .Lt_10_9730 # [13] + +.Lt_10_9986: # 0x18c5 + l32i a13,a1,68 # [0] gra_spill_temp_229 + l32i.n a15,a1,56 # [1] gra_spill_temp_226 + l32i a12,a1,72 # [2] gra_spill_temp_230 + l32i a14,a1,160 # [3] gra_spill_temp_252 + addi.n a12,a12,1 # [4] + s32i a12,a1,72 # [5] gra_spill_temp_230 + add.n a15,a14,a15 # [6] + sext a14,a15,15 # [7] + s32i a14,a1,160 # [8] gra_spill_temp_252 + beq a12,a13,.Lt_10_9218 # [9] + +.Lt_10_9730: # 0x18e0 + l32i a8,a1,164 # [0] gra_spill_temp_253 + l32i a9,a1,64 # [1] gra_spill_temp_228 + l32i.n a10,a1,60 # [2] gra_spill_temp_227 + s32i a10,a1,132 # [3] gra_spill_temp_245 + s32i a9,a1,128 # [4] gra_spill_temp_244 + beqz.n a8,.Lt_10_9986 # [5] + + movi.n a8,0 # [0] + l32i a5,a1,160 # [1] gra_spill_temp_252 + movi.n a12,0 # [2] + movi.n a13,0 # [3] + movi.n a14,0 # [4] + s32i a14,a1,84 # [5] gra_spill_temp_233 + s32i a13,a1,88 # [6] gra_spill_temp_234 + s32i a12,a1,176 # [7] gra_spill_temp_256 + neg a6,a5 # [8] + max a6,a6,a8 # [9] + sub a5,a3,a5 # [10] + min a5,a4,a5 # [11] + sub a11,a5,a6 # [12] + s32i a11,a1,156 # [13] gra_spill_temp_251 + j .Lt_10_10498 # [14] + +.Lt_10_10754: # 0x1919 + l32i a10,a1,164 # [0] gra_spill_temp_253 + l32i a14,a1,76 # [1] gra_spill_temp_231 + l32i a13,a1,84 # [2] gra_spill_temp_233 + l32i a12,a1,80 # [3] gra_spill_temp_232 + l32i a9,a1,176 # [4] gra_spill_temp_256 + l32i a11,a1,88 # [5] gra_spill_temp_234 + addi.n a9,a9,1 # [6] + s32i a9,a1,176 # [7] gra_spill_temp_256 + add.n a11,a11,a12 # [8] + add.n a13,a13,a14 # [9] + s32i a13,a1,84 # [10] gra_spill_temp_233 + s32i a11,a1,88 # [11] gra_spill_temp_234 + beq a9,a10,.Lt_10_9986 # [12] + +.Lt_10_10498: # 0x193d + l32i a15,a1,108 # [0] gra_spill_temp_239 + blti a15,1,.Lt_10_10754 # [2] + + l32i a2,a1,84 # [0] gra_spill_temp_233 + l32i a10,a1,104 # [1] gra_spill_temp_238 + l32i a9,a1,88 # [2] gra_spill_temp_234 + movi.n a8,0 # [3] + s32i a8,a1,120 # [4] gra_spill_temp_242 + add.n a9,a9,a10 # [5] + s32i a9,a1,124 # [6] gra_spill_temp_243 + j .Lt_10_11266 # [7] + +.Lt_10_11522: # 0x1959 + addmi a12,a1,256 # [0] + l32i a14,a1,112 # [1] gra_spill_temp_240 + movi.n a13,16 # [2] + ee.st.qacc_l.l.128.ip a14,16 # [3] id:234 + ee.st.qacc_l.h.32.ip a14,-16 # [4] id:235 + ee.srcmb.s16.qacc q5,a13,0 # [5] + l16ui a15,a1,10 # [6] qacc_scratch+10 + l8ui a8,a1,15 # [7] qacc_scratch+15 + l8ui a9,a1,5 # [8] qacc_scratch+5 + l8ui a11,a1,16 # [9] qacc_scratch+16 + l8ui a10,a1,6 # [10] qacc_scratch+6 + s8i a10,a1,3 # [11] qacc_scratch+3 + s8i a11,a1,7 # [12] qacc_scratch+7 + s8i a9,a1,2 # [13] qacc_scratch+2 + + l32i a11,a1,104 # [14] gra_spill_temp_238 + s8i a8,a1,6 # [15] qacc_scratch+6 + s16i a15,a1,4 # [16] qacc_scratch+4 + ee.vld.l.64.ip q0,a14,0 # [17] id:245 + s32i a14,a1,112 # [18] gra_spill_temp_240 + ee.vzip.16 q0,q5 # [19] + st.qr q5,a12,-16 # [20] gra_spill_temp_260-256 + + beqz.n a11,.Lt_10_13570 # [21] // skip_bias + + // add bias + l32i a13,a1,124 # [0] gra_spill_temp_243 + extui a12,a13,0,4 # [2] + ee.vld.128.ip q7,a13,16 # [3] id:248 + ee.vld.128.ip q1,a13,0 # [4] id:249 + wur.sar_byte a12 # [5] + ee.src.q.qup q6,q7,q1 # [6] + ee.vadds.s32 q0,q0,q6 # [7] + +.Lt_10_13570: # 0x19ae + #1287 q0 = esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q0, out_mult_ptr, out_shift_ptr); + l32i a10,a1,128 # [0] gra_spill_temp_244 + l32i a11,a1,132 # [1] gra_spill_temp_245 + call8 
esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [2] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + addi.n a2,a2,4 # [0] + l32i a13,a1,96 # [1] gra_spill_temp_236 + l32i a11,a1,128 # [2] gra_spill_temp_244 + l32i a10,a1,132 # [3] gra_spill_temp_245 + addi a8,a1,112 # [4] + ld.qr q1,a8,96 # [5] gra_spill_temp_258-112 + ld.qr q2,a8,80 # [6] gra_spill_temp_257-112 + addi a10,a10,16 # [7] + addi a11,a11,16 # [8] + s32i a11,a1,128 # [9] gra_spill_temp_244 + ee.vadds.s32 q0,q0,q2 # [10] + s32i a10,a1,132 # [11] gra_spill_temp_245 + ee.vmin.s32 q0,q0,q1 # [12] + ld.qr q1,a8,112 # [13] gra_spill_temp_259-112 + l32i a8,a1,116 # [14] gra_spill_temp_241 + ee.vmax.s32 q0,q0,q1 # [15] + ee.movi.32.a q0,a14,2 # [16] + ee.movi.32.a q0,a15,1 # [17] + ee.movi.32.a q0,a9,0 # [18] + add.n a13,a8,a13 # [19] + ee.movi.32.a q0,a12,3 # [20] + addi.n a8,a8,4 # [21] + s8i a12,a13,3 # [22] id:254 + s32i a8,a1,116 # [23] gra_spill_temp_241 + s8i a9,a13,0 # [24] id:251 + s8i a15,a13,1 # [25] id:252 + s8i a14,a13,2 # [26] id:253 + l32i a15,a1,108 # [27] gra_spill_temp_239 + l32i a14,a1,120 # [28] gra_spill_temp_242 + l32i a9,a1,124 # [29] gra_spill_temp_243 + addi.n a14,a14,4 # [30] + addi a9,a9,16 # [31] + s32i a9,a1,124 # [32] gra_spill_temp_243 + s32i a14,a1,120 # [33] gra_spill_temp_242 + bge a14,a15,.Lt_10_10754 # [34] + +.Lt_10_11266: # 0x1a1c +# Loop body line 1230, nesting depth: 4, estimated iterations: 100 + ee.zero.qacc # [0] + l32i a9,a1,100 # [1] gra_spill_temp_237 + s32i a9,a1,172 # [2] gra_spill_temp_255 + bge a9,a7,.Lt_10_11522 # [3] + + mull a15,a9,a4 # [0] + l32i a14,a1,92 # [1] gra_spill_temp_235 + add.n a11,a15,a5 # [2] + add.n a14,a14,a9 # [3] + mull a14,a3,a14 # [4] + s32i a11,a1,168 # [5] gra_spill_temp_254 + bge a6,a5,.Lt_10_12290 # [6] + +.LBB18_esp_nn_depthwise_conv_s16_mult4: # 0x1a3b + l32i a10,a1,176 # [0] gra_spill_temp_256 + l32i a11,a1,164 # [1] gra_spill_temp_253 + l32i a12,a1,160 # [2] gra_spill_temp_252 + add.n a9,a15,a6 # [3] + l32i a8,a1,140 # [4] gra_spill_temp_247 + addmi a13,a1,256 # [5] + ld.qr q1,a13,-16 # [6] gra_spill_temp_260-256 + mull a8,a8,a9 # [7] + add.n a12,a12,a6 # [8] + l32i a9,a1,152 # [9] gra_spill_temp_250 + add.n a12,a14,a12 # [10] + mull a11,a11,a12 # [11] + add.n a8,a2,a8 # [12] + l32i a12,a1,148 # [13] gra_spill_temp_249 + addx2 a8,a8,a9 # [14] + add.n a10,a10,a11 # [15] + l32i a11,a1,136 # [16] gra_spill_temp_246 + l32i a9,a1,156 # [17] gra_spill_temp_251 + addx2 a10,a10,a11 # [18] + l32i a11,a1,144 # [19] gra_spill_temp_248 + loopgtz a9,.LBB45_esp_nn_depthwise_conv_s16_mult4 # [20] + + mov.n a9,a8 # [0*II+0] + ee.vldbc.16 q0,a10 # [0*II+1] id:232 + add.n a10,a10,a12 # [0*II+2] + ee.vld.l.64.ip q1,a9,0 # [0*II+3] id:231 + add.n a8,a8,a11 # [0*II+4] + ee.vmulas.s16.qacc q0,q1 # [0*II+5] +.LBB45_esp_nn_depthwise_conv_s16_mult4: # 0x1a84 + + addmi a10,a1,256 # [0] + st.qr q1,a10,-16 # [1] gra_spill_temp_260-256 + +.Lt_10_12290: # 0x1a8a + add.n a14,a14,a3 # [0] + add.n a15,a15,a4 # [1] + l32i a11,a1,172 # [2] gra_spill_temp_255 + l32i a12,a1,168 # [3] gra_spill_temp_254 + addi.n a11,a11,1 # [4] + add.n a12,a12,a4 # [5] + s32i a12,a1,168 # [6] gra_spill_temp_254 + s32i a11,a1,172 # [7] gra_spill_temp_255 + sub a11,a7,a11 # [8] + beqz a11,.Lt_10_11522 # [9] + + blt a6,a5,.LBB18_esp_nn_depthwise_conv_s16_mult4 # [0] + + j .Lt_10_12290 # [0] + +.Lt_10_8450: # 0x1aaa + retw.n # [0] + + .size esp_nn_depthwise_conv_s16_mult4_esp32s3, . 
- esp_nn_depthwise_conv_s16_mult4_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3.S new file mode 100644 index 0000000..b894713 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3.S @@ -0,0 +1,458 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + .text + .literal_position + + # Program Unit: esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3 + .type esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3, @function + .align 4 + .global esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3 + +esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3: # 0x11b3 + # qacc_scratch = 0 + # gra_spill_temp_142 = 48 + # gra_spill_temp_143 = 52 + # gra_spill_temp_144 = 56 + # gra_spill_temp_145 = 60 + # gra_spill_temp_146 = 64 + # gra_spill_temp_147 = 68 + # gra_spill_temp_148 = 72 + # gra_spill_temp_149 = 76 + # gra_spill_temp_150 = 80 + # gra_spill_temp_151 = 84 + # gra_spill_temp_152 = 88 + # gra_spill_temp_153 = 92 + # gra_spill_temp_154 = 96 + # gra_spill_temp_155 = 100 + # gra_spill_temp_156 = 104 + # gra_spill_temp_157 = 108 + # gra_spill_temp_158 = 112 + # gra_spill_temp_159 = 116 + # gra_spill_temp_160 = 120 + # gra_spill_temp_161 = 124 + # gra_spill_temp_162 = 128 + # gra_spill_temp_163 = 132 + # gra_spill_temp_164 = 136 + # gra_spill_temp_165 = 140 + # gra_spill_temp_166 = 144 + # gra_spill_temp_167 = 148 + # gra_spill_temp_168 = 152 + # gra_spill_temp_169 = 156 + # gra_spill_temp_170 = 160 + # gra_spill_temp_171 = 164 + # gra_spill_temp_172 = 168 + # gra_spill_temp_173 = 172 + # gra_spill_temp_174 = 176 + # gra_spill_temp_175 = 180 + # gra_spill_temp_176 = 184 + # gra_spill_temp_177 = 188 + # gra_spill_temp_178 = 192 + # gra_spill_temp_179 = 208 + # gra_spill_temp_180 = 224 + # gra_spill_temp_181 = 240 + # gra_spill_temp_182 = 256 + + // registers: + // a2: const int16_t *input_data + // a3: const uint16_t input_wd + // a4: const uint16_t input_ht + // a5: const uint16_t channels + // a6: const uint16_t pad_wd + // a7: const uint16_t pad_ht + + // const uint16_t stride_wd + // const uint16_t stride_ht + // const uint16_t ch_mult + // const int16_t *filter_data + // const int32_t *bias + // int8_t *out_data + // const uint16_t out_wd + // const uint16_t out_ht + // const int32_t out_offset + // const int32_t *out_shift + // 
const int32_t *out_mult + // const int32_t activation_min + // const int32_t activation_max + + entry a1,304 # + s32i a2,a1,116 # [0] gra_spill_temp_159 + s32i a3,a1,120 # [1] gra_spill_temp_160 + s32i a5,a1,144 # [2] gra_spill_temp_166 + s32i.n a6,a1,60 # [3] gra_spill_temp_145 + + addmi a9,a1,256 # [4] + addi a12,a1,112 # [5] + addmi a10,a1,256 # [6] + addmi a11,a1,256 # [7] + addmi a13,a1,256 # [8] + + // height loop + l16ui a8,a1,332 # [9] id:261 out_ht+0x0 + l32i a14,a1,324 # [10] id:257 out_data+0x0 + s32i a14,a1,176 # [11] gra_spill_temp_174 + s32i a8,a1,68 # [12] gra_spill_temp_147 + addi a13,a13,80 # [13] + addi a11,a11,96 # [14] + addi a10,a10,92 # [15] + ee.vldbc.32 q0,a10 # [16] id:260 activation_min + ee.vldbc.32 q1,a11 # [17] id:259 activation_max + ee.vldbc.32 q2,a13 # [18] id:258 out_offset + st.qr q2,a12,96 # [19] gra_spill_temp_179-112 + st.qr q1,a12,112 # [20] gra_spill_temp_180-112 + st.qr q0,a9,-16 # [21] gra_spill_temp_181-256 + beqz.n a8,.Lt_8_8194 # [22] + +.LBB3_esp_nn_depthwise_conv_s16_mult8_3x3: # 0x11f9 + s32i a1,a1,180 # [0] gra_spill_temp_175 + mul16u a6,a3,a5 # [1] + s32i a7,a1,76 # [2] gra_spill_temp_149 + l32i a9,a1,316 # [3] id:264 filter_data+0x0 + l32i a15,a1,320 # [4] id:262 bias+0x0 + l16ui a10,a1,312 # [5] id:263 ch_mult+0x0 + slli a11,a5,1 # [6] + l16ui a12,a1,308 # [7] id:268 stride_ht+0x0 + l32i a13,a1,344 # [8] id:267 out_mult+0x0 + l32i a14,a1,340 # [9] id:266 out_shift+0x0 + s32i a14,a1,88 # [10] gra_spill_temp_152 + s32i a13,a1,92 # [11] gra_spill_temp_153 + s32i a12,a1,64 # [12] gra_spill_temp_146 + s32i a11,a1,124 # [13] gra_spill_temp_161 + s32i a10,a1,108 # [14] gra_spill_temp_157 + s32i a15,a1,160 # [15] gra_spill_temp_170 + s32i a9,a1,128 # [16] gra_spill_temp_162 + neg a7,a7 # [17] + slli a6,a6,1 # [18] + s32i a7,a1,136 # [19] gra_spill_temp_164 + movi.n a9,0 # [20] + extui a15,a15,0,4 # [21] + s32i a15,a1,152 # [22] gra_spill_temp_168 + s32i a9,a1,72 # [23] gra_spill_temp_148 + sub a7,a4,a7 # [24] + l32i.n a9,a1,60 # [25] gra_spill_temp_145 + s32i a7,a1,80 # [26] gra_spill_temp_150 + l16ui a4,a1,328 # [27] id:269 out_wd+0x0 + s32i a4,a1,96 # [28] gra_spill_temp_154 + l16ui a7,a1,304 # [29] id:265 stride_wd+0x0 + s32i a7,a1,84 # [30] gra_spill_temp_151 + mul16u a4,a5,a10 # [31] + neg a9,a9 # [32] + s32i.n a9,a1,52 # [33] gra_spill_temp_143 + sub a8,a3,a9 # [34] + addi a10,a10,-7 # [35] + s32i a10,a1,164 # [36] gra_spill_temp_171 + s32i.n a8,a1,56 # [37] gra_spill_temp_144 + addx2 a7,a4,a4 # [38] + slli a7,a7,1 # [39] + j .Lt_8_8706 # [40] + +.Lt_8_8962: # 0x1270 +# Part of loop body line 933, head labeled .Lt_8_8706 + l32i a10,a1,68 # [0] gra_spill_temp_147 + l32i a14,a1,76 # [1] gra_spill_temp_149 + l32i a13,a1,136 # [2] gra_spill_temp_164 + l32i a12,a1,64 # [3] gra_spill_temp_146 + l32i a9,a1,72 # [4] gra_spill_temp_148 + l32i a11,a1,80 # [5] gra_spill_temp_150 + addi.n a9,a9,1 # [6] + s32i a9,a1,72 # [7] gra_spill_temp_148 + sub a11,a11,a12 # [8] + add.n a13,a13,a12 # [9] + sub a14,a14,a12 # [10] + s32i a14,a1,76 # [11] gra_spill_temp_149 + s32i a13,a1,136 # [12] gra_spill_temp_164 + s32i a11,a1,80 # [13] gra_spill_temp_150 + sub a9,a9,a10 # [14] + beqz a9,.Lt_8_8194 # [15] + +.Lt_8_8706: # 0x129e +# Loop body line 933, nesting depth: 1, estimated iterations: 100 + # 934 const int32_t base_y = (out_y * stride_ht) - pad_ht; + # 935 for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + l32i a15,a1,96 # [0] gra_spill_temp_154 + beqz.n a15,.Lt_8_8962 # [2] + +.LBB6_esp_nn_depthwise_conv_s16_mult8_3x3: # 0x12a3 +# Part of loop 
body line 933, head labeled .Lt_8_8706 + l32i.n a3,a1,56 # [0] gra_spill_temp_144 + l32i a8,a1,80 # [1] gra_spill_temp_150 + movi.n a10,0 # [2] + l32i a9,a1,76 # [3] gra_spill_temp_149 + movi.n a11,0 # [4] + l32i.n a12,a1,52 # [5] gra_spill_temp_143 + l32i.n a13,a1,60 # [6] gra_spill_temp_145 + s32i a13,a1,104 # [7] gra_spill_temp_156 + s32i a12,a1,140 # [8] gra_spill_temp_165 + s32i a11,a1,100 # [9] gra_spill_temp_155 + max a9,a9,a10 # [10] + movi.n a10,3 # [11] + s32i a9,a1,172 # [12] gra_spill_temp_173 + min a8,a8,a10 # [13] + s32i a8,a1,156 # [14] gra_spill_temp_169 + sub a8,a8,a9 # [15] + s32i a8,a1,132 # [16] gra_spill_temp_163 + j .Lt_8_9474 # [17] + +.Lt_8_9730: # 0x12d3 +# Part of loop body line 935, head labeled .Lt_8_9474 + l32i a15,a1,96 # [0] gra_spill_temp_154 + l32i a10,a1,104 # [1] gra_spill_temp_156 + l32i a9,a1,140 # [2] gra_spill_temp_165 + l32i a8,a1,84 # [3] gra_spill_temp_151 + l32i a14,a1,100 # [4] gra_spill_temp_155 + sub a3,a3,a8 # [5] + addi.n a14,a14,1 # [6] + s32i a14,a1,100 # [7] gra_spill_temp_155 + add.n a9,a9,a8 # [8] + sub a10,a10,a8 # [9] + s32i a10,a1,104 # [10] gra_spill_temp_156 + s32i a9,a1,140 # [11] gra_spill_temp_165 + beq a14,a15,.Lt_8_8962 # [12] + +.Lt_8_9474: # 0x12f8 + # 936 const int32_t base_x = (out_x * stride_wd) - pad_wd; + # 937 const int32_t *out_mult_ptr = out_mult; + # 938 const int32_t *out_shift_ptr = out_shift; + l32i a2,a1,88 # [0] gra_spill_temp_152 + l32i a10,a1,92 # [1] gra_spill_temp_153 + # 939 uint32_t bias_ptr = (uint32_t) (bias); + l32i a12,a1,160 # [2] gra_spill_temp_170 + # 940 + # 941 for (int ch_idx = 0; ch_idx < channels; ch_idx++) {//channel_loop + l32i a11,a1,144 # [3] gra_spill_temp_166 + s32i a12,a1,168 # [4] gra_spill_temp_172 + beqz.n a11,.Lt_8_9730 # [5] + +.LBB9_esp_nn_depthwise_conv_s16_mult8_3x3: # 0x1309 +# Part of loop body line 935, head labeled .Lt_8_9474 + movi.n a8,0 # [0] + l32i a5,a1,104 # [1] gra_spill_temp_156 + movi.n a13,0 # [2] + movi.n a9,0 # [3] + s32i a9,a1,112 # [4] gra_spill_temp_158 + s32i a13,a1,148 # [5] gra_spill_temp_167 + max a5,a5,a8 # [6] + j .Lt_8_10242 # [7] + +.Lt_8_10498: # 0x131e +# Part of loop body line 941, head labeled .Lt_8_10242 + l32i a12,a1,144 # [0] gra_spill_temp_166 + l32i a14,a1,108 # [1] gra_spill_temp_157 + l32i a11,a1,148 # [2] gra_spill_temp_167 + l32i a13,a1,112 # [3] gra_spill_temp_158 + addi.n a11,a11,1 # [4] + s32i a11,a1,148 # [5] gra_spill_temp_167 + add.n a13,a13,a14 # [6] + s32i a13,a1,112 # [7] gra_spill_temp_158 + beq a11,a12,.Lt_8_9730 # [8] + +.Lt_8_10242: # 0x1337 + # 942 for (int ch_mult_idx = 0; ch_mult_idx < ch_mult - 7; ch_mult_idx += 8) { + l32i a15,a1,164 # [0] gra_spill_temp_171 + blti a15,1,.Lt_8_10498 # [2] + + movi.n a8,0 # [0] + l32i a9,a1,112 # [1] gra_spill_temp_158 + s32i a9,a1,188 # [2] gra_spill_temp_177 + s32i a8,a1,184 # [3] gra_spill_temp_176 + j .Lt_8_11010 # [4] + +.LBB23_esp_nn_depthwise_conv_s16_mult8_3x3: # 0x134b + s32i.n a10,a1,48 # [0] gra_spill_temp_142 + addi a11,a1,112 # [1] + l32i a13,a1,152 # [2] gra_spill_temp_168 + l32i a12,a1,168 # [3] gra_spill_temp_172 + wur.sar_byte a13 # [4] + ee.vld.128.ip q4,a12,16 # [5] id:307 + ee.vld.128.ip q7,a12,16 # [6] id:308 + ee.vld.128.ip q5,a12,0 # [7] id:309 + s32i a12,a1,168 # [8] gra_spill_temp_172 + ee.src.q.qup q6,q4,q7 # [9] + ee.vadds.s32 q0,q0,q6 # [10] + ee.src.q.qup q3,q4,q5 # [11] + ee.vadds.s32 q1,q1,q3 # [12] + st.qr q1,a11,80 # [13] gra_spill_temp_178-112 + +.Lt_8_13314: # 0x1374 + #1025 q0 = esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q0, out_mult_ptr, 
out_shift_ptr); + l32i.n a10,a1,48 # [0] gra_spill_temp_142 + mov.n a11,a2 # [1] + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + #1026 out_mult_ptr += 4; + #1027 out_shift_ptr += 4; + #1028 + #1029 q1 = esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q1, out_mult_ptr, out_shift_ptr); + l32i.n a10,a1,48 # [0] gra_spill_temp_142 + addmi a12,a1,256 # [1] + addi a11,a1,112 # [2] + st.qr q0,a12,0 # [3] gra_spill_temp_182-256 + ld.qr q0,a11,80 # [4] gra_spill_temp_178-112 + addi a10,a10,16 # [5] + addi a11,a2,16 # [6] + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + +# Part of loop body line 942, head labeled .Lt_8_11010 + #1030 out_mult_ptr += 4; + #1031 out_shift_ptr += 4; + addi a2,a2,32 # [0] + l32i a14,a1,164 # [1] gra_spill_temp_171 + + l32i a8,a1,176 # [2] gra_spill_temp_174 + l32i a15,a1,188 # [3] gra_spill_temp_177 + l32i a13,a1,184 # [4] gra_spill_temp_176 + l32i.n a10,a1,48 # [5] gra_spill_temp_142 + addmi a11,a1,256 # [6] + addi a12,a1,112 # [7] + ld.qr q3,a12,112 # [8] gra_spill_temp_180-112 + ld.qr q1,a12,96 # [9] gra_spill_temp_179-112 + ld.qr q2,a11,0 # [10] gra_spill_temp_182-256 + addi a10,a10,32 # [11] + addi.n a13,a13,8 # [12] + addi.n a15,a15,8 # [13] + s32i a15,a1,188 # [14] gra_spill_temp_177 + ee.vadds.s32 q2,q2,q1 # [15] + s32i a13,a1,184 # [16] gra_spill_temp_176 + ee.vadds.s32 q1,q0,q1 # [17] + ee.vmin.s32 q0,q2,q3 # [18] + ld.qr q2,a11,-16 # [19] gra_spill_temp_181-256 + ee.vmin.s32 q1,q1,q3 # [20] + ee.vmax.s32 q1,q1,q2 # [21] + ee.vmax.s32 q0,q0,q2 # [22] + ee.vunzip.16 q0,q1 # [23] + ee.vunzip.8 q0,q1 # [24] + ee.vst.l.64.ip q0,a8,8 # [25] id:312 + s32i a8,a1,176 # [26] gra_spill_temp_174 + bge a13,a14,.Lt_8_10498 # [27] + +.Lt_8_11010: # 0x13e3 +# Loop body line 942, nesting depth: 4, estimated iterations: 100 + l32i a14,a1,156 # [0] gra_spill_temp_169 + l32i a13,a1,172 # [1] gra_spill_temp_173 + ee.zero.qacc # [2] + bge a13,a14,.Lt_8_11266 # [3] + +.LBB15_esp_nn_depthwise_conv_s16_mult8_3x3: # 0x13ef +# Part of loop body line 942, head labeled .Lt_8_11010 + l32i a12,a1,124 # [0] gra_spill_temp_161 + l32i a8,a1,140 # [1] gra_spill_temp_165 + l32i a11,a1,120 # [2] gra_spill_temp_160 + l32i a14,a1,188 # [3] gra_spill_temp_177 + l32i a9,a1,136 # [4] gra_spill_temp_164 + mull a15,a4,a13 # [5] + add.n a9,a9,a13 # [6] + addx2 a15,a15,a15 # [7] + l32i a13,a1,148 # [8] gra_spill_temp_167 + add.n a14,a14,a15 # [9] + mull a9,a9,a11 # [10] + l32i a15,a1,144 # [11] gra_spill_temp_166 + add.n a8,a8,a9 # [12] + mull a15,a15,a8 # [13] + l32i a8,a1,128 # [14] gra_spill_temp_162 + add.n a13,a13,a15 # [15] + l32i a15,a1,116 # [16] gra_spill_temp_159 + addx2 a14,a14,a8 # [17] + addx2 a13,a13,a15 # [18] + add.n a11,a12,a13 # [19] + l32i a15,a1,132 # [20] gra_spill_temp_163 + add.n a12,a12,a11 # [21] + loopgtz a15,.LBB34_esp_nn_depthwise_conv_s16_mult8_3x3 # [22] + +.Lt_8_11778: # 0x142e + mov.n a15,a14 # [0] + mov.n a9,a14 # [1] + bnez.n a5,.Lt_8_12034 # [2] + + ee.vldbc.16 q3,a13 # [0] id:271 + mov.n a9,a14 # [1] + ee.vld.128.ip q4,a9,0 # [2] id:272 + ee.vmulas.s16.qacc q3,q4 # [4] + +.Lt_8_12034: # 0x143f + ee.vldbc.16 q5,a11 # [0] id:274 + addx2 a9,a4,a9 # [1] + ee.vld.128.ip q6,a9,0 # [2] id:275 + add.n a13,a13,a6 # [3] + ee.vmulas.s16.qacc q5,q6 # [4] + blti a3,3,.Lt_8_12546 # [5] + + ee.vldbc.16 q7,a12 # [0] id:277 + addx2 a14,a4,a9 # [1] + ee.vld.128.ip q0,a14,0 # [2] id:278 + ee.vmulas.s16.qacc q7,q0 # [4] + +.Lt_8_12546: # 0x145c +# Part of loop body line 953, head labeled .Lt_8_11778 + add.n a11,a11,a6 # [0] + add.n a12,a12,a6 # [1] + add.n a14,a7,a15 # 
[2] + +.LBB34_esp_nn_depthwise_conv_s16_mult8_3x3: # 0x1464 +.Lt_8_11266: # 0x1464 + + l32i a8,a1,180 # [0] gra_spill_temp_175 + ee.st.qacc_l.l.128.ip a8,16 # [2] id:280 + ee.st.qacc_l.h.32.ip a8,0 # [3] id:281 + l16ui a9,a1,10 # [4] qacc_scratch+10 + l8ui a11,a1,15 # [5] qacc_scratch+15 + l8ui a12,a1,5 # [6] qacc_scratch+5 + l8ui a13,a1,6 # [7] qacc_scratch+6 + l8ui a14,a1,16 # [8] qacc_scratch+16 + s8i a14,a1,7 # [9] qacc_scratch+7 + s8i a13,a1,3 # [10] qacc_scratch+3 + s8i a12,a1,2 # [11] qacc_scratch+2 + s8i a11,a1,6 # [12] qacc_scratch+6 + s16i a9,a1,4 # [13] qacc_scratch+4 + ee.st.qacc_h.l.128.ip a8,16 # [14] id:291 + ee.st.qacc_h.h.32.ip a8,-32 # [15] id:292 + l16ui a9,a1,16 # [16] qacc_scratch+16 + l8ui a15,a1,32 # [17] qacc_scratch+32 + l8ui a12,a1,22 # [18] qacc_scratch+22 + l8ui a11,a1,21 # [19] qacc_scratch+21 + l8ui a14,a1,31 # [20] qacc_scratch+31 + l16ui a13,a1,26 # [21] qacc_scratch+26 + s16i a13,a1,12 # [22] qacc_scratch+12 + s8i a14,a1,14 # [23] qacc_scratch+14 + s8i a11,a1,10 # [24] qacc_scratch+10 + s8i a12,a1,11 # [25] qacc_scratch+11 + s8i a15,a1,15 # [26] qacc_scratch+15 + s16i a9,a1,8 # [27] qacc_scratch+8 + l32i a15,a1,160 # [28] gra_spill_temp_170 + movi.n a9,16 # [29] + ee.srcmb.s16.qacc q1,a9,0 # [30] + ee.vld.128.ip q0,a8,0 # [31] id:304 + s32i a8,a1,180 # [32] gra_spill_temp_175 + ee.vzip.16 q0,q1 # [33] + bnez.n a15,.LBB23_esp_nn_depthwise_conv_s16_mult8_3x3 # [34] + + s32i.n a10,a1,48 # [0] gra_spill_temp_142 + addi a15,a1,112 # [1] + st.qr q1,a15,80 # [2] gra_spill_temp_178-112 + j .Lt_8_13314 # [3] + +.Lt_8_8194: # 0x14d3 + retw.n # [0] + + .size esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3, . - esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult8_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult8_esp32s3.S new file mode 100644 index 0000000..4f9143b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s16_mult8_esp32s3.S @@ -0,0 +1,432 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
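+
+// Generic-filter-size counterpart of the 3x3 kernel above. The C dispatcher in
+// esp_nn_depthwise_conv_s8_esp32s3.c promotes inputs and filters to int16 (with
+// the input offset folded in) and calls this routine when ch_mult is a multiple
+// of 8 and the filter is not 3x3. Roughly, for every group of 8 channel-mult
+// outputs it performs the same math as the ANSI fallback
+// esp_nn_depthwise_conv_s8_unrolled elsewhere in this patch:
+//
+//   acc    = sum over the clipped filter window of input16 * filter16 (+ bias)
+//   out8   = clamp(requantize(acc, out_mult[ch], out_shift[ch]) + out_offset,
+//                  activation_min, activation_max)
+//
+// Accumulation runs in QACC, requantization goes through
+// esp_nn_multiply_by_quantized_mult_ver1_esp32s3, and the results are narrowed
+// back to int8 with ee.vunzip before the store.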
+ + .text + .literal_position + + # Program Unit: esp_nn_depthwise_conv_s16_mult8_esp32s3 + .type esp_nn_depthwise_conv_s16_mult8_esp32s3, @function + .align 4 + .global esp_nn_depthwise_conv_s16_mult8_esp32s3 + +esp_nn_depthwise_conv_s16_mult8_esp32s3: # 0x14d7 + # qacc_scratch = 0 + # gra_spill_temp_183 = 48 + # gra_spill_temp_184 = 52 + # gra_spill_temp_185 = 56 + # gra_spill_temp_186 = 60 + # gra_spill_temp_187 = 64 + # gra_spill_temp_188 = 68 + # gra_spill_temp_189 = 72 + # gra_spill_temp_190 = 76 + # gra_spill_temp_191 = 80 + # gra_spill_temp_192 = 84 + # gra_spill_temp_193 = 88 + # gra_spill_temp_194 = 92 + # gra_spill_temp_195 = 96 + # gra_spill_temp_196 = 100 + # gra_spill_temp_197 = 104 + # gra_spill_temp_198 = 108 + # gra_spill_temp_199 = 112 + # gra_spill_temp_200 = 116 + # gra_spill_temp_201 = 120 + # gra_spill_temp_202 = 124 + # gra_spill_temp_203 = 128 + # gra_spill_temp_204 = 132 + # gra_spill_temp_205 = 136 + # gra_spill_temp_206 = 140 + # gra_spill_temp_207 = 144 + # gra_spill_temp_208 = 148 + # gra_spill_temp_209 = 152 + # gra_spill_temp_210 = 156 + # gra_spill_temp_211 = 160 + # gra_spill_temp_212 = 164 + # gra_spill_temp_213 = 168 + # gra_spill_temp_214 = 172 + # gra_spill_temp_215 = 176 + # gra_spill_temp_216 = 180 + # gra_spill_temp_217 = 184 + # gra_spill_temp_218 = 192 + # gra_spill_temp_219 = 208 + + // registers: + // a2: const int16_t *input_data + // a3: const uint16_t input_wd + // a4: const uint16_t input_ht + // a5: const uint16_t channels + // a6: const uint16_t pad_wd + // a7: const uint16_t pad_ht + + // on stack: + // const uint16_t stride_wd + // const uint16_t stride_ht + // const uint16_t ch_mult + // const int16_t *filter_data + // const uint16_t filter_wd + // const uint16_t filter_ht + // const int32_t *bias + // int8_t *out_data + // const uint16_t out_wd + // const uint16_t out_ht + // const int32_t out_offset + // const int32_t *out_shift + // const int32_t *out_mult + // const int32_t activation_min + // const int32_t activation_max + + entry a1,256 # + s32i a2,a1,144 # [0] gra_spill_temp_207 + s32i.n a4,a1,56 # [1] gra_spill_temp_185 + s32i a5,a1,172 # [2] gra_spill_temp_214 + l32i a9,a1,284 # [3] id:241 out_data+0x0 + + l16ui a8,a1,292 # [4] id:242 out_ht+0x0 + s32i a8,a1,64 # [5] gra_spill_temp_187 + s32i a9,a1,124 # [6] gra_spill_temp_202 + beqz.n a8,.Lt_9_8450 # [7] + + s32i a1,a1,128 # [0] gra_spill_temp_203 + neg a13,a7 # [1] + movi.n a4,0 # [2] + neg a12,a6 # [3] + l32i a9,a1,280 # [4] id:243 bias+0x0 + slli a11,a5,1 # [5] + l16ui a10,a1,264 # [6] id:244 ch_mult+0x0 + l32i a14,a1,268 # [7] id:245 filter_data+0x0 + s32i a14,a1,160 # [8] gra_spill_temp_211 + s32i a10,a1,92 # [9] gra_spill_temp_194 + s32i a11,a1,156 # [10] gra_spill_temp_210 + s32i a9,a1,112 # [11] gra_spill_temp_199 + sext a12,a12,15 # [12] + s32i a4,a1,68 # [13] gra_spill_temp_188 + sext a13,a13,15 # [14] + l16ui a4,a1,272 # [15] id:246 filter_wd+0x0 + s32i a13,a1,100 # [16] gra_spill_temp_196 + s32i.n a12,a1,48 # [17] gra_spill_temp_183 + mul16u a8,a5,a10 # [18] + extui a9,a9,0,4 # [19] + l32i a11,a1,304 # [20] id:249 out_mult+0x0 + s32i a11,a1,80 # [21] gra_spill_temp_191 + s32i a9,a1,104 # [22] gra_spill_temp_197 + s32i a8,a1,148 # [23] gra_spill_temp_208 + addi a10,a10,-7 # [24] + l32i a12,a1,300 # [25] id:248 out_shift+0x0 + l16ui a13,a1,256 # [26] id:247 stride_wd+0x0 + s32i a13,a1,72 # [27] gra_spill_temp_189 + s32i a12,a1,76 # [28] gra_spill_temp_190 + s32i a10,a1,116 # [29] gra_spill_temp_200 + slli a8,a8,1 # [30] + l16ui a9,a1,260 # [31] id:251 stride_ht+0x0 + 
s32i.n a9,a1,60 # [32] gra_spill_temp_186 + s32i a8,a1,152 # [33] gra_spill_temp_209 + l16ui a10,a1,276 # [34] id:250 filter_ht+0x0 + s32i.n a10,a1,52 # [35] gra_spill_temp_184 + l16ui a8,a1,288 # [36] id:252 out_wd+0x0 + s32i a8,a1,84 # [37] gra_spill_temp_192 + j .Lt_9_8962 # [38] + +.Lt_9_9218: # 0x1561 +# Part of loop body line 1083, head labeled .Lt_9_8962 + l32i a15,a1,64 # [0] gra_spill_temp_187 + l32i.n a9,a1,60 # [1] gra_spill_temp_186 + l32i a14,a1,68 # [2] gra_spill_temp_188 + l32i a8,a1,100 # [3] gra_spill_temp_196 + addi.n a14,a14,1 # [4] + s32i a14,a1,68 # [5] gra_spill_temp_188 + add.n a9,a8,a9 # [6] + sub a14,a14,a15 # [7] + sext a8,a9,15 # [8] + s32i a8,a1,100 # [9] gra_spill_temp_196 + beqz a14,.Lt_9_8450 # [10] + +.Lt_9_8962: # 0x157f + l32i a10,a1,84 # [0] gra_spill_temp_192 + beqz.n a10,.Lt_9_9218 # [2] + + l32i.n a7,a1,52 # [0] gra_spill_temp_184 + movi.n a11,0 # [1] + l32i.n a8,a1,56 # [2] gra_spill_temp_185 + l32i a9,a1,100 # [3] gra_spill_temp_196 + l32i.n a12,a1,48 # [4] gra_spill_temp_183 + s32i a12,a1,168 # [5] gra_spill_temp_213 + neg a10,a9 # [6] + sub a8,a8,a9 # [7] + max a10,a10,a11 # [8] + s32i a10,a1,108 # [9] gra_spill_temp_198 + min a7,a7,a8 # [10] + movi.n a11,0 # [11] + s32i a11,a1,88 # [12] gra_spill_temp_193 + j .Lt_9_9730 # [13] + +.Lt_9_9986: # 0x15a9 +# Part of loop body line 1085, head labeled .Lt_9_9730 + l32i a13,a1,84 # [0] gra_spill_temp_192 + l32i a15,a1,72 # [1] gra_spill_temp_189 + l32i a12,a1,88 # [2] gra_spill_temp_193 + l32i a14,a1,168 # [3] gra_spill_temp_213 + addi.n a12,a12,1 # [4] + s32i a12,a1,88 # [5] gra_spill_temp_193 + add.n a15,a14,a15 # [6] + sext a14,a15,15 # [7] + s32i a14,a1,168 # [8] gra_spill_temp_213 + beq a12,a13,.Lt_9_9218 # [9] + +.Lt_9_9730: # 0x15c5 +# Loop body line 1085, nesting depth: 2, estimated iterations: 100 + #1086 const int16_t base_x = (out_x * stride_wd) - pad_wd; + #1087 const int32_t *out_mult_ptr = out_mult; + #1088 const int32_t *out_shift_ptr = out_shift; + #1089 uint32_t bias_ptr = (uint32_t) (bias); + #1090 for (int ch_idx = 0; ch_idx < channels; ch_idx++) {//channel_loop + l32i a8,a1,172 # [0] gra_spill_temp_214 + l32i a9,a1,80 # [1] gra_spill_temp_191 + l32i a10,a1,76 # [2] gra_spill_temp_190 + l32i a11,a1,112 # [3] gra_spill_temp_199 + s32i a11,a1,120 # [4] gra_spill_temp_201 + s32i a10,a1,140 # [5] gra_spill_temp_206 + s32i a9,a1,136 # [6] gra_spill_temp_205 + beqz.n a8,.Lt_9_9986 # [7] + +.LBB9_esp_nn_depthwise_conv_s16_mult8: # 0x15dc +# Part of loop body line 1085, head labeled .Lt_9_9730 + movi.n a8,0 # [0] + l32i a5,a1,168 # [1] gra_spill_temp_213 + movi.n a13,0 # [2] + movi.n a14,0 # [3] + s32i a14,a1,96 # [4] gra_spill_temp_195 + s32i a13,a1,184 # [5] gra_spill_temp_217 + neg a6,a5 # [6] + max a6,a6,a8 # [7] + sub a5,a3,a5 # [8] + min a5,a4,a5 # [9] + sub a12,a5,a6 # [10] + s32i a12,a1,164 # [11] gra_spill_temp_212 + j .Lt_9_10498 # [12] + +.Lt_9_10754: # 0x1600 +# Part of loop body line 1090, head labeled .Lt_9_10498 + l32i a10,a1,172 # [0] gra_spill_temp_214 + l32i a12,a1,92 # [1] gra_spill_temp_194 + l32i a9,a1,184 # [2] gra_spill_temp_217 + l32i a11,a1,96 # [3] gra_spill_temp_195 + addi.n a9,a9,1 # [4] + s32i a9,a1,184 # [5] gra_spill_temp_217 + add.n a11,a11,a12 # [6] + s32i a11,a1,96 # [7] gra_spill_temp_195 + beq a9,a10,.Lt_9_9986 # [8] + +.Lt_9_10498: # 0x1619 +# Loop body line 1090, nesting depth: 3, estimated iterations: 100 + #1091 for (int ch_mult_idx = 0; ch_mult_idx < ch_mult - 7; ch_mult_idx += 8) { + l32i a13,a1,116 # [0] gra_spill_temp_200 + blti a13,1,.Lt_9_10754 # 
[2] + +.LBB12_esp_nn_depthwise_conv_s16_mult8: # 0x161f +# Part of loop body line 1090, head labeled .Lt_9_10498 + l32i a2,a1,96 # [0] gra_spill_temp_195 + movi.n a14,0 # [1] + s32i a14,a1,132 # [2] gra_spill_temp_204 + j .Lt_9_11266 # [3] + +.Lt_9_11522: # 0x162a + l32i a9,a1,128 # [0] gra_spill_temp_203 + ee.st.qacc_l.l.128.ip a9,16 # [2] id:257 + ee.st.qacc_l.h.32.ip a9,0 # [3] id:258 + l8ui a10,a1,15 # [4] qacc_scratch+15 + l16ui a8,a1,10 # [5] qacc_scratch+10 + l8ui a13,a1,16 # [6] qacc_scratch+16 + l8ui a12,a1,6 # [7] qacc_scratch+6 + l8ui a11,a1,5 # [8] qacc_scratch+5 + s8i a11,a1,2 # [9] qacc_scratch+2 + s8i a12,a1,3 # [10] qacc_scratch+3 + s8i a13,a1,7 # [11] qacc_scratch+7 + s16i a8,a1,4 # [12] qacc_scratch+4 + s8i a10,a1,6 # [13] qacc_scratch+6 + + movi.n a8,16 # [14] + ee.st.qacc_h.l.128.ip a9,16 # [15] id:268 + ee.st.qacc_h.h.32.ip a9,-32 # [16] id:269 + ee.srcmb.s16.qacc q1,a8,0 # [17] + l16ui a13,a1,26 # [18] qacc_scratch+26 + l8ui a15,a1,32 # [19] qacc_scratch+32 + l8ui a12,a1,22 # [20] qacc_scratch+22 + l8ui a11,a1,21 # [21] qacc_scratch+21 + l16ui a10,a1,16 # [22] qacc_scratch+16 + l8ui a14,a1,31 # [23] qacc_scratch+31 + s8i a14,a1,14 # [24] qacc_scratch+14 + s16i a10,a1,8 # [25] qacc_scratch+8 + s8i a11,a1,10 # [26] qacc_scratch+10 + s8i a12,a1,11 # [27] qacc_scratch+11 + s8i a15,a1,15 # [28] qacc_scratch+15 + s16i a13,a1,12 # [29] qacc_scratch+12 + #1138 EE_VZIP_16(q0, q1); /* 4x32 */ + #1139 + #1140 if (bias) { + l32i a15,a1,112 # [30] gra_spill_temp_199 + ee.vld.128.ip q0,a9,0 # [31] id:281 + s32i a9,a1,128 # [32] gra_spill_temp_203 + ee.vzip.16 q0,q1 # [33] + beqz.n a15,.Lt_9_13570 # [34] + +.LBB23_esp_nn_depthwise_conv_s16_mult8: # 0x168e +# Part of loop body line 1091, head labeled .Lt_9_11266 + addi a14,a1,112 # [0] + l32i a8,a1,104 # [1] gra_spill_temp_197 + l32i a15,a1,120 # [2] gra_spill_temp_201 + wur.sar_byte a8 # [3] + ee.vld.128.ip q3,a15,16 # [4] id:284 + ee.vld.128.ip q6,a15,16 # [5] id:285 + ee.vld.128.ip q4,a15,0 # [6] id:286 + s32i a15,a1,120 # [7] gra_spill_temp_201 + ee.src.q.qup q5,q3,q6 # [8] + ee.vadds.s32 q0,q0,q5 # [9] + ee.src.q.qup q2,q3,q4 # [10] + ee.vadds.s32 q1,q1,q2 # [11] + st.qr q1,a14,96 # [12] gra_spill_temp_219-112 + +.Lt_9_13570: # 0x16b5 + #1158 q0 = esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q0, out_mult_ptr, out_shift_ptr); + l32i a10,a1,136 # [0] gra_spill_temp_205 + l32i a11,a1,140 # [1] gra_spill_temp_206 + addi a9,a1,112 # [2] + st.qr q1,a9,96 # [3] gra_spill_temp_219-112 + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + #1159 out_mult_ptr += 4; + #1160 out_shift_ptr += 4; + #1161 + #1162 q1 = esp_nn_multiply_by_quantized_mult_ver1_esp32s3(q1, out_mult_ptr, out_shift_ptr); + l32i a11,a1,140 # [0] gra_spill_temp_206 + addi a12,a1,112 # [1] + l32i a10,a1,136 # [2] gra_spill_temp_205 + st.qr q0,a12,80 # [3] gra_spill_temp_218-112 + ld.qr q0,a12,96 # [4] gra_spill_temp_219-112 + addi a10,a10,16 # [5] + addi a11,a11,16 # [6] + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + addi.n a2,a2,8 # [0] + l32i a14,a1,116 # [1] gra_spill_temp_200 + l32i a15,a1,124 # [2] gra_spill_temp_202 + l32i a13,a1,132 # [3] gra_spill_temp_204 + l32i a10,a1,140 # [4] gra_spill_temp_206 + l32i a11,a1,136 # [5] gra_spill_temp_205 + addmi a9,a1,256 # [6] + addi a8,a1,112 # [7] + ld.qr q7,a8,80 # [8] gra_spill_temp_218-112 + addi a9,a9,56 # [9] + ee.vldbc.32 q2,a9 # [10] id:290 activation_max + addi a11,a11,32 # [11] + addi a10,a10,32 # [12] + addi.n a13,a13,8 # [13] + s32i a13,a1,132 # [14] gra_spill_temp_204 + s32i a10,a1,140 # [15] 
gra_spill_temp_206 + s32i a11,a1,136 # [16] gra_spill_temp_205 + addmi a10,a1,256 # [17] + addmi a11,a1,256 # [18] + addi a11,a11,52 # [19] + addi a10,a10,40 # [20] + ee.vldbc.32 q3,a10 # [21] id:289 out_offset + ee.vldbc.32 q1,a11 # [22] id:291 activation_min + ee.vadds.s32 q0,q0,q3 # [23] + ee.vadds.s32 q7,q7,q3 # [24] + ee.vmin.s32 q7,q7,q2 # [25] + ee.vmin.s32 q0,q0,q2 # [26] + ee.vmax.s32 q0,q0,q1 # [27] + ee.vmax.s32 q7,q7,q1 # [28] + ee.vunzip.16 q7,q0 # [29] + ee.vunzip.8 q7,q0 # [30] + ee.vst.l.64.ip q7,a15,8 # [31] id:292 + s32i a15,a1,124 # [32] gra_spill_temp_202 + bge a13,a14,.Lt_9_10754 # [33] + +.Lt_9_11266: # 0x1740 + + ee.zero.qacc # [0] + l32i a12,a1,108 # [1] gra_spill_temp_198 + s32i a12,a1,180 # [2] gra_spill_temp_216 + bge a12,a7,.Lt_9_11522 # [3] + + mull a15,a12,a4 # [0] + l32i a14,a1,100 # [1] gra_spill_temp_196 + add.n a8,a15,a5 # [2] + add.n a14,a14,a12 # [3] + mull a14,a3,a14 # [4] + s32i a8,a1,176 # [5] gra_spill_temp_215 + bge a6,a5,.Lt_9_12290 # [6] + +.LBB18_esp_nn_depthwise_conv_s16_mult8: # 0x175f +# Part of loop body line 1091, head labeled .Lt_9_11266 + l32i a10,a1,184 # [0] gra_spill_temp_217 + l32i a11,a1,172 # [1] gra_spill_temp_214 + l32i a12,a1,168 # [2] gra_spill_temp_213 + l32i a8,a1,148 # [3] gra_spill_temp_208 + add.n a9,a15,a6 # [4] + mull a8,a8,a9 # [5] + add.n a12,a12,a6 # [6] + l32i a9,a1,160 # [7] gra_spill_temp_211 + add.n a12,a14,a12 # [8] + mull a11,a11,a12 # [9] + add.n a8,a2,a8 # [10] + l32i a12,a1,156 # [11] gra_spill_temp_210 + addx2 a8,a8,a9 # [12] + add.n a10,a10,a11 # [13] + l32i a11,a1,144 # [14] gra_spill_temp_207 + l32i a9,a1,164 # [15] gra_spill_temp_212 + addx2 a10,a10,a11 # [16] + l32i a11,a1,152 # [17] gra_spill_temp_209 + loopgtz a9,.LBB45_esp_nn_depthwise_conv_s16_mult8 # [18] + + mov.n a9,a8 # [0*II+0] + ee.vldbc.16 q0,a10 # [0*II+1] id:255 + ee.vld.128.ip q1,a9,0 # [0*II+2] id:254 + add.n a10,a10,a12 # [0*II+3] + add.n a8,a8,a11 # [0*II+4] + ee.vmulas.s16.qacc q0,q1 # [0*II+5] + +.LBB45_esp_nn_depthwise_conv_s16_mult8: # 0x17a2 + +.Lt_9_12290: # 0x17a2 + + add.n a14,a14,a3 # [0] + add.n a15,a15,a4 # [1] + l32i a10,a1,180 # [2] gra_spill_temp_216 + l32i a11,a1,176 # [3] gra_spill_temp_215 + addi.n a10,a10,1 # [4] + add.n a11,a11,a4 # [5] + s32i a11,a1,176 # [6] gra_spill_temp_215 + s32i a10,a1,180 # [7] gra_spill_temp_216 + sub a10,a7,a10 # [8] + beqz a10,.Lt_9_11522 # [9] + +.Lt_9_12034: # 0x17bc + blt a6,a5,.LBB18_esp_nn_depthwise_conv_s16_mult8 # [0] + + j .Lt_9_12290 # [0] + +.Lt_9_8450: # 0x17c2 + retw.n # [0] + + .size esp_nn_depthwise_conv_s16_mult8_esp32s3, . 
- esp_nn_depthwise_conv_s16_mult8_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c new file mode 100644 index 0000000..abb11d3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s8_esp32s3.c @@ -0,0 +1,547 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#include + +static int16_t *scratch_buffer = NULL; + +extern void esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3(const int16_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const uint16_t pad_wd, + const uint16_t pad_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t ch_mult, + const int16_t *filter_data, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max); + +extern void esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3(const int8_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const int32_t input_offset, + const uint16_t stride_wd, + const uint16_t stride_ht, + const int8_t *filter_data, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max); + +extern void esp_nn_depthwise_conv_s16_mult1_3x3_no_pad_esp32s3(const int16_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const uint16_t stride_wd, + const uint16_t stride_ht, + const int16_t *filter_data, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max); + +extern void esp_nn_depthwise_conv_s16_mult8_esp32s3(const int16_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const uint16_t pad_wd, + const uint16_t pad_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t ch_mult, + const int16_t *filter_data, + const uint16_t filter_wd, + const uint16_t 
filter_ht, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max); + +extern void esp_nn_depthwise_conv_s16_mult4_esp32s3(const int16_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const uint16_t pad_wd, + const uint16_t pad_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t ch_mult, + const int16_t *filter_data, + const uint16_t filter_wd, + const uint16_t filter_ht, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max); + +extern void esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3(const int16_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const uint16_t pad_wd, + const uint16_t pad_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const int16_t *filter_data, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max); + +extern void esp_nn_depthwise_conv_s16_mult1_esp32s3(const int16_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const uint16_t pad_wd, + const uint16_t pad_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const int16_t *filter_data, + const uint16_t filter_wd, + const uint16_t filter_ht, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max); + +extern void esp_nn_s8_to_s16_esp32s3(const int8_t *src, int16_t *dst, const int size); + +extern void esp_nn_aligned_s8_to_s16_with_offset_esp32s3(const int8_t *src, int16_t *dst, + const int size, const int32_t offset); + +static void esp_nn_depthwise_conv_s8_unrolled(const int8_t *input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const int32_t input_offset, + const uint16_t pad_wd, + const uint16_t pad_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t ch_mult, + const int8_t *filter_data, + const uint16_t filter_wd, + const uint16_t filter_ht, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max) +{ + int out_idx = 0; + for (int out_y = 0; out_y < out_ht; out_y++) { //height loop + const int16_t base_y = (out_y * stride_ht) - pad_ht; + for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + const int16_t base_x = (out_x * stride_wd) - pad_wd; + for (int ch_idx = 0; ch_idx < channels; ch_idx++) {//channel_loop + int ch_mult_idx = 0; + for (; ch_mult_idx < ch_mult - 3; ch_mult_idx += 4) { + int32_t result0 = 0, result1 = 0, result2 = 0, result3 = 0; + const int out_ch_idx = ch_mult_idx + ch_idx * ch_mult; + + /* Select filter so as the point doesn't lie outside block */ + int filter_y_start = max(0, -base_y); + int filter_x_start = max(0, -base_x); + 
int filter_y_end = min(filter_ht, input_ht - base_y); + int filter_x_end = min(filter_wd, input_wd - base_x); + + for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx; + int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels * ch_mult) + out_ch_idx; + int32_t input_val = input_data[input_index] + input_offset; + int32_t filter_val0 = filter_data[filter_index + 0]; + int32_t filter_val1 = filter_data[filter_index + 1]; + int32_t filter_val2 = filter_data[filter_index + 2]; + int32_t filter_val3 = filter_data[filter_index + 3]; + result0 += input_val * filter_val0; + result1 += input_val * filter_val1; + result2 += input_val * filter_val2; + result3 += input_val * filter_val3; + } + } + if (bias) { + result0 += bias[out_ch_idx + 0]; + result1 += bias[out_ch_idx + 1]; + result2 += bias[out_ch_idx + 2]; + result3 += bias[out_ch_idx + 3]; + } + result0 = esp_nn_multiply_by_quantized_mult(result0, + out_mult[out_ch_idx + 0], out_shift[out_ch_idx + 0]); + result1 = esp_nn_multiply_by_quantized_mult(result1, + out_mult[out_ch_idx + 1], out_shift[out_ch_idx + 1]); + result2 = esp_nn_multiply_by_quantized_mult(result2, + out_mult[out_ch_idx + 2], out_shift[out_ch_idx + 2]); + result3 = esp_nn_multiply_by_quantized_mult(result3, + out_mult[out_ch_idx + 3], out_shift[out_ch_idx + 3]); + + result0 += out_offset; + result1 += out_offset; + result2 += out_offset; + result3 += out_offset; + + result0 = max(result0, activation_min); + result1 = max(result1, activation_min); + result2 = max(result2, activation_min); + result3 = max(result3, activation_min); + + result0 = min(result0, activation_max); + result1 = min(result1, activation_max); + result2 = min(result2, activation_max); + result3 = min(result3, activation_max); + + out_data[out_idx++] = result0; + out_data[out_idx++] = result1; + out_data[out_idx++] = result2; + out_data[out_idx++] = result3; + } + + /* left-over */ + for (; ch_mult_idx < ch_mult; ch_mult_idx++) { + int32_t result = 0; + const int out_ch_idx = ch_mult_idx + ch_idx * ch_mult; + + /* Select filter so as the point doesn't lie outside block */ + int filter_y_start = max(0, -base_y); + int filter_x_start = max(0, -base_x); + int filter_y_end = min(filter_ht, input_ht - base_y); + int filter_x_end = min(filter_wd, input_wd - base_x); + + for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx; + int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * (channels * ch_mult) + out_ch_idx; + int32_t input_val = input_data[input_index] + input_offset; + int32_t filter_val = filter_data[filter_index]; + result += input_val * filter_val; + } + } + if (bias) { + result += bias[out_ch_idx]; + } + result = esp_nn_multiply_by_quantized_mult(result, out_mult[out_ch_idx], out_shift[out_ch_idx]); + result += out_offset; + result = max(result, activation_min); + result = min(result, activation_max); + + out_data[out_idx++] = result; + } + } + } + } +} + +void esp_nn_depthwise_conv_s8_ch_mult1(const int8_t 
*input_data, + const uint16_t input_wd, + const uint16_t input_ht, + const uint16_t channels, + const int32_t input_offset, + const uint16_t pad_wd, + const uint16_t pad_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const int8_t *filter_data, + const uint16_t filter_wd, + const uint16_t filter_ht, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_wd, + const uint16_t out_ht, + const int32_t out_offset, + const int32_t *out_shift, + const int32_t *out_mult, + const int32_t activation_min, + const int32_t activation_max) +{ + int out_idx = 0; + for (int out_y = 0; out_y < out_ht; out_y++) { //height loop + const int16_t base_y = (out_y * stride_ht) - pad_ht; + for (int out_x = 0; out_x < out_wd; out_x++) { //width_loop + const int16_t base_x = (out_x * stride_wd) - pad_wd; + for (int ch_idx = 0; ch_idx < channels; ch_idx++) {//channel_loop + int32_t result = 0; + /* Select filter so as the point doesn't lie outside block */ + int filter_y_start = max(0, -base_y); + int filter_x_start = max(0, -base_x); + int filter_y_end = min(filter_ht, input_ht - base_y); + int filter_x_end = min(filter_wd, input_wd - base_x); + + for (int filter_y_idx = filter_y_start; filter_y_idx < filter_y_end; filter_y_idx++) { + const int32_t idx_y = base_y + filter_y_idx; + for (int filter_x_idx = filter_x_start; filter_x_idx < filter_x_end; filter_x_idx++) { + const int32_t idx_x = base_x + filter_x_idx; + int32_t input_index = (idx_y * input_wd + idx_x) * channels + ch_idx; + int32_t filter_index = (filter_y_idx * filter_wd + filter_x_idx) * channels + ch_idx; + int32_t input_val = input_data[input_index] + input_offset; + int32_t filter_val = filter_data[filter_index]; + result += input_val * filter_val; + } + } + if (bias) { + result += bias[ch_idx]; + } + result = esp_nn_multiply_by_quantized_mult(result, out_mult[ch_idx], out_shift[ch_idx]); + result += out_offset; + result = max(result, activation_min); + result = min(result, activation_max); + + out_data[out_idx++] = result; + } + } + } +} + +int esp_nn_get_depthwise_conv_scratch_size_esp32s3(const data_dims_t *input_dims, + const data_dims_t *filter_dims, + const data_dims_t *output_dims, + const dw_conv_params_t *conv_params) +{ + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t channels = input_dims->channels; + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + const uint16_t ch_mult = conv_params->ch_mult; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + + int filter_size = filter_wd * filter_ht * channels * ch_mult; + int pad_width = 0, pad_height = 0; + + if ((ch_mult == 1) && (channels % 8 == 0) && (filter_wd == 3) && (filter_ht == 3)) { + if (channels % 16 == 0) { + if (pad_wd || pad_ht) { + pad_width = pad_wd * 2; + pad_height = pad_ht * 2; + } else { + // check if we need to pad additionally + pad_width = (out_wd * stride_wd + filter_wd - 1) - input_wd; + pad_height = (out_ht * stride_ht + filter_ht - 1) - input_ht; + // printf("in(%d %d %d), out(%d %d), filter (%d %d) stride (%d %d), pad (%d %d)", + // input_wd, input_ht, channels, out_wd, out_ht, filter_wd, filter_ht, + // stride_wd, stride_ht, pad_wd, pad_ht); + } + if (pad_width || pad_height) 
{ + int input_size = (input_wd + pad_width) * (input_ht + pad_height) * channels; + // printf("ask1 %d\n", filter_size + input_size + 16); + return filter_size + input_size + 16; // 16 for alignment + } else { + // printf("ask2 %d\n", filter_size + 16); + return filter_size + 16; // 16 for alignment + } + } else { + int input_size = input_wd * input_ht * channels; + // printf("ask3 %d\n", 2 * (filter_size + input_size) + 16); + return 2 * (filter_size + input_size) + 16; // 16 for alignment + } + } else if (ch_mult % 4 == 0) { + int input_size = input_wd * input_ht * channels; + // printf("ask4 %d\n", 2 * (filter_size + input_size) + 16); + return 2 * (filter_size + input_size) + 16; // 16 for alignment + } + return 32; // just few bytes +} + +void esp_nn_set_depthwise_conv_scratch_buf_esp32s3(void *buf) +{ + scratch_buffer = (int16_t *) buf; +} + +/** + * Assumption 1: i/p channels == o/p channels + * Assumption 2: Pointers are valid + * Assumption 3: dialation width = 1 + */ + + + +void esp_nn_depthwise_conv_s8_esp32s3(const data_dims_t *input_dims, + const int8_t *input_data, + const data_dims_t *filter_dims, + const int8_t *filter_data, + const int32_t *bias, + const data_dims_t *output_dims, + int8_t *out_data, + const dw_conv_params_t *conv_params, + const quant_data_t *quant_data) +{ + const uint16_t input_wd = input_dims->width; + const uint16_t input_ht = input_dims->height; + const uint16_t channels = input_dims->channels; + const int32_t input_offset = conv_params->in_offset; + const int32_t out_offset = conv_params->out_offset; + const uint16_t pad_wd = conv_params->padding.width; + const uint16_t pad_ht = conv_params->padding.height; + const uint16_t stride_wd = conv_params->stride.width; + const uint16_t stride_ht = conv_params->stride.height; + const uint16_t filter_wd = filter_dims->width; + const uint16_t filter_ht = filter_dims->height; + const uint16_t out_wd = output_dims->width; + const uint16_t out_ht = output_dims->height; + const int32_t *out_shift = quant_data->shift; + const int32_t *out_mult = quant_data->mult; + const int32_t activation_min = conv_params->activation.min; + const int32_t activation_max = conv_params->activation.max; + const uint16_t ch_mult = conv_params->ch_mult; + + int filter_size = filter_wd * filter_ht * channels * ch_mult; + int align_len = 16 - (filter_size & 15); + int input_size = input_wd * input_ht * channels; + int16_t *filter_data16 = scratch_buffer; + int16_t *input_data16 = scratch_buffer + filter_size + align_len; + if (scratch_buffer == NULL) { + printf("esp_nn_depthwise_conv error! 
scratch_buffer not set!\n"); + return; + } + + if ((ch_mult == 1) && (channels % 8 == 0)) { + if ((filter_wd == 3) && (filter_ht == 3)) { + if ((channels % 16 == 0) && (pad_wd == 1) && (pad_ht == 1)) { + /* process in 8 bits */ + int8_t *filter_aligned = (int8_t *) scratch_buffer; + int8_t *input_padded = (int8_t *) scratch_buffer + filter_size + align_len; + memcpy(filter_aligned, filter_data, filter_size); + esp_nn_aligned_s8_pad_with_value(input_data, input_padded, input_wd, input_ht, channels, + -input_offset, pad_wd, pad_ht); + esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3(input_padded, input_wd + 2 * pad_wd, + input_ht + 2 * pad_ht, channels, input_offset, + stride_wd, stride_ht, filter_aligned, bias, + out_data, out_wd, out_ht, out_offset, out_shift, + out_mult, activation_min, activation_max); + } else if ((channels % 16 == 0) && (pad_wd == 0) && (pad_ht == 0)) { + /* process in 8 bits */ + int8_t *filter_aligned = (int8_t *) scratch_buffer; + int8_t *input_padded = (int8_t *) scratch_buffer + filter_size + align_len; + + // check if we need to pad additionally + int pad_right = (out_wd * stride_wd + filter_wd - 1) - input_wd; + int pad_bottom = (out_ht * stride_ht + filter_ht - 1) - input_ht; + if (pad_right || pad_bottom) { // pad right and bottom + esp_nn_aligned_s8_pad_end_with_value(input_data, input_padded, input_wd, input_ht, + channels, -input_offset, pad_right, pad_bottom); + } else { + input_padded = (int8_t *) input_data; + } + memcpy(filter_aligned, filter_data, filter_size); + esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3(input_padded, input_wd + pad_right, + input_ht + pad_bottom, channels, input_offset, + stride_wd, stride_ht, filter_aligned, bias, + out_data, out_wd, out_ht, out_offset, out_shift, + out_mult, activation_min, activation_max); + } else { /* (channels % 8) == 0 */ + esp_nn_s8_to_s16_esp32s3(filter_data, filter_data16, filter_size); + esp_nn_aligned_s8_to_s16_with_offset_esp32s3(input_data, input_data16, input_size, input_offset); + esp_nn_depthwise_conv_s16_mult1_3x3_esp32s3(input_data16, input_wd, input_ht, channels, + pad_wd, pad_ht, stride_wd, stride_ht, filter_data16, + bias, out_data, out_wd, out_ht, out_offset, out_shift, + out_mult, activation_min, activation_max); + } + } else { // all other ch_mult == 1, `channels % 8 == 0` + esp_nn_depthwise_conv_s8_ch_mult1(input_data, input_wd, input_ht, channels, input_offset, + pad_wd, pad_ht, stride_wd, stride_ht, + filter_data, filter_wd, filter_ht, + bias, out_data, out_wd, out_ht, out_offset, out_shift, + out_mult, activation_min, activation_max); + } + } else if (ch_mult % 8 == 0) { + esp_nn_s8_to_s16_esp32s3(filter_data, filter_data16, filter_size); + esp_nn_aligned_s8_to_s16_with_offset_esp32s3(input_data, input_data16, input_size, input_offset); + if (filter_wd == 3 && filter_ht == 3) { + esp_nn_depthwise_conv_s16_mult8_3x3_esp32s3(input_data16, input_wd, input_ht, channels, + pad_wd, pad_ht, stride_wd, stride_ht, ch_mult, + filter_data16, bias, + out_data, out_wd, out_ht, out_offset, out_shift, + out_mult, activation_min, activation_max); + } else { + esp_nn_depthwise_conv_s16_mult8_esp32s3(input_data16, input_wd, input_ht, channels, + pad_wd, pad_ht, stride_wd, stride_ht, ch_mult, + filter_data16, filter_wd, filter_ht, bias, + out_data, out_wd, out_ht, out_offset, out_shift, + out_mult, activation_min, activation_max); + } + } else if (ch_mult % 4 == 0) { + esp_nn_s8_to_s16_esp32s3(filter_data, filter_data16, filter_size); + esp_nn_aligned_s8_to_s16_with_offset_esp32s3(input_data, 
input_data16, input_size, input_offset); + esp_nn_depthwise_conv_s16_mult4_esp32s3(input_data16, input_wd, input_ht, channels, + pad_wd, pad_ht, stride_wd, stride_ht, ch_mult, + filter_data16, filter_wd, filter_ht, bias, + out_data, out_wd, out_ht, out_offset, out_shift, + out_mult, activation_min, activation_max); + } else { + esp_nn_depthwise_conv_s8_unrolled(input_data, input_wd, input_ht, channels, input_offset, + pad_wd, pad_ht, stride_wd, stride_ht, ch_mult, + filter_data, filter_wd, filter_ht, + bias, out_data, out_wd, out_ht, out_offset, out_shift, + out_mult, activation_min, activation_max); + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3.S new file mode 100644 index 0000000..c9240d4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/convolution/esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3.S @@ -0,0 +1,512 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + .text + .literal_position + +// processes multiple of 16 channels +// already padded version. 
no additional padding needed +// simply keep sliding filter window by stride_size + + # Program Unit: esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3 + .type esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3, @function + .align 4 + .global esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3 + +esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3: # 0xccc + # qacc_scratch = 0 + # gra_spill_temp_103 = 40 // stride_wd*channels + # gra_spill_temp_104 = 44 // bias_align + # gra_spill_temp_107 = 48 // input_offset + # gra_spill_temp_105 = 52 // out_mult_ptr + # gra_spill_temp_106 = 56 // out_shift_ptr + # gra_spill_temp_108 = 60 // ch_idx + # gra_spill_temp_109 = 64 // out_ch + # gra_spill_temp_110 = 68 // bias_ptr + # gra_spill_temp_111 = 72 // 2 * (input_wd * channels) + # gra_spill_temp_112 = 76 // input_data + # gra_spill_temp_118 = 96 + # gra_spill_temp_119 = 100 + # gra_spill_temp_120 = 104 + # gra_spill_temp_121 = 108 + # gra_spill_temp_113 = 112 // input_wd * channels + # gra_spill_temp_114 = 116 // input_wd + # gra_spill_temp_130 = 120 + + # gra_spill_temp_141 = 0 + # gra_spill_temp_120 = 16 + # gra_spill_temp_137 = 80 + +// offset+bias factor + # gra_spill_temp_134 = 128 //256-128 + # gra_spill_temp_135 = 144 //256-112 + # gra_spill_temp_133 = 160 //256-96 + # gra_spill_temp_132 = 176 //256-80 + + + // registers: + // a2: input_data + // a3: input_wd + // a4: input_ht + // a5: channels + // a6: input_offset + // a7: stride_wd + + // on stack: + + // 320: stride_ht + // 324: filter_data + // 328: *bias + // 332: *out_data + // 336: out_wd + // 340: out_ht + // 344: out_offset + // 348: *out_shift + // 352: *out_mult + // 356: activation_min + // 360: activation_max + + entry a1,320 # + mul16u a7,a7,a5 + s32i a3,a1,116 # [0] gra_spill_temp_114, input_wd + s32i a6,a1,48 # [1] gra_spill_temp_107, input_offset + s32i a7,a1,40 # gra_spill_temp_103, stride_wd*channels + + addi a8,a5,-15 # [2] + s32i a2,a1,76 # [3] gra_spill_temp_112, input_data + l32i a9,a1,328 # [4] id:664 bias+0x0 + mov.n a2,a5 # [5] + s32i a8,a1,64 # [7] gra_spill_temp_109 + s32i a9,a1,68 # [8] gra_spill_temp_110, bias_ptr + blti a8,1,.Lt_7_4610 # [9] + + l32i a12,a1,348 # [4] id:666 out_shift+0x0 + mul16u a15,a3,a5 # [1] + movi.n a9,0 # [13] + s32i a12,a1,56 # [9] gra_spill_temp_106 // out_shift_ptr + s32i a9,a1,60 # [14] gra_spill_temp_108, ch_idx + s32i a15,a1,112 # [12] gra_spill_temp_113, input_wd*channels + l32i a9,a1,352 # [24] id:665 out_mult+0x0 + slli a15,a15,1 # [15] + s32i a15,a1,72 # [23] gra_spill_temp_111, 2 * (input_wd * channels) + s32i a9,a1,52 # [25] gra_spill_temp_105, out_mult_ptr + +// outer most out_ch loop +.Lt_7_5122: # 0xd57 + l32i a13,a1,324 # [1] filter_data + l32i a6,a1,60 # [2] gra_spill_temp_108, ch_idx + l32i a9,a1,48 # [0] gra_spill_temp_107, input_offset + ee.zero.q q2 # [3] + add.n a13,a6,a13 # [4] + s32i a13,a1,108 # [5] gra_spill_temp_121 + +// multiply accumulate filter points + ee.vld.128.xp q1,a13,a2 # [6] id:673 + ee.vld.128.xp q3,a13,a2 # [7] id:674 + ee.vcmp.lt.s8 q0,q1,q2 # [8] + ee.vcmp.lt.s8 q4,q3,q2 # [9] + ee.vzip.8 q1,q0 # [10] + ee.vzip.8 q3,q4 # [11] + ee.vadds.s16 q0,q0,q4 # [12] + ee.vld.128.xp q4,a13,a2 # [13] id:675 + ee.vadds.s16 q1,q1,q3 # [14] + ee.vcmp.lt.s8 q3,q4,q2 # [15] + ee.vzip.8 q4,q3 # [16] + ee.vadds.s16 q1,q1,q4 # [17] + ee.vld.128.xp q4,a13,a2 # [18] id:676 + ee.vadds.s16 q0,q0,q3 # [19] + ee.vcmp.lt.s8 q3,q4,q2 # [20] + ee.vzip.8 q4,q3 # [21] + ee.vadds.s16 q0,q0,q3 # [22] + ee.vld.128.xp q3,a13,a2 # [23] id:677 + ee.vadds.s16 q1,q1,q4 # [24] + 
ee.vcmp.lt.s8 q4,q3,q2 # [25] + ee.vzip.8 q3,q4 # [26] + ee.vadds.s16 q1,q1,q3 # [27] + ee.vld.128.xp q3,a13,a2 # [28] id:678 + ee.vadds.s16 q0,q0,q4 # [29] + ee.vcmp.lt.s8 q4,q3,q2 # [30] + ee.vzip.8 q3,q4 # [31] + ee.vadds.s16 q0,q0,q4 # [32] + ee.vld.128.xp q4,a13,a2 # [33] id:679 + ee.vadds.s16 q1,q1,q3 # [34] + ee.vcmp.lt.s8 q3,q4,q2 # [35] + ee.vzip.8 q4,q3 # [36] + ee.vadds.s16 q1,q1,q4 # [37] + ee.vld.128.xp q4,a13,a2 # [38] id:680 + ee.vadds.s16 q0,q0,q3 # [39] + ee.vcmp.lt.s8 q3,q4,q2 # [40] + ee.vzip.8 q4,q3 # [41] + ee.vadds.s16 q0,q0,q3 # [42] + ee.vld.128.xp q3,a13,a2 # [44] id:681 + ee.vadds.s16 q1,q1,q4 # [43] + ee.vcmp.lt.s8 q2,q3,q2 # [47] + ee.vzip.8 q3,q2 # [48] + ee.vadds.s16 q0,q0,q2 # [49] + ee.vadds.s16 q1,q1,q3 # [50] + + ee.movi.32.a q1,a15,1 # [51] + ee.movi.32.a q1,a8,3 # [52] + ee.movi.32.a q0,a10,3 # [54] + ee.movi.32.a q0,a13,1 # [55] + srai a11,a10,16 # [56] + srai a12,a8,16 # [57] + mull a12,a9,a12 # [58] + mull a11,a9,a11 # [59] + sext a8,a8,15 # [328] + sext a10,a10,15 # [61] + srai a14,a13,16 # [62] + mull a14,a9,a14 # [63] + mull a10,a9,a10 # [64] + mull a8,a9,a8 # [65] + sext a13,a13,15 # [66] + mull a13,a9,a13 # [67] + ee.movi.32.q q3,a11,3 # [68] + ee.movi.32.q q4,a12,3 # [69] + ee.movi.32.q q4,a8,2 # [70] + ee.movi.32.q q3,a10,2 # [71] + ee.movi.32.a q1,a11,2 # [72] + srai a12,a11,16 # [74] + srai a8,a15,16 # [75] + mull a8,a9,a8 # [76] + mull a12,a9,a12 # [77] + sext a15,a15,15 # [78] + sext a11,a11,15 # [79] + mull a11,a9,a11 # [80] + mull a15,a9,a15 # [81] + ee.movi.32.q q4,a12,1 # [82] + ee.movi.32.q q1,a8,3 # [83] + ee.movi.32.q q1,a15,2 # [84] + ee.movi.32.q q4,a11,0 # [85] + ee.movi.32.a q0,a15,2 # [86] + ee.movi.32.q q0,a14,3 # [88] + ee.movi.32.q q0,a13,2 # [91] + srai a8,a15,16 # [89] + mull a8,a9,a8 # [90] + sext a15,a15,15 # [92] + mull a15,a9,a15 # [93] + # 526 MUL_IN_OFFSET_EXPAND(q_sum2, 0, q_sum2, 0); + ee.movi.32.a q0,a11,0 # [94] + srai a13,a11,16 # [95] + ee.movi.32.q q3,a8,1 # [96] + ee.movi.32.q q3,a15,0 # [100] + sext a11,a11,15 # [97] + mull a13,a9,a13 # [98] + l32i a8,a1,332 # [99] + ee.movi.32.a q1,a10,0 # [103] + ee.movi.32.q q0,a13,1 # [100] + srai a12,a10,16 # [105] + sext a10,a10,15 # [106] + mull a12,a9,a12 # [107] + mull a10,a9,a10 # [108] + mull a9,a9,a11 # [109] + ee.movi.32.q q1,a12,1 # [110] + ee.movi.32.q q1,a10,0 # [111] + + l32i a11,a1,328 // load bias + add.n a6,a6,a8 # [102] + ee.movi.32.q q0,a9,0 # [113] + beqz.n a11,.Lt_7_5378 # [114] + +// add bias + l32i a8,a1,68 # [0] gra_spill_temp_110, bias_ptr + extui a11,a11,0,4 # [2] // bias_align + wur.sar_byte a11 # [4] + ee.vld.128.ip q5,a8,16 # [5] id:683 + ee.vld.128.ip q6,a8,16 # [6] id:684 + ee.vld.128.ip q7,a8,16 # [7] id:685 + addmi a10,a1,256 # [2] + ee.src.q.ld.ip q2,a8,16,q5,q6 # [9] + ee.vadds.s32 q1,q1,q5 # [12] + ee.src.q.ld.ip q5,a8,0,q6,q7 # [13] + s32i a8,a1,68 # [11] gra_spill_temp_110, bias_ptr + ee.vadds.s32 q4,q4,q6 # [18] + ee.src.q q7,q7,q2 # [9] + ee.src.q q2,q2,q5 # [13] + ee.vadds.s32 q0,q0,q7 # [12] + ee.vadds.s32 q3,q3,q2 # [12] +.Lt_7_5378: # 0xeef + +// store offset+bias factor (q1,q4,q0,q3) + st.qr q4,a10,-112 # [17] gra_spill_temp_135-256 + st.qr q3,a10,-128 # [21] gra_spill_temp_134-256 + st.qr q1,a10,-96 # [7] gra_spill_temp_133-256 + st.qr q0,a10,-80 # [8] gra_spill_temp_132-256 + +// prepare height loop + movi.n a15,0 # [1] + movi.n a8,0 # [2] + movi.n a9,0 # [3] + s32i a9,a1,100 # [4] gra_spill_temp_119 + s32i a8,a1,104 # [5] gra_spill_temp_120 + s32i a15,a1,96 # [6] gra_spill_temp_118 + +// height loop +.Lt_7_6402: # 0xf0c + 
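+// one output row per pass: rebuild the three input row pointers (input_row0/1/2)
+// for the current out_y, run the width loop over out_wd columns, then
+// .Lt_7_6658 below advances the input base by stride_ht rows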
l32i a4,a1,104 # [2] gra_spill_temp_120 // out_y * (input_wd * stride_ht) * channels) + l32i a8,a1,100 # [3] gra_spill_temp_119 // initialised to 0 before height loop + l32i a5,a1,76 # [1] gra_spill_temp_112, input_data + l32i a3,a1,60 # [0] gra_spill_temp_108, ch_idx + l32i a7,a1,112 # [1] gra_spill_temp_113, input_wd*channels + l32i a10,a1,336 # [0] out_wd + add.n a4,a4,a5 # [4] // input_data + (out_y * stride_ht) * input_wd * channels + mov.n a5,a8 # [5] // index + add.n a3,a3,a4 # [6] // input_row0 + l32i a4,a1,72 # [9] gra_spill_temp_111, 2 * (input_wd * channels) + add.n a7,a7,a3 # [7] // input_row1 = (input_wd * channels) + add.n a8,a8,a10 # [8] + s32i a8,a1,120 # [10] gra_spill_temp_130 + add.n a4,a4,a3 # [11] // input_row2 + +// width loop +.Lt_7_7170: # 0xf32 + l32i a9,a1,108 # [3] gra_spill_temp_121, filter_ptr + ee.zero.qacc # [2] + mov.n a12,a3 # [4] + mov.n a11,a7 # [1] + mov.n a10,a4 # [0] + ee.vld.128.xp q0,a12,a2 # [5] id:693 + ee.vld.128.xp q6,a12,a2 # [6] id:695 + ee.vld.128.xp q1,a9,a2 # [7] id:694 + ee.vld.128.xp q7,a9,a2 # [8] id:696 + ee.vld.128.xp q5,a9,a2 # [9] id:698 + ee.vld.128.xp q3,a9,a2 # [10] id:700 + ee.vmulas.s8.qacc.ld.xp q4,a12,a2,q0,q1 # [11] id:697 + ee.vmulas.s8.qacc.ld.xp q2,a11,a2,q6,q7 # [13] id:699 + ee.vld.128.xp q1,a9,a2 # [14] id:702 + ee.vmulas.s8.qacc.ld.xp q0,a11,a2,q4,q5 # [15] id:701 + ee.vmulas.s8.qacc.ld.xp q6,a11,a2,q2,q3 # [16] id:703 + ee.vld.128.xp q7,a9,a2 # [17] id:704 + ee.vld.128.xp q3,a9,a2 # [18] id:706 + ee.vmulas.s8.qacc.ld.xp q0,a10,a2,q0,q1 # [19] id:705 + ee.vmulas.s8.qacc.ld.xp q1,a10,a2,q6,q7 # [20] id:707 + ee.vmulas.s8.qacc.ld.xp q4,a10,a2,q0,q3 # [21] id:709 + ee.vld.128.xp q6,a9,a2 # [22] id:708 + ee.vld.128.xp q5,a9,a2 # [23] id:710 + ee.vmulas.s8.qacc q1,q6 # [24] + ee.vmulas.s8.qacc q4,q5 # [25] + + // extract data + mov a12,a1 //// scratch + ee.st.qacc_l.l.128.ip a12,16 # [27] id:713 + ee.st.qacc_l.h.32.ip a12,-16 # [28] id:714 + + l32i.n a9,a1,8 # [29] qacc_scratch+8 + l32i.n a11,a1,4 # [30] qacc_scratch+4 + l32i.n a15,a1,0 # [31] qacc_scratch + slli a14,a11,24 # [32] + sext a8,a15,19 # [33] + slli a10,a9,16 # [34] + slli a13,a11,4 # [35] + extui a9,a9,16,16 # [36] + srai a13,a13,12 # [37] + extui a15,a15,20,12 # [39] + srai a14,a14,12 # [40] + srai a10,a10,12 # [41] + extui a11,a11,28,4 # [42] + or a10,a10,a11 # [43] + or a14,a14,a15 # [44] + +// insert to q0 + ee.movi.32.q q0,a8,0 # [38] + ee.movi.32.q q0,a14,1 # [45] + ee.movi.32.q q0,a13,2 # [48] + ee.movi.32.q q0,a10,3 # [49] + + l32i.n a11,a1,16 # [46] qacc_scratch+16 + l32i.n a14,a1,12 # [47] qacc_scratch+12 + slli a13,a11,20 # [50] + + ee.st.qacc_h.l.128.ip a12,16 # [51] id:720 + ee.st.qacc_h.h.32.ip a12,-16 # [55] id:721 + srai a11,a11,12 # [52] + srai a13,a13,12 # [53] + slli a8,a14,28 # [54] + slli a15,a14,8 # [56] + srai a15,a15,12 # [57] + srai a8,a8,12 # [59] + + l32i.n a12,a1,8 # [328] qacc_scratch+8 + or a8,a8,a9 # [61] + extui a14,a14,24,8 # [62] + l32i.n a9,a1,0 # [63] qacc_scratch + or a13,a13,a14 # [64] +//insert to q3 + ee.movi.32.q q3,a8,0 # [65] + ee.movi.32.q q3,a15,1 # [67] + ee.movi.32.q q3,a13,2 # [69] + ee.movi.32.q q3,a11,3 # [70] + + l32i.n a14,a1,4 # [66] qacc_scratch+4 + sext a10,a9,19 # [68] + extui a9,a9,20,12 # [72] + slli a13,a12,16 # [73] + slli a8,a14,24 # [74] + extui a12,a12,16,16 # [75] + srai a13,a13,12 # [76] + srai a8,a8,12 # [77] + slli a15,a14,4 # [78] + srai a15,a15,12 # [79] + or a8,a8,a9 # [80] + extui a14,a14,28,4 # [81] + l32i.n a9,a1,12 # [82] qacc_scratch+12 + or a13,a13,a14 # [83] +// insert to q1 + 
ee.movi.32.q q1,a10,0 # [71] + ee.movi.32.q q1,a8,1 # [84] + ee.movi.32.q q1,a15,2 # [85] + ee.movi.32.q q1,a13,3 # [88] + +// load in_offset+bias factor + addmi a14,a1,256 # [86] + ld.qr q7,a14,-128 # [87] gra_spill_temp_134-256 + ld.qr q4,a14,-112 # [89] gra_spill_temp_135-256 + l32i.n a15,a1,16 # [90] qacc_scratch+16 + ld.qr q2,a14,-96 # [91] gra_spill_temp_133-256 + slli a11,a9,28 # [92] + slli a10,a9,8 # [93] + srai a10,a10,12 # [94] + srai a11,a11,12 # [95] + extui a9,a9,24,8 # [96] + or a11,a11,a12 # [97] + ee.vadds.s32 q0,q0,q2 # [98] + slli a8,a15,20 # [99] + ee.vadds.s32 q3,q3,q4 # [100] + st.qr q3,a1,80 # [101] gra_spill_temp_137-256 + srai a15,a15,12 # [102] + ld.qr q2,a14,-80 # [103] gra_spill_temp_132-256 + srai a8,a8,12 # [105] + or a8,a8,a9 # [108] + +// insert to q6 + ee.movi.32.q q6,a11,0 # [100] + ee.movi.32.q q6,a10,1 # [107] + ee.movi.32.q q6,a8,2 # [112] + ee.movi.32.q q6,a15,3 # [113] + + ee.vadds.s32 q1,q1,q2 # [110] + ee.vadds.s32 q6,q6,q7 # [114] + st.qr q1,a1,16 # [111] gra_spill_temp_120 + s32i.n a7,a1,32 # [0] // tmp + s32i.n a6,a1,36 # [106] // tmp + l32i a7,a1,52 # [109] gra_spill_temp_105, out_mult_ptr + l32i a6,a1,56 # [106] gra_spill_temp_106, out_shift_ptr + addi.n a10,a7,0 + addi.n a11,a6,0 + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [116] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + mv.qr q5,q0 + ld.qr q0,a1,80 # [4] gra_spill_temp_137-256 + addi.n a10,a7,16 + addi.n a11,a6,16 + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [5] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + mv.qr q4,q0 + ld.qr q0,a1,16 # [5] gra_spill_temp_120 + addi.n a10,a7,32 + addi.n a11,a6,32 + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [6] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + st.qr q0,a1,0 # [3] gra_spill_temp_141 + mv.qr q0,q6 + addi.n a10,a7,48 + addi.n a11,a6,48 + call8 esp_nn_multiply_by_quantized_mult_ver1_esp32s3 # [6] esp_nn_multiply_by_quantized_mult_ver1_esp32s3 + + + l32i.n a6,a1,36 # [106] // tmp + l32i.n a7,a1,32 # [0] // tmp + l32i a15,a1,40 # gra_spill_temp_103, stride_wd * channels + l32i a11,a1,120 # [3] gra_spill_temp_130 + + add.n a3,a3,a15 # [0] + add.n a4,a4,a15 # [1] + add.n a7,a7,a15 # [2] + addi.n a5,a5,1 # [4] + + // add offset, apply activation and store + addmi a13,a1,256 # [8] + ld.qr q3,a1,0 # [10] gra_spill_temp_141 + mv.qr q2,q5 + addi a8,a13,88 # [14] + addi a9,a13,100 # [15] + addi a15,a13,104 # [13] + ee.vldbc.32 q6,a9 # [17] id:723 activation_min + ee.vldbc.32 q1,a8 # [18] id:722 out_offset + ee.vldbc.32 q7,a15 # [19] id:724 activation_max + ee.vadds.s32 q4,q4,q1 # [20] + ee.vadds.s32 q2,q2,q1 # [21] + ee.vadds.s32 q5,q0,q1 # [22] + ee.vadds.s32 q3,q3,q1 # [23] + ee.vmin.s32 q3,q3,q7 # [24] + ee.vmin.s32 q5,q5,q7 # [25] + ee.vmin.s32 q2,q2,q7 # [26] + ee.vmin.s32 q4,q4,q7 # [27] + ee.vmax.s32 q4,q4,q6 # [28] + ee.vmax.s32 q2,q2,q6 # [29] + ee.vmax.s32 q5,q5,q6 # [30] + ee.vmax.s32 q3,q3,q6 # [31] + ee.vunzip.16 q3,q5 # [32] + ee.vunzip.16 q2,q4 # [33] + ee.vunzip.8 q2,q3 # [34] + ee.vst.128.xp q2,a6,a2 # [35] id:725 + bne a5,a11,.Lt_7_7170 # [36] + +.Lt_7_6658: # 0x112f +# Part of loop body line 548, head labeled .Lt_7_6402 + l32i a15,a1,112 # [3] gra_spill_temp_113, input_wd*channels + l32i a10,a1,320 # gra_spill_temp_103 + l32i a13,a1,340 # [0] // out_ht + l32i a9,a1,116 # [1] gra_spill_temp_114, input_wd + l32i a12,a1,96 # [4] gra_spill_temp_118 + mull a15,a10,a15 # // (input_wd * stride_ht) * channels + l32i a14,a1,104 # [5] gra_spill_temp_120 + l32i a8,a1,100 # [2] gra_spill_temp_119 + 
+ addi.n a12,a12,1 # [6] + s32i a12,a1,96 # [7] gra_spill_temp_118 + add.n a14,a14,a15 # [8] + add.n a8,a8,a9 # [9] + s32i a8,a1,100 # [10] gra_spill_temp_119 + s32i a14,a1,104 # [11] gra_spill_temp_120, (input_wd * stride_wd) * channels + bne a12,a13,.Lt_7_6402 # [13] // iterate over height loop + +# Part of loop body line 348, head labeled .Lt_7_5122 + l32i a11,a1,56 # [6] gra_spill_temp_106 // out_shift_ptr + l32i a15,a1,52 # [2] gra_spill_temp_105, out_mult_ptr + l32i a10,a1,60 # [24] gra_spill_temp_108, ch_idx + addi a11,a11,64 # [8] + addi a15,a15,64 # [13] + s32i a11,a1,56 # [23] gra_spill_temp_106 + s32i a15,a1,52 # [18] gra_spill_temp_105, out_mult_ptr + l32i a11,a1,64 # [25] gra_spill_temp_109 + addi a10,a10,16 # [26] + s32i a10,a1,60 # [27] gra_spill_temp_108, ch_idx + blt a10,a11,.Lt_7_5122 # [28] // iterate over outer most out_ch loop + +.Lt_7_4610: # 0x11ad + retw.n # [0] + + .size esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3, . - esp_nn_depthwise_conv_s8_mult1_3x3_padded_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/fully_connected/esp_nn_fully_connected_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/fully_connected/esp_nn_fully_connected_ansi.c new file mode 100644 index 0000000..788a65b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/fully_connected/esp_nn_fully_connected_ansi.c @@ -0,0 +1,54 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
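+
+// Reference (non-vectorized) fully-connected kernel. For each of the
+// out_channels outputs it computes
+//
+//   acc    = sum over row_len of (filter + filter_offset) * (input + input_offset)
+//            (+ bias[c] when bias is non-NULL)
+//   out[c] = clamp(requantize(acc, out_mult, out_shift) + out_offset,
+//                  activation_min, activation_max)
+//
+// i.e. the same per-channel math that the hand-written
+// esp_nn_fully_connected_s8_esp32s3.S added below implements. Illustrative
+// call for a hypothetical 4-class head -- the buffer names and sizes here are
+// placeholders, not values taken from this firmware:
+//
+//   int8_t logits[4];
+//   esp_nn_fully_connected_s8_ansi(features, input_offset, /*row_len=*/128,
+//                                  weights, /*filter_offset=*/0, bias, logits,
+//                                  /*out_channels=*/4, out_offset, out_shift,
+//                                  out_mult, /*activation_min=*/-128,
+//                                  /*activation_max=*/127);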
+ +#include + +#include + +void esp_nn_fully_connected_s8_ansi(const int8_t *input_data, + const int32_t input_offset, + const uint16_t row_len, + const int8_t *filter_data, + const int32_t filter_offset, + const int32_t *bias, + int8_t *out_data, + const uint16_t out_channels, + const int32_t out_offset, + const int32_t out_shift, + const int32_t out_mult, + const int32_t activation_min, + const int32_t activation_max) +{ + for (int32_t out_c = 0; out_c < out_channels; ++out_c) { + int32_t result = 0; + for (int32_t data_idx = 0; data_idx < row_len; data_idx++) { + int32_t filter_index = row_len * out_c + data_idx; + int32_t input_val = input_data[data_idx]; + int32_t filter_val = filter_data[filter_index]; + result += (filter_val + filter_offset) * (input_val + input_offset); + } + if (bias) { + result += bias[out_c]; + } + result = esp_nn_multiply_by_quantized_mult(result, out_mult, out_shift); + result += out_offset; + result = max(result, activation_min); + result = min(result, activation_max); + out_data[out_c] = (int8_t) result; + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/fully_connected/esp_nn_fully_connected_s8_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/fully_connected/esp_nn_fully_connected_s8_esp32s3.S new file mode 100644 index 0000000..9c1a835 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/fully_connected/esp_nn_fully_connected_s8_esp32s3.S @@ -0,0 +1,220 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// +// SPDX-FileCopyrightText: 2020-2023 Espressif Systems (Shanghai) CO LTD +// +// SPDX-License-Identifier: Apache-2.0 +// + .text + .align 4 + .literal_position + .literal .LC3_26_101, 1073741824 // nudge (1 << 30) + + # Program Unit: esp_nn_fully_connected_s8_esp32s3 + .type esp_nn_fully_connected_s8_esp32s3, @function + .align 4 + .global esp_nn_fully_connected_s8_esp32s3 + +// a2: input_data +// a3: input_offset +// a4: row_len +// a5: filter_data +// a6: filter_offset +// a7: bias +// on stack: out_data +// on stack: out_channels +// on stack: out_offset +// on stack: out_shift +// on stack: out_mult +// on stack: activation_min +// on stack: activation_max + +esp_nn_fully_connected_s8_esp32s3: # 0x4 + # qacc_scratch = 0 + // 40, filter_offset + // 44, input_offset + # gra_spill_temp_7 = 48 + # gra_spill_temp_0 = 52 + # gra_spill_temp_1 = 56 + # gra_spill_temp_2 = 60 + # gra_spill_temp_3 = 64 + # gra_spill_temp_4 = 68 + # gra_spill_temp_5 = 72 + # gra_spill_temp_6 = 76 + + entry a1,112 # + s32i.n a5,a1,60 # [0] gra_spill_temp_2, filter_data + s32i a7,a1,48 # [1] gra_spill_temp_7, bias + s32i a6,a1,40 # [2] id:252 filter_offset+0x0 + s32i a3,a1,44 # [3] id:251 input_offset+0x0 + mov.n a13,a2 # [5] + mov.n a12,a4 # [6] + + // out_channel loop + l16ui a2,a1,116 # [7] id:255 out_channels+0x0 + addi a4,a1,40 # [8] + addi a8,a1,44 # [9] + ee.vldbc.16 q5,a8 # [10] id:253 input_offset + ee.vldbc.16 q6,a4 # [12] id:254 filter_offset + beqz.n a2,.Lt_0_7938 # [13] + + ee.zero.q q7 # [0] + srai a11,a12,3 # [2] + l32i a10,a1,128 # [5] id:257 out_mult+0x0 + l32i a8,a1,112 # [6] id:259 out_data+0x0 + addi a9,a12,-7 # [7] + s32i a9,a1,76 # [8] gra_spill_temp_6 + s32i a8,a1,72 # [9] gra_spill_temp_5 + s32i a11,a1,64 # [14] gra_spill_temp_3 + slli a11,a11,3 # 
[16] + s32i a11,a1,68 # [18] gra_spill_temp_4 + l32i a10,a1,124 # [25] id:256 out_shift+0x0 + movi.n a15,0 # [17] + mov.n a14,a7 # [15] + max a11,a10,a15 # [29] + s32i a11,a1,52 # [30] gra_spill_temp_0 // left_shift + sub a10,a11,a10 # // right_shift + s32i.n a10,a1,56 # [28] gra_spill_temp_1 // right_shift + mov.n a11,a5 # [31] + movi.n a10,0 # [32] + mov.n a2,a11 # [33] + +.Lt_0_8450: # 0x12b + + l32i a9,a1,76 # [2] gra_spill_temp_6 + extui a5,a11,0,3 # [34] + ee.zero.accx + slli a5,a5,1 # [3] + bgei a9,0,.LBB6_esp_nn_fully_connected_s8_esp32s3 # [9] + + mov.n a5,a10 # [6] + movi.n a2,0 # [0] + j .Lt_0_8706 # [1] + +.LBB6_esp_nn_fully_connected_s8_esp32s3: # 0x147 + wur.sar_byte a5 # [5] + ee.vld.l.64.ip q4,a2,8 # [4] id:267 + l32i a4,a1,64 # [0] gra_spill_temp_3 + mov.n a3,a13 # [1] + addx8 a5,a4,a10 # [2] + ee.vcmp.lt.s8 q2,q4,q7 # [7] + ee.vzip.8 q4,q2 # [8] + loopgtz a4,.LBB45_esp_nn_fully_connected_s8_esp32s3 # [3] + + ee.vld.l.64.ip q0,a2,8 # [0*II+0] id:268 + ee.vld.l.64.ip q1,a3,8 # [0*II+1] id:270 + ee.vcmp.lt.s8 q2,q0,q7 # [0*II+2] + ee.vcmp.lt.s8 q3,q1,q7 # [0*II+3] + ee.vzip.8 q0,q2 # [0*II+4] + ee.vzip.8 q1,q3 # [0*II+5] + ee.vadds.s16 q1,q1,q5 # [0*II+6] + ee.src.q.qup q2,q4,q0 # [0*II+7] + ee.vadds.s16 q2,q2,q6 # [0*II+8] + ee.vmulas.s16.accx q1,q2 # [0*II+9] + +.LBB45_esp_nn_fully_connected_s8_esp32s3: # 0x170 + l32i a2,a1,68 # [0] gra_spill_temp_4 + +.Lt_0_8706: # 0x173 + movi a9, 0 + ee.srs.accx a6, a9, 0 + + bge a2,a12,.Lt_0_9730 # [38] + +// prepare remaining loop + l32i a8,a1,44 # [0] id:251 input_offset+0x0 + l32i a7,a1,40 # [1] id:252 filter_offset+0x0 + sub a3,a12,a2 # [2] + l32i.n a4,a1,60 # [3] gra_spill_temp_2 + add.n a2,a2,a13 # [4] + add.n a4,a4,a5 # [5] + loopgtz a3,.LBB60_esp_nn_fully_connected_s8_esp32s3 # [6] + +// remaining c loop + l8ui a3,a2,0 # [0*II+0] id:299 + l8ui a5,a4,0 # [0*II+1] id:300 + sext a3,a3,7 # [0*II+2] + sext a5,a5,7 # [0*II+3] + add.n a5,a5,a7 # [0*II+5] + add.n a3,a3,a8 # [0*II+6] + mull a3,a3,a5 # [0*II+7] + addi.n a2,a2,1 # [0*II+8] + addi.n a4,a4,1 # [0*II+4] + add.n a6,a6,a3 # [0*II+9] + +.LBB60_esp_nn_fully_connected_s8_esp32s3: # 0x20f + +// add bias +.Lt_0_9730: # 0x20f + l32i a8,a1,48 # [0] gra_spill_temp_7, bias + beqz.n a8,.Lt_0_10754 # [2], skip_bias + + l32i.n a9,a14,0 # [0] id:301 + add.n a6,a6,a9 # [2] + +// apply quantization +.Lt_0_10754: # 0x218 + l32i a2,a1,52 # [1] gra_spill_temp_0 // left_shift + l32i a5,a1,56 # [2] gra_spill_temp_1 // right_shift + ssl a2 # [3] + sll a6,a6 # [5] // x * (1 << left_shift) + + l32r a3,.LC3_26_101 # [0] + + add.n a10,a10,a12 # [0] + addi.n a14,a14,4 # [1] + + l32i a4,a1,128 # [2] gra_spill_temp_10 //out_mult + add.n a11,a11,a12 # [6] + +// multiply add nudge and pick high32 + ssai 31 + mulsh a7,a4,a6 # [4] + mull a4,a4,a6 # [5] + + mov.n a2,a11 # [27] + add a4,a4,a3 + saltu a8,a4,a3 + add.n a7,a7,a8 + src a3,a7,a4 + +// divide_by_power_of2_step + blti a5,1,.skip_divide_by2 + movi.n a8,1 # [28] + addi a4,a5,-1 + ssl a4 // load left_shift + sll a8,a8 // to_add factor ( 1 << (exponent - 1)) + extui a6,a3,31,1 # [33] + sub a8,a8,a6 // modified to_add factor ( 1 << (exponent - 1) - (val < 0)) + add a3,a3,a8 // val + to_add + ssr a5 # [29] //load right_shift + sra a3,a3 # [31] +.skip_divide_by2: + + l32i a8,a1,120 # [41] out_offset + l32i a7,a1,132 # [44] // activation_min + l32i a4,a1,136 # [45] // activation_max + + add.n a8,a8,a3 # [46] // add out_offset + l32i a6,a1,72 # [47] gra_spill_temp_5 + l32i.n a3,a1,116 # [48] out_channels + max a7,a7,a8 # [49] + add.n a6,a15,a6 # [50] + min 
a4,a4,a7 # [51] + addi.n a15,a15,1 # [52] + s8i a4,a6,0 # [53] id:302 + bne a3,a15,.Lt_0_8450 # [55] + +.Lt_0_7938: # 0x25c + retw.n # [0] + + .size esp_nn_fully_connected_s8_esp32s3, . - esp_nn_fully_connected_s8_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_avg_pool_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_avg_pool_ansi.c new file mode 100644 index 0000000..84bb786 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_avg_pool_ansi.c @@ -0,0 +1,76 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include + +void esp_nn_avg_pool_s8_ansi(const int8_t *input, + const uint16_t input_wd, + const uint16_t input_ht, + int8_t *output, + const uint16_t output_wd, + const uint16_t output_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t filter_wd, + const uint16_t filter_ht, + const uint16_t pad_wd, + const uint16_t pad_ht, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t channels) +{ + int32_t base_y = -pad_ht; + for (int32_t out_y = 0; out_y < output_ht; out_y++, base_y += stride_ht) { + int32_t base_x = -pad_wd; + for (int32_t out_x = 0; out_x < output_wd; out_x++, base_x += stride_wd) { + for (int32_t ch_idx = 0; ch_idx < channels; ch_idx++) { + int32_t result = 0; + int32_t filter_cnt = 0; + /* Make sure filter does not cross the input box */ + int32_t filter_y_start = max(0, -base_y); + int32_t filter_x_start = max(0, -base_x); + + int32_t filter_y_end = min(filter_ht, input_ht - base_y); + int32_t filter_x_end = min(filter_wd, input_wd - base_x); + + for (int32_t filter_y = filter_y_start; filter_y < filter_y_end; filter_y++) { + for (int32_t filter_x = filter_x_start; filter_x < filter_x_end; filter_x++) { + int32_t in_x_idx = base_x + filter_x; + int32_t in_y_idx = base_y + filter_y; + int32_t input_index = (in_y_idx * input_wd + in_x_idx) * channels + ch_idx; + result += input[input_index]; + filter_cnt++; + } + } + + /* Rounded average */ + result = result > 0 ? 
(result + filter_cnt / 2) / filter_cnt + : (result - filter_cnt / 2) / filter_cnt; + + /* Activation function */ + result = max(result, activation_min); + result = min(result, activation_max); + + int32_t output_index = (out_y * output_wd + out_x) * channels + ch_idx; + output[output_index] = (int8_t) result; + } + } + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_avg_pool_s8_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_avg_pool_s8_esp32s3.S new file mode 100644 index 0000000..9e76a1e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_avg_pool_s8_esp32s3.S @@ -0,0 +1,686 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + .text + .align 4 + .literal_position + + # Program Unit: esp_nn_avg_pool_s8_esp32s3 + .type esp_nn_avg_pool_s8_esp32s3, @function + .align 4 + .global esp_nn_avg_pool_s8_esp32s3 + +// no of channels must be multiple of 4. 
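// The rounded average in the ANSI reference above, and the add/subtract of
// count/2 followed by a signed divide (quos) in the esp32s3 kernel below,
// both come down to the same scalar step. A minimal hedged sketch;
// rounded_avg is an illustrative name, not a symbol defined in this patch:

#include <stdint.h>

static inline int32_t rounded_avg(int32_t sum, int32_t count)
{
    /* Truncating division after adding (or subtracting) half the divisor
       rounds the quotient to the nearest integer, half away from zero. */
    return (sum > 0) ? (sum + count / 2) / count
                     : (sum - count / 2) / count;
}

/* Example: rounded_avg(7, 4) == 2 and rounded_avg(-7, 4) == -2. */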
+ +// a2: input +// a3: input_wd +// a4: input_ht +// a5: output +// a6: output_wd +// a7: output_ht +// on stack: stride_wd +// on stack: stride_ht +// on stack: filter_wd +// on stack: filter_ht +// on stack: pad_wd +// on stack: pad_ht +// on stack: activation_min +// on stack: activation_max +// on stack: channels + +esp_nn_avg_pool_s8_esp32s3: # 0x4 + # activation_min = 0 + # activation_max = 4 + # gra_spill_temp_0 = 8 + # gra_spill_temp_1 = 12 + # gra_spill_temp_2 = 16 + # gra_spill_temp_3 = 20 + # gra_spill_temp_4 = 24 + # gra_spill_temp_5 = 28 + # gra_spill_temp_6 = 32 + # gra_spill_temp_7 = 36 + # gra_spill_temp_8 = 40 + # gra_spill_temp_9 = 44 + # gra_spill_temp_10 = 48 + # gra_spill_temp_11 = 52 + # gra_spill_temp_12 = 56 + # gra_spill_temp_13 = 60 + # gra_spill_temp_14 = 64 + # gra_spill_temp_15 = 68 + # gra_spill_temp_16 = 72 + # gra_spill_temp_17 = 76 + # gra_spill_temp_18 = 80 + # gra_spill_temp_19 = 84 + # gra_spill_temp_20 = 88 + # gra_spill_temp_21 = 92 + # gra_spill_temp_22 = 96 + # gra_spill_temp_23 = 100 + # gra_spill_temp_24 = 104 + # gra_spill_temp_25 = 108 + # gra_spill_temp_26 = 112 + # gra_spill_temp_27 = 116 + # gra_spill_temp_28 = 120 + # gra_spill_temp_29 = 124 + # gra_spill_temp_30 = 128 + # gra_spill_temp_31 = 132 + # gra_spill_temp_32 = 136 + # gra_spill_temp_33 = 140 + # gra_spill_temp_34 = 144 + # gra_spill_temp_35 = 148 + # gra_spill_temp_36 = 152 + # gra_spill_temp_37 = 156 + # gra_spill_temp_38 = 160 + # gra_spill_temp_39 = 164 + # gra_spill_temp_40 = 168 + # gra_spill_temp_41 = 172 + # gra_spill_temp_43 = 180 + + entry a1,240 # + mov.n a11,a3 # [0] + mov.n a12,a2 # [1] + s32i a5,a1,136 # [4] gra_spill_temp_30 + s32i a6,a1,128 # [3] gra_spill_temp_32 + + l16ui a5,a1,272 # [5] id:663 channels+0x0 + s32i a7,a1,72 # [6] gra_spill_temp_16 + + l32i a9,a1,264 # [1] id:664 activation_min+0x0 + l32i a10,a1,268 # [2] id:666 activation_max+0x0 + s32i.n a9,a1,0 # [4] activation_min + s32i.n a10,a1,4 # [3] activation_max + addi.n a8,a1,4 # [0] activation_max + ee.vldbc.32 q7,a1 # [5] id:668 activation_min + ee.vldbc.32 q6,a8 # [6] id:669 activation_max + ee.zero.q q4 # [0] + + extui a10,a5,0,3 # [7] + beqz.n a10,.LBB3_esp_nn_avg_pool_s8_esp32s3 # [8], if (channels % 8 == 0) + + extui a13,a5,0,2 # [0] + beqz.n a13,.LBB52_esp_nn_avg_pool_s8_esp32s3 # [1], if (channels % 4 == 0) + +// exit +.Lt_0_44546: # 0x1e9 + retw.n # [0] + +.LBB3_esp_nn_avg_pool_s8_esp32s3: # 0x1eb // if (channels % 8 == 0) + + l16ui a7,a1,256 # [1] id:671 pad_wd+0x0 + l16ui a10,a1,260 # [5] id:670 pad_ht+0x0 + l32i a15,a1,72 # [12] gra_spill_temp_16 + movi.n a14,0 # [13] + movi.n a8,0 # [14] + neg a10,a10 # [15] + s32i a10,a1,56 # [16] gra_spill_temp_12 + s32i a8,a1,44 # [17] gra_spill_temp_9 + s32i.n a14,a1,20 # [18] gra_spill_temp_3 + sub a9,a4,a10 # [19] + s32i a9,a1,40 # [20] gra_spill_temp_8 + mul16u a15,a15,a5 # [21] + neg a13,a7 # [22] + s32i a13,a1,104 # [23] gra_spill_temp_24 + s32i.n a15,a1,16 # [24] gra_spill_temp_2 + sub a13,a3,a13 # [25] + s32i.n a13,a1,12 # [26] gra_spill_temp_1 + j .Lt_0_28162 # [27] + +.Lt_0_28418: # 0x24e +# Part of loop body line 44, head labeled .Lt_0_28162 + l32i a15,a1,260 # [0] pad_ht + l32i a14,a1,56 # [1] gra_spill_temp_12 + l32i.n a9,a1,16 # [2] gra_spill_temp_2 + l32i a13,a1,244 # [3] stride_ht + l32i a10,a1,40 # [4] gra_spill_temp_8 + l32i a8,a1,44 # [5] gra_spill_temp_9 + sub a10,a10,a13 # [6] + add.n a8,a8,a9 # [7] + add.n a14,a14,a13 # [8] + sub a15,a15,a13 # [9] + s32i a15,a1,260 # [10] pad_ht + s32i a14,a1,56 # [11] gra_spill_temp_12 + s32i 
a8,a1,44 # [12] gra_spill_temp_9 + s32i a10,a1,40 # [13] gra_spill_temp_8 + l32i.n a8,a1,20 # [14] gra_spill_temp_3 + l32i a9,a1,72 # [15] gra_spill_temp_16 + addi.n a8,a8,1 # [16] + s32i.n a8,a1,20 # [17] gra_spill_temp_3 + beq a8,a9,.Lt_0_44546 # [18] + +.Lt_0_28162: # 0x281 + l32i a10,a1,128 # [0] gra_spill_temp_32 + beqz.n a10,.Lt_0_28418 # [2] + +.LBB7_esp_nn_avg_pool_s8_esp32s3: # 0x286 +# Part of loop body line 44, head labeled .Lt_0_28162 + s32i a7,a1,112 # [0] gra_spill_temp_26 + movi.n a10,0 # [1] + l32i a9,a1,260 # [2] pad_ht + l32i.n a6,a1,12 # [3] gra_spill_temp_1 + l32i a8,a1,44 # [4] gra_spill_temp_9 + movi.n a13,0 # [5] + l32i a15,a1,104 # [6] gra_spill_temp_24 + s32i a15,a1,116 # [7] gra_spill_temp_27 + s32i a13,a1,48 # [8] gra_spill_temp_10 + s32i a8,a1,124 # [9] gra_spill_temp_29 + s32i a6,a1,120 # [10] gra_spill_temp_28 + l32i a8,a1,40 # [11] gra_spill_temp_8 + l32i a6,a1,252 # [12] filter_ht + movi.n a13,0 # [13] + max a9,a9,a10 # [14] + s32i a9,a1,160 # [15] gra_spill_temp_38 + s32i a13,a1,92 # [16] gra_spill_temp_21 + min a6,a6,a8 # [17] + bnez.n a5,.LBB10_esp_nn_avg_pool_s8_esp32s3 # [18] + +.Lt_0_29186: # 0x2ba + l32i a8,a1,116 # [0] gra_spill_temp_27 + l32i a15,a1,120 # [1] gra_spill_temp_28 + l32i a9,a1,48 # [2] gra_spill_temp_10 + l32i a14,a1,240 # [3] stride_wd + l32i a10,a1,124 # [4] gra_spill_temp_29 + l32i a13,a1,112 # [5] gra_spill_temp_26 + add.n a10,a10,a5 # [6] + s32i a10,a1,124 # [7] gra_spill_temp_29 + sub a13,a13,a14 # [8] + add.n a9,a9,a14 # [9] + sub a15,a15,a14 # [10] + add.n a8,a8,a14 # [11] + s32i a8,a1,116 # [12] gra_spill_temp_27 + s32i a15,a1,120 # [13] gra_spill_temp_28 + s32i a9,a1,48 # [14] gra_spill_temp_10 + s32i a13,a1,112 # [15] gra_spill_temp_26 + l32i a9,a1,92 # [16] gra_spill_temp_21 + l32i a10,a1,128 # [17] gra_spill_temp_32 + addi.n a9,a9,1 # [18] + s32i a9,a1,92 # [19] gra_spill_temp_21 + beq a9,a10,.Lt_0_28418 # [20] + +.Lt_0_28930: # 0x2f5 +# Part of loop body line 46, head labeled .Lt_0_29186 + beqz.n a5,.Lt_0_29186 # [0] + +.LBB10_esp_nn_avg_pool_s8_esp32s3: # 0x2f7 +# Part of loop body line 44, head labeled .Lt_0_28162 + l32i a14,a1,120 # [0] gra_spill_temp_28 + l32i a13,a1,248 # [1] filter_wd + l32i a9,a1,136 # [2] gra_spill_temp_30 + l32i a8,a1,124 # [3] gra_spill_temp_29 + movi.n a15,0 # [4] + s32i a15,a1,24 # [5] gra_spill_temp_60 + add.n a10,a8,a5 # [6] + movi.n a15,0 # [7] + add.n a8,a8,a9 # [8] + min a13,a13,a14 # [9] + add.n a10,a9,a10 # [10] + s32i a10,a1,180 # [11] gra_spill_temp_43 + s32i a13,a1,76 # [12] gra_spill_temp_17 + l32i a14,a1,112 # [13] gra_spill_temp_26 + s32i a8,a1,148 # [14] gra_spill_temp_45 + max a14,a14,a15 # [15] + l32i a15,a1,116 # [16] gra_spill_temp_27 + s32i a14,a1,152 # [17] gra_spill_temp_63 + add.n a8,a15,a14 # [18] + s32i a8,a1,36 # [19] gra_spill_temp_7 + add.n a15,a15,a13 # [20] + s32i a15,a1,204 # [21] gra_spill_temp_39 + sub a13,a13,a14 # [22] + s32i a13,a1,280 # [23] gra_spill_temp_58 + j .Lt_0_29698 # [24] + +.LBB13_esp_nn_avg_pool_s8_esp32s3: # 0x33b +# Part of loop body line 16, head labeled .Lt_0_29698 + l32i a10,a1,56 # [0] gra_spill_temp_12 + l32i a14,a1,204 # [1] gra_spill_temp_39 + add.n a10,a10,a15 # [2] + mull a10,a11,a10 # [3] + movi.n a15,0 # [4] + add.n a14,a10,a14 # [5] + +.Lt_0_30466: # 0x34a +# Loop body line 61, nesting depth: 4, estimated iterations: 252 + l32i a9,a1,76 # [0] gra_spill_temp_17 + l32i a8,a1,152 # [1] gra_spill_temp_63 + add.n a14,a14,a11 # [2] + bge a8,a9,.Lt_0_30722 # [3] + +.LBB16_esp_nn_avg_pool_s8_esp32s3: # 0x355 +# Part of loop body line 61, 
head labeled .Lt_0_30466 + l32i a3,a1,36 # [0] gra_spill_temp_7 + l32i a2,a1,24 # [1] gra_spill_temp_4 + add.n a3,a3,a10 # [2] + mull a3,a3,a5 # [3] + movi.n a8,0 # [4] + add.n a2,a2,a3 # [5] + l32i a3,a1,280 # [6] gra_spill_temp_58 + add.n a2,a12,a2 # [7] + loopgtz a3,.LBB140_esp_nn_avg_pool_s8_esp32s3 # [8] + + ee.vld.l.64.xp q0,a2,a5 # [0*II+1] id:677 + ee.vcmp.lt.s8 q1,q0,q4 # [0*II+3] + ee.vzip.8 q0,q1 # [0*II+4] + ee.vcmp.lt.s16 q1,q0,q4 # [0*II+5] + ee.vzip.16 q0,q1 # [0*II+6] + ee.vadds.s32 q2,q2,q1 # [0*II+7] + ee.vadds.s32 q3,q3,q0 # [0*II+8] + + +.LBB140_esp_nn_avg_pool_s8_esp32s3: # 0x385 +# Part of loop body line 61, head labeled .Lt_0_30466 + l32i a2,a1,48 # [0] gra_spill_temp_10 + sub a9,a7,a2 # [2] + sub a2,a2,a7 # [3] + max a9,a9,a8 # [4] + l32i a8,a1,248 # [5] filter_wd + sub a2,a11,a2 # [6] + min a8,a8,a2 # [7] + sub a8,a8,a9 # [8] + add.n a15,a15,a8 # [9] + +.Lt_0_30722: # 0x39f +# Part of loop body line 61, head labeled .Lt_0_30466 + add.n a10,a10,a11 # [0] + addi.n a13,a13,1 # [1] + bne a6,a13,.Lt_0_30466 # [2] + +.Lt_0_29954: # 0x3a6 + srai a2,a15,1 # [3] + +// move data to general purpose registers and average + ee.movi.32.a q3,a9,0 # [0] + ee.movi.32.a q3,a4,1 # [0] + + blti a9,1,.Lt_0_32258 # [4] + add.n a9,a9,a2 # [0] + j .Lt_0_32002 # [2] +.Lt_0_32258: # 0x45e + sub a9,a9,a2 # [0] +.Lt_0_32002: # 0x3b9 + + blti a4,1,.Lt_0_32770 # [1] + add.n a4,a2,a4 # [0] + j .Lt_0_32514 # [2] +.Lt_0_32770: + sub a4,a4,a2 # [0] +.Lt_0_32514: # 0x3c4 + + quos a9,a9,a15 # [1] + quos a4,a4,a15 # [1] + ee.movi.32.q q3,a9,0 # [0] + ee.movi.32.q q3,a4,1 # [1] + + ee.movi.32.a q3,a9,2 # [2] + ee.movi.32.a q3,a14,3 # [0] + + blti a9,1,.Lt_0_33282 # [3] + add.n a9,a9,a2 # [0] + j .Lt_0_33026 # [2] +.Lt_0_33282: # 0x470 + sub a9,a9,a2 # [0] +.Lt_0_33026: # 0x3d5 + + blti a14,1,.Lt_0_33794 # [1] + add.n a14,a2,a14 # [0] + j .Lt_0_33538 # [2] +.Lt_0_33794: # 0x479 + sub a14,a14,a2 # [0] +.Lt_0_33538: # 0x3e0 + + quos a9,a9,a15 # [1] + quos a14,a14,a15 # [1] + ee.movi.32.q q3,a9,2 # [0] + ee.movi.32.q q3,a14,3 # [1] + + + ee.movi.32.a q2,a9,0 # [0] + ee.movi.32.a q2,a4,1 # [0] + + blti a9,1,.Lt_0_34306 # [3] + add.n a9,a9,a2 # [0] + j .Lt_0_34050 # [2] +.Lt_0_34306: # 0x482 + sub a9,a9,a2 # [0] +.Lt_0_34050: # 0x3f1 + + blti a4,1,.Lt_0_34818 # [1] + add.n a4,a2,a4 # [0] + j .Lt_0_34562 # [2] +.Lt_0_34818: # 0x48b + sub a4,a4,a2 # [0] +.Lt_0_34562: # 0x3fc + + quos a9,a9,a15 # [1] + quos a4,a4,a15 # [1] + ee.movi.32.q q2,a9,0 # [0] + ee.movi.32.q q2,a4,1 # [1] + + ee.movi.32.a q2,a9,2 # [2] + ee.movi.32.a q2,a14,3 # [0] + + blti a9,1,.Lt_0_35330 # [3] + add.n a9,a9,a2 # [0] + j .Lt_0_35074 # [2] +.Lt_0_35330: # 0x494 + sub a9,a9,a2 # [0] +.Lt_0_35074: # 0x40d + + blti a14,1,.Lt_0_35842 # [1] + add.n a14,a2,a14 # [0] + j .Lt_0_35586 # [2] +.Lt_0_35842: # 0x49d + sub a14,a14,a2 # [0] +.Lt_0_35586: # 0x418 + + quos a9,a9,a15 # [1] + quos a14,a14,a15 # [1] + ee.movi.32.q q2,a9,2 # [0] + ee.movi.32.q q2,a14,3 # [1] + + + l32i a9,a1,180 # [0] gra_spill_temp_43 + l32i a14,a1,24 # [1] gra_spill_temp_4 + l32i a13,a1,148 # [2] gra_spill_temp_45 + ee.vmin.s32 q1,q3,q6 # [4] + ee.vmax.s32 q1,q1,q7 # [5] + ee.vmin.s32 q5,q2,q6 # [8] + addi.n a14,a14,8 # [9] + s32i a14,a1,24 # [10] gra_spill_temp_4 + ee.vmax.s32 q5,q5,q7 # [11] + addi.n a8,a13,8 # [12] + s32i a8,a1,148 # [13] gra_spill_temp_45 + ee.vunzip.16 q1,q5 # [14] + ee.vunzip.8 q1,q5 # [15] + ee.vst.l.64.ip q1,a13,0 # [16] id:678 + bge a8,a9,.Lt_0_29186 # [17] + +.Lt_0_29698: # 0x44b +# Loop body line 16, nesting depth: 3, estimated iterations: 252 
+ mv.qr q3,q4 # [0] + l32i a15,a1,160 # [1] gra_spill_temp_38 + mv.qr q2,q4 # [2] + mov.n a13,a15 # [3] + blt a15,a6,.LBB13_esp_nn_avg_pool_s8_esp32s3 # [4] + +.Lt_0_51458: # 0x459 +# Part of loop body line 16, head labeled .Lt_0_29698 + movi.n a15,0 # [0] + j .Lt_0_29954 # [1] + + +.LBB52_esp_nn_avg_pool_s8_esp32s3: # 0x4a6 // if (channels % 4 == 0) + + l16ui a7,a1,256 # [1] id:671 pad_wd+0x0 + l16ui a13,a1,260 # [5] id:670 pad_ht+0x0 + s32i a13,a1,64 # [8] gra_spill_temp_4 + l32i a8,a1,72 # [12] gra_spill_temp_16 + movi.n a15,0 # [13] + movi.n a9,0 # [14] + neg a13,a13 # [15] + s32i a13,a1,192 # [16] gra_spill_temp_36 + s32i a9,a1,32 # [17] gra_spill_temp_6 + s32i.n a15,a1,8 # [18] gra_spill_temp_0 + sub a10,a4,a13 # [19] + s32i a10,a1,28 # [20] gra_spill_temp_5 + mul16u a8,a8,a5 # [21] + neg a14,a7 # [22] + s32i a14,a1,104 # [23] gra_spill_temp_24 + s32i.n a8,a1,16 # [24] gra_spill_temp_2 + sub a14,a3,a14 # [25] + s32i.n a14,a1,12 # [26] gra_spill_temp_1 + j .Lt_0_37890 # [27] + +.Lt_0_38146: # 0x50b +# Part of loop body line 161, head labeled .Lt_0_37890 + l32i a15,a1,64 # [0] gra_spill_temp_4 + l32i a14,a1,192 # [1] gra_spill_temp_36 + l32i.n a9,a1,16 # [2] gra_spill_temp_2 + l32i a13,a1,244 # [3] stride_ht + l32i a10,a1,28 # [4] gra_spill_temp_5 + l32i a8,a1,32 # [5] gra_spill_temp_6 + sub a10,a10,a13 # [6] + add.n a8,a8,a9 # [7] + add.n a14,a14,a13 # [8] + sub a15,a15,a13 # [9] + s32i a15,a1,64 # [10] gra_spill_temp_4 + s32i a14,a1,192 # [11] gra_spill_temp_36 + s32i a8,a1,32 # [12] gra_spill_temp_6 + s32i a10,a1,28 # [13] gra_spill_temp_5 + l32i.n a8,a1,8 # [14] gra_spill_temp_0 + l32i a9,a1,72 # [15] gra_spill_temp_16 + addi.n a8,a8,1 # [16] + s32i.n a8,a1,8 # [17] gra_spill_temp_0 + sub a8,a8,a9 # [18] + beqz a8,.Lt_0_44546 # [19] + +.Lt_0_37890: # 0x541 +# Loop body line 161, nesting depth: 1, estimated iterations: 252 + l32i a10,a1,128 # [0] gra_spill_temp_32 + beqz.n a10,.Lt_0_38146 # [2] + +# Part of loop body line 161, head labeled .Lt_0_37890 + s32i a7,a1,96 # [0] gra_spill_temp_22 + movi.n a10,0 # [1] + l32i a9,a1,64 # [2] gra_spill_temp_4 + l32i.n a6,a1,12 # [3] gra_spill_temp_1 + l32i a8,a1,32 # [4] gra_spill_temp_6 + movi.n a13,0 # [5] + l32i a15,a1,104 # [6] gra_spill_temp_24 + s32i a15,a1,100 # [7] gra_spill_temp_23 + s32i a13,a1,148 # [8] gra_spill_temp_35 + s32i a8,a1,108 # [9] gra_spill_temp_25 + s32i a6,a1,144 # [10] gra_spill_temp_24 + l32i a8,a1,28 # [11] gra_spill_temp_5 + l32i a6,a1,252 # [12] filter_ht + max a9,a9,a10 # [14] + s32i a9,a1,168 # [15] gra_spill_temp_40 + s32i a13,a1,88 # [16] gra_spill_temp_20 + min a6,a6,a8 # [17] + bnez.n a5,.LBB59_esp_nn_avg_pool_s8_esp32s3 # [18] + +.Lt_0_38914: # 0x57a +# Loop body line 163 + l32i a8,a1,100 # [0] gra_spill_temp_23 + l32i a15,a1,144 # [1] gra_spill_temp_24 + l32i a9,a1,148 # [2] gra_spill_temp_35 + l32i a14,a1,240 # [3] stride_wd + l32i a10,a1,108 # [4] gra_spill_temp_25 + l32i a13,a1,96 # [5] gra_spill_temp_22 + add.n a10,a10,a5 # [6] + s32i a10,a1,108 # [7] gra_spill_temp_25 + sub a13,a13,a14 # [8] + add.n a9,a9,a14 # [9] + sub a15,a15,a14 # [10] + add.n a8,a8,a14 # [11] + s32i a8,a1,100 # [12] gra_spill_temp_23 + s32i a15,a1,144 # [13] gra_spill_temp_24 + s32i a9,a1,148 # [14] gra_spill_temp_35 + s32i a13,a1,96 # [15] gra_spill_temp_22 + l32i a9,a1,88 # [16] gra_spill_temp_20 + l32i a10,a1,128 # [17] gra_spill_temp_32 + addi.n a9,a9,1 # [18] + s32i a9,a1,88 # [19] gra_spill_temp_20 + beq a9,a10,.Lt_0_38146 # [20] + + beqz.n a5,.Lt_0_38914 # [0] + +.LBB59_esp_nn_avg_pool_s8_esp32s3: # 0x5b7 +# Part of loop 
body line 161, head labeled .Lt_0_37890 + l32i a14,a1,144 # [0] gra_spill_temp_24 + l32i a13,a1,248 # [1] filter_wd + l32i a9,a1,136 # [2] gra_spill_temp_30 + l32i a8,a1,108 # [3] gra_spill_temp_25 + movi.n a15,0 # [4] + s32i a15,a1,216 # [5] gra_spill_temp_52 + add.n a10,a8,a5 # [6] + add.n a8,a8,a9 # [8] + min a13,a13,a14 # [9] + add.n a10,a9,a10 # [10] + s32i a10,a1,172 # [11] gra_spill_temp_41 + s32i a13,a1,132 # [12] gra_spill_temp_31 + l32i a14,a1,96 # [13] gra_spill_temp_22 + s32i a8,a1,164 # [14] gra_spill_temp_39 + max a14,a14,a15 # [15] + l32i a15,a1,100 # [16] gra_spill_temp_23 + s32i a14,a1,208 # [17] gra_spill_temp_50 + add.n a8,a15,a14 # [18] + s32i a8,a1,60 # [19] gra_spill_temp_13 + add.n a15,a15,a13 # [20] + s32i a15,a1,196 # [21] gra_spill_temp_37 + sub a13,a13,a14 # [22] + s32i a13,a1,52 # [23] gra_spill_temp_11 + j .Lt_0_39426 # [24] + +.LBB62_esp_nn_avg_pool_s8_esp32s3: # 0x5fb +# Part of loop body line 173, head labeled .Lt_0_39426 + l32i a10,a1,192 # [0] gra_spill_temp_36 + l32i a14,a1,196 # [1] gra_spill_temp_37 + add.n a10,a10,a15 # [2] + mull a10,a11,a10 # [3] + movi.n a15,0 # [4] + add.n a14,a10,a14 # [5] + +.Lt_0_40194: # 0x60a +# Loop body line 178, nesting depth: 4, estimated iterations: 252 + l32i a9,a1,132 # [0] gra_spill_temp_31 + l32i a8,a1,208 # [1] gra_spill_temp_50 + add.n a14,a14,a11 # [2] + bge a8,a9,.Lt_0_40450 # [3] + +.LBB65_esp_nn_avg_pool_s8_esp32s3: # 0x615 +# Part of loop body line 178, head labeled .Lt_0_40194 + l32i a3,a1,60 # [0] gra_spill_temp_13 + l32i a2,a1,216 # [1] gra_spill_temp_52 + add.n a3,a3,a10 # [2] + mull a3,a3,a5 # [3] + l32i a4,a1,52 # [4] gra_spill_temp_11 + add.n a2,a2,a3 # [5] + add.n a2,a12,a2 # [6] + loopgtz a4,.LBB155_esp_nn_avg_pool_s8_esp32s3 # [7] + + ee.vldbc.32.xp q0,a2,a5 # [0*II+0] id:684 + ee.vcmp.lt.s8 q1,q0,q4 # [0*II+2] + ee.vzip.8 q0,q1 # [0*II+3] + ee.vcmp.lt.s16 q1,q0,q4 # [0*II+4] + ee.vzip.16 q0,q1 # [0*II+5] + ee.vadds.s32 q2,q2,q0 # [0*II+6] + +.LBB155_esp_nn_avg_pool_s8_esp32s3: # 0x63e +# Part of loop body line 178, head labeled .Lt_0_40194 + l32i a2,a1,148 # [0] gra_spill_temp_35 + movi.n a8,0 # [1] + sub a9,a7,a2 # [2] + sub a2,a2,a7 # [3] + max a9,a9,a8 # [4] + l32i a8,a1,248 # [5] filter_wd + sub a2,a11,a2 # [6] + min a8,a8,a2 # [7] + sub a8,a8,a9 # [8] + add.n a15,a15,a8 # [9] + +.Lt_0_40450: # 0x65a +# Part of loop body line 178, head labeled .Lt_0_40194 + add.n a10,a10,a11 # [0] + addi.n a13,a13,1 # [1] + bne a6,a13,.Lt_0_40194 # [2] + +.Lt_0_39682: # 0x661 +# Part of loop body line 173, head labeled .Lt_0_39426 + srai a2,a15,1 # [5] + +// move to gp registers and average + + ee.movi.32.a q2,a9,0 # [0] + ee.movi.32.a q2,a4,1 # [0] + + blti a9,1,.Lt_0_41986 # [3] + add.n a9,a9,a2 # [0] + j .Lt_0_41730 # [2] +.Lt_0_41986: # 0x482 + sub a9,a9,a2 # [0] +.Lt_0_41730: # 0x3f1 + + blti a4,1,.Lt_0_42498 # [1] + add.n a4,a2,a4 # [0] + j .Lt_0_42242 # [2] +.Lt_0_42498: # 0x48b + sub a4,a4,a2 # [0] +.Lt_0_42242: # 0x3fc + + + quos a9,a9,a15 # [1] + quos a4,a4,a15 # [1] + ee.movi.32.q q2,a9,0 # [0] + ee.movi.32.q q2,a4,1 # [1] + + ee.movi.32.a q2,a9,2 # [2] + ee.movi.32.a q2,a14,3 # [0] + + blti a9,1,.Lt_0_43010 # [3] + add.n a9,a9,a2 # [0] + j .Lt_0_42754 # [2] +.Lt_0_43010: # 0x494 + sub a9,a9,a2 # [0] +.Lt_0_42754: # 0x40d + + + blti a14,1,.Lt_0_43522 # [1] + add.n a14,a2,a14 # [0] + j .Lt_0_43266 # [2] +.Lt_0_43522: # 0x49d + sub a14,a14,a2 # [0] +.Lt_0_43266: # 0x418 + + quos a9,a9,a15 # [1] + quos a14,a14,a15 # [1] + ee.movi.32.q q2,a9,2 # [0] + ee.movi.32.q q2,a14,3 # [1] + + + l32i a9,a1,172 # [0] 
gra_spill_temp_41 + l32i a8,a1,164 # [1] gra_spill_temp_39 + l32i a14,a1,216 # [2] gra_spill_temp_52 + addi.n a14,a14,4 # [5] + ee.vmin.s32 q2,q2,q6 # [6] + s32i a14,a1,216 # [7] gra_spill_temp_52 + ee.vmax.s32 q2,q2,q7 # [8] + ee.vunzip.16 q2,q1 # [9] + ee.vunzip.8 q2,q1 # [10] + ee.vst.l.64.ip q2,a1,0 # [11] id:691 + l32i.n a13,a1,0 # [12] id:692 + s32i.n a13,a8,0 # [13] id:693 + addi.n a8,a8,4 # [14] + s32i a8,a1,164 # [15] gra_spill_temp_39 + bge a8,a9,.Lt_0_38914 # [16] + +.Lt_0_39426: # 0x6cb + l32i a15,a1,168 # [0] gra_spill_temp_40 + mv.qr q2,q4 # [1] + mov.n a13,a15 # [2] + blt a15,a6,.LBB62_esp_nn_avg_pool_s8_esp32s3 # [3] + +.Lt_0_52738: # 0x6d6 + movi.n a15,0 # [0] + j .Lt_0_39682 # [1] + + .size esp_nn_avg_pool_s8_esp32s3, . - esp_nn_avg_pool_s8_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_max_pool_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_max_pool_ansi.c new file mode 100644 index 0000000..94d2344 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_max_pool_ansi.c @@ -0,0 +1,70 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2020-2021 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
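// Both esp32s3 pooling kernels in this patch branch on the channel count
// before entering their vector loops: an 8-channels-per-q-register path when
// channels is a multiple of 8, a 4-channel path when it is only a multiple
// of 4, and an immediate return otherwise (which is what the
// "no of channels must be multiple of 4" notes refer to). A hedged scalar
// sketch of that dispatch; the process_* helpers are illustrative
// placeholders, not symbols defined in this patch:

#include <stdint.h>

static void esp32s3_pool_dispatch_sketch(uint16_t channels)
{
    if ((channels & 7) == 0) {
        /* process_8_channels_per_step(...);  vectorized 8-lane inner loops */
    } else if ((channels & 3) == 0) {
        /* process_4_channels_per_step(...);  narrower 4-lane inner loops */
    } else {
        return; /* unsupported channel count: output buffer is left untouched */
    }
}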
+ +#include + +#include + +void esp_nn_max_pool_s8_ansi(const int8_t *input, + const uint16_t input_wd, + const uint16_t input_ht, + int8_t *output, + const uint16_t output_wd, + const uint16_t output_ht, + const uint16_t stride_wd, + const uint16_t stride_ht, + const uint16_t filter_wd, + const uint16_t filter_ht, + const uint16_t pad_wd, + const uint16_t pad_ht, + const int32_t activation_min, + const int32_t activation_max, + const uint16_t channels) +{ + int32_t base_y = -pad_ht; + for (int32_t out_y = 0; out_y < output_ht; out_y++, base_y += stride_ht) { + int32_t base_x = -pad_wd; + for (int32_t out_x = 0; out_x < output_wd; out_x++, base_x += stride_wd) { + /* Make sure filter does not cross the input box */ + int32_t filter_y_start = max(0, -base_y); + int32_t filter_x_start = max(0, -base_x); + int32_t filter_y_end = min(filter_ht, input_ht - base_y); + int32_t filter_x_end = min(filter_wd, input_wd - base_x); + + for (int32_t ch_idx = 0; ch_idx < channels; ch_idx++) { + int8_t result = INT8_MIN; + + for (int32_t filter_y = filter_y_start; filter_y < filter_y_end; filter_y++) { + for (int32_t filter_x = filter_x_start; filter_x < filter_x_end; filter_x++) { + int32_t in_x_idx = base_x + filter_x; + int32_t in_y_idx = base_y + filter_y; + int32_t input_index = (in_y_idx * input_wd + in_x_idx) * channels + ch_idx; + result = max(input[input_index], result); + } + } + + /* Activation function */ + result = max(result, activation_min); + result = min(result, activation_max); + + int32_t output_index = (out_y * output_wd + out_x) * channels + ch_idx; + output[output_index] = result; + } + } + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_max_pool_s8_esp32s3.S b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_max_pool_s8_esp32s3.S new file mode 100644 index 0000000..722e0db --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/pooling/esp_nn_max_pool_s8_esp32s3.S @@ -0,0 +1,449 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 +// Copyright 2021-2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
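// A hedged usage sketch for the ANSI max-pool reference above. The caller
// sizes the output buffer itself; for the unpadded case used here each output
// edge is (input - filter) / stride + 1. The prototype is copied from the
// definition above, the dimensions and data are illustrative, and the sketch
// assumes it is linked against esp_nn_max_pool_ansi.c with
// EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN enabled.

#include <stdint.h>
#include <stdio.h>

void esp_nn_max_pool_s8_ansi(const int8_t *input,
                             const uint16_t input_wd, const uint16_t input_ht,
                             int8_t *output,
                             const uint16_t output_wd, const uint16_t output_ht,
                             const uint16_t stride_wd, const uint16_t stride_ht,
                             const uint16_t filter_wd, const uint16_t filter_ht,
                             const uint16_t pad_wd, const uint16_t pad_ht,
                             const int32_t activation_min, const int32_t activation_max,
                             const uint16_t channels);

int main(void)
{
    /* 4x4 single-channel input, 2x2 window, stride 2, no padding
       -> (4 - 2) / 2 + 1 = 2 outputs per dimension. */
    const int8_t input[16] = {
         1,  2,  3,  4,
         5,  6,  7,  8,
         9, 10, 11, 12,
        13, 14, 15, 16,
    };
    int8_t output[4] = { 0 };

    esp_nn_max_pool_s8_ansi(input, 4, 4,
                            output, 2, 2,
                            2, 2,       /* stride_wd, stride_ht */
                            2, 2,       /* filter_wd, filter_ht */
                            0, 0,       /* pad_wd, pad_ht       */
                            -128, 127,  /* activation range     */
                            1);         /* channels              */

    /* Prints "6 8 14 16": the maximum of each 2x2 window, row-major. */
    for (int i = 0; i < 4; i++) {
        printf("%d ", output[i]);
    }
    printf("\n");
    return 0;
}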
+ + .text + .align 4 + .literal_position + + # Program Unit: esp_nn_max_pool_s8_esp32s3 + .type esp_nn_max_pool_s8_esp32s3, @function + .align 4 + .global esp_nn_max_pool_s8_esp32s3 + +// no of channels must be multiple of 4 + +esp_nn_max_pool_s8_esp32s3: # 0x4 + # int8_min = 0 + # gra_spill_temp_0 = 4 + # gra_spill_temp_1 = 8 + # gra_spill_temp_2 = 12 + # gra_spill_temp_3 = 16 + # gra_spill_temp_4 = 20 + # gra_spill_temp_5 = 24 + # gra_spill_temp_6 = 28 + # gra_spill_temp_7 = 32 + # gra_spill_temp_8 = 36 + # gra_spill_temp_9 = 40 + # gra_spill_temp_10 = 44 + # gra_spill_temp_11 = 48 + # gra_spill_temp_12 = 52 + # gra_spill_temp_13 = 56 + # gra_spill_temp_14 = 60 + # gra_spill_temp_15 = 64 + # gra_spill_temp_16 = 68 + # gra_spill_temp_17 = 72 + # gra_spill_temp_18 = 76 + # gra_spill_temp_19 = 80 + # gra_spill_temp_20 = 84 + # gra_spill_temp_21 = 88 + # gra_spill_temp_22 = 92 + # gra_spill_temp_23 = 96 + +// a2: input +// a3: input_wd +// a4: input_ht +// a5: output +// a6: output_wd +// a7: output_ht +// on stack: stride_wd = 120 +// on stack: stride_ht = 124 +// on stack: filter_wd = 128 +// on stack: filter_ht = 132 +// on stack: pad_wd = 136 +// on stack: pad_ht = 140 +// on stack: activation_min +// on stack: activation_max +// on stack: channels + + + entry a1,120 # + mov.n a12,a2 # [0] + s32i a6,a1,4 # [2] gra_spill_temp_0 + s32i a7,a1,68 # [3] gra_spill_temp_16 + mov.n a11,a3 # [4] + s32i a5,a1,96 # [5] gra_spill_temp_23 + + l16ui a5,a1,152 # [6] id:465 channels+0x0 + movi a3,-128 # [7] + s32i.n a3,a1,0 # [1] int8_min + + addi.n a9,a1,148 # [0] activation_max + addi.n a15,a1,144 # [1] activation_min + ee.vldbc.8 q3,a1 # [7] id:473 int8_min+0x0 + ee.vldbc.8 q5,a15 # [8] id:470 activation_min+0x0 + ee.vldbc.8 q4,a9 # [9] id:471 activation_max+0x0 + + extui a8,a5,0,3 # [8] + beqz.n a8,.LBB3_esp_nn_max_pool_s8_esp32s3 # [9] // if (channels % 8 == 0) + + extui a14,a5,0,2 # [0] + beqz.n a14,.LBB25_esp_nn_max_pool_s8_esp32s3 # [1] // if (channels % 4 == 0) + + retw.n # [0] // exit + +.LBB3_esp_nn_max_pool_s8_esp32s3: # 0x1c5 // if (channels % 8 == 0) + + l16ui a15,a1,136 # [1] id:475 pad_wd+0x0 + l16ui a14,a1,140 # [4] id:474 pad_ht+0x0 + movi.n a8,0 # [13] + movi.n a10,0 # [15] + s32i a14,a1,44 # [7] gra_spill_temp_10 + neg a15,a15 # [12] + mul16u a9,a6,a5 # [14] + neg a14,a14 # [16] + s32i a14,a1,92 # [17] gra_spill_temp_22 + s32i a10,a1,52 # [18] gra_spill_temp_12 + s32i a9,a1,60 # [19] gra_spill_temp_14 + s32i.n a8,a1,36 # [16] gra_spill_temp_8 + s32i a15,a1,56 # [21] gra_spill_temp_13 + sub a13,a4,a14 # [22] + s32i a13,a1,48 # [23] gra_spill_temp_11 + sub a15,a11,a15 # [24] + s32i.n a15,a1,40 # [25] gra_spill_temp_9 + +.Lt_0_21506: # 0x229 + l32i a8,a1,4 # [0] gra_spill_temp_0 + beqz.n a8,.Lt_0_21762 # [2] + + movi.n a10,0 # [0] + l32i a9,a1,44 # [1] gra_spill_temp_10 + l32i.n a15,a1,40 # [2] gra_spill_temp_9 + l32i a8,a1,52 # [3] gra_spill_temp_12 + l32i.n a13,a1,136 # [4] ,pad_wd + l32i a14,a1,56 # [5] gra_spill_temp_13 + s32i a14,a1,80 # [6] gra_spill_temp_19 + s32i a13,a1,76 # [7] gra_spill_temp_18 + s32i a8,a1,88 # [8] gra_spill_temp_21 + s32i a15,a1,84 # [9] gra_spill_temp_20 + l32i a8,a1,48 # [10] gra_spill_temp_11 + max a9,a9,a10 # [11] + l32i a15,a1,132 # [12] filter_ht + s32i a9,a1,8 # [13] gra_spill_temp_1 + movi.n a9,0 # [14] + min a15,a15,a8 # [15] + s32i a9,a1,64 # [16] gra_spill_temp_15 + +.Lt_0_22274: # 0x25d + beqz.n a5,.Lt_0_22530 # [0] + +.LBB10_esp_nn_max_pool_s8_esp32s3: # 0x25f +# Part of loop body line 46, head labeled .Lt_0_22274 + l32i a6,a1,76 # [0] 
gra_spill_temp_18 + l32i a13,a1,96 # [1] gra_spill_temp_23 + l32i a8,a1,84 # [2] gra_spill_temp_20 + l32i a7,a1,128 # [3] filter_wd + l32i a10,a1,88 # [4] gra_spill_temp_21 + movi.n a9,0 # [5] + s32i a9,a1,20 # [6] gra_spill_temp_4 + add.n a14,a10,a5 # [7] + min a7,a7,a8 # [8] + add.n a10,a10,a13 # [9] + add.n a14,a13,a14 # [10] + s32i a14,a1,12 # [11] gra_spill_temp_2 + s32i a10,a1,16 # [12] gra_spill_temp_3 + movi.n a8,0 # [13] + l32i a10,a1,80 # [14] gra_spill_temp_19 + max a6,a6,a8 # [15] + sub a9,a7,a6 # [16] + s32i a9,a1,28 # [17] gra_spill_temp_6 + add.n a13,a10,a6 # [18] + s32i a13,a1,24 # [19] gra_spill_temp_5 + add.n a10,a10,a7 # [16] + s32i a10,a1,72 # [21] gra_spill_temp_17 + +.Lt_0_23042: # 0x29a + l32i a8,a1,8 # [0] gra_spill_temp_1 + mv.qr q1,q3 # [1] + mov.n a13,a8 # [2] + bge a8,a15,.Lt_0_23298 # [3] + +.LBB13_esp_nn_max_pool_s8_esp32s3: # 0x2a5 +# Part of loop body line 40, head labeled .Lt_0_23042 + l32i a10,a1,92 # [0] gra_spill_temp_22 + l32i a14,a1,72 # [1] gra_spill_temp_17 + add.n a10,a10,a8 # [2] + mull a10,a11,a10 # [3] + add.n a14,a10,a14 # [5] + +.Lt_0_23810: # 0x2b2 + add.n a14,a14,a11 # [0] + addi.n a13,a13,1 # [1] + bge a6,a7,.Lt_0_24066 # [2] + +.LBB16_esp_nn_max_pool_s8_esp32s3: # 0x2b9 + l32i a3,a1,24 # [0] gra_spill_temp_5 + l32i a2,a1,20 # [1] gra_spill_temp_4 + add.n a3,a3,a10 # [2] + mull a3,a3,a5 # [3] + add.n a2,a2,a3 # [5] + l32i a3,a1,28 # [6] gra_spill_temp_6 + add.n a2,a12,a2 # [7] + loopgtz a3,.LBB93_esp_nn_max_pool_s8_esp32s3 # [8] + + ee.vld.l.64.ip q0,a2,0 # [0*II+1] id:481 + add.n a2,a2,a5 # [0*II+2] + ee.vmax.s8 q1,q1,q0 # [0*II+3] +.LBB93_esp_nn_max_pool_s8_esp32s3: # 0x2d8 + +.Lt_0_24066: # 0x2d8 + add.n a10,a10,a11 # [0] + bne a15,a13,.Lt_0_23810 # [1] + +.Lt_0_23298: # 0x2dd + l32i a9,a1,12 # [0] gra_spill_temp_2 + l32i a13,a1,20 # [1] gra_spill_temp_4 + l32i a8,a1,16 # [2] gra_spill_temp_3 + ee.vmin.s8 q2,q1,q4 # [3] + ee.vmax.s8 q2,q2,q5 # [4] + mov.n a10,a8 # [5] + addi.n a13,a13,8 # [6] + s32i a13,a1,20 # [7] gra_spill_temp_4 + ee.vst.l.64.ip q2,a10,0 # [8] id:482 + addi.n a8,a8,8 # [9] + s32i a8,a1,16 # [10] gra_spill_temp_3 + blt a8,a9,.Lt_0_23042 # [11] + +.Lt_0_22530: # 0x2fe + l32i a13,a1,84 # [0] gra_spill_temp_20 + l32i a14,a1,80 # [1] gra_spill_temp_19 + l32i a10,a1,120 # [2] stride_wd + l32i a8,a1,88 # [3] gra_spill_temp_21 + l32i a9,a1,76 # [4] gra_spill_temp_18 + add.n a8,a8,a5 # [5] + s32i a8,a1,88 # [6] gra_spill_temp_21 + sub a9,a9,a10 # [7] + add.n a14,a14,a10 # [8] + sub a13,a13,a10 # [9] + s32i a13,a1,84 # [10] gra_spill_temp_20 + s32i a14,a1,80 # [11] gra_spill_temp_19 + s32i a9,a1,76 # [12] gra_spill_temp_18 + l32i a14,a1,64 # [13] gra_spill_temp_15 + l32i a8,a1,4 # [14] gra_spill_temp_0 + addi.n a14,a14,1 # [15] + s32i a14,a1,64 # [16] gra_spill_temp_15 + sub a14,a14,a8 # [17] + bnez a14,.Lt_0_22274 # [18] + +.Lt_0_21762: # 0x334 +# Part of loop body line 20, head labeled .Lt_0_21506 + l32i a8,a1,44 # [0] gra_spill_temp_10 + l32i a15,a1,92 # [1] gra_spill_temp_22 + l32i a10,a1,60 # [2] gra_spill_temp_14 + l32i a14,a1,124 # [3] stride_ht + l32i a13,a1,48 # [4] gra_spill_temp_11 + l32i a9,a1,52 # [5] gra_spill_temp_12 + sub a13,a13,a14 # [6] + add.n a9,a9,a10 # [7] + add.n a15,a15,a14 # [8] + sub a8,a8,a14 # [9] + s32i a8,a1,44 # [10] gra_spill_temp_10 + s32i a15,a1,92 # [11] gra_spill_temp_22 + s32i a9,a1,52 # [12] gra_spill_temp_12 + s32i a13,a1,48 # [13] gra_spill_temp_11 + l32i.n a9,a1,36 # [14] gra_spill_temp_8 + l32i a10,a1,68 # [15] gra_spill_temp_16 + addi.n a9,a9,1 # [16] + s32i.n a9,a1,36 # [17] 
gra_spill_temp_8 + sub a9,a9,a10 # [18] + bnez a9,.Lt_0_21506 # [19] + + retw.n # [0] // exit + +.LBB25_esp_nn_max_pool_s8_esp32s3: # 0x36d // if (channels % 4 == 0) + + l16ui a10,a1,136 # [1] id:475 pad_wd+0x0 + l16ui a9,a1,140 # [4] id:474 pad_ht+0x0 + movi.n a13,0 # [13] + movi.n a15,0 # [15] + neg a10,a10 # [12] + s32i a9,a1,44 # [7] gra_spill_temp_10 + mul16u a14,a6,a5 # [14] + neg a9,a9 # [16] + s32i a9,a1,92 # [17] gra_spill_temp_22 + s32i a15,a1,52 # [18] gra_spill_temp_12 + s32i a14,a1,60 # [19] gra_spill_temp_14 + s32i.n a13,a1,36 # [16] gra_spill_temp_8 + s32i a10,a1,56 # [21] gra_spill_temp_13 + sub a8,a4,a9 # [22] + s32i a8,a1,48 # [23] gra_spill_temp_11 + sub a10,a11,a10 # [24] + s32i.n a10,a1,40 # [25] gra_spill_temp_9 + +.Lt_0_27138: # 0x3d5 + l32i a13,a1,4 # [0] gra_spill_temp_0 + beqz.n a13,.Lt_0_27394 # [2] + +.LBB29_esp_nn_max_pool_s8_esp32s3: # 0x3da +# Part of loop body line 107, head labeled .Lt_0_27138 + movi.n a10,0 # [0] + l32i a9,a1,44 # [1] gra_spill_temp_10 + l32i.n a15,a1,40 # [2] gra_spill_temp_9 + l32i a8,a1,52 # [3] gra_spill_temp_12 + l32i a14,a1,56 # [4] gra_spill_temp_13 + l32i.n a13,a1,136 # [5] pad_wd + s32i a13,a1,76 # [6] gra_spill_temp_18 + s32i a14,a1,80 # [7] gra_spill_temp_19 + s32i a8,a1,88 # [8] gra_spill_temp_21 + s32i a15,a1,84 # [9] gra_spill_temp_20 + l32i a8,a1,48 # [10] gra_spill_temp_11 + l32i a15,a1,132 # [11] filter_ht + movi.n a14,0 # [12] + max a9,a9,a10 # [13] + s32i a9,a1,8 # [14] gra_spill_temp_1 + s32i a14,a1,64 # [15] gra_spill_temp_15 + min a15,a15,a8 # [16] + +.Lt_0_27906: # 0x409 +# Loop body line 109, nesting depth: 2, estimated iterations: 56 + beqz.n a5,.Lt_0_28162 # [0] + +.LBB32_esp_nn_max_pool_s8_esp32s3: # 0x40b +# Part of loop body line 109, head labeled .Lt_0_27906 + l32i a6,a1,76 # [0] gra_spill_temp_18 + l32i a13,a1,96 # [1] gra_spill_temp_23 + l32i a8,a1,84 # [2] gra_spill_temp_20 + l32i a7,a1,128 # [3] filter_wd + l32i a10,a1,88 # [4] gra_spill_temp_21 + movi.n a9,0 # [5] + s32i a9,a1,32 # [6] gra_spill_temp_7 + add.n a14,a10,a5 # [7] + min a7,a7,a8 # [8] + add.n a10,a10,a13 # [9] + add.n a14,a13,a14 # [10] + s32i a14,a1,12 # [11] gra_spill_temp_2 + s32i a10,a1,16 # [12] gra_spill_temp_3 + movi.n a8,0 # [13] + l32i a10,a1,80 # [14] gra_spill_temp_19 + max a6,a6,a8 # [15] + sub a9,a7,a6 # [16] + s32i a9,a1,28 # [17] gra_spill_temp_6 + add.n a13,a10,a6 # [18] + s32i a13,a1,24 # [19] gra_spill_temp_5 + add.n a10,a10,a7 # [16] + s32i a10,a1,72 # [21] gra_spill_temp_17 + +.Lt_0_28674: # 0x446 +# Loop body line 8, nesting depth: 3, estimated iterations: 56 + l32i a8,a1,8 # [0] gra_spill_temp_1 + mv.qr q1,q3 # [1] + mov.n a13,a8 # [2] + bge a8,a15,.Lt_0_28930 # [3] + +.LBB35_esp_nn_max_pool_s8_esp32s3: # 0x451 +# Part of loop body line 8, head labeled .Lt_0_28674 + l32i a10,a1,92 # [0] gra_spill_temp_22 + l32i a14,a1,72 # [1] gra_spill_temp_17 + add.n a10,a10,a8 # [2] + mull a10,a11,a10 # [3] + add.n a14,a10,a14 # [5] + +.Lt_0_29442: # 0x45e + add.n a14,a14,a11 # [0] + addi.n a13,a13,1 # [1] + bge a6,a7,.Lt_0_29698 # [2] + +.LBB38_esp_nn_max_pool_s8_esp32s3: # 0x465 + l32i a3,a1,24 # [0] gra_spill_temp_5 + l32i a2,a1,32 # [1] gra_spill_temp_7 + add.n a3,a3,a10 # [2] + mull a3,a3,a5 # [3] + l32i a4,a1,28 # [4] gra_spill_temp_6 + add.n a2,a2,a3 # [5] + add.n a2,a12,a2 # [6] + loopgtz a4,.LBB108_esp_nn_max_pool_s8_esp32s3 # [7] + + ee.vldbc.32 q0,a2 # [0*II+0] id:489 + add.n a2,a2,a5 # [0*II+1] + ee.vmax.s8 q1,q1,q0 # [0*II+2] +.LBB108_esp_nn_max_pool_s8_esp32s3: # 0x482 + +.Lt_0_29698: # 0x482 + add.n a10,a10,a11 # [0] + 
bne a15,a13,.Lt_0_29442 # [1] + +.Lt_0_28930: # 0x487 +# Part of loop body line 8, head labeled .Lt_0_28674 + l32i a9,a1,12 # [0] gra_spill_temp_2 + l32i a8,a1,16 # [1] gra_spill_temp_3 + l32i a10,a1,32 # [3] gra_spill_temp_7 + + ee.vmin.s8 q5,q1,q4 # [4] + ee.vmax.s8 q5,q5,q5 # [5] + addi.n a10,a10,4 # [6] + ee.movi.32.a q5,a13,0 + s32i a10,a1,32 # [9] gra_spill_temp_7 + s32i.n a13,a8,0 # [10] id:492 + addi.n a8,a8,4 # [11] + s32i a8,a1,16 # [12] gra_spill_temp_3 + blt a8,a9,.Lt_0_28674 # [13] + +.Lt_0_28162: # 0x4ad +# Part of loop body line 109, head labeled .Lt_0_27906 + l32i a13,a1,84 # [0] gra_spill_temp_20 + l32i a14,a1,80 # [1] gra_spill_temp_19 + l32i a10,a1,120 # [2] stride_wd + l32i a8,a1,88 # [3] gra_spill_temp_21 + l32i a9,a1,76 # [4] gra_spill_temp_18 + add.n a8,a8,a5 # [5] + s32i a8,a1,88 # [6] gra_spill_temp_21 + sub a9,a9,a10 # [7] + add.n a14,a14,a10 # [8] + sub a13,a13,a10 # [9] + s32i a13,a1,84 # [10] gra_spill_temp_20 + s32i a14,a1,80 # [11] gra_spill_temp_19 + s32i a9,a1,76 # [12] gra_spill_temp_18 + l32i a14,a1,64 # [13] gra_spill_temp_15 + l32i a8,a1,4 # [14] gra_spill_temp_0 + addi.n a14,a14,1 # [15] + s32i a14,a1,64 # [16] gra_spill_temp_15 + sub a14,a14,a8 # [17] + bnez a14,.Lt_0_27906 # [18] + +.Lt_0_27394: # 0x4e3 +# Part of loop body line 107, head labeled .Lt_0_27138 + l32i a8,a1,44 # [0] gra_spill_temp_10 + l32i a15,a1,92 # [1] gra_spill_temp_22 + l32i a10,a1,60 # [2] gra_spill_temp_14 + l32i a14,a1,124 # [3] stride_ht + l32i a13,a1,48 # [4] gra_spill_temp_11 + l32i a9,a1,52 # [5] gra_spill_temp_12 + sub a13,a13,a14 # [6] + add.n a9,a9,a10 # [7] + add.n a15,a15,a14 # [8] + sub a8,a8,a14 # [9] + s32i a8,a1,44 # [10] gra_spill_temp_10 + s32i a15,a1,92 # [11] gra_spill_temp_22 + s32i a9,a1,52 # [12] gra_spill_temp_12 + s32i a13,a1,48 # [13] gra_spill_temp_11 + l32i.n a9,a1,36 # [14] gra_spill_temp_8 + l32i a10,a1,68 # [15] gra_spill_temp_16 + addi.n a9,a9,1 # [16] + s32i.n a9,a1,36 # [17] gra_spill_temp_8 + sub a9,a9,a10 # [18] + bnez a9,.Lt_0_27138 # [19] + + retw.n # [0] // exit + + .size esp_nn_max_pool_s8_esp32s3, . - esp_nn_max_pool_s8_esp32s3 + +#elif defined(WIO_TERMINAL) +// dummy code, added for old ARM toolchain +.syntax unified +.thumb +.cpu cortex-m0 + +.section .text +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN && EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN_S3 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/esp_nn_softmax_ansi.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/esp_nn_softmax_ansi.c new file mode 100644 index 0000000..219f3c0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/esp_nn_softmax_ansi.c @@ -0,0 +1,92 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include "softmax_common.h" + +int32_t esp_nn_get_softmax_scratch_size_ansi(const int32_t width, const int32_t height) +{ + (void) width; + (void) height; + return 0; +} + +void esp_nn_set_softmax_scratch_buf_ansi(void *buffer) +{ + (void) buffer; + return; +} + +void esp_nn_softmax_s8_ansi(const int8_t *input_data, + const int32_t height, + const int32_t width, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int8_t *output_data) +{ + // The representation chosen for the input to the exp() function is Q5.26. + // We need to leave extra space since values that we skip might be as large as + // -32 before multiplying by input mult, and therefore as large as + // -16 afterwards. Note that exp(-8) is definitely not insignificant to + // accumulation, but exp(-16) definitely is. +#define ACCUM_BITS 12 +#define DIFF_BITS 5 + + const int32_t mask = (1 << shift); + int32_t col = 0; + const int8_t *in_ptr = input_data; + int8_t *out_ptr = output_data; + + for (int row_idx = 0; row_idx < height; row_idx++) { + int8_t max_in_row = in_ptr[0]; + for (col = 1; col < width; col++) { + max_in_row = max(max_in_row, in_ptr[col]); + } + + int32_t input_diff = 0; + int32_t sum_of_exps = 0; + + for (col = 0; col < width; col++) { + input_diff = in_ptr[col] - max_in_row; + if (input_diff >= diff_min) { + const int32_t input_diff_rescaled = SAT_HIGH_MUL(input_diff * mask, mult); + const int32_t exp_raw = esp_nn_exp_on_negative_values(input_diff_rescaled); + sum_of_exps += DIV_POW2(exp_raw, ACCUM_BITS); + } + } + + const int32_t headroom_plus1 = esp_nn_clz32((uint32_t) sum_of_exps); + const int32_t shifted_scale = ONE_OVER_ONE_X((sum_of_exps << headroom_plus1) - (1 << 31)); + const int32_t bits_over_unit = ACCUM_BITS - headroom_plus1 + 31 - sizeof(int8_t) * 8; + + for (col = 0; col < width; col++) { + input_diff = in_ptr[col] - max_in_row; + if (input_diff >= diff_min) { + const int32_t input_diff_rescaled = SAT_HIGH_MUL(input_diff * mask, mult); + const int32_t exp_raw = esp_nn_exp_on_negative_values(input_diff_rescaled); + const int32_t shifted_output = SAT_HIGH_MUL(shifted_scale, exp_raw); + const int32_t result = DIV_POW2(shifted_output, bits_over_unit) - 128; + out_ptr[col] = (int8_t) esp_nn_saturate8(result); + } else { + out_ptr[col] = -128; + } + } + in_ptr += width; + out_ptr += width; + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/esp_nn_softmax_opt.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/esp_nn_softmax_opt.c new file mode 100644 index 0000000..8d001eb --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/esp_nn_softmax_opt.c @@ -0,0 +1,112 @@ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN +// Copyright 2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include "softmax_common.h" +#include + +static int32_t *scratch_buf = NULL; + +/** + * @brief Get scratch buffer size needed by softmax function + * + * @param width + * @param height + * @return size in bytes + * + * @note buffer must be 4 byte aligned + */ +int32_t esp_nn_get_softmax_scratch_size_opt(const int32_t width, const int32_t height) +{ + (void) height; + return width * 4; +} + +/** + * @brief Set scratch buffer to be used by softmax function + * + * @param buffer this can be NULL if one needs to unset it + * must be aligned to 4 bytes + */ +void esp_nn_set_softmax_scratch_buf_opt(void *buffer) +{ + scratch_buf = (int32_t *) buffer; +} + +void esp_nn_softmax_s8_opt(const int8_t *input_data, + const int32_t height, + const int32_t width, + const int32_t mult, + const int32_t shift, + const int32_t diff_min, + int8_t *output_data) +{ + if (scratch_buf == NULL) { + printf("%s error! scratch buffer not set\n", __FUNCTION__); + return; + } + // The representation chosen for the input to the exp() function is Q5.26. + // We need to leave extra space since values that we skip might be as large as + // -32 before multiplying by input mult, and therefore as large as + // -16 afterwards. Note that exp(-8) is definitely not insignificant to + // accumulation, but exp(-16) definitely is. +#define ACCUM_BITS 12 +#define DIFF_BITS 5 + + const int32_t mask = (1 << shift); + int32_t col = 0; + const int8_t *in_ptr = input_data; + int8_t *out_ptr = output_data; + + for (int row_idx = 0; row_idx < height; row_idx++) { + int8_t max_in_row = in_ptr[0]; + for (col = 1; col < width; col++) { + max_in_row = max(max_in_row, in_ptr[col]); + } + + int32_t input_diff = 0; + int32_t sum_of_exps = 0; + + for (col = 0; col < width; col++) { + input_diff = in_ptr[col] - max_in_row; + if (input_diff >= diff_min) { + const int32_t input_diff_rescaled = SAT_HIGH_MUL(input_diff * mask, mult); + const int32_t exp_raw = esp_nn_exp_on_negative_values(input_diff_rescaled); + scratch_buf[col] = exp_raw; // store to avoid duplicate calculation later + sum_of_exps += DIV_POW2(exp_raw, ACCUM_BITS); + } + } + + const int32_t headroom_plus1 = esp_nn_clz32((uint32_t) sum_of_exps); + const int32_t shifted_scale = ONE_OVER_ONE_X((sum_of_exps << headroom_plus1) - (1 << 31)); + const int32_t bits_over_unit = ACCUM_BITS - headroom_plus1 + 31 - sizeof(int8_t) * 8; + + for (col = 0; col < width; col++) { + input_diff = in_ptr[col] - max_in_row; + if (input_diff >= diff_min) { + int32_t exp_raw = scratch_buf[col]; + const int32_t shifted_output = SAT_HIGH_MUL(shifted_scale, exp_raw); + const int32_t result = DIV_POW2(shifted_output, bits_over_unit) - 128; + out_ptr[col] = (int8_t) esp_nn_saturate8(result); + } else { + out_ptr[col] = -128; + } + } + in_ptr += width; + out_ptr += width; + } +} + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/softmax_common.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/softmax_common.h new file mode 100644 index 0000000..6d1847a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ESP-NN/src/softmax/softmax_common.h @@ -0,0 +1,104 @@ +// Copyright 2022 Espressif Systems (Shanghai) PTE LTD +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this 
file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include + +#define MASK_IF_ZERO(x) (x) == 0 ? ~0 : 0 +#define MASK_IF_NON_ZERO(x) (x) != 0 ? ~0 : 0 +#define SELECT_USING_MASK(mask, a, b) ((mask) & (a)) ^ (~(mask) & (b)) +#define SAT_HIGH_MUL(x, y) esp_nn_sat_round_doubling_high_mul((x), (y)) +#define DIV_POW2(x,y) esp_nn_div_by_power_of_two((x), (y)) + +__NN_FORCE_INLINE__ int32_t mul_power_of_2(int val, int exp) +{ + const int32_t thresh = ((1 << (31 - exp)) - 1); + int32_t result = val << exp; + result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val > thresh), INT32_MAX, result); + result = SELECT_USING_MASK(MASK_IF_NON_ZERO(val < -thresh), INT32_MIN, result); + return result; +} + +/** + * @brief Calculate `1 / (1 + x)` for x in [0, 1] + * + * @param val input value to calculate `1/(1+x)` for + * @return `int32_t` result + * @note Newton-Raphson division + * + * https://en.wikipedia.org/wiki/Division_algorithm#Newton.E2.80.93Raphson_division + * Refer to that page for the logic behind the 48/17 and 32/17 constants. + * Pseudocode: https://en.wikipedia.org/wiki/Division_algorithm#Pseudocode + */ +__NN_FORCE_INLINE__ int32_t esp_nn_one_over_one_plus_x_for_x_in_0_1(int32_t val) +{ + const int64_t sum = (int64_t) val + INT32_MAX; + const int32_t half_denominator = (int32_t) ((sum + (sum >= 0 ? 1 : -1)) / 2L); + int32_t constant_48_over_17 = 1515870810; + int32_t constant_neg_32_over_17 = -1010580540; + int32_t x = constant_48_over_17 + SAT_HIGH_MUL(half_denominator, constant_neg_32_over_17); + const int32_t fixed_2_one = (1 << 29); + + x += mul_power_of_2(SAT_HIGH_MUL(x, fixed_2_one - SAT_HIGH_MUL(half_denominator, x)), 2); + x += mul_power_of_2(SAT_HIGH_MUL(x, fixed_2_one - SAT_HIGH_MUL(half_denominator, x)), 2); + x += mul_power_of_2(SAT_HIGH_MUL(x, fixed_2_one - SAT_HIGH_MUL(half_denominator, x)), 2); + + return mul_power_of_2(x, 1); +} + +#define ONE_OVER_ONE_X(x) esp_nn_one_over_one_plus_x_for_x_in_0_1((x)) + +/** + * @brief Return exp(x) for x < 0. 
+ * + */ +__NN_FORCE_INLINE__ int32_t esp_nn_exp_on_negative_values(int32_t val) +{ + int32_t shift = 24; + + const int32_t one_quarter = (1 << shift); + int32_t mask = one_quarter - 1; + const int32_t val_mod_minus_quarter = (val & mask) - one_quarter; + const int32_t remainder = val_mod_minus_quarter - val; + + // calculate exponent for x in [-1/4, 0) in `result` + const int32_t x = (val_mod_minus_quarter << 5) + (1 << 28); + const int32_t x2 = SAT_HIGH_MUL(x, x); + const int32_t x3 = SAT_HIGH_MUL(x2, x); + const int32_t x4 = SAT_HIGH_MUL(x2, x2); + const int32_t one_over_3 = 715827883; + const int32_t one_over_8 = 1895147668; + + const int32_t x4_over_4 = DIV_POW2(x4, 2); + const int32_t x4_over_4_plus_x3_over_6_plus_x2_over_2 = DIV_POW2(SAT_HIGH_MUL(x4_over_4 + x3, one_over_3) + x2, 1); + int32_t result = one_over_8 + SAT_HIGH_MUL(one_over_8, x + x4_over_4_plus_x3_over_6_plus_x2_over_2); + +#define SELECT_IF_NON_ZERO(x) { \ + mask = MASK_IF_NON_ZERO(remainder & (1 << shift++)); \ + result = SELECT_USING_MASK(mask, SAT_HIGH_MUL(result, x), result); \ +} + + SELECT_IF_NON_ZERO(1672461947) + SELECT_IF_NON_ZERO(1302514674) + SELECT_IF_NON_ZERO(790015084) + SELECT_IF_NON_ZERO(290630308) + SELECT_IF_NON_ZERO(39332535) + SELECT_IF_NON_ZERO(720401) + SELECT_IF_NON_ZERO(242) + +#undef SELECT_IF_NON_ZERO + + mask = MASK_IF_ZERO(val); + return SELECT_USING_MASK(mask, INT32_MAX, result); +} \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/debug_log.cpp new file mode 100644 index 0000000..be789c9 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_ESPRESSIF == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// On mbed platforms, we set up a serial port and write to it for debug logging. +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_ESPRESSIF diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ei_classifier_porting.cpp new file mode 100644 index 0000000..840ebe6 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/espressif/ei_classifier_porting.cpp @@ -0,0 +1,122 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_ESPRESSIF == 1 + +#include +#include +#include +#include +// Include FreeRTOS for delay +#include +#include + +// for millis and micros +#include "esp_timer.h" +#include "esp_idf_version.h" + +// memory handling +#include "esp_heap_caps.h" + +#define EI_WEAK_FN __attribute__((weak)) + +EI_WEAK_FN EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +EI_WEAK_FN EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + vTaskDelay(time_ms / portTICK_RATE_MS); + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + return esp_timer_get_time()/1000; +} + +uint64_t ei_read_timer_us() { + return esp_timer_get_time(); +} + +void ei_putchar(char c) +{ + /* Send char to serial output */ + putchar(c); +} + +/** + * Printf function uses vsnprintf and output using USB Serial + */ +__attribute__((weak)) void ei_printf(const char *format, ...) { + static char print_buf[1024] = { 0 }; + + va_list args; + va_start(args, format); + int r = vsnprintf(print_buf, sizeof(print_buf), format, args); + va_end(args); + + if (r > 0) { + printf(print_buf); + } +} + +__attribute__((weak)) void ei_printf_float(float f) { + ei_printf("%f", f); +} + +// we use alligned alloc instead of regular malloc +// due to https://github.com/espressif/esp-nn/issues/7 +__attribute__((weak)) void *ei_malloc(size_t size) { +#if defined(CONFIG_IDF_TARGET_ESP32S3) +#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 2, 1) + return heap_caps_aligned_alloc(16, size, MALLOC_CAP_DEFAULT); +#else + return aligned_alloc(16, size); +#endif +#endif + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { +#if defined(CONFIG_IDF_TARGET_ESP32S3) +#if ESP_IDF_VERSION >= ESP_IDF_VERSION_VAL(5, 2, 1) + return heap_caps_calloc(nitems, size, MALLOC_CAP_DEFAULT); +#else + void *p; + p = aligned_alloc(16, nitems * size); + if (p == nullptr) + return p; + + memset(p, '\0', nitems * size); + return p; +#endif +#endif + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_ESPRESSIF == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/CMakeLists.txt b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/CMakeLists.txt new file mode 100644 index 0000000..d6e8030 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/CMakeLists.txt @@ -0,0 +1,96 @@ +# +# Copyright (c) 2019-2021 Arm Limited. All rights reserved. +# +# SPDX-License-Identifier: Apache-2.0 +# +# Licensed under the Apache License, Version 2.0 (the License); you may +# not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an AS IS BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +cmake_minimum_required(VERSION 3.15.6) + +project(ethosu_core_driver VERSION 0.0.1) + +# +# Build options +# + +option(DRIVER_PMU_AUTOINIT "Enable PMU boot auto-initialization" OFF) + +set(CMSIS_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../cmsis" CACHE PATH "Path to CMSIS.") + +set(LOG_NAMES err warning info debug) +set(ETHOSU_LOG_SEVERITY "warning" CACHE STRING "Driver log severity level ${LOG_NAMES} (Defaults to 'warning')") +set(ETHOSU_TARGET_NPU_CONFIG "ethos-u55-128" CACHE STRING "Default NPU configuration") +set_property(CACHE ETHOSU_LOG_SEVERITY PROPERTY STRINGS ${LOG_NAMES}) + +# +# Global settings +# + +# Check that ETHOSU_LOG_SEVERITY has one of the supported levels +list(FIND LOG_NAMES ${ETHOSU_LOG_SEVERITY} LOG_SEVERITY) +if (${LOG_SEVERITY} EQUAL -1) + message(FATAL_ERROR "Unsupported log level ${ETHOSU_LOG_SEVERITY}") +endif() + +# Make include directories available for current- and sub projects +include_directories(include src) +include_directories(${CMSIS_PATH}/CMSIS/Core/Include) + +# +# Build libraries +# + +# Build driver library +add_library(ethosu_core_driver STATIC) +target_include_directories(ethosu_core_driver PUBLIC include) +target_sources(ethosu_core_driver PRIVATE src/ethosu_driver.c src/ethosu_pmu.c) + +string(TOLOWER ${ETHOSU_TARGET_NPU_CONFIG} ETHOSU_TARGET_NPU_CONFIG) +if(ETHOSU_TARGET_NPU_CONFIG MATCHES "^ethos-(u[0-9]+|uz)-([0-9]+$)") + set(ETHOSU_ARCH ${CMAKE_MATCH_1}) + set(ETHOSU_MACS ${CMAKE_MATCH_2}) +else() + message(FATAL_ERROR "Invalid Ethos-U target configuration '${ETHOSU_TARGET_NPU_CONFIG}") +endif() + +target_compile_definitions(ethosu_core_driver PRIVATE + ETHOSU_ARCH=${ETHOSU_ARCH} + ETHOS$) + +if (ETHOSU_ARCH STREQUAL "u55" OR ETHOSU_ARCH STREQUAL "u65") + target_sources(ethosu_core_driver PRIVATE src/ethosu_device_u55_u65.c) +else() + message(FATAL_ERROR "Invalid NPU configuration") +endif() + + +# Set the log level for the target +target_compile_definitions(ethosu_core_driver PRIVATE ETHOSU_LOG_SEVERITY=${LOG_SEVERITY}) + +# Install library and include files +install(TARGETS ethosu_core_driver LIBRARY DESTINATION "lib") +install(FILES include/ethosu_device.h include/ethosu_driver.h include/pmu_ethosu.h + DESTINATION "include") + +# +# Print build status +# + +message(STATUS "*******************************************************") +message(STATUS "PROJECT_NAME : ${PROJECT_NAME}") +message(STATUS "ETHOSU_TARGET_NPU_CONFIG : ${ETHOSU_TARGET_NPU_CONFIG}") +message(STATUS "CMAKE_SYSTEM_PROCESSOR : ${CMAKE_SYSTEM_PROCESSOR}") +message(STATUS "CMSIS_PATH : ${CMSIS_PATH}") +message(STATUS "ETHOSU_LOG_SEVERITY : ${ETHOSU_LOG_SEVERITY}") +message(STATUS "*******************************************************") \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/LICENSE.txt b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/LICENSE.txt new file mode 100644 index 0000000..9c8f3ea --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/LICENSE.txt @@ -0,0 +1,201 @@ + Apache License + 
Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/README.MD b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/README.MD new file mode 100644 index 0000000..9e077b3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/README.MD @@ -0,0 +1,271 @@ +# Arm(R) Ethos(TM)-U core driver + +This repository contains a device driver for the Arm(R) Ethos(TM)-U NPU. + +## Building + +The source code comes with a CMake based build system. The driver is expected to +be cross compiled for any of the supported Arm Cortex(R)-M CPUs, which requires +the user to configure the build to match their system configuration. + + +One such requirement is to define the target CPU, normally by setting +`CMAKE_SYSTEM_PROCESSOR`. **Note** that when using the toolchain files provided +in [core_platform](https://git.mlplatform.org/ml/ethos-u/ethos-u-core-platform.git), +the variable `TARGET_CPU` must be used instead of `CMAKE_SYSTEM_PROCESSOR`. + +Target CPU is specified on the form "cortex-m", for example: +"cortex-m55+nodsp+nofp". + +Similarly the target NPU configuration is +controlled by setting `ETHOSU_TARGET_NPU_CONFIG`, for example "ethos-u55-128". + +The build configuration can be defined either in the toolchain file or +by passing options on the command line. + +```[bash] +$ cmake -B build \ + -DCMAKE_TOOLCHAIN_FILE= \ + -DCMAKE_SYSTEM_PROCESSOR=cortex-m \ + -DETHOSU_TARGET_NPU_CONFIG=ethos-u- +$ cmake --build build +``` + +or when using toolchain files from [core_platform](https://git.mlplatform.org/ml/ethos-u/ethos-u-core-platform.git) + +```[bash] +$ cmake -B build \ + -DCMAKE_TOOLCHAIN_FILE= \ + -DTARGET_CPU=cortex-m \ + -DETHOSU_TARGET_NPU_CONFIG=ethos-u- +$ cmake --build build +``` + +## Driver APIs + +The driver APIs are defined in `include/ethosu_driver.h` and the related types +in `include/ethosu_types.h`. Inferences can be invoked in two manners: +synchronously or asynchronously. The two types of invocation can be freely mixed +in a single application. + +### Synchronous invocation + +A typical usage of the driver can be the following: + +```[C] +// reserve a driver to be used (this call could block until a driver is available) +struct ethosu_driver *drv = ethosu_reserve_driver(); +... +// run one or more inferences +int result = ethosu_invoke(drv, + custom_data_ptr, + custom_data_size, + base_addr, + base_addr_size, + num_base_addr); +... +// release the driver for others to use +ethosu_release_driver(drv); +``` + +### Asynchronous invocation + +A typical usage of the driver can be the following: + +```[C] +// reserve a driver to be used (this call could block until a driver is available) +struct ethosu_driver *drv = ethosu_reserve_driver(); +... +// run one or more inferences +int result = ethosu_invoke_async(drv, + custom_data_ptr, + custom_data_size, + base_addr, + base_addr_size, + num_base_addr, + user_arg); +... +// do some other work +... 
+int ret; +do { + // true = blocking, false = non-blocking + // ret > 0 means inference not completed (only for non-blocking mode) + ret = ethosu_wait(drv, ); +} while(ret > 0); +... +// release the driver for others to use +ethosu_release_driver(drv); +``` + +Note that if `ethosu_wait` is invoked from a different thread and concurrently +with `ethosu_invoke_async`, the user is responsible for guaranteeing that +`ethosu_wait` is called after a successful completion of `ethosu_invoke_async`. +Otherwise `ethosu_wait` might fail and not actually wait for the inference +completion. + +### Driver initialization + +In order to use a driver, it first needs to be initialized by calling the `init` +function, which will also register the handle in the list of available drivers. +A driver can be torn down by using the `deinit` function, which also removes the +driver from the list. + +The correct mapping is one driver per NPU device. Note that all NPUs must have +the same configuration, since only one NPU configuration can be selected and it +is fixed at compile time. + +## Implementation design + +The driver is structured in two main parts: the driver, which is responsible for +providing a unified API to the user; and the device part, which deals with the +details at the hardware level. + +In order to do its task, the driver needs a device implementation. There could be +multiple device implementations for different hardware models and/or +configurations. Note that the driver can be compiled to target only one NPU +configuration by specializing the device part at compile time. + +## Data caching + +For running the driver on Arm CPUs which are configured with a data cache, the +cache maintenance functions in the driver are exported with weakly linked +symbols that should be overridden. An example implementation using the CMSIS +primitives found in cachel1_armv7.h could be as below: + +```[C++] +extern "C" { +void ethosu_flush_dcache(uint32_t *p, size_t bytes) { + if (p) + SCB_CleanDCache_by_Addr(p, bytes); + else + SCB_CleanDCache(); +} + +void ethosu_invalidate_dcache(uint32_t *p, size_t bytes) { + if (p) + SCB_InvalidateDCache_by_Addr(p, bytes); + else + SCB_InvalidateDCache(); +} +} +``` + +## Mutex and semaphores + +To ensure the correct functionality of the driver, mutexes and semaphores are +used internally. The default implementations of mutexes and semaphores are +designed for a single-threaded baremetal environment. Hence, for integration in +environments where multi-threading is possible, e.g. an RTOS, the user is +responsible for providing the mutex and semaphore implementations to be used by +the driver. + +The mutex and semaphore APIs are defined as weakly linked functions that can be +overridden by the user. The APIs are the usual ones and are described below +(return types follow the declarations in `include/ethosu_driver.h`, which return +0 on success and a negative error code otherwise): + +```[C] +// create a mutex by returning back a handle +void *ethosu_mutex_create(void); +// lock the given mutex +int ethosu_mutex_lock(void *mutex); +// unlock the given mutex +int ethosu_mutex_unlock(void *mutex); + +// create a (binary) semaphore by returning back a handle +void *ethosu_semaphore_create(void); +// take from the given semaphore +int ethosu_semaphore_take(void *sem); +// give from the given semaphore +int ethosu_semaphore_give(void *sem); +``` + +## Begin/End inference callbacks + +The driver provides weakly linked functions as hooks to receive callbacks whenever +an inference begins and ends. The user can override these functions when needed, for example as sketched below.
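For illustration, these hooks pair naturally with the PMU API declared in `include/pmu_ethosu.h` (also added by this patch) to profile each inference. The sketch below is a minimal, hedged example rather than upstream code: it assumes a bare-metal setup where plain `printf` logging is acceptable, and it allocates nothing in the begin hook, so there is nothing to release in the end hook.

```[C]
#include <inttypes.h>
#include <stdio.h>

#include "ethosu_driver.h"
#include "pmu_ethosu.h"

// Start the PMU cycle counter when an inference begins.
void ethosu_inference_begin(struct ethosu_driver *drv, void *user_arg)
{
    (void)user_arg;
    ETHOSU_PMU_Enable(drv);
    ETHOSU_PMU_CYCCNT_Reset(drv);
    ETHOSU_PMU_CNTR_Enable(drv, ETHOSU_PMU_CCNT_Msk);
}

// Read and report the elapsed NPU cycles when the inference ends.
void ethosu_inference_end(struct ethosu_driver *drv, void *user_arg)
{
    (void)user_arg;
    uint64_t cycles = ETHOSU_PMU_Get_CCNTR(drv);
    ETHOSU_PMU_CNTR_Disable(drv, ETHOSU_PMU_CCNT_Msk);
    ETHOSU_PMU_Disable(drv);
    printf("NPU cycles: %" PRIu64 "\n", cycles); // placeholder logging
}
```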
+To avoid memory leaks, any allocations done in the ethosu_inference_begin() must +be balanced by a corresponding free of the memory in the ethosu_inference_end() +callback. + +```[C] +void ethosu_inference_begin(struct ethosu_driver *drv, void *user_arg); +void ethosu_inference_end(struct ethosu_driver *drv, void *user_arg); +``` + +Note that the `void *user_arg` pointer passed to the invoke() function is the same +pointer passed to the begin() and end() callbacks. For example: + +```[C] +void my_function() { + ... + struct my_data data = {...}; + int result = ethosu_invoke_v3(drv, + custom_data_ptr, + custom_data_size, + base_addr, + base_addr_size, + num_base_addr, + (void *)&data); + .... +} + +void ethosu_inference_begin(struct ethosu_driver *drv, void *user_arg) { + struct my_data *data = (struct my_data*) user_arg; + // use drv and data here +} + +void ethosu_inference_end(struct ethosu_driver *drv, void *user_arg) { + struct my_data *data = (struct my_data*) user_arg; + // use drv and data here +} +``` + +## License + +The Arm Ethos-U core driver is provided under an Apache-2.0 license. Please see +[LICENSE.txt](LICENSE.txt) for more information. + +## Contributions + +The Arm Ethos-U project welcomes contributions under the Apache-2.0 license. + +Before we can accept your contribution, you need to certify its origin and give +us your permission. For this process we use the Developer Certificate of Origin +(DCO) V1.1 (https://developercertificate.org). + +To indicate that you agree to the terms of the DCO, you "sign off" your +contribution by adding a line with your name and e-mail address to every git +commit message. You must use your real name; no pseudonyms or anonymous +contributions are accepted. If there is more than one contributor, everyone +adds their name and e-mail to the commit message. + +```[] +Author: John Doe <john.doe@example.org> +Date: Mon Feb 29 12:12:12 2016 +0000 + +Title of the commit + +Short description of the change. + +Signed-off-by: John Doe <john.doe@example.org> +Signed-off-by: Foo Bar <foo.bar@example.org> +``` + +The contributions will be code reviewed by Arm before they can be accepted into +the repository. + +In order to submit a contribution, push your patch to +`ssh://@review.mlplatform.org:29418/ml/ethos-u/ethos-u-core-driver`. +To do this you will need to sign in to +[review.mlplatform.org](https://review.mlplatform.org) using a GitHub account +and add your SSH key under your settings. If there is a problem adding the SSH +key make sure there is a valid email address in the Email Addresses field. + +## Security + +Please see [Security](SECURITY.md). + +## Trademark notice + +Arm, Cortex and Ethos are registered trademarks of Arm Limited (or its +subsidiaries) in the US and/or elsewhere. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/SECURITY.md b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/SECURITY.md new file mode 100644 index 0000000..29c6ce4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/SECURITY.md @@ -0,0 +1,85 @@ +# Security + +If you believe you have identified a security-related issue or vulnerability, +then we encourage you to responsibly disclose it to us as soon as possible. + +## Reporting vulnerabilities + +Arm takes security issues seriously and welcomes feedback from researchers and +the security community in order to improve the security of its products and +services.
We operate a coordinated disclosure policy for disclosing +vulnerabilities and other security issues. + +Security issues can be complex and one single timescale doesn't fit all +circumstances. We will make best endeavours to inform you when we expect +security notifications and fixes to be available and facilitate coordinated +disclosure when notifications and patches/mitigations are available. + +### Report + +For all security issues, contact Arm by email at +[arm-security@arm.com](mailto:arm-security@arm.com). In the body of the email +include as much information as possible about the issue or vulnerability and any +additional contact details. + +### Secure submission using PGP + +We support and encourage secure submission of vulnerability reports using PGP, +using the key below. If you would like replies to be encrypted, please provide +your own public key through a secure mechanism. + +~~~none +-----BEGIN PGP PUBLIC KEY BLOCK----- +mQINBFr7/RMBEACjHR5QZL/z1t2aLCRNXLE4KJiQmCo7edU5Be+7MTjIJDzZNu68 +lNEUYRoLexeayif8eC4T19bUsSbGpxHiYsFFjV8ewLXDyDJRRuaBGPfQ5rn/mE6X +Nvu+9Pputr+mB1R3CXcvrNkhmzPkK7zVM15oeyBMhogqPssuT4OeMduQdip8smfK +xTMk91RrJTLb+G3eE1tf+81kXBYvzp2e24Sn0/VeYe0IWnBobjVBZk3TmcYxDvz5 +Y47fU9V6cNj3Zq4VYrgxuLoFCA2VtetyiFQm5IYa3Bt3SWcAwihr8nbR2HoNdWyA +u8wJYYVzSq3hvT5l/IjTHxEcY+6RBq8poDSsftzvX386u9hmw7sJQFlTw6/pUjdr +gbsZ2ZzRBzKtU17ercpn4kU6VgVP3WRB5HiTFFkEpZuqAznOYaHbMq4dfd/g7Quq +C0VTbWiJnhku2i+g4BdHHRDtIF6U3aVQAfbrDb1LjVTa65p5ULOeY3HRAWtMNtu/ +Cj8cD98JDanzXtcnisds8vMQ8LZ6iMFChEnF8K4V0eLw9Ju6CMNiFYY7SEBndD/H +M4KcU4li7mROSbJcshgEbe1SYkxdMuI9eY4DNYxl3VjxoPUGzeqXo/ADFKE9bHsi +GTyEoij4ku0HspLVKnYHXn/LqHGwEcwjF8zphS+w5cn/e01akYwz5EVSQwARAQAB +tB1Bcm0gU3VwcG9ydCA8c3VwcG9ydEBhcm0uY29tPokCTgQTAQgAOBYhBN9zqDwZ +RL/vF0ihcdfNKdz4bBRiBQJa+/0TAhsDBQsJCAcCBhUKCQgLAgQWAgMBAh4BAheA +AAoJENfNKdz4bBRibK0P/jLlJR/QYypcjb+8BnHT9tCDgcV2KFYXS15VpbSNviN9 +Xs/UOnSadoGUMGCXDyb1PRNn98yUn7JlNR9rtsqPRmkpbo5cuw46ehgxjVlfcHnk +CttaE8Davx6zo0fyrBib2+oVVW8usi9+uRK4vhhPUicO3oXwzldsVFz+RbpubZxc +Bg/CZ+dQ2jMKOv1zDtInOG6OBnbQZRAeiWXgGhcIoPZ4zBQOi8nr0+bLcfvMeZi2 +uz6uKnylpXwZbl4ijcG8MKV/ei+7du+SzA9NY0WOT2g3FXDREWUhjKs8bmEZgIx4 +QgvDNpxAUThF+TqQ7zrsA8nT8POvDD0MhN/Z+A3QdPTdcaZFaXzIdxbDg+0FKmzu +OgtQBH4C01RWrkmZlhO5w7/Qjt0vLlhfyQIL9BW/HeEPtjnH2Hnq8xYnZhlVqJEh +FJU7F9sMvyiJiKviobFTd6AmpVkhxhcJ3k2L2C03imTsmUwAoejQCXwiYcOhyQ2t +Z9Nk8YIZTEw2urGFi4HSQPwPq2j/2j7ABJ4rlzJvO6vs5ppGkumvzIIP9JnpVXbp +wcbK6Ev6KdkX4s14Mzd6Hsd8LpL8t5nHhxUey6G0xKe2eSlHVm5Mlfhoete9UmIZ +dzIOZkgTgWXlYXRIxwGQ2Pss7pURtofykvLklq4jcobQuHxurl9cteodETfbWk/J +uQINBFr7/RMBEADWZG8eqt5D22g3T7ehnH/T3fuTX8LPUBnODMWGAEUY8uv64To8 +46odvrXFgWBgCf0sROEyJchc3SGLyR9S4lJsVJRde3QLN3WZkHlB4pSn4IQHFyQd +wsLQi+S9uggHMPlQ6MNvc5n0P3k5bT9fLUmtJWJ3QVjW7k963ZXpzf1zbQJqs30w +rlqGUZllfRoYQTfcYxFEaUFhwRJ//skNImWH8Vz+PTnqg2zRtFn3usrBV4GpNvsM +6jy+YEsSvUa7IY8k4wpPzEhIfqDjGbZxFSQ1H1G+mLUL+DD7oGffej/ZoC86TIdM +p6ew1rGhJdQBLh9nx+1ADOLWjNo2R0h60u7VR5q/K6V4fwWmeGFipPXZCD92I+nR +t/cjznwNyD/6J9YrBMF7mbGrS1TyfLaLt4tpdcBnsgqDTodd5OmG65mroXsg/lNO +7YZdecLZ34krfaLrWTtKkqULXbppB+uQvbVj8p8ONRImn6bZ+iAhnNaH9wJ06ico +b1F0imJ2SJWnFr6PzPRr0gPStLgu9wrRKheaORwF/H/HxSyPZxNVxFqu81q518A/ +plhub9INQLaxHf/TTjXpqZCcfdNTYUAW8rwbQfW9doSIT4lHY8bJXktb6BsVjkFj +PzDeYpXeOoTWetQqsEuTdg/F+qg041QBLtNj9Lr3Vy3StgMciRUIP8m0XwARAQAB +iQI2BBgBCAAgFiEE33OoPBlEv+8XSKFx180p3PhsFGIFAlr7/RMCGwwACgkQ180p +3PhsFGLWMA//V/XKrnI2YBh/SptUrgg7knPXva45bb7tGSH1fJg8f/wqycOSFFCY +ES45boA5jlQ3z8uw6BYCz5KeOucGhxAMw+x5EDdxZ33ksY5zqXB35WaMXzEwGYYb +E113/yhOsTbzu4bBKABSXbJO98MdAWvWpyCpp2MHIR3S9+ycM7/FMZ5xi3czZNRg 
+9+WZP+7W4qWhJptQ0kBh5C3N/tiltju5WQ2Y7XIn+5dMOJdtseFS7CNerxXZGAtH +nfRxaD/4ENdbWOwaVJiVW7+ioUJz09OWgy0gLYSDW+hciDnW1QAaJLpdAbniGZ0S +JsTmaZla8JnUKqZPgbFfA2OcnH9H+DWc0pHv17c5tJzTMP7rgirgGRX/U2LOzmFZ +1UxjQj5nn3Oa5frXbIAzb8xKiR0VDaquCM/3sti1AesYiS0Gw0Sqnw8qpFypgFXN +CKVgYXppIT+TmbDbNJDOB2UycxeI4vbiBwU8fI4qSpW12WsGdAJt/rx3UsyhZ+02 +4aSqDHzhJmtDPQ6lnaKe1fUkC90tgp8loVGmriWQx82jAQMqATVjIklTpE4vm00f +ocQIWOKEE90mKNEoV6rNbfl5QevmapTVdV/pmrRBzhbsa1uAUS4HZdH0Nf/OXEyv +yYCr2gCFPymkkRYhPr2w5EgbWyzLaBIwqjyIbXaveuB3DYi2Lhbf64I= +=EaN7 +-----END PGP PUBLIC KEY BLOCK----- +~~~ + +For more information visit + \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/ethosu_driver.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/ethosu_driver.h new file mode 100644 index 0000000..9c9f173 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/ethosu_driver.h @@ -0,0 +1,361 @@ +/* + * SPDX-FileCopyrightText: Copyright 2019-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ETHOSU_DRIVER_H +#define ETHOSU_DRIVER_H + +/****************************************************************************** + * Includes + ******************************************************************************/ + +#include "ethosu_types.h" + +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************** + * Defines + ******************************************************************************/ + +#define ETHOSU_DRIVER_VERSION_MAJOR 0 ///< Driver major version +#define ETHOSU_DRIVER_VERSION_MINOR 16 ///< Driver minor version +#define ETHOSU_DRIVER_VERSION_PATCH 0 ///< Driver patch version + +/****************************************************************************** + * Types + ******************************************************************************/ + +// Forward declare +struct ethosu_device; + +enum ethosu_job_state +{ + ETHOSU_JOB_IDLE = 0, + ETHOSU_JOB_RUNNING, + ETHOSU_JOB_DONE +}; + +struct ethosu_job +{ + volatile enum ethosu_job_state state; + const void *custom_data_ptr; + int custom_data_size; + const uint64_t *base_addr; + const size_t *base_addr_size; + int num_base_addr; + void *user_arg; +}; + +struct ethosu_driver +{ + struct ethosu_device *dev; + struct ethosu_driver *next; + struct ethosu_job job; + void *semaphore; + uint64_t fast_memory; + size_t fast_memory_size; + uint32_t power_request_counter; + bool status_error; + bool reserved; +}; + +struct ethosu_driver_version +{ + uint8_t major; + uint8_t minor; + uint8_t patch; +}; + +enum ethosu_request_clients +{ + ETHOSU_PMU_REQUEST = 0, + ETHOSU_INFERENCE_REQUEST = 1, +}; + +/****************************************************************************** + * Prototypes (weak functions 
in driver) + ******************************************************************************/ + +/** + * Interrupt handler to be called on IRQ from Ethos-U + * + * @param drv Pointer to driver handle + */ +void ethosu_irq_handler(struct ethosu_driver *drv); + +/** + * Flush/clean the data cache by address and size. Passing NULL as p argument + * expects the whole cache to be flushed. + * + * Addresses passed to this function must be 16 byte aligned. + * + * @param p 16 byte aligned address + * @param bytes Size of memory block in bytes + */ +void ethosu_flush_dcache(uint32_t *p, size_t bytes); + +/** + * Invalidate the data cache by address and size. Passing NULL as p argument + * expects the whole cache to be invalidated. + * + * Addresses passed to this function must be 16 byte aligned. + * + * @param p 16 byte aligned address + * @param bytes Size in bytes + */ +void ethosu_invalidate_dcache(uint32_t *p, size_t bytes); + +/** + * Minimal mutex implementation for baremetal applications. See + * ethosu_driver.c. + * + * @return Pointer to mutex handle + */ +void *ethosu_mutex_create(void); + +/** + * Minimal sempahore implementation for baremetal applications. See + * ethosu_driver.c. + * + * @return Pointer to semaphore handle + */ +void *ethosu_semaphore_create(void); + +/** + * Lock mutex. + * + * @param mutex Pointer to mutex handle + * @returns 0 on success, else negative error code + */ +int ethosu_mutex_lock(void *mutex); + +/** + * Unlock mutex. + * + * @param mutex Pointer to mutex handle + * @returns 0 on success, else negative error code + */ +int ethosu_mutex_unlock(void *mutex); + +/** + * Take semaphore. + * + * @param sem Pointer to semaphore handle + * @returns 0 on success, else negative error code + */ +int ethosu_semaphore_take(void *sem); + +/** + * Give semaphore. + * + * @param sem Pointer to semaphore handle + * @returns 0 on success, else negative error code + */ +int ethosu_semaphore_give(void *sem); + +/** + * Callback invoked just before the inference is started. + * + * @param drv Pointer to driver handle + * @param user_arg User argument provided to ethosu_invoke_*() + */ +void ethosu_inference_begin(struct ethosu_driver *drv, void *user_arg); + +/** + * Callback invoked just after the inference has completed. + * + * @param drv Pointer to driver handle + * @param user_arg User argument provided to ethosu_invoke_*() + */ +void ethosu_inference_end(struct ethosu_driver *drv, void *user_arg); + +/** + * Remapping command stream and base pointer addresses. + * + * @param address Address to be remapped. + * @param index -1 command stream, 0-n base address index + * + * @return Remapped address + */ +uint64_t ethosu_address_remap(uint64_t address, int index); + +/****************************************************************************** + * Prototypes + ******************************************************************************/ + +/** + * Initialize the Ethos-U driver. 
+ * + * @param drv Pointer to driver handle + * @param base_address NPU register base address + * @param fast_memory Fast memory area, used for Ethos-U65 with spilling + * @param fast_memory_size Size in bytes of fast memory area + * @param secure_enable Configure NPU in secure- or non-secure mode + * @param privilege_enable Configure NPU in privileged- or non-privileged mode + * @return 0 on success, else negative error code + */ +int ethosu_init(struct ethosu_driver *drv, + void *const base_address, + const void *fast_memory, + const size_t fast_memory_size, + uint32_t secure_enable, + uint32_t privilege_enable); + +/** + * Deinitialize the Ethos-U driver. + * + * @param drv Pointer to driver handle + */ +void ethosu_deinit(struct ethosu_driver *drv); + +/** + * Soft resets the Ethos-U device. + * + * @param drv Pointer to driver handle + * @return 0 on success, else negative error code + */ +int ethosu_soft_reset(struct ethosu_driver *drv); + +/** + * Request to disable Q-channel power gating of the Ethos-U device. + * Power requests are ref.counted. Increases count. + * (Note: clock gating is made to follow power gating) + * + * @param drv Pointer to driver handle + * @return 0 on success, else negative error code + */ +int ethosu_request_power(struct ethosu_driver *drv); + +/** + * Release disable request for Q-channel power gating of the Ethos-U device. + * Power requests are ref.counted. Decreases count. + * + * @param drv Pointer to driver handle + */ +void ethosu_release_power(struct ethosu_driver *drv); + +/** + * Get Ethos-U driver version. + * + * @param ver Driver version struct + */ +void ethosu_get_driver_version(struct ethosu_driver_version *ver); + +/** + * Get Ethos-U hardware information. + * + * @param drv Pointer to driver handle + * @param hw Hardware information struct + */ +void ethosu_get_hw_info(struct ethosu_driver *drv, struct ethosu_hw_info *hw); + +/** + * Invoke command stream. + * + * @param drv Pointer to driver handle + * @param custom_data_ptr Custom data payload + * @param custom_data_size Size in bytes of custom data + * @param base_addr Array of base address pointers + * @param base_addr_size Size in bytes of each address in base_addr + * @param num_base_addr Number of elements in base_addr array + * @param user_arg User argument, will be passed to + * ethosu_inference_begin() and ethosu_inference_end() + * @return 0 on success, else negative error code + */ +int ethosu_invoke_v3(struct ethosu_driver *drv, + const void *custom_data_ptr, + const int custom_data_size, + uint64_t *const base_addr, + const size_t *base_addr_size, + const int num_base_addr, + void *user_arg); + +#define ethosu_invoke(drv, custom_data_ptr, custom_data_size, base_addr, base_addr_size, num_base_addr) \ + ethosu_invoke_v3(drv, custom_data_ptr, custom_data_size, base_addr, base_addr_size, num_base_addr, 0) + +/** + * Invoke command stream using async interface. + * Must be followed by call(s) to ethosu_wait() upon successful return. + * + * @see ethosu_invoke_v3 for documentation. 
+ */ +int ethosu_invoke_async(struct ethosu_driver *drv, + const void *custom_data_ptr, + const int custom_data_size, + uint64_t *const base_addr, + const size_t *base_addr_size, + const int num_base_addr, + void *user_arg); + +/** + * Wait for inference to complete (block=true) + * Poll status or finish up if inference is complete (block=false) + * (This function is only intended to be used in conjuction with ethosu_invoke_async) + * + * @param drv Pointer to driver handle + * @param block If call should block if inference is running + * @return -2 on inference not invoked, -1 on inference error, 0 on success, 1 on inference running + */ +int ethosu_wait(struct ethosu_driver *drv, bool block); + +/** + * Reserves a driver to execute inference with. Call will block until a driver + * is available. + * + * @return Pointer to driver handle. + */ +struct ethosu_driver *ethosu_reserve_driver(void); + +/** + * Release driver that was previously reserved with @see ethosu_reserve_driver. + * + * @param drv Pointer to driver handle + */ +void ethosu_release_driver(struct ethosu_driver *drv); + +/** + * Static inline for backwards-compatibility. + * + * @see ethosu_invoke_v3 for documentation. + */ +static inline int ethosu_invoke_v2(const void *custom_data_ptr, + const int custom_data_size, + uint64_t *const base_addr, + const size_t *base_addr_size, + const int num_base_addr) +{ + struct ethosu_driver *drv = ethosu_reserve_driver(); + if (!drv) + { + return -1; + } + int result = ethosu_invoke_v3(drv, custom_data_ptr, custom_data_size, base_addr, base_addr_size, num_base_addr, 0); + ethosu_release_driver(drv); + return result; +} + +#ifdef __cplusplus +} +#endif + +#endif // ETHOSU_DRIVER_H diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/ethosu_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/ethosu_types.h new file mode 100644 index 0000000..a8062dd --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/ethosu_types.h @@ -0,0 +1,76 @@ +/* + * Copyright (c) 2019-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ETHOSU_TYPES_H +#define ETHOSU_TYPES_H + +/****************************************************************************** + * Includes + ******************************************************************************/ + +#include + +/****************************************************************************** + * Types + ******************************************************************************/ + +enum ethosu_error_codes +{ + ETHOSU_SUCCESS = 0, ///< Success + ETHOSU_GENERIC_FAILURE = -1, ///< Generic failure + ETHOSU_INVALID_PARAM = -2 ///< Invalid parameter +}; + +enum ethosu_clock_q_request +{ + ETHOSU_CLOCK_Q_DISABLE = 0, ///< Disable NPU signal ready for clock off. 
+ ETHOSU_CLOCK_Q_ENABLE = 1, ///< Enable NPU signal ready for clock off when stop+idle state reached. + ETHOSU_CLOCK_Q_UNCHANGED = 2 ///< Keep current clock q setting +}; + +enum ethosu_power_q_request +{ + ETHOSU_POWER_Q_DISABLE = 0, ///< Disable NPU signal ready for power off. + ETHOSU_POWER_Q_ENABLE = 1, ///< Enable NPU signal ready for power off when stop+idle state reached. + ETHOSU_POWER_Q_UNCHANGED = 2 ///< Keep current power q setting +}; + +struct ethosu_id +{ + uint32_t version_status; ///< Version status + uint32_t version_minor; ///< Version minor + uint32_t version_major; ///< Version major + uint32_t product_major; ///< Product major + uint32_t arch_patch_rev; ///< Architecture version patch + uint32_t arch_minor_rev; ///< Architecture version minor + uint32_t arch_major_rev; ///< Architecture version major +}; + +struct ethosu_config +{ + uint32_t macs_per_cc; ///< MACs per clock cycle + uint32_t cmd_stream_version; ///< NPU command stream version + uint32_t custom_dma; ///< Custom DMA enabled +}; + +struct ethosu_hw_info +{ + struct ethosu_id version; + struct ethosu_config cfg; +}; +#endif // ETHOSU_TYPES_H diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/pmu_ethosu.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/pmu_ethosu.h new file mode 100644 index 0000000..b717130 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/include/pmu_ethosu.h @@ -0,0 +1,326 @@ +/* + * SPDX-FileCopyrightText: Copyright 2019-2022 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef PMU_ETHOSU_H +#define PMU_ETHOSU_H + +/***************************************************************************** + * Includes + *****************************************************************************/ + +#include + +#include "ethosu_driver.h" + +#ifdef __cplusplus +extern "C" { +#endif + +/***************************************************************************** + * Defines + *****************************************************************************/ + +#define ETHOSU_PMU_NCOUNTERS 4 + +#define ETHOSU_PMU_CNT1_Msk (1UL << 0) +#define ETHOSU_PMU_CNT2_Msk (1UL << 1) +#define ETHOSU_PMU_CNT3_Msk (1UL << 2) +#define ETHOSU_PMU_CNT4_Msk (1UL << 3) +#define ETHOSU_PMU_CCNT_Msk (1UL << 31) + +/***************************************************************************** + * Types + *****************************************************************************/ + +/** \brief HW Supported ETHOSU PMU Events + * + * Note: These values are symbolic. Actual HW-values may change. I.e. always use API + * to set/get actual event-type value. 
+ * */ +enum ethosu_pmu_event_type +{ + ETHOSU_PMU_NO_EVENT = 0, + ETHOSU_PMU_CYCLE, + ETHOSU_PMU_NPU_IDLE, + ETHOSU_PMU_CC_STALLED_ON_BLOCKDEP, + ETHOSU_PMU_CC_STALLED_ON_SHRAM_RECONFIG, + ETHOSU_PMU_NPU_ACTIVE, + ETHOSU_PMU_MAC_ACTIVE, + ETHOSU_PMU_MAC_ACTIVE_8BIT, + ETHOSU_PMU_MAC_ACTIVE_16BIT, + ETHOSU_PMU_MAC_DPU_ACTIVE, + ETHOSU_PMU_MAC_STALLED_BY_WD_ACC, + ETHOSU_PMU_MAC_STALLED_BY_WD, + ETHOSU_PMU_MAC_STALLED_BY_ACC, + ETHOSU_PMU_MAC_STALLED_BY_IB, + ETHOSU_PMU_MAC_ACTIVE_32BIT, + ETHOSU_PMU_MAC_STALLED_BY_INT_W, + ETHOSU_PMU_MAC_STALLED_BY_INT_ACC, + ETHOSU_PMU_AO_ACTIVE, + ETHOSU_PMU_AO_ACTIVE_8BIT, + ETHOSU_PMU_AO_ACTIVE_16BIT, + ETHOSU_PMU_AO_STALLED_BY_OFMP_OB, + ETHOSU_PMU_AO_STALLED_BY_OFMP, + ETHOSU_PMU_AO_STALLED_BY_OB, + ETHOSU_PMU_AO_STALLED_BY_ACC_IB, + ETHOSU_PMU_AO_STALLED_BY_ACC, + ETHOSU_PMU_AO_STALLED_BY_IB, + ETHOSU_PMU_WD_ACTIVE, + ETHOSU_PMU_WD_STALLED, + ETHOSU_PMU_WD_STALLED_BY_WS, + ETHOSU_PMU_WD_STALLED_BY_WD_BUF, + ETHOSU_PMU_WD_PARSE_ACTIVE, + ETHOSU_PMU_WD_PARSE_STALLED, + ETHOSU_PMU_WD_PARSE_STALLED_IN, + ETHOSU_PMU_WD_PARSE_STALLED_OUT, + ETHOSU_PMU_WD_TRANS_WS, + ETHOSU_PMU_WD_TRANS_WB, + ETHOSU_PMU_WD_TRANS_DW0, + ETHOSU_PMU_WD_TRANS_DW1, + ETHOSU_PMU_AXI0_RD_TRANS_ACCEPTED, + ETHOSU_PMU_AXI0_RD_TRANS_COMPLETED, + ETHOSU_PMU_AXI0_RD_DATA_BEAT_RECEIVED, + ETHOSU_PMU_AXI0_RD_TRAN_REQ_STALLED, + ETHOSU_PMU_AXI0_WR_TRANS_ACCEPTED, + ETHOSU_PMU_AXI0_WR_TRANS_COMPLETED_M, + ETHOSU_PMU_AXI0_WR_TRANS_COMPLETED_S, + ETHOSU_PMU_AXI0_WR_DATA_BEAT_WRITTEN, + ETHOSU_PMU_AXI0_WR_TRAN_REQ_STALLED, + ETHOSU_PMU_AXI0_WR_DATA_BEAT_STALLED, + ETHOSU_PMU_AXI0_ENABLED_CYCLES, + ETHOSU_PMU_AXI0_RD_STALL_LIMIT, + ETHOSU_PMU_AXI0_WR_STALL_LIMIT, + ETHOSU_PMU_AXI_LATENCY_ANY, + ETHOSU_PMU_AXI_LATENCY_32, + ETHOSU_PMU_AXI_LATENCY_64, + ETHOSU_PMU_AXI_LATENCY_128, + ETHOSU_PMU_AXI_LATENCY_256, + ETHOSU_PMU_AXI_LATENCY_512, + ETHOSU_PMU_AXI_LATENCY_1024, + ETHOSU_PMU_ECC_DMA, + ETHOSU_PMU_ECC_SB0, + ETHOSU_PMU_AXI1_RD_TRANS_ACCEPTED, + ETHOSU_PMU_AXI1_RD_TRANS_COMPLETED, + ETHOSU_PMU_AXI1_RD_DATA_BEAT_RECEIVED, + ETHOSU_PMU_AXI1_RD_TRAN_REQ_STALLED, + ETHOSU_PMU_AXI1_WR_TRANS_ACCEPTED, + ETHOSU_PMU_AXI1_WR_TRANS_COMPLETED_M, + ETHOSU_PMU_AXI1_WR_TRANS_COMPLETED_S, + ETHOSU_PMU_AXI1_WR_DATA_BEAT_WRITTEN, + ETHOSU_PMU_AXI1_WR_TRAN_REQ_STALLED, + ETHOSU_PMU_AXI1_WR_DATA_BEAT_STALLED, + ETHOSU_PMU_AXI1_ENABLED_CYCLES, + ETHOSU_PMU_AXI1_RD_STALL_LIMIT, + ETHOSU_PMU_AXI1_WR_STALL_LIMIT, + ETHOSU_PMU_ECC_SB1, + + ETHOSU_PMU_SENTINEL // End-marker (not event) +}; + +/***************************************************************************** + * Functions + *****************************************************************************/ + +/** + * \brief Enable the PMU + */ +void ETHOSU_PMU_Enable(struct ethosu_driver *drv); + +/** + * \brief Disable the PMU + */ +void ETHOSU_PMU_Disable(struct ethosu_driver *drv); + +/** + * \brief Set event to count for PMU eventer counter + * \param [in] num Event counter (0-ETHOSU_PMU_NCOUNTERS) to configure + * \param [in] type Event to count + */ +void ETHOSU_PMU_Set_EVTYPER(struct ethosu_driver *drv, uint32_t num, enum ethosu_pmu_event_type type); + +/** + * \brief Get number of PMU event counters + * \return Number of event counters + */ +uint32_t ETHOSU_PMU_Get_NumEventCounters(void); + +/** + * \brief Get event to count for PMU eventer counter + * \param [in] num Event counter (0-ETHOSU_PMU_NCOUNTERS) to configure + * \return type Event to count + */ +enum ethosu_pmu_event_type ETHOSU_PMU_Get_EVTYPER(struct ethosu_driver *drv, uint32_t num); + 
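/*
 * Illustrative sketch only (not part of the original header): a minimal
 * event-counter setup using the declarations in this file could look like
 *
 *     ETHOSU_PMU_Enable(drv);
 *     ETHOSU_PMU_Set_EVTYPER(drv, 0, ETHOSU_PMU_NPU_ACTIVE);
 *     ETHOSU_PMU_EVCNTR_ALL_Reset(drv);
 *     ETHOSU_PMU_CNTR_Enable(drv, ETHOSU_PMU_CNT1_Msk);
 *     // ... run an inference ...
 *     uint32_t npu_active = ETHOSU_PMU_Get_EVCNTR(drv, 0);
 *
 * where drv is a handle obtained from ethosu_reserve_driver(). The chosen
 * event (ETHOSU_PMU_NPU_ACTIVE) and counter index 0 are arbitrary examples.
 */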
+/** + * \brief Reset cycle counter + */ +void ETHOSU_PMU_CYCCNT_Reset(struct ethosu_driver *drv); + +/** + * \brief Reset all event counters + */ +void ETHOSU_PMU_EVCNTR_ALL_Reset(struct ethosu_driver *drv); + +/** + * \brief Enable counters + * \param [in] mask Counters to enable + * \note Enables one or more of the following: + * - event counters (bit 0-ETHOSU_PMU_NCOUNTERS) + * - cycle counter (bit 31) + */ +void ETHOSU_PMU_CNTR_Enable(struct ethosu_driver *drv, uint32_t mask); + +/** + * \brief Disable counters + * \param [in] mask Counters to disable + * \note Disables one or more of the following: + * - event counters (bit 0-ETHOSU_PMU_NCOUNTERS) + * - cycle counter (bit 31) + */ +void ETHOSU_PMU_CNTR_Disable(struct ethosu_driver *drv, uint32_t mask); + +/** + * \brief Determine counters activation + * + * \return Event count + * \param [in] mask Counters to enable + * \return a bitmask where bit-set means: + * - event counters activated (bit 0-ETHOSU_PMU_NCOUNTERS) + * - cycle counter activate (bit 31) + * \note ETHOSU specific. Usage breaks CMSIS complience + */ +uint32_t ETHOSU_PMU_CNTR_Status(struct ethosu_driver *drv); + +/** + * \brief Read cycle counter (64 bit) + * \return Cycle count + * \note Two HW 32-bit registers that can increment independently in-between reads. + * To work-around raciness yet still avoid turning + * off the event both are read as one value twice. If the latter read + * is not greater than the former, it means overflow of LSW without + * incrementing MSW has occurred, in which case the former value is used. + */ +uint64_t ETHOSU_PMU_Get_CCNTR(struct ethosu_driver *drv); + +/** + * \brief Set cycle counter (64 bit) + * \param [in] val Conter value + * \note Two HW 32-bit registers that can increment independently in-between reads. + * To work-around raciness, counter is temporary disabled if enabled. + * \note ETHOSU specific. Usage breaks CMSIS complience + */ +void ETHOSU_PMU_Set_CCNTR(struct ethosu_driver *drv, uint64_t val); + +/** + * \brief Read event counter + * \param [in] num Event counter (0-ETHOSU_PMU_NCOUNTERS) + * \return Event count + */ +uint32_t ETHOSU_PMU_Get_EVCNTR(struct ethosu_driver *drv, uint32_t num); + +/** + * \brief Set event counter value + * \param [in] num Event counter (0-ETHOSU_PMU_NCOUNTERS) + * \param [in] val Conter value + * \note ETHOSU specific. 
Usage breaks CMSIS complience + */ +void ETHOSU_PMU_Set_EVCNTR(struct ethosu_driver *drv, uint32_t num, uint32_t val); + +/** + * \brief Read counter overflow status + * \return Counter overflow status bits for the following: + * - event counters (bit 0-ETHOSU_PMU_NCOUNTERS)) + * - cycle counter (bit 31) + */ +uint32_t ETHOSU_PMU_Get_CNTR_OVS(struct ethosu_driver *drv); + +/** + * \brief Clear counter overflow status + * \param [in] mask Counter overflow status bits to clear + * \note Clears overflow status bits for one or more of the following: + * - event counters (bit 0-ETHOSU_PMU_NCOUNTERS) + * - cycle counter (bit 31) + */ +void ETHOSU_PMU_Set_CNTR_OVS(struct ethosu_driver *drv, uint32_t mask); + +/** + * \brief Enable counter overflow interrupt request + * \param [in] mask Counter overflow interrupt request bits to set + * \note Sets overflow interrupt request bits for one or more of the following: + * - event counters (bit 0-ETHOSU_PMU_NCOUNTERS) + * - cycle counter (bit 31) + */ +void ETHOSU_PMU_Set_CNTR_IRQ_Enable(struct ethosu_driver *drv, uint32_t mask); + +/** + * \brief Disable counter overflow interrupt request + * \param [in] mask Counter overflow interrupt request bits to clear + * \note Clears overflow interrupt request bits for one or more of the following: + * - event counters (bit 0-ETHOSU_PMU_NCOUNTERS) + * - cycle counter (bit 31) + */ +void ETHOSU_PMU_Set_CNTR_IRQ_Disable(struct ethosu_driver *drv, uint32_t mask); + +/** + * \brief Get counters overflow interrupt request stiinings + * \return mask Counter overflow interrupt request bits + * \note Sets overflow interrupt request bits for one or more of the following: + * - event counters (bit 0-ETHOSU_PMU_NCOUNTERS) + * - cycle counter (bit 31) + * \note ETHOSU specific. Usage breaks CMSIS compliance + */ +uint32_t ETHOSU_PMU_Get_IRQ_Enable(struct ethosu_driver *drv); + +/** + * \brief Software increment event counter + * \param [in] mask Counters to increment + * - event counters (bit 0-ETHOSU_PMU_NCOUNTERS) + * - cycle counter (bit 31) + * \note Software increment bits for one or more event counters. + */ +void ETHOSU_PMU_CNTR_Increment(struct ethosu_driver *drv, uint32_t mask); + +/** + * \brief Set start event number for the cycle counter + * \param [in] start_event Event to trigger start of the cycle counter + * \note Sets the event number that starts the cycle counter. + */ +void ETHOSU_PMU_PMCCNTR_CFG_Set_Start_Event(struct ethosu_driver *drv, enum ethosu_pmu_event_type start_event); + +/** + * \brief Set stop event number for the cycle counter + * \param [in] stop_event Event number + * \note Sets the event number that stops the cycle counter. + */ +void ETHOSU_PMU_PMCCNTR_CFG_Set_Stop_Event(struct ethosu_driver *drv, enum ethosu_pmu_event_type stop_event); + +/** + * \brief Read qread register + */ +uint32_t ETHOSU_PMU_Get_QREAD(struct ethosu_driver *drv); + +/** + * \brief Read status register + */ +uint32_t ETHOSU_PMU_Get_STATUS(struct ethosu_driver *drv); + +#ifdef __cplusplus +} +#endif + +#endif /* PMU_ETHOSU_H */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ehtosu_config_u65.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ehtosu_config_u65.h new file mode 100644 index 0000000..b115f43 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ehtosu_config_u65.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2019-2020,2022 Arm Limited. 
+ * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ETHOSU_CONFIG_H +#define ETHOSU_CONFIG_H + +/* Set default values if not manually overriden */ + +#ifndef NPU_QCONFIG +#define NPU_QCONFIG 2 +#endif + +#ifndef NPU_REGIONCFG_0 +#define NPU_REGIONCFG_0 3 +#endif + +#ifndef NPU_REGIONCFG_1 +#define NPU_REGIONCFG_1 2 +#endif + +#ifndef NPU_REGIONCFG_2 +#define NPU_REGIONCFG_2 1 +#endif + +#ifndef NPU_REGIONCFG_3 +#define NPU_REGIONCFG_3 1 +#endif + +#ifndef NPU_REGIONCFG_4 +#define NPU_REGIONCFG_4 1 +#endif + +#ifndef NPU_REGIONCFG_5 +#define NPU_REGIONCFG_5 1 +#endif + +#ifndef NPU_REGIONCFG_6 +#define NPU_REGIONCFG_6 1 +#endif + +#ifndef NPU_REGIONCFG_7 +#define NPU_REGIONCFG_7 1 +#endif + +#ifndef AXI_LIMIT0_MAX_BEATS_BYTES +#define AXI_LIMIT0_MAX_BEATS_BYTES 0x0 +#endif + +#ifndef AXI_LIMIT0_MEM_TYPE +#define AXI_LIMIT0_MEM_TYPE 0x0 +#endif + +#ifndef AXI_LIMIT0_MAX_OUTSTANDING_READS +#define AXI_LIMIT0_MAX_OUTSTANDING_READS 64 +#endif + +#ifndef AXI_LIMIT0_MAX_OUTSTANDING_WRITES +#define AXI_LIMIT0_MAX_OUTSTANDING_WRITES 32 +#endif + +#ifndef AXI_LIMIT1_MAX_BEATS_BYTES +#define AXI_LIMIT1_MAX_BEATS_BYTES 0x0 +#endif + +#ifndef AXI_LIMIT1_MEM_TYPE +#define AXI_LIMIT1_MEM_TYPE 0x0 +#endif + +#ifndef AXI_LIMIT1_MAX_OUTSTANDING_READS +#define AXI_LIMIT1_MAX_OUTSTANDING_READS 64 +#endif + +#ifndef AXI_LIMIT1_MAX_OUTSTANDING_WRITES +#define AXI_LIMIT1_MAX_OUTSTANDING_WRITES 32 +#endif + +#ifndef AXI_LIMIT2_MAX_BEATS_BYTES +#define AXI_LIMIT2_MAX_BEATS_BYTES 0x0 +#endif + +#ifndef AXI_LIMIT2_MEM_TYPE +#define AXI_LIMIT2_MEM_TYPE 0x0 +#endif + +#ifndef AXI_LIMIT2_MAX_OUTSTANDING_READS +#define AXI_LIMIT2_MAX_OUTSTANDING_READS 64 +#endif + +#ifndef AXI_LIMIT2_MAX_OUTSTANDING_WRITES +#define AXI_LIMIT2_MAX_OUTSTANDING_WRITES 32 +#endif + +#ifndef AXI_LIMIT3_MAX_BEATS_BYTES +#define AXI_LIMIT3_MAX_BEATS_BYTES 0x0 +#endif + +#ifndef AXI_LIMIT3_MEM_TYPE +#define AXI_LIMIT3_MEM_TYPE 0x0 +#endif + +#ifndef AXI_LIMIT3_MAX_OUTSTANDING_READS +#define AXI_LIMIT3_MAX_OUTSTANDING_READS 64 +#endif + +#ifndef AXI_LIMIT3_MAX_OUTSTANDING_WRITES +#define AXI_LIMIT3_MAX_OUTSTANDING_WRITES 32 +#endif + +#endif /* #ifndef ETHOSU_CONFIG_H */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu55_interface.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu55_interface.h new file mode 100644 index 0000000..9c0d230 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu55_interface.h @@ -0,0 +1,26198 @@ + +/* + * Copyright (c) 2020-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ETHOSU55_INTERFACE_H +#define ETHOSU55_INTERFACE_H + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +#if !defined(__cplusplus) || __cplusplus < 201402L +#define CONSTEXPR +#else +#define CONSTEXPR constexpr +#endif + +#ifndef __cplusplus +#define STRUCT struct +#else +#define STRUCT +#endif + +#if defined(__cplusplus) && defined(NPU_DISASSEMBLE) +#include +#include +#include +#endif + +#if defined(__cplusplus) && !defined(NPU_NAMESPACE) +#define NPU_NAMESPACE npu +#endif + +#ifdef __cplusplus +#include +#include +#endif + +#ifdef __cplusplus +namespace NPU_NAMESPACE +{ +#endif +#define NNX_ARCH_VERSION_MAJOR 1 +#define NNX_ARCH_VERSION_MINOR 1 +#define NNX_ARCH_VERSION_PATCH 0 + +// Register offsets +// +// Register subpage BASE +// +#define NPU_REG_ID 0x0000 +#define NPU_REG_STATUS 0x0004 +#define NPU_REG_CMD 0x0008 +#define NPU_REG_RESET 0x000C +#define NPU_REG_QBASE 0x0010 +#define NPU_REG_QBASE_HI 0x0014 +#define NPU_REG_QREAD 0x0018 +#define NPU_REG_QCONFIG 0x001C +#define NPU_REG_QSIZE 0x0020 +#define NPU_REG_PROT 0x0024 +#define NPU_REG_CONFIG 0x0028 +#define NPU_REG_LOCK 0x002C +#define NPU_REG_REGIONCFG 0x003C +#define NPU_REG_AXI_LIMIT0 0x0040 +#define NPU_REG_AXI_LIMIT1 0x0044 +#define NPU_REG_AXI_LIMIT2 0x0048 +#define NPU_REG_AXI_LIMIT3 0x004C +#define BASE_REGISTERS_SIZE 0x0080 + +// +// Register subpage BASE_POINTERS +// +#define NPU_REG_BASEP_BASE 0x0080 +#define NPU_REG_BASEP_ARRLEN 0x0008 +#define BASE_POINTERS_REGISTERS_SIZE 0x0100 + +// +// Register subpage DEBUG +// +#define NPU_REG_WD_STATUS 0x0100 +#define NPU_REG_MAC_STATUS 0x0104 +#define NPU_REG_AO_STATUS 0x0108 +#define NPU_REG_DMA_STATUS0 0x0110 +#define NPU_REG_DMA_STATUS1 0x0114 +#define NPU_REG_CLKFORCE 0x0140 +#define NPU_REG_DEBUG_ADDRESS 0x0144 +#define NPU_REG_DEBUG_MISC 0x0148 +#define NPU_REG_DEBUG_BLOCK 0x0150 +#define DEBUG_REGISTERS_SIZE 0x0180 + +// +// Register subpage PMU +// +#define NPU_REG_PMCR 0x0180 +#define NPU_REG_PMCNTENSET 0x0184 +#define NPU_REG_PMCNTENCLR 0x0188 +#define NPU_REG_PMOVSSET 0x018C +#define NPU_REG_PMOVSCLR 0x0190 +#define NPU_REG_PMINTSET 0x0194 +#define NPU_REG_PMINTCLR 0x0198 +#define NPU_REG_PMCCNTR 0x01A0 +#define NPU_REG_PMCCNTR_HI 0x01A4 +#define NPU_REG_PMCCNTR_CFG 0x01A8 +#define NPU_REG_PMCAXI_CHAN 0x01AC +#define PMU_REGISTERS_SIZE 0x0200 + +// +// Register subpage TSU_DEBUG +// +#define NPU_REG_KERNEL_X 0x0200 +#define NPU_REG_KERNEL_Y 0x0204 +#define NPU_REG_KERNEL_W_M1 0x0208 +#define NPU_REG_KERNEL_H_M1 0x020C +#define NPU_REG_OFM_CBLK_WIDTH_M1 0x0210 +#define NPU_REG_OFM_CBLK_HEIGHT_M1 0x0214 +#define NPU_REG_OFM_CBLK_DEPTH_M1 0x0218 +#define NPU_REG_IFM_CBLK_DEPTH_M1 0x021C +#define NPU_REG_OFM_X 0x0220 +#define NPU_REG_OFM_Y 0x0224 +#define NPU_REG_OFM_Z 0x0228 +#define NPU_REG_IFM_Z 0x022C +#define NPU_REG_PAD_TOP 0x0230 +#define NPU_REG_PAD_LEFT 0x0234 +#define NPU_REG_IFM_CBLK_WIDTH 0x0238 +#define NPU_REG_IFM_CBLK_HEIGHT 0x023C +#define NPU_REG_DMA_IFM_SRC 0x0240 +#define NPU_REG_DMA_IFM_SRC_HI 0x0244 +#define NPU_REG_DMA_IFM_DST 0x0248 +#define NPU_REG_DMA_OFM_SRC 0x024C +#define NPU_REG_DMA_OFM_DST 0x0250 +#define 
NPU_REG_DMA_OFM_DST_HI 0x0254 +#define NPU_REG_DMA_WEIGHT_SRC 0x0258 +#define NPU_REG_DMA_WEIGHT_SRC_HI 0x025C +#define NPU_REG_DMA_CMD_SRC 0x0260 +#define NPU_REG_DMA_CMD_SRC_HI 0x0264 +#define NPU_REG_DMA_CMD_SIZE 0x0268 +#define NPU_REG_DMA_M2M_SRC 0x026C +#define NPU_REG_DMA_M2M_SRC_HI 0x0270 +#define NPU_REG_DMA_M2M_DST 0x0274 +#define NPU_REG_DMA_M2M_DST_HI 0x0278 +#define NPU_REG_CURRENT_QREAD 0x027C +#define NPU_REG_DMA_SCALE_SRC 0x0280 +#define NPU_REG_DMA_SCALE_SRC_HI 0x0284 +#define NPU_REG_CURRENT_BLOCK 0x02B4 +#define NPU_REG_CURRENT_OP 0x02B8 +#define NPU_REG_CURRENT_CMD 0x02BC +#define TSU_DEBUG_REGISTERS_SIZE 0x02C0 + +// +// Register subpage PMU_COUNTERS +// +#define NPU_REG_PMEVCNTR_BASE 0x0300 +#define NPU_REG_PMEVCNTR_ARRLEN 0x0004 +#define NPU_REG_PMEVTYPER_BASE 0x0380 +#define NPU_REG_PMEVTYPER_ARRLEN 0x0004 +#define PMU_COUNTERS_REGISTERS_SIZE 0x0400 + +// +// Register subpage SHARED_BUFFER +// +#define NPU_REG_SHARED_BUFFER_BASE 0x0400 +#define NPU_REG_SHARED_BUFFER_ARRLEN 0x0100 +#define SHARED_BUFFER_REGISTERS_SIZE 0x0800 + +// +// Register subpage TSU_IFM +// +#define NPU_REG_IFM_PAD_TOP 0x0800 +#define NPU_REG_IFM_PAD_LEFT 0x0804 +#define NPU_REG_IFM_PAD_RIGHT 0x0808 +#define NPU_REG_IFM_PAD_BOTTOM 0x080C +#define NPU_REG_IFM_DEPTH_M1 0x0810 +#define NPU_REG_IFM_PRECISION 0x0814 +#define NPU_REG_IFM_UPSCALE 0x081C +#define NPU_REG_IFM_ZERO_POINT 0x0824 +#define NPU_REG_IFM_WIDTH0_M1 0x0828 +#define NPU_REG_IFM_HEIGHT0_M1 0x082C +#define NPU_REG_IFM_HEIGHT1_M1 0x0830 +#define NPU_REG_IFM_IB_END 0x0834 +#define NPU_REG_IFM_REGION 0x083C +#define TSU_IFM_REGISTERS_SIZE 0x0840 + +// +// Register subpage TSU_OFM +// +#define NPU_REG_OFM_WIDTH_M1 0x0844 +#define NPU_REG_OFM_HEIGHT_M1 0x0848 +#define NPU_REG_OFM_DEPTH_M1 0x084C +#define NPU_REG_OFM_PRECISION 0x0850 +#define NPU_REG_OFM_BLK_WIDTH_M1 0x0854 +#define NPU_REG_OFM_BLK_HEIGHT_M1 0x0858 +#define NPU_REG_OFM_BLK_DEPTH_M1 0x085C +#define NPU_REG_OFM_ZERO_POINT 0x0860 +#define NPU_REG_OFM_WIDTH0_M1 0x0868 +#define NPU_REG_OFM_HEIGHT0_M1 0x086C +#define NPU_REG_OFM_HEIGHT1_M1 0x0870 +#define NPU_REG_OFM_REGION 0x087C +#define TSU_OFM_REGISTERS_SIZE 0x0880 + +// +// Register subpage TSU_KERNEL +// +#define NPU_REG_KERNEL_WIDTH_M1 0x0880 +#define NPU_REG_KERNEL_HEIGHT_M1 0x0884 +#define NPU_REG_KERNEL_STRIDE 0x0888 +#define NPU_REG_ACC_FORMAT 0x0890 +#define NPU_REG_ACTIVATION 0x0894 +#define NPU_REG_ACTIVATION_MIN 0x0898 +#define NPU_REG_ACTIVATION_MAX 0x089C +#define NPU_REG_WEIGHT_REGION 0x08A0 +#define NPU_REG_SCALE_REGION 0x08A4 +#define NPU_REG_AB_START 0x08B4 +#define NPU_REG_BLOCKDEP 0x08BC +#define TSU_KERNEL_REGISTERS_SIZE 0x08C0 + +// +// Register subpage TSU_DMA +// +#define NPU_REG_DMA0_SRC_REGION 0x08C0 +#define NPU_REG_DMA0_DST_REGION 0x08C4 +#define NPU_REG_DMA0_SIZE0 0x08C8 +#define NPU_REG_DMA0_SIZE1 0x08CC +#define TSU_DMA_REGISTERS_SIZE 0x0900 + +// +// Register subpage TSU_IFM2 +// +#define NPU_REG_IFM2_BROADCAST 0x0900 +#define NPU_REG_IFM2_SCALAR 0x0904 +#define NPU_REG_IFM2_PRECISION 0x0914 +#define NPU_REG_IFM2_ZERO_POINT 0x0924 +#define NPU_REG_IFM2_WIDTH0_M1 0x0928 +#define NPU_REG_IFM2_HEIGHT0_M1 0x092C +#define NPU_REG_IFM2_HEIGHT1_M1 0x0930 +#define NPU_REG_IFM2_IB_START 0x0934 +#define NPU_REG_IFM2_REGION 0x093C +#define TSU_IFM2_REGISTERS_SIZE 0x0940 + +// +// Register subpage TSU_IFM_BASE +// +#define NPU_REG_IFM_BASE0 0x0A00 +#define NPU_REG_IFM_BASE0_HI 0x0A04 +#define NPU_REG_IFM_BASE1 0x0A08 +#define NPU_REG_IFM_BASE1_HI 0x0A0C +#define NPU_REG_IFM_BASE2 0x0A10 +#define 
NPU_REG_IFM_BASE2_HI 0x0A14 +#define NPU_REG_IFM_BASE3 0x0A18 +#define NPU_REG_IFM_BASE3_HI 0x0A1C +#define NPU_REG_IFM_STRIDE_X 0x0A20 +#define NPU_REG_IFM_STRIDE_X_HI 0x0A24 +#define NPU_REG_IFM_STRIDE_Y 0x0A28 +#define NPU_REG_IFM_STRIDE_Y_HI 0x0A2C +#define NPU_REG_IFM_STRIDE_C 0x0A30 +#define NPU_REG_IFM_STRIDE_C_HI 0x0A34 +#define TSU_IFM_BASE_REGISTERS_SIZE 0x0A40 + +// +// Register subpage TSU_OFM_BASE +// +#define NPU_REG_OFM_BASE0 0x0A40 +#define NPU_REG_OFM_BASE0_HI 0x0A44 +#define NPU_REG_OFM_BASE1 0x0A48 +#define NPU_REG_OFM_BASE1_HI 0x0A4C +#define NPU_REG_OFM_BASE2 0x0A50 +#define NPU_REG_OFM_BASE2_HI 0x0A54 +#define NPU_REG_OFM_BASE3 0x0A58 +#define NPU_REG_OFM_BASE3_HI 0x0A5C +#define NPU_REG_OFM_STRIDE_X 0x0A60 +#define NPU_REG_OFM_STRIDE_X_HI 0x0A64 +#define NPU_REG_OFM_STRIDE_Y 0x0A68 +#define NPU_REG_OFM_STRIDE_Y_HI 0x0A6C +#define NPU_REG_OFM_STRIDE_C 0x0A70 +#define NPU_REG_OFM_STRIDE_C_HI 0x0A74 +#define TSU_OFM_BASE_REGISTERS_SIZE 0x0A80 + +// +// Register subpage TSU_WS_BASE +// +#define NPU_REG_WEIGHT_BASE 0x0A80 +#define NPU_REG_WEIGHT_BASE_HI 0x0A84 +#define NPU_REG_WEIGHT_LENGTH 0x0A88 +#define NPU_REG_WEIGHT_LENGTH_HI 0x0A8C +#define NPU_REG_SCALE_BASE 0x0A90 +#define NPU_REG_SCALE_BASE_HI 0x0A94 +#define NPU_REG_SCALE_LENGTH 0x0A98 +#define NPU_REG_SCALE_LENGTH_HI 0x0A9C +#define NPU_REG_OFM_SCALE 0x0AA0 +#define NPU_REG_OFM_SCALE_SHIFT 0x0AA4 +#define NPU_REG_OPA_SCALE 0x0AA8 +#define NPU_REG_OPA_SCALE_SHIFT 0x0AAC +#define NPU_REG_OPB_SCALE 0x0AB0 +#define TSU_WS_BASE_REGISTERS_SIZE 0x0AC0 + +// +// Register subpage TSU_DMA_BASE +// +#define NPU_REG_DMA0_SRC 0x0AC0 +#define NPU_REG_DMA0_SRC_HI 0x0AC4 +#define NPU_REG_DMA0_DST 0x0AC8 +#define NPU_REG_DMA0_DST_HI 0x0ACC +#define NPU_REG_DMA0_LEN 0x0AD0 +#define NPU_REG_DMA0_LEN_HI 0x0AD4 +#define TSU_DMA_BASE_REGISTERS_SIZE 0x0B00 + +// +// Register subpage TSU_IFM2_BASE +// +#define NPU_REG_IFM2_BASE0 0x0B00 +#define NPU_REG_IFM2_BASE0_HI 0x0B04 +#define NPU_REG_IFM2_BASE1 0x0B08 +#define NPU_REG_IFM2_BASE1_HI 0x0B0C +#define NPU_REG_IFM2_BASE2 0x0B10 +#define NPU_REG_IFM2_BASE2_HI 0x0B14 +#define NPU_REG_IFM2_BASE3 0x0B18 +#define NPU_REG_IFM2_BASE3_HI 0x0B1C +#define NPU_REG_IFM2_STRIDE_X 0x0B20 +#define NPU_REG_IFM2_STRIDE_X_HI 0x0B24 +#define NPU_REG_IFM2_STRIDE_Y 0x0B28 +#define NPU_REG_IFM2_STRIDE_Y_HI 0x0B2C +#define NPU_REG_IFM2_STRIDE_C 0x0B30 +#define NPU_REG_IFM2_STRIDE_C_HI 0x0B34 +#define TSU_IFM2_BASE_REGISTERS_SIZE 0x0B40 + +// +// Register subpage TSU_WS1_BASE +// +#define TSU_WS1_BASE_REGISTERS_SIZE 0x0B80 + +// +// Register subpage TSU_USER_BASE +// +#define NPU_REG_USER_DEFINED_BASE 0x0B80 +#define NPU_REG_USER_DEFINED_ARRLEN 0x0008 +#define TSU_USER_BASE_REGISTERS_SIZE 0x0BC0 + +// +// Register subpage TSU_DMA_EBASE +// +#define TSU_DMA_EBASE_REGISTERS_SIZE 0x0C00 + +// +// Register subpage ID +// +#define NPU_REG_REVISION 0x0FC0 +#define NPU_REG_PID4 0x0FD0 +#define NPU_REG_PID5 0x0FD4 +#define NPU_REG_PID6 0x0FD8 +#define NPU_REG_PID7 0x0FDC +#define NPU_REG_PID0 0x0FE0 +#define NPU_REG_PID1 0x0FE4 +#define NPU_REG_PID2 0x0FE8 +#define NPU_REG_PID3 0x0FEC +#define NPU_REG_CID0 0x0FF0 +#define NPU_REG_CID1 0x0FF4 +#define NPU_REG_CID2 0x0FF8 +#define NPU_REG_CID3 0x0FFC +#define ID_REGISTERS_SIZE 0x1000 + +#ifdef __cplusplus +// Enum types +enum class acc_format : uint8_t +{ + I32 = 0, + I40 = 1, + F16 = 2, +}; + +enum class activation_clip_range : uint8_t +{ + OFM_PRECISION = 0, + FORCE_UINT8 = 2, + FORCE_INT8 = 3, + FORCE_INT16 = 5, +}; + +enum class activation_format : uint8_t +{ + 
NHWC = 0, + NHCWB16 = 1, +}; + +enum class activation_function : uint8_t +{ + RELU = 0, + TANH = 3, + SIGMOID = 4, + TABLE_0 = 16, + TABLE_1 = 17, + TABLE_2 = 18, + TABLE_3 = 19, + TABLE_4 = 20, + TABLE_5 = 21, + TABLE_6 = 22, + TABLE_7 = 23, +}; + +enum class activation_precision : uint8_t +{ + B8 = 0, + B16 = 1, + B32 = 2, + B64 = 3, +}; + +enum class activation_type : uint8_t +{ + UNSIGNED = 0, + SIGNED = 1, +}; + +enum class axi_mem_encoding : uint8_t +{ + DEVICE_NON_BUFFERABLE = 0, + DEVICE_BUFFERABLE = 1, + NORMAL_NON_CACHEABLE_NON_BUFFERABLE = 2, + NORMAL_NON_CACHEABLE_BUFFERABLE = 3, + WRITE_THROUGH_NO_ALLOCATE = 4, + WRITE_THROUGH_READ_ALLOCATE = 5, + WRITE_THROUGH_WRITE_ALLOCATE = 6, + WRITE_THROUGH_READ_AND_WRITE_ALLOCATE = 7, + WRITE_BACK_NO_ALLOCATE = 8, + WRITE_BACK_READ_ALLOCATE = 9, + WRITE_BACK_WRITE_ALLOCATE = 10, + WRITE_BACK_READ_AND_WRITE_ALLOCATE = 11, +}; + +enum class broadcast_mode : uint8_t +{ + DISABLE = 0, + ENABLE = 1, +}; + +enum class cmd0_opcode : uint16_t +{ + NPU_OP_STOP = 0, + NPU_OP_IRQ = 1, + NPU_OP_CONV = 2, + NPU_OP_DEPTHWISE = 3, + NPU_OP_POOL = 5, + NPU_OP_ELEMENTWISE = 6, + NPU_OP_DMA_START = 16, + NPU_OP_DMA_WAIT = 17, + NPU_OP_KERNEL_WAIT = 18, + NPU_OP_PMU_MASK = 19, + NPU_SET_IFM_PAD_TOP = 256, + NPU_SET_IFM_PAD_LEFT = 257, + NPU_SET_IFM_PAD_RIGHT = 258, + NPU_SET_IFM_PAD_BOTTOM = 259, + NPU_SET_IFM_DEPTH_M1 = 260, + NPU_SET_IFM_PRECISION = 261, + NPU_SET_IFM_UPSCALE = 263, + NPU_SET_IFM_ZERO_POINT = 265, + NPU_SET_IFM_WIDTH0_M1 = 266, + NPU_SET_IFM_HEIGHT0_M1 = 267, + NPU_SET_IFM_HEIGHT1_M1 = 268, + NPU_SET_IFM_IB_END = 269, + NPU_SET_IFM_REGION = 271, + NPU_SET_OFM_WIDTH_M1 = 273, + NPU_SET_OFM_HEIGHT_M1 = 274, + NPU_SET_OFM_DEPTH_M1 = 275, + NPU_SET_OFM_PRECISION = 276, + NPU_SET_OFM_BLK_WIDTH_M1 = 277, + NPU_SET_OFM_BLK_HEIGHT_M1 = 278, + NPU_SET_OFM_BLK_DEPTH_M1 = 279, + NPU_SET_OFM_ZERO_POINT = 280, + NPU_SET_OFM_WIDTH0_M1 = 282, + NPU_SET_OFM_HEIGHT0_M1 = 283, + NPU_SET_OFM_HEIGHT1_M1 = 284, + NPU_SET_OFM_REGION = 287, + NPU_SET_KERNEL_WIDTH_M1 = 288, + NPU_SET_KERNEL_HEIGHT_M1 = 289, + NPU_SET_KERNEL_STRIDE = 290, + NPU_SET_ACC_FORMAT = 292, + NPU_SET_ACTIVATION = 293, + NPU_SET_ACTIVATION_MIN = 294, + NPU_SET_ACTIVATION_MAX = 295, + NPU_SET_WEIGHT_REGION = 296, + NPU_SET_SCALE_REGION = 297, + NPU_SET_AB_START = 301, + NPU_SET_BLOCKDEP = 303, + NPU_SET_DMA0_SRC_REGION = 304, + NPU_SET_DMA0_DST_REGION = 305, + NPU_SET_DMA0_SIZE0 = 306, + NPU_SET_DMA0_SIZE1 = 307, + NPU_SET_IFM2_BROADCAST = 384, + NPU_SET_IFM2_SCALAR = 385, + NPU_SET_IFM2_PRECISION = 389, + NPU_SET_IFM2_ZERO_POINT = 393, + NPU_SET_IFM2_WIDTH0_M1 = 394, + NPU_SET_IFM2_HEIGHT0_M1 = 395, + NPU_SET_IFM2_HEIGHT1_M1 = 396, + NPU_SET_IFM2_IB_START = 397, + NPU_SET_IFM2_REGION = 399, +}; + +enum class cmd1_opcode : uint16_t +{ + NPU_SET_IFM_BASE0 = 0, + NPU_SET_IFM_BASE1 = 1, + NPU_SET_IFM_BASE2 = 2, + NPU_SET_IFM_BASE3 = 3, + NPU_SET_IFM_STRIDE_X = 4, + NPU_SET_IFM_STRIDE_Y = 5, + NPU_SET_IFM_STRIDE_C = 6, + NPU_SET_OFM_BASE0 = 16, + NPU_SET_OFM_BASE1 = 17, + NPU_SET_OFM_BASE2 = 18, + NPU_SET_OFM_BASE3 = 19, + NPU_SET_OFM_STRIDE_X = 20, + NPU_SET_OFM_STRIDE_Y = 21, + NPU_SET_OFM_STRIDE_C = 22, + NPU_SET_WEIGHT_BASE = 32, + NPU_SET_WEIGHT_LENGTH = 33, + NPU_SET_SCALE_BASE = 34, + NPU_SET_SCALE_LENGTH = 35, + NPU_SET_OFM_SCALE = 36, + NPU_SET_OPA_SCALE = 37, + NPU_SET_OPB_SCALE = 38, + NPU_SET_DMA0_SRC = 48, + NPU_SET_DMA0_DST = 49, + NPU_SET_DMA0_LEN = 50, + NPU_SET_IFM2_BASE0 = 128, + NPU_SET_IFM2_BASE1 = 129, + NPU_SET_IFM2_BASE2 = 130, + NPU_SET_IFM2_BASE3 = 131, + 
NPU_SET_IFM2_STRIDE_X = 132, + NPU_SET_IFM2_STRIDE_Y = 133, + NPU_SET_IFM2_STRIDE_C = 134, + NPU_SET_USER_DEFINED0 = 160, + NPU_SET_USER_DEFINED1 = 161, + NPU_SET_USER_DEFINED2 = 162, + NPU_SET_USER_DEFINED3 = 163, + NPU_SET_USER_DEFINED4 = 164, + NPU_SET_USER_DEFINED5 = 165, + NPU_SET_USER_DEFINED6 = 166, + NPU_SET_USER_DEFINED7 = 167, +}; + +enum class cmd_ctrl : uint8_t +{ + CMD0_CTRL = 0, + CMD1_CTRL = 1, +}; + +enum class custom_dma_cs : uint8_t +{ + DISABLE = 0, + ENABLE = 1, +}; + +enum class custom_dma : uint8_t +{ + NOT_IMPLEMENTED = 0, + IMPLEMENTED = 1, +}; + +enum class dma_fault_src : uint8_t +{ + AXI_M0 = 0, + AXI_M1 = 1, +}; + +enum class dma_region_mode : uint8_t +{ + EXTERNAL = 0, + INTERNAL = 1, +}; + +enum class dma_stride_mode : uint8_t +{ + D1 = 0, +}; + +enum class elementwise_mode : uint8_t +{ + MUL = 0, + ADD = 1, + SUB = 2, + MIN = 3, + MAX = 4, + LRELU = 5, + ABS = 6, + CLZ = 7, + SHR = 8, + SHL = 9, +}; + +enum class functional_safety : uint8_t +{ + NOT_IMPLEMENTED = 0, + IMPLEMENTED = 1, +}; + +enum class ifm2_operand_order : uint8_t +{ + ORDER_B = 0, + ORDER_A = 1, +}; + +enum class ifm_scale_mode : uint8_t +{ + OPA_OPB_16 = 0, + OPA_32 = 1, + OPB_32 = 2, +}; + +enum class ifm_upscale_mode : uint8_t +{ + NONE = 0, + NEAREST = 1, + ZEROS = 2, +}; + +enum class kernel_decomposition : uint8_t +{ + D8X8 = 0, + D4X4 = 1, +}; + +enum class kernel_dilation : uint8_t +{ + NONE = 0, + X2 = 1, +}; + +enum class max_beats : uint8_t +{ + B64 = 0, + B128 = 1, + B256 = 2, +}; + +enum class mem_attr : uint8_t +{ + AXI0_OUTSTANDING_COUNTER0 = 0, + AXI0_OUTSTANDING_COUNTER1 = 1, + AXI1_OUTSTANDING_COUNTER2 = 2, + AXI1_OUTSTANDING_COUNTER3 = 3, +}; + +enum class ofm_scale_mode : uint8_t +{ + PER_CHANNEL = 0, + GLOBAL = 1, +}; + +enum class pmu_axi_channel : uint8_t +{ + RD_CMD = 0, + RD_IFM = 1, + RD_WEIGHTS = 2, + RD_SCALE_BIAS = 3, + RD_MEM2MEM = 4, + WR_OFM = 8, + WR_MEM2MEM = 9, +}; + +enum class pmu_event : uint16_t +{ + NO_EVENT = 0, + CYCLE = 17, + NPU_IDLE = 32, + CC_STALLED_ON_BLOCKDEP = 33, + CC_STALLED_ON_SHRAM_RECONFIG = 34, + NPU_ACTIVE = 35, + MAC_ACTIVE = 48, + MAC_ACTIVE_8BIT = 49, + MAC_ACTIVE_16BIT = 50, + MAC_DPU_ACTIVE = 51, + MAC_STALLED_BY_WD_ACC = 52, + MAC_STALLED_BY_WD = 53, + MAC_STALLED_BY_ACC = 54, + MAC_STALLED_BY_IB = 55, + MAC_ACTIVE_32BIT = 56, + MAC_STALLED_BY_INT_W = 57, + MAC_STALLED_BY_INT_ACC = 58, + AO_ACTIVE = 64, + AO_ACTIVE_8BIT = 65, + AO_ACTIVE_16BIT = 66, + AO_STALLED_BY_OFMP_OB = 67, + AO_STALLED_BY_OFMP = 68, + AO_STALLED_BY_OB = 69, + AO_STALLED_BY_ACC_IB = 70, + AO_STALLED_BY_ACC = 71, + AO_STALLED_BY_IB = 72, + WD_ACTIVE = 80, + WD_STALLED = 81, + WD_STALLED_BY_WS = 82, + WD_STALLED_BY_WD_BUF = 83, + WD_PARSE_ACTIVE = 84, + WD_PARSE_STALLED = 85, + WD_PARSE_STALLED_IN = 86, + WD_PARSE_STALLED_OUT = 87, + WD_TRANS_WS = 88, + WD_TRANS_WB = 89, + WD_TRANS_DW0 = 90, + WD_TRANS_DW1 = 91, + AXI0_RD_TRANS_ACCEPTED = 128, + AXI0_RD_TRANS_COMPLETED = 129, + AXI0_RD_DATA_BEAT_RECEIVED = 130, + AXI0_RD_TRAN_REQ_STALLED = 131, + AXI0_WR_TRANS_ACCEPTED = 132, + AXI0_WR_TRANS_COMPLETED_M = 133, + AXI0_WR_TRANS_COMPLETED_S = 134, + AXI0_WR_DATA_BEAT_WRITTEN = 135, + AXI0_WR_TRAN_REQ_STALLED = 136, + AXI0_WR_DATA_BEAT_STALLED = 137, + AXI0_ENABLED_CYCLES = 140, + AXI0_RD_STALL_LIMIT = 142, + AXI0_WR_STALL_LIMIT = 143, + AXI_LATENCY_ANY = 160, + AXI_LATENCY_32 = 161, + AXI_LATENCY_64 = 162, + AXI_LATENCY_128 = 163, + AXI_LATENCY_256 = 164, + AXI_LATENCY_512 = 165, + AXI_LATENCY_1024 = 166, + ECC_DMA = 176, + ECC_SB0 = 177, + AXI1_RD_TRANS_ACCEPTED 
= 384, + AXI1_RD_TRANS_COMPLETED = 385, + AXI1_RD_DATA_BEAT_RECEIVED = 386, + AXI1_RD_TRAN_REQ_STALLED = 387, + AXI1_WR_TRANS_ACCEPTED = 388, + AXI1_WR_TRANS_COMPLETED_M = 389, + AXI1_WR_TRANS_COMPLETED_S = 390, + AXI1_WR_DATA_BEAT_WRITTEN = 391, + AXI1_WR_TRAN_REQ_STALLED = 392, + AXI1_WR_DATA_BEAT_STALLED = 393, + AXI1_ENABLED_CYCLES = 396, + AXI1_RD_STALL_LIMIT = 398, + AXI1_WR_STALL_LIMIT = 399, + ECC_SB1 = 433, +}; + +enum class pooling_mode : uint8_t +{ + MAX = 0, + AVERAGE = 1, + REDUCE_SUM = 2, +}; + +enum class privilege_level : uint8_t +{ + USER = 0, + PRIVILEGED = 1, +}; + +enum class round_mode : uint8_t +{ + DBL = 0, + TRUNCATE = 1, + NATURAL = 2, +}; + +enum class security_level : uint8_t +{ + SECURE = 0, + NON_SECURE = 1, +}; + +enum class state : uint8_t +{ + STOPPED = 0, + RUNNING = 1, +}; + +enum class wd_core_slice_state : uint8_t +{ + HEADER = 0, + PALETTE = 1, + WEIGHTS = 2, +}; + +enum class wd_ctrl_state : uint8_t +{ + IDLE = 0, + DRAIN = 1, + OFD_INIT = 2, + OFD_RUN = 3, +}; + +enum class weight_order : uint8_t +{ + DEPTH_FIRST = 0, + PART_KERNEL_FIRST = 1, +}; + +#else + +enum acc_format +{ + ACC_FORMAT_I32 = 0, + ACC_FORMAT_I40 = 1, + ACC_FORMAT_F16 = 2, +}; + +enum activation_clip_range +{ + ACTIVATION_CLIP_RANGE_OFM_PRECISION = 0, + ACTIVATION_CLIP_RANGE_FORCE_UINT8 = 2, + ACTIVATION_CLIP_RANGE_FORCE_INT8 = 3, + ACTIVATION_CLIP_RANGE_FORCE_INT16 = 5, +}; + +enum activation_format +{ + ACTIVATION_FORMAT_NHWC = 0, + ACTIVATION_FORMAT_NHCWB16 = 1, +}; + +enum activation_function +{ + ACTIVATION_FUNCTION_RELU = 0, + ACTIVATION_FUNCTION_TANH = 3, + ACTIVATION_FUNCTION_SIGMOID = 4, + ACTIVATION_FUNCTION_TABLE_0 = 16, + ACTIVATION_FUNCTION_TABLE_1 = 17, + ACTIVATION_FUNCTION_TABLE_2 = 18, + ACTIVATION_FUNCTION_TABLE_3 = 19, + ACTIVATION_FUNCTION_TABLE_4 = 20, + ACTIVATION_FUNCTION_TABLE_5 = 21, + ACTIVATION_FUNCTION_TABLE_6 = 22, + ACTIVATION_FUNCTION_TABLE_7 = 23, +}; + +enum activation_precision +{ + ACTIVATION_PRECISION_B8 = 0, + ACTIVATION_PRECISION_B16 = 1, + ACTIVATION_PRECISION_B32 = 2, + ACTIVATION_PRECISION_B64 = 3, +}; + +enum activation_type +{ + ACTIVATION_TYPE_UNSIGNED = 0, + ACTIVATION_TYPE_SIGNED = 1, +}; + +enum axi_mem_encoding +{ + AXI_MEM_ENCODING_DEVICE_NON_BUFFERABLE = 0, + AXI_MEM_ENCODING_DEVICE_BUFFERABLE = 1, + AXI_MEM_ENCODING_NORMAL_NON_CACHEABLE_NON_BUFFERABLE = 2, + AXI_MEM_ENCODING_NORMAL_NON_CACHEABLE_BUFFERABLE = 3, + AXI_MEM_ENCODING_WRITE_THROUGH_NO_ALLOCATE = 4, + AXI_MEM_ENCODING_WRITE_THROUGH_READ_ALLOCATE = 5, + AXI_MEM_ENCODING_WRITE_THROUGH_WRITE_ALLOCATE = 6, + AXI_MEM_ENCODING_WRITE_THROUGH_READ_AND_WRITE_ALLOCATE = 7, + AXI_MEM_ENCODING_WRITE_BACK_NO_ALLOCATE = 8, + AXI_MEM_ENCODING_WRITE_BACK_READ_ALLOCATE = 9, + AXI_MEM_ENCODING_WRITE_BACK_WRITE_ALLOCATE = 10, + AXI_MEM_ENCODING_WRITE_BACK_READ_AND_WRITE_ALLOCATE = 11, +}; + +enum broadcast_mode +{ + BROADCAST_MODE_DISABLE = 0, + BROADCAST_MODE_ENABLE = 1, +}; + +enum cmd0_opcode +{ + CMD0_OPCODE_NPU_OP_STOP = 0, + CMD0_OPCODE_NPU_OP_IRQ = 1, + CMD0_OPCODE_NPU_OP_CONV = 2, + CMD0_OPCODE_NPU_OP_DEPTHWISE = 3, + CMD0_OPCODE_NPU_OP_POOL = 5, + CMD0_OPCODE_NPU_OP_ELEMENTWISE = 6, + CMD0_OPCODE_NPU_OP_DMA_START = 16, + CMD0_OPCODE_NPU_OP_DMA_WAIT = 17, + CMD0_OPCODE_NPU_OP_KERNEL_WAIT = 18, + CMD0_OPCODE_NPU_OP_PMU_MASK = 19, + CMD0_OPCODE_NPU_SET_IFM_PAD_TOP = 256, + CMD0_OPCODE_NPU_SET_IFM_PAD_LEFT = 257, + CMD0_OPCODE_NPU_SET_IFM_PAD_RIGHT = 258, + CMD0_OPCODE_NPU_SET_IFM_PAD_BOTTOM = 259, + CMD0_OPCODE_NPU_SET_IFM_DEPTH_M1 = 260, + CMD0_OPCODE_NPU_SET_IFM_PRECISION = 261, + 
CMD0_OPCODE_NPU_SET_IFM_UPSCALE = 263, + CMD0_OPCODE_NPU_SET_IFM_ZERO_POINT = 265, + CMD0_OPCODE_NPU_SET_IFM_WIDTH0_M1 = 266, + CMD0_OPCODE_NPU_SET_IFM_HEIGHT0_M1 = 267, + CMD0_OPCODE_NPU_SET_IFM_HEIGHT1_M1 = 268, + CMD0_OPCODE_NPU_SET_IFM_IB_END = 269, + CMD0_OPCODE_NPU_SET_IFM_REGION = 271, + CMD0_OPCODE_NPU_SET_OFM_WIDTH_M1 = 273, + CMD0_OPCODE_NPU_SET_OFM_HEIGHT_M1 = 274, + CMD0_OPCODE_NPU_SET_OFM_DEPTH_M1 = 275, + CMD0_OPCODE_NPU_SET_OFM_PRECISION = 276, + CMD0_OPCODE_NPU_SET_OFM_BLK_WIDTH_M1 = 277, + CMD0_OPCODE_NPU_SET_OFM_BLK_HEIGHT_M1 = 278, + CMD0_OPCODE_NPU_SET_OFM_BLK_DEPTH_M1 = 279, + CMD0_OPCODE_NPU_SET_OFM_ZERO_POINT = 280, + CMD0_OPCODE_NPU_SET_OFM_WIDTH0_M1 = 282, + CMD0_OPCODE_NPU_SET_OFM_HEIGHT0_M1 = 283, + CMD0_OPCODE_NPU_SET_OFM_HEIGHT1_M1 = 284, + CMD0_OPCODE_NPU_SET_OFM_REGION = 287, + CMD0_OPCODE_NPU_SET_KERNEL_WIDTH_M1 = 288, + CMD0_OPCODE_NPU_SET_KERNEL_HEIGHT_M1 = 289, + CMD0_OPCODE_NPU_SET_KERNEL_STRIDE = 290, + CMD0_OPCODE_NPU_SET_ACC_FORMAT = 292, + CMD0_OPCODE_NPU_SET_ACTIVATION = 293, + CMD0_OPCODE_NPU_SET_ACTIVATION_MIN = 294, + CMD0_OPCODE_NPU_SET_ACTIVATION_MAX = 295, + CMD0_OPCODE_NPU_SET_WEIGHT_REGION = 296, + CMD0_OPCODE_NPU_SET_SCALE_REGION = 297, + CMD0_OPCODE_NPU_SET_AB_START = 301, + CMD0_OPCODE_NPU_SET_BLOCKDEP = 303, + CMD0_OPCODE_NPU_SET_DMA0_SRC_REGION = 304, + CMD0_OPCODE_NPU_SET_DMA0_DST_REGION = 305, + CMD0_OPCODE_NPU_SET_DMA0_SIZE0 = 306, + CMD0_OPCODE_NPU_SET_DMA0_SIZE1 = 307, + CMD0_OPCODE_NPU_SET_IFM2_BROADCAST = 384, + CMD0_OPCODE_NPU_SET_IFM2_SCALAR = 385, + CMD0_OPCODE_NPU_SET_IFM2_PRECISION = 389, + CMD0_OPCODE_NPU_SET_IFM2_ZERO_POINT = 393, + CMD0_OPCODE_NPU_SET_IFM2_WIDTH0_M1 = 394, + CMD0_OPCODE_NPU_SET_IFM2_HEIGHT0_M1 = 395, + CMD0_OPCODE_NPU_SET_IFM2_HEIGHT1_M1 = 396, + CMD0_OPCODE_NPU_SET_IFM2_IB_START = 397, + CMD0_OPCODE_NPU_SET_IFM2_REGION = 399, +}; + +enum cmd1_opcode +{ + CMD1_OPCODE_NPU_SET_IFM_BASE0 = 0, + CMD1_OPCODE_NPU_SET_IFM_BASE1 = 1, + CMD1_OPCODE_NPU_SET_IFM_BASE2 = 2, + CMD1_OPCODE_NPU_SET_IFM_BASE3 = 3, + CMD1_OPCODE_NPU_SET_IFM_STRIDE_X = 4, + CMD1_OPCODE_NPU_SET_IFM_STRIDE_Y = 5, + CMD1_OPCODE_NPU_SET_IFM_STRIDE_C = 6, + CMD1_OPCODE_NPU_SET_OFM_BASE0 = 16, + CMD1_OPCODE_NPU_SET_OFM_BASE1 = 17, + CMD1_OPCODE_NPU_SET_OFM_BASE2 = 18, + CMD1_OPCODE_NPU_SET_OFM_BASE3 = 19, + CMD1_OPCODE_NPU_SET_OFM_STRIDE_X = 20, + CMD1_OPCODE_NPU_SET_OFM_STRIDE_Y = 21, + CMD1_OPCODE_NPU_SET_OFM_STRIDE_C = 22, + CMD1_OPCODE_NPU_SET_WEIGHT_BASE = 32, + CMD1_OPCODE_NPU_SET_WEIGHT_LENGTH = 33, + CMD1_OPCODE_NPU_SET_SCALE_BASE = 34, + CMD1_OPCODE_NPU_SET_SCALE_LENGTH = 35, + CMD1_OPCODE_NPU_SET_OFM_SCALE = 36, + CMD1_OPCODE_NPU_SET_OPA_SCALE = 37, + CMD1_OPCODE_NPU_SET_OPB_SCALE = 38, + CMD1_OPCODE_NPU_SET_DMA0_SRC = 48, + CMD1_OPCODE_NPU_SET_DMA0_DST = 49, + CMD1_OPCODE_NPU_SET_DMA0_LEN = 50, + CMD1_OPCODE_NPU_SET_IFM2_BASE0 = 128, + CMD1_OPCODE_NPU_SET_IFM2_BASE1 = 129, + CMD1_OPCODE_NPU_SET_IFM2_BASE2 = 130, + CMD1_OPCODE_NPU_SET_IFM2_BASE3 = 131, + CMD1_OPCODE_NPU_SET_IFM2_STRIDE_X = 132, + CMD1_OPCODE_NPU_SET_IFM2_STRIDE_Y = 133, + CMD1_OPCODE_NPU_SET_IFM2_STRIDE_C = 134, + CMD1_OPCODE_NPU_SET_USER_DEFINED0 = 160, + CMD1_OPCODE_NPU_SET_USER_DEFINED1 = 161, + CMD1_OPCODE_NPU_SET_USER_DEFINED2 = 162, + CMD1_OPCODE_NPU_SET_USER_DEFINED3 = 163, + CMD1_OPCODE_NPU_SET_USER_DEFINED4 = 164, + CMD1_OPCODE_NPU_SET_USER_DEFINED5 = 165, + CMD1_OPCODE_NPU_SET_USER_DEFINED6 = 166, + CMD1_OPCODE_NPU_SET_USER_DEFINED7 = 167, +}; + +enum cmd_ctrl +{ + CMD_CTRL_CMD0_CTRL = 0, + CMD_CTRL_CMD1_CTRL = 1, +}; + +enum custom_dma_cs +{ + 
CUSTOM_DMA_CS_DISABLE = 0, + CUSTOM_DMA_CS_ENABLE = 1, +}; + +enum custom_dma +{ + CUSTOM_DMA_NOT_IMPLEMENTED = 0, + CUSTOM_DMA_IMPLEMENTED = 1, +}; + +enum dma_fault_src +{ + DMA_FAULT_SRC_AXI_M0 = 0, + DMA_FAULT_SRC_AXI_M1 = 1, +}; + +enum dma_region_mode +{ + DMA_REGION_MODE_EXTERNAL = 0, + DMA_REGION_MODE_INTERNAL = 1, +}; + +enum dma_stride_mode +{ + DMA_STRIDE_MODE_D1 = 0, +}; + +enum elementwise_mode +{ + ELEMENTWISE_MODE_MUL = 0, + ELEMENTWISE_MODE_ADD = 1, + ELEMENTWISE_MODE_SUB = 2, + ELEMENTWISE_MODE_MIN = 3, + ELEMENTWISE_MODE_MAX = 4, + ELEMENTWISE_MODE_LRELU = 5, + ELEMENTWISE_MODE_ABS = 6, + ELEMENTWISE_MODE_CLZ = 7, + ELEMENTWISE_MODE_SHR = 8, + ELEMENTWISE_MODE_SHL = 9, +}; + +enum functional_safety +{ + FUNCTIONAL_SAFETY_NOT_IMPLEMENTED = 0, + FUNCTIONAL_SAFETY_IMPLEMENTED = 1, +}; + +enum ifm2_operand_order +{ + IFM2_OPERAND_ORDER_ORDER_B = 0, + IFM2_OPERAND_ORDER_ORDER_A = 1, +}; + +enum ifm_scale_mode +{ + IFM_SCALE_MODE_OPA_OPB_16 = 0, + IFM_SCALE_MODE_OPA_32 = 1, + IFM_SCALE_MODE_OPB_32 = 2, +}; + +enum ifm_upscale_mode +{ + IFM_UPSCALE_MODE_NONE = 0, + IFM_UPSCALE_MODE_NEAREST = 1, + IFM_UPSCALE_MODE_ZEROS = 2, +}; + +enum kernel_decomposition +{ + KERNEL_DECOMPOSITION_D8X8 = 0, + KERNEL_DECOMPOSITION_D4X4 = 1, +}; + +enum kernel_dilation +{ + KERNEL_DILATION_NONE = 0, + KERNEL_DILATION_X2 = 1, +}; + +enum max_beats +{ + MAX_BEATS_B64 = 0, + MAX_BEATS_B128 = 1, + MAX_BEATS_B256 = 2, +}; + +enum mem_attr +{ + MEM_ATTR_AXI0_OUTSTANDING_COUNTER0 = 0, + MEM_ATTR_AXI0_OUTSTANDING_COUNTER1 = 1, + MEM_ATTR_AXI1_OUTSTANDING_COUNTER2 = 2, + MEM_ATTR_AXI1_OUTSTANDING_COUNTER3 = 3, +}; + +enum ofm_scale_mode +{ + OFM_SCALE_MODE_PER_CHANNEL = 0, + OFM_SCALE_MODE_GLOBAL = 1, +}; + +enum pmu_axi_channel +{ + PMU_AXI_CHANNEL_RD_CMD = 0, + PMU_AXI_CHANNEL_RD_IFM = 1, + PMU_AXI_CHANNEL_RD_WEIGHTS = 2, + PMU_AXI_CHANNEL_RD_SCALE_BIAS = 3, + PMU_AXI_CHANNEL_RD_MEM2MEM = 4, + PMU_AXI_CHANNEL_WR_OFM = 8, + PMU_AXI_CHANNEL_WR_MEM2MEM = 9, +}; + +enum pmu_event +{ + PMU_EVENT_NO_EVENT = 0, + PMU_EVENT_CYCLE = 17, + PMU_EVENT_NPU_IDLE = 32, + PMU_EVENT_CC_STALLED_ON_BLOCKDEP = 33, + PMU_EVENT_CC_STALLED_ON_SHRAM_RECONFIG = 34, + PMU_EVENT_NPU_ACTIVE = 35, + PMU_EVENT_MAC_ACTIVE = 48, + PMU_EVENT_MAC_ACTIVE_8BIT = 49, + PMU_EVENT_MAC_ACTIVE_16BIT = 50, + PMU_EVENT_MAC_DPU_ACTIVE = 51, + PMU_EVENT_MAC_STALLED_BY_WD_ACC = 52, + PMU_EVENT_MAC_STALLED_BY_WD = 53, + PMU_EVENT_MAC_STALLED_BY_ACC = 54, + PMU_EVENT_MAC_STALLED_BY_IB = 55, + PMU_EVENT_MAC_ACTIVE_32BIT = 56, + PMU_EVENT_MAC_STALLED_BY_INT_W = 57, + PMU_EVENT_MAC_STALLED_BY_INT_ACC = 58, + PMU_EVENT_AO_ACTIVE = 64, + PMU_EVENT_AO_ACTIVE_8BIT = 65, + PMU_EVENT_AO_ACTIVE_16BIT = 66, + PMU_EVENT_AO_STALLED_BY_OFMP_OB = 67, + PMU_EVENT_AO_STALLED_BY_OFMP = 68, + PMU_EVENT_AO_STALLED_BY_OB = 69, + PMU_EVENT_AO_STALLED_BY_ACC_IB = 70, + PMU_EVENT_AO_STALLED_BY_ACC = 71, + PMU_EVENT_AO_STALLED_BY_IB = 72, + PMU_EVENT_WD_ACTIVE = 80, + PMU_EVENT_WD_STALLED = 81, + PMU_EVENT_WD_STALLED_BY_WS = 82, + PMU_EVENT_WD_STALLED_BY_WD_BUF = 83, + PMU_EVENT_WD_PARSE_ACTIVE = 84, + PMU_EVENT_WD_PARSE_STALLED = 85, + PMU_EVENT_WD_PARSE_STALLED_IN = 86, + PMU_EVENT_WD_PARSE_STALLED_OUT = 87, + PMU_EVENT_WD_TRANS_WS = 88, + PMU_EVENT_WD_TRANS_WB = 89, + PMU_EVENT_WD_TRANS_DW0 = 90, + PMU_EVENT_WD_TRANS_DW1 = 91, + PMU_EVENT_AXI0_RD_TRANS_ACCEPTED = 128, + PMU_EVENT_AXI0_RD_TRANS_COMPLETED = 129, + PMU_EVENT_AXI0_RD_DATA_BEAT_RECEIVED = 130, + PMU_EVENT_AXI0_RD_TRAN_REQ_STALLED = 131, + PMU_EVENT_AXI0_WR_TRANS_ACCEPTED = 132, + 
PMU_EVENT_AXI0_WR_TRANS_COMPLETED_M = 133, + PMU_EVENT_AXI0_WR_TRANS_COMPLETED_S = 134, + PMU_EVENT_AXI0_WR_DATA_BEAT_WRITTEN = 135, + PMU_EVENT_AXI0_WR_TRAN_REQ_STALLED = 136, + PMU_EVENT_AXI0_WR_DATA_BEAT_STALLED = 137, + PMU_EVENT_AXI0_ENABLED_CYCLES = 140, + PMU_EVENT_AXI0_RD_STALL_LIMIT = 142, + PMU_EVENT_AXI0_WR_STALL_LIMIT = 143, + PMU_EVENT_AXI_LATENCY_ANY = 160, + PMU_EVENT_AXI_LATENCY_32 = 161, + PMU_EVENT_AXI_LATENCY_64 = 162, + PMU_EVENT_AXI_LATENCY_128 = 163, + PMU_EVENT_AXI_LATENCY_256 = 164, + PMU_EVENT_AXI_LATENCY_512 = 165, + PMU_EVENT_AXI_LATENCY_1024 = 166, + PMU_EVENT_ECC_DMA = 176, + PMU_EVENT_ECC_SB0 = 177, + PMU_EVENT_AXI1_RD_TRANS_ACCEPTED = 384, + PMU_EVENT_AXI1_RD_TRANS_COMPLETED = 385, + PMU_EVENT_AXI1_RD_DATA_BEAT_RECEIVED = 386, + PMU_EVENT_AXI1_RD_TRAN_REQ_STALLED = 387, + PMU_EVENT_AXI1_WR_TRANS_ACCEPTED = 388, + PMU_EVENT_AXI1_WR_TRANS_COMPLETED_M = 389, + PMU_EVENT_AXI1_WR_TRANS_COMPLETED_S = 390, + PMU_EVENT_AXI1_WR_DATA_BEAT_WRITTEN = 391, + PMU_EVENT_AXI1_WR_TRAN_REQ_STALLED = 392, + PMU_EVENT_AXI1_WR_DATA_BEAT_STALLED = 393, + PMU_EVENT_AXI1_ENABLED_CYCLES = 396, + PMU_EVENT_AXI1_RD_STALL_LIMIT = 398, + PMU_EVENT_AXI1_WR_STALL_LIMIT = 399, + PMU_EVENT_ECC_SB1 = 433, +}; + +enum pooling_mode +{ + POOLING_MODE_MAX = 0, + POOLING_MODE_AVERAGE = 1, + POOLING_MODE_REDUCE_SUM = 2, +}; + +enum privilege_level +{ + PRIVILEGE_LEVEL_USER = 0, + PRIVILEGE_LEVEL_PRIVILEGED = 1, +}; + +enum round_mode +{ + ROUND_MODE_DBL = 0, + ROUND_MODE_TRUNCATE = 1, + ROUND_MODE_NATURAL = 2, +}; + +enum security_level +{ + SECURITY_LEVEL_SECURE = 0, + SECURITY_LEVEL_NON_SECURE = 1, +}; + +enum state +{ + STATE_STOPPED = 0, + STATE_RUNNING = 1, +}; + +enum wd_core_slice_state +{ + WD_CORE_SLICE_STATE_HEADER = 0, + WD_CORE_SLICE_STATE_PALETTE = 1, + WD_CORE_SLICE_STATE_WEIGHTS = 2, +}; + +enum wd_ctrl_state +{ + WD_CTRL_STATE_IDLE = 0, + WD_CTRL_STATE_DRAIN = 1, + WD_CTRL_STATE_OFD_INIT = 2, + WD_CTRL_STATE_OFD_RUN = 3, +}; + +enum weight_order +{ + WEIGHT_ORDER_DEPTH_FIRST = 0, + WEIGHT_ORDER_PART_KERNEL_FIRST = 1, +}; + +#endif + +#ifdef NPU_DISASSEMBLE + +static const char *acc_format_str[] = { + "ACC_FORMAT_I32", + "ACC_FORMAT_I40", + "ACC_FORMAT_F16", +}; + +static const char *activation_clip_range_str[] = { + "ACTIVATION_CLIP_RANGE_OFM_PRECISION", + "****", + "ACTIVATION_CLIP_RANGE_FORCE_UINT8", + "ACTIVATION_CLIP_RANGE_FORCE_INT8", + "****", + "ACTIVATION_CLIP_RANGE_FORCE_INT16", +}; + +static const char *activation_format_str[] = { + "ACTIVATION_FORMAT_NHWC", + "ACTIVATION_FORMAT_NHCWB16", +}; + +static const char *activation_function_str[] = { + "ACTIVATION_FUNCTION_RELU", + "****", + "****", + "ACTIVATION_FUNCTION_TANH", + "ACTIVATION_FUNCTION_SIGMOID", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "ACTIVATION_FUNCTION_TABLE_0", + "ACTIVATION_FUNCTION_TABLE_1", + "ACTIVATION_FUNCTION_TABLE_2", + "ACTIVATION_FUNCTION_TABLE_3", + "ACTIVATION_FUNCTION_TABLE_4", + "ACTIVATION_FUNCTION_TABLE_5", + "ACTIVATION_FUNCTION_TABLE_6", + "ACTIVATION_FUNCTION_TABLE_7", +}; + +static const char *activation_precision_str[] = { + "ACTIVATION_PRECISION_B8", + "ACTIVATION_PRECISION_B16", + "ACTIVATION_PRECISION_B32", + "ACTIVATION_PRECISION_B64", +}; + +static const char *activation_type_str[] = { + "ACTIVATION_TYPE_UNSIGNED", + "ACTIVATION_TYPE_SIGNED", +}; + +static const char *axi_mem_encoding_str[] = { + "AXI_MEM_ENCODING_DEVICE_NON_BUFFERABLE", + "AXI_MEM_ENCODING_DEVICE_BUFFERABLE", + 
"AXI_MEM_ENCODING_NORMAL_NON_CACHEABLE_NON_BUFFERABLE", + "AXI_MEM_ENCODING_NORMAL_NON_CACHEABLE_BUFFERABLE", + "AXI_MEM_ENCODING_WRITE_THROUGH_NO_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_THROUGH_READ_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_THROUGH_WRITE_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_THROUGH_READ_AND_WRITE_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_BACK_NO_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_BACK_READ_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_BACK_WRITE_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_BACK_READ_AND_WRITE_ALLOCATE", +}; + +static const char *broadcast_mode_str[] = { + "BROADCAST_MODE_DISABLE", + "BROADCAST_MODE_ENABLE", +}; + +static const char *cmd0_opcode_str[] = { + "CMD0_OPCODE_NPU_OP_STOP", + "CMD0_OPCODE_NPU_OP_IRQ", + "CMD0_OPCODE_NPU_OP_CONV", + "CMD0_OPCODE_NPU_OP_DEPTHWISE", + "****", + "CMD0_OPCODE_NPU_OP_POOL", + "CMD0_OPCODE_NPU_OP_ELEMENTWISE", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_OP_DMA_START", + "CMD0_OPCODE_NPU_OP_DMA_WAIT", + "CMD0_OPCODE_NPU_OP_KERNEL_WAIT", + "CMD0_OPCODE_NPU_OP_PMU_MASK", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_IFM_PAD_TOP", + "CMD0_OPCODE_NPU_SET_IFM_PAD_LEFT", + 
"CMD0_OPCODE_NPU_SET_IFM_PAD_RIGHT", + "CMD0_OPCODE_NPU_SET_IFM_PAD_BOTTOM", + "CMD0_OPCODE_NPU_SET_IFM_DEPTH_M1", + "CMD0_OPCODE_NPU_SET_IFM_PRECISION", + "****", + "CMD0_OPCODE_NPU_SET_IFM_UPSCALE", + "****", + "CMD0_OPCODE_NPU_SET_IFM_ZERO_POINT", + "CMD0_OPCODE_NPU_SET_IFM_WIDTH0_M1", + "CMD0_OPCODE_NPU_SET_IFM_HEIGHT0_M1", + "CMD0_OPCODE_NPU_SET_IFM_HEIGHT1_M1", + "CMD0_OPCODE_NPU_SET_IFM_IB_END", + "****", + "CMD0_OPCODE_NPU_SET_IFM_REGION", + "****", + "CMD0_OPCODE_NPU_SET_OFM_WIDTH_M1", + "CMD0_OPCODE_NPU_SET_OFM_HEIGHT_M1", + "CMD0_OPCODE_NPU_SET_OFM_DEPTH_M1", + "CMD0_OPCODE_NPU_SET_OFM_PRECISION", + "CMD0_OPCODE_NPU_SET_OFM_BLK_WIDTH_M1", + "CMD0_OPCODE_NPU_SET_OFM_BLK_HEIGHT_M1", + "CMD0_OPCODE_NPU_SET_OFM_BLK_DEPTH_M1", + "CMD0_OPCODE_NPU_SET_OFM_ZERO_POINT", + "****", + "CMD0_OPCODE_NPU_SET_OFM_WIDTH0_M1", + "CMD0_OPCODE_NPU_SET_OFM_HEIGHT0_M1", + "CMD0_OPCODE_NPU_SET_OFM_HEIGHT1_M1", + "****", + "****", + "CMD0_OPCODE_NPU_SET_OFM_REGION", + "CMD0_OPCODE_NPU_SET_KERNEL_WIDTH_M1", + "CMD0_OPCODE_NPU_SET_KERNEL_HEIGHT_M1", + "CMD0_OPCODE_NPU_SET_KERNEL_STRIDE", + "****", + "CMD0_OPCODE_NPU_SET_ACC_FORMAT", + "CMD0_OPCODE_NPU_SET_ACTIVATION", + "CMD0_OPCODE_NPU_SET_ACTIVATION_MIN", + "CMD0_OPCODE_NPU_SET_ACTIVATION_MAX", + "CMD0_OPCODE_NPU_SET_WEIGHT_REGION", + "CMD0_OPCODE_NPU_SET_SCALE_REGION", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_AB_START", + "****", + "CMD0_OPCODE_NPU_SET_BLOCKDEP", + "CMD0_OPCODE_NPU_SET_DMA0_SRC_REGION", + "CMD0_OPCODE_NPU_SET_DMA0_DST_REGION", + "CMD0_OPCODE_NPU_SET_DMA0_SIZE0", + "CMD0_OPCODE_NPU_SET_DMA0_SIZE1", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_IFM2_BROADCAST", + "CMD0_OPCODE_NPU_SET_IFM2_SCALAR", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_IFM2_PRECISION", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_IFM2_ZERO_POINT", + "CMD0_OPCODE_NPU_SET_IFM2_WIDTH0_M1", + "CMD0_OPCODE_NPU_SET_IFM2_HEIGHT0_M1", + "CMD0_OPCODE_NPU_SET_IFM2_HEIGHT1_M1", + "CMD0_OPCODE_NPU_SET_IFM2_IB_START", + "****", + "CMD0_OPCODE_NPU_SET_IFM2_REGION", +}; + +static const char *cmd1_opcode_str[] = { + "CMD1_OPCODE_NPU_SET_IFM_BASE0", + "CMD1_OPCODE_NPU_SET_IFM_BASE1", + "CMD1_OPCODE_NPU_SET_IFM_BASE2", + "CMD1_OPCODE_NPU_SET_IFM_BASE3", + "CMD1_OPCODE_NPU_SET_IFM_STRIDE_X", + "CMD1_OPCODE_NPU_SET_IFM_STRIDE_Y", + "CMD1_OPCODE_NPU_SET_IFM_STRIDE_C", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_OFM_BASE0", + "CMD1_OPCODE_NPU_SET_OFM_BASE1", + "CMD1_OPCODE_NPU_SET_OFM_BASE2", + "CMD1_OPCODE_NPU_SET_OFM_BASE3", + "CMD1_OPCODE_NPU_SET_OFM_STRIDE_X", + "CMD1_OPCODE_NPU_SET_OFM_STRIDE_Y", + "CMD1_OPCODE_NPU_SET_OFM_STRIDE_C", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_WEIGHT_BASE", + 
"CMD1_OPCODE_NPU_SET_WEIGHT_LENGTH", + "CMD1_OPCODE_NPU_SET_SCALE_BASE", + "CMD1_OPCODE_NPU_SET_SCALE_LENGTH", + "CMD1_OPCODE_NPU_SET_OFM_SCALE", + "CMD1_OPCODE_NPU_SET_OPA_SCALE", + "CMD1_OPCODE_NPU_SET_OPB_SCALE", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_DMA0_SRC", + "CMD1_OPCODE_NPU_SET_DMA0_DST", + "CMD1_OPCODE_NPU_SET_DMA0_LEN", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_IFM2_BASE0", + "CMD1_OPCODE_NPU_SET_IFM2_BASE1", + "CMD1_OPCODE_NPU_SET_IFM2_BASE2", + "CMD1_OPCODE_NPU_SET_IFM2_BASE3", + "CMD1_OPCODE_NPU_SET_IFM2_STRIDE_X", + "CMD1_OPCODE_NPU_SET_IFM2_STRIDE_Y", + "CMD1_OPCODE_NPU_SET_IFM2_STRIDE_C", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_USER_DEFINED0", + "CMD1_OPCODE_NPU_SET_USER_DEFINED1", + "CMD1_OPCODE_NPU_SET_USER_DEFINED2", + "CMD1_OPCODE_NPU_SET_USER_DEFINED3", + "CMD1_OPCODE_NPU_SET_USER_DEFINED4", + "CMD1_OPCODE_NPU_SET_USER_DEFINED5", + "CMD1_OPCODE_NPU_SET_USER_DEFINED6", + "CMD1_OPCODE_NPU_SET_USER_DEFINED7", +}; + +static const char *cmd_ctrl_str[] = { + "CMD_CTRL_CMD0_CTRL", + "CMD_CTRL_CMD1_CTRL", +}; + +static const char *custom_dma_cs_str[] = { + "CUSTOM_DMA_CS_DISABLE", + "CUSTOM_DMA_CS_ENABLE", +}; + +static const char *custom_dma_str[] = { + "CUSTOM_DMA_NOT_IMPLEMENTED", + "CUSTOM_DMA_IMPLEMENTED", +}; + +static const char *dma_fault_src_str[] = { + "DMA_FAULT_SRC_AXI_M0", + "DMA_FAULT_SRC_AXI_M1", +}; + +static const char *dma_region_mode_str[] = { + "DMA_REGION_MODE_EXTERNAL", + "DMA_REGION_MODE_INTERNAL", +}; + +static const char *dma_stride_mode_str[] = { + "DMA_STRIDE_MODE_D1", +}; + +static const char *elementwise_mode_str[] = { + "ELEMENTWISE_MODE_MUL", + "ELEMENTWISE_MODE_ADD", + "ELEMENTWISE_MODE_SUB", + "ELEMENTWISE_MODE_MIN", + "ELEMENTWISE_MODE_MAX", + "ELEMENTWISE_MODE_LRELU", + "ELEMENTWISE_MODE_ABS", + "ELEMENTWISE_MODE_CLZ", + "ELEMENTWISE_MODE_SHR", + "ELEMENTWISE_MODE_SHL", +}; + +static const char *functional_safety_str[] = { + "FUNCTIONAL_SAFETY_NOT_IMPLEMENTED", + "FUNCTIONAL_SAFETY_IMPLEMENTED", +}; + +static const char *ifm2_operand_order_str[] = { + "IFM2_OPERAND_ORDER_ORDER_B", + "IFM2_OPERAND_ORDER_ORDER_A", +}; + +static const char *ifm_scale_mode_str[] = { + "IFM_SCALE_MODE_OPA_OPB_16", + "IFM_SCALE_MODE_OPA_32", + "IFM_SCALE_MODE_OPB_32", +}; + +static const char *ifm_upscale_mode_str[] = { + "IFM_UPSCALE_MODE_NONE", + "IFM_UPSCALE_MODE_NEAREST", + "IFM_UPSCALE_MODE_ZEROS", +}; + +static const char *kernel_decomposition_str[] = { + "KERNEL_DECOMPOSITION_D8X8", + "KERNEL_DECOMPOSITION_D4X4", +}; + 
+static const char *kernel_dilation_str[] = { + "KERNEL_DILATION_NONE", + "KERNEL_DILATION_X2", +}; + +static const char *max_beats_str[] = { + "MAX_BEATS_B64", + "MAX_BEATS_B128", + "MAX_BEATS_B256", +}; + +static const char *mem_attr_str[] = { + "MEM_ATTR_AXI0_OUTSTANDING_COUNTER0", + "MEM_ATTR_AXI0_OUTSTANDING_COUNTER1", + "MEM_ATTR_AXI1_OUTSTANDING_COUNTER2", + "MEM_ATTR_AXI1_OUTSTANDING_COUNTER3", +}; + +static const char *ofm_scale_mode_str[] = { + "OFM_SCALE_MODE_PER_CHANNEL", + "OFM_SCALE_MODE_GLOBAL", +}; + +static const char *pmu_axi_channel_str[] = { + "PMU_AXI_CHANNEL_RD_CMD", + "PMU_AXI_CHANNEL_RD_IFM", + "PMU_AXI_CHANNEL_RD_WEIGHTS", + "PMU_AXI_CHANNEL_RD_SCALE_BIAS", + "PMU_AXI_CHANNEL_RD_MEM2MEM", + "****", + "****", + "****", + "PMU_AXI_CHANNEL_WR_OFM", + "PMU_AXI_CHANNEL_WR_MEM2MEM", +}; + +static const char *pmu_event_str[] = { + "PMU_EVENT_NO_EVENT", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_CYCLE", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_NPU_IDLE", + "PMU_EVENT_CC_STALLED_ON_BLOCKDEP", + "PMU_EVENT_CC_STALLED_ON_SHRAM_RECONFIG", + "PMU_EVENT_NPU_ACTIVE", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_MAC_ACTIVE", + "PMU_EVENT_MAC_ACTIVE_8BIT", + "PMU_EVENT_MAC_ACTIVE_16BIT", + "PMU_EVENT_MAC_DPU_ACTIVE", + "PMU_EVENT_MAC_STALLED_BY_WD_ACC", + "PMU_EVENT_MAC_STALLED_BY_WD", + "PMU_EVENT_MAC_STALLED_BY_ACC", + "PMU_EVENT_MAC_STALLED_BY_IB", + "PMU_EVENT_MAC_ACTIVE_32BIT", + "PMU_EVENT_MAC_STALLED_BY_INT_W", + "PMU_EVENT_MAC_STALLED_BY_INT_ACC", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_AO_ACTIVE", + "PMU_EVENT_AO_ACTIVE_8BIT", + "PMU_EVENT_AO_ACTIVE_16BIT", + "PMU_EVENT_AO_STALLED_BY_OFMP_OB", + "PMU_EVENT_AO_STALLED_BY_OFMP", + "PMU_EVENT_AO_STALLED_BY_OB", + "PMU_EVENT_AO_STALLED_BY_ACC_IB", + "PMU_EVENT_AO_STALLED_BY_ACC", + "PMU_EVENT_AO_STALLED_BY_IB", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_WD_ACTIVE", + "PMU_EVENT_WD_STALLED", + "PMU_EVENT_WD_STALLED_BY_WS", + "PMU_EVENT_WD_STALLED_BY_WD_BUF", + "PMU_EVENT_WD_PARSE_ACTIVE", + "PMU_EVENT_WD_PARSE_STALLED", + "PMU_EVENT_WD_PARSE_STALLED_IN", + "PMU_EVENT_WD_PARSE_STALLED_OUT", + "PMU_EVENT_WD_TRANS_WS", + "PMU_EVENT_WD_TRANS_WB", + "PMU_EVENT_WD_TRANS_DW0", + "PMU_EVENT_WD_TRANS_DW1", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_AXI0_RD_TRANS_ACCEPTED", + "PMU_EVENT_AXI0_RD_TRANS_COMPLETED", + "PMU_EVENT_AXI0_RD_DATA_BEAT_RECEIVED", + "PMU_EVENT_AXI0_RD_TRAN_REQ_STALLED", + "PMU_EVENT_AXI0_WR_TRANS_ACCEPTED", + "PMU_EVENT_AXI0_WR_TRANS_COMPLETED_M", + "PMU_EVENT_AXI0_WR_TRANS_COMPLETED_S", + "PMU_EVENT_AXI0_WR_DATA_BEAT_WRITTEN", + "PMU_EVENT_AXI0_WR_TRAN_REQ_STALLED", + "PMU_EVENT_AXI0_WR_DATA_BEAT_STALLED", + "****", + "****", + "PMU_EVENT_AXI0_ENABLED_CYCLES", + "****", + "PMU_EVENT_AXI0_RD_STALL_LIMIT", + "PMU_EVENT_AXI0_WR_STALL_LIMIT", + "****", + "****", + "****", + "****", + "****", + "****", + 
"****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_AXI_LATENCY_ANY", + "PMU_EVENT_AXI_LATENCY_32", + "PMU_EVENT_AXI_LATENCY_64", + "PMU_EVENT_AXI_LATENCY_128", + "PMU_EVENT_AXI_LATENCY_256", + "PMU_EVENT_AXI_LATENCY_512", + "PMU_EVENT_AXI_LATENCY_1024", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_ECC_DMA", + "PMU_EVENT_ECC_SB0", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_AXI1_RD_TRANS_ACCEPTED", + "PMU_EVENT_AXI1_RD_TRANS_COMPLETED", + "PMU_EVENT_AXI1_RD_DATA_BEAT_RECEIVED", + "PMU_EVENT_AXI1_RD_TRAN_REQ_STALLED", + "PMU_EVENT_AXI1_WR_TRANS_ACCEPTED", + "PMU_EVENT_AXI1_WR_TRANS_COMPLETED_M", + "PMU_EVENT_AXI1_WR_TRANS_COMPLETED_S", + "PMU_EVENT_AXI1_WR_DATA_BEAT_WRITTEN", + "PMU_EVENT_AXI1_WR_TRAN_REQ_STALLED", + "PMU_EVENT_AXI1_WR_DATA_BEAT_STALLED", + "****", + "****", + "PMU_EVENT_AXI1_ENABLED_CYCLES", + "****", + "PMU_EVENT_AXI1_RD_STALL_LIMIT", + "PMU_EVENT_AXI1_WR_STALL_LIMIT", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_ECC_SB1", +}; + +static const char *pooling_mode_str[] = { + "POOLING_MODE_MAX", + "POOLING_MODE_AVERAGE", + "POOLING_MODE_REDUCE_SUM", +}; + +static const char 
*privilege_level_str[] = { + "PRIVILEGE_LEVEL_USER", + "PRIVILEGE_LEVEL_PRIVILEGED", +}; + +static const char *round_mode_str[] = { + "ROUND_MODE_DBL", + "ROUND_MODE_TRUNCATE", + "ROUND_MODE_NATURAL", +}; + +static const char *security_level_str[] = { + "SECURITY_LEVEL_SECURE", + "SECURITY_LEVEL_NON_SECURE", +}; + +static const char *state_str[] = { + "STATE_STOPPED", + "STATE_RUNNING", +}; + +static const char *wd_core_slice_state_str[] = { + "WD_CORE_SLICE_STATE_HEADER", + "WD_CORE_SLICE_STATE_PALETTE", + "WD_CORE_SLICE_STATE_WEIGHTS", +}; + +static const char *wd_ctrl_state_str[] = { + "WD_CTRL_STATE_IDLE", + "WD_CTRL_STATE_DRAIN", + "WD_CTRL_STATE_OFD_INIT", + "WD_CTRL_STATE_OFD_RUN", +}; + +static const char *weight_order_str[] = { + "WEIGHT_ORDER_DEPTH_FIRST", + "WEIGHT_ORDER_PART_KERNEL_FIRST", +}; + +#endif + +// Register type structs +// id_r - ID register +struct id_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t version_status : 4; // This is the version of the product + uint32_t version_minor : 4; // This is the n for the P part of an RnPn release number + uint32_t version_major : 4; // This is the n for the R part of an RnPn release number + uint32_t product_major : 4; // Product major ID number (unique per base product) + uint32_t arch_patch_rev : 4; // This is the patch number of the architecture version a.b + uint32_t + arch_minor_rev : 8; // This is the minor architecture version number, b in the architecture version a.b + uint32_t + arch_major_rev : 4; // This is the major architecture version number, a in the architecture version a.b + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR id_r() : word0(269500929) {} + CONSTEXPR id_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + id_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_version_status() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 0); + return value; + } + uint32_t get_version_status() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR id_r &set_version_status(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & value) << 0); + return *this; + } + volatile id_r &set_version_status(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_version_minor() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 4); + return value; + } + uint32_t get_version_minor() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR id_r &set_version_minor(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & value) << 4); + return *this; + } + volatile id_r &set_version_minor(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_version_major() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 8); + return value; + } + uint32_t get_version_major() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR id_r &set_version_major(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 8) & word0) | ((((1U << 4) - 1) 
& value) << 8); + return *this; + } + volatile id_r &set_version_major(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 8) & word0) | ((((1U << 4) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_product_major() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 12); + return value; + } + uint32_t get_product_major() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR id_r &set_product_major(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 12) & word0) | ((((1U << 4) - 1) & value) << 12); + return *this; + } + volatile id_r &set_product_major(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 12) & word0) | ((((1U << 4) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_arch_patch_rev() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 16); + return value; + } + uint32_t get_arch_patch_rev() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR id_r &set_arch_patch_rev(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 16) & word0) | ((((1U << 4) - 1) & value) << 16); + return *this; + } + volatile id_r &set_arch_patch_rev(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 16) & word0) | ((((1U << 4) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_arch_minor_rev() const + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 20); + return value; + } + uint32_t get_arch_minor_rev() const volatile + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 20); + return value; + } + CONSTEXPR id_r &set_arch_minor_rev(uint32_t value) + { + word0 = (((~((1U << 8) - 1)) << 20) & word0) | ((((1U << 8) - 1) & value) << 20); + return *this; + } + volatile id_r &set_arch_minor_rev(uint32_t value) volatile + { + word0 = (((~((1U << 8) - 1)) << 20) & word0) | ((((1U << 8) - 1) & value) << 20); + return *this; + } + CONSTEXPR uint32_t get_arch_major_rev() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 28); + return value; + } + uint32_t get_arch_major_rev() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 28); + return value; + } + CONSTEXPR id_r &set_arch_major_rev(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 28) & word0) | ((((1U << 4) - 1) & value) << 28); + return *this; + } + volatile id_r &set_arch_major_rev(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 28) & word0) | ((((1U << 4) - 1) & value) << 28); + return *this; + } +#endif +}; + +// status_r - Register describes the current operating status of the NPU +struct status_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t state : 1; // NPU state, 0 = Stopped, 1 = Running + uint32_t irq_raised : 1; // Raw IRQ status, 0 = IRQ not raised, 1 = IRQ raised. IRQ is cleared using command + // register bit 1 + uint32_t + bus_status : 1; // 0=OK, 1=Bus abort detected and processing halted (NPU will reach IDLE state and not + // to start process any more commands/AXI transactions). Can only be cleared by a reset + uint32_t reset_status : 1; // Reset is ongoing and only this register can be read (other registers read as 0 + // and writes are ignored.) A value of 0 means NPU is not being reset and can be + // accessed as normal + uint32_t + cmd_parse_error : 1; // 0=No error 1=Command stream parsing error detected. Can only be cleared by reset + uint32_t cmd_end_reached : 1; // 0=Not reached, 1=Reached. 
Cleared by writing QBASE or QSIZE when NPU is in + // stopped state + uint32_t pmu_irq_raised : 1; // 0=No PMU IRQ, 1=PMU IRQ raised. Cleared by using command register bit 1 + uint32_t wd_fault : 1; // Weight decoder state: 0=no fault 1=weight decoder decompression fault. Can only be + // cleared by reset + uint32_t ecc_fault : 1; // ECC state for internal RAMs: 0=no fault 1=ECC fault signalled. Can only be + // cleared by reset + uint32_t reserved0 : 2; + uint32_t faulting_interface : 1; // Faulting interface on bus abort + uint32_t faulting_channel : 4; // Faulting channel on a bus abort. Read: 0=Cmd 1=IFM 2=Weights 3=Scale+Bias + // 4=Mem2Mem; Write: 8=OFM 9=Mem2Mem + uint32_t irq_history_mask : 16; // IRQ History mask + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR status_r() : word0(8) {} + CONSTEXPR status_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + status_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::state get_state() const + { + NPU_NAMESPACE::state value = static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::state get_state() const volatile + { + NPU_NAMESPACE::state value = static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR status_r &set_state(NPU_NAMESPACE::state value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + volatile status_r &set_state(NPU_NAMESPACE::state value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR uint32_t get_irq_raised() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_irq_raised() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR status_r &set_irq_raised(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile status_r &set_irq_raised(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_bus_status() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_bus_status() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR status_r &set_bus_status(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile status_r &set_bus_status(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_reset_status() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_reset_status() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR status_r &set_reset_status(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile status_r &set_reset_status(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; 
+ } + CONSTEXPR uint32_t get_cmd_parse_error() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_cmd_parse_error() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR status_r &set_cmd_parse_error(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile status_r &set_cmd_parse_error(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_cmd_end_reached() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_cmd_end_reached() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR status_r &set_cmd_end_reached(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile status_r &set_cmd_end_reached(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_pmu_irq_raised() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + uint32_t get_pmu_irq_raised() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR status_r &set_pmu_irq_raised(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile status_r &set_pmu_irq_raised(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_wd_fault() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_wd_fault() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR status_r &set_wd_fault(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile status_r &set_wd_fault(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_ecc_fault() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + uint32_t get_ecc_fault() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR status_r &set_ecc_fault(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + volatile status_r &set_ecc_fault(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_fault_src get_faulting_interface() const + { + NPU_NAMESPACE::dma_fault_src value = static_cast(((1U << 1) - 1) & (word0 >> 11)); + return value; + } + NPU_NAMESPACE::dma_fault_src get_faulting_interface() const volatile + { + NPU_NAMESPACE::dma_fault_src value = static_cast(((1U << 1) - 1) & (word0 >> 11)); + return value; + } + CONSTEXPR status_r &set_faulting_interface(NPU_NAMESPACE::dma_fault_src value) + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 11); + return *this; + } + volatile status_r &set_faulting_interface(NPU_NAMESPACE::dma_fault_src value) volatile + { + word0 = (((~((1U << 1) - 1)) << 
11) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 11); + return *this; + } + CONSTEXPR uint32_t get_faulting_channel() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 12); + return value; + } + uint32_t get_faulting_channel() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR status_r &set_faulting_channel(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 12) & word0) | ((((1U << 4) - 1) & value) << 12); + return *this; + } + volatile status_r &set_faulting_channel(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 12) & word0) | ((((1U << 4) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_irq_history_mask() const + { + uint32_t value = ((1U << 16) - 1) & (word0 >> 16); + return value; + } + uint32_t get_irq_history_mask() const volatile + { + uint32_t value = ((1U << 16) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR status_r &set_irq_history_mask(uint32_t value) + { + word0 = (((~((1U << 16) - 1)) << 16) & word0) | ((((1U << 16) - 1) & value) << 16); + return *this; + } + volatile status_r &set_irq_history_mask(uint32_t value) volatile + { + word0 = (((~((1U << 16) - 1)) << 16) & word0) | ((((1U << 16) - 1) & value) << 16); + return *this; + } +#endif +}; + +// cmd_r - Command register, reads as last written command +struct cmd_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t transition_to_running_state : 1; // Write 1 to transition the NPU to running state. Writing 0 has + // no effect + uint32_t clear_irq : 1; // Write 1 to clear the IRQ status in the STATUS register. Writing 0 has no effect + uint32_t clock_q_enable : 1; // Write 1 to this bit to enable clock off using clock q-interface and enable + // the requester clock gate + uint32_t power_q_enable : 1; // Write 1 to this bit to enable power off using power q-interface + uint32_t + stop_request : 1; // Write 1 to this bit to request STOP after completing any already-started commands + uint32_t reserved0 : 11; + uint32_t clear_irq_history : 16; // Clears the IRQ history mask + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cmd_r() : word0(12) {} + CONSTEXPR cmd_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + cmd_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_transition_to_running_state() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_transition_to_running_state() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR cmd_r &set_transition_to_running_state(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile cmd_r &set_transition_to_running_state(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_clear_irq() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_clear_irq() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR cmd_r &set_clear_irq(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return 
*this; + } + volatile cmd_r &set_clear_irq(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_clock_q_enable() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_clock_q_enable() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR cmd_r &set_clock_q_enable(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile cmd_r &set_clock_q_enable(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_power_q_enable() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_power_q_enable() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR cmd_r &set_power_q_enable(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile cmd_r &set_power_q_enable(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_stop_request() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_stop_request() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR cmd_r &set_stop_request(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile cmd_r &set_stop_request(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_clear_irq_history() const + { + uint32_t value = ((1U << 16) - 1) & (word0 >> 16); + return value; + } + uint32_t get_clear_irq_history() const volatile + { + uint32_t value = ((1U << 16) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR cmd_r &set_clear_irq_history(uint32_t value) + { + word0 = (((~((1U << 16) - 1)) << 16) & word0) | ((((1U << 16) - 1) & value) << 16); + return *this; + } + volatile cmd_r &set_clear_irq_history(uint32_t value) volatile + { + word0 = (((~((1U << 16) - 1)) << 16) & word0) | ((((1U << 16) - 1) & value) << 16); + return *this; + } +#endif +}; + +// reset_r - Request Reset and new security mode +struct reset_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t pending_CPL : 1; // Current privilege level 0=User 1=Privileged + uint32_t pending_CSL : 1; // Current security level 0=Secure 1=Non secure + uint32_t reserved0 : 30; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR reset_r() : word0(0) {} + CONSTEXPR reset_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + reset_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::privilege_level get_pending_CPL() const + { + NPU_NAMESPACE::privilege_level value = + static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::privilege_level get_pending_CPL() const volatile + { + NPU_NAMESPACE::privilege_level value = + 
static_cast<NPU_NAMESPACE::privilege_level>(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR reset_r &set_pending_CPL(NPU_NAMESPACE::privilege_level value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast<uint32_t>(value)) << 0); + return *this; + } + volatile reset_r &set_pending_CPL(NPU_NAMESPACE::privilege_level value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast<uint32_t>(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::security_level get_pending_CSL() const + { + NPU_NAMESPACE::security_level value = + static_cast<NPU_NAMESPACE::security_level>(((1U << 1) - 1) & (word0 >> 1)); + return value; + } + NPU_NAMESPACE::security_level get_pending_CSL() const volatile + { + NPU_NAMESPACE::security_level value = + static_cast<NPU_NAMESPACE::security_level>(((1U << 1) - 1) & (word0 >> 1)); + return value; + } + CONSTEXPR reset_r &set_pending_CSL(NPU_NAMESPACE::security_level value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & static_cast<uint32_t>(value)) << 1); + return *this; + } + volatile reset_r &set_pending_CSL(NPU_NAMESPACE::security_level value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & static_cast<uint32_t>(value)) << 1); + return *this; + } +#endif +}; + +// qbase_r - Base address of the command stream in bytes +struct qbase_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR qbase_r() : word0(0), word1(0) {} + CONSTEXPR qbase_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + qbase_r copy() volatile + { + return *this; + } +#endif +}; + +// qread_r - Read offset in the command stream in bytes. Multiple of 4 in the range 0 to 16 MB +struct qread_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t QREAD : 32; // The read offset of the current command under execution + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR qread_r() : word0(0) {} + CONSTEXPR qread_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + qread_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_QREAD() const + { + uint32_t value = word0; + return value; + } + uint32_t get_QREAD() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR qread_r &set_QREAD(uint32_t value) + { + word0 = value; + return *this; + } + volatile qread_r &set_QREAD(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// qconfig_r - AXI configuration for the command stream in the range 0-3. 
Same encoding as for REGIONCFG +struct qconfig_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t cmd_region0 : 2; // Command region configuration + uint32_t reserved0 : 30; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR qconfig_r() : word0(0) {} + CONSTEXPR qconfig_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + qconfig_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_cmd_region0() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::mem_attr get_cmd_region0() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR qconfig_r &set_cmd_region0(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile qconfig_r &set_cmd_region0(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } +#endif +}; + +// qsize_r - Size of the command stream in bytes. Multiple of 4 in the range 0 to 16 MB +struct qsize_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t QSIZE : 32; // Size of the next command stream to be executed by the NPU + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR qsize_r() : word0(0) {} + CONSTEXPR qsize_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + qsize_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_QSIZE() const + { + uint32_t value = word0; + return value; + } + uint32_t get_QSIZE() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR qsize_r &set_QSIZE(uint32_t value) + { + word0 = value; + return *this; + } + volatile qsize_r &set_QSIZE(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// prot_r - Protection level configured for the NPU when acting as an AXI requester +struct prot_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t active_CPL : 1; // Current privilege level 0=User 1=Privileged + uint32_t active_CSL : 1; // Current security level 0=Secure 1=Non secure + uint32_t reserved0 : 30; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR prot_r() : word0(0) {} + CONSTEXPR prot_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + prot_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::privilege_level get_active_CPL() const + { + NPU_NAMESPACE::privilege_level value = + static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::privilege_level get_active_CPL() const volatile + { + NPU_NAMESPACE::privilege_level value = + static_cast(((1U << 1) - 1) & (word0 
>> 0)); + return value; + } + CONSTEXPR prot_r &set_active_CPL(NPU_NAMESPACE::privilege_level value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + volatile prot_r &set_active_CPL(NPU_NAMESPACE::privilege_level value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::security_level get_active_CSL() const + { + NPU_NAMESPACE::security_level value = + static_cast(((1U << 1) - 1) & (word0 >> 1)); + return value; + } + NPU_NAMESPACE::security_level get_active_CSL() const volatile + { + NPU_NAMESPACE::security_level value = + static_cast(((1U << 1) - 1) & (word0 >> 1)); + return value; + } + CONSTEXPR prot_r &set_active_CSL(NPU_NAMESPACE::security_level value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 1); + return *this; + } + volatile prot_r &set_active_CSL(NPU_NAMESPACE::security_level value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 1); + return *this; + } +#endif +}; + +// config_r - RTL configuration +struct config_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t macs_per_cc : 4; // The log2(macs/clock cycle) + uint32_t cmd_stream_version : 4; // command stream version accepted by this NPU + uint32_t shram_size : 8; // Total size in KB of internal SHRAM + uint32_t reserved0 : 10; + uint32_t functional_safety : 1; // Functional safety configuration + uint32_t custom_dma : 1; // Custom DMA configuration + uint32_t product : 4; // Product configuration + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR config_r() : word0(0) {} + CONSTEXPR config_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + config_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_macs_per_cc() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 0); + return value; + } + uint32_t get_macs_per_cc() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR config_r &set_macs_per_cc(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & value) << 0); + return *this; + } + volatile config_r &set_macs_per_cc(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_cmd_stream_version() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 4); + return value; + } + uint32_t get_cmd_stream_version() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR config_r &set_cmd_stream_version(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & value) << 4); + return *this; + } + volatile config_r &set_cmd_stream_version(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_shram_size() const + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 8); + return value; + } + uint32_t get_shram_size() const volatile + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR 
config_r &set_shram_size(uint32_t value) + { + word0 = (((~((1U << 8) - 1)) << 8) & word0) | ((((1U << 8) - 1) & value) << 8); + return *this; + } + volatile config_r &set_shram_size(uint32_t value) volatile + { + word0 = (((~((1U << 8) - 1)) << 8) & word0) | ((((1U << 8) - 1) & value) << 8); + return *this; + } + CONSTEXPR NPU_NAMESPACE::functional_safety get_functional_safety() const + { + NPU_NAMESPACE::functional_safety value = + static_cast(((1U << 1) - 1) & (word0 >> 26)); + return value; + } + NPU_NAMESPACE::functional_safety get_functional_safety() const volatile + { + NPU_NAMESPACE::functional_safety value = + static_cast(((1U << 1) - 1) & (word0 >> 26)); + return value; + } + CONSTEXPR config_r &set_functional_safety(NPU_NAMESPACE::functional_safety value) + { + word0 = (((~((1U << 1) - 1)) << 26) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 26); + return *this; + } + volatile config_r &set_functional_safety(NPU_NAMESPACE::functional_safety value) volatile + { + word0 = (((~((1U << 1) - 1)) << 26) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 26); + return *this; + } + CONSTEXPR NPU_NAMESPACE::custom_dma get_custom_dma() const + { + NPU_NAMESPACE::custom_dma value = static_cast(((1U << 1) - 1) & (word0 >> 27)); + return value; + } + NPU_NAMESPACE::custom_dma get_custom_dma() const volatile + { + NPU_NAMESPACE::custom_dma value = static_cast(((1U << 1) - 1) & (word0 >> 27)); + return value; + } + CONSTEXPR config_r &set_custom_dma(NPU_NAMESPACE::custom_dma value) + { + word0 = (((~((1U << 1) - 1)) << 27) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 27); + return *this; + } + volatile config_r &set_custom_dma(NPU_NAMESPACE::custom_dma value) volatile + { + word0 = (((~((1U << 1) - 1)) << 27) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 27); + return *this; + } + CONSTEXPR uint32_t get_product() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 28); + return value; + } + uint32_t get_product() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 28); + return value; + } + CONSTEXPR config_r &set_product(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 28) & word0) | ((((1U << 4) - 1) & value) << 28); + return *this; + } + volatile config_r &set_product(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 28) & word0) | ((((1U << 4) - 1) & value) << 28); + return *this; + } +#endif +}; + +// lock_r - Lock register. This register is designed for driver use and does not affect NPU functionality +struct lock_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t LOCK : 32; // 32 bit value for LOCK configuration + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR lock_r() : word0(0) {} + CONSTEXPR lock_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + lock_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_LOCK() const + { + uint32_t value = word0; + return value; + } + uint32_t get_LOCK() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR lock_r &set_LOCK(uint32_t value) + { + word0 = value; + return *this; + } + volatile lock_r &set_LOCK(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// regioncfg_r - Region memory type configuration. 
Bits[2*k+1:2*k] give the memory type for REGION[k] +struct regioncfg_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t region0 : 2; // Bits for Region0 Configuration + uint32_t region1 : 2; // Bits for Region1 Configuration + uint32_t region2 : 2; // Bits for Region2 Configuration + uint32_t region3 : 2; // Bits for Region3 Configuration + uint32_t region4 : 2; // Bits for Region4 Configuration + uint32_t region5 : 2; // Bits for Region5 Configuration + uint32_t region6 : 2; // Bits for Region6 Configuration + uint32_t region7 : 2; // Bits for Region7 Configuration + uint32_t reserved0 : 16; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR regioncfg_r() : word0(0) {} + CONSTEXPR regioncfg_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + regioncfg_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region0() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::mem_attr get_region0() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR regioncfg_r &set_region0(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile regioncfg_r &set_region0(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region1() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 2)); + return value; + } + NPU_NAMESPACE::mem_attr get_region1() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 2)); + return value; + } + CONSTEXPR regioncfg_r &set_region1(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 2) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 2); + return *this; + } + volatile regioncfg_r &set_region1(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 2) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 2); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region2() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::mem_attr get_region2() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR regioncfg_r &set_region2(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 4) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 4); + return *this; + } + volatile regioncfg_r &set_region2(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 4) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region3() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 6)); + return value; + } + NPU_NAMESPACE::mem_attr get_region3() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 6)); + return value; + } + CONSTEXPR regioncfg_r 
&set_region3(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 6) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 6); + return *this; + } + volatile regioncfg_r &set_region3(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 6) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 6); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region4() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 8)); + return value; + } + NPU_NAMESPACE::mem_attr get_region4() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 8)); + return value; + } + CONSTEXPR regioncfg_r &set_region4(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 8) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 8); + return *this; + } + volatile regioncfg_r &set_region4(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 8) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 8); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region5() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 10)); + return value; + } + NPU_NAMESPACE::mem_attr get_region5() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 10)); + return value; + } + CONSTEXPR regioncfg_r &set_region5(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 10) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 10); + return *this; + } + volatile regioncfg_r &set_region5(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 10) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 10); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region6() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 12)); + return value; + } + NPU_NAMESPACE::mem_attr get_region6() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 12)); + return value; + } + CONSTEXPR regioncfg_r &set_region6(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 12) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 12); + return *this; + } + volatile regioncfg_r &set_region6(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 12) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 12); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region7() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 14)); + return value; + } + NPU_NAMESPACE::mem_attr get_region7() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 14)); + return value; + } + CONSTEXPR regioncfg_r &set_region7(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 14) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 14); + return *this; + } + volatile regioncfg_r &set_region7(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 14) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 14); + return *this; + } +#endif +}; + +// axi_limit0_r - AXI limits for port 0 counter 0 +struct axi_limit0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t max_beats : 2; // Burst split alignment + uint32_t reserved0 : 2; + uint32_t memtype : 4; // Memtype to be used to encode AxCACHE signals + uint32_t reserved1 : 8; + uint32_t + max_outstanding_read_m1 : 5; // Maximum number of 
outstanding AXI read transactions - 1 in range 0 to 31 + uint32_t reserved2 : 3; + uint32_t max_outstanding_write_m1 : 4; // Maximum number of outstanding AXI write transactions - 1 in range + // 0 to 15 + uint32_t reserved3 : 4; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR axi_limit0_r() : word0(0) {} + CONSTEXPR axi_limit0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + axi_limit0_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::max_beats get_max_beats() const + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::max_beats get_max_beats() const volatile + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR axi_limit0_r &set_max_beats(NPU_NAMESPACE::max_beats value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile axi_limit0_r &set_max_beats(NPU_NAMESPACE::max_beats value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::axi_mem_encoding get_memtype() const + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::axi_mem_encoding get_memtype() const volatile + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR axi_limit0_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + volatile axi_limit0_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_read_m1() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 16); + return value; + } + uint32_t get_max_outstanding_read_m1() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR axi_limit0_r &set_max_outstanding_read_m1(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 16) & word0) | ((((1U << 5) - 1) & value) << 16); + return *this; + } + volatile axi_limit0_r &set_max_outstanding_read_m1(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 16) & word0) | ((((1U << 5) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_write_m1() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 24); + return value; + } + uint32_t get_max_outstanding_write_m1() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR axi_limit0_r &set_max_outstanding_write_m1(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 24) & word0) | ((((1U << 4) - 1) & value) << 24); + return *this; + } + volatile axi_limit0_r &set_max_outstanding_write_m1(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 24) & word0) | ((((1U << 4) - 1) & value) << 24); + return *this; + } +#endif +}; + +// axi_limit1_r - AXI limits for port 0 counter 1 +struct axi_limit1_r +{ +#ifndef __cplusplus + union + 
{ + struct + { + uint32_t max_beats : 2; // Burst split alignment + uint32_t reserved0 : 2; + uint32_t memtype : 4; // Memtype to be used to encode AxCACHE signals + uint32_t reserved1 : 8; + uint32_t + max_outstanding_read_m1 : 5; // Maximum number of outstanding AXI read transactions - 1 in range 0 to 31 + uint32_t reserved2 : 3; + uint32_t max_outstanding_write_m1 : 4; // Maximum number of outstanding AXI write transactions - 1 in range + // 0 to 15 + uint32_t reserved3 : 4; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR axi_limit1_r() : word0(0) {} + CONSTEXPR axi_limit1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + axi_limit1_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::max_beats get_max_beats() const + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::max_beats get_max_beats() const volatile + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR axi_limit1_r &set_max_beats(NPU_NAMESPACE::max_beats value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile axi_limit1_r &set_max_beats(NPU_NAMESPACE::max_beats value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::axi_mem_encoding get_memtype() const + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::axi_mem_encoding get_memtype() const volatile + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR axi_limit1_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + volatile axi_limit1_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_read_m1() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 16); + return value; + } + uint32_t get_max_outstanding_read_m1() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR axi_limit1_r &set_max_outstanding_read_m1(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 16) & word0) | ((((1U << 5) - 1) & value) << 16); + return *this; + } + volatile axi_limit1_r &set_max_outstanding_read_m1(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 16) & word0) | ((((1U << 5) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_write_m1() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 24); + return value; + } + uint32_t get_max_outstanding_write_m1() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR axi_limit1_r &set_max_outstanding_write_m1(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 24) & word0) | ((((1U << 4) - 1) & value) << 24); + return *this; + } + volatile axi_limit1_r 
&set_max_outstanding_write_m1(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 24) & word0) | ((((1U << 4) - 1) & value) << 24); + return *this; + } +#endif +}; + +// axi_limit2_r - AXI limits for port 1 counter 2 +struct axi_limit2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t max_beats : 2; // Burst split alignment + uint32_t reserved0 : 2; + uint32_t memtype : 4; // Memtype to be used to encode AxCACHE signals + uint32_t reserved1 : 8; + uint32_t + max_outstanding_read_m1 : 5; // Maximum number of outstanding AXI read transactions - 1 in range 0 to 31 + uint32_t reserved2 : 3; + uint32_t max_outstanding_write_m1 : 4; // Maximum number of outstanding AXI write transactions - 1 in range + // 0 to 15 + uint32_t reserved3 : 4; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR axi_limit2_r() : word0(0) {} + CONSTEXPR axi_limit2_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + axi_limit2_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::max_beats get_max_beats() const + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::max_beats get_max_beats() const volatile + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR axi_limit2_r &set_max_beats(NPU_NAMESPACE::max_beats value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile axi_limit2_r &set_max_beats(NPU_NAMESPACE::max_beats value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::axi_mem_encoding get_memtype() const + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::axi_mem_encoding get_memtype() const volatile + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR axi_limit2_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + volatile axi_limit2_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_read_m1() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 16); + return value; + } + uint32_t get_max_outstanding_read_m1() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR axi_limit2_r &set_max_outstanding_read_m1(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 16) & word0) | ((((1U << 5) - 1) & value) << 16); + return *this; + } + volatile axi_limit2_r &set_max_outstanding_read_m1(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 16) & word0) | ((((1U << 5) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_write_m1() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 24); + return value; + } + uint32_t get_max_outstanding_write_m1() const volatile + { + uint32_t value = 
((1U << 4) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR axi_limit2_r &set_max_outstanding_write_m1(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 24) & word0) | ((((1U << 4) - 1) & value) << 24); + return *this; + } + volatile axi_limit2_r &set_max_outstanding_write_m1(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 24) & word0) | ((((1U << 4) - 1) & value) << 24); + return *this; + } +#endif +}; + +// axi_limit3_r - AXI limits for port 1 counter 3 +struct axi_limit3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t max_beats : 2; // Burst split alignment + uint32_t reserved0 : 2; + uint32_t memtype : 4; // Memtype to be used to encode AxCACHE signals + uint32_t reserved1 : 8; + uint32_t + max_outstanding_read_m1 : 5; // Maximum number of outstanding AXI read transactions - 1 in range 0 to 31 + uint32_t reserved2 : 3; + uint32_t max_outstanding_write_m1 : 4; // Maximum number of outstanding AXI write transactions - 1 in range + // 0 to 15 + uint32_t reserved3 : 4; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR axi_limit3_r() : word0(0) {} + CONSTEXPR axi_limit3_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + axi_limit3_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::max_beats get_max_beats() const + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::max_beats get_max_beats() const volatile + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR axi_limit3_r &set_max_beats(NPU_NAMESPACE::max_beats value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile axi_limit3_r &set_max_beats(NPU_NAMESPACE::max_beats value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::axi_mem_encoding get_memtype() const + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::axi_mem_encoding get_memtype() const volatile + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR axi_limit3_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + volatile axi_limit3_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_read_m1() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 16); + return value; + } + uint32_t get_max_outstanding_read_m1() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR axi_limit3_r &set_max_outstanding_read_m1(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 16) & word0) | ((((1U << 5) - 1) & value) << 16); + return *this; + } + volatile axi_limit3_r &set_max_outstanding_read_m1(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 16) & word0) | ((((1U << 5) - 
1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_write_m1() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 24); + return value; + } + uint32_t get_max_outstanding_write_m1() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR axi_limit3_r &set_max_outstanding_write_m1(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 24) & word0) | ((((1U << 4) - 1) & value) << 24); + return *this; + } + volatile axi_limit3_r &set_max_outstanding_write_m1(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 24) & word0) | ((((1U << 4) - 1) & value) << 24); + return *this; + } +#endif +}; + +// basep_r - The driver can use this address to relocate the command stream on region 0. If the region contains data +// requiring A-byte alignment then the base must be a multiple of A +struct basep_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR basep_r() : word0(0), word1(0) {} + CONSTEXPR basep_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + basep_r copy() volatile + { + return *this; + } +#endif +}; + +// wd_status_r - WD_STATUS +struct wd_status_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t core_slice_state : 2; // WD core slice parser state + uint32_t core_idle : 1; // Core idle + uint32_t ctrl_state : 2; // WD control state + uint32_t ctrl_idle : 1; // All stripe jobs idle (all weights consumed) + uint32_t write_buf_index0 : 3; // current write index for next data from core + uint32_t write_buf_valid0 : 1; // write buf valid (full) + uint32_t write_buf_idle0 : 1; // write buf idle (empty) + uint32_t write_buf_index1 : 3; // current write index for next data from core + uint32_t write_buf_valid1 : 1; // write buf valid (full) + uint32_t write_buf_idle1 : 1; // write buf idle (empty) + uint32_t events : 12; // WD events mapped as appendix A + uint32_t reserved0 : 4; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR wd_status_r() : word0(0) {} + CONSTEXPR wd_status_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + wd_status_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::wd_core_slice_state get_core_slice_state() const + { + NPU_NAMESPACE::wd_core_slice_state value = + static_cast<NPU_NAMESPACE::wd_core_slice_state>(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::wd_core_slice_state get_core_slice_state() const volatile + { + NPU_NAMESPACE::wd_core_slice_state value = + static_cast<NPU_NAMESPACE::wd_core_slice_state>(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + 
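// NOTE (illustrative sketch, not part of the generated header): a driver would typically read WD_STATUS once into this wrapper and decode it with these accessors, e.g. + //   wd_status_r wd(npu_read_reg(NPU_REG_WD_STATUS)); // npu_read_reg() and NPU_REG_WD_STATUS are assumed MMIO helpers, named only for illustration + //   bool weights_consumed = wd.get_core_idle() && wd.get_ctrl_idle(); // ctrl_idle = all stripe jobs idle (all weights consumed) + 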
CONSTEXPR wd_status_r &set_core_slice_state(NPU_NAMESPACE::wd_core_slice_state value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile wd_status_r &set_core_slice_state(NPU_NAMESPACE::wd_core_slice_state value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR uint32_t get_core_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_core_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR wd_status_r &set_core_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile wd_status_r &set_core_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR NPU_NAMESPACE::wd_ctrl_state get_ctrl_state() const + { + NPU_NAMESPACE::wd_ctrl_state value = static_cast(((1U << 2) - 1) & (word0 >> 3)); + return value; + } + NPU_NAMESPACE::wd_ctrl_state get_ctrl_state() const volatile + { + NPU_NAMESPACE::wd_ctrl_state value = static_cast(((1U << 2) - 1) & (word0 >> 3)); + return value; + } + CONSTEXPR wd_status_r &set_ctrl_state(NPU_NAMESPACE::wd_ctrl_state value) + { + word0 = (((~((1U << 2) - 1)) << 3) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 3); + return *this; + } + volatile wd_status_r &set_ctrl_state(NPU_NAMESPACE::wd_ctrl_state value) volatile + { + word0 = (((~((1U << 2) - 1)) << 3) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 3); + return *this; + } + CONSTEXPR uint32_t get_ctrl_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_ctrl_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR wd_status_r &set_ctrl_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile wd_status_r &set_ctrl_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_write_buf_index0() const + { + uint32_t value = ((1U << 3) - 1) & (word0 >> 6); + return value; + } + uint32_t get_write_buf_index0() const volatile + { + uint32_t value = ((1U << 3) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_index0(uint32_t value) + { + word0 = (((~((1U << 3) - 1)) << 6) & word0) | ((((1U << 3) - 1) & value) << 6); + return *this; + } + volatile wd_status_r &set_write_buf_index0(uint32_t value) volatile + { + word0 = (((~((1U << 3) - 1)) << 6) & word0) | ((((1U << 3) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_write_buf_valid0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + uint32_t get_write_buf_valid0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_valid0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + volatile wd_status_r &set_write_buf_valid0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + CONSTEXPR uint32_t get_write_buf_idle0() const + { + uint32_t value = ((1U 
<< 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_write_buf_idle0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_idle0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile wd_status_r &set_write_buf_idle0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + CONSTEXPR uint32_t get_write_buf_index1() const + { + uint32_t value = ((1U << 3) - 1) & (word0 >> 11); + return value; + } + uint32_t get_write_buf_index1() const volatile + { + uint32_t value = ((1U << 3) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_index1(uint32_t value) + { + word0 = (((~((1U << 3) - 1)) << 11) & word0) | ((((1U << 3) - 1) & value) << 11); + return *this; + } + volatile wd_status_r &set_write_buf_index1(uint32_t value) volatile + { + word0 = (((~((1U << 3) - 1)) << 11) & word0) | ((((1U << 3) - 1) & value) << 11); + return *this; + } + CONSTEXPR uint32_t get_write_buf_valid1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + uint32_t get_write_buf_valid1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_valid1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + volatile wd_status_r &set_write_buf_valid1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + CONSTEXPR uint32_t get_write_buf_idle1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + uint32_t get_write_buf_idle1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_idle1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + volatile wd_status_r &set_write_buf_idle1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + CONSTEXPR uint32_t get_events() const + { + uint32_t value = ((1U << 12) - 1) & (word0 >> 16); + return value; + } + uint32_t get_events() const volatile + { + uint32_t value = ((1U << 12) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR wd_status_r &set_events(uint32_t value) + { + word0 = (((~((1U << 12) - 1)) << 16) & word0) | ((((1U << 12) - 1) & value) << 16); + return *this; + } + volatile wd_status_r &set_events(uint32_t value) volatile + { + word0 = (((~((1U << 12) - 1)) << 16) & word0) | ((((1U << 12) - 1) & value) << 16); + return *this; + } +#endif +}; + +// mac_status_r - MAC_STATUS +struct mac_status_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t block_cfg_valid : 1; // MAC has a valid block configuration + uint32_t trav_en : 1; // MAC is doing block traversal + uint32_t wait_for_ib : 1; // MAC is waiting for an Input Buffer to become available + uint32_t wait_for_acc_buf : 1; // MAC is waiting for an Accumulator Buffer to become available + uint32_t wait_for_weights : 1; // MAC is waiting for a Weight Block to become available + uint32_t stall_stripe : 1; // MAC is stalling between two stripes + uint32_t dw_sel : 1; // Currently used weight interface in MAC AI + uint32_t 
wait_for_dw0_ready : 1; // MAC AI is waiting for MAC DPU to send dw0_ready to WD + uint32_t wait_for_dw1_ready : 1; // MAC AI is waiting for MAC DPU to send dw1_ready to WD + uint32_t acc_buf_sel_ai : 1; // Currently used AccBuf interface in MAC AI + uint32_t wait_for_acc0_ready : 1; // MAC AI is waiting for acc0_ready from AO + uint32_t wait_for_acc1_ready : 1; // MAC AI is waiting for acc1_ready from AO + uint32_t acc_buf_sel_aa : 1; // Currently used AccBuf interface in MAC ADDER_ARRAY + uint32_t acc0_valid : 1; // MAC outgoing value of acc0_valid + uint32_t acc1_valid : 1; // MAC outgoing value of acc1_valid + uint32_t reserved0 : 1; + uint32_t events : 11; // Mapped to MAC events described in Appendix A + uint32_t reserved1 : 5; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR mac_status_r() : word0(0) {} + CONSTEXPR mac_status_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + mac_status_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_block_cfg_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_block_cfg_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR mac_status_r &set_block_cfg_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile mac_status_r &set_block_cfg_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_trav_en() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_trav_en() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR mac_status_r &set_trav_en(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile mac_status_r &set_trav_en(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_wait_for_ib() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_wait_for_ib() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_ib(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile mac_status_r &set_wait_for_ib(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_wait_for_acc_buf() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_wait_for_acc_buf() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_acc_buf(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile mac_status_r &set_wait_for_acc_buf(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR 
uint32_t get_wait_for_weights() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_wait_for_weights() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_weights(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile mac_status_r &set_wait_for_weights(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_stall_stripe() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_stall_stripe() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR mac_status_r &set_stall_stripe(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile mac_status_r &set_stall_stripe(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_dw_sel() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + uint32_t get_dw_sel() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR mac_status_r &set_dw_sel(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile mac_status_r &set_dw_sel(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_wait_for_dw0_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_wait_for_dw0_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_dw0_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile mac_status_r &set_wait_for_dw0_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_wait_for_dw1_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + uint32_t get_wait_for_dw1_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_dw1_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + volatile mac_status_r &set_wait_for_dw1_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_acc_buf_sel_ai() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + uint32_t get_acc_buf_sel_ai() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + CONSTEXPR mac_status_r &set_acc_buf_sel_ai(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + volatile mac_status_r &set_acc_buf_sel_ai(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + CONSTEXPR uint32_t 
get_wait_for_acc0_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_wait_for_acc0_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_acc0_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile mac_status_r &set_wait_for_acc0_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + CONSTEXPR uint32_t get_wait_for_acc1_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + uint32_t get_wait_for_acc1_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_acc1_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + volatile mac_status_r &set_wait_for_acc1_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + CONSTEXPR uint32_t get_acc_buf_sel_aa() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + uint32_t get_acc_buf_sel_aa() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR mac_status_r &set_acc_buf_sel_aa(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + volatile mac_status_r &set_acc_buf_sel_aa(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_acc0_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + uint32_t get_acc0_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + CONSTEXPR mac_status_r &set_acc0_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + volatile mac_status_r &set_acc0_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + CONSTEXPR uint32_t get_acc1_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + uint32_t get_acc1_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + CONSTEXPR mac_status_r &set_acc1_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + volatile mac_status_r &set_acc1_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + CONSTEXPR uint32_t get_events() const + { + uint32_t value = ((1U << 11) - 1) & (word0 >> 16); + return value; + } + uint32_t get_events() const volatile + { + uint32_t value = ((1U << 11) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR mac_status_r &set_events(uint32_t value) + { + word0 = (((~((1U << 11) - 1)) << 16) & word0) | ((((1U << 11) - 1) & value) << 16); + return *this; + } + volatile mac_status_r &set_events(uint32_t value) volatile + { + word0 = (((~((1U << 11) - 1)) << 16) & word0) | ((((1U << 11) - 1) & value) << 16); + return *this; + } +#endif +}; + +// ao_status_r - 
AO_STATUS +struct ao_status_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t cmd_sbw_valid : 1; // Block command to shared buffer write module is valid + uint32_t cmd_act_valid : 1; // Block command to activation function module is valid + uint32_t cmd_ctl_valid : 1; // Block command to control module is valid + uint32_t cmd_scl_valid : 1; // Block command to scale module is valid + uint32_t cmd_sbr_valid : 1; // Block command to shared buffer read module is valid + uint32_t cmd_ofm_valid : 1; // Block command to ofm parameter module is valid + uint32_t blk_cmd_ready : 1; // Ready to accept block command + uint32_t blk_cmd_valid : 1; // Block command from CC is valid + uint32_t reserved0 : 8; + uint32_t events : 8; // Mapped to AO events described in Appendix A + uint32_t reserved1 : 8; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ao_status_r() : word0(0) {} + CONSTEXPR ao_status_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ao_status_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_cmd_sbw_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_cmd_sbw_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR ao_status_r &set_cmd_sbw_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile ao_status_r &set_cmd_sbw_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_cmd_act_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_cmd_act_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR ao_status_r &set_cmd_act_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile ao_status_r &set_cmd_act_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_cmd_ctl_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_cmd_ctl_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR ao_status_r &set_cmd_ctl_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile ao_status_r &set_cmd_ctl_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_cmd_scl_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_cmd_scl_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR ao_status_r &set_cmd_scl_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile ao_status_r &set_cmd_scl_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | 
((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_cmd_sbr_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_cmd_sbr_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR ao_status_r &set_cmd_sbr_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile ao_status_r &set_cmd_sbr_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_cmd_ofm_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_cmd_ofm_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR ao_status_r &set_cmd_ofm_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile ao_status_r &set_cmd_ofm_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_blk_cmd_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + uint32_t get_blk_cmd_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR ao_status_r &set_blk_cmd_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile ao_status_r &set_blk_cmd_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_blk_cmd_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_blk_cmd_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR ao_status_r &set_blk_cmd_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile ao_status_r &set_blk_cmd_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_events() const + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 16); + return value; + } + uint32_t get_events() const volatile + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR ao_status_r &set_events(uint32_t value) + { + word0 = (((~((1U << 8) - 1)) << 16) & word0) | ((((1U << 8) - 1) & value) << 16); + return *this; + } + volatile ao_status_r &set_events(uint32_t value) volatile + { + word0 = (((~((1U << 8) - 1)) << 16) & word0) | ((((1U << 8) - 1) & value) << 16); + return *this; + } +#endif +}; + +// dma_status0_r - DMA_STATUS0 +struct dma_status0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t cmd_idle : 1; // When this bit is high means that the CMD block is not busy in generating addresses + // for a CMD job + uint32_t ifm_idle : 1; // When this bit is high means that there are no ongoing IFM jobs + uint32_t wgt_idle_c0 : 1; // When this bit is high means that the WGT block is not busy in generating + // addresses for a WGT job + uint32_t bas_idle_c0 : 1; // When this bit is high means that the BAS block is not busy in generating + // addresses for a BAS job + uint32_t 
m2m_idle : 1; // When this bit is high means that there are no ongoing M2M jobs + uint32_t ofm_idle : 1; // When this bit is high means that there are no ongoing OFM jobs + uint32_t halt_req : 1; // CPM has requested to HALT AXI bus before soft reset + uint32_t halt_ack : 1; // DMA is in condition to halt the AXI bus since there are no pending transactions + uint32_t pause_req : 1; // CC has requested to pause the AXI + uint32_t pause_ack : 1; // DMA is in condition to pause the AXI bus since there are no pending transactions + uint32_t ib0_ai_valid_c0 : 1; // Data for AI to be read in IFM input buffer 0 - Core 0 + uint32_t ib0_ai_ready_c0 : 1; // Data consumed from AI in IFM input buffer 0 - Core 0 + uint32_t ib1_ai_valid_c0 : 1; // Data for AI to be read in IFM input buffer 1 - Core 0 + uint32_t ib1_ai_ready_c0 : 1; // Data consumed from AI in IFM input buffer 1 - Core 0 + uint32_t ib0_ao_valid_c0 : 1; // Data for AO to be read in IFM input buffer 0 - Core 0 + uint32_t ib0_ao_ready_c0 : 1; // Data consumed from AO in IFM input buffer 0 - Core 0 + uint32_t ib1_ao_valid_c0 : 1; // Data for AO to be read in IFM input buffer 0 - Core 0 + uint32_t ib1_ao_ready_c0 : 1; // Data consumed from AO in IFM input buffer 1 - Core 0 + uint32_t ob0_valid_c0 : 1; // Data for DMA ready to be consumed in OFM output buffer 0 - Core 0 + uint32_t ob0_ready_c0 : 1; // Data consumed from DMA in OFM output buffer 0 - Core 0 + uint32_t ob1_valid_c0 : 1; // Data for DMA ready to be consumed in OFM output buffer 1 - Core 0 + uint32_t ob1_ready_c0 : 1; // Data consumed from DMA in OFM output buffer 1 - Core 0 + uint32_t cmd_valid : 1; // New command word for CC to be consumed + uint32_t cmd_ready : 1; // command word consumed by CC + uint32_t wd_bitstream_valid_c0 : 1; // New weight word for WD to be consumed - Core 0 + uint32_t wd_bitstream_ready_c0 : 1; // Weight word consumed by WD - Core 0 + uint32_t bs_bitstream_valid_c0 : 1; // New BaS word for AO to be consumed - Core 0 + uint32_t bs_bitstream_ready_c0 : 1; // BaS word consumed by AO - Core 0 + uint32_t axi0_ar_stalled : 1; // Read transfer request stalled on arready low AXI0 (due to memory system) + uint32_t axi0_rd_limit_stall : 1; // Read stalled due to one AXI0 limit counter being reached + uint32_t axi0_aw_stalled : 1; // Write transfer request stalled on awready low AXI0 (due to memory system) + uint32_t axi0_w_stalled : 1; // Write transfer stalled on awready low AXI0 (due to memory system) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_status0_r() : word0(0) {} + CONSTEXPR dma_status0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_status0_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_cmd_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_cmd_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR dma_status0_r &set_cmd_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile dma_status0_r &set_cmd_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_ifm_idle() 
const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_ifm_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR dma_status0_r &set_ifm_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile dma_status0_r &set_ifm_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_wgt_idle_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_wgt_idle_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR dma_status0_r &set_wgt_idle_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile dma_status0_r &set_wgt_idle_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_bas_idle_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_bas_idle_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR dma_status0_r &set_bas_idle_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile dma_status0_r &set_bas_idle_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_m2m_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_m2m_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR dma_status0_r &set_m2m_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile dma_status0_r &set_m2m_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_ofm_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_ofm_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR dma_status0_r &set_ofm_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile dma_status0_r &set_ofm_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_halt_req() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + uint32_t get_halt_req() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR dma_status0_r &set_halt_req(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile dma_status0_r &set_halt_req(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_halt_ack() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_halt_ack() const volatile + { + uint32_t 
value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR dma_status0_r &set_halt_ack(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile dma_status0_r &set_halt_ack(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_pause_req() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + uint32_t get_pause_req() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR dma_status0_r &set_pause_req(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + volatile dma_status0_r &set_pause_req(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_pause_ack() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + uint32_t get_pause_ack() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + CONSTEXPR dma_status0_r &set_pause_ack(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + volatile dma_status0_r &set_pause_ack(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + CONSTEXPR uint32_t get_ib0_ai_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_ib0_ai_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR dma_status0_r &set_ib0_ai_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile dma_status0_r &set_ib0_ai_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + CONSTEXPR uint32_t get_ib0_ai_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + uint32_t get_ib0_ai_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR dma_status0_r &set_ib0_ai_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + volatile dma_status0_r &set_ib0_ai_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + CONSTEXPR uint32_t get_ib1_ai_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + uint32_t get_ib1_ai_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR dma_status0_r &set_ib1_ai_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + volatile dma_status0_r &set_ib1_ai_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_ib1_ai_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + uint32_t get_ib1_ai_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 
13); + return value; + } + CONSTEXPR dma_status0_r &set_ib1_ai_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + volatile dma_status0_r &set_ib1_ai_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + CONSTEXPR uint32_t get_ib0_ao_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + uint32_t get_ib0_ao_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + CONSTEXPR dma_status0_r &set_ib0_ao_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + volatile dma_status0_r &set_ib0_ao_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + CONSTEXPR uint32_t get_ib0_ao_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + uint32_t get_ib0_ao_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + CONSTEXPR dma_status0_r &set_ib0_ao_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + volatile dma_status0_r &set_ib0_ao_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + CONSTEXPR uint32_t get_ib1_ao_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 16); + return value; + } + uint32_t get_ib1_ao_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR dma_status0_r &set_ib1_ao_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 16) & word0) | ((((1U << 1) - 1) & value) << 16); + return *this; + } + volatile dma_status0_r &set_ib1_ao_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 16) & word0) | ((((1U << 1) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_ib1_ao_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 17); + return value; + } + uint32_t get_ib1_ao_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 17); + return value; + } + CONSTEXPR dma_status0_r &set_ib1_ao_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 17) & word0) | ((((1U << 1) - 1) & value) << 17); + return *this; + } + volatile dma_status0_r &set_ib1_ao_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 17) & word0) | ((((1U << 1) - 1) & value) << 17); + return *this; + } + CONSTEXPR uint32_t get_ob0_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 18); + return value; + } + uint32_t get_ob0_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 18); + return value; + } + CONSTEXPR dma_status0_r &set_ob0_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 18) & word0) | ((((1U << 1) - 1) & value) << 18); + return *this; + } + volatile dma_status0_r &set_ob0_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 18) & word0) | ((((1U << 1) - 1) & value) << 18); + return *this; + } + CONSTEXPR uint32_t get_ob0_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 19); + return value; + } + uint32_t get_ob0_ready_c0() const volatile + { + uint32_t value = ((1U 
<< 1) - 1) & (word0 >> 19); + return value; + } + CONSTEXPR dma_status0_r &set_ob0_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 19) & word0) | ((((1U << 1) - 1) & value) << 19); + return *this; + } + volatile dma_status0_r &set_ob0_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 19) & word0) | ((((1U << 1) - 1) & value) << 19); + return *this; + } + CONSTEXPR uint32_t get_ob1_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 20); + return value; + } + uint32_t get_ob1_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 20); + return value; + } + CONSTEXPR dma_status0_r &set_ob1_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 20) & word0) | ((((1U << 1) - 1) & value) << 20); + return *this; + } + volatile dma_status0_r &set_ob1_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 20) & word0) | ((((1U << 1) - 1) & value) << 20); + return *this; + } + CONSTEXPR uint32_t get_ob1_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 21); + return value; + } + uint32_t get_ob1_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 21); + return value; + } + CONSTEXPR dma_status0_r &set_ob1_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 21) & word0) | ((((1U << 1) - 1) & value) << 21); + return *this; + } + volatile dma_status0_r &set_ob1_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 21) & word0) | ((((1U << 1) - 1) & value) << 21); + return *this; + } + CONSTEXPR uint32_t get_cmd_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 22); + return value; + } + uint32_t get_cmd_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 22); + return value; + } + CONSTEXPR dma_status0_r &set_cmd_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 22) & word0) | ((((1U << 1) - 1) & value) << 22); + return *this; + } + volatile dma_status0_r &set_cmd_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 22) & word0) | ((((1U << 1) - 1) & value) << 22); + return *this; + } + CONSTEXPR uint32_t get_cmd_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 23); + return value; + } + uint32_t get_cmd_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 23); + return value; + } + CONSTEXPR dma_status0_r &set_cmd_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 23) & word0) | ((((1U << 1) - 1) & value) << 23); + return *this; + } + volatile dma_status0_r &set_cmd_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 23) & word0) | ((((1U << 1) - 1) & value) << 23); + return *this; + } + CONSTEXPR uint32_t get_wd_bitstream_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 24); + return value; + } + uint32_t get_wd_bitstream_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR dma_status0_r &set_wd_bitstream_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 24) & word0) | ((((1U << 1) - 1) & value) << 24); + return *this; + } + volatile dma_status0_r &set_wd_bitstream_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 24) & word0) | ((((1U << 1) - 1) & value) << 24); + return *this; + } + CONSTEXPR uint32_t get_wd_bitstream_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 25); + return value; + } + uint32_t get_wd_bitstream_ready_c0() const volatile + { + uint32_t value = ((1U << 
1) - 1) & (word0 >> 25); + return value; + } + CONSTEXPR dma_status0_r &set_wd_bitstream_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 25) & word0) | ((((1U << 1) - 1) & value) << 25); + return *this; + } + volatile dma_status0_r &set_wd_bitstream_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 25) & word0) | ((((1U << 1) - 1) & value) << 25); + return *this; + } + CONSTEXPR uint32_t get_bs_bitstream_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 26); + return value; + } + uint32_t get_bs_bitstream_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 26); + return value; + } + CONSTEXPR dma_status0_r &set_bs_bitstream_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 26) & word0) | ((((1U << 1) - 1) & value) << 26); + return *this; + } + volatile dma_status0_r &set_bs_bitstream_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 26) & word0) | ((((1U << 1) - 1) & value) << 26); + return *this; + } + CONSTEXPR uint32_t get_bs_bitstream_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 27); + return value; + } + uint32_t get_bs_bitstream_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 27); + return value; + } + CONSTEXPR dma_status0_r &set_bs_bitstream_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 27) & word0) | ((((1U << 1) - 1) & value) << 27); + return *this; + } + volatile dma_status0_r &set_bs_bitstream_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 27) & word0) | ((((1U << 1) - 1) & value) << 27); + return *this; + } + CONSTEXPR uint32_t get_axi0_ar_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 28); + return value; + } + uint32_t get_axi0_ar_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 28); + return value; + } + CONSTEXPR dma_status0_r &set_axi0_ar_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 28) & word0) | ((((1U << 1) - 1) & value) << 28); + return *this; + } + volatile dma_status0_r &set_axi0_ar_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 28) & word0) | ((((1U << 1) - 1) & value) << 28); + return *this; + } + CONSTEXPR uint32_t get_axi0_rd_limit_stall() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 29); + return value; + } + uint32_t get_axi0_rd_limit_stall() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 29); + return value; + } + CONSTEXPR dma_status0_r &set_axi0_rd_limit_stall(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 29) & word0) | ((((1U << 1) - 1) & value) << 29); + return *this; + } + volatile dma_status0_r &set_axi0_rd_limit_stall(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 29) & word0) | ((((1U << 1) - 1) & value) << 29); + return *this; + } + CONSTEXPR uint32_t get_axi0_aw_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 30); + return value; + } + uint32_t get_axi0_aw_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 30); + return value; + } + CONSTEXPR dma_status0_r &set_axi0_aw_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 30) & word0) | ((((1U << 1) - 1) & value) << 30); + return *this; + } + volatile dma_status0_r &set_axi0_aw_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 30) & word0) | ((((1U << 1) - 1) & value) << 30); + return *this; + } + CONSTEXPR uint32_t get_axi0_w_stalled() const + { + uint32_t value = ((1U << 1) - 1) & 
(word0 >> 31); + return value; + } + uint32_t get_axi0_w_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR dma_status0_r &set_axi0_w_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile dma_status0_r &set_axi0_w_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// dma_status1_r - DMA_STATUS1 +struct dma_status1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t axi0_wr_limit_stall : 1; // Write stalled due to one AXI0 limit counter being reached + uint32_t axi1_ar_stalled : 1; // Read transfer request stalled on arready low AXI1 (due to memory system) + uint32_t axi1_rd_limit_stall : 1; // Read stalled due to one AXI1 limit counter being reached + uint32_t axi1_wr_stalled : 1; // Write transfer request stalled on awready low AXI1 (due to memory system) + uint32_t axi1_w_stalled : 1; // Write transfer stalled on wready low AXI1 (due to memory system) + uint32_t axi1_wr_limit_stall : 1; // Write stalled due to one AXI1 limit counter being reached + uint32_t wgt_idle_c1 : 1; // When this bit is high means that the WGT block is not busy in generating + // addresses for a WGT job + uint32_t bas_idle_c1 : 1; // When this bit is high means that the BAS block is not busy in generating + // addresses for a BAS job + uint32_t ib0_ai_valid_c1 : 1; // Data for AI to be read in IFM input buffer 0 - Core 1 + uint32_t ib0_ai_ready_c1 : 1; // Data consumed from AI in IFM input buffer 0 - Core 1 + uint32_t ib1_ai_valid_c1 : 1; // Data for AI to be read in IFM input buffer 1 - Core 1 + uint32_t ib1_ai_ready_c1 : 1; // Data consumed from AI in IFM input buffer 1 - Core 1 + uint32_t ib0_ao_valid_c1 : 1; // Data for AO to be read in IFM input buffer 0 - Core 1 + uint32_t ib0_ao_ready_c1 : 1; // Data consumed from AO in IFM input buffer 0 - Core 1 + uint32_t ib1_ao_valid_c1 : 1; // Data for AO to be read in IFM input buffer 0 - Core 1 + uint32_t ib1_ao_ready_c1 : 1; // Data consumed from AO in IFM input buffer 1 - Core 1 + uint32_t ob0_valid_c1 : 1; // Data for DMA ready to be consumed in OFM output buffer 0 - Core 1 + uint32_t ob0_ready_c1 : 1; // Data consumed from DMA in OFM output buffer 0 - Core 1 + uint32_t ob1_valid_c1 : 1; // Data for DMA ready to be consumed in OFM output buffer 1 - Core 1 + uint32_t ob1_ready_c1 : 1; // Data consumed from DMA in OFM output buffer 1 - Core 1 + uint32_t wd_bitstream_valid_c1 : 1; // New weight word for WD to be consumed - Core 1 + uint32_t wd_bitstream_ready_c1 : 1; // Weight word consumed by WD - Core 1 + uint32_t bs_bitstream_valid_c1 : 1; // New BaS word for AO to be consumed - Core 1 + uint32_t bs_bitstream_ready_c1 : 1; // BaS word consumed by AO - Core 1 + uint32_t reserved0 : 8; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_status1_r() : word0(0) {} + CONSTEXPR dma_status1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_status1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_axi0_wr_limit_stall() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t 
get_axi0_wr_limit_stall() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR dma_status1_r &set_axi0_wr_limit_stall(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile dma_status1_r &set_axi0_wr_limit_stall(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_axi1_ar_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_axi1_ar_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_ar_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile dma_status1_r &set_axi1_ar_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_axi1_rd_limit_stall() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_axi1_rd_limit_stall() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_rd_limit_stall(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile dma_status1_r &set_axi1_rd_limit_stall(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_axi1_wr_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_axi1_wr_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_wr_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile dma_status1_r &set_axi1_wr_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_axi1_w_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_axi1_w_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_w_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile dma_status1_r &set_axi1_w_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_axi1_wr_limit_stall() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_axi1_wr_limit_stall() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_wr_limit_stall(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile dma_status1_r &set_axi1_wr_limit_stall(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_wgt_idle_c1() const + { + uint32_t value = ((1U << 1) - 1) & 
(word0 >> 6); + return value; + } + uint32_t get_wgt_idle_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR dma_status1_r &set_wgt_idle_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile dma_status1_r &set_wgt_idle_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_bas_idle_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_bas_idle_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR dma_status1_r &set_bas_idle_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile dma_status1_r &set_bas_idle_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_ib0_ai_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + uint32_t get_ib0_ai_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR dma_status1_r &set_ib0_ai_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + volatile dma_status1_r &set_ib0_ai_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_ib0_ai_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + uint32_t get_ib0_ai_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + CONSTEXPR dma_status1_r &set_ib0_ai_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + volatile dma_status1_r &set_ib0_ai_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + CONSTEXPR uint32_t get_ib1_ai_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_ib1_ai_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR dma_status1_r &set_ib1_ai_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile dma_status1_r &set_ib1_ai_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + CONSTEXPR uint32_t get_ib1_ai_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + uint32_t get_ib1_ai_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR dma_status1_r &set_ib1_ai_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + volatile dma_status1_r &set_ib1_ai_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + CONSTEXPR uint32_t get_ib0_ao_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 
12); + return value; + } + uint32_t get_ib0_ao_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR dma_status1_r &set_ib0_ao_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + volatile dma_status1_r &set_ib0_ao_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_ib0_ao_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + uint32_t get_ib0_ao_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + CONSTEXPR dma_status1_r &set_ib0_ao_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + volatile dma_status1_r &set_ib0_ao_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + CONSTEXPR uint32_t get_ib1_ao_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + uint32_t get_ib1_ao_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + CONSTEXPR dma_status1_r &set_ib1_ao_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + volatile dma_status1_r &set_ib1_ao_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + CONSTEXPR uint32_t get_ib1_ao_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + uint32_t get_ib1_ao_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + CONSTEXPR dma_status1_r &set_ib1_ao_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + volatile dma_status1_r &set_ib1_ao_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + CONSTEXPR uint32_t get_ob0_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 16); + return value; + } + uint32_t get_ob0_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR dma_status1_r &set_ob0_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 16) & word0) | ((((1U << 1) - 1) & value) << 16); + return *this; + } + volatile dma_status1_r &set_ob0_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 16) & word0) | ((((1U << 1) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_ob0_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 17); + return value; + } + uint32_t get_ob0_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 17); + return value; + } + CONSTEXPR dma_status1_r &set_ob0_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 17) & word0) | ((((1U << 1) - 1) & value) << 17); + return *this; + } + volatile dma_status1_r &set_ob0_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 17) & word0) | ((((1U << 1) - 1) & value) << 17); + return *this; + } + CONSTEXPR uint32_t get_ob1_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) 
& (word0 >> 18); + return value; + } + uint32_t get_ob1_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 18); + return value; + } + CONSTEXPR dma_status1_r &set_ob1_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 18) & word0) | ((((1U << 1) - 1) & value) << 18); + return *this; + } + volatile dma_status1_r &set_ob1_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 18) & word0) | ((((1U << 1) - 1) & value) << 18); + return *this; + } + CONSTEXPR uint32_t get_ob1_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 19); + return value; + } + uint32_t get_ob1_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 19); + return value; + } + CONSTEXPR dma_status1_r &set_ob1_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 19) & word0) | ((((1U << 1) - 1) & value) << 19); + return *this; + } + volatile dma_status1_r &set_ob1_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 19) & word0) | ((((1U << 1) - 1) & value) << 19); + return *this; + } + CONSTEXPR uint32_t get_wd_bitstream_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 20); + return value; + } + uint32_t get_wd_bitstream_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 20); + return value; + } + CONSTEXPR dma_status1_r &set_wd_bitstream_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 20) & word0) | ((((1U << 1) - 1) & value) << 20); + return *this; + } + volatile dma_status1_r &set_wd_bitstream_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 20) & word0) | ((((1U << 1) - 1) & value) << 20); + return *this; + } + CONSTEXPR uint32_t get_wd_bitstream_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 21); + return value; + } + uint32_t get_wd_bitstream_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 21); + return value; + } + CONSTEXPR dma_status1_r &set_wd_bitstream_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 21) & word0) | ((((1U << 1) - 1) & value) << 21); + return *this; + } + volatile dma_status1_r &set_wd_bitstream_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 21) & word0) | ((((1U << 1) - 1) & value) << 21); + return *this; + } + CONSTEXPR uint32_t get_bs_bitstream_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 22); + return value; + } + uint32_t get_bs_bitstream_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 22); + return value; + } + CONSTEXPR dma_status1_r &set_bs_bitstream_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 22) & word0) | ((((1U << 1) - 1) & value) << 22); + return *this; + } + volatile dma_status1_r &set_bs_bitstream_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 22) & word0) | ((((1U << 1) - 1) & value) << 22); + return *this; + } + CONSTEXPR uint32_t get_bs_bitstream_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 23); + return value; + } + uint32_t get_bs_bitstream_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 23); + return value; + } + CONSTEXPR dma_status1_r &set_bs_bitstream_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 23) & word0) | ((((1U << 1) - 1) & value) << 23); + return *this; + } + volatile dma_status1_r &set_bs_bitstream_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 23) & word0) | ((((1U << 1) - 1) & value) 
<< 23); + return *this; + } +#endif +}; + +// clkforce_r - Force clocks on for clock gating +struct clkforce_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t top_level_clk : 1; // set to 1 to force on TOP level clock + uint32_t cc_clk : 1; // set to 1 to force on CC clock + uint32_t dma_clk : 1; // set to 1 to force on DMA clock + uint32_t mac_clk : 1; // set to 1 to force on MAC clock + uint32_t ao_clk : 1; // set to 1 to force on AO clock + uint32_t wd_clk : 1; // set to 1 to force on WD clock + uint32_t reserved0 : 26; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR clkforce_r() : word0(0) {} + CONSTEXPR clkforce_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + clkforce_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_top_level_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_top_level_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR clkforce_r &set_top_level_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile clkforce_r &set_top_level_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_cc_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_cc_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR clkforce_r &set_cc_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile clkforce_r &set_cc_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_dma_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_dma_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR clkforce_r &set_dma_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile clkforce_r &set_dma_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_mac_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_mac_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR clkforce_r &set_mac_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile clkforce_r &set_mac_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_ao_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_ao_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR clkforce_r &set_ao_clk(uint32_t value) + { + word0 = (((~((1U << 
1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile clkforce_r &set_ao_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_wd_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_wd_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR clkforce_r &set_wd_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile clkforce_r &set_wd_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } +#endif +}; + +// debug_address_r - Set debug address for register reads 0x400-0x7FF. The address must be 1KB aligned +struct debug_address_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t addr : 32; // Register address + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR debug_address_r() : word0(0) {} + CONSTEXPR debug_address_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + debug_address_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + uint32_t value = word0; + return value; + } + uint32_t get_addr() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR debug_address_r &set_addr(uint32_t value) + { + word0 = value; + return *this; + } + volatile debug_address_r &set_addr(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// debug_misc_r - 32-bit read/write register for driver debug use. This does not affect NPU function +struct debug_misc_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t misc : 32; // Debug misc + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR debug_misc_r() : word0(0) {} + CONSTEXPR debug_misc_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + debug_misc_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_misc() const + { + uint32_t value = word0; + return value; + } + uint32_t get_misc() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR debug_misc_r &set_misc(uint32_t value) + { + word0 = value; + return *this; + } + volatile debug_misc_r &set_misc(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// debug_block_r - Set from which of four block banks the TSU registers are read. 
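// --- Illustrative sketch (not part of the generated header or of this patch) ---
// Shows how the chainable setters of clkforce_r above can be used to force a
// subset of clocks on and write the result out through a raw MMIO pointer.
// The npu_base pointer and the clkforce_offset_words index are assumptions made
// for this example only; real code would take them from the NPU register map.
#include <cstdint>

static void force_dma_and_mac_clocks(volatile uint32_t *npu_base, uint32_t clkforce_offset_words)
{
    clkforce_r clkforce;                         // default: no clocks forced on
    clkforce.set_dma_clk(1).set_mac_clk(1);      // force the DMA and MAC clocks on
    npu_base[clkforce_offset_words] = clkforce;  // operator uint32_t() yields the raw word
}
// --- end of sketch ---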
0 = read from the current bank 256+n = +// force to read from bank n where n is in the range 0 to 3 +struct debug_block_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t block : 32; // Debug block + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR debug_block_r() : word0(0) {} + CONSTEXPR debug_block_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + debug_block_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_block() const + { + uint32_t value = word0; + return value; + } + uint32_t get_block() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR debug_block_r &set_block(uint32_t value) + { + word0 = value; + return *this; + } + volatile debug_block_r &set_block(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pmcr_r - PMU Register control +struct pmcr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t cnt_en : 1; // Enable counter + uint32_t event_cnt_rst : 1; // Reset event counter + uint32_t cycle_cnt_rst : 1; // Reset cycle counter + uint32_t mask_en : 1; // PMU can be enabled/disabled by command stream operation NPU_OP_PMU_MASK + uint32_t reserved0 : 7; + uint32_t num_event_cnt : 5; // Number of event counters + uint32_t reserved1 : 16; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmcr_r() : word0(8192) {} + CONSTEXPR pmcr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmcr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_cnt_en() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_cnt_en() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmcr_r &set_cnt_en(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmcr_r &set_cnt_en(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_event_cnt_rst() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_event_cnt_rst() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmcr_r &set_event_cnt_rst(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmcr_r &set_event_cnt_rst(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_cycle_cnt_rst() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_cycle_cnt_rst() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmcr_r &set_cycle_cnt_rst(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmcr_r &set_cycle_cnt_rst(uint32_t 
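// --- Illustrative sketch (not part of the generated header or of this patch) ---
// Programs the debug window registers defined above. Per the comments above,
// DEBUG_ADDRESS must be 1KB aligned and writing 256+n to DEBUG_BLOCK forces TSU
// reads from bank n (0-3), while 0 selects the current bank. The MMIO pointer
// and word offsets follow the same assumptions as the earlier sketch.
static void select_debug_bank(volatile uint32_t *npu_base,
                              uint32_t debug_address_offset,
                              uint32_t debug_block_offset,
                              uint32_t debug_base_addr, // must be 1KB aligned
                              uint32_t bank)            // 0-3
{
    npu_base[debug_address_offset] = debug_address_r().set_addr(debug_base_addr & ~0x3FFu);
    npu_base[debug_block_offset]   = debug_block_r().set_block(256u + bank);
}
// --- end of sketch ---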
value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_mask_en() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_mask_en() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmcr_r &set_mask_en(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmcr_r &set_mask_en(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_num_event_cnt() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 11); + return value; + } + uint32_t get_num_event_cnt() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR pmcr_r &set_num_event_cnt(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 11) & word0) | ((((1U << 5) - 1) & value) << 11); + return *this; + } + volatile pmcr_r &set_num_event_cnt(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 11) & word0) | ((((1U << 5) - 1) & value) << 11); + return *this; + } +#endif +}; + +// pmcntenset_r - Count enable set register +struct pmcntenset_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0 : 1; // Event counter enable bit for PMEVCNTR0 + uint32_t EVENT_CNT_1 : 1; // Event counter enable bit for PMEVCNTR1 + uint32_t EVENT_CNT_2 : 1; // Event counter enable bit for PMEVCNTR2 + uint32_t EVENT_CNT_3 : 1; // Event counter enable bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT : 1; // PMCCNTR enable bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmcntenset_r() : word0(0) {} + CONSTEXPR pmcntenset_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmcntenset_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmcntenset_r &set_EVENT_CNT_0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmcntenset_r &set_EVENT_CNT_0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmcntenset_r &set_EVENT_CNT_1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmcntenset_r &set_EVENT_CNT_1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2() const volatile + { + uint32_t value = ((1U << 1) - 
1) & (word0 >> 2); + return value; + } + CONSTEXPR pmcntenset_r &set_EVENT_CNT_2(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmcntenset_r &set_EVENT_CNT_2(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmcntenset_r &set_EVENT_CNT_3(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmcntenset_r &set_EVENT_CNT_3(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmcntenset_r &set_CYCLE_CNT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmcntenset_r &set_CYCLE_CNT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmcntenclr_r - Count enable clear register +struct pmcntenclr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0 : 1; // Event counter disable bit for PMEVCNTR0 + uint32_t EVENT_CNT_1 : 1; // Event counter disable bit for PMEVCNTR1 + uint32_t EVENT_CNT_2 : 1; // Event counter disable bit for PMEVCNTR2 + uint32_t EVENT_CNT_3 : 1; // Event counter disable bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT : 1; // PMCCNTR disable bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmcntenclr_r() : word0(0) {} + CONSTEXPR pmcntenclr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmcntenclr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmcntenclr_r &set_EVENT_CNT_0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmcntenclr_r &set_EVENT_CNT_0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmcntenclr_r &set_EVENT_CNT_1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmcntenclr_r &set_EVENT_CNT_1(uint32_t value) volatile + { + word0 = (((~((1U 
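// --- Illustrative sketch (not part of the generated header or of this patch) ---
// Brings up the PMU with the control and count-enable registers defined above,
// under the same assumptions as the earlier sketches (raw MMIO pointer plus
// hypothetical word offsets).
static void start_pmu(volatile uint32_t *npu_base, uint32_t pmcr_offset, uint32_t pmcntenset_offset)
{
    // NUM_EVENT_CNT reports how many event counters this NPU implements.
    uint32_t available = pmcr_r(npu_base[pmcr_offset]).get_num_event_cnt();
    (void)available;

    // Reset the event and cycle counters and enable counting in one write.
    pmcr_r pmcr;
    pmcr.set_event_cnt_rst(1).set_cycle_cnt_rst(1).set_cnt_en(1);
    npu_base[pmcr_offset] = pmcr;

    // Enable event counters 0 and 1 plus the cycle counter.
    pmcntenset_r enset;
    enset.set_EVENT_CNT_0(1).set_EVENT_CNT_1(1).set_CYCLE_CNT(1);
    npu_base[pmcntenset_offset] = enset;
}
// --- end of sketch ---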
<< 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmcntenclr_r &set_EVENT_CNT_2(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmcntenclr_r &set_EVENT_CNT_2(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmcntenclr_r &set_EVENT_CNT_3(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmcntenclr_r &set_EVENT_CNT_3(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmcntenclr_r &set_CYCLE_CNT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmcntenclr_r &set_CYCLE_CNT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmovsset_r - Overflow flag status set register +struct pmovsset_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0_OVF : 1; // Event counter overflow set bit for PMEVCNTR0 + uint32_t EVENT_CNT_1_OVF : 1; // Event counter overflow set bit for PMEVCNTR1 + uint32_t EVENT_CNT_2_OVF : 1; // Event counter overflow set bit for PMEVCNTR2 + uint32_t EVENT_CNT_3_OVF : 1; // Event counter overflow set bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT_OVF : 1; // PMCCNTR overflow set bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmovsset_r() : word0(0) {} + CONSTEXPR pmovsset_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmovsset_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmovsset_r &set_EVENT_CNT_0_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmovsset_r &set_EVENT_CNT_0_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t 
get_EVENT_CNT_1_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmovsset_r &set_EVENT_CNT_1_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmovsset_r &set_EVENT_CNT_1_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmovsset_r &set_EVENT_CNT_2_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmovsset_r &set_EVENT_CNT_2_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmovsset_r &set_EVENT_CNT_3_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmovsset_r &set_EVENT_CNT_3_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmovsset_r &set_CYCLE_CNT_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmovsset_r &set_CYCLE_CNT_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmovsclr_r - Overflow flag status clear register +struct pmovsclr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0_OVF : 1; // Event counter overflow clear bit for PMEVCNTR0 + uint32_t EVENT_CNT_1_OVF : 1; // Event counter overflow clear bit for PMEVCNTR1 + uint32_t EVENT_CNT_2_OVF : 1; // Event counter overflow clear bit for PMEVCNTR2 + uint32_t EVENT_CNT_3_OVF : 1; // Event counter overflow clear bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT_OVF : 1; // PMCCNTR overflow clear bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmovsclr_r() : word0(0) {} + CONSTEXPR pmovsclr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmovsclr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmovsclr_r &set_EVENT_CNT_0_OVF(uint32_t value) + { + word0 = 
(((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmovsclr_r &set_EVENT_CNT_0_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmovsclr_r &set_EVENT_CNT_1_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmovsclr_r &set_EVENT_CNT_1_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmovsclr_r &set_EVENT_CNT_2_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmovsclr_r &set_EVENT_CNT_2_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmovsclr_r &set_EVENT_CNT_3_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmovsclr_r &set_EVENT_CNT_3_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmovsclr_r &set_CYCLE_CNT_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmovsclr_r &set_CYCLE_CNT_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmintset_r - Interrupt enable set register +struct pmintset_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0_INT : 1; // Event counter overflow interrupt request enable bit for PMEVCNTR0 + uint32_t EVENT_CNT_1_INT : 1; // Event counter overflow interrupt request enable bit for PMEVCNTR1 + uint32_t EVENT_CNT_2_INT : 1; // Event counter overflow interrupt request enable bit for PMEVCNTR2 + uint32_t EVENT_CNT_3_INT : 1; // Event counter overflow interrupt request enable bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT_INT : 1; // PMCCNTR overflow interrupt request enable bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmintset_r() : word0(0) {} + CONSTEXPR pmintset_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } 
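// --- Illustrative sketch (not part of the generated header or of this patch) ---
// Snapshots the PMU overflow flags and builds a matching clear mask with the
// pmovsset_r/pmovsclr_r structs above. Treating PMOVSCLR as write-1-to-clear is
// an assumption of this example; offsets and the MMIO pointer are as before.
static void clear_seen_overflows(volatile uint32_t *npu_base,
                                 uint32_t pmovsset_offset,
                                 uint32_t pmovsclr_offset)
{
    pmovsset_r status(npu_base[pmovsset_offset]);   // read current overflow status

    pmovsclr_r clear;                               // clear only the flags that were seen
    clear.set_EVENT_CNT_0_OVF(status.get_EVENT_CNT_0_OVF())
        .set_EVENT_CNT_1_OVF(status.get_EVENT_CNT_1_OVF())
        .set_EVENT_CNT_2_OVF(status.get_EVENT_CNT_2_OVF())
        .set_EVENT_CNT_3_OVF(status.get_EVENT_CNT_3_OVF())
        .set_CYCLE_CNT_OVF(status.get_CYCLE_CNT_OVF());

    npu_base[pmovsclr_offset] = clear;
}
// --- end of sketch ---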
+ CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmintset_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmintset_r &set_EVENT_CNT_0_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmintset_r &set_EVENT_CNT_0_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmintset_r &set_EVENT_CNT_1_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmintset_r &set_EVENT_CNT_1_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmintset_r &set_EVENT_CNT_2_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmintset_r &set_EVENT_CNT_2_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmintset_r &set_EVENT_CNT_3_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmintset_r &set_EVENT_CNT_3_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmintset_r &set_CYCLE_CNT_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmintset_r &set_CYCLE_CNT_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmintclr_r - Interrupt enable clear register +struct pmintclr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0_INT : 1; // Event counter overflow interrupt request disable bit for PMEVCNTR0 + uint32_t EVENT_CNT_1_INT : 1; // Event counter overflow interrupt request disable bit for PMEVCNTR1 + uint32_t EVENT_CNT_2_INT : 1; // Event counter overflow interrupt request disable bit for PMEVCNTR2 + uint32_t 
EVENT_CNT_3_INT : 1; // Event counter overflow interrupt request disable bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT_INT : 1; // PMCCNTR overflow interrupt request disable bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmintclr_r() : word0(0) {} + CONSTEXPR pmintclr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmintclr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmintclr_r &set_EVENT_CNT_0_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmintclr_r &set_EVENT_CNT_0_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmintclr_r &set_EVENT_CNT_1_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmintclr_r &set_EVENT_CNT_1_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmintclr_r &set_EVENT_CNT_2_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmintclr_r &set_EVENT_CNT_2_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmintclr_r &set_EVENT_CNT_3_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmintclr_r &set_EVENT_CNT_3_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmintclr_r &set_CYCLE_CNT_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmintclr_r &set_CYCLE_CNT_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 
1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmccntr_r - Performance monitor cycle count register +struct pmccntr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CYCLE_CNT_LO : 32; // Cycle count - LSB + uint32_t CYCLE_CNT_HI : 16; // Cycle count - MSB + uint32_t reserved0 : 16; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR pmccntr_r() : word0(0), word1(0) {} + CONSTEXPR pmccntr_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + pmccntr_r copy() volatile + { + return *this; + } +#endif +}; + +// pmccntr_cfg_r - Set start/stop event on the cycle counter +struct pmccntr_cfg_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CYCLE_CNT_CFG_START : 10; // Cycle counter start event + uint32_t reserved0 : 6; + uint32_t CYCLE_CNT_CFG_STOP : 10; // Cycle counter stop event + uint32_t reserved1 : 6; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmccntr_cfg_r() : word0(0) {} + CONSTEXPR pmccntr_cfg_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmccntr_cfg_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::pmu_event get_CYCLE_CNT_CFG_START() const + { + NPU_NAMESPACE::pmu_event value = static_cast(((1U << 10) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::pmu_event get_CYCLE_CNT_CFG_START() const volatile + { + NPU_NAMESPACE::pmu_event value = static_cast(((1U << 10) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR pmccntr_cfg_r &set_CYCLE_CNT_CFG_START(NPU_NAMESPACE::pmu_event value) + { + word0 = (((~((1U << 10) - 1)) << 0) & word0) | ((((1U << 10) - 1) & static_cast(value)) << 0); + return *this; + } + volatile pmccntr_cfg_r &set_CYCLE_CNT_CFG_START(NPU_NAMESPACE::pmu_event value) volatile + { + word0 = (((~((1U << 10) - 1)) << 0) & word0) | ((((1U << 10) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::pmu_event get_CYCLE_CNT_CFG_STOP() const + { + NPU_NAMESPACE::pmu_event value = static_cast(((1U << 10) - 1) & (word0 >> 16)); + return value; + } + NPU_NAMESPACE::pmu_event get_CYCLE_CNT_CFG_STOP() const volatile + { + NPU_NAMESPACE::pmu_event value = static_cast(((1U << 10) - 1) & (word0 >> 16)); + return value; + } + CONSTEXPR pmccntr_cfg_r &set_CYCLE_CNT_CFG_STOP(NPU_NAMESPACE::pmu_event value) + { + word0 = (((~((1U << 10) - 1)) << 16) & word0) | ((((1U << 10) - 1) & static_cast(value)) << 16); + return *this; + } + volatile pmccntr_cfg_r &set_CYCLE_CNT_CFG_STOP(NPU_NAMESPACE::pmu_event value) volatile + { + word0 = (((~((1U << 10) - 1)) << 16) & word0) | ((((1U << 10) - 1) & static_cast(value)) << 16); + return *this; + } +#endif +}; + +// 
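// --- Illustrative sketch (not part of the generated header or of this patch) ---
// Configures the cycle-counter start/stop events, enables its overflow
// interrupt, and reads the 48-bit count back through the two 32-bit words it
// occupies (CYCLE_CNT_LO plus 16 valid bits of CYCLE_CNT_HI). The pmu_event
// values passed in, the LO/HI read order and the offsets are assumptions of the
// example, not requirements of this header.
static void arm_cycle_counter(volatile uint32_t *npu_base,
                              uint32_t pmccntr_cfg_offset,
                              uint32_t pmintset_offset,
                              NPU_NAMESPACE::pmu_event start_event,
                              NPU_NAMESPACE::pmu_event stop_event)
{
    pmccntr_cfg_r cfg;
    cfg.set_CYCLE_CNT_CFG_START(start_event).set_CYCLE_CNT_CFG_STOP(stop_event);
    npu_base[pmccntr_cfg_offset] = cfg;

    pmintset_r irq;
    irq.set_CYCLE_CNT_INT(1);                       // interrupt on cycle-counter overflow
    npu_base[pmintset_offset] = irq;
}

static uint64_t read_cycle_count(volatile uint32_t *npu_base,
                                 uint32_t pmccntr_lo_offset,
                                 uint32_t pmccntr_hi_offset)
{
    uint32_t lo = npu_base[pmccntr_lo_offset];      // CYCLE_CNT_LO
    uint32_t hi = npu_base[pmccntr_hi_offset];      // CYCLE_CNT_HI (upper 16 bits reserved)
    return pmccntr_r((static_cast<uint64_t>(hi) << 32) | lo);
}
// --- end of sketch ---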
pmcaxi_chan_r - Set which AXI channel to monitor for latency measurements in PMU +struct pmcaxi_chan_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CH_SEL : 4; // Channel select for latency measurements + uint32_t reserved0 : 4; + uint32_t AXI_CNT_SEL : 2; // AXI counter to monitor for latency measurements + uint32_t BW_CH_SEL_EN : 1; // Bandwidth channel selector + uint32_t reserved1 : 21; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmcaxi_chan_r() : word0(0) {} + CONSTEXPR pmcaxi_chan_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmcaxi_chan_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::pmu_axi_channel get_CH_SEL() const + { + NPU_NAMESPACE::pmu_axi_channel value = + static_cast(((1U << 4) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::pmu_axi_channel get_CH_SEL() const volatile + { + NPU_NAMESPACE::pmu_axi_channel value = + static_cast(((1U << 4) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR pmcaxi_chan_r &set_CH_SEL(NPU_NAMESPACE::pmu_axi_channel value) + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 0); + return *this; + } + volatile pmcaxi_chan_r &set_CH_SEL(NPU_NAMESPACE::pmu_axi_channel value) volatile + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR uint32_t get_AXI_CNT_SEL() const + { + uint32_t value = ((1U << 2) - 1) & (word0 >> 8); + return value; + } + uint32_t get_AXI_CNT_SEL() const volatile + { + uint32_t value = ((1U << 2) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR pmcaxi_chan_r &set_AXI_CNT_SEL(uint32_t value) + { + word0 = (((~((1U << 2) - 1)) << 8) & word0) | ((((1U << 2) - 1) & value) << 8); + return *this; + } + volatile pmcaxi_chan_r &set_AXI_CNT_SEL(uint32_t value) volatile + { + word0 = (((~((1U << 2) - 1)) << 8) & word0) | ((((1U << 2) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_BW_CH_SEL_EN() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_BW_CH_SEL_EN() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR pmcaxi_chan_r &set_BW_CH_SEL_EN(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile pmcaxi_chan_r &set_BW_CH_SEL_EN(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } +#endif +}; + +// kernel_x_r - Kernel X offset of in kernel decomposition +struct kernel_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_x_r() : word0(0) {} + CONSTEXPR kernel_x_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_x_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t 
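// --- Illustrative sketch (not part of the generated header or of this patch) ---
// Points the PMU latency measurement at one AXI channel using pmcaxi_chan_r
// above. Which pmu_axi_channel enumerators exist is not shown in this header
// excerpt, so the channel is passed in; offsets follow the earlier assumptions.
static void select_latency_channel(volatile uint32_t *npu_base,
                                   uint32_t pmcaxi_chan_offset,
                                   NPU_NAMESPACE::pmu_axi_channel channel,
                                   uint32_t axi_counter) // 2-bit field, 0-3
{
    pmcaxi_chan_r cfg;
    cfg.set_CH_SEL(channel)           // AXI channel whose latency is measured
        .set_AXI_CNT_SEL(axi_counter) // which AXI counter observes it
        .set_BW_CH_SEL_EN(0);         // leave the bandwidth channel selector disabled
    npu_base[pmcaxi_chan_offset] = cfg;
}
// --- end of sketch ---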
get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_x_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_x_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_y_r - Kernel Y offset of in kernel decomposition +struct kernel_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_y_r() : word0(0) {} + CONSTEXPR kernel_y_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_y_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_y_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_y_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_w_m1_r - Kernel (width-1) of current block +struct kernel_w_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_w_m1_r() : word0(0) {} + CONSTEXPR kernel_w_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_w_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_w_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_w_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_h_m1_r - Kernel (height-1) of current block +struct kernel_h_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_h_m1_r() : word0(0) {} + CONSTEXPR kernel_h_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_h_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_h_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_h_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_cblk_width_m1_r - OFM current block (width-1) +struct ofm_cblk_width_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register 
value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_cblk_width_m1_r() : word0(0) {} + CONSTEXPR ofm_cblk_width_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_cblk_width_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_cblk_width_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_cblk_width_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_cblk_height_m1_r - OFM current block (height-1) +struct ofm_cblk_height_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_cblk_height_m1_r() : word0(0) {} + CONSTEXPR ofm_cblk_height_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_cblk_height_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_cblk_height_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_cblk_height_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_cblk_depth_m1_r - OFM current block (depth-1) +struct ofm_cblk_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_cblk_depth_m1_r() : word0(0) {} + CONSTEXPR ofm_cblk_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_cblk_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_cblk_depth_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_cblk_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_cblk_depth_m1_r - IFM current block (depth-1) +struct ifm_cblk_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_cblk_depth_m1_r() : word0(0) {} + CONSTEXPR ifm_cblk_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } 
+ CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_cblk_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_cblk_depth_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_cblk_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_x_r - Block X coordinate in OFM +struct ofm_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_x_r() : word0(0) {} + CONSTEXPR ofm_x_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_x_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_x_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_x_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_y_r - Block Y coordinate in OFM +struct ofm_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_y_r() : word0(0) {} + CONSTEXPR ofm_y_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_y_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_y_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_y_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_z_r - Block Z (channel) coordinate in OFM +struct ofm_z_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_z_r() : word0(0) {} + CONSTEXPR ofm_z_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_z_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_z_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_z_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_z_r 
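// --- Illustrative sketch (not part of the generated header or of this patch) ---
// The kernel_*/ofm_*/ifm_* registers above are plain 32-bit snapshots of TSU
// state, so reading the current OFM block coordinates is just a matter of
// wrapping the raw words. Offsets and the MMIO pointer are assumptions.
struct ofm_block_position
{
    uint32_t x;
    uint32_t y;
    uint32_t z;
};

static ofm_block_position read_ofm_block_position(volatile uint32_t *npu_base,
                                                  uint32_t ofm_x_offset,
                                                  uint32_t ofm_y_offset,
                                                  uint32_t ofm_z_offset)
{
    ofm_block_position pos;
    pos.x = ofm_x_r(npu_base[ofm_x_offset]).get_value();
    pos.y = ofm_y_r(npu_base[ofm_y_offset]).get_value();
    pos.z = ofm_z_r(npu_base[ofm_z_offset]).get_value();
    return pos;
}
// --- end of sketch ---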
- Block Z (channel) coordinate in IFM +struct ifm_z_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_z_r() : word0(0) {} + CONSTEXPR ifm_z_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_z_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_z_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_z_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pad_top_r - Block top pad +struct pad_top_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pad_top_r() : word0(0) {} + CONSTEXPR pad_top_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pad_top_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pad_top_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile pad_top_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pad_left_r - Block left pad +struct pad_left_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pad_left_r() : word0(0) {} + CONSTEXPR pad_left_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pad_left_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pad_left_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile pad_left_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_cblk_width_r - IFM current block derived width +struct ifm_cblk_width_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_cblk_width_r() : word0(0) {} + CONSTEXPR ifm_cblk_width_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { 
+ return word0; + } + ifm_cblk_width_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_cblk_width_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_cblk_width_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_cblk_height_r - IFM current block derived height +struct ifm_cblk_height_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_cblk_height_r() : word0(0) {} + CONSTEXPR ifm_cblk_height_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_cblk_height_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_cblk_height_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_cblk_height_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_ifm_src_r - DMA IFM channel source position on AXI +struct dma_ifm_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_ifm_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_ifm_src_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma_ifm_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_ifm_dst_r - DMA IFM channel destination position in SHRAM +struct dma_ifm_dst_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_ifm_dst_r() : word0(0) {} + CONSTEXPR dma_ifm_dst_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_ifm_dst_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma_ifm_dst_r &set_value(uint32_t value) + { + word0 = value; + 
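// --- Illustrative sketch (not part of the generated header or of this patch) ---
// dma_ifm_src_r above is a 64-bit AXI position split across two consecutive
// 32-bit words (offset plus a reserved high word), so it is assembled from a
// LO/HI pair before going through the wrapper. The read order and offsets are
// assumptions of the example.
static uint64_t read_dma_ifm_src(volatile uint32_t *npu_base,
                                 uint32_t lo_offset,
                                 uint32_t hi_offset)
{
    uint32_t lo = npu_base[lo_offset];              // offset[31:0]
    uint32_t hi = npu_base[hi_offset];              // high word (reserved field)
    return dma_ifm_src_r((static_cast<uint64_t>(hi) << 32) | lo);
}
// --- end of sketch ---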
return *this; + } + volatile dma_ifm_dst_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_ofm_src_r - DMA OFM channel source position in SHRAM +struct dma_ofm_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_ofm_src_r() : word0(0) {} + CONSTEXPR dma_ofm_src_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_ofm_src_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma_ofm_src_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma_ofm_src_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_ofm_dst_r - DMA OFM channel destination position on AXI +struct dma_ofm_dst_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_ofm_dst_r() : word0(0), word1(0) {} + CONSTEXPR dma_ofm_dst_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma_ofm_dst_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_weight_src_r - DMA weight channel source position on AXI +struct dma_weight_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_weight_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_weight_src_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma_weight_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_cmd_src_r - DMA command channel source position on AXI +struct dma_cmd_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t 
offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_cmd_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_cmd_src_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma_cmd_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_cmd_size_r - DMA command channel number of bytes buffered +struct dma_cmd_size_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_cmd_size_r() : word0(0) {} + CONSTEXPR dma_cmd_size_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_cmd_size_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma_cmd_size_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma_cmd_size_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_m2m_src_r - DMA memory to memory source position on AXI +struct dma_m2m_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_m2m_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_m2m_src_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma_m2m_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_m2m_dst_r - DMA memory to memory destination position on AXI +struct dma_m2m_dst_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_m2m_dst_r() : word0(0), word1(0) {} + CONSTEXPR dma_m2m_dst_r(uint64_t init) : + 
word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma_m2m_dst_r copy() volatile + { + return *this; + } +#endif +}; + +// current_qread_r - QREAD position being issued (rather than completed) +struct current_qread_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR current_qread_r() : word0(0) {} + CONSTEXPR current_qread_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + current_qread_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR current_qread_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile current_qread_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_scale_src_r - DMA scale and bias channel source position on AXI +struct dma_scale_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset : 32; // Offset + uint32_t reserved0 : 32; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_scale_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_scale_src_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma_scale_src_r copy() volatile + { + return *this; + } +#endif +}; + +// current_block_r - 0-3. 
Current block bank being executed by the TSU or last one executed if TSU is stopped +struct current_block_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR current_block_r() : word0(0) {} + CONSTEXPR current_block_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + current_block_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR current_block_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile current_block_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// current_op_r - Current NPU OP command being executed by the TSU +struct current_op_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR current_op_r() : word0(0) {} + CONSTEXPR current_op_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + current_op_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR current_op_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile current_op_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// current_cmd_r - Current 32-bit command being parsed by the command stream parser +struct current_cmd_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR current_cmd_r() : word0(0) {} + CONSTEXPR current_cmd_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + current_cmd_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR current_cmd_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile current_cmd_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pmevcntr_r - Performance monitor event 0 count register +struct pmevcntr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t count : 32; // Count word + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmevcntr_r() : word0(0) {} + CONSTEXPR pmevcntr_r(uint32_t init) : word0(init) {} + CONSTEXPR void 
operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmevcntr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_count() const + { + uint32_t value = word0; + return value; + } + uint32_t get_count() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pmevcntr_r &set_count(uint32_t value) + { + word0 = value; + return *this; + } + volatile pmevcntr_r &set_count(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pmevtyper_r - Performance monitor event type register 0 +struct pmevtyper_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EV_TYPE : 10; // Event Type + uint32_t reserved0 : 22; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmevtyper_r() : word0(0) {} + CONSTEXPR pmevtyper_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmevtyper_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::pmu_event get_EV_TYPE() const + { + NPU_NAMESPACE::pmu_event value = static_cast(((1U << 10) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::pmu_event get_EV_TYPE() const volatile + { + NPU_NAMESPACE::pmu_event value = static_cast(((1U << 10) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR pmevtyper_r &set_EV_TYPE(NPU_NAMESPACE::pmu_event value) + { + word0 = (((~((1U << 10) - 1)) << 0) & word0) | ((((1U << 10) - 1) & static_cast(value)) << 0); + return *this; + } + volatile pmevtyper_r &set_EV_TYPE(NPU_NAMESPACE::pmu_event value) volatile + { + word0 = (((~((1U << 10) - 1)) << 0) & word0) | ((((1U << 10) - 1) & static_cast(value)) << 0); + return *this; + } +#endif +}; + +// shared_buffer_r - Shared buffer debug access. 
Only valid in STOPPED state +struct shared_buffer_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t mem_word : 32; // Memory word + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR shared_buffer_r() : word0(0) {} + CONSTEXPR shared_buffer_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + shared_buffer_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_mem_word() const + { + uint32_t value = word0; + return value; + } + uint32_t get_mem_word() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR shared_buffer_r &set_mem_word(uint32_t value) + { + word0 = value; + return *this; + } + volatile shared_buffer_r &set_mem_word(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_pad_top_r - None +struct ifm_pad_top_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_pad_top_r() : word0(0) {} + CONSTEXPR ifm_pad_top_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_pad_top_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_pad_top_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_pad_top_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_pad_left_r - None +struct ifm_pad_left_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_pad_left_r() : word0(0) {} + CONSTEXPR ifm_pad_left_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_pad_left_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_pad_left_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_pad_left_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_pad_right_r - None +struct ifm_pad_right_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_pad_right_r() : word0(0) {} + CONSTEXPR ifm_pad_right_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + 
return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_pad_right_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_pad_right_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_pad_right_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_pad_bottom_r - None +struct ifm_pad_bottom_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_pad_bottom_r() : word0(0) {} + CONSTEXPR ifm_pad_bottom_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_pad_bottom_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_pad_bottom_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_pad_bottom_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_depth_m1_r - None +struct ifm_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_depth_m1_r() : word0(0) {} + CONSTEXPR ifm_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_depth_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_precision_r - None +struct ifm_precision_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_precision_r() : word0(0) {} + CONSTEXPR ifm_precision_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_precision_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_precision_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_precision_r &set_value(uint32_t value) volatile + { + word0 = value; 
+ return *this; + } +#endif +}; + +// ifm_upscale_r - None +struct ifm_upscale_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_upscale_r() : word0(0) {} + CONSTEXPR ifm_upscale_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_upscale_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_upscale_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_upscale_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_zero_point_r - None +struct ifm_zero_point_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_zero_point_r() : word0(0) {} + CONSTEXPR ifm_zero_point_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_zero_point_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_zero_point_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_zero_point_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_width0_m1_r - None +struct ifm_width0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_width0_m1_r() : word0(0) {} + CONSTEXPR ifm_width0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_width0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_width0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_width0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_height0_m1_r - None +struct ifm_height0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_height0_m1_r() : word0(0) {} + CONSTEXPR ifm_height0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; 
+ } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_height0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_height0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_height0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_height1_m1_r - None +struct ifm_height1_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_height1_m1_r() : word0(0) {} + CONSTEXPR ifm_height1_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_height1_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_height1_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_height1_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_ib_end_r - None +struct ifm_ib_end_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_ib_end_r() : word0(0) {} + CONSTEXPR ifm_ib_end_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_ib_end_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_ib_end_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_ib_end_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_region_r - None +struct ifm_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_region_r() : word0(0) {} + CONSTEXPR ifm_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_region_r &set_value(uint32_t value) volatile + { + word0 
= value; + return *this; + } +#endif +}; + +// ofm_width_m1_r - None +struct ofm_width_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_width_m1_r() : word0(0) {} + CONSTEXPR ofm_width_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_width_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_width_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_width_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_height_m1_r - None +struct ofm_height_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_height_m1_r() : word0(0) {} + CONSTEXPR ofm_height_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_height_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_height_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_height_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_depth_m1_r - None +struct ofm_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_depth_m1_r() : word0(0) {} + CONSTEXPR ofm_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_depth_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_precision_r - None +struct ofm_precision_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_precision_r() : word0(0) {} + CONSTEXPR ofm_precision_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + 
} + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_precision_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_precision_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_precision_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_blk_width_m1_r - None +struct ofm_blk_width_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_blk_width_m1_r() : word0(0) {} + CONSTEXPR ofm_blk_width_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_blk_width_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_blk_width_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_blk_width_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_blk_height_m1_r - None +struct ofm_blk_height_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_blk_height_m1_r() : word0(0) {} + CONSTEXPR ofm_blk_height_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_blk_height_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_blk_height_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_blk_height_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_blk_depth_m1_r - None +struct ofm_blk_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_blk_depth_m1_r() : word0(0) {} + CONSTEXPR ofm_blk_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_blk_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_blk_depth_m1_r &set_value(uint32_t value) + { + word0 = 
value; + return *this; + } + volatile ofm_blk_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_zero_point_r - None +struct ofm_zero_point_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_zero_point_r() : word0(0) {} + CONSTEXPR ofm_zero_point_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_zero_point_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_zero_point_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_zero_point_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_width0_m1_r - None +struct ofm_width0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_width0_m1_r() : word0(0) {} + CONSTEXPR ofm_width0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_width0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_width0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_width0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_height0_m1_r - None +struct ofm_height0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_height0_m1_r() : word0(0) {} + CONSTEXPR ofm_height0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_height0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_height0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_height0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_height1_m1_r - None +struct ofm_height1_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_height1_m1_r() : word0(0) {} + CONSTEXPR ofm_height1_m1_r(uint32_t init) : 
word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_height1_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_height1_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_height1_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_region_r - None +struct ofm_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_region_r() : word0(0) {} + CONSTEXPR ofm_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_width_m1_r - None +struct kernel_width_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_width_m1_r() : word0(0) {} + CONSTEXPR kernel_width_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_width_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_width_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_width_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_height_m1_r - None +struct kernel_height_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_height_m1_r() : word0(0) {} + CONSTEXPR kernel_height_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_height_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + 
return value; + } + CONSTEXPR kernel_height_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_height_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_stride_r - None +struct kernel_stride_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_stride_r() : word0(0) {} + CONSTEXPR kernel_stride_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_stride_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_stride_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_stride_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// acc_format_r - None +struct acc_format_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR acc_format_r() : word0(0) {} + CONSTEXPR acc_format_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + acc_format_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR acc_format_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile acc_format_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// activation_r - None +struct activation_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR activation_r() : word0(0) {} + CONSTEXPR activation_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + activation_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR activation_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile activation_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// activation_min_r - None +struct activation_min_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR activation_min_r() : word0(0) {} + CONSTEXPR 
activation_min_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + activation_min_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR activation_min_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile activation_min_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// activation_max_r - None +struct activation_max_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR activation_max_r() : word0(0) {} + CONSTEXPR activation_max_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + activation_max_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR activation_max_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile activation_max_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// weight_region_r - None +struct weight_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR weight_region_r() : word0(0) {} + CONSTEXPR weight_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + weight_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR weight_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile weight_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// scale_region_r - None +struct scale_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR scale_region_r() : word0(0) {} + CONSTEXPR scale_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + scale_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { 
+ uint32_t value = word0; + return value; + } + CONSTEXPR scale_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile scale_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ab_start_r - None +struct ab_start_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ab_start_r() : word0(0) {} + CONSTEXPR ab_start_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ab_start_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ab_start_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ab_start_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// blockdep_r - None +struct blockdep_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR blockdep_r() : word0(0) {} + CONSTEXPR blockdep_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + blockdep_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR blockdep_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile blockdep_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_src_region_r - None +struct dma0_src_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma0_src_region_r() : word0(0) {} + CONSTEXPR dma0_src_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma0_src_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma0_src_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma0_src_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_dst_region_r - None +struct dma0_dst_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma0_dst_region_r() : word0(0) {} + CONSTEXPR 
dma0_dst_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma0_dst_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma0_dst_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma0_dst_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_size0_r - None +struct dma0_size0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma0_size0_r() : word0(0) {} + CONSTEXPR dma0_size0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma0_size0_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma0_size0_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma0_size0_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_size1_r - None +struct dma0_size1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma0_size1_r() : word0(0) {} + CONSTEXPR dma0_size1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma0_size1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma0_size1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma0_size1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_broadcast_r - None +struct ifm2_broadcast_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_broadcast_r() : word0(0) {} + CONSTEXPR ifm2_broadcast_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_broadcast_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return 
value; + } + CONSTEXPR ifm2_broadcast_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_broadcast_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_scalar_r - None +struct ifm2_scalar_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_scalar_r() : word0(0) {} + CONSTEXPR ifm2_scalar_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_scalar_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_scalar_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_scalar_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_precision_r - None +struct ifm2_precision_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_precision_r() : word0(0) {} + CONSTEXPR ifm2_precision_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_precision_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_precision_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_precision_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_zero_point_r - None +struct ifm2_zero_point_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_zero_point_r() : word0(0) {} + CONSTEXPR ifm2_zero_point_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_zero_point_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_zero_point_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_zero_point_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_width0_m1_r - None +struct ifm2_width0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR 
ifm2_width0_m1_r() : word0(0) {} + CONSTEXPR ifm2_width0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_width0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_width0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_width0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_height0_m1_r - None +struct ifm2_height0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_height0_m1_r() : word0(0) {} + CONSTEXPR ifm2_height0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_height0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_height0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_height0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_height1_m1_r - None +struct ifm2_height1_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_height1_m1_r() : word0(0) {} + CONSTEXPR ifm2_height1_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_height1_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_height1_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_height1_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_ib_start_r - None +struct ifm2_ib_start_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_ib_start_r() : word0(0) {} + CONSTEXPR ifm2_ib_start_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_ib_start_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value 
= word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_ib_start_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_ib_start_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_region_r - None +struct ifm2_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_region_r() : word0(0) {} + CONSTEXPR ifm2_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_base0_r - None +struct ifm_base0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_base0_r() : word0(0), word1(0) {} + CONSTEXPR ifm_base0_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_base0_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_base1_r - None +struct ifm_base1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_base1_r() : word0(0), word1(0) {} + CONSTEXPR ifm_base1_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_base1_r copy() volatile + { + return *this; + } 
+#endif +}; + +// ifm_base2_r - None +struct ifm_base2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_base2_r() : word0(0), word1(0) {} + CONSTEXPR ifm_base2_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_base2_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_base3_r - None +struct ifm_base3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_base3_r() : word0(0), word1(0) {} + CONSTEXPR ifm_base3_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_base3_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_stride_x_r - None +struct ifm_stride_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_stride_x_r() : word0(0), word1(0) {} + CONSTEXPR ifm_stride_x_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_stride_x_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_stride_y_r - None +struct ifm_stride_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 
64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_stride_y_r() : word0(0), word1(0) {} + CONSTEXPR ifm_stride_y_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_stride_y_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_stride_c_r - None +struct ifm_stride_c_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_stride_c_r() : word0(0), word1(0) {} + CONSTEXPR ifm_stride_c_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_stride_c_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_base0_r - None +struct ofm_base0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_base0_r() : word0(0), word1(0) {} + CONSTEXPR ofm_base0_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_base0_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_base1_r - None +struct ofm_base1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_base1_r() : word0(0), word1(0) {} + CONSTEXPR 
ofm_base1_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_base1_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_base2_r - None +struct ofm_base2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_base2_r() : word0(0), word1(0) {} + CONSTEXPR ofm_base2_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_base2_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_base3_r - None +struct ofm_base3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_base3_r() : word0(0), word1(0) {} + CONSTEXPR ofm_base3_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_base3_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_stride_x_r - None +struct ofm_stride_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_stride_x_r() : word0(0), word1(0) {} + CONSTEXPR ofm_stride_x_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + 
word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_stride_x_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_stride_y_r - None +struct ofm_stride_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_stride_y_r() : word0(0), word1(0) {} + CONSTEXPR ofm_stride_y_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_stride_y_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_stride_c_r - None +struct ofm_stride_c_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_stride_c_r() : word0(0), word1(0) {} + CONSTEXPR ofm_stride_c_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_stride_c_r copy() volatile + { + return *this; + } +#endif +}; + +// weight_base_r - None +struct weight_base_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR weight_base_r() : word0(0), word1(0) {} + CONSTEXPR weight_base_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + 
word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + weight_base_r copy() volatile + { + return *this; + } +#endif +}; + +// weight_length_r - None +struct weight_length_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR weight_length_r() : word0(0), word1(0) {} + CONSTEXPR weight_length_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + weight_length_r copy() volatile + { + return *this; + } +#endif +}; + +// scale_base_r - None +struct scale_base_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR scale_base_r() : word0(0), word1(0) {} + CONSTEXPR scale_base_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + scale_base_r copy() volatile + { + return *this; + } +#endif +}; + +// scale_length_r - None +struct scale_length_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR scale_length_r() : word0(0), word1(0) {} + CONSTEXPR scale_length_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return 
(static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + scale_length_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_scale_r - None +struct ofm_scale_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_scale_r() : word0(0) {} + CONSTEXPR ofm_scale_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_scale_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_scale_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_scale_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_scale_shift_r - None +struct ofm_scale_shift_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_scale_shift_r() : word0(0) {} + CONSTEXPR ofm_scale_shift_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_scale_shift_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_scale_shift_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_scale_shift_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// opa_scale_r - None +struct opa_scale_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR opa_scale_r() : word0(0) {} + CONSTEXPR opa_scale_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + opa_scale_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR opa_scale_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile opa_scale_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// opa_scale_shift_r - None +struct opa_scale_shift_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR opa_scale_shift_r() : word0(0) {} + CONSTEXPR opa_scale_shift_r(uint32_t init) : word0(init) {} 
+ CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + opa_scale_shift_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR opa_scale_shift_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile opa_scale_shift_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// opb_scale_r - None +struct opb_scale_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR opb_scale_r() : word0(0) {} + CONSTEXPR opb_scale_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + opb_scale_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR opb_scale_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile opb_scale_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_src_r - None +struct dma0_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma0_src_r() : word0(0), word1(0) {} + CONSTEXPR dma0_src_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma0_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma0_dst_r - None +struct dma0_dst_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma0_dst_r() : word0(0), word1(0) {} + CONSTEXPR dma0_dst_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = 
static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma0_dst_r copy() volatile + { + return *this; + } +#endif +}; + +// dma0_len_r - None +struct dma0_len_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma0_len_r() : word0(0), word1(0) {} + CONSTEXPR dma0_len_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma0_len_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_base0_r - None +struct ifm2_base0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_base0_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_base0_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_base0_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_base1_r - None +struct ifm2_base1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_base1_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_base1_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator 
uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_base1_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_base2_r - None +struct ifm2_base2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_base2_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_base2_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_base2_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_base3_r - None +struct ifm2_base3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_base3_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_base3_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_base3_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_stride_x_r - None +struct ifm2_stride_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_stride_x_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_stride_x_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_stride_x_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_stride_y_r - None +struct 
ifm2_stride_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_stride_y_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_stride_y_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_stride_y_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_stride_c_r - None +struct ifm2_stride_c_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_stride_c_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_stride_c_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_stride_c_r copy() volatile + { + return *this; + } +#endif +}; + +// revision_r - Internal FPGA build revision: first 32-bits of the Ultan Git hash used for the build +struct revision_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR revision_r() : word0(0) {} + CONSTEXPR revision_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + revision_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR revision_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile revision_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid4_r - Peripheral ID byte 4 (Arm=code 4) +struct pid4_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID4 : 32; // Byte 4 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + 
CONSTEXPR pid4_r() : word0(4) {} + CONSTEXPR pid4_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid4_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID4() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID4() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid4_r &set_PID4(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid4_r &set_PID4(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid5_r - Peripheral ID byte 5 (reserved) +struct pid5_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID5 : 32; // Byte 5 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid5_r() : word0(0) {} + CONSTEXPR pid5_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid5_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID5() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID5() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid5_r &set_PID5(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid5_r &set_PID5(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid6_r - Peripheral ID byte 6 (reserved) +struct pid6_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID6 : 32; // Byte 6 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid6_r() : word0(0) {} + CONSTEXPR pid6_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid6_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID6() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID6() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid6_r &set_PID6(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid6_r &set_PID6(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid7_r - Peripheral ID byte 7 (reserved) +struct pid7_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID7 : 32; // Byte 7 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid7_r() : word0(0) {} + CONSTEXPR pid7_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid7_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID7() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID7() const volatile + { + uint32_t value = word0; + return 
value; + } + CONSTEXPR pid7_r &set_PID7(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid7_r &set_PID7(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid0_r - Peripheral ID byte 0. This is bits[7:0] of the part number +struct pid0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID0 : 32; // Byte 0 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid0_r() : word0(128) {} + CONSTEXPR pid0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid0_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID0() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID0() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid0_r &set_PID0(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid0_r &set_PID0(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid1_r - Peripheral ID byte 1. This is bits[11:8] of the part number in bits[3:0], and bits[3:0] of the Arm ID in +// bits[7:4] +struct pid1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID1 : 32; // Byte 1 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid1_r() : word0(181) {} + CONSTEXPR pid1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID1() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID1() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid1_r &set_PID1(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid1_r &set_PID1(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid2_r - Peripheral ID byte 2. 
This is bits[6:4] of the Arm ID in bits[2:0], and bit 3 indicates format B +struct pid2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID2 : 32; // Byte 2 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid2_r() : word0(11) {} + CONSTEXPR pid2_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid2_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID2() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID2() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid2_r &set_PID2(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid2_r &set_PID2(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid3_r - Peripheral ID byte 3 +struct pid3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID3 : 32; // Byte 1 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid3_r() : word0(0) {} + CONSTEXPR pid3_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid3_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID3() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID3() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid3_r &set_PID3(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid3_r &set_PID3(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// cid0_r - Component ID byte 0 +struct cid0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CID0 : 32; // Byte 0 of Component ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cid0_r() : word0(13) {} + CONSTEXPR cid0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + cid0_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_CID0() const + { + uint32_t value = word0; + return value; + } + uint32_t get_CID0() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR cid0_r &set_CID0(uint32_t value) + { + word0 = value; + return *this; + } + volatile cid0_r &set_CID0(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// cid1_r - Component ID byte 1 +struct cid1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CID1 : 32; // Byte 1 of Component ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cid1_r() : word0(240) {} + CONSTEXPR cid1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator 
uint32_t() volatile + { + return word0; + } + cid1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_CID1() const + { + uint32_t value = word0; + return value; + } + uint32_t get_CID1() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR cid1_r &set_CID1(uint32_t value) + { + word0 = value; + return *this; + } + volatile cid1_r &set_CID1(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// cid2_r - Component ID byte 2 +struct cid2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CID2 : 32; // Byte 2 of Component ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cid2_r() : word0(5) {} + CONSTEXPR cid2_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + cid2_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_CID2() const + { + uint32_t value = word0; + return value; + } + uint32_t get_CID2() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR cid2_r &set_CID2(uint32_t value) + { + word0 = value; + return *this; + } + volatile cid2_r &set_CID2(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// cid3_r - Component ID byte 3 +struct cid3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CID3 : 32; // Byte 3 of Component ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cid3_r() : word0(177) {} + CONSTEXPR cid3_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + cid3_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_CID3() const + { + uint32_t value = word0; + return value; + } + uint32_t get_CID3() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR cid3_r &set_CID3(uint32_t value) + { + word0 = value; + return *this; + } + volatile cid3_r &set_CID3(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +struct NPU_REG +{ + STRUCT id_r ID; // 0x0000 + STRUCT status_r STATUS; // 0x0004 + STRUCT cmd_r CMD; // 0x0008 + STRUCT reset_r RESET; // 0x000C + STRUCT qbase_r QBASE; // 0x0010 + STRUCT qread_r QREAD; // 0x0018 + STRUCT qconfig_r QCONFIG; // 0x001C + STRUCT qsize_r QSIZE; // 0x0020 + STRUCT prot_r PROT; // 0x0024 + STRUCT config_r CONFIG; // 0x0028 + STRUCT lock_r LOCK; // 0x002C + uint32_t unused0[3]; + STRUCT regioncfg_r REGIONCFG; // 0x003C + STRUCT axi_limit0_r AXI_LIMIT0; // 0x0040 + STRUCT axi_limit1_r AXI_LIMIT1; // 0x0044 + STRUCT axi_limit2_r AXI_LIMIT2; // 0x0048 + STRUCT axi_limit3_r AXI_LIMIT3; // 0x004C + uint32_t unused1[12]; + STRUCT basep_r BASEP[8]; // 0x0080 + uint32_t unused2[16]; + STRUCT wd_status_r WD_STATUS; // 0x0100 + STRUCT mac_status_r MAC_STATUS; // 0x0104 + STRUCT ao_status_r AO_STATUS; // 0x0108 + uint32_t unused3[1]; + STRUCT dma_status0_r DMA_STATUS0; // 0x0110 + STRUCT dma_status1_r DMA_STATUS1; // 0x0114 + uint32_t unused4[10]; + STRUCT clkforce_r CLKFORCE; // 0x0140 + STRUCT debug_address_r DEBUG_ADDRESS; // 0x0144 + STRUCT debug_misc_r DEBUG_MISC; // 
0x0148 + uint32_t unused5[1]; + STRUCT debug_block_r DEBUG_BLOCK; // 0x0150 + uint32_t unused6[11]; + STRUCT pmcr_r PMCR; // 0x0180 + STRUCT pmcntenset_r PMCNTENSET; // 0x0184 + STRUCT pmcntenclr_r PMCNTENCLR; // 0x0188 + STRUCT pmovsset_r PMOVSSET; // 0x018C + STRUCT pmovsclr_r PMOVSCLR; // 0x0190 + STRUCT pmintset_r PMINTSET; // 0x0194 + STRUCT pmintclr_r PMINTCLR; // 0x0198 + uint32_t unused7[1]; + STRUCT pmccntr_r PMCCNTR; // 0x01A0 + STRUCT pmccntr_cfg_r PMCCNTR_CFG; // 0x01A8 + STRUCT pmcaxi_chan_r PMCAXI_CHAN; // 0x01AC + uint32_t unused8[20]; + STRUCT kernel_x_r KERNEL_X; // 0x0200 + STRUCT kernel_y_r KERNEL_Y; // 0x0204 + STRUCT kernel_w_m1_r KERNEL_W_M1; // 0x0208 + STRUCT kernel_h_m1_r KERNEL_H_M1; // 0x020C + STRUCT ofm_cblk_width_m1_r OFM_CBLK_WIDTH_M1; // 0x0210 + STRUCT ofm_cblk_height_m1_r OFM_CBLK_HEIGHT_M1; // 0x0214 + STRUCT ofm_cblk_depth_m1_r OFM_CBLK_DEPTH_M1; // 0x0218 + STRUCT ifm_cblk_depth_m1_r IFM_CBLK_DEPTH_M1; // 0x021C + STRUCT ofm_x_r OFM_X; // 0x0220 + STRUCT ofm_y_r OFM_Y; // 0x0224 + STRUCT ofm_z_r OFM_Z; // 0x0228 + STRUCT ifm_z_r IFM_Z; // 0x022C + STRUCT pad_top_r PAD_TOP; // 0x0230 + STRUCT pad_left_r PAD_LEFT; // 0x0234 + STRUCT ifm_cblk_width_r IFM_CBLK_WIDTH; // 0x0238 + STRUCT ifm_cblk_height_r IFM_CBLK_HEIGHT; // 0x023C + STRUCT dma_ifm_src_r DMA_IFM_SRC; // 0x0240 + STRUCT dma_ifm_dst_r DMA_IFM_DST; // 0x0248 + STRUCT dma_ofm_src_r DMA_OFM_SRC; // 0x024C + STRUCT dma_ofm_dst_r DMA_OFM_DST; // 0x0250 + STRUCT dma_weight_src_r DMA_WEIGHT_SRC; // 0x0258 + STRUCT dma_cmd_src_r DMA_CMD_SRC; // 0x0260 + STRUCT dma_cmd_size_r DMA_CMD_SIZE; // 0x0268 + STRUCT dma_m2m_src_r DMA_M2M_SRC; // 0x026C + STRUCT dma_m2m_dst_r DMA_M2M_DST; // 0x0274 + STRUCT current_qread_r CURRENT_QREAD; // 0x027C + STRUCT dma_scale_src_r DMA_SCALE_SRC; // 0x0280 + uint32_t unused9[11]; + STRUCT current_block_r CURRENT_BLOCK; // 0x02B4 + STRUCT current_op_r CURRENT_OP; // 0x02B8 + STRUCT current_cmd_r CURRENT_CMD; // 0x02BC + uint32_t unused10[16]; + STRUCT pmevcntr_r PMEVCNTR[4]; // 0x0300 + uint32_t unused11[28]; + STRUCT pmevtyper_r PMEVTYPER[4]; // 0x0380 + uint32_t unused12[28]; + STRUCT shared_buffer_r SHARED_BUFFER[256]; // 0x0400 + STRUCT ifm_pad_top_r IFM_PAD_TOP; // 0x0800 + STRUCT ifm_pad_left_r IFM_PAD_LEFT; // 0x0804 + STRUCT ifm_pad_right_r IFM_PAD_RIGHT; // 0x0808 + STRUCT ifm_pad_bottom_r IFM_PAD_BOTTOM; // 0x080C + STRUCT ifm_depth_m1_r IFM_DEPTH_M1; // 0x0810 + STRUCT ifm_precision_r IFM_PRECISION; // 0x0814 + uint32_t unused13[1]; + STRUCT ifm_upscale_r IFM_UPSCALE; // 0x081C + uint32_t unused14[1]; + STRUCT ifm_zero_point_r IFM_ZERO_POINT; // 0x0824 + STRUCT ifm_width0_m1_r IFM_WIDTH0_M1; // 0x0828 + STRUCT ifm_height0_m1_r IFM_HEIGHT0_M1; // 0x082C + STRUCT ifm_height1_m1_r IFM_HEIGHT1_M1; // 0x0830 + STRUCT ifm_ib_end_r IFM_IB_END; // 0x0834 + uint32_t unused15[1]; + STRUCT ifm_region_r IFM_REGION; // 0x083C + uint32_t unused16[1]; + STRUCT ofm_width_m1_r OFM_WIDTH_M1; // 0x0844 + STRUCT ofm_height_m1_r OFM_HEIGHT_M1; // 0x0848 + STRUCT ofm_depth_m1_r OFM_DEPTH_M1; // 0x084C + STRUCT ofm_precision_r OFM_PRECISION; // 0x0850 + STRUCT ofm_blk_width_m1_r OFM_BLK_WIDTH_M1; // 0x0854 + STRUCT ofm_blk_height_m1_r OFM_BLK_HEIGHT_M1; // 0x0858 + STRUCT ofm_blk_depth_m1_r OFM_BLK_DEPTH_M1; // 0x085C + STRUCT ofm_zero_point_r OFM_ZERO_POINT; // 0x0860 + uint32_t unused17[1]; + STRUCT ofm_width0_m1_r OFM_WIDTH0_M1; // 0x0868 + STRUCT ofm_height0_m1_r OFM_HEIGHT0_M1; // 0x086C + STRUCT ofm_height1_m1_r OFM_HEIGHT1_M1; // 0x0870 + uint32_t unused18[2]; + STRUCT 
ofm_region_r OFM_REGION; // 0x087C + STRUCT kernel_width_m1_r KERNEL_WIDTH_M1; // 0x0880 + STRUCT kernel_height_m1_r KERNEL_HEIGHT_M1; // 0x0884 + STRUCT kernel_stride_r KERNEL_STRIDE; // 0x0888 + uint32_t unused19[1]; + STRUCT acc_format_r ACC_FORMAT; // 0x0890 + STRUCT activation_r ACTIVATION; // 0x0894 + STRUCT activation_min_r ACTIVATION_MIN; // 0x0898 + STRUCT activation_max_r ACTIVATION_MAX; // 0x089C + STRUCT weight_region_r WEIGHT_REGION; // 0x08A0 + STRUCT scale_region_r SCALE_REGION; // 0x08A4 + uint32_t unused20[3]; + STRUCT ab_start_r AB_START; // 0x08B4 + uint32_t unused21[1]; + STRUCT blockdep_r BLOCKDEP; // 0x08BC + STRUCT dma0_src_region_r DMA0_SRC_REGION; // 0x08C0 + STRUCT dma0_dst_region_r DMA0_DST_REGION; // 0x08C4 + STRUCT dma0_size0_r DMA0_SIZE0; // 0x08C8 + STRUCT dma0_size1_r DMA0_SIZE1; // 0x08CC + uint32_t unused22[12]; + STRUCT ifm2_broadcast_r IFM2_BROADCAST; // 0x0900 + STRUCT ifm2_scalar_r IFM2_SCALAR; // 0x0904 + uint32_t unused23[3]; + STRUCT ifm2_precision_r IFM2_PRECISION; // 0x0914 + uint32_t unused24[3]; + STRUCT ifm2_zero_point_r IFM2_ZERO_POINT; // 0x0924 + STRUCT ifm2_width0_m1_r IFM2_WIDTH0_M1; // 0x0928 + STRUCT ifm2_height0_m1_r IFM2_HEIGHT0_M1; // 0x092C + STRUCT ifm2_height1_m1_r IFM2_HEIGHT1_M1; // 0x0930 + STRUCT ifm2_ib_start_r IFM2_IB_START; // 0x0934 + uint32_t unused25[1]; + STRUCT ifm2_region_r IFM2_REGION; // 0x093C + uint32_t unused26[48]; + STRUCT ifm_base0_r IFM_BASE0; // 0x0A00 + STRUCT ifm_base1_r IFM_BASE1; // 0x0A08 + STRUCT ifm_base2_r IFM_BASE2; // 0x0A10 + STRUCT ifm_base3_r IFM_BASE3; // 0x0A18 + STRUCT ifm_stride_x_r IFM_STRIDE_X; // 0x0A20 + STRUCT ifm_stride_y_r IFM_STRIDE_Y; // 0x0A28 + STRUCT ifm_stride_c_r IFM_STRIDE_C; // 0x0A30 + uint32_t unused27[2]; + STRUCT ofm_base0_r OFM_BASE0; // 0x0A40 + STRUCT ofm_base1_r OFM_BASE1; // 0x0A48 + STRUCT ofm_base2_r OFM_BASE2; // 0x0A50 + STRUCT ofm_base3_r OFM_BASE3; // 0x0A58 + STRUCT ofm_stride_x_r OFM_STRIDE_X; // 0x0A60 + STRUCT ofm_stride_y_r OFM_STRIDE_Y; // 0x0A68 + STRUCT ofm_stride_c_r OFM_STRIDE_C; // 0x0A70 + uint32_t unused28[2]; + STRUCT weight_base_r WEIGHT_BASE; // 0x0A80 + STRUCT weight_length_r WEIGHT_LENGTH; // 0x0A88 + STRUCT scale_base_r SCALE_BASE; // 0x0A90 + STRUCT scale_length_r SCALE_LENGTH; // 0x0A98 + STRUCT ofm_scale_r OFM_SCALE; // 0x0AA0 + STRUCT ofm_scale_shift_r OFM_SCALE_SHIFT; // 0x0AA4 + STRUCT opa_scale_r OPA_SCALE; // 0x0AA8 + STRUCT opa_scale_shift_r OPA_SCALE_SHIFT; // 0x0AAC + STRUCT opb_scale_r OPB_SCALE; // 0x0AB0 + uint32_t unused29[3]; + STRUCT dma0_src_r DMA0_SRC; // 0x0AC0 + STRUCT dma0_dst_r DMA0_DST; // 0x0AC8 + STRUCT dma0_len_r DMA0_LEN; // 0x0AD0 + uint32_t unused30[10]; + STRUCT ifm2_base0_r IFM2_BASE0; // 0x0B00 + STRUCT ifm2_base1_r IFM2_BASE1; // 0x0B08 + STRUCT ifm2_base2_r IFM2_BASE2; // 0x0B10 + STRUCT ifm2_base3_r IFM2_BASE3; // 0x0B18 + STRUCT ifm2_stride_x_r IFM2_STRIDE_X; // 0x0B20 + STRUCT ifm2_stride_y_r IFM2_STRIDE_Y; // 0x0B28 + STRUCT ifm2_stride_c_r IFM2_STRIDE_C; // 0x0B30 + uint32_t unused31[18]; + uint32_t USER_DEFINED[16]; // 0x0B80 + uint32_t unused32[256]; + STRUCT revision_r REVISION; // 0x0FC0 + uint32_t unused33[3]; + STRUCT pid4_r PID4; // 0x0FD0 + STRUCT pid5_r PID5; // 0x0FD4 + STRUCT pid6_r PID6; // 0x0FD8 + STRUCT pid7_r PID7; // 0x0FDC + STRUCT pid0_r PID0; // 0x0FE0 + STRUCT pid1_r PID1; // 0x0FE4 + STRUCT pid2_r PID2; // 0x0FE8 + STRUCT pid3_r PID3; // 0x0FEC + STRUCT cid0_r CID0; // 0x0FF0 + STRUCT cid1_r CID1; // 0x0FF4 + STRUCT cid2_r CID2; // 0x0FF8 + STRUCT cid3_r CID3; // 0x0FFC + +#ifdef 
__cplusplus + enum class access_type_t : uint8_t + { + RW, + RO, + WO + }; + NPU_REG() + { + reset(); + } + void reset() + { + ID = 269500929; + STATUS = 8; + CMD = 12; + RESET = 0; + QBASE = 0; + QREAD = 0; + QCONFIG = 0; + QSIZE = 0; + PROT = 0; + CONFIG = 0; + LOCK = 0; + REGIONCFG = 0; + AXI_LIMIT0 = 0; + AXI_LIMIT1 = 0; + AXI_LIMIT2 = 0; + AXI_LIMIT3 = 0; + for (size_t i = 0; i < (sizeof(BASEP) / sizeof(BASEP[0])); ++i) + BASEP[i] = 0; + WD_STATUS = 0; + MAC_STATUS = 0; + AO_STATUS = 0; + DMA_STATUS0 = 0; + DMA_STATUS1 = 0; + CLKFORCE = 0; + DEBUG_ADDRESS = 0; + DEBUG_MISC = 0; + DEBUG_BLOCK = 0; + PMCR = 8192; + PMCNTENSET = 0; + PMCNTENCLR = 0; + PMOVSSET = 0; + PMOVSCLR = 0; + PMINTSET = 0; + PMINTCLR = 0; + PMCCNTR = 0; + PMCCNTR_CFG = 0; + PMCAXI_CHAN = 0; + KERNEL_X = 0; + KERNEL_Y = 0; + KERNEL_W_M1 = 0; + KERNEL_H_M1 = 0; + OFM_CBLK_WIDTH_M1 = 0; + OFM_CBLK_HEIGHT_M1 = 0; + OFM_CBLK_DEPTH_M1 = 0; + IFM_CBLK_DEPTH_M1 = 0; + OFM_X = 0; + OFM_Y = 0; + OFM_Z = 0; + IFM_Z = 0; + PAD_TOP = 0; + PAD_LEFT = 0; + IFM_CBLK_WIDTH = 0; + IFM_CBLK_HEIGHT = 0; + DMA_IFM_SRC = 0; + DMA_IFM_DST = 0; + DMA_OFM_SRC = 0; + DMA_OFM_DST = 0; + DMA_WEIGHT_SRC = 0; + DMA_CMD_SRC = 0; + DMA_CMD_SIZE = 0; + DMA_M2M_SRC = 0; + DMA_M2M_DST = 0; + CURRENT_QREAD = 0; + DMA_SCALE_SRC = 0; + CURRENT_BLOCK = 0; + CURRENT_OP = 0; + CURRENT_CMD = 0; + for (size_t i = 0; i < (sizeof(PMEVCNTR) / sizeof(PMEVCNTR[0])); ++i) + PMEVCNTR[i] = 0; + for (size_t i = 0; i < (sizeof(PMEVTYPER) / sizeof(PMEVTYPER[0])); ++i) + PMEVTYPER[i] = 0; + for (size_t i = 0; i < (sizeof(SHARED_BUFFER) / sizeof(SHARED_BUFFER[0])); ++i) + SHARED_BUFFER[i] = 0; + IFM_PAD_TOP = 0; + IFM_PAD_LEFT = 0; + IFM_PAD_RIGHT = 0; + IFM_PAD_BOTTOM = 0; + IFM_DEPTH_M1 = 0; + IFM_PRECISION = 0; + IFM_UPSCALE = 0; + IFM_ZERO_POINT = 0; + IFM_WIDTH0_M1 = 0; + IFM_HEIGHT0_M1 = 0; + IFM_HEIGHT1_M1 = 0; + IFM_IB_END = 0; + IFM_REGION = 0; + OFM_WIDTH_M1 = 0; + OFM_HEIGHT_M1 = 0; + OFM_DEPTH_M1 = 0; + OFM_PRECISION = 0; + OFM_BLK_WIDTH_M1 = 0; + OFM_BLK_HEIGHT_M1 = 0; + OFM_BLK_DEPTH_M1 = 0; + OFM_ZERO_POINT = 0; + OFM_WIDTH0_M1 = 0; + OFM_HEIGHT0_M1 = 0; + OFM_HEIGHT1_M1 = 0; + OFM_REGION = 0; + KERNEL_WIDTH_M1 = 0; + KERNEL_HEIGHT_M1 = 0; + KERNEL_STRIDE = 0; + ACC_FORMAT = 0; + ACTIVATION = 0; + ACTIVATION_MIN = 0; + ACTIVATION_MAX = 0; + WEIGHT_REGION = 0; + SCALE_REGION = 0; + AB_START = 0; + BLOCKDEP = 0; + DMA0_SRC_REGION = 0; + DMA0_DST_REGION = 0; + DMA0_SIZE0 = 0; + DMA0_SIZE1 = 0; + IFM2_BROADCAST = 0; + IFM2_SCALAR = 0; + IFM2_PRECISION = 0; + IFM2_ZERO_POINT = 0; + IFM2_WIDTH0_M1 = 0; + IFM2_HEIGHT0_M1 = 0; + IFM2_HEIGHT1_M1 = 0; + IFM2_IB_START = 0; + IFM2_REGION = 0; + IFM_BASE0 = 0; + IFM_BASE1 = 0; + IFM_BASE2 = 0; + IFM_BASE3 = 0; + IFM_STRIDE_X = 0; + IFM_STRIDE_Y = 0; + IFM_STRIDE_C = 0; + OFM_BASE0 = 0; + OFM_BASE1 = 0; + OFM_BASE2 = 0; + OFM_BASE3 = 0; + OFM_STRIDE_X = 0; + OFM_STRIDE_Y = 0; + OFM_STRIDE_C = 0; + WEIGHT_BASE = 0; + WEIGHT_LENGTH = 0; + SCALE_BASE = 0; + SCALE_LENGTH = 0; + OFM_SCALE = 0; + OFM_SCALE_SHIFT = 0; + OPA_SCALE = 0; + OPA_SCALE_SHIFT = 0; + OPB_SCALE = 0; + DMA0_SRC = 0; + DMA0_DST = 0; + DMA0_LEN = 0; + IFM2_BASE0 = 0; + IFM2_BASE1 = 0; + IFM2_BASE2 = 0; + IFM2_BASE3 = 0; + IFM2_STRIDE_X = 0; + IFM2_STRIDE_Y = 0; + IFM2_STRIDE_C = 0; + for (size_t i = 0; i < (sizeof(USER_DEFINED) / sizeof(USER_DEFINED[0])); ++i) + USER_DEFINED[i] = 0; + REVISION = 0; + PID4 = 4; + PID5 = 0; + PID6 = 0; + PID7 = 0; + PID0 = 128; + PID1 = 181; + PID2 = 11; + PID3 = 0; + CID0 = 13; + CID1 = 240; + CID2 = 5; + CID3 = 177; + } 
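Editor's note: the NPU_REG model above exposes the register file both as typed members and, via operator[] and get_access_type(), as raw 32-bit words addressed by byte offset. The short sketch below is illustrative only and is not part of the generated header; it assumes this header (with its CONSTEXPR/STRUCT macros and NPU_NAMESPACE) has already been included, and the helper name dump_register is hypothetical.

#include <cstdio>

// Illustrative only: print one register word and whether its offset is writable,
// using NPU_REG::operator[] (byte-offset indexing) and NPU_REG::get_access_type().
static void dump_register(NPU_REG &regs, uint32_t offset)
{
    const uint32_t value   = regs[static_cast<int>(offset)];
    const bool writable    = regs.get_access_type(offset) == NPU_REG::access_type_t::RW;
    std::printf("0x%04X = 0x%08X (%s)\n",
                static_cast<unsigned>(offset),
                static_cast<unsigned>(value),
                writable ? "RW" : "RO/WO");
}

// Usage sketch:
//     NPU_REG regs;              // the constructor calls reset(), loading the
//     dump_register(regs, 0);    // documented defaults (e.g. ID = 269500929 at 0x0000)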
+ uint32_t &operator[](const int addr_offset) + { + return reinterpret_cast(this)[addr_offset / 4]; + } + access_type_t get_access_type(uint32_t offset) + { + switch (offset) + { + case 0: + return access_type_t::RO; + case 4: + return access_type_t::RO; + case 8: + return access_type_t::RW; + case 12: + return access_type_t::RW; + case 16: + return access_type_t::RW; + case 24: + return access_type_t::RO; + case 28: + return access_type_t::RW; + case 32: + return access_type_t::RW; + case 36: + return access_type_t::RO; + case 40: + return access_type_t::RO; + case 44: + return access_type_t::RW; + case 60: + return access_type_t::RW; + case 64: + return access_type_t::RW; + case 68: + return access_type_t::RW; + case 72: + return access_type_t::RW; + case 76: + return access_type_t::RW; + case 128: + return access_type_t::RW; + case 136: + return access_type_t::RW; + case 144: + return access_type_t::RW; + case 152: + return access_type_t::RW; + case 160: + return access_type_t::RW; + case 168: + return access_type_t::RW; + case 176: + return access_type_t::RW; + case 184: + return access_type_t::RW; + case 256: + return access_type_t::RO; + case 260: + return access_type_t::RO; + case 264: + return access_type_t::RO; + case 272: + return access_type_t::RO; + case 276: + return access_type_t::RO; + case 320: + return access_type_t::RW; + case 324: + return access_type_t::RW; + case 328: + return access_type_t::RW; + case 336: + return access_type_t::RW; + case 384: + return access_type_t::RW; + case 388: + return access_type_t::RW; + case 392: + return access_type_t::RW; + case 396: + return access_type_t::RW; + case 400: + return access_type_t::RW; + case 404: + return access_type_t::RW; + case 408: + return access_type_t::RW; + case 416: + return access_type_t::RW; + case 424: + return access_type_t::RW; + case 428: + return access_type_t::RW; + case 512: + return access_type_t::RO; + case 516: + return access_type_t::RO; + case 520: + return access_type_t::RO; + case 524: + return access_type_t::RO; + case 528: + return access_type_t::RO; + case 532: + return access_type_t::RO; + case 536: + return access_type_t::RO; + case 540: + return access_type_t::RO; + case 544: + return access_type_t::RO; + case 548: + return access_type_t::RO; + case 552: + return access_type_t::RO; + case 556: + return access_type_t::RO; + case 560: + return access_type_t::RO; + case 564: + return access_type_t::RO; + case 568: + return access_type_t::RO; + case 572: + return access_type_t::RO; + case 576: + return access_type_t::RO; + case 584: + return access_type_t::RO; + case 588: + return access_type_t::RO; + case 592: + return access_type_t::RO; + case 600: + return access_type_t::RO; + case 608: + return access_type_t::RO; + case 616: + return access_type_t::RO; + case 620: + return access_type_t::RO; + case 628: + return access_type_t::RO; + case 636: + return access_type_t::RO; + case 640: + return access_type_t::RO; + case 692: + return access_type_t::RO; + case 696: + return access_type_t::RO; + case 700: + return access_type_t::RO; + case 768: + return access_type_t::RW; + case 772: + return access_type_t::RW; + case 776: + return access_type_t::RW; + case 780: + return access_type_t::RW; + case 896: + return access_type_t::RW; + case 900: + return access_type_t::RW; + case 904: + return access_type_t::RW; + case 908: + return access_type_t::RW; + case 1024: + return access_type_t::RW; + case 1028: + return access_type_t::RW; + case 1032: + return access_type_t::RW; + case 1036: + return 
access_type_t::RW; + case 1040: + return access_type_t::RW; + case 1044: + return access_type_t::RW; + case 1048: + return access_type_t::RW; + case 1052: + return access_type_t::RW; + case 1056: + return access_type_t::RW; + case 1060: + return access_type_t::RW; + case 1064: + return access_type_t::RW; + case 1068: + return access_type_t::RW; + case 1072: + return access_type_t::RW; + case 1076: + return access_type_t::RW; + case 1080: + return access_type_t::RW; + case 1084: + return access_type_t::RW; + case 1088: + return access_type_t::RW; + case 1092: + return access_type_t::RW; + case 1096: + return access_type_t::RW; + case 1100: + return access_type_t::RW; + case 1104: + return access_type_t::RW; + case 1108: + return access_type_t::RW; + case 1112: + return access_type_t::RW; + case 1116: + return access_type_t::RW; + case 1120: + return access_type_t::RW; + case 1124: + return access_type_t::RW; + case 1128: + return access_type_t::RW; + case 1132: + return access_type_t::RW; + case 1136: + return access_type_t::RW; + case 1140: + return access_type_t::RW; + case 1144: + return access_type_t::RW; + case 1148: + return access_type_t::RW; + case 1152: + return access_type_t::RW; + case 1156: + return access_type_t::RW; + case 1160: + return access_type_t::RW; + case 1164: + return access_type_t::RW; + case 1168: + return access_type_t::RW; + case 1172: + return access_type_t::RW; + case 1176: + return access_type_t::RW; + case 1180: + return access_type_t::RW; + case 1184: + return access_type_t::RW; + case 1188: + return access_type_t::RW; + case 1192: + return access_type_t::RW; + case 1196: + return access_type_t::RW; + case 1200: + return access_type_t::RW; + case 1204: + return access_type_t::RW; + case 1208: + return access_type_t::RW; + case 1212: + return access_type_t::RW; + case 1216: + return access_type_t::RW; + case 1220: + return access_type_t::RW; + case 1224: + return access_type_t::RW; + case 1228: + return access_type_t::RW; + case 1232: + return access_type_t::RW; + case 1236: + return access_type_t::RW; + case 1240: + return access_type_t::RW; + case 1244: + return access_type_t::RW; + case 1248: + return access_type_t::RW; + case 1252: + return access_type_t::RW; + case 1256: + return access_type_t::RW; + case 1260: + return access_type_t::RW; + case 1264: + return access_type_t::RW; + case 1268: + return access_type_t::RW; + case 1272: + return access_type_t::RW; + case 1276: + return access_type_t::RW; + case 1280: + return access_type_t::RW; + case 1284: + return access_type_t::RW; + case 1288: + return access_type_t::RW; + case 1292: + return access_type_t::RW; + case 1296: + return access_type_t::RW; + case 1300: + return access_type_t::RW; + case 1304: + return access_type_t::RW; + case 1308: + return access_type_t::RW; + case 1312: + return access_type_t::RW; + case 1316: + return access_type_t::RW; + case 1320: + return access_type_t::RW; + case 1324: + return access_type_t::RW; + case 1328: + return access_type_t::RW; + case 1332: + return access_type_t::RW; + case 1336: + return access_type_t::RW; + case 1340: + return access_type_t::RW; + case 1344: + return access_type_t::RW; + case 1348: + return access_type_t::RW; + case 1352: + return access_type_t::RW; + case 1356: + return access_type_t::RW; + case 1360: + return access_type_t::RW; + case 1364: + return access_type_t::RW; + case 1368: + return access_type_t::RW; + case 1372: + return access_type_t::RW; + case 1376: + return access_type_t::RW; + case 1380: + return access_type_t::RW; + case 
1384: + return access_type_t::RW; + case 1388: + return access_type_t::RW; + case 1392: + return access_type_t::RW; + case 1396: + return access_type_t::RW; + case 1400: + return access_type_t::RW; + case 1404: + return access_type_t::RW; + case 1408: + return access_type_t::RW; + case 1412: + return access_type_t::RW; + case 1416: + return access_type_t::RW; + case 1420: + return access_type_t::RW; + case 1424: + return access_type_t::RW; + case 1428: + return access_type_t::RW; + case 1432: + return access_type_t::RW; + case 1436: + return access_type_t::RW; + case 1440: + return access_type_t::RW; + case 1444: + return access_type_t::RW; + case 1448: + return access_type_t::RW; + case 1452: + return access_type_t::RW; + case 1456: + return access_type_t::RW; + case 1460: + return access_type_t::RW; + case 1464: + return access_type_t::RW; + case 1468: + return access_type_t::RW; + case 1472: + return access_type_t::RW; + case 1476: + return access_type_t::RW; + case 1480: + return access_type_t::RW; + case 1484: + return access_type_t::RW; + case 1488: + return access_type_t::RW; + case 1492: + return access_type_t::RW; + case 1496: + return access_type_t::RW; + case 1500: + return access_type_t::RW; + case 1504: + return access_type_t::RW; + case 1508: + return access_type_t::RW; + case 1512: + return access_type_t::RW; + case 1516: + return access_type_t::RW; + case 1520: + return access_type_t::RW; + case 1524: + return access_type_t::RW; + case 1528: + return access_type_t::RW; + case 1532: + return access_type_t::RW; + case 1536: + return access_type_t::RW; + case 1540: + return access_type_t::RW; + case 1544: + return access_type_t::RW; + case 1548: + return access_type_t::RW; + case 1552: + return access_type_t::RW; + case 1556: + return access_type_t::RW; + case 1560: + return access_type_t::RW; + case 1564: + return access_type_t::RW; + case 1568: + return access_type_t::RW; + case 1572: + return access_type_t::RW; + case 1576: + return access_type_t::RW; + case 1580: + return access_type_t::RW; + case 1584: + return access_type_t::RW; + case 1588: + return access_type_t::RW; + case 1592: + return access_type_t::RW; + case 1596: + return access_type_t::RW; + case 1600: + return access_type_t::RW; + case 1604: + return access_type_t::RW; + case 1608: + return access_type_t::RW; + case 1612: + return access_type_t::RW; + case 1616: + return access_type_t::RW; + case 1620: + return access_type_t::RW; + case 1624: + return access_type_t::RW; + case 1628: + return access_type_t::RW; + case 1632: + return access_type_t::RW; + case 1636: + return access_type_t::RW; + case 1640: + return access_type_t::RW; + case 1644: + return access_type_t::RW; + case 1648: + return access_type_t::RW; + case 1652: + return access_type_t::RW; + case 1656: + return access_type_t::RW; + case 1660: + return access_type_t::RW; + case 1664: + return access_type_t::RW; + case 1668: + return access_type_t::RW; + case 1672: + return access_type_t::RW; + case 1676: + return access_type_t::RW; + case 1680: + return access_type_t::RW; + case 1684: + return access_type_t::RW; + case 1688: + return access_type_t::RW; + case 1692: + return access_type_t::RW; + case 1696: + return access_type_t::RW; + case 1700: + return access_type_t::RW; + case 1704: + return access_type_t::RW; + case 1708: + return access_type_t::RW; + case 1712: + return access_type_t::RW; + case 1716: + return access_type_t::RW; + case 1720: + return access_type_t::RW; + case 1724: + return access_type_t::RW; + case 1728: + return 
access_type_t::RW; + case 1732: + return access_type_t::RW; + case 1736: + return access_type_t::RW; + case 1740: + return access_type_t::RW; + case 1744: + return access_type_t::RW; + case 1748: + return access_type_t::RW; + case 1752: + return access_type_t::RW; + case 1756: + return access_type_t::RW; + case 1760: + return access_type_t::RW; + case 1764: + return access_type_t::RW; + case 1768: + return access_type_t::RW; + case 1772: + return access_type_t::RW; + case 1776: + return access_type_t::RW; + case 1780: + return access_type_t::RW; + case 1784: + return access_type_t::RW; + case 1788: + return access_type_t::RW; + case 1792: + return access_type_t::RW; + case 1796: + return access_type_t::RW; + case 1800: + return access_type_t::RW; + case 1804: + return access_type_t::RW; + case 1808: + return access_type_t::RW; + case 1812: + return access_type_t::RW; + case 1816: + return access_type_t::RW; + case 1820: + return access_type_t::RW; + case 1824: + return access_type_t::RW; + case 1828: + return access_type_t::RW; + case 1832: + return access_type_t::RW; + case 1836: + return access_type_t::RW; + case 1840: + return access_type_t::RW; + case 1844: + return access_type_t::RW; + case 1848: + return access_type_t::RW; + case 1852: + return access_type_t::RW; + case 1856: + return access_type_t::RW; + case 1860: + return access_type_t::RW; + case 1864: + return access_type_t::RW; + case 1868: + return access_type_t::RW; + case 1872: + return access_type_t::RW; + case 1876: + return access_type_t::RW; + case 1880: + return access_type_t::RW; + case 1884: + return access_type_t::RW; + case 1888: + return access_type_t::RW; + case 1892: + return access_type_t::RW; + case 1896: + return access_type_t::RW; + case 1900: + return access_type_t::RW; + case 1904: + return access_type_t::RW; + case 1908: + return access_type_t::RW; + case 1912: + return access_type_t::RW; + case 1916: + return access_type_t::RW; + case 1920: + return access_type_t::RW; + case 1924: + return access_type_t::RW; + case 1928: + return access_type_t::RW; + case 1932: + return access_type_t::RW; + case 1936: + return access_type_t::RW; + case 1940: + return access_type_t::RW; + case 1944: + return access_type_t::RW; + case 1948: + return access_type_t::RW; + case 1952: + return access_type_t::RW; + case 1956: + return access_type_t::RW; + case 1960: + return access_type_t::RW; + case 1964: + return access_type_t::RW; + case 1968: + return access_type_t::RW; + case 1972: + return access_type_t::RW; + case 1976: + return access_type_t::RW; + case 1980: + return access_type_t::RW; + case 1984: + return access_type_t::RW; + case 1988: + return access_type_t::RW; + case 1992: + return access_type_t::RW; + case 1996: + return access_type_t::RW; + case 2000: + return access_type_t::RW; + case 2004: + return access_type_t::RW; + case 2008: + return access_type_t::RW; + case 2012: + return access_type_t::RW; + case 2016: + return access_type_t::RW; + case 2020: + return access_type_t::RW; + case 2024: + return access_type_t::RW; + case 2028: + return access_type_t::RW; + case 2032: + return access_type_t::RW; + case 2036: + return access_type_t::RW; + case 2040: + return access_type_t::RW; + case 2044: + return access_type_t::RW; + case 2048: + return access_type_t::RW; + case 2052: + return access_type_t::RW; + case 2056: + return access_type_t::RW; + case 2060: + return access_type_t::RW; + case 2064: + return access_type_t::RW; + case 2068: + return access_type_t::RW; + case 2076: + return access_type_t::RW; + case 
2084: + return access_type_t::RW; + case 2088: + return access_type_t::RW; + case 2092: + return access_type_t::RW; + case 2096: + return access_type_t::RW; + case 2100: + return access_type_t::RW; + case 2108: + return access_type_t::RW; + case 2116: + return access_type_t::RW; + case 2120: + return access_type_t::RW; + case 2124: + return access_type_t::RW; + case 2128: + return access_type_t::RW; + case 2132: + return access_type_t::RW; + case 2136: + return access_type_t::RW; + case 2140: + return access_type_t::RW; + case 2144: + return access_type_t::RW; + case 2152: + return access_type_t::RW; + case 2156: + return access_type_t::RW; + case 2160: + return access_type_t::RW; + case 2172: + return access_type_t::RW; + case 2176: + return access_type_t::RW; + case 2180: + return access_type_t::RW; + case 2184: + return access_type_t::RW; + case 2192: + return access_type_t::RW; + case 2196: + return access_type_t::RW; + case 2200: + return access_type_t::RW; + case 2204: + return access_type_t::RW; + case 2208: + return access_type_t::RW; + case 2212: + return access_type_t::RW; + case 2228: + return access_type_t::RW; + case 2236: + return access_type_t::RW; + case 2240: + return access_type_t::RW; + case 2244: + return access_type_t::RW; + case 2248: + return access_type_t::RW; + case 2252: + return access_type_t::RW; + case 2304: + return access_type_t::RW; + case 2308: + return access_type_t::RW; + case 2324: + return access_type_t::RW; + case 2340: + return access_type_t::RW; + case 2344: + return access_type_t::RW; + case 2348: + return access_type_t::RW; + case 2352: + return access_type_t::RW; + case 2356: + return access_type_t::RW; + case 2364: + return access_type_t::RW; + case 2560: + return access_type_t::RW; + case 2568: + return access_type_t::RW; + case 2576: + return access_type_t::RW; + case 2584: + return access_type_t::RW; + case 2592: + return access_type_t::RW; + case 2600: + return access_type_t::RW; + case 2608: + return access_type_t::RW; + case 2624: + return access_type_t::RW; + case 2632: + return access_type_t::RW; + case 2640: + return access_type_t::RW; + case 2648: + return access_type_t::RW; + case 2656: + return access_type_t::RW; + case 2664: + return access_type_t::RW; + case 2672: + return access_type_t::RW; + case 2688: + return access_type_t::RW; + case 2696: + return access_type_t::RW; + case 2704: + return access_type_t::RW; + case 2712: + return access_type_t::RW; + case 2720: + return access_type_t::RW; + case 2724: + return access_type_t::RW; + case 2728: + return access_type_t::RW; + case 2732: + return access_type_t::RW; + case 2736: + return access_type_t::RW; + case 2752: + return access_type_t::RW; + case 2760: + return access_type_t::RW; + case 2768: + return access_type_t::RW; + case 2816: + return access_type_t::RW; + case 2824: + return access_type_t::RW; + case 2832: + return access_type_t::RW; + case 2840: + return access_type_t::RW; + case 2848: + return access_type_t::RW; + case 2856: + return access_type_t::RW; + case 2864: + return access_type_t::RW; + case 2944: + return access_type_t::RW; + case 2952: + return access_type_t::RW; + case 2960: + return access_type_t::RW; + case 2968: + return access_type_t::RW; + case 2976: + return access_type_t::RW; + case 2984: + return access_type_t::RW; + case 2992: + return access_type_t::RW; + case 3000: + return access_type_t::RW; + case 4032: + return access_type_t::RO; + case 4048: + return access_type_t::RO; + case 4052: + return access_type_t::RO; + case 4056: + return 
access_type_t::RO; + case 4060: + return access_type_t::RO; + case 4064: + return access_type_t::RO; + case 4068: + return access_type_t::RO; + case 4072: + return access_type_t::RO; + case 4076: + return access_type_t::RO; + case 4080: + return access_type_t::RO; + case 4084: + return access_type_t::RO; + case 4088: + return access_type_t::RO; + case 4092: + return access_type_t::RO; + default: + return access_type_t::RO; + } + } +#endif +}; + +#ifdef __cplusplus +struct isa +{ +#ifdef NPU_DISASSEMBLE + static int disassemble(const uint32_t *in, + std::string &op, + std::vector> &fields) + { + switch (*in & 0xffff) + { + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP): + { + const npu_op_stop_t &v = *reinterpret_cast(in); + op = "NPU_OP_STOP"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ): + { + const npu_op_irq_t &v = *reinterpret_cast(in); + op = "NPU_OP_IRQ"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_CONV): + { + const npu_op_conv_t &v = *reinterpret_cast(in); + op = "NPU_OP_CONV"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DEPTHWISE): + { + const npu_op_depthwise_t &v = *reinterpret_cast(in); + op = "NPU_OP_DEPTHWISE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL): + { + const npu_op_pool_t &v = *reinterpret_cast(in); + op = "NPU_OP_POOL"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE): + { + const npu_op_elementwise_t &v = *reinterpret_cast(in); + op = "NPU_OP_ELEMENTWISE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_START): + { + const npu_op_dma_start_t &v = *reinterpret_cast(in); + op = "NPU_OP_DMA_START"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT): + { + const npu_op_dma_wait_t &v = *reinterpret_cast(in); + op = "NPU_OP_DMA_WAIT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT): + { + const npu_op_kernel_wait_t &v = *reinterpret_cast(in); + op = "NPU_OP_KERNEL_WAIT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK): + { + const npu_op_pmu_mask_t &v = *reinterpret_cast(in); + op = "NPU_OP_PMU_MASK"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP): + { + const npu_set_ifm_pad_top_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PAD_TOP"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT): + { + const npu_set_ifm_pad_left_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PAD_LEFT"; 
+ v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT): + { + const npu_set_ifm_pad_right_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PAD_RIGHT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM): + { + const npu_set_ifm_pad_bottom_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PAD_BOTTOM"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1): + { + const npu_set_ifm_depth_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_DEPTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION): + { + const npu_set_ifm_precision_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PRECISION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE): + { + const npu_set_ifm_upscale_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_UPSCALE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT): + { + const npu_set_ifm_zero_point_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_ZERO_POINT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1): + { + const npu_set_ifm_width0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_WIDTH0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1): + { + const npu_set_ifm_height0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_HEIGHT0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1): + { + const npu_set_ifm_height1_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_HEIGHT1_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END): + { + const npu_set_ifm_ib_end_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_IB_END"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION): + { + const npu_set_ifm_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1): + { + const npu_set_ofm_width_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_WIDTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1): + { + const npu_set_ofm_height_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_HEIGHT_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + 
static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1): + { + const npu_set_ofm_depth_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_DEPTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION): + { + const npu_set_ofm_precision_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_PRECISION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1): + { + const npu_set_ofm_blk_width_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BLK_WIDTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1): + { + const npu_set_ofm_blk_height_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BLK_HEIGHT_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1): + { + const npu_set_ofm_blk_depth_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BLK_DEPTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT): + { + const npu_set_ofm_zero_point_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_ZERO_POINT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1): + { + const npu_set_ofm_width0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_WIDTH0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1): + { + const npu_set_ofm_height0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_HEIGHT0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1): + { + const npu_set_ofm_height1_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_HEIGHT1_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION): + { + const npu_set_ofm_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1): + { + const npu_set_kernel_width_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_KERNEL_WIDTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1): + { + const npu_set_kernel_height_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_KERNEL_HEIGHT_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE): + { + const npu_set_kernel_stride_t &v = *reinterpret_cast(in); + op = "NPU_SET_KERNEL_STRIDE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT): + { + 
const npu_set_acc_format_t &v = *reinterpret_cast(in); + op = "NPU_SET_ACC_FORMAT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION): + { + const npu_set_activation_t &v = *reinterpret_cast(in); + op = "NPU_SET_ACTIVATION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN): + { + const npu_set_activation_min_t &v = *reinterpret_cast(in); + op = "NPU_SET_ACTIVATION_MIN"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX): + { + const npu_set_activation_max_t &v = *reinterpret_cast(in); + op = "NPU_SET_ACTIVATION_MAX"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION): + { + const npu_set_weight_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_WEIGHT_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION): + { + const npu_set_scale_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_SCALE_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START): + { + const npu_set_ab_start_t &v = *reinterpret_cast(in); + op = "NPU_SET_AB_START"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP): + { + const npu_set_blockdep_t &v = *reinterpret_cast(in); + op = "NPU_SET_BLOCKDEP"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION): + { + const npu_set_dma0_src_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SRC_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION): + { + const npu_set_dma0_dst_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_DST_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0): + { + const npu_set_dma0_size0_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SIZE0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1): + { + const npu_set_dma0_size1_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SIZE1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST): + { + const npu_set_ifm2_broadcast_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BROADCAST"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR): + { + const npu_set_ifm2_scalar_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_SCALAR"; + v.disassemble(fields); + break; + } + case 
(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION): + { + const npu_set_ifm2_precision_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_PRECISION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT): + { + const npu_set_ifm2_zero_point_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_ZERO_POINT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1): + { + const npu_set_ifm2_width0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_WIDTH0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1): + { + const npu_set_ifm2_height0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_HEIGHT0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1): + { + const npu_set_ifm2_height1_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_HEIGHT1_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START): + { + const npu_set_ifm2_ib_start_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_IB_START"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION): + { + const npu_set_ifm2_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0): + { + const npu_set_ifm_base0_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_BASE0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1): + { + const npu_set_ifm_base1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_BASE1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2): + { + const npu_set_ifm_base2_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_BASE2"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3): + { + const npu_set_ifm_base3_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_BASE3"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X): + { + const npu_set_ifm_stride_x_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_STRIDE_X"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y): + { + const npu_set_ifm_stride_y_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_STRIDE_Y"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C): + { + const 
npu_set_ifm_stride_c_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_STRIDE_C"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0): + { + const npu_set_ofm_base0_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BASE0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1): + { + const npu_set_ofm_base1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BASE1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2): + { + const npu_set_ofm_base2_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BASE2"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3): + { + const npu_set_ofm_base3_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BASE3"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X): + { + const npu_set_ofm_stride_x_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_STRIDE_X"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y): + { + const npu_set_ofm_stride_y_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_STRIDE_Y"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C): + { + const npu_set_ofm_stride_c_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_STRIDE_C"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE): + { + const npu_set_weight_base_t &v = *reinterpret_cast(in); + op = "NPU_SET_WEIGHT_BASE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH): + { + const npu_set_weight_length_t &v = *reinterpret_cast(in); + op = "NPU_SET_WEIGHT_LENGTH"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE): + { + const npu_set_scale_base_t &v = *reinterpret_cast(in); + op = "NPU_SET_SCALE_BASE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH): + { + const npu_set_scale_length_t &v = *reinterpret_cast(in); + op = "NPU_SET_SCALE_LENGTH"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE): + { + const npu_set_ofm_scale_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_SCALE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE): + { + const npu_set_opa_scale_t &v = *reinterpret_cast(in); + op = "NPU_SET_OPA_SCALE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + 
static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE): + { + const npu_set_opb_scale_t &v = *reinterpret_cast(in); + op = "NPU_SET_OPB_SCALE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC): + { + const npu_set_dma0_src_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SRC"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST): + { + const npu_set_dma0_dst_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_DST"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN): + { + const npu_set_dma0_len_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_LEN"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0): + { + const npu_set_ifm2_base0_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BASE0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1): + { + const npu_set_ifm2_base1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BASE1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2): + { + const npu_set_ifm2_base2_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BASE2"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3): + { + const npu_set_ifm2_base3_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BASE3"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X): + { + const npu_set_ifm2_stride_x_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_STRIDE_X"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y): + { + const npu_set_ifm2_stride_y_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_STRIDE_Y"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C): + { + const npu_set_ifm2_stride_c_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_STRIDE_C"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED0): + { + const npu_set_user_defined0_t &v = *reinterpret_cast(in); + op = "NPU_SET_USER_DEFINED0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED1): + { + const npu_set_user_defined1_t &v = *reinterpret_cast(in); + op = "NPU_SET_USER_DEFINED1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED2): + { + const npu_set_user_defined2_t &v = *reinterpret_cast(in); + op = "NPU_SET_USER_DEFINED2"; + v.disassemble(fields); + break; + } + 
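// Editor's note (comment only, not part of the generated header): the cases in this
// switch key on the low 16 bits of the command word, i.e. (control << 14) | opcode,
// so CMD0_CTRL-class and CMD1_CTRL-class commands decode to distinct case labels.
// The return value below reflects the command width: when bits [15:14] select a
// CMD1-class command the disassembler reports 2 words consumed (the command word
// plus its following data word), otherwise 1.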
case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED3): + { + const npu_set_user_defined3_t &v = *reinterpret_cast(in); + op = "NPU_SET_USER_DEFINED3"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED4): + { + const npu_set_user_defined4_t &v = *reinterpret_cast(in); + op = "NPU_SET_USER_DEFINED4"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED5): + { + const npu_set_user_defined5_t &v = *reinterpret_cast(in); + op = "NPU_SET_USER_DEFINED5"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED6): + { + const npu_set_user_defined6_t &v = *reinterpret_cast(in); + op = "NPU_SET_USER_DEFINED6"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED7): + { + const npu_set_user_defined7_t &v = *reinterpret_cast(in); + op = "NPU_SET_USER_DEFINED7"; + v.disassemble(fields); + break; + } + } + return (*in & (3 << 14)) != 0 ? 2 : 1; + } +#endif +#endif + // Signal the end of command stream + struct npu_op_stop_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t mask : 16; // Encoding for 16-bit mask value +#ifdef __cplusplus + public: + npu_op_stop_t(uint32_t _mask) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mask(_mask & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_op_stop_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mask(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_stop_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_stop_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_mask() const + { + return static_cast(mask); + } + CONSTEXPR npu_op_stop_t &set_mask(uint32_t value) + { + mask = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("mask", std::to_string(mask))); + } +#endif +#endif + }; + // Raises an IRQ to the host + struct npu_op_irq_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t 
mask : 16; // Encoding for 16-bit mask value +#ifdef __cplusplus + public: + npu_op_irq_t(uint32_t _mask) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mask(_mask & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_op_irq_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mask(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_irq_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_irq_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_mask() const + { + return static_cast(mask); + } + CONSTEXPR npu_op_irq_t &set_mask(uint32_t value) + { + mask = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("mask", std::to_string(mask))); + } +#endif +#endif + }; + // 2D convolution + struct npu_op_conv_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; +#ifdef __cplusplus + public: + CONSTEXPR npu_op_conv_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_CONV)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_CONV) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_CONV); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_conv_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_conv_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const {} +#endif +#endif + }; + // Depth-wise 2D convolution + struct npu_op_depthwise_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; +#ifdef __cplusplus + public: + CONSTEXPR npu_op_depthwise_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DEPTHWISE)), 
reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DEPTHWISE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DEPTHWISE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_depthwise_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_depthwise_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const {} +#endif +#endif + }; + // Pooling + struct npu_op_pool_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t pooling_mode : 3; // Pooling mode + uint32_t reserved1 : 13; +#ifdef __cplusplus + public: + npu_op_pool_t(NPU_NAMESPACE::pooling_mode _pooling_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + pooling_mode(static_cast(_pooling_mode) & ((1U << 3) - 1)), reserved1(0) + { + } + CONSTEXPR npu_op_pool_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pooling_mode(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_pool_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_pool_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::pooling_mode get_pooling_mode() const + { + return static_cast(pooling_mode); + } + CONSTEXPR npu_op_pool_t &set_pooling_mode(NPU_NAMESPACE::pooling_mode value) + { + pooling_mode = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "pooling_mode", + (pooling_mode < (sizeof(pooling_mode_str) / sizeof(pooling_mode_str[0])) ? 
+ pooling_mode_str[pooling_mode] : + "****"))); + } +#endif +#endif + }; + // Elementwise operation + struct npu_op_elementwise_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t elementwise_mode : 6; // Elementwise mode + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_op_elementwise_t(NPU_NAMESPACE::elementwise_mode _elementwise_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + elementwise_mode(static_cast(_elementwise_mode) & ((1U << 6) - 1)), reserved1(0) + { + } + CONSTEXPR npu_op_elementwise_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), elementwise_mode(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_elementwise_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_elementwise_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::elementwise_mode get_elementwise_mode() const + { + return static_cast(elementwise_mode); + } + CONSTEXPR npu_op_elementwise_t &set_elementwise_mode(NPU_NAMESPACE::elementwise_mode value) + { + elementwise_mode = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "elementwise_mode", + (elementwise_mode < (sizeof(elementwise_mode_str) / sizeof(elementwise_mode_str[0])) ? 
+ elementwise_mode_str[elementwise_mode] : + "****"))); + } +#endif +#endif + }; + // Queue new DMA for the given channel + struct npu_op_dma_start_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; +#ifdef __cplusplus + public: + CONSTEXPR npu_op_dma_start_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_START) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_START); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_dma_start_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_dma_start_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const {} +#endif +#endif + }; + // Wait for the DMA channel to have k or fewer active descriptors outstanding + struct npu_op_dma_wait_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t k : 4; // Number of outstanding descriptors + uint32_t reserved1 : 12; +#ifdef __cplusplus + public: + npu_op_dma_wait_t(uint32_t _k) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), k(_k & ((1U << 4) - 1)), reserved1(0) + { + } + CONSTEXPR npu_op_dma_wait_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), k(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_dma_wait_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_dma_wait_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_k() const + { + return static_cast(k); + } + CONSTEXPR npu_op_dma_wait_t &set_k(uint32_t value) + { + k = static_cast(value) & ((1U << 4) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) 
const + { + fields.push_back(std::make_pair("k", std::to_string(k))); + } +#endif +#endif + }; + // Wait for n or fewer kernel operations to be remaining + struct npu_op_kernel_wait_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t n : 2; // Number of kernel operations in range 0-3 + uint32_t reserved1 : 14; +#ifdef __cplusplus + public: + npu_op_kernel_wait_t(uint32_t _n) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), n(_n & ((1U << 2) - 1)), reserved1(0) + { + } + CONSTEXPR npu_op_kernel_wait_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), n(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_kernel_wait_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_kernel_wait_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_n() const + { + return static_cast(n); + } + CONSTEXPR npu_op_kernel_wait_t &set_n(uint32_t value) + { + n = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("n", std::to_string(n))); + } +#endif +#endif + }; + // Enable or disable PMU counting (debug feature only) + struct npu_op_pmu_mask_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t enable : 1; // Enable or disable PMU mask + uint32_t reserved1 : 15; +#ifdef __cplusplus + public: + npu_op_pmu_mask_t(uint32_t _enable) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), enable(_enable & ((1U << 1) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_op_pmu_mask_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), enable(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_pmu_mask_t 
&set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_pmu_mask_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_enable() const + { + return static_cast(enable); + } + CONSTEXPR npu_op_pmu_mask_t &set_enable(uint32_t value) + { + enable = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("enable", std::to_string(enable))); + } +#endif +#endif + }; + // IFM top pad + struct npu_set_ifm_pad_top_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t pad : 7; // IFM top pad + uint32_t reserved1 : 9; +#ifdef __cplusplus + public: + npu_set_ifm_pad_top_t(uint32_t _pad) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(_pad & ((1U << 7) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_pad_top_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_pad_top_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_pad_top_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_pad() const + { + return static_cast(pad); + } + CONSTEXPR npu_set_ifm_pad_top_t &set_pad(uint32_t value) + { + pad = static_cast(value) & ((1U << 7) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("pad", std::to_string(pad))); + } +#endif +#endif + }; + // IFM left pad + struct npu_set_ifm_pad_left_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t pad : 7; // IFM left pad + uint32_t reserved1 : 9; +#ifdef __cplusplus + public: + npu_set_ifm_pad_left_t(uint32_t _pad) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(_pad & ((1U << 7) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_pad_left_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { 
+ return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_pad_left_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_pad_left_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_pad() const + { + return static_cast(pad); + } + CONSTEXPR npu_set_ifm_pad_left_t &set_pad(uint32_t value) + { + pad = static_cast(value) & ((1U << 7) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("pad", std::to_string(pad))); + } +#endif +#endif + }; + // IFM right pad + struct npu_set_ifm_pad_right_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t pad : 8; // IFM right pad. Max value is 128 + uint32_t reserved1 : 8; +#ifdef __cplusplus + public: + npu_set_ifm_pad_right_t(uint32_t _pad) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(_pad & ((1U << 8) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_pad_right_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_pad_right_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_pad_right_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_pad() const + { + return static_cast(pad); + } + CONSTEXPR npu_set_ifm_pad_right_t &set_pad(uint32_t value) + { + pad = static_cast(value) & ((1U << 8) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("pad", std::to_string(pad))); + } +#endif +#endif + }; + // IFM bottom pad + struct npu_set_ifm_pad_bottom_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // 
control + uint32_t pad : 8; // IFM bottom pad. Max value is 128 + uint32_t reserved1 : 8; +#ifdef __cplusplus + public: + npu_set_ifm_pad_bottom_t(uint32_t _pad) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(_pad & ((1U << 8) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_pad_bottom_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_pad_bottom_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_pad_bottom_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_pad() const + { + return static_cast(pad); + } + CONSTEXPR npu_set_ifm_pad_bottom_t &set_pad(uint32_t value) + { + pad = static_cast(value) & ((1U << 8) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("pad", std::to_string(pad))); + } +#endif +#endif + }; + // Number of input channels for convolution + struct npu_set_ifm_depth_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t depth_m1 : 16; // Number of input channels for convolution +#ifdef __cplusplus + public: + npu_set_ifm_depth_m1_t(uint32_t _depth_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(_depth_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_depth_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_depth_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_depth_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl 
value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_depth_m1() const + { + return static_cast(depth_m1); + } + CONSTEXPR npu_set_ifm_depth_m1_t &set_depth_m1(uint32_t value) + { + depth_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("depth_m1", std::to_string(depth_m1))); + } +#endif +#endif + }; + // IFM Precision + struct npu_set_ifm_precision_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t activation_type : 1; // IFM type + uint32_t reserved1 : 1; + uint32_t activation_precision : 2; // IFM precision + uint32_t reserved2 : 2; + uint32_t activation_format : 2; // IFM format + uint32_t scale_mode : 2; // IFM scale mode + uint32_t reserved3 : 4; + uint32_t round_mode : 2; // IFM round mode +#ifdef __cplusplus + public: + npu_set_ifm_precision_t(NPU_NAMESPACE::activation_type _activation_type, + NPU_NAMESPACE::activation_precision _activation_precision, + NPU_NAMESPACE::activation_format _activation_format, + NPU_NAMESPACE::ifm_scale_mode _scale_mode, + NPU_NAMESPACE::round_mode _round_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + activation_type(static_cast(_activation_type) & ((1U << 1) - 1)), reserved1(0), + activation_precision(static_cast(_activation_precision) & ((1U << 2) - 1)), reserved2(0), + activation_format(static_cast(_activation_format) & ((1U << 2) - 1)), + scale_mode(static_cast(_scale_mode) & ((1U << 2) - 1)), reserved3(0), + round_mode(static_cast(_round_mode) & ((1U << 2) - 1)) + { + } + CONSTEXPR npu_set_ifm_precision_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), activation_type(0), reserved1(0), + activation_precision(0), reserved2(0), activation_format(0), scale_mode(0), reserved3(0), round_mode(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_precision_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_precision_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_type get_activation_type() const + { + return static_cast(activation_type); + } + CONSTEXPR npu_set_ifm_precision_t &set_activation_type(NPU_NAMESPACE::activation_type value) + { + activation_type = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_precision get_activation_precision() const + { + return static_cast(activation_precision); + } + 
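+    // Illustrative sketch, not part of the generated header: every set_* member returns
+    // *this, so a precision command can be composed by chaining and then emitted as a
+    // 32-bit command word through operator uint32_t(). "prec", "scale" and "rounding"
+    // are hypothetical names used only to show the call pattern.
+    //
+    //   npu_set_ifm_precision_t prec;        // defaults to NPU_SET_IFM_PRECISION / CMD0_CTRL
+    //   prec.set_scale_mode(scale)           // scale: an NPU_NAMESPACE::ifm_scale_mode value
+    //       .set_round_mode(rounding);       // rounding: an NPU_NAMESPACE::round_mode value
+    //   uint32_t word = prec;                // pack the bit-fields into a single word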
CONSTEXPR npu_set_ifm_precision_t &set_activation_precision(NPU_NAMESPACE::activation_precision value) + { + activation_precision = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_format get_activation_format() const + { + return static_cast(activation_format); + } + CONSTEXPR npu_set_ifm_precision_t &set_activation_format(NPU_NAMESPACE::activation_format value) + { + activation_format = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::ifm_scale_mode get_scale_mode() const + { + return static_cast(scale_mode); + } + CONSTEXPR npu_set_ifm_precision_t &set_scale_mode(NPU_NAMESPACE::ifm_scale_mode value) + { + scale_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::round_mode get_round_mode() const + { + return static_cast(round_mode); + } + CONSTEXPR npu_set_ifm_precision_t &set_round_mode(NPU_NAMESPACE::round_mode value) + { + round_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "activation_type", + (activation_type < (sizeof(activation_type_str) / sizeof(activation_type_str[0])) ? + activation_type_str[activation_type] : + "****"))); + fields.push_back(std::make_pair( + "activation_precision", + (activation_precision < (sizeof(activation_precision_str) / sizeof(activation_precision_str[0])) ? + activation_precision_str[activation_precision] : + "****"))); + fields.push_back(std::make_pair( + "activation_format", + (activation_format < (sizeof(activation_format_str) / sizeof(activation_format_str[0])) ? + activation_format_str[activation_format] : + "****"))); + fields.push_back(std::make_pair( + "scale_mode", + (scale_mode < (sizeof(ifm_scale_mode_str) / sizeof(ifm_scale_mode_str[0])) ? + ifm_scale_mode_str[scale_mode] : + "****"))); + fields.push_back(std::make_pair( + "round_mode", + (round_mode < (sizeof(round_mode_str) / sizeof(round_mode_str[0])) ? 
round_mode_str[round_mode] : + "****"))); + } +#endif +#endif + }; + // IFM upscale mode + struct npu_set_ifm_upscale_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t mode : 2; // IFM upscale mode + uint32_t reserved1 : 14; +#ifdef __cplusplus + public: + npu_set_ifm_upscale_t(NPU_NAMESPACE::ifm_upscale_mode _mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + mode(static_cast(_mode) & ((1U << 2) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_upscale_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mode(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_upscale_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_upscale_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::ifm_upscale_mode get_mode() const + { + return static_cast(mode); + } + CONSTEXPR npu_set_ifm_upscale_t &set_mode(NPU_NAMESPACE::ifm_upscale_mode value) + { + mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "mode", + (mode < (sizeof(ifm_upscale_mode_str) / sizeof(ifm_upscale_mode_str[0])) ? 
ifm_upscale_mode_str[mode] : + "****"))); + } +#endif +#endif + }; + // IFM zero point + struct npu_set_ifm_zero_point_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t zero_point : 16; // Zero point offset +#ifdef __cplusplus + public: + npu_set_ifm_zero_point_t(uint32_t _zero_point) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + zero_point(_zero_point & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_zero_point_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), zero_point(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_zero_point_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_zero_point_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_zero_point() const + { + return static_cast(zero_point); + } + CONSTEXPR npu_set_ifm_zero_point_t &set_zero_point(uint32_t value) + { + zero_point = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("zero_point", std::to_string(zero_point))); + } +#endif +#endif + }; + // IFM Tile 0 and tile 2 width + struct npu_set_ifm_width0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // IFM Tile 0 and tile 2 width +#ifdef __cplusplus + public: + npu_set_ifm_width0_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_width0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_width0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) 
+ { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_width0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ifm_width0_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // IFM Tile 0 height + struct npu_set_ifm_height0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // IFM Tile 0 height +#ifdef __cplusplus + public: + npu_set_ifm_height0_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_height0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_height0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_height0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ifm_height0_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // IFM Tile 1 height + struct npu_set_ifm_height1_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // IFM Tile 1 height +#ifdef __cplusplus + public: + npu_set_ifm_height1_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_height1_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + 
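+    // Illustrative sketch, not part of the generated header: fields with the _m1 suffix
+    // hold the dimension minus one, so a 224-row tile 1 would be programmed as 223.
+    // "rows" is a hypothetical variable used only for illustration.
+    //
+    //   uint32_t rows = 224;                                  // IFM tile 1 height in rows
+    //   uint32_t word = npu_set_ifm_height1_m1_t(rows - 1);   // encodes height - 1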
CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_height1_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_height1_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ifm_height1_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // End of IB0,IB1 buffers + struct npu_set_ifm_ib_end_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t ib_end : 6; // End of IB0,IB1 buffers in the SHRAM in KB units. Multiple of 2 + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_set_ifm_ib_end_t(uint32_t _ib_end) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ib_end(_ib_end & ((1U << 6) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ifm_ib_end_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ib_end(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_ib_end_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_ib_end_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_ib_end() const + { + return static_cast(ib_end); + } + CONSTEXPR npu_set_ifm_ib_end_t &set_ib_end(uint32_t value) + { + ib_end = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("ib_end", std::to_string(ib_end))); + } +#endif +#endif + }; + // Index n for IFM access + struct npu_set_ifm_region_t 
+ { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Region number n + uint32_t reserved1 : 12; + uint32_t custom_dma_cs : 1; // Custom DMA select +#ifdef __cplusplus + public: + npu_set_ifm_region_t(uint32_t _region, NPU_NAMESPACE::custom_dma_cs _custom_dma_cs) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0), custom_dma_cs(static_cast(_custom_dma_cs) & ((1U << 1) - 1)) + { + } + CONSTEXPR npu_set_ifm_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0), custom_dma_cs(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_ifm_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::custom_dma_cs get_custom_dma_cs() const + { + return static_cast(custom_dma_cs); + } + CONSTEXPR npu_set_ifm_region_t &set_custom_dma_cs(NPU_NAMESPACE::custom_dma_cs value) + { + custom_dma_cs = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + fields.push_back(std::make_pair( + "custom_dma_cs", + (custom_dma_cs < (sizeof(custom_dma_cs_str) / sizeof(custom_dma_cs_str[0])) ? 
+ custom_dma_cs_str[custom_dma_cs] : + "****"))); + } +#endif +#endif + }; + // Output feature map width + struct npu_set_ofm_width_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // Output feature map width +#ifdef __cplusplus + public: + npu_set_ofm_width_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_width_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_width_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_width_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ofm_width_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // Output feature map height + struct npu_set_ofm_height_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // Output feature map height +#ifdef __cplusplus + public: + npu_set_ofm_height_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_height_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_height_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = 
static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_height_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ofm_height_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // Output feature map depth + struct npu_set_ofm_depth_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t depth_m1 : 16; // Output feature map depth +#ifdef __cplusplus + public: + npu_set_ofm_depth_m1_t(uint32_t _depth_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(_depth_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_depth_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_depth_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_depth_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_depth_m1() const + { + return static_cast(depth_m1); + } + CONSTEXPR npu_set_ofm_depth_m1_t &set_depth_m1(uint32_t value) + { + depth_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("depth_m1", std::to_string(depth_m1))); + } +#endif +#endif + }; + // OFM Precision + struct npu_set_ofm_precision_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t activation_type : 1; // OFM type + uint32_t activation_precision : 2; // OFM precision + uint32_t reserved1 : 3; + uint32_t activation_format : 2; // OFM format + uint32_t scale_mode : 1; // OFM scale mode + uint32_t reserved2 : 5; + uint32_t round_mode : 2; // OFM round mode +#ifdef __cplusplus + public: + npu_set_ofm_precision_t(NPU_NAMESPACE::activation_type _activation_type, + NPU_NAMESPACE::activation_precision _activation_precision, + NPU_NAMESPACE::activation_format _activation_format, + NPU_NAMESPACE::ofm_scale_mode _scale_mode, + 
NPU_NAMESPACE::round_mode _round_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + activation_type(static_cast(_activation_type) & ((1U << 1) - 1)), + activation_precision(static_cast(_activation_precision) & ((1U << 2) - 1)), reserved1(0), + activation_format(static_cast(_activation_format) & ((1U << 2) - 1)), + scale_mode(static_cast(_scale_mode) & ((1U << 1) - 1)), reserved2(0), + round_mode(static_cast(_round_mode) & ((1U << 2) - 1)) + { + } + CONSTEXPR npu_set_ofm_precision_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), activation_type(0), + activation_precision(0), reserved1(0), activation_format(0), scale_mode(0), reserved2(0), round_mode(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_precision_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_precision_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_type get_activation_type() const + { + return static_cast(activation_type); + } + CONSTEXPR npu_set_ofm_precision_t &set_activation_type(NPU_NAMESPACE::activation_type value) + { + activation_type = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_precision get_activation_precision() const + { + return static_cast(activation_precision); + } + CONSTEXPR npu_set_ofm_precision_t &set_activation_precision(NPU_NAMESPACE::activation_precision value) + { + activation_precision = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_format get_activation_format() const + { + return static_cast(activation_format); + } + CONSTEXPR npu_set_ofm_precision_t &set_activation_format(NPU_NAMESPACE::activation_format value) + { + activation_format = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::ofm_scale_mode get_scale_mode() const + { + return static_cast(scale_mode); + } + CONSTEXPR npu_set_ofm_precision_t &set_scale_mode(NPU_NAMESPACE::ofm_scale_mode value) + { + scale_mode = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::round_mode get_round_mode() const + { + return static_cast(round_mode); + } + CONSTEXPR npu_set_ofm_precision_t &set_round_mode(NPU_NAMESPACE::round_mode value) + { + round_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "activation_type", + (activation_type < (sizeof(activation_type_str) / sizeof(activation_type_str[0])) ? 
+ activation_type_str[activation_type] : + "****"))); + fields.push_back(std::make_pair( + "activation_precision", + (activation_precision < (sizeof(activation_precision_str) / sizeof(activation_precision_str[0])) ? + activation_precision_str[activation_precision] : + "****"))); + fields.push_back(std::make_pair( + "activation_format", + (activation_format < (sizeof(activation_format_str) / sizeof(activation_format_str[0])) ? + activation_format_str[activation_format] : + "****"))); + fields.push_back(std::make_pair( + "scale_mode", + (scale_mode < (sizeof(ofm_scale_mode_str) / sizeof(ofm_scale_mode_str[0])) ? + ofm_scale_mode_str[scale_mode] : + "****"))); + fields.push_back(std::make_pair( + "round_mode", + (round_mode < (sizeof(round_mode_str) / sizeof(round_mode_str[0])) ? round_mode_str[round_mode] : + "****"))); + } +#endif +#endif + }; + // OFM block width + struct npu_set_ofm_blk_width_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 6; // OFM block width + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_set_ofm_blk_width_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 6) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ofm_blk_width_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_blk_width_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_blk_width_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ofm_blk_width_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // OFM block height + struct npu_set_ofm_blk_height_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 5; // OFM block height + uint32_t reserved1 : 11; +#ifdef __cplusplus + public: + npu_set_ofm_blk_height_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 5) - 
1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ofm_blk_height_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_blk_height_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_blk_height_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ofm_blk_height_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 5) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // OFM block depth + struct npu_set_ofm_blk_depth_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t depth_m1 : 7; // OFM block depth + uint32_t reserved1 : 9; +#ifdef __cplusplus + public: + npu_set_ofm_blk_depth_m1_t(uint32_t _depth_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(_depth_m1 & ((1U << 7) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ofm_blk_depth_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_blk_depth_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_blk_depth_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_depth_m1() const + { + return static_cast(depth_m1); + } + CONSTEXPR npu_set_ofm_blk_depth_m1_t &set_depth_m1(uint32_t value) + { 
+ depth_m1 = static_cast(value) & ((1U << 7) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("depth_m1", std::to_string(depth_m1))); + } +#endif +#endif + }; + // OFM zero point + struct npu_set_ofm_zero_point_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t zero_point : 16; // Zero point offset +#ifdef __cplusplus + public: + npu_set_ofm_zero_point_t(uint32_t _zero_point) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + zero_point(_zero_point & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_zero_point_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), zero_point(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_zero_point_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_zero_point_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_zero_point() const + { + return static_cast(zero_point); + } + CONSTEXPR npu_set_ofm_zero_point_t &set_zero_point(uint32_t value) + { + zero_point = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("zero_point", std::to_string(zero_point))); + } +#endif +#endif + }; + // OFM Tile 0 and tile 2 width + struct npu_set_ofm_width0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // OFM Tile 0 and tile 2 width +#ifdef __cplusplus + public: + npu_set_ofm_width0_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_width0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } 
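// Illustrative sketch (not part of the original patch): each generated
// npu_set_* struct in this header packs a single 32-bit CMD0 command word as
// bit-fields (10-bit opcode, 4 reserved bits, 2-bit control, then the payload
// field), and the operator uint32_t() above serialises it with std::memcpy.
// Assuming a caller that collects words into a std::vector<uint32_t> named
// command_stream (a hypothetical name for this sketch), a field-setting
// command could be built and emitted roughly like this:
//
//     npu_set_ofm_width0_m1_t cmd(ofm_width - 1);   // constructor masks the payload to 16 bits
//     if (cmd.valid()) {                            // opcode/control sanity check
//         command_stream.push_back(cmd);            // implicit conversion to the raw uint32_t word
//     }
//
// `ofm_width` and `command_stream` are assumptions used only for this sketch.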
+ CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_width0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_width0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ofm_width0_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // OFM Tile 0 height + struct npu_set_ofm_height0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // OFM Tile 0 height +#ifdef __cplusplus + public: + npu_set_ofm_height0_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_height0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_height0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_height0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ofm_height0_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // OFM Tile 1 height + struct npu_set_ofm_height1_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // OFM Tile 1 height +#ifdef __cplusplus + public: + npu_set_ofm_height1_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR 
npu_set_ofm_height1_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_height1_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_height1_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ofm_height1_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // Index n for OFM access + struct npu_set_ofm_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Index n for OFM access + uint32_t reserved1 : 12; + uint32_t custom_dma_cs : 1; // Custom DMA select +#ifdef __cplusplus + public: + npu_set_ofm_region_t(uint32_t _region, NPU_NAMESPACE::custom_dma_cs _custom_dma_cs) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0), custom_dma_cs(static_cast(_custom_dma_cs) & ((1U << 1) - 1)) + { + } + CONSTEXPR npu_set_ofm_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0), custom_dma_cs(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR 
npu_set_ofm_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::custom_dma_cs get_custom_dma_cs() const + { + return static_cast(custom_dma_cs); + } + CONSTEXPR npu_set_ofm_region_t &set_custom_dma_cs(NPU_NAMESPACE::custom_dma_cs value) + { + custom_dma_cs = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + fields.push_back(std::make_pair( + "custom_dma_cs", + (custom_dma_cs < (sizeof(custom_dma_cs_str) / sizeof(custom_dma_cs_str[0])) ? + custom_dma_cs_str[custom_dma_cs] : + "****"))); + } +#endif +#endif + }; + // Kernel width + struct npu_set_kernel_width_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // Kernel width +#ifdef __cplusplus + public: + npu_set_kernel_width_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_kernel_width_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_kernel_width_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_kernel_width_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_kernel_width_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // Kernel height + struct npu_set_kernel_height_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // Kernel height +#ifdef __cplusplus + public: + npu_set_kernel_height_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_kernel_height_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1)), reserved0(0), + 
control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_kernel_height_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_kernel_height_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_kernel_height_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // Kernel stride + struct npu_set_kernel_stride_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t stride_x_lsb : 1; // Stride x LSB. (kernel_x_stride - 1)[0] + uint32_t stride_y_lsb : 1; // Stride y LSB. (kernel_y_stride - 1)[0] + uint32_t weight_order : 1; // Weight ordering mode + uint32_t dilation_x : 1; // Kernel x dilation + uint32_t dilation_y : 1; // Kernel y dilation + uint32_t decomposition : 1; // Kernel decomposition + uint32_t stride_x_msb : 1; // Stride x MSB. (kernel_x_stride - 1) >> 1 + uint32_t reserved1 : 2; + uint32_t stride_y_msb : 1; // Stride y MSB. 
(kernel_y_stride - 1) >> 1 + uint32_t reserved2 : 6; +#ifdef __cplusplus + public: + npu_set_kernel_stride_t(uint32_t _stride_x_lsb, + uint32_t _stride_y_lsb, + NPU_NAMESPACE::weight_order _weight_order, + NPU_NAMESPACE::kernel_dilation _dilation_x, + NPU_NAMESPACE::kernel_dilation _dilation_y, + NPU_NAMESPACE::kernel_decomposition _decomposition, + uint32_t _stride_x_msb, + uint32_t _stride_y_msb) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + stride_x_lsb(_stride_x_lsb & ((1U << 1) - 1)), stride_y_lsb(_stride_y_lsb & ((1U << 1) - 1)), + weight_order(static_cast(_weight_order) & ((1U << 1) - 1)), + dilation_x(static_cast(_dilation_x) & ((1U << 1) - 1)), + dilation_y(static_cast(_dilation_y) & ((1U << 1) - 1)), + decomposition(static_cast(_decomposition) & ((1U << 1) - 1)), + stride_x_msb(_stride_x_msb & ((1U << 1) - 1)), reserved1(0), stride_y_msb(_stride_y_msb & ((1U << 1) - 1)), + reserved2(0) + { + } + CONSTEXPR npu_set_kernel_stride_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), stride_x_lsb(0), stride_y_lsb(0), + weight_order(0), dilation_x(0), dilation_y(0), decomposition(0), stride_x_msb(0), reserved1(0), + stride_y_msb(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_kernel_stride_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_kernel_stride_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_stride_x_lsb() const + { + return static_cast(stride_x_lsb); + } + CONSTEXPR npu_set_kernel_stride_t &set_stride_x_lsb(uint32_t value) + { + stride_x_lsb = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR uint32_t get_stride_y_lsb() const + { + return static_cast(stride_y_lsb); + } + CONSTEXPR npu_set_kernel_stride_t &set_stride_y_lsb(uint32_t value) + { + stride_y_lsb = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::weight_order get_weight_order() const + { + return static_cast(weight_order); + } + CONSTEXPR npu_set_kernel_stride_t &set_weight_order(NPU_NAMESPACE::weight_order value) + { + weight_order = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::kernel_dilation get_dilation_x() const + { + return static_cast(dilation_x); + } + CONSTEXPR npu_set_kernel_stride_t &set_dilation_x(NPU_NAMESPACE::kernel_dilation value) + { + dilation_x = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::kernel_dilation get_dilation_y() const + { + return static_cast(dilation_y); + } + CONSTEXPR npu_set_kernel_stride_t 
&set_dilation_y(NPU_NAMESPACE::kernel_dilation value) + { + dilation_y = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::kernel_decomposition get_decomposition() const + { + return static_cast(decomposition); + } + CONSTEXPR npu_set_kernel_stride_t &set_decomposition(NPU_NAMESPACE::kernel_decomposition value) + { + decomposition = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR uint32_t get_stride_x_msb() const + { + return static_cast(stride_x_msb); + } + CONSTEXPR npu_set_kernel_stride_t &set_stride_x_msb(uint32_t value) + { + stride_x_msb = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR uint32_t get_stride_y_msb() const + { + return static_cast(stride_y_msb); + } + CONSTEXPR npu_set_kernel_stride_t &set_stride_y_msb(uint32_t value) + { + stride_y_msb = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("stride_x_lsb", std::to_string(stride_x_lsb))); + fields.push_back(std::make_pair("stride_y_lsb", std::to_string(stride_y_lsb))); + fields.push_back(std::make_pair( + "weight_order", + (weight_order < (sizeof(weight_order_str) / sizeof(weight_order_str[0])) ? + weight_order_str[weight_order] : + "****"))); + fields.push_back(std::make_pair( + "dilation_x", + (dilation_x < (sizeof(kernel_dilation_str) / sizeof(kernel_dilation_str[0])) ? + kernel_dilation_str[dilation_x] : + "****"))); + fields.push_back(std::make_pair( + "dilation_y", + (dilation_y < (sizeof(kernel_dilation_str) / sizeof(kernel_dilation_str[0])) ? + kernel_dilation_str[dilation_y] : + "****"))); + fields.push_back(std::make_pair( + "decomposition", + (decomposition < (sizeof(kernel_decomposition_str) / sizeof(kernel_decomposition_str[0])) ? 
+ kernel_decomposition_str[decomposition] : + "****"))); + fields.push_back(std::make_pair("stride_x_msb", std::to_string(stride_x_msb))); + fields.push_back(std::make_pair("stride_y_msb", std::to_string(stride_y_msb))); + } +#endif +#endif + }; + // Accumulator format + struct npu_set_acc_format_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t acc_format : 2; // Accumulator format + uint32_t reserved1 : 14; +#ifdef __cplusplus + public: + npu_set_acc_format_t(NPU_NAMESPACE::acc_format _acc_format) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + acc_format(static_cast(_acc_format) & ((1U << 2) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_acc_format_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), acc_format(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_acc_format_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_acc_format_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::acc_format get_acc_format() const + { + return static_cast(acc_format); + } + CONSTEXPR npu_set_acc_format_t &set_acc_format(NPU_NAMESPACE::acc_format value) + { + acc_format = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "acc_format", + (acc_format < (sizeof(acc_format_str) / sizeof(acc_format_str[0])) ? acc_format_str[acc_format] : + "****"))); + } +#endif +#endif + }; + // Activation function and clip range + struct npu_set_activation_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t activation_function : 5; // Activation function (before table lookup) + uint32_t reserved1 : 7; + uint32_t activation_clip_range : 3; // Activation clip range. 
This must be set to 0 if table lookup is not used + uint32_t reserved2 : 1; +#ifdef __cplusplus + public: + npu_set_activation_t(NPU_NAMESPACE::activation_function _activation_function, + NPU_NAMESPACE::activation_clip_range _activation_clip_range) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + activation_function(static_cast(_activation_function) & ((1U << 5) - 1)), reserved1(0), + activation_clip_range(static_cast(_activation_clip_range) & ((1U << 3) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_activation_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), activation_function(0), reserved1(0), + activation_clip_range(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_activation_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_activation_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_function get_activation_function() const + { + return static_cast(activation_function); + } + CONSTEXPR npu_set_activation_t &set_activation_function(NPU_NAMESPACE::activation_function value) + { + activation_function = static_cast(value) & ((1U << 5) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_clip_range get_activation_clip_range() const + { + return static_cast(activation_clip_range); + } + CONSTEXPR npu_set_activation_t &set_activation_clip_range(NPU_NAMESPACE::activation_clip_range value) + { + activation_clip_range = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "activation_function", + (activation_function < (sizeof(activation_function_str) / sizeof(activation_function_str[0])) ? + activation_function_str[activation_function] : + "****"))); + fields.push_back(std::make_pair( + "activation_clip_range", + (activation_clip_range < (sizeof(activation_clip_range_str) / sizeof(activation_clip_range_str[0])) ? 
+ activation_clip_range_str[activation_clip_range] : + "****"))); + } +#endif +#endif + }; + // Lower bound clip + struct npu_set_activation_min_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t clip_boundary : 16; // Clip boundary for OFM activations +#ifdef __cplusplus + public: + npu_set_activation_min_t(uint32_t _clip_boundary) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + clip_boundary(_clip_boundary & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_activation_min_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), clip_boundary(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_activation_min_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_activation_min_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_clip_boundary() const + { + return static_cast(clip_boundary); + } + CONSTEXPR npu_set_activation_min_t &set_clip_boundary(uint32_t value) + { + clip_boundary = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("clip_boundary", std::to_string(clip_boundary))); + } +#endif +#endif + }; + // Upper bound clip + struct npu_set_activation_max_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t clip_boundary : 16; // Clip boundary for OFM activations +#ifdef __cplusplus + public: + npu_set_activation_max_t(uint32_t _clip_boundary) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + clip_boundary(_clip_boundary & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_activation_max_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), clip_boundary(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return 
static_cast(opcode); + } + CONSTEXPR npu_set_activation_max_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_activation_max_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_clip_boundary() const + { + return static_cast(clip_boundary); + } + CONSTEXPR npu_set_activation_max_t &set_clip_boundary(uint32_t value) + { + clip_boundary = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("clip_boundary", std::to_string(clip_boundary))); + } +#endif +#endif + }; + // Index n for weight stream access + struct npu_set_weight_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Index n for weight stream access + uint32_t reserved1 : 12; + uint32_t custom_dma_cs : 1; // Custom DMA select +#ifdef __cplusplus + public: + npu_set_weight_region_t(uint32_t _region, NPU_NAMESPACE::custom_dma_cs _custom_dma_cs) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0), custom_dma_cs(static_cast(_custom_dma_cs) & ((1U << 1) - 1)) + { + } + CONSTEXPR npu_set_weight_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0), custom_dma_cs(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_weight_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_weight_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_weight_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::custom_dma_cs get_custom_dma_cs() const + { + return static_cast(custom_dma_cs); + } + CONSTEXPR npu_set_weight_region_t &set_custom_dma_cs(NPU_NAMESPACE::custom_dma_cs value) + { + custom_dma_cs = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + fields.push_back(std::make_pair( + "custom_dma_cs", + (custom_dma_cs < (sizeof(custom_dma_cs_str) / 
sizeof(custom_dma_cs_str[0])) ? + custom_dma_cs_str[custom_dma_cs] : + "****"))); + } +#endif +#endif + }; + // Index n for scale stream access + struct npu_set_scale_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Index n for scale stream access + uint32_t reserved1 : 12; + uint32_t custom_dma_cs : 1; // Custom DMA select +#ifdef __cplusplus + public: + npu_set_scale_region_t(uint32_t _region, NPU_NAMESPACE::custom_dma_cs _custom_dma_cs) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0), custom_dma_cs(static_cast(_custom_dma_cs) & ((1U << 1) - 1)) + { + } + CONSTEXPR npu_set_scale_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0), custom_dma_cs(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_scale_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_scale_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_scale_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::custom_dma_cs get_custom_dma_cs() const + { + return static_cast(custom_dma_cs); + } + CONSTEXPR npu_set_scale_region_t &set_custom_dma_cs(NPU_NAMESPACE::custom_dma_cs value) + { + custom_dma_cs = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + fields.push_back(std::make_pair( + "custom_dma_cs", + (custom_dma_cs < (sizeof(custom_dma_cs_str) / sizeof(custom_dma_cs_str[0])) ? + custom_dma_cs_str[custom_dma_cs] : + "****"))); + } +#endif +#endif + }; + // Start of ACC0,ACC1 buffers + struct npu_set_ab_start_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t ab_start : 6; // Start of ACC0,ACC1 buffers in the SHRAM in KB units. 
Multiple of 2 + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_set_ab_start_t(uint32_t _ab_start) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ab_start(_ab_start & ((1U << 6) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ab_start_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ab_start(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ab_start_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ab_start_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_ab_start() const + { + return static_cast(ab_start); + } + CONSTEXPR npu_set_ab_start_t &set_ab_start(uint32_t value) + { + ab_start = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("ab_start", std::to_string(ab_start))); + } +#endif +#endif + }; + // Block number of blocks dependency + struct npu_set_blockdep_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t blockdep : 2; // Block number of blocks dependency between kernel operations + uint32_t reserved1 : 14; +#ifdef __cplusplus + public: + npu_set_blockdep_t(uint32_t _blockdep) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), blockdep(_blockdep & ((1U << 2) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_blockdep_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), blockdep(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_blockdep_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_blockdep_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = 
static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_blockdep() const + { + return static_cast(blockdep); + } + CONSTEXPR npu_set_blockdep_t &set_blockdep(uint32_t value) + { + blockdep = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("blockdep", std::to_string(blockdep))); + } +#endif +#endif + }; + // DMA0 source region + struct npu_set_dma0_src_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Region number + uint32_t reserved1 : 5; + uint32_t region_mode : 1; // Region mode + uint32_t stride_mode : 2; // Stride mode + uint32_t reserved2 : 4; + uint32_t custom_dma_cs : 1; // Custom DMA select +#ifdef __cplusplus + public: + npu_set_dma0_src_region_t(uint32_t _region, + NPU_NAMESPACE::dma_region_mode _region_mode, + NPU_NAMESPACE::dma_stride_mode _stride_mode, + NPU_NAMESPACE::custom_dma_cs _custom_dma_cs) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + region(_region & ((1U << 3) - 1)), reserved1(0), + region_mode(static_cast(_region_mode) & ((1U << 1) - 1)), + stride_mode(static_cast(_stride_mode) & ((1U << 2) - 1)), reserved2(0), + custom_dma_cs(static_cast(_custom_dma_cs) & ((1U << 1) - 1)) + { + } + CONSTEXPR npu_set_dma0_src_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0), region_mode(0), + stride_mode(0), reserved2(0), custom_dma_cs(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_src_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_src_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_dma0_src_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_region_mode get_region_mode() const + { + return static_cast(region_mode); + } + CONSTEXPR npu_set_dma0_src_region_t &set_region_mode(NPU_NAMESPACE::dma_region_mode value) + { + region_mode = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_stride_mode get_stride_mode() const + { + return static_cast(stride_mode); + } + CONSTEXPR npu_set_dma0_src_region_t &set_stride_mode(NPU_NAMESPACE::dma_stride_mode value) + { + stride_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR 
NPU_NAMESPACE::custom_dma_cs get_custom_dma_cs() const + { + return static_cast(custom_dma_cs); + } + CONSTEXPR npu_set_dma0_src_region_t &set_custom_dma_cs(NPU_NAMESPACE::custom_dma_cs value) + { + custom_dma_cs = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + fields.push_back(std::make_pair( + "region_mode", + (region_mode < (sizeof(dma_region_mode_str) / sizeof(dma_region_mode_str[0])) ? + dma_region_mode_str[region_mode] : + "****"))); + fields.push_back(std::make_pair( + "stride_mode", + (stride_mode < (sizeof(dma_stride_mode_str) / sizeof(dma_stride_mode_str[0])) ? + dma_stride_mode_str[stride_mode] : + "****"))); + fields.push_back(std::make_pair( + "custom_dma_cs", + (custom_dma_cs < (sizeof(custom_dma_cs_str) / sizeof(custom_dma_cs_str[0])) ? + custom_dma_cs_str[custom_dma_cs] : + "****"))); + } +#endif +#endif + }; + // DMA0 destination region + struct npu_set_dma0_dst_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Region number if region_mode is region_mode_external. Else core mask to write to (bit k + // set for core k=0,1) + uint32_t reserved1 : 5; + uint32_t region_mode : 1; // Region mode + uint32_t stride_mode : 2; // Stride mode + uint32_t reserved2 : 4; + uint32_t custom_dma_cs : 1; // Custom DMA select +#ifdef __cplusplus + public: + npu_set_dma0_dst_region_t(uint32_t _region, + NPU_NAMESPACE::dma_region_mode _region_mode, + NPU_NAMESPACE::dma_stride_mode _stride_mode, + NPU_NAMESPACE::custom_dma_cs _custom_dma_cs) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + region(_region & ((1U << 3) - 1)), reserved1(0), + region_mode(static_cast(_region_mode) & ((1U << 1) - 1)), + stride_mode(static_cast(_stride_mode) & ((1U << 2) - 1)), reserved2(0), + custom_dma_cs(static_cast(_custom_dma_cs) & ((1U << 1) - 1)) + { + } + CONSTEXPR npu_set_dma0_dst_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0), region_mode(0), + stride_mode(0), reserved2(0), custom_dma_cs(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_region(uint32_t value) + { 
+ region = static_cast(value) & ((1U << 3) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_region_mode get_region_mode() const + { + return static_cast(region_mode); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_region_mode(NPU_NAMESPACE::dma_region_mode value) + { + region_mode = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_stride_mode get_stride_mode() const + { + return static_cast(stride_mode); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_stride_mode(NPU_NAMESPACE::dma_stride_mode value) + { + stride_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::custom_dma_cs get_custom_dma_cs() const + { + return static_cast(custom_dma_cs); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_custom_dma_cs(NPU_NAMESPACE::custom_dma_cs value) + { + custom_dma_cs = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + fields.push_back(std::make_pair( + "region_mode", + (region_mode < (sizeof(dma_region_mode_str) / sizeof(dma_region_mode_str[0])) ? + dma_region_mode_str[region_mode] : + "****"))); + fields.push_back(std::make_pair( + "stride_mode", + (stride_mode < (sizeof(dma_stride_mode_str) / sizeof(dma_stride_mode_str[0])) ? + dma_stride_mode_str[stride_mode] : + "****"))); + fields.push_back(std::make_pair( + "custom_dma_cs", + (custom_dma_cs < (sizeof(custom_dma_cs_str) / sizeof(custom_dma_cs_str[0])) ? + custom_dma_cs_str[custom_dma_cs] : + "****"))); + } +#endif +#endif + }; + // Size of second dimension for 2D/3D transfers + struct npu_set_dma0_size0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t size : 16; // Size of second dimension for 2D/3D transfers +#ifdef __cplusplus + public: + npu_set_dma0_size0_t(uint32_t _size) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), size(_size & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_dma0_size0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), size(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_size0_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_size0_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_size() const + { + return static_cast(size); + } + CONSTEXPR npu_set_dma0_size0_t &set_size(uint32_t value) + { + size = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE 
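// Illustrative sketch (not part of the original patch): when this header is
// compiled with NPU_DISASSEMBLE defined, each command struct also exposes a
// disassemble() hook that appends (field-name, value) string pairs, which a
// debug build can use to pretty-print a decoded command word. Assuming the
// fields parameter is a vector of string pairs, a caller might do:
//
//     npu_set_dma0_size0_t cmd(rows);               // `rows` is a placeholder payload
//     std::vector<std::pair<std::string, std::string>> fields;
//     cmd.disassemble(fields);                      // collects e.g. ("size", "<rows>")
//     for (const auto &f : fields)
//         printf("%s=%s\n", f.first.c_str(), f.second.c_str());
//
// This is a sketch of one possible use, not code from the patch itself.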
+ void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("size", std::to_string(size))); + } +#endif +#endif + }; + // Size of third dimension for 3D transfers + struct npu_set_dma0_size1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t size : 16; // Size of third dimension for 3D transfers +#ifdef __cplusplus + public: + npu_set_dma0_size1_t(uint32_t _size) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), size(_size & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_dma0_size1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), size(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_size1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_size1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_size() const + { + return static_cast(size); + } + CONSTEXPR npu_set_dma0_size1_t &set_size(uint32_t value) + { + size = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("size", std::to_string(size))); + } +#endif +#endif + }; + // IFM2 broadcast configuration + struct npu_set_ifm2_broadcast_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t + broadcast_h : 1; // Broadcast H dimension (if set then any accesses to IFM2 sets y=0 and IFM2 height=1) + uint32_t broadcast_w : 1; // Broadcast W dimension (if set then any accesses to IFM2 sets x=0 and IFM2 width=1) + uint32_t broadcast_c : 1; // Broadcast C dimension (if set then any accesses to IFM2 sets c=0 and IFM2 depth=1) + uint32_t reserved1 : 3; + uint32_t operand_order : 1; // Operand order + uint32_t broadcast_constant : 1; // Broadcast constant given by NPU_SET_IFM2_SCALAR and so ignore BH, BW and BC + uint32_t reserved2 : 8; +#ifdef __cplusplus + public: + npu_set_ifm2_broadcast_t(NPU_NAMESPACE::broadcast_mode _broadcast_h, + NPU_NAMESPACE::broadcast_mode _broadcast_w, + NPU_NAMESPACE::broadcast_mode _broadcast_c, + NPU_NAMESPACE::ifm2_operand_order _operand_order, + NPU_NAMESPACE::broadcast_mode _broadcast_constant) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + broadcast_h(static_cast(_broadcast_h) & ((1U << 1) - 1)), + broadcast_w(static_cast(_broadcast_w) & ((1U << 1) - 1)), + 
broadcast_c(static_cast(_broadcast_c) & ((1U << 1) - 1)), reserved1(0), + operand_order(static_cast(_operand_order) & ((1U << 1) - 1)), + broadcast_constant(static_cast(_broadcast_constant) & ((1U << 1) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_ifm2_broadcast_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), broadcast_h(0), broadcast_w(0), + broadcast_c(0), reserved1(0), operand_order(0), broadcast_constant(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::broadcast_mode get_broadcast_h() const + { + return static_cast(broadcast_h); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_broadcast_h(NPU_NAMESPACE::broadcast_mode value) + { + broadcast_h = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::broadcast_mode get_broadcast_w() const + { + return static_cast(broadcast_w); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_broadcast_w(NPU_NAMESPACE::broadcast_mode value) + { + broadcast_w = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::broadcast_mode get_broadcast_c() const + { + return static_cast(broadcast_c); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_broadcast_c(NPU_NAMESPACE::broadcast_mode value) + { + broadcast_c = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::ifm2_operand_order get_operand_order() const + { + return static_cast(operand_order); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_operand_order(NPU_NAMESPACE::ifm2_operand_order value) + { + operand_order = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::broadcast_mode get_broadcast_constant() const + { + return static_cast(broadcast_constant); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_broadcast_constant(NPU_NAMESPACE::broadcast_mode value) + { + broadcast_constant = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "broadcast_h", + (broadcast_h < (sizeof(broadcast_mode_str) / sizeof(broadcast_mode_str[0])) ? + broadcast_mode_str[broadcast_h] : + "****"))); + fields.push_back(std::make_pair( + "broadcast_w", + (broadcast_w < (sizeof(broadcast_mode_str) / sizeof(broadcast_mode_str[0])) ? + broadcast_mode_str[broadcast_w] : + "****"))); + fields.push_back(std::make_pair( + "broadcast_c", + (broadcast_c < (sizeof(broadcast_mode_str) / sizeof(broadcast_mode_str[0])) ? 
+ broadcast_mode_str[broadcast_c] : + "****"))); + fields.push_back(std::make_pair( + "operand_order", + (operand_order < (sizeof(ifm2_operand_order_str) / sizeof(ifm2_operand_order_str[0])) ? + ifm2_operand_order_str[operand_order] : + "****"))); + fields.push_back(std::make_pair( + "broadcast_constant", + (broadcast_constant < (sizeof(broadcast_mode_str) / sizeof(broadcast_mode_str[0])) ? + broadcast_mode_str[broadcast_constant] : + "****"))); + } +#endif +#endif + }; + // IFM2 scalar value + struct npu_set_ifm2_scalar_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t scalar : 16; // int16 or uint16 depending on ifm2_precision.type +#ifdef __cplusplus + public: + npu_set_ifm2_scalar_t(uint32_t _scalar) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), scalar(_scalar & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_scalar_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), scalar(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_scalar_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_scalar_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_scalar() const + { + return static_cast(scalar); + } + CONSTEXPR npu_set_ifm2_scalar_t &set_scalar(uint32_t value) + { + scalar = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("scalar", std::to_string(scalar))); + } +#endif +#endif + }; + // IFM2 Precision + struct npu_set_ifm2_precision_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t activation_type : 1; // IFM type - MUST MATCH IFM + uint32_t reserved1 : 1; + uint32_t activation_precision : 2; // IFM precision - MUST MATCH IFM + uint32_t reserved2 : 2; + uint32_t activation_format : 2; // IFM format + uint32_t reserved3 : 8; +#ifdef __cplusplus + public: + npu_set_ifm2_precision_t(NPU_NAMESPACE::activation_type _activation_type, + NPU_NAMESPACE::activation_precision _activation_precision, + NPU_NAMESPACE::activation_format _activation_format) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + activation_type(static_cast(_activation_type) & ((1U << 1) - 1)), reserved1(0), + activation_precision(static_cast(_activation_precision) & ((1U << 2) - 1)), reserved2(0), + 
activation_format(static_cast(_activation_format) & ((1U << 2) - 1)), reserved3(0) + { + } + CONSTEXPR npu_set_ifm2_precision_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), activation_type(0), reserved1(0), + activation_precision(0), reserved2(0), activation_format(0), reserved3(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_precision_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_precision_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_type get_activation_type() const + { + return static_cast(activation_type); + } + CONSTEXPR npu_set_ifm2_precision_t &set_activation_type(NPU_NAMESPACE::activation_type value) + { + activation_type = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_precision get_activation_precision() const + { + return static_cast(activation_precision); + } + CONSTEXPR npu_set_ifm2_precision_t &set_activation_precision(NPU_NAMESPACE::activation_precision value) + { + activation_precision = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_format get_activation_format() const + { + return static_cast(activation_format); + } + CONSTEXPR npu_set_ifm2_precision_t &set_activation_format(NPU_NAMESPACE::activation_format value) + { + activation_format = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "activation_type", + (activation_type < (sizeof(activation_type_str) / sizeof(activation_type_str[0])) ? + activation_type_str[activation_type] : + "****"))); + fields.push_back(std::make_pair( + "activation_precision", + (activation_precision < (sizeof(activation_precision_str) / sizeof(activation_precision_str[0])) ? + activation_precision_str[activation_precision] : + "****"))); + fields.push_back(std::make_pair( + "activation_format", + (activation_format < (sizeof(activation_format_str) / sizeof(activation_format_str[0])) ? 
+ activation_format_str[activation_format] : + "****"))); + } +#endif +#endif + }; + // IFM2 zero point + struct npu_set_ifm2_zero_point_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t zero_point : 16; // Zero point offset +#ifdef __cplusplus + public: + npu_set_ifm2_zero_point_t(uint32_t _zero_point) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + zero_point(_zero_point & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_zero_point_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), zero_point(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_zero_point_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_zero_point_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_zero_point() const + { + return static_cast(zero_point); + } + CONSTEXPR npu_set_ifm2_zero_point_t &set_zero_point(uint32_t value) + { + zero_point = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("zero_point", std::to_string(zero_point))); + } +#endif +#endif + }; + // IFM2 Tile 0 and tile 2 width + struct npu_set_ifm2_width0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // IFM2 Tile 0 and tile 2 width +#ifdef __cplusplus + public: + npu_set_ifm2_width0_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_width0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_width0_m1_t 
&set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_width0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ifm2_width0_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // IFM2 Tile 0 height + struct npu_set_ifm2_height0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // IFM2 Tile 0 height +#ifdef __cplusplus + public: + npu_set_ifm2_height0_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_height0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_height0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_height0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ifm2_height0_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // IFM2 Tile 1 height + struct npu_set_ifm2_height1_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // IFM2 Tile 1 height +#ifdef __cplusplus + public: + npu_set_ifm2_height1_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_height1_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1)), reserved0(0), + 
control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_height1_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_height1_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ifm2_height1_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // Start of IB0,IB1 buffers for IFM2 + struct npu_set_ifm2_ib_start_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t ib_start : 6; // Start of IB0,IB1 buffers for IFM2 in the SHRAM in KB units. Multiple of 2 + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_set_ifm2_ib_start_t(uint32_t _ib_start) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ib_start(_ib_start & ((1U << 6) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ifm2_ib_start_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ib_start(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_ib_start_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_ib_start_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_ib_start() const + { + return static_cast(ib_start); + } + CONSTEXPR npu_set_ifm2_ib_start_t &set_ib_start(uint32_t value) + { + ib_start = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const 
+ { + fields.push_back(std::make_pair("ib_start", std::to_string(ib_start))); + } +#endif +#endif + }; + // Index n for IFM2 access + struct npu_set_ifm2_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Index n for IFM2 access + uint32_t reserved1 : 13; +#ifdef __cplusplus + public: + npu_set_ifm2_region_t(uint32_t _region) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ifm2_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_ifm2_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + } +#endif +#endif + }; + // IFM Tile 0 address + struct npu_set_ifm_base0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_base0_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm_base0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_base0_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & 
((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_base0_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm_base0_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM Tile 1 address + struct npu_set_ifm_base1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_base1_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm_base1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_base1_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_base1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm_base1_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM Tile 2 address + struct npu_set_ifm_base2_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_base2_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm_base2_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2) && control >= 1 
&& + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_base2_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_base2_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm_base2_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM Tile 3 address + struct npu_set_ifm_base3_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_base3_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm_base3_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_base3_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_base3_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm_base3_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM byte stride between horizontal values + struct npu_set_ifm_stride_x_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + 
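+    // The horizontal byte stride travels in the 32-bit payload word (addr) and is
+    // passed through unmasked by the constructor below, unlike the cmd0 payload
+    // fields above, which are masked down to their declared bit width.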
npu_set_ifm_stride_x_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm_stride_x_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_stride_x_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_stride_x_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm_stride_x_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM byte stride between vertical values + struct npu_set_ifm_stride_y_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_stride_y_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm_stride_y_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_stride_y_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_stride_y_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm_stride_y_t 
&set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM byte stride between channel blocks (of 16 bytes each block) + struct npu_set_ifm_stride_c_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_stride_c_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm_stride_c_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_stride_c_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_stride_c_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm_stride_c_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM Tile 0 address + struct npu_set_ofm_base0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_base0_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ofm_base0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR 
NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_base0_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_base0_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ofm_base0_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM Tile 1 address + struct npu_set_ofm_base1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_base1_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ofm_base1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_base1_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_base1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ofm_base1_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM Tile 2 address + struct npu_set_ofm_base2_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_base2_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ofm_base2_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2)), reserved0(0), + 
control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_base2_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_base2_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ofm_base2_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM Tile 3 address + struct npu_set_ofm_base3_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_base3_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ofm_base3_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_base3_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_base3_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ofm_base3_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM byte stride between horizontal values + struct npu_set_ofm_stride_x_t + { +#ifdef __cplusplus + private: +#endif + 
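+    // Same cmd1 layout as the other payload commands in this header: a 16-bit header
+    // word (10-bit opcode, 4 reserved bits, 2-bit control set to CMD1_CTRL), a further
+    // 16 reserved bits, then the 32-bit payload carried in the second word.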
uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_stride_x_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ofm_stride_x_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_stride_x_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_stride_x_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ofm_stride_x_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM byte stride between vertical values + struct npu_set_ofm_stride_y_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_stride_y_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ofm_stride_y_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_stride_y_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_stride_y_t &set_control(NPU_NAMESPACE::cmd_ctrl 
value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ofm_stride_y_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM byte stride between channel blocks (of 16 bytes each block) + struct npu_set_ofm_stride_c_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_stride_c_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ofm_stride_c_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_stride_c_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_stride_c_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ofm_stride_c_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Weight stream byte offset in WEIGHT_REGION + struct npu_set_weight_base_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_weight_base_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_weight_base_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = 
static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_weight_base_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_weight_base_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_weight_base_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Weight stream byte length + struct npu_set_weight_length_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t length : 32; // Weight stream byte length +#ifdef __cplusplus + public: + npu_set_weight_length_t(uint32_t _length) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(_length) + { + } + CONSTEXPR npu_set_weight_length_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_weight_length_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_weight_length_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_length() const + { + return static_cast(length); + } + CONSTEXPR npu_set_weight_length_t &set_length(uint32_t value) + { + length = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("length", std::to_string(length))); + } +#endif +#endif + }; + // Scale and bias stream input byte offset from SCALE_REGION + struct npu_set_scale_base_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_scale_base_t(uint32_t _addr) : + 
opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_scale_base_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_scale_base_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_scale_base_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_scale_base_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Scale and bias stream input byte length + struct npu_set_scale_length_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t length : 20; // Scale and bias stream byte length + uint32_t reserved2 : 12; +#ifdef __cplusplus + public: + npu_set_scale_length_t(uint32_t _length) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), + length(_length & ((1U << 20) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_scale_length_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_scale_length_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_scale_length_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_length() const + { + return 
static_cast<uint32_t>(length);
+    }
+    CONSTEXPR npu_set_scale_length_t &set_length(uint32_t value)
+    {
+        length = value & ((1U << 20) - 1);
+        return *this;
+    }
+#ifdef NPU_DISASSEMBLE
+    void disassemble(std::vector<std::pair<std::string, std::string>> &fields) const
+    {
+        fields.push_back(std::make_pair("length", std::to_string(length)));
+    }
+#endif
+#endif
+};
+// OFM scale
+struct npu_set_ofm_scale_t
+{
+#ifdef __cplusplus
+  private:
+#endif
+    uint32_t opcode : 10; // opcode
+    uint32_t reserved0 : 4;
+    uint32_t control : 2; // control
+    uint32_t shift : 6; // Shift
+    uint32_t reserved1 : 10;
+    uint32_t scale : 32; // Scale. Not applied for 32-bit operations
+#ifdef __cplusplus
+  public:
+    npu_set_ofm_scale_t(uint32_t _shift, uint32_t _scale) :
+        opcode(static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE)), reserved0(0),
+        control(static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), shift(_shift & ((1U << 6) - 1)),
+        reserved1(0), scale(_scale)
+    {
+    }
+    CONSTEXPR npu_set_ofm_scale_t() :
+        opcode(static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE)), reserved0(0),
+        control(static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), shift(0), reserved1(0), scale(0)
+    {
+    }
+    CONSTEXPR bool valid() const
+    {
+        return opcode == static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE) && control >= 1 &&
+               control <= 2;
+    }
+    CONSTEXPR void init()
+    {
+        opcode  = static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE);
+        control = static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL);
+    }
+    operator uint64_t()
+    {
+        uint64_t word;
+        std::memcpy(&word, this, sizeof(word));
+        return word;
+    }
+    CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const
+    {
+        return static_cast<NPU_NAMESPACE::cmd1_opcode>(opcode);
+    }
+    CONSTEXPR npu_set_ofm_scale_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value)
+    {
+        opcode = static_cast<uint32_t>(value) & ((1U << 10) - 1);
+        return *this;
+    }
+    CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const
+    {
+        return static_cast<NPU_NAMESPACE::cmd_ctrl>(control);
+    }
+    CONSTEXPR npu_set_ofm_scale_t &set_control(NPU_NAMESPACE::cmd_ctrl value)
+    {
+        control = static_cast<uint32_t>(value) & ((1U << 2) - 1);
+        return *this;
+    }
+    CONSTEXPR uint32_t get_shift() const
+    {
+        return static_cast<uint32_t>(shift);
+    }
+    CONSTEXPR npu_set_ofm_scale_t &set_shift(uint32_t value)
+    {
+        shift = static_cast<uint32_t>(value) & ((1U << 6) - 1);
+        return *this;
+    }
+    CONSTEXPR uint32_t get_scale() const
+    {
+        return static_cast<uint32_t>(scale);
+    }
+    CONSTEXPR npu_set_ofm_scale_t &set_scale(uint32_t value)
+    {
+        scale = value;
+        return *this;
+    }
+#ifdef NPU_DISASSEMBLE
+    void disassemble(std::vector<std::pair<std::string, std::string>> &fields) const
+    {
+        fields.push_back(std::make_pair("shift", std::to_string(shift)));
+        fields.push_back(std::make_pair("scale", std::to_string(scale)));
+    }
+#endif
+#endif
+};
+// Input operand A scale
+struct npu_set_opa_scale_t
+{
+#ifdef __cplusplus
+  private:
+#endif
+    uint32_t opcode : 10; // opcode
+    uint32_t reserved0 : 4;
+    uint32_t control : 2; // control
+    uint32_t shift : 6; // Shift. Ignored if IFM scale mode is 0
+    uint32_t reserved1 : 10;
+    uint32_t scale : 32; // Scale. 16-bit if IFM scale mode is 0
+#ifdef __cplusplus
+  public:
+    npu_set_opa_scale_t(uint32_t _shift, uint32_t _scale) :
+        opcode(static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE)), reserved0(0),
+        control(static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), shift(_shift & ((1U << 6) - 1)),
+        reserved1(0), scale(_scale)
+    {
+    }
+    CONSTEXPR npu_set_opa_scale_t() :
+        opcode(static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE)), reserved0(0),
+        control(static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), shift(0), reserved1(0), scale(0)
+    {
+    }
+    CONSTEXPR bool valid() const
+    {
+        return opcode == static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE) && control >= 1 &&
+               control <= 2;
+    }
+    CONSTEXPR void init()
+    {
+        opcode  = static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE);
+        control = static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL);
+    }
+    operator uint64_t()
+    {
+        uint64_t word;
+        std::memcpy(&word, this, sizeof(word));
+        return word;
+    }
+    CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const
+    {
+        return static_cast<NPU_NAMESPACE::cmd1_opcode>(opcode);
+    }
+    CONSTEXPR npu_set_opa_scale_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value)
+    {
+        opcode = static_cast<uint32_t>(value) & ((1U << 10) - 1);
+        return *this;
+    }
+    CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const
+    {
+        return static_cast<NPU_NAMESPACE::cmd_ctrl>(control);
+    }
+    CONSTEXPR npu_set_opa_scale_t &set_control(NPU_NAMESPACE::cmd_ctrl value)
+    {
+        control = static_cast<uint32_t>(value) & ((1U << 2) - 1);
+        return *this;
+    }
+    CONSTEXPR uint32_t get_shift() const
+    {
+        return static_cast<uint32_t>(shift);
+    }
+    CONSTEXPR npu_set_opa_scale_t &set_shift(uint32_t value)
+    {
+        shift = static_cast<uint32_t>(value) & ((1U << 6) - 1);
+        return *this;
+    }
+    CONSTEXPR uint32_t get_scale() const
+    {
+        return static_cast<uint32_t>(scale);
+    }
+    CONSTEXPR npu_set_opa_scale_t &set_scale(uint32_t value)
+    {
+        scale = value;
+        return *this;
+    }
+#ifdef NPU_DISASSEMBLE
+    void disassemble(std::vector<std::pair<std::string, std::string>> &fields) const
+    {
+        fields.push_back(std::make_pair("shift", std::to_string(shift)));
+        fields.push_back(std::make_pair("scale", std::to_string(scale)));
+    }
+#endif
+#endif
+};
+// Input operand B scale
+struct npu_set_opb_scale_t
+{
+#ifdef __cplusplus
+  private:
+#endif
+    uint32_t opcode : 10; // opcode
+    uint32_t reserved0 : 4;
+    uint32_t control : 2; // control
+    uint32_t reserved1 : 16;
+    uint32_t scale : 16; // Scale. Not used if IFM scale mode is 1 or 2
+    uint32_t reserved2 : 16;
+#ifdef __cplusplus
+  public:
+    npu_set_opb_scale_t(uint32_t _scale) :
+        opcode(static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE)), reserved0(0),
+        control(static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0),
+        scale(_scale & ((1U << 16) - 1)), reserved2(0)
+    {
+    }
+    CONSTEXPR npu_set_opb_scale_t() :
+        opcode(static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE)), reserved0(0),
+        control(static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), scale(0), reserved2(0)
+    {
+    }
+    CONSTEXPR bool valid() const
+    {
+        return opcode == static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE) && control >= 1 &&
+               control <= 2;
+    }
+    CONSTEXPR void init()
+    {
+        opcode  = static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE);
+        control = static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL);
+    }
+    operator uint64_t()
+    {
+        uint64_t word;
+        std::memcpy(&word, this, sizeof(word));
+        return word;
+    }
+    CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const
+    {
+        return static_cast<NPU_NAMESPACE::cmd1_opcode>(opcode);
+    }
+    CONSTEXPR npu_set_opb_scale_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value)
+    {
+        opcode = static_cast<uint32_t>(value) & ((1U << 10) - 1);
+        return *this;
+    }
+    CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const
+    {
+        return static_cast<NPU_NAMESPACE::cmd_ctrl>(control);
+    }
+    CONSTEXPR npu_set_opb_scale_t &set_control(NPU_NAMESPACE::cmd_ctrl value)
+    {
+        control = static_cast<uint32_t>(value) & ((1U << 2) - 1);
+        return *this;
+    }
+    CONSTEXPR uint32_t get_scale() const
+    {
+        return static_cast<uint32_t>(scale);
+    }
+    CONSTEXPR npu_set_opb_scale_t &set_scale(uint32_t value)
+    {
+        scale = static_cast<uint32_t>(value) & ((1U << 16) - 1);
+        return *this;
+    }
+#ifdef NPU_DISASSEMBLE
+    void disassemble(std::vector<std::pair<std::string, std::string>> &fields) const
+    {
+        fields.push_back(std::make_pair("scale", std::to_string(scale)));
+    }
+#endif
+#endif
+};
+// DMA user channel 0 source byte offset from DMA0_SRC_REGION
+struct npu_set_dma0_src_t
+{
+#ifdef __cplusplus
+  private:
+#endif
+    uint32_t opcode : 10; // opcode
+    uint32_t reserved0 : 4;
+    uint32_t control : 2; // control
+    uint32_t reserved1 : 16;
+    uint32_t addr : 32; // address offset
+#ifdef __cplusplus
+  public:
+    npu_set_dma0_src_t(uint32_t _addr) :
+        opcode(static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC)), reserved0(0),
+        control(static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr)
+    {
+    }
+    CONSTEXPR npu_set_dma0_src_t() :
+        opcode(static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC)), reserved0(0),
+        control(static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0)
+    {
+    }
+    CONSTEXPR bool valid() const
+    {
+        return opcode == static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC) && control >= 1 &&
+               control <= 2;
+    }
+    CONSTEXPR void init()
+    {
+        opcode  = static_cast<uint32_t>(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC);
+        control = static_cast<uint32_t>(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL);
+    }
+    operator uint64_t()
+    {
+        uint64_t word;
+        std::memcpy(&word, this, sizeof(word));
+        return word;
+    }
+    CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const
+    {
+        return static_cast<NPU_NAMESPACE::cmd1_opcode>(opcode);
+    }
+    CONSTEXPR npu_set_dma0_src_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value)
+    {
+        opcode = static_cast<uint32_t>(value) & ((1U << 10) - 1);
+        return *this;
+    }
+    CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const
+    {
+        return static_cast<NPU_NAMESPACE::cmd_ctrl>(control);
+    }
+    CONSTEXPR npu_set_dma0_src_t &set_control(NPU_NAMESPACE::cmd_ctrl value)
+    {
+        control = static_cast<uint32_t>(value) & ((1U << 2) - 1);
+        return *this;
+    }
+    CONSTEXPR uint32_t
get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_dma0_src_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // DMA user channel 0 destination byte offset from DMA0_DST_REGION + struct npu_set_dma0_dst_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_dma0_dst_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_dma0_dst_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_dst_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_dst_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_dma0_dst_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // DMA user channel 0 transfer length in bytes for each 1D transfer + struct npu_set_dma0_len_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_dma0_len_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_dma0_len_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; 
+ std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_len_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_len_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_dma0_len_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 Tile 0 address + struct npu_set_ifm2_base0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_base0_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm2_base0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_base0_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_base0_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm2_base0_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 Tile 1 address + struct npu_set_ifm2_base1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_base1_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm2_base1_t() : + 
opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_base1_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_base1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm2_base1_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 Tile 2 address + struct npu_set_ifm2_base2_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_base2_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm2_base2_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_base2_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_base2_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm2_base2_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 Tile 3 address 
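+ // The IFM2 base and stride commands below, like the user-defined register
+ // commands that follow them, share one 64-bit CMD1 layout: a 32-bit header
+ // word (10-bit opcode, 4 reserved bits, 2-bit control field holding
+ // CMD1_CTRL, 16 reserved bits) and a 32-bit payload word (address offset,
+ // byte stride or user register value). operator uint64_t() memcpy's the
+ // struct into the combined command-stream word, so a command can be
+ // produced as, for example (0x1000 is a hypothetical byte offset):
+ //
+ //     uint64_t word = npu_set_ifm2_base0_t(0x1000);
+ //
+ // valid() only checks that the opcode matches this command and that the
+ // control field holds a legal value (1 or 2).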
+ struct npu_set_ifm2_base3_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_base3_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm2_base3_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_base3_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_base3_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm2_base3_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 byte stride between horizontal values + struct npu_set_ifm2_stride_x_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_stride_x_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm2_stride_x_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_stride_x_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + 
CONSTEXPR npu_set_ifm2_stride_x_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm2_stride_x_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 byte stride between vertical values + struct npu_set_ifm2_stride_y_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_stride_y_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm2_stride_y_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_stride_y_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_stride_y_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm2_stride_y_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 byte stride between channel blocks (of 16 bytes each block) + struct npu_set_ifm2_stride_c_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t addr : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_stride_c_t(uint32_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(_addr) + { + } + CONSTEXPR npu_set_ifm2_stride_c_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), addr(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C) && control 
>= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_stride_c_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_stride_c_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + return static_cast(addr); + } + CONSTEXPR npu_set_ifm2_stride_c_t &set_addr(uint32_t value) + { + addr = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // User defined register 0 + struct npu_set_user_defined0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t user_reg : 32; // User defined register +#ifdef __cplusplus + public: + npu_set_user_defined0_t(uint32_t _user_reg) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(_user_reg) + { + } + CONSTEXPR npu_set_user_defined0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED0) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_user_defined0_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_user_defined0_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_user_reg() const + { + return static_cast(user_reg); + } + CONSTEXPR npu_set_user_defined0_t &set_user_reg(uint32_t value) + { + user_reg = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("user_reg", std::to_string(user_reg))); + } +#endif +#endif + }; + // User defined register 1 + struct npu_set_user_defined1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t user_reg : 32; // User defined register +#ifdef 
__cplusplus + public: + npu_set_user_defined1_t(uint32_t _user_reg) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(_user_reg) + { + } + CONSTEXPR npu_set_user_defined1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED1) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_user_defined1_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_user_defined1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_user_reg() const + { + return static_cast(user_reg); + } + CONSTEXPR npu_set_user_defined1_t &set_user_reg(uint32_t value) + { + user_reg = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("user_reg", std::to_string(user_reg))); + } +#endif +#endif + }; + // User defined register 2 + struct npu_set_user_defined2_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t user_reg : 32; // User defined register +#ifdef __cplusplus + public: + npu_set_user_defined2_t(uint32_t _user_reg) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(_user_reg) + { + } + CONSTEXPR npu_set_user_defined2_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED2) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED2); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_user_defined2_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_user_defined2_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_user_reg() const + { + return static_cast(user_reg); + 
} + CONSTEXPR npu_set_user_defined2_t &set_user_reg(uint32_t value) + { + user_reg = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("user_reg", std::to_string(user_reg))); + } +#endif +#endif + }; + // User defined register 3 + struct npu_set_user_defined3_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t user_reg : 32; // User defined register +#ifdef __cplusplus + public: + npu_set_user_defined3_t(uint32_t _user_reg) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(_user_reg) + { + } + CONSTEXPR npu_set_user_defined3_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED3) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED3); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_user_defined3_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_user_defined3_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_user_reg() const + { + return static_cast(user_reg); + } + CONSTEXPR npu_set_user_defined3_t &set_user_reg(uint32_t value) + { + user_reg = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("user_reg", std::to_string(user_reg))); + } +#endif +#endif + }; + // User defined register 4 + struct npu_set_user_defined4_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t user_reg : 32; // User defined register +#ifdef __cplusplus + public: + npu_set_user_defined4_t(uint32_t _user_reg) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED4)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(_user_reg) + { + } + CONSTEXPR npu_set_user_defined4_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED4)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED4) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED4); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + 
CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_user_defined4_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_user_defined4_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_user_reg() const + { + return static_cast(user_reg); + } + CONSTEXPR npu_set_user_defined4_t &set_user_reg(uint32_t value) + { + user_reg = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("user_reg", std::to_string(user_reg))); + } +#endif +#endif + }; + // User defined register 5 + struct npu_set_user_defined5_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t user_reg : 32; // User defined register +#ifdef __cplusplus + public: + npu_set_user_defined5_t(uint32_t _user_reg) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED5)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(_user_reg) + { + } + CONSTEXPR npu_set_user_defined5_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED5)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED5) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED5); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_user_defined5_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_user_defined5_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_user_reg() const + { + return static_cast(user_reg); + } + CONSTEXPR npu_set_user_defined5_t &set_user_reg(uint32_t value) + { + user_reg = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("user_reg", std::to_string(user_reg))); + } +#endif +#endif + }; + // User defined register 6 + struct npu_set_user_defined6_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t user_reg : 32; // User defined register +#ifdef __cplusplus + public: + npu_set_user_defined6_t(uint32_t _user_reg) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED6)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(_user_reg) + { + } + CONSTEXPR npu_set_user_defined6_t() : + 
opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED6)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED6) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED6); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_user_defined6_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_user_defined6_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_user_reg() const + { + return static_cast(user_reg); + } + CONSTEXPR npu_set_user_defined6_t &set_user_reg(uint32_t value) + { + user_reg = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("user_reg", std::to_string(user_reg))); + } +#endif +#endif + }; + // User defined register 7 + struct npu_set_user_defined7_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t user_reg : 32; // User defined register +#ifdef __cplusplus + public: + npu_set_user_defined7_t(uint32_t _user_reg) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED7)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(_user_reg) + { + } + CONSTEXPR npu_set_user_defined7_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED7)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), user_reg(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED7) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_USER_DEFINED7); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_user_defined7_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_user_defined7_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_user_reg() const + { + return static_cast(user_reg); + } + CONSTEXPR npu_set_user_defined7_t &set_user_reg(uint32_t value) + { + user_reg = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("user_reg", std::to_string(user_reg))); + } +#endif +#endif + }; +#ifdef 
__cplusplus +}; +#endif +#define NPU_OP_STRUCTS \ + NPU_OP_(stop) \ + NPU_OP_(irq) \ + NPU_OP_(conv) \ + NPU_OP_(depthwise) \ + NPU_OP_(pool) \ + NPU_OP_(elementwise) \ + NPU_OP_(dma_start) \ + NPU_OP_(dma_wait) \ + NPU_OP_(kernel_wait) \ + NPU_OP_(pmu_mask) + +#define NPU_SET_STRUCTS \ + NPU_SET_(ifm_pad_top) \ + NPU_SET_(ifm_pad_left) \ + NPU_SET_(ifm_pad_right) \ + NPU_SET_(ifm_pad_bottom) \ + NPU_SET_(ifm_depth_m1) \ + NPU_SET_(ifm_precision) \ + NPU_SET_(ifm_upscale) \ + NPU_SET_(ifm_zero_point) \ + NPU_SET_(ifm_width0_m1) \ + NPU_SET_(ifm_height0_m1) \ + NPU_SET_(ifm_height1_m1) \ + NPU_SET_(ifm_ib_end) \ + NPU_SET_(ifm_region) \ + NPU_SET_(ofm_width_m1) \ + NPU_SET_(ofm_height_m1) \ + NPU_SET_(ofm_depth_m1) \ + NPU_SET_(ofm_precision) \ + NPU_SET_(ofm_blk_width_m1) \ + NPU_SET_(ofm_blk_height_m1) \ + NPU_SET_(ofm_blk_depth_m1) \ + NPU_SET_(ofm_zero_point) \ + NPU_SET_(ofm_width0_m1) \ + NPU_SET_(ofm_height0_m1) \ + NPU_SET_(ofm_height1_m1) \ + NPU_SET_(ofm_region) \ + NPU_SET_(kernel_width_m1) \ + NPU_SET_(kernel_height_m1) \ + NPU_SET_(kernel_stride) \ + NPU_SET_(acc_format) \ + NPU_SET_(activation) \ + NPU_SET_(activation_min) \ + NPU_SET_(activation_max) \ + NPU_SET_(weight_region) \ + NPU_SET_(scale_region) \ + NPU_SET_(ab_start) \ + NPU_SET_(blockdep) \ + NPU_SET_(dma0_src_region) \ + NPU_SET_(dma0_dst_region) \ + NPU_SET_(dma0_size0) \ + NPU_SET_(dma0_size1) \ + NPU_SET_(ifm2_broadcast) \ + NPU_SET_(ifm2_scalar) \ + NPU_SET_(ifm2_precision) \ + NPU_SET_(ifm2_zero_point) \ + NPU_SET_(ifm2_width0_m1) \ + NPU_SET_(ifm2_height0_m1) \ + NPU_SET_(ifm2_height1_m1) \ + NPU_SET_(ifm2_ib_start) \ + NPU_SET_(ifm2_region) \ + NPU_SET_(ifm_base0) \ + NPU_SET_(ifm_base1) \ + NPU_SET_(ifm_base2) \ + NPU_SET_(ifm_base3) \ + NPU_SET_(ifm_stride_x) \ + NPU_SET_(ifm_stride_y) \ + NPU_SET_(ifm_stride_c) \ + NPU_SET_(ofm_base0) \ + NPU_SET_(ofm_base1) \ + NPU_SET_(ofm_base2) \ + NPU_SET_(ofm_base3) \ + NPU_SET_(ofm_stride_x) \ + NPU_SET_(ofm_stride_y) \ + NPU_SET_(ofm_stride_c) \ + NPU_SET_(weight_base) \ + NPU_SET_(weight_length) \ + NPU_SET_(scale_base) \ + NPU_SET_(scale_length) \ + NPU_SET_(ofm_scale) \ + NPU_SET_(opa_scale) \ + NPU_SET_(opb_scale) \ + NPU_SET_(dma0_src) \ + NPU_SET_(dma0_dst) \ + NPU_SET_(dma0_len) \ + NPU_SET_(ifm2_base0) \ + NPU_SET_(ifm2_base1) \ + NPU_SET_(ifm2_base2) \ + NPU_SET_(ifm2_base3) \ + NPU_SET_(ifm2_stride_x) \ + NPU_SET_(ifm2_stride_y) \ + NPU_SET_(ifm2_stride_c) \ + NPU_SET_(user_defined0) \ + NPU_SET_(user_defined1) \ + NPU_SET_(user_defined2) \ + NPU_SET_(user_defined3) \ + NPU_SET_(user_defined4) \ + NPU_SET_(user_defined5) \ + NPU_SET_(user_defined6) \ + NPU_SET_(user_defined7) + +#define EXPAND_ACC_FORMAT(FUNC, SEP) FUNC(acc_format, I32) SEP FUNC(acc_format, I40) SEP FUNC(acc_format, F16) + +#define EXPAND_ACTIVATION_CLIP_RANGE(FUNC, SEP) \ + FUNC(activation_clip_range, OFM_PRECISION) \ + SEP FUNC(activation_clip_range, FORCE_UINT8) SEP FUNC(activation_clip_range, FORCE_INT8) \ + SEP FUNC(activation_clip_range, FORCE_INT16) + +#define EXPAND_ACTIVATION_FORMAT(FUNC, SEP) FUNC(activation_format, NHWC) SEP FUNC(activation_format, NHCWB16) + +#define EXPAND_ACTIVATION_FUNCTION(FUNC, SEP) \ + FUNC(activation_function, RELU) \ + SEP FUNC(activation_function, TANH) SEP FUNC(activation_function, SIGMOID) SEP FUNC(activation_function, TABLE_0) \ + SEP FUNC(activation_function, TABLE_1) SEP FUNC(activation_function, TABLE_2) \ + SEP FUNC(activation_function, TABLE_3) SEP FUNC(activation_function, TABLE_4) \ + SEP FUNC(activation_function, TABLE_5) SEP 
FUNC(activation_function, TABLE_6) \ + SEP FUNC(activation_function, TABLE_7) + +#define EXPAND_ACTIVATION_PRECISION(FUNC, SEP) \ + FUNC(activation_precision, B8) \ + SEP FUNC(activation_precision, B16) SEP FUNC(activation_precision, B32) SEP FUNC(activation_precision, B64) + +#define EXPAND_ACTIVATION_TYPE(FUNC, SEP) FUNC(activation_type, UNSIGNED) SEP FUNC(activation_type, SIGNED) + +#define EXPAND_AXI_MEM_ENCODING(FUNC, SEP) \ + FUNC(axi_mem_encoding, DEVICE_NON_BUFFERABLE) \ + SEP FUNC(axi_mem_encoding, DEVICE_BUFFERABLE) SEP FUNC(axi_mem_encoding, NORMAL_NON_CACHEABLE_NON_BUFFERABLE) \ + SEP FUNC(axi_mem_encoding, NORMAL_NON_CACHEABLE_BUFFERABLE) \ + SEP FUNC(axi_mem_encoding, WRITE_THROUGH_NO_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_THROUGH_READ_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_THROUGH_WRITE_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_THROUGH_READ_AND_WRITE_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_BACK_NO_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_BACK_READ_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_BACK_WRITE_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_BACK_READ_AND_WRITE_ALLOCATE) + +#define EXPAND_BROADCAST_MODE(FUNC, SEP) FUNC(broadcast_mode, DISABLE) SEP FUNC(broadcast_mode, ENABLE) + +#define EXPAND_CMD0_OPCODE(FUNC, SEP) \ + FUNC(cmd0_opcode, NPU_OP_STOP) \ + SEP FUNC(cmd0_opcode, NPU_OP_IRQ) SEP FUNC(cmd0_opcode, NPU_OP_CONV) SEP FUNC( \ + cmd0_opcode, NPU_OP_DEPTHWISE) SEP FUNC(cmd0_opcode, NPU_OP_POOL) SEP FUNC(cmd0_opcode, NPU_OP_ELEMENTWISE) \ + SEP FUNC(cmd0_opcode, NPU_OP_DMA_START) SEP FUNC(cmd0_opcode, NPU_OP_DMA_WAIT) SEP FUNC( \ + cmd0_opcode, NPU_OP_KERNEL_WAIT) SEP FUNC(cmd0_opcode, NPU_OP_PMU_MASK) SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM_PAD_TOP) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM_PAD_LEFT) SEP FUNC(cmd0_opcode, NPU_SET_IFM_PAD_RIGHT) SEP FUNC( \ + cmd0_opcode, NPU_SET_IFM_PAD_BOTTOM) SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM_DEPTH_M1) SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM_PRECISION) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM_UPSCALE) SEP FUNC(cmd0_opcode, NPU_SET_IFM_ZERO_POINT) SEP FUNC( \ + cmd0_opcode, NPU_SET_IFM_WIDTH0_M1) SEP FUNC(cmd0_opcode, NPU_SET_IFM_HEIGHT0_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM_HEIGHT1_M1) SEP FUNC(cmd0_opcode, NPU_SET_IFM_IB_END) SEP FUNC( \ + cmd0_opcode, NPU_SET_IFM_REGION) SEP FUNC(cmd0_opcode, NPU_SET_OFM_WIDTH_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_OFM_HEIGHT_M1) SEP FUNC(cmd0_opcode, NPU_SET_OFM_DEPTH_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_OFM_PRECISION) SEP FUNC( \ + cmd0_opcode, NPU_SET_OFM_BLK_WIDTH_M1) SEP FUNC(cmd0_opcode, \ + NPU_SET_OFM_BLK_HEIGHT_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_OFM_BLK_DEPTH_M1) SEP FUNC( \ + cmd0_opcode, NPU_SET_OFM_ZERO_POINT) SEP FUNC(cmd0_opcode, NPU_SET_OFM_WIDTH0_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_OFM_HEIGHT0_M1) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_OFM_HEIGHT1_M1) SEP FUNC(cmd0_opcode, NPU_SET_OFM_REGION) \ + SEP FUNC(cmd0_opcode, NPU_SET_KERNEL_WIDTH_M1) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_KERNEL_HEIGHT_M1) SEP FUNC(cmd0_opcode, NPU_SET_KERNEL_STRIDE) \ + SEP FUNC(cmd0_opcode, NPU_SET_ACC_FORMAT) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_ACTIVATION) SEP FUNC(cmd0_opcode, NPU_SET_ACTIVATION_MIN) \ + SEP FUNC(cmd0_opcode, NPU_SET_ACTIVATION_MAX) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_WEIGHT_REGION) SEP FUNC(cmd0_opcode, NPU_SET_SCALE_REGION) \ + SEP FUNC(cmd0_opcode, \ + NPU_SET_AB_START) SEP FUNC(cmd0_opcode, NPU_SET_BLOCKDEP) \ + SEP FUNC(cmd0_opcode, NPU_SET_DMA0_SRC_REGION) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_DMA0_DST_REGION) SEP 
FUNC(cmd0_opcode, \ + NPU_SET_DMA0_SIZE0) \ + SEP FUNC(cmd0_opcode, NPU_SET_DMA0_SIZE1) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_IFM2_BROADCAST) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM2_SCALAR) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_IFM2_PRECISION) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM2_ZERO_POINT) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM2_WIDTH0_M1) \ + SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM2_HEIGHT0_M1) \ + SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM2_HEIGHT1_M1) \ + SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM2_IB_START) \ + SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM2_REGION) + +#define EXPAND_CMD1_OPCODE(FUNC, SEP) \ + FUNC(cmd1_opcode, NPU_SET_IFM_BASE0) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM_BASE1) SEP FUNC(cmd1_opcode, NPU_SET_IFM_BASE2) SEP FUNC( \ + cmd1_opcode, NPU_SET_IFM_BASE3) SEP FUNC(cmd1_opcode, NPU_SET_IFM_STRIDE_X) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM_STRIDE_Y) SEP FUNC(cmd1_opcode, NPU_SET_IFM_STRIDE_C) SEP FUNC( \ + cmd1_opcode, NPU_SET_OFM_BASE0) SEP FUNC(cmd1_opcode, NPU_SET_OFM_BASE1) \ + SEP FUNC(cmd1_opcode, NPU_SET_OFM_BASE2) SEP FUNC(cmd1_opcode, NPU_SET_OFM_BASE3) SEP FUNC( \ + cmd1_opcode, NPU_SET_OFM_STRIDE_X) SEP FUNC(cmd1_opcode, NPU_SET_OFM_STRIDE_Y) \ + SEP FUNC(cmd1_opcode, NPU_SET_OFM_STRIDE_C) SEP FUNC(cmd1_opcode, NPU_SET_WEIGHT_BASE) SEP FUNC( \ + cmd1_opcode, NPU_SET_WEIGHT_LENGTH) SEP FUNC(cmd1_opcode, NPU_SET_SCALE_BASE) \ + SEP FUNC(cmd1_opcode, NPU_SET_SCALE_LENGTH) SEP FUNC(cmd1_opcode, NPU_SET_OFM_SCALE) SEP FUNC( \ + cmd1_opcode, NPU_SET_OPA_SCALE) SEP FUNC(cmd1_opcode, NPU_SET_OPB_SCALE) \ + SEP FUNC(cmd1_opcode, NPU_SET_DMA0_SRC) SEP FUNC(cmd1_opcode, NPU_SET_DMA0_DST) SEP FUNC( \ + cmd1_opcode, NPU_SET_DMA0_LEN) SEP FUNC(cmd1_opcode, NPU_SET_IFM2_BASE0) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM2_BASE1) SEP FUNC(cmd1_opcode, NPU_SET_IFM2_BASE2) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM2_BASE3) SEP FUNC(cmd1_opcode, NPU_SET_IFM2_STRIDE_X) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM2_STRIDE_Y) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM2_STRIDE_C) \ + SEP FUNC(cmd1_opcode, NPU_SET_USER_DEFINED0) \ + SEP FUNC(cmd1_opcode, NPU_SET_USER_DEFINED1) \ + SEP FUNC(cmd1_opcode, NPU_SET_USER_DEFINED2) \ + SEP FUNC(cmd1_opcode, NPU_SET_USER_DEFINED3) \ + SEP FUNC(cmd1_opcode, NPU_SET_USER_DEFINED4) \ + SEP FUNC(cmd1_opcode, NPU_SET_USER_DEFINED5) \ + SEP FUNC(cmd1_opcode, NPU_SET_USER_DEFINED6) \ + SEP FUNC(cmd1_opcode, NPU_SET_USER_DEFINED7) + +#define EXPAND_CMD_CTRL(FUNC, SEP) FUNC(cmd_ctrl, CMD0_CTRL) SEP FUNC(cmd_ctrl, CMD1_CTRL) + +#define EXPAND_CUSTOM_DMA_CS(FUNC, SEP) FUNC(custom_dma_cs, DISABLE) SEP FUNC(custom_dma_cs, ENABLE) + +#define EXPAND_CUSTOM_DMA(FUNC, SEP) FUNC(custom_dma, NOT_IMPLEMENTED) SEP FUNC(custom_dma, IMPLEMENTED) + +#define EXPAND_DMA_FAULT_SRC(FUNC, SEP) FUNC(dma_fault_src, AXI_M0) SEP FUNC(dma_fault_src, AXI_M1) + +#define EXPAND_DMA_REGION_MODE(FUNC, SEP) FUNC(dma_region_mode, EXTERNAL) SEP FUNC(dma_region_mode, INTERNAL) + +#define EXPAND_DMA_STRIDE_MODE(FUNC, SEP) FUNC(dma_stride_mode, D1) + +#define EXPAND_ELEMENTWISE_MODE(FUNC, SEP) \ + FUNC(elementwise_mode, MUL) \ + SEP FUNC(elementwise_mode, ADD) SEP FUNC(elementwise_mode, SUB) SEP FUNC(elementwise_mode, MIN) \ + SEP FUNC(elementwise_mode, MAX) SEP FUNC(elementwise_mode, LRELU) SEP FUNC(elementwise_mode, ABS) \ + SEP FUNC(elementwise_mode, CLZ) SEP FUNC(elementwise_mode, SHR) SEP FUNC(elementwise_mode, SHL) + +#define EXPAND_FUNCTIONAL_SAFETY(FUNC, SEP) \ + FUNC(functional_safety, NOT_IMPLEMENTED) SEP FUNC(functional_safety, IMPLEMENTED) + +#define EXPAND_IFM2_OPERAND_ORDER(FUNC, SEP) FUNC(ifm2_operand_order, 
ORDER_B) SEP FUNC(ifm2_operand_order, ORDER_A) + +#define EXPAND_IFM_SCALE_MODE(FUNC, SEP) \ + FUNC(ifm_scale_mode, OPA_OPB_16) SEP FUNC(ifm_scale_mode, OPA_32) SEP FUNC(ifm_scale_mode, OPB_32) + +#define EXPAND_IFM_UPSCALE_MODE(FUNC, SEP) \ + FUNC(ifm_upscale_mode, NONE) SEP FUNC(ifm_upscale_mode, NEAREST) SEP FUNC(ifm_upscale_mode, ZEROS) + +#define EXPAND_KERNEL_DECOMPOSITION(FUNC, SEP) FUNC(kernel_decomposition, D8X8) SEP FUNC(kernel_decomposition, D4X4) + +#define EXPAND_KERNEL_DILATION(FUNC, SEP) FUNC(kernel_dilation, NONE) SEP FUNC(kernel_dilation, X2) + +#define EXPAND_MAX_BEATS(FUNC, SEP) FUNC(max_beats, B64) SEP FUNC(max_beats, B128) SEP FUNC(max_beats, B256) + +#define EXPAND_MEM_ATTR(FUNC, SEP) \ + FUNC(mem_attr, AXI0_OUTSTANDING_COUNTER0) \ + SEP FUNC(mem_attr, AXI0_OUTSTANDING_COUNTER1) SEP FUNC(mem_attr, AXI1_OUTSTANDING_COUNTER2) \ + SEP FUNC(mem_attr, AXI1_OUTSTANDING_COUNTER3) + +#define EXPAND_OFM_SCALE_MODE(FUNC, SEP) FUNC(ofm_scale_mode, PER_CHANNEL) SEP FUNC(ofm_scale_mode, GLOBAL) + +#define EXPAND_PMU_AXI_CHANNEL(FUNC, SEP) \ + FUNC(pmu_axi_channel, RD_CMD) \ + SEP FUNC(pmu_axi_channel, RD_IFM) SEP FUNC(pmu_axi_channel, RD_WEIGHTS) SEP FUNC(pmu_axi_channel, RD_SCALE_BIAS) \ + SEP FUNC(pmu_axi_channel, RD_MEM2MEM) SEP FUNC(pmu_axi_channel, WR_OFM) SEP FUNC(pmu_axi_channel, WR_MEM2MEM) + +#define EXPAND_PMU_EVENT(FUNC, SEP) \ + FUNC(pmu_event, NO_EVENT) \ + SEP FUNC(pmu_event, CYCLE) SEP FUNC(pmu_event, NPU_IDLE) SEP FUNC(pmu_event, CC_STALLED_ON_BLOCKDEP) SEP FUNC( \ + pmu_event, CC_STALLED_ON_SHRAM_RECONFIG) SEP FUNC(pmu_event, NPU_ACTIVE) SEP FUNC(pmu_event, MAC_ACTIVE) \ + SEP FUNC(pmu_event, MAC_ACTIVE_8BIT) SEP FUNC(pmu_event, MAC_ACTIVE_16BIT) SEP FUNC( \ + pmu_event, MAC_DPU_ACTIVE) SEP FUNC(pmu_event, MAC_STALLED_BY_WD_ACC) SEP FUNC(pmu_event, \ + MAC_STALLED_BY_WD) \ + SEP FUNC(pmu_event, MAC_STALLED_BY_ACC) SEP FUNC(pmu_event, MAC_STALLED_BY_IB) SEP FUNC( \ + pmu_event, \ + MAC_ACTIVE_32BIT) SEP FUNC(pmu_event, \ + MAC_STALLED_BY_INT_W) SEP FUNC(pmu_event, \ + MAC_STALLED_BY_INT_ACC) SEP FUNC(pmu_event, \ + AO_ACTIVE) \ + SEP FUNC(pmu_event, AO_ACTIVE_8BIT) SEP FUNC(pmu_event, AO_ACTIVE_16BIT) SEP FUNC( \ + pmu_event, AO_STALLED_BY_OFMP_OB) SEP FUNC(pmu_event, AO_STALLED_BY_OFMP) SEP \ + FUNC(pmu_event, AO_STALLED_BY_OB) SEP FUNC(pmu_event, AO_STALLED_BY_ACC_IB) SEP FUNC( \ + pmu_event, AO_STALLED_BY_ACC) SEP FUNC(pmu_event, AO_STALLED_BY_IB) SEP \ + FUNC(pmu_event, WD_ACTIVE) SEP FUNC(pmu_event, WD_STALLED) SEP FUNC(pmu_event, WD_STALLED_BY_WS) SEP FUNC( \ + pmu_event, WD_STALLED_BY_WD_BUF) SEP FUNC(pmu_event, \ + WD_PARSE_ACTIVE) SEP \ + FUNC(pmu_event, WD_PARSE_STALLED) SEP FUNC(pmu_event, WD_PARSE_STALLED_IN) SEP FUNC( \ + pmu_event, WD_PARSE_STALLED_OUT) SEP FUNC(pmu_event, \ + WD_TRANS_WS) SEP \ + FUNC(pmu_event, WD_TRANS_WB) SEP FUNC(pmu_event, WD_TRANS_DW0) SEP FUNC( \ + pmu_event, WD_TRANS_DW1) SEP FUNC(pmu_event, \ + AXI0_RD_TRANS_ACCEPTED) SEP \ + FUNC(pmu_event, AXI0_RD_TRANS_COMPLETED) SEP FUNC(pmu_event, AXI0_RD_DATA_BEAT_RECEIVED) SEP FUNC( \ + pmu_event, AXI0_RD_TRAN_REQ_STALLED) SEP FUNC(pmu_event, \ + AXI0_WR_TRANS_ACCEPTED) SEP \ + FUNC(pmu_event, AXI0_WR_TRANS_COMPLETED_M) SEP FUNC( \ + pmu_event, AXI0_WR_TRANS_COMPLETED_S) SEP \ + FUNC(pmu_event, AXI0_WR_DATA_BEAT_WRITTEN) SEP FUNC( \ + pmu_event, AXI0_WR_TRAN_REQ_STALLED) SEP \ + FUNC(pmu_event, AXI0_WR_DATA_BEAT_STALLED) SEP FUNC( \ + pmu_event, \ + AXI0_ENABLED_CYCLES) SEP FUNC(pmu_event, \ + AXI0_RD_STALL_LIMIT) SEP \ + FUNC(pmu_event, AXI0_WR_STALL_LIMIT) SEP FUNC( \ + pmu_event, \ 
+ AXI_LATENCY_ANY) SEP FUNC(pmu_event, \ + AXI_LATENCY_32) SEP \ + FUNC(pmu_event, \ + AXI_LATENCY_64) SEP FUNC(pmu_event, \ + AXI_LATENCY_128) SEP \ + FUNC(pmu_event, AXI_LATENCY_256) SEP FUNC( \ + pmu_event, \ + AXI_LATENCY_512) SEP FUNC(pmu_event, \ + AXI_LATENCY_1024) SEP \ + FUNC(pmu_event, ECC_DMA) SEP FUNC( \ + pmu_event, \ + ECC_SB0) SEP FUNC(pmu_event, \ + AXI1_RD_TRANS_ACCEPTED) SEP \ + FUNC(pmu_event, AXI1_RD_TRANS_COMPLETED) SEP FUNC( \ + pmu_event, AXI1_RD_DATA_BEAT_RECEIVED) SEP \ + FUNC(pmu_event, AXI1_RD_TRAN_REQ_STALLED) SEP FUNC( \ + pmu_event, AXI1_WR_TRANS_ACCEPTED) SEP \ + FUNC(pmu_event, AXI1_WR_TRANS_COMPLETED_M) SEP FUNC( \ + pmu_event, \ + AXI1_WR_TRANS_COMPLETED_S) SEP \ + FUNC(pmu_event, \ + AXI1_WR_DATA_BEAT_WRITTEN) SEP \ + FUNC(pmu_event, \ + AXI1_WR_TRAN_REQ_STALLED) SEP \ + FUNC( \ + pmu_event, \ + AXI1_WR_DATA_BEAT_STALLED) SEP \ + FUNC( \ + pmu_event, \ + AXI1_ENABLED_CYCLES) SEP \ + FUNC( \ + pmu_event, \ + AXI1_RD_STALL_LIMIT) SEP \ + FUNC( \ + pmu_event, \ + AXI1_WR_STALL_LIMIT) \ + SEP FUNC( \ + pmu_event, \ + ECC_SB1) + +#define EXPAND_POOLING_MODE(FUNC, SEP) \ + FUNC(pooling_mode, MAX) SEP FUNC(pooling_mode, AVERAGE) SEP FUNC(pooling_mode, REDUCE_SUM) + +#define EXPAND_PRIVILEGE_LEVEL(FUNC, SEP) FUNC(privilege_level, USER) SEP FUNC(privilege_level, PRIVILEGED) + +#define EXPAND_ROUND_MODE(FUNC, SEP) FUNC(round_mode, DBL) SEP FUNC(round_mode, TRUNCATE) SEP FUNC(round_mode, NATURAL) + +#define EXPAND_SECURITY_LEVEL(FUNC, SEP) FUNC(security_level, SECURE) SEP FUNC(security_level, NON_SECURE) + +#define EXPAND_STATE(FUNC, SEP) FUNC(state, STOPPED) SEP FUNC(state, RUNNING) + +#define EXPAND_WD_CORE_SLICE_STATE(FUNC, SEP) \ + FUNC(wd_core_slice_state, HEADER) SEP FUNC(wd_core_slice_state, PALETTE) SEP FUNC(wd_core_slice_state, WEIGHTS) + +#define EXPAND_WD_CTRL_STATE(FUNC, SEP) \ + FUNC(wd_ctrl_state, IDLE) \ + SEP FUNC(wd_ctrl_state, DRAIN) SEP FUNC(wd_ctrl_state, OFD_INIT) SEP FUNC(wd_ctrl_state, OFD_RUN) + +#define EXPAND_WEIGHT_ORDER(FUNC, SEP) FUNC(weight_order, DEPTH_FIRST) SEP FUNC(weight_order, PART_KERNEL_FIRST) + +#ifdef __cplusplus +} +#endif +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu65_interface.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu65_interface.h new file mode 100644 index 0000000..c09cbf8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu65_interface.h @@ -0,0 +1,26061 @@ + +/* + * Copyright (c) 2020-2021 Arm Limited. All rights reserved. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#ifndef ETHOSU65_INTERFACE_H +#define ETHOSU65_INTERFACE_H + +#ifdef __KERNEL__ +#include +#else +#include +#endif + +#if !defined(__cplusplus) || __cplusplus < 201402L +#define CONSTEXPR +#else +#define CONSTEXPR constexpr +#endif + +#ifndef __cplusplus +#define STRUCT struct +#else +#define STRUCT +#endif + +#if defined(__cplusplus) && defined(NPU_DISASSEMBLE) +#include +#include +#include +#endif + +#if defined(__cplusplus) && !defined(NPU_NAMESPACE) +#define NPU_NAMESPACE npu +#endif + +#ifdef __cplusplus +#include +#include +#endif + +#ifdef __cplusplus +namespace NPU_NAMESPACE +{ +#endif +#define NNX_ARCH_VERSION_MAJOR 1 +#define NNX_ARCH_VERSION_MINOR 0 +#define NNX_ARCH_VERSION_PATCH 6 + +// Register offsets +// +// Register subpage BASE +// +#define NPU_REG_ID 0x0000 +#define NPU_REG_STATUS 0x0004 +#define NPU_REG_CMD 0x0008 +#define NPU_REG_RESET 0x000C +#define NPU_REG_QBASE 0x0010 +#define NPU_REG_QBASE_HI 0x0014 +#define NPU_REG_QREAD 0x0018 +#define NPU_REG_QCONFIG 0x001C +#define NPU_REG_QSIZE 0x0020 +#define NPU_REG_PROT 0x0024 +#define NPU_REG_CONFIG 0x0028 +#define NPU_REG_LOCK 0x002C +#define NPU_REG_REGIONCFG 0x003C +#define NPU_REG_AXI_LIMIT0 0x0040 +#define NPU_REG_AXI_LIMIT1 0x0044 +#define NPU_REG_AXI_LIMIT2 0x0048 +#define NPU_REG_AXI_LIMIT3 0x004C +#define BASE_REGISTERS_SIZE 0x0080 + +// +// Register subpage BASE_POINTERS +// +#define NPU_REG_BASEP_BASE 0x0080 +#define NPU_REG_BASEP_ARRLEN 0x0008 +#define BASE_POINTERS_REGISTERS_SIZE 0x0100 + +// +// Register subpage DEBUG +// +#define NPU_REG_WD_STATUS 0x0100 +#define NPU_REG_MAC_STATUS 0x0104 +#define NPU_REG_AO_STATUS 0x0108 +#define NPU_REG_DMA_STATUS0 0x0110 +#define NPU_REG_DMA_STATUS1 0x0114 +#define NPU_REG_CLKFORCE 0x0140 +#define NPU_REG_DEBUG_ADDRESS 0x0144 +#define NPU_REG_DEBUG_MISC 0x0148 +#define NPU_REG_DEBUGCORE 0x014C +#define NPU_REG_DEBUG_BLOCK 0x0150 +#define DEBUG_REGISTERS_SIZE 0x0180 + +// +// Register subpage PMU +// +#define NPU_REG_PMCR 0x0180 +#define NPU_REG_PMCNTENSET 0x0184 +#define NPU_REG_PMCNTENCLR 0x0188 +#define NPU_REG_PMOVSSET 0x018C +#define NPU_REG_PMOVSCLR 0x0190 +#define NPU_REG_PMINTSET 0x0194 +#define NPU_REG_PMINTCLR 0x0198 +#define NPU_REG_PMCCNTR 0x01A0 +#define NPU_REG_PMCCNTR_HI 0x01A4 +#define NPU_REG_PMCCNTR_CFG 0x01A8 +#define NPU_REG_PMCAXI_CHAN 0x01AC +#define PMU_REGISTERS_SIZE 0x0200 + +// +// Register subpage TSU_DEBUG +// +#define NPU_REG_KERNEL_X 0x0200 +#define NPU_REG_KERNEL_Y 0x0204 +#define NPU_REG_KERNEL_W_M1 0x0208 +#define NPU_REG_KERNEL_H_M1 0x020C +#define NPU_REG_OFM_CBLK_WIDTH_M1 0x0210 +#define NPU_REG_OFM_CBLK_HEIGHT_M1 0x0214 +#define NPU_REG_OFM_CBLK_DEPTH_M1 0x0218 +#define NPU_REG_IFM_CBLK_DEPTH_M1 0x021C +#define NPU_REG_OFM_X 0x0220 +#define NPU_REG_OFM_Y 0x0224 +#define NPU_REG_OFM_Z 0x0228 +#define NPU_REG_IFM_Z 0x022C +#define NPU_REG_PAD_TOP 0x0230 +#define NPU_REG_PAD_LEFT 0x0234 +#define NPU_REG_IFM_CBLK_WIDTH 0x0238 +#define NPU_REG_IFM_CBLK_HEIGHT 0x023C +#define NPU_REG_DMA_IFM_SRC 0x0240 +#define NPU_REG_DMA_IFM_SRC_HI 0x0244 +#define NPU_REG_DMA_IFM_DST 0x0248 +#define NPU_REG_DMA_OFM_SRC 0x024C +#define NPU_REG_DMA_OFM_DST 0x0250 +#define NPU_REG_DMA_OFM_DST_HI 0x0254 +#define NPU_REG_DMA_WEIGHT_SRC 0x0258 +#define NPU_REG_DMA_WEIGHT_SRC_HI 0x025C +#define NPU_REG_DMA_CMD_SRC 0x0260 +#define NPU_REG_DMA_CMD_SRC_HI 0x0264 +#define NPU_REG_DMA_CMD_SIZE 0x0268 +#define NPU_REG_DMA_M2M_SRC 0x026C +#define NPU_REG_DMA_M2M_SRC_HI 0x0270 +#define NPU_REG_DMA_M2M_DST 0x0274 +#define NPU_REG_DMA_M2M_DST_HI 0x0278 +#define 
NPU_REG_CURRENT_QREAD 0x027C +#define NPU_REG_DMA_SCALE_SRC 0x0280 +#define NPU_REG_DMA_SCALE_SRC_HI 0x0284 +#define NPU_REG_CURRENT_BLOCK 0x02B4 +#define NPU_REG_CURRENT_OP 0x02B8 +#define NPU_REG_CURRENT_CMD 0x02BC +#define TSU_DEBUG_REGISTERS_SIZE 0x02C0 + +// +// Register subpage PMU_COUNTERS +// +#define NPU_REG_PMEVCNTR_BASE 0x0300 +#define NPU_REG_PMEVCNTR_ARRLEN 0x0004 +#define NPU_REG_PMEVTYPER_BASE 0x0380 +#define NPU_REG_PMEVTYPER_ARRLEN 0x0004 +#define PMU_COUNTERS_REGISTERS_SIZE 0x0400 + +// +// Register subpage SHARED_BUFFER +// +#define NPU_REG_SHARED_BUFFER_BASE 0x0400 +#define NPU_REG_SHARED_BUFFER_ARRLEN 0x0100 +#define SHARED_BUFFER_REGISTERS_SIZE 0x0800 + +// +// Register subpage TSU_IFM +// +#define NPU_REG_IFM_PAD_TOP 0x0800 +#define NPU_REG_IFM_PAD_LEFT 0x0804 +#define NPU_REG_IFM_PAD_RIGHT 0x0808 +#define NPU_REG_IFM_PAD_BOTTOM 0x080C +#define NPU_REG_IFM_DEPTH_M1 0x0810 +#define NPU_REG_IFM_PRECISION 0x0814 +#define NPU_REG_IFM_UPSCALE 0x081C +#define NPU_REG_IFM_ZERO_POINT 0x0824 +#define NPU_REG_IFM_WIDTH0_M1 0x0828 +#define NPU_REG_IFM_HEIGHT0_M1 0x082C +#define NPU_REG_IFM_HEIGHT1_M1 0x0830 +#define NPU_REG_IFM_IB_END 0x0834 +#define NPU_REG_IFM_REGION 0x083C +#define TSU_IFM_REGISTERS_SIZE 0x0840 + +// +// Register subpage TSU_OFM +// +#define NPU_REG_OFM_WIDTH_M1 0x0844 +#define NPU_REG_OFM_HEIGHT_M1 0x0848 +#define NPU_REG_OFM_DEPTH_M1 0x084C +#define NPU_REG_OFM_PRECISION 0x0850 +#define NPU_REG_OFM_BLK_WIDTH_M1 0x0854 +#define NPU_REG_OFM_BLK_HEIGHT_M1 0x0858 +#define NPU_REG_OFM_BLK_DEPTH_M1 0x085C +#define NPU_REG_OFM_ZERO_POINT 0x0860 +#define NPU_REG_OFM_WIDTH0_M1 0x0868 +#define NPU_REG_OFM_HEIGHT0_M1 0x086C +#define NPU_REG_OFM_HEIGHT1_M1 0x0870 +#define NPU_REG_OFM_REGION 0x087C +#define TSU_OFM_REGISTERS_SIZE 0x0880 + +// +// Register subpage TSU_KERNEL +// +#define NPU_REG_KERNEL_WIDTH_M1 0x0880 +#define NPU_REG_KERNEL_HEIGHT_M1 0x0884 +#define NPU_REG_KERNEL_STRIDE 0x0888 +#define NPU_REG_PARALLEL_MODE 0x088C +#define NPU_REG_ACC_FORMAT 0x0890 +#define NPU_REG_ACTIVATION 0x0894 +#define NPU_REG_ACTIVATION_MIN 0x0898 +#define NPU_REG_ACTIVATION_MAX 0x089C +#define NPU_REG_WEIGHT_REGION 0x08A0 +#define NPU_REG_SCALE_REGION 0x08A4 +#define NPU_REG_AB_START 0x08B4 +#define NPU_REG_BLOCKDEP 0x08BC +#define TSU_KERNEL_REGISTERS_SIZE 0x08C0 + +// +// Register subpage TSU_DMA +// +#define NPU_REG_DMA0_SRC_REGION 0x08C0 +#define NPU_REG_DMA0_DST_REGION 0x08C4 +#define NPU_REG_DMA0_SIZE0 0x08C8 +#define NPU_REG_DMA0_SIZE1 0x08CC +#define TSU_DMA_REGISTERS_SIZE 0x0900 + +// +// Register subpage TSU_IFM2 +// +#define NPU_REG_IFM2_BROADCAST 0x0900 +#define NPU_REG_IFM2_SCALAR 0x0904 +#define NPU_REG_IFM2_PRECISION 0x0914 +#define NPU_REG_IFM2_ZERO_POINT 0x0924 +#define NPU_REG_IFM2_WIDTH0_M1 0x0928 +#define NPU_REG_IFM2_HEIGHT0_M1 0x092C +#define NPU_REG_IFM2_HEIGHT1_M1 0x0930 +#define NPU_REG_IFM2_IB_START 0x0934 +#define NPU_REG_IFM2_REGION 0x093C +#define TSU_IFM2_REGISTERS_SIZE 0x0940 + +// +// Register subpage TSU_IFM_BASE +// +#define NPU_REG_IFM_BASE0 0x0A00 +#define NPU_REG_IFM_BASE0_HI 0x0A04 +#define NPU_REG_IFM_BASE1 0x0A08 +#define NPU_REG_IFM_BASE1_HI 0x0A0C +#define NPU_REG_IFM_BASE2 0x0A10 +#define NPU_REG_IFM_BASE2_HI 0x0A14 +#define NPU_REG_IFM_BASE3 0x0A18 +#define NPU_REG_IFM_BASE3_HI 0x0A1C +#define NPU_REG_IFM_STRIDE_X 0x0A20 +#define NPU_REG_IFM_STRIDE_X_HI 0x0A24 +#define NPU_REG_IFM_STRIDE_Y 0x0A28 +#define NPU_REG_IFM_STRIDE_Y_HI 0x0A2C +#define NPU_REG_IFM_STRIDE_C 0x0A30 +#define NPU_REG_IFM_STRIDE_C_HI 0x0A34 +#define 
TSU_IFM_BASE_REGISTERS_SIZE 0x0A40 + +// +// Register subpage TSU_OFM_BASE +// +#define NPU_REG_OFM_BASE0 0x0A40 +#define NPU_REG_OFM_BASE0_HI 0x0A44 +#define NPU_REG_OFM_BASE1 0x0A48 +#define NPU_REG_OFM_BASE1_HI 0x0A4C +#define NPU_REG_OFM_BASE2 0x0A50 +#define NPU_REG_OFM_BASE2_HI 0x0A54 +#define NPU_REG_OFM_BASE3 0x0A58 +#define NPU_REG_OFM_BASE3_HI 0x0A5C +#define NPU_REG_OFM_STRIDE_X 0x0A60 +#define NPU_REG_OFM_STRIDE_X_HI 0x0A64 +#define NPU_REG_OFM_STRIDE_Y 0x0A68 +#define NPU_REG_OFM_STRIDE_Y_HI 0x0A6C +#define NPU_REG_OFM_STRIDE_C 0x0A70 +#define NPU_REG_OFM_STRIDE_C_HI 0x0A74 +#define TSU_OFM_BASE_REGISTERS_SIZE 0x0A80 + +// +// Register subpage TSU_WS_BASE +// +#define NPU_REG_WEIGHT_BASE 0x0A80 +#define NPU_REG_WEIGHT_BASE_HI 0x0A84 +#define NPU_REG_WEIGHT_LENGTH 0x0A88 +#define NPU_REG_WEIGHT_LENGTH_HI 0x0A8C +#define NPU_REG_SCALE_BASE 0x0A90 +#define NPU_REG_SCALE_BASE_HI 0x0A94 +#define NPU_REG_SCALE_LENGTH 0x0A98 +#define NPU_REG_SCALE_LENGTH_HI 0x0A9C +#define NPU_REG_OFM_SCALE 0x0AA0 +#define NPU_REG_OFM_SCALE_SHIFT 0x0AA4 +#define NPU_REG_OPA_SCALE 0x0AA8 +#define NPU_REG_OPA_SCALE_SHIFT 0x0AAC +#define NPU_REG_OPB_SCALE 0x0AB0 +#define TSU_WS_BASE_REGISTERS_SIZE 0x0AC0 + +// +// Register subpage TSU_DMA_BASE +// +#define NPU_REG_DMA0_SRC 0x0AC0 +#define NPU_REG_DMA0_SRC_HI 0x0AC4 +#define NPU_REG_DMA0_DST 0x0AC8 +#define NPU_REG_DMA0_DST_HI 0x0ACC +#define NPU_REG_DMA0_LEN 0x0AD0 +#define NPU_REG_DMA0_LEN_HI 0x0AD4 +#define NPU_REG_DMA0_SKIP0 0x0AD8 +#define NPU_REG_DMA0_SKIP0_HI 0x0ADC +#define NPU_REG_DMA0_SKIP1 0x0AE0 +#define NPU_REG_DMA0_SKIP1_HI 0x0AE4 +#define TSU_DMA_BASE_REGISTERS_SIZE 0x0B00 + +// +// Register subpage TSU_IFM2_BASE +// +#define NPU_REG_IFM2_BASE0 0x0B00 +#define NPU_REG_IFM2_BASE0_HI 0x0B04 +#define NPU_REG_IFM2_BASE1 0x0B08 +#define NPU_REG_IFM2_BASE1_HI 0x0B0C +#define NPU_REG_IFM2_BASE2 0x0B10 +#define NPU_REG_IFM2_BASE2_HI 0x0B14 +#define NPU_REG_IFM2_BASE3 0x0B18 +#define NPU_REG_IFM2_BASE3_HI 0x0B1C +#define NPU_REG_IFM2_STRIDE_X 0x0B20 +#define NPU_REG_IFM2_STRIDE_X_HI 0x0B24 +#define NPU_REG_IFM2_STRIDE_Y 0x0B28 +#define NPU_REG_IFM2_STRIDE_Y_HI 0x0B2C +#define NPU_REG_IFM2_STRIDE_C 0x0B30 +#define NPU_REG_IFM2_STRIDE_C_HI 0x0B34 +#define TSU_IFM2_BASE_REGISTERS_SIZE 0x0B40 + +// +// Register subpage TSU_WS1_BASE +// +#define NPU_REG_WEIGHT1_BASE 0x0B40 +#define NPU_REG_WEIGHT1_BASE_HI 0x0B44 +#define NPU_REG_WEIGHT1_LENGTH 0x0B48 +#define NPU_REG_WEIGHT1_LENGTH_HI 0x0B4C +#define NPU_REG_SCALE1_BASE 0x0B50 +#define NPU_REG_SCALE1_BASE_HI 0x0B54 +#define NPU_REG_SCALE1_LENGTH 0x0B58 +#define NPU_REG_SCALE1_LENGTH_HI 0x0B5C +#define TSU_WS1_BASE_REGISTERS_SIZE 0x0B80 + +// +// Register subpage TSU_USER_BASE +// +#define TSU_USER_BASE_REGISTERS_SIZE 0x0BC0 + +// +// Register subpage TSU_DMA_EBASE +// +#define TSU_DMA_EBASE_REGISTERS_SIZE 0x0C00 + +// +// Register subpage ID +// +#define NPU_REG_REVISION 0x0FC0 +#define NPU_REG_PID4 0x0FD0 +#define NPU_REG_PID5 0x0FD4 +#define NPU_REG_PID6 0x0FD8 +#define NPU_REG_PID7 0x0FDC +#define NPU_REG_PID0 0x0FE0 +#define NPU_REG_PID1 0x0FE4 +#define NPU_REG_PID2 0x0FE8 +#define NPU_REG_PID3 0x0FEC +#define NPU_REG_CID0 0x0FF0 +#define NPU_REG_CID1 0x0FF4 +#define NPU_REG_CID2 0x0FF8 +#define NPU_REG_CID3 0x0FFC +#define ID_REGISTERS_SIZE 0x1000 + +#ifdef __cplusplus +// Enum types +enum class acc_format : uint8_t +{ + I32 = 0, + I40 = 1, + F16 = 2, +}; + +enum class activation_clip_range : uint8_t +{ + OFM_PRECISION = 0, + FORCE_UINT8 = 2, + FORCE_INT8 = 3, + FORCE_INT16 = 5, +}; + +enum class 
activation_format : uint8_t +{ + NHWC = 0, + NHCWB16 = 1, +}; + +enum class activation_function : uint8_t +{ + RELU = 0, + TANH = 3, + SIGMOID = 4, + TABLE_0 = 16, + TABLE_1 = 17, + TABLE_2 = 18, + TABLE_3 = 19, + TABLE_4 = 20, + TABLE_5 = 21, + TABLE_6 = 22, + TABLE_7 = 23, +}; + +enum class activation_precision : uint8_t +{ + B8 = 0, + B16 = 1, + B32 = 2, + B64 = 3, +}; + +enum class activation_type : uint8_t +{ + UNSIGNED = 0, + SIGNED = 1, +}; + +enum class axi_mem_encoding : uint8_t +{ + DEVICE_NON_BUFFERABLE = 0, + DEVICE_BUFFERABLE = 1, + NORMAL_NON_CACHEABLE_NON_BUFFERABLE = 2, + NORMAL_NON_CACHEABLE_BUFFERABLE = 3, + WRITE_THROUGH_NO_ALLOCATE = 4, + WRITE_THROUGH_READ_ALLOCATE = 5, + WRITE_THROUGH_WRITE_ALLOCATE = 6, + WRITE_THROUGH_READ_AND_WRITE_ALLOCATE = 7, + WRITE_BACK_NO_ALLOCATE = 8, + WRITE_BACK_READ_ALLOCATE = 9, + WRITE_BACK_WRITE_ALLOCATE = 10, + WRITE_BACK_READ_AND_WRITE_ALLOCATE = 11, +}; + +enum class broadcast_mode : uint8_t +{ + DISABLE = 0, + ENABLE = 1, +}; + +enum class cmd0_opcode : uint16_t +{ + NPU_OP_STOP = 0, + NPU_OP_IRQ = 1, + NPU_OP_CONV = 2, + NPU_OP_DEPTHWISE = 3, + NPU_OP_POOL = 5, + NPU_OP_ELEMENTWISE = 6, + NPU_OP_DMA_START = 16, + NPU_OP_DMA_WAIT = 17, + NPU_OP_KERNEL_WAIT = 18, + NPU_OP_PMU_MASK = 19, + NPU_SET_IFM_PAD_TOP = 256, + NPU_SET_IFM_PAD_LEFT = 257, + NPU_SET_IFM_PAD_RIGHT = 258, + NPU_SET_IFM_PAD_BOTTOM = 259, + NPU_SET_IFM_DEPTH_M1 = 260, + NPU_SET_IFM_PRECISION = 261, + NPU_SET_IFM_UPSCALE = 263, + NPU_SET_IFM_ZERO_POINT = 265, + NPU_SET_IFM_WIDTH0_M1 = 266, + NPU_SET_IFM_HEIGHT0_M1 = 267, + NPU_SET_IFM_HEIGHT1_M1 = 268, + NPU_SET_IFM_IB_END = 269, + NPU_SET_IFM_REGION = 271, + NPU_SET_OFM_WIDTH_M1 = 273, + NPU_SET_OFM_HEIGHT_M1 = 274, + NPU_SET_OFM_DEPTH_M1 = 275, + NPU_SET_OFM_PRECISION = 276, + NPU_SET_OFM_BLK_WIDTH_M1 = 277, + NPU_SET_OFM_BLK_HEIGHT_M1 = 278, + NPU_SET_OFM_BLK_DEPTH_M1 = 279, + NPU_SET_OFM_ZERO_POINT = 280, + NPU_SET_OFM_WIDTH0_M1 = 282, + NPU_SET_OFM_HEIGHT0_M1 = 283, + NPU_SET_OFM_HEIGHT1_M1 = 284, + NPU_SET_OFM_REGION = 287, + NPU_SET_KERNEL_WIDTH_M1 = 288, + NPU_SET_KERNEL_HEIGHT_M1 = 289, + NPU_SET_KERNEL_STRIDE = 290, + NPU_SET_PARALLEL_MODE = 291, + NPU_SET_ACC_FORMAT = 292, + NPU_SET_ACTIVATION = 293, + NPU_SET_ACTIVATION_MIN = 294, + NPU_SET_ACTIVATION_MAX = 295, + NPU_SET_WEIGHT_REGION = 296, + NPU_SET_SCALE_REGION = 297, + NPU_SET_AB_START = 301, + NPU_SET_BLOCKDEP = 303, + NPU_SET_DMA0_SRC_REGION = 304, + NPU_SET_DMA0_DST_REGION = 305, + NPU_SET_DMA0_SIZE0 = 306, + NPU_SET_DMA0_SIZE1 = 307, + NPU_SET_IFM2_BROADCAST = 384, + NPU_SET_IFM2_SCALAR = 385, + NPU_SET_IFM2_PRECISION = 389, + NPU_SET_IFM2_ZERO_POINT = 393, + NPU_SET_IFM2_WIDTH0_M1 = 394, + NPU_SET_IFM2_HEIGHT0_M1 = 395, + NPU_SET_IFM2_HEIGHT1_M1 = 396, + NPU_SET_IFM2_IB_START = 397, + NPU_SET_IFM2_REGION = 399, +}; + +enum class cmd1_opcode : uint16_t +{ + NPU_SET_IFM_BASE0 = 0, + NPU_SET_IFM_BASE1 = 1, + NPU_SET_IFM_BASE2 = 2, + NPU_SET_IFM_BASE3 = 3, + NPU_SET_IFM_STRIDE_X = 4, + NPU_SET_IFM_STRIDE_Y = 5, + NPU_SET_IFM_STRIDE_C = 6, + NPU_SET_OFM_BASE0 = 16, + NPU_SET_OFM_BASE1 = 17, + NPU_SET_OFM_BASE2 = 18, + NPU_SET_OFM_BASE3 = 19, + NPU_SET_OFM_STRIDE_X = 20, + NPU_SET_OFM_STRIDE_Y = 21, + NPU_SET_OFM_STRIDE_C = 22, + NPU_SET_WEIGHT_BASE = 32, + NPU_SET_WEIGHT_LENGTH = 33, + NPU_SET_SCALE_BASE = 34, + NPU_SET_SCALE_LENGTH = 35, + NPU_SET_OFM_SCALE = 36, + NPU_SET_OPA_SCALE = 37, + NPU_SET_OPB_SCALE = 38, + NPU_SET_DMA0_SRC = 48, + NPU_SET_DMA0_DST = 49, + NPU_SET_DMA0_LEN = 50, + NPU_SET_DMA0_SKIP0 = 51, + NPU_SET_DMA0_SKIP1 = 52, + 
NPU_SET_IFM2_BASE0 = 128, + NPU_SET_IFM2_BASE1 = 129, + NPU_SET_IFM2_BASE2 = 130, + NPU_SET_IFM2_BASE3 = 131, + NPU_SET_IFM2_STRIDE_X = 132, + NPU_SET_IFM2_STRIDE_Y = 133, + NPU_SET_IFM2_STRIDE_C = 134, + NPU_SET_WEIGHT1_BASE = 144, + NPU_SET_WEIGHT1_LENGTH = 145, + NPU_SET_SCALE1_BASE = 146, + NPU_SET_SCALE1_LENGTH = 147, +}; + +enum class cmd_ctrl : uint8_t +{ + CMD0_CTRL = 0, + CMD1_CTRL = 1, +}; + +enum class custom_dma : uint8_t +{ + NOT_IMPLEMENTED = 0, + IMPLEMENTED = 1, +}; + +enum class dma_fault_src : uint8_t +{ + AXI_M0 = 0, + AXI_M1 = 1, +}; + +enum class dma_region_mode : uint8_t +{ + EXTERNAL = 0, + INTERNAL = 1, +}; + +enum class dma_stride_mode : uint8_t +{ + D1 = 0, + D2 = 1, + D3 = 2, +}; + +enum class elementwise_mode : uint8_t +{ + MUL = 0, + ADD = 1, + SUB = 2, + MIN = 3, + MAX = 4, + LRELU = 5, + ABS = 6, + CLZ = 7, + SHR = 8, + SHL = 9, +}; + +enum class functional_safety : uint8_t +{ + NOT_IMPLEMENTED = 0, + IMPLEMENTED = 1, +}; + +enum class ifm2_operand_order : uint8_t +{ + ORDER_B = 0, + ORDER_A = 1, +}; + +enum class ifm_scale_mode : uint8_t +{ + OPA_OPB_16 = 0, + OPA_32 = 1, + OPB_32 = 2, +}; + +enum class ifm_upscale_mode : uint8_t +{ + NONE = 0, + NEAREST = 1, + ZEROS = 2, +}; + +enum class kernel_decomposition : uint8_t +{ + D8X8 = 0, + D4X4 = 1, +}; + +enum class kernel_dilation : uint8_t +{ + NONE = 0, + X2 = 1, +}; + +enum class max_beats : uint8_t +{ + B64 = 0, + B128 = 1, + B256 = 2, +}; + +enum class mem_attr : uint8_t +{ + AXI0_OUTSTANDING_COUNTER0 = 0, + AXI0_OUTSTANDING_COUNTER1 = 1, + AXI1_OUTSTANDING_COUNTER2 = 2, + AXI1_OUTSTANDING_COUNTER3 = 3, +}; + +enum class ofm_scale_mode : uint8_t +{ + PER_CHANNEL = 0, + GLOBAL = 1, +}; + +enum class parallel_mode : uint8_t +{ + SINGLE_CORE = 0, + DUAL_CORE_DEPTH = 1, +}; + +enum class pmu_axi_channel : uint8_t +{ + RD_CMD = 0, + RD_IFM = 1, + RD_WEIGHTS = 2, + RD_SCALE_BIAS = 3, + RD_MEM2MEM = 4, + WR_OFM = 8, + WR_MEM2MEM = 9, +}; + +enum class pmu_event : uint16_t +{ + NO_EVENT = 0, + CYCLE = 17, + NPU_IDLE = 32, + CC_STALLED_ON_BLOCKDEP = 33, + CC_STALLED_ON_SHRAM_RECONFIG = 34, + NPU_ACTIVE = 35, + MAC_ACTIVE = 48, + MAC_ACTIVE_8BIT = 49, + MAC_ACTIVE_16BIT = 50, + MAC_DPU_ACTIVE = 51, + MAC_STALLED_BY_WD_ACC = 52, + MAC_STALLED_BY_WD = 53, + MAC_STALLED_BY_ACC = 54, + MAC_STALLED_BY_IB = 55, + MAC_ACTIVE_32BIT = 56, + MAC_STALLED_BY_INT_W = 57, + MAC_STALLED_BY_INT_ACC = 58, + AO_ACTIVE = 64, + AO_ACTIVE_8BIT = 65, + AO_ACTIVE_16BIT = 66, + AO_STALLED_BY_OFMP_OB = 67, + AO_STALLED_BY_OFMP = 68, + AO_STALLED_BY_OB = 69, + AO_STALLED_BY_ACC_IB = 70, + AO_STALLED_BY_ACC = 71, + AO_STALLED_BY_IB = 72, + WD_ACTIVE = 80, + WD_STALLED = 81, + WD_STALLED_BY_WS = 82, + WD_STALLED_BY_WD_BUF = 83, + WD_PARSE_ACTIVE = 84, + WD_PARSE_STALLED = 85, + WD_PARSE_STALLED_IN = 86, + WD_PARSE_STALLED_OUT = 87, + WD_TRANS_WS = 88, + WD_TRANS_WB = 89, + WD_TRANS_DW0 = 90, + WD_TRANS_DW1 = 91, + AXI0_RD_TRANS_ACCEPTED = 128, + AXI0_RD_TRANS_COMPLETED = 129, + AXI0_RD_DATA_BEAT_RECEIVED = 130, + AXI0_RD_TRAN_REQ_STALLED = 131, + AXI0_WR_TRANS_ACCEPTED = 132, + AXI0_WR_TRANS_COMPLETED_M = 133, + AXI0_WR_TRANS_COMPLETED_S = 134, + AXI0_WR_DATA_BEAT_WRITTEN = 135, + AXI0_WR_TRAN_REQ_STALLED = 136, + AXI0_WR_DATA_BEAT_STALLED = 137, + AXI0_ENABLED_CYCLES = 140, + AXI0_RD_STALL_LIMIT = 142, + AXI0_WR_STALL_LIMIT = 143, + AXI_LATENCY_ANY = 160, + AXI_LATENCY_32 = 161, + AXI_LATENCY_64 = 162, + AXI_LATENCY_128 = 163, + AXI_LATENCY_256 = 164, + AXI_LATENCY_512 = 165, + AXI_LATENCY_1024 = 166, + ECC_DMA = 176, + ECC_SB0 = 177, + 
AXI1_RD_TRANS_ACCEPTED = 384, + AXI1_RD_TRANS_COMPLETED = 385, + AXI1_RD_DATA_BEAT_RECEIVED = 386, + AXI1_RD_TRAN_REQ_STALLED = 387, + AXI1_WR_TRANS_ACCEPTED = 388, + AXI1_WR_TRANS_COMPLETED_M = 389, + AXI1_WR_TRANS_COMPLETED_S = 390, + AXI1_WR_DATA_BEAT_WRITTEN = 391, + AXI1_WR_TRAN_REQ_STALLED = 392, + AXI1_WR_DATA_BEAT_STALLED = 393, + AXI1_ENABLED_CYCLES = 396, + AXI1_RD_STALL_LIMIT = 398, + AXI1_WR_STALL_LIMIT = 399, + ECC_SB1 = 433, +}; + +enum class pooling_mode : uint8_t +{ + MAX = 0, + AVERAGE = 1, + REDUCE_SUM = 2, +}; + +enum class privilege_level : uint8_t +{ + USER = 0, + PRIVILEGED = 1, +}; + +enum class round_mode : uint8_t +{ + DBL = 0, + TRUNCATE = 1, + NATURAL = 2, +}; + +enum class security_level : uint8_t +{ + SECURE = 0, + NON_SECURE = 1, +}; + +enum class state : uint8_t +{ + STOPPED = 0, + RUNNING = 1, +}; + +enum class wd_core_slice_state : uint8_t +{ + HEADER = 0, + PALETTE = 1, + WEIGHTS = 2, +}; + +enum class wd_ctrl_state : uint8_t +{ + IDLE = 0, + DRAIN = 1, + OFD_INIT = 2, + OFD_RUN = 3, +}; + +enum class weight_order : uint8_t +{ + DEPTH_FIRST = 0, + PART_KERNEL_FIRST = 1, +}; + +#else + +enum acc_format +{ + ACC_FORMAT_I32 = 0, + ACC_FORMAT_I40 = 1, + ACC_FORMAT_F16 = 2, +}; + +enum activation_clip_range +{ + ACTIVATION_CLIP_RANGE_OFM_PRECISION = 0, + ACTIVATION_CLIP_RANGE_FORCE_UINT8 = 2, + ACTIVATION_CLIP_RANGE_FORCE_INT8 = 3, + ACTIVATION_CLIP_RANGE_FORCE_INT16 = 5, +}; + +enum activation_format +{ + ACTIVATION_FORMAT_NHWC = 0, + ACTIVATION_FORMAT_NHCWB16 = 1, +}; + +enum activation_function +{ + ACTIVATION_FUNCTION_RELU = 0, + ACTIVATION_FUNCTION_TANH = 3, + ACTIVATION_FUNCTION_SIGMOID = 4, + ACTIVATION_FUNCTION_TABLE_0 = 16, + ACTIVATION_FUNCTION_TABLE_1 = 17, + ACTIVATION_FUNCTION_TABLE_2 = 18, + ACTIVATION_FUNCTION_TABLE_3 = 19, + ACTIVATION_FUNCTION_TABLE_4 = 20, + ACTIVATION_FUNCTION_TABLE_5 = 21, + ACTIVATION_FUNCTION_TABLE_6 = 22, + ACTIVATION_FUNCTION_TABLE_7 = 23, +}; + +enum activation_precision +{ + ACTIVATION_PRECISION_B8 = 0, + ACTIVATION_PRECISION_B16 = 1, + ACTIVATION_PRECISION_B32 = 2, + ACTIVATION_PRECISION_B64 = 3, +}; + +enum activation_type +{ + ACTIVATION_TYPE_UNSIGNED = 0, + ACTIVATION_TYPE_SIGNED = 1, +}; + +enum axi_mem_encoding +{ + AXI_MEM_ENCODING_DEVICE_NON_BUFFERABLE = 0, + AXI_MEM_ENCODING_DEVICE_BUFFERABLE = 1, + AXI_MEM_ENCODING_NORMAL_NON_CACHEABLE_NON_BUFFERABLE = 2, + AXI_MEM_ENCODING_NORMAL_NON_CACHEABLE_BUFFERABLE = 3, + AXI_MEM_ENCODING_WRITE_THROUGH_NO_ALLOCATE = 4, + AXI_MEM_ENCODING_WRITE_THROUGH_READ_ALLOCATE = 5, + AXI_MEM_ENCODING_WRITE_THROUGH_WRITE_ALLOCATE = 6, + AXI_MEM_ENCODING_WRITE_THROUGH_READ_AND_WRITE_ALLOCATE = 7, + AXI_MEM_ENCODING_WRITE_BACK_NO_ALLOCATE = 8, + AXI_MEM_ENCODING_WRITE_BACK_READ_ALLOCATE = 9, + AXI_MEM_ENCODING_WRITE_BACK_WRITE_ALLOCATE = 10, + AXI_MEM_ENCODING_WRITE_BACK_READ_AND_WRITE_ALLOCATE = 11, +}; + +enum broadcast_mode +{ + BROADCAST_MODE_DISABLE = 0, + BROADCAST_MODE_ENABLE = 1, +}; + +enum cmd0_opcode +{ + CMD0_OPCODE_NPU_OP_STOP = 0, + CMD0_OPCODE_NPU_OP_IRQ = 1, + CMD0_OPCODE_NPU_OP_CONV = 2, + CMD0_OPCODE_NPU_OP_DEPTHWISE = 3, + CMD0_OPCODE_NPU_OP_POOL = 5, + CMD0_OPCODE_NPU_OP_ELEMENTWISE = 6, + CMD0_OPCODE_NPU_OP_DMA_START = 16, + CMD0_OPCODE_NPU_OP_DMA_WAIT = 17, + CMD0_OPCODE_NPU_OP_KERNEL_WAIT = 18, + CMD0_OPCODE_NPU_OP_PMU_MASK = 19, + CMD0_OPCODE_NPU_SET_IFM_PAD_TOP = 256, + CMD0_OPCODE_NPU_SET_IFM_PAD_LEFT = 257, + CMD0_OPCODE_NPU_SET_IFM_PAD_RIGHT = 258, + CMD0_OPCODE_NPU_SET_IFM_PAD_BOTTOM = 259, + CMD0_OPCODE_NPU_SET_IFM_DEPTH_M1 = 260, + 
CMD0_OPCODE_NPU_SET_IFM_PRECISION = 261, + CMD0_OPCODE_NPU_SET_IFM_UPSCALE = 263, + CMD0_OPCODE_NPU_SET_IFM_ZERO_POINT = 265, + CMD0_OPCODE_NPU_SET_IFM_WIDTH0_M1 = 266, + CMD0_OPCODE_NPU_SET_IFM_HEIGHT0_M1 = 267, + CMD0_OPCODE_NPU_SET_IFM_HEIGHT1_M1 = 268, + CMD0_OPCODE_NPU_SET_IFM_IB_END = 269, + CMD0_OPCODE_NPU_SET_IFM_REGION = 271, + CMD0_OPCODE_NPU_SET_OFM_WIDTH_M1 = 273, + CMD0_OPCODE_NPU_SET_OFM_HEIGHT_M1 = 274, + CMD0_OPCODE_NPU_SET_OFM_DEPTH_M1 = 275, + CMD0_OPCODE_NPU_SET_OFM_PRECISION = 276, + CMD0_OPCODE_NPU_SET_OFM_BLK_WIDTH_M1 = 277, + CMD0_OPCODE_NPU_SET_OFM_BLK_HEIGHT_M1 = 278, + CMD0_OPCODE_NPU_SET_OFM_BLK_DEPTH_M1 = 279, + CMD0_OPCODE_NPU_SET_OFM_ZERO_POINT = 280, + CMD0_OPCODE_NPU_SET_OFM_WIDTH0_M1 = 282, + CMD0_OPCODE_NPU_SET_OFM_HEIGHT0_M1 = 283, + CMD0_OPCODE_NPU_SET_OFM_HEIGHT1_M1 = 284, + CMD0_OPCODE_NPU_SET_OFM_REGION = 287, + CMD0_OPCODE_NPU_SET_KERNEL_WIDTH_M1 = 288, + CMD0_OPCODE_NPU_SET_KERNEL_HEIGHT_M1 = 289, + CMD0_OPCODE_NPU_SET_KERNEL_STRIDE = 290, + CMD0_OPCODE_NPU_SET_PARALLEL_MODE = 291, + CMD0_OPCODE_NPU_SET_ACC_FORMAT = 292, + CMD0_OPCODE_NPU_SET_ACTIVATION = 293, + CMD0_OPCODE_NPU_SET_ACTIVATION_MIN = 294, + CMD0_OPCODE_NPU_SET_ACTIVATION_MAX = 295, + CMD0_OPCODE_NPU_SET_WEIGHT_REGION = 296, + CMD0_OPCODE_NPU_SET_SCALE_REGION = 297, + CMD0_OPCODE_NPU_SET_AB_START = 301, + CMD0_OPCODE_NPU_SET_BLOCKDEP = 303, + CMD0_OPCODE_NPU_SET_DMA0_SRC_REGION = 304, + CMD0_OPCODE_NPU_SET_DMA0_DST_REGION = 305, + CMD0_OPCODE_NPU_SET_DMA0_SIZE0 = 306, + CMD0_OPCODE_NPU_SET_DMA0_SIZE1 = 307, + CMD0_OPCODE_NPU_SET_IFM2_BROADCAST = 384, + CMD0_OPCODE_NPU_SET_IFM2_SCALAR = 385, + CMD0_OPCODE_NPU_SET_IFM2_PRECISION = 389, + CMD0_OPCODE_NPU_SET_IFM2_ZERO_POINT = 393, + CMD0_OPCODE_NPU_SET_IFM2_WIDTH0_M1 = 394, + CMD0_OPCODE_NPU_SET_IFM2_HEIGHT0_M1 = 395, + CMD0_OPCODE_NPU_SET_IFM2_HEIGHT1_M1 = 396, + CMD0_OPCODE_NPU_SET_IFM2_IB_START = 397, + CMD0_OPCODE_NPU_SET_IFM2_REGION = 399, +}; + +enum cmd1_opcode +{ + CMD1_OPCODE_NPU_SET_IFM_BASE0 = 0, + CMD1_OPCODE_NPU_SET_IFM_BASE1 = 1, + CMD1_OPCODE_NPU_SET_IFM_BASE2 = 2, + CMD1_OPCODE_NPU_SET_IFM_BASE3 = 3, + CMD1_OPCODE_NPU_SET_IFM_STRIDE_X = 4, + CMD1_OPCODE_NPU_SET_IFM_STRIDE_Y = 5, + CMD1_OPCODE_NPU_SET_IFM_STRIDE_C = 6, + CMD1_OPCODE_NPU_SET_OFM_BASE0 = 16, + CMD1_OPCODE_NPU_SET_OFM_BASE1 = 17, + CMD1_OPCODE_NPU_SET_OFM_BASE2 = 18, + CMD1_OPCODE_NPU_SET_OFM_BASE3 = 19, + CMD1_OPCODE_NPU_SET_OFM_STRIDE_X = 20, + CMD1_OPCODE_NPU_SET_OFM_STRIDE_Y = 21, + CMD1_OPCODE_NPU_SET_OFM_STRIDE_C = 22, + CMD1_OPCODE_NPU_SET_WEIGHT_BASE = 32, + CMD1_OPCODE_NPU_SET_WEIGHT_LENGTH = 33, + CMD1_OPCODE_NPU_SET_SCALE_BASE = 34, + CMD1_OPCODE_NPU_SET_SCALE_LENGTH = 35, + CMD1_OPCODE_NPU_SET_OFM_SCALE = 36, + CMD1_OPCODE_NPU_SET_OPA_SCALE = 37, + CMD1_OPCODE_NPU_SET_OPB_SCALE = 38, + CMD1_OPCODE_NPU_SET_DMA0_SRC = 48, + CMD1_OPCODE_NPU_SET_DMA0_DST = 49, + CMD1_OPCODE_NPU_SET_DMA0_LEN = 50, + CMD1_OPCODE_NPU_SET_DMA0_SKIP0 = 51, + CMD1_OPCODE_NPU_SET_DMA0_SKIP1 = 52, + CMD1_OPCODE_NPU_SET_IFM2_BASE0 = 128, + CMD1_OPCODE_NPU_SET_IFM2_BASE1 = 129, + CMD1_OPCODE_NPU_SET_IFM2_BASE2 = 130, + CMD1_OPCODE_NPU_SET_IFM2_BASE3 = 131, + CMD1_OPCODE_NPU_SET_IFM2_STRIDE_X = 132, + CMD1_OPCODE_NPU_SET_IFM2_STRIDE_Y = 133, + CMD1_OPCODE_NPU_SET_IFM2_STRIDE_C = 134, + CMD1_OPCODE_NPU_SET_WEIGHT1_BASE = 144, + CMD1_OPCODE_NPU_SET_WEIGHT1_LENGTH = 145, + CMD1_OPCODE_NPU_SET_SCALE1_BASE = 146, + CMD1_OPCODE_NPU_SET_SCALE1_LENGTH = 147, +}; + +enum cmd_ctrl +{ + CMD_CTRL_CMD0_CTRL = 0, + CMD_CTRL_CMD1_CTRL = 1, +}; + +enum custom_dma +{ + CUSTOM_DMA_NOT_IMPLEMENTED 
= 0, + CUSTOM_DMA_IMPLEMENTED = 1, +}; + +enum dma_fault_src +{ + DMA_FAULT_SRC_AXI_M0 = 0, + DMA_FAULT_SRC_AXI_M1 = 1, +}; + +enum dma_region_mode +{ + DMA_REGION_MODE_EXTERNAL = 0, + DMA_REGION_MODE_INTERNAL = 1, +}; + +enum dma_stride_mode +{ + DMA_STRIDE_MODE_D1 = 0, + DMA_STRIDE_MODE_D2 = 1, + DMA_STRIDE_MODE_D3 = 2, +}; + +enum elementwise_mode +{ + ELEMENTWISE_MODE_MUL = 0, + ELEMENTWISE_MODE_ADD = 1, + ELEMENTWISE_MODE_SUB = 2, + ELEMENTWISE_MODE_MIN = 3, + ELEMENTWISE_MODE_MAX = 4, + ELEMENTWISE_MODE_LRELU = 5, + ELEMENTWISE_MODE_ABS = 6, + ELEMENTWISE_MODE_CLZ = 7, + ELEMENTWISE_MODE_SHR = 8, + ELEMENTWISE_MODE_SHL = 9, +}; + +enum functional_safety +{ + FUNCTIONAL_SAFETY_NOT_IMPLEMENTED = 0, + FUNCTIONAL_SAFETY_IMPLEMENTED = 1, +}; + +enum ifm2_operand_order +{ + IFM2_OPERAND_ORDER_ORDER_B = 0, + IFM2_OPERAND_ORDER_ORDER_A = 1, +}; + +enum ifm_scale_mode +{ + IFM_SCALE_MODE_OPA_OPB_16 = 0, + IFM_SCALE_MODE_OPA_32 = 1, + IFM_SCALE_MODE_OPB_32 = 2, +}; + +enum ifm_upscale_mode +{ + IFM_UPSCALE_MODE_NONE = 0, + IFM_UPSCALE_MODE_NEAREST = 1, + IFM_UPSCALE_MODE_ZEROS = 2, +}; + +enum kernel_decomposition +{ + KERNEL_DECOMPOSITION_D8X8 = 0, + KERNEL_DECOMPOSITION_D4X4 = 1, +}; + +enum kernel_dilation +{ + KERNEL_DILATION_NONE = 0, + KERNEL_DILATION_X2 = 1, +}; + +enum max_beats +{ + MAX_BEATS_B64 = 0, + MAX_BEATS_B128 = 1, + MAX_BEATS_B256 = 2, +}; + +enum mem_attr +{ + MEM_ATTR_AXI0_OUTSTANDING_COUNTER0 = 0, + MEM_ATTR_AXI0_OUTSTANDING_COUNTER1 = 1, + MEM_ATTR_AXI1_OUTSTANDING_COUNTER2 = 2, + MEM_ATTR_AXI1_OUTSTANDING_COUNTER3 = 3, +}; + +enum ofm_scale_mode +{ + OFM_SCALE_MODE_PER_CHANNEL = 0, + OFM_SCALE_MODE_GLOBAL = 1, +}; + +enum parallel_mode +{ + PARALLEL_MODE_SINGLE_CORE = 0, + PARALLEL_MODE_DUAL_CORE_DEPTH = 1, +}; + +enum pmu_axi_channel +{ + PMU_AXI_CHANNEL_RD_CMD = 0, + PMU_AXI_CHANNEL_RD_IFM = 1, + PMU_AXI_CHANNEL_RD_WEIGHTS = 2, + PMU_AXI_CHANNEL_RD_SCALE_BIAS = 3, + PMU_AXI_CHANNEL_RD_MEM2MEM = 4, + PMU_AXI_CHANNEL_WR_OFM = 8, + PMU_AXI_CHANNEL_WR_MEM2MEM = 9, +}; + +enum pmu_event +{ + PMU_EVENT_NO_EVENT = 0, + PMU_EVENT_CYCLE = 17, + PMU_EVENT_NPU_IDLE = 32, + PMU_EVENT_CC_STALLED_ON_BLOCKDEP = 33, + PMU_EVENT_CC_STALLED_ON_SHRAM_RECONFIG = 34, + PMU_EVENT_NPU_ACTIVE = 35, + PMU_EVENT_MAC_ACTIVE = 48, + PMU_EVENT_MAC_ACTIVE_8BIT = 49, + PMU_EVENT_MAC_ACTIVE_16BIT = 50, + PMU_EVENT_MAC_DPU_ACTIVE = 51, + PMU_EVENT_MAC_STALLED_BY_WD_ACC = 52, + PMU_EVENT_MAC_STALLED_BY_WD = 53, + PMU_EVENT_MAC_STALLED_BY_ACC = 54, + PMU_EVENT_MAC_STALLED_BY_IB = 55, + PMU_EVENT_MAC_ACTIVE_32BIT = 56, + PMU_EVENT_MAC_STALLED_BY_INT_W = 57, + PMU_EVENT_MAC_STALLED_BY_INT_ACC = 58, + PMU_EVENT_AO_ACTIVE = 64, + PMU_EVENT_AO_ACTIVE_8BIT = 65, + PMU_EVENT_AO_ACTIVE_16BIT = 66, + PMU_EVENT_AO_STALLED_BY_OFMP_OB = 67, + PMU_EVENT_AO_STALLED_BY_OFMP = 68, + PMU_EVENT_AO_STALLED_BY_OB = 69, + PMU_EVENT_AO_STALLED_BY_ACC_IB = 70, + PMU_EVENT_AO_STALLED_BY_ACC = 71, + PMU_EVENT_AO_STALLED_BY_IB = 72, + PMU_EVENT_WD_ACTIVE = 80, + PMU_EVENT_WD_STALLED = 81, + PMU_EVENT_WD_STALLED_BY_WS = 82, + PMU_EVENT_WD_STALLED_BY_WD_BUF = 83, + PMU_EVENT_WD_PARSE_ACTIVE = 84, + PMU_EVENT_WD_PARSE_STALLED = 85, + PMU_EVENT_WD_PARSE_STALLED_IN = 86, + PMU_EVENT_WD_PARSE_STALLED_OUT = 87, + PMU_EVENT_WD_TRANS_WS = 88, + PMU_EVENT_WD_TRANS_WB = 89, + PMU_EVENT_WD_TRANS_DW0 = 90, + PMU_EVENT_WD_TRANS_DW1 = 91, + PMU_EVENT_AXI0_RD_TRANS_ACCEPTED = 128, + PMU_EVENT_AXI0_RD_TRANS_COMPLETED = 129, + PMU_EVENT_AXI0_RD_DATA_BEAT_RECEIVED = 130, + PMU_EVENT_AXI0_RD_TRAN_REQ_STALLED = 131, + 
PMU_EVENT_AXI0_WR_TRANS_ACCEPTED = 132, + PMU_EVENT_AXI0_WR_TRANS_COMPLETED_M = 133, + PMU_EVENT_AXI0_WR_TRANS_COMPLETED_S = 134, + PMU_EVENT_AXI0_WR_DATA_BEAT_WRITTEN = 135, + PMU_EVENT_AXI0_WR_TRAN_REQ_STALLED = 136, + PMU_EVENT_AXI0_WR_DATA_BEAT_STALLED = 137, + PMU_EVENT_AXI0_ENABLED_CYCLES = 140, + PMU_EVENT_AXI0_RD_STALL_LIMIT = 142, + PMU_EVENT_AXI0_WR_STALL_LIMIT = 143, + PMU_EVENT_AXI_LATENCY_ANY = 160, + PMU_EVENT_AXI_LATENCY_32 = 161, + PMU_EVENT_AXI_LATENCY_64 = 162, + PMU_EVENT_AXI_LATENCY_128 = 163, + PMU_EVENT_AXI_LATENCY_256 = 164, + PMU_EVENT_AXI_LATENCY_512 = 165, + PMU_EVENT_AXI_LATENCY_1024 = 166, + PMU_EVENT_ECC_DMA = 176, + PMU_EVENT_ECC_SB0 = 177, + PMU_EVENT_AXI1_RD_TRANS_ACCEPTED = 384, + PMU_EVENT_AXI1_RD_TRANS_COMPLETED = 385, + PMU_EVENT_AXI1_RD_DATA_BEAT_RECEIVED = 386, + PMU_EVENT_AXI1_RD_TRAN_REQ_STALLED = 387, + PMU_EVENT_AXI1_WR_TRANS_ACCEPTED = 388, + PMU_EVENT_AXI1_WR_TRANS_COMPLETED_M = 389, + PMU_EVENT_AXI1_WR_TRANS_COMPLETED_S = 390, + PMU_EVENT_AXI1_WR_DATA_BEAT_WRITTEN = 391, + PMU_EVENT_AXI1_WR_TRAN_REQ_STALLED = 392, + PMU_EVENT_AXI1_WR_DATA_BEAT_STALLED = 393, + PMU_EVENT_AXI1_ENABLED_CYCLES = 396, + PMU_EVENT_AXI1_RD_STALL_LIMIT = 398, + PMU_EVENT_AXI1_WR_STALL_LIMIT = 399, + PMU_EVENT_ECC_SB1 = 433, +}; + +enum pooling_mode +{ + POOLING_MODE_MAX = 0, + POOLING_MODE_AVERAGE = 1, + POOLING_MODE_REDUCE_SUM = 2, +}; + +enum privilege_level +{ + PRIVILEGE_LEVEL_USER = 0, + PRIVILEGE_LEVEL_PRIVILEGED = 1, +}; + +enum round_mode +{ + ROUND_MODE_DBL = 0, + ROUND_MODE_TRUNCATE = 1, + ROUND_MODE_NATURAL = 2, +}; + +enum security_level +{ + SECURITY_LEVEL_SECURE = 0, + SECURITY_LEVEL_NON_SECURE = 1, +}; + +enum state +{ + STATE_STOPPED = 0, + STATE_RUNNING = 1, +}; + +enum wd_core_slice_state +{ + WD_CORE_SLICE_STATE_HEADER = 0, + WD_CORE_SLICE_STATE_PALETTE = 1, + WD_CORE_SLICE_STATE_WEIGHTS = 2, +}; + +enum wd_ctrl_state +{ + WD_CTRL_STATE_IDLE = 0, + WD_CTRL_STATE_DRAIN = 1, + WD_CTRL_STATE_OFD_INIT = 2, + WD_CTRL_STATE_OFD_RUN = 3, +}; + +enum weight_order +{ + WEIGHT_ORDER_DEPTH_FIRST = 0, + WEIGHT_ORDER_PART_KERNEL_FIRST = 1, +}; + +#endif + +#ifdef NPU_DISASSEMBLE + +static const char *acc_format_str[] = { + "ACC_FORMAT_I32", + "ACC_FORMAT_I40", + "ACC_FORMAT_F16", +}; + +static const char *activation_clip_range_str[] = { + "ACTIVATION_CLIP_RANGE_OFM_PRECISION", + "****", + "ACTIVATION_CLIP_RANGE_FORCE_UINT8", + "ACTIVATION_CLIP_RANGE_FORCE_INT8", + "****", + "ACTIVATION_CLIP_RANGE_FORCE_INT16", +}; + +static const char *activation_format_str[] = { + "ACTIVATION_FORMAT_NHWC", + "ACTIVATION_FORMAT_NHCWB16", +}; + +static const char *activation_function_str[] = { + "ACTIVATION_FUNCTION_RELU", + "****", + "****", + "ACTIVATION_FUNCTION_TANH", + "ACTIVATION_FUNCTION_SIGMOID", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "ACTIVATION_FUNCTION_TABLE_0", + "ACTIVATION_FUNCTION_TABLE_1", + "ACTIVATION_FUNCTION_TABLE_2", + "ACTIVATION_FUNCTION_TABLE_3", + "ACTIVATION_FUNCTION_TABLE_4", + "ACTIVATION_FUNCTION_TABLE_5", + "ACTIVATION_FUNCTION_TABLE_6", + "ACTIVATION_FUNCTION_TABLE_7", +}; + +static const char *activation_precision_str[] = { + "ACTIVATION_PRECISION_B8", + "ACTIVATION_PRECISION_B16", + "ACTIVATION_PRECISION_B32", + "ACTIVATION_PRECISION_B64", +}; + +static const char *activation_type_str[] = { + "ACTIVATION_TYPE_UNSIGNED", + "ACTIVATION_TYPE_SIGNED", +}; + +static const char *axi_mem_encoding_str[] = { + "AXI_MEM_ENCODING_DEVICE_NON_BUFFERABLE", + 
"AXI_MEM_ENCODING_DEVICE_BUFFERABLE", + "AXI_MEM_ENCODING_NORMAL_NON_CACHEABLE_NON_BUFFERABLE", + "AXI_MEM_ENCODING_NORMAL_NON_CACHEABLE_BUFFERABLE", + "AXI_MEM_ENCODING_WRITE_THROUGH_NO_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_THROUGH_READ_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_THROUGH_WRITE_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_THROUGH_READ_AND_WRITE_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_BACK_NO_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_BACK_READ_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_BACK_WRITE_ALLOCATE", + "AXI_MEM_ENCODING_WRITE_BACK_READ_AND_WRITE_ALLOCATE", +}; + +static const char *broadcast_mode_str[] = { + "BROADCAST_MODE_DISABLE", + "BROADCAST_MODE_ENABLE", +}; + +static const char *cmd0_opcode_str[] = { + "CMD0_OPCODE_NPU_OP_STOP", + "CMD0_OPCODE_NPU_OP_IRQ", + "CMD0_OPCODE_NPU_OP_CONV", + "CMD0_OPCODE_NPU_OP_DEPTHWISE", + "****", + "CMD0_OPCODE_NPU_OP_POOL", + "CMD0_OPCODE_NPU_OP_ELEMENTWISE", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_OP_DMA_START", + "CMD0_OPCODE_NPU_OP_DMA_WAIT", + "CMD0_OPCODE_NPU_OP_KERNEL_WAIT", + "CMD0_OPCODE_NPU_OP_PMU_MASK", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_IFM_PAD_TOP", + 
"CMD0_OPCODE_NPU_SET_IFM_PAD_LEFT", + "CMD0_OPCODE_NPU_SET_IFM_PAD_RIGHT", + "CMD0_OPCODE_NPU_SET_IFM_PAD_BOTTOM", + "CMD0_OPCODE_NPU_SET_IFM_DEPTH_M1", + "CMD0_OPCODE_NPU_SET_IFM_PRECISION", + "****", + "CMD0_OPCODE_NPU_SET_IFM_UPSCALE", + "****", + "CMD0_OPCODE_NPU_SET_IFM_ZERO_POINT", + "CMD0_OPCODE_NPU_SET_IFM_WIDTH0_M1", + "CMD0_OPCODE_NPU_SET_IFM_HEIGHT0_M1", + "CMD0_OPCODE_NPU_SET_IFM_HEIGHT1_M1", + "CMD0_OPCODE_NPU_SET_IFM_IB_END", + "****", + "CMD0_OPCODE_NPU_SET_IFM_REGION", + "****", + "CMD0_OPCODE_NPU_SET_OFM_WIDTH_M1", + "CMD0_OPCODE_NPU_SET_OFM_HEIGHT_M1", + "CMD0_OPCODE_NPU_SET_OFM_DEPTH_M1", + "CMD0_OPCODE_NPU_SET_OFM_PRECISION", + "CMD0_OPCODE_NPU_SET_OFM_BLK_WIDTH_M1", + "CMD0_OPCODE_NPU_SET_OFM_BLK_HEIGHT_M1", + "CMD0_OPCODE_NPU_SET_OFM_BLK_DEPTH_M1", + "CMD0_OPCODE_NPU_SET_OFM_ZERO_POINT", + "****", + "CMD0_OPCODE_NPU_SET_OFM_WIDTH0_M1", + "CMD0_OPCODE_NPU_SET_OFM_HEIGHT0_M1", + "CMD0_OPCODE_NPU_SET_OFM_HEIGHT1_M1", + "****", + "****", + "CMD0_OPCODE_NPU_SET_OFM_REGION", + "CMD0_OPCODE_NPU_SET_KERNEL_WIDTH_M1", + "CMD0_OPCODE_NPU_SET_KERNEL_HEIGHT_M1", + "CMD0_OPCODE_NPU_SET_KERNEL_STRIDE", + "CMD0_OPCODE_NPU_SET_PARALLEL_MODE", + "CMD0_OPCODE_NPU_SET_ACC_FORMAT", + "CMD0_OPCODE_NPU_SET_ACTIVATION", + "CMD0_OPCODE_NPU_SET_ACTIVATION_MIN", + "CMD0_OPCODE_NPU_SET_ACTIVATION_MAX", + "CMD0_OPCODE_NPU_SET_WEIGHT_REGION", + "CMD0_OPCODE_NPU_SET_SCALE_REGION", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_AB_START", + "****", + "CMD0_OPCODE_NPU_SET_BLOCKDEP", + "CMD0_OPCODE_NPU_SET_DMA0_SRC_REGION", + "CMD0_OPCODE_NPU_SET_DMA0_DST_REGION", + "CMD0_OPCODE_NPU_SET_DMA0_SIZE0", + "CMD0_OPCODE_NPU_SET_DMA0_SIZE1", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_IFM2_BROADCAST", + "CMD0_OPCODE_NPU_SET_IFM2_SCALAR", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_IFM2_PRECISION", + "****", + "****", + "****", + "CMD0_OPCODE_NPU_SET_IFM2_ZERO_POINT", + "CMD0_OPCODE_NPU_SET_IFM2_WIDTH0_M1", + "CMD0_OPCODE_NPU_SET_IFM2_HEIGHT0_M1", + "CMD0_OPCODE_NPU_SET_IFM2_HEIGHT1_M1", + "CMD0_OPCODE_NPU_SET_IFM2_IB_START", + "****", + "CMD0_OPCODE_NPU_SET_IFM2_REGION", +}; + +static const char *cmd1_opcode_str[] = { + "CMD1_OPCODE_NPU_SET_IFM_BASE0", + "CMD1_OPCODE_NPU_SET_IFM_BASE1", + "CMD1_OPCODE_NPU_SET_IFM_BASE2", + "CMD1_OPCODE_NPU_SET_IFM_BASE3", + "CMD1_OPCODE_NPU_SET_IFM_STRIDE_X", + "CMD1_OPCODE_NPU_SET_IFM_STRIDE_Y", + "CMD1_OPCODE_NPU_SET_IFM_STRIDE_C", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_OFM_BASE0", + "CMD1_OPCODE_NPU_SET_OFM_BASE1", + "CMD1_OPCODE_NPU_SET_OFM_BASE2", + "CMD1_OPCODE_NPU_SET_OFM_BASE3", + "CMD1_OPCODE_NPU_SET_OFM_STRIDE_X", + "CMD1_OPCODE_NPU_SET_OFM_STRIDE_Y", + "CMD1_OPCODE_NPU_SET_OFM_STRIDE_C", + "****", + "****", + "****", + "****", + "****", + "****", + 
"****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_WEIGHT_BASE", + "CMD1_OPCODE_NPU_SET_WEIGHT_LENGTH", + "CMD1_OPCODE_NPU_SET_SCALE_BASE", + "CMD1_OPCODE_NPU_SET_SCALE_LENGTH", + "CMD1_OPCODE_NPU_SET_OFM_SCALE", + "CMD1_OPCODE_NPU_SET_OPA_SCALE", + "CMD1_OPCODE_NPU_SET_OPB_SCALE", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_DMA0_SRC", + "CMD1_OPCODE_NPU_SET_DMA0_DST", + "CMD1_OPCODE_NPU_SET_DMA0_LEN", + "CMD1_OPCODE_NPU_SET_DMA0_SKIP0", + "CMD1_OPCODE_NPU_SET_DMA0_SKIP1", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_IFM2_BASE0", + "CMD1_OPCODE_NPU_SET_IFM2_BASE1", + "CMD1_OPCODE_NPU_SET_IFM2_BASE2", + "CMD1_OPCODE_NPU_SET_IFM2_BASE3", + "CMD1_OPCODE_NPU_SET_IFM2_STRIDE_X", + "CMD1_OPCODE_NPU_SET_IFM2_STRIDE_Y", + "CMD1_OPCODE_NPU_SET_IFM2_STRIDE_C", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "CMD1_OPCODE_NPU_SET_WEIGHT1_BASE", + "CMD1_OPCODE_NPU_SET_WEIGHT1_LENGTH", + "CMD1_OPCODE_NPU_SET_SCALE1_BASE", + "CMD1_OPCODE_NPU_SET_SCALE1_LENGTH", +}; + +static const char *cmd_ctrl_str[] = { + "CMD_CTRL_CMD0_CTRL", + "CMD_CTRL_CMD1_CTRL", +}; + +static const char *custom_dma_str[] = { + "CUSTOM_DMA_NOT_IMPLEMENTED", + "CUSTOM_DMA_IMPLEMENTED", +}; + +static const char *dma_fault_src_str[] = { + "DMA_FAULT_SRC_AXI_M0", + "DMA_FAULT_SRC_AXI_M1", +}; + +static const char *dma_region_mode_str[] = { + "DMA_REGION_MODE_EXTERNAL", + "DMA_REGION_MODE_INTERNAL", +}; + +static const char *dma_stride_mode_str[] = { + "DMA_STRIDE_MODE_D1", + "DMA_STRIDE_MODE_D2", + "DMA_STRIDE_MODE_D3", +}; + +static const char *elementwise_mode_str[] = { + "ELEMENTWISE_MODE_MUL", + "ELEMENTWISE_MODE_ADD", + "ELEMENTWISE_MODE_SUB", + "ELEMENTWISE_MODE_MIN", + "ELEMENTWISE_MODE_MAX", + "ELEMENTWISE_MODE_LRELU", + "ELEMENTWISE_MODE_ABS", + "ELEMENTWISE_MODE_CLZ", + "ELEMENTWISE_MODE_SHR", + "ELEMENTWISE_MODE_SHL", +}; + +static const char *functional_safety_str[] = { + "FUNCTIONAL_SAFETY_NOT_IMPLEMENTED", + "FUNCTIONAL_SAFETY_IMPLEMENTED", +}; + +static const char *ifm2_operand_order_str[] = { + "IFM2_OPERAND_ORDER_ORDER_B", + "IFM2_OPERAND_ORDER_ORDER_A", +}; + +static const char *ifm_scale_mode_str[] = { + "IFM_SCALE_MODE_OPA_OPB_16", + "IFM_SCALE_MODE_OPA_32", + "IFM_SCALE_MODE_OPB_32", +}; + +static const char *ifm_upscale_mode_str[] = { + "IFM_UPSCALE_MODE_NONE", + "IFM_UPSCALE_MODE_NEAREST", + "IFM_UPSCALE_MODE_ZEROS", +}; + +static const char *kernel_decomposition_str[] = { + "KERNEL_DECOMPOSITION_D8X8", + "KERNEL_DECOMPOSITION_D4X4", +}; + +static const char *kernel_dilation_str[] = { + "KERNEL_DILATION_NONE", + "KERNEL_DILATION_X2", +}; + +static const char *max_beats_str[] = { + "MAX_BEATS_B64", + "MAX_BEATS_B128", + "MAX_BEATS_B256", +}; + +static const char *mem_attr_str[] = { + 
"MEM_ATTR_AXI0_OUTSTANDING_COUNTER0", + "MEM_ATTR_AXI0_OUTSTANDING_COUNTER1", + "MEM_ATTR_AXI1_OUTSTANDING_COUNTER2", + "MEM_ATTR_AXI1_OUTSTANDING_COUNTER3", +}; + +static const char *ofm_scale_mode_str[] = { + "OFM_SCALE_MODE_PER_CHANNEL", + "OFM_SCALE_MODE_GLOBAL", +}; + +static const char *parallel_mode_str[] = { + "PARALLEL_MODE_SINGLE_CORE", + "PARALLEL_MODE_DUAL_CORE_DEPTH", +}; + +static const char *pmu_axi_channel_str[] = { + "PMU_AXI_CHANNEL_RD_CMD", + "PMU_AXI_CHANNEL_RD_IFM", + "PMU_AXI_CHANNEL_RD_WEIGHTS", + "PMU_AXI_CHANNEL_RD_SCALE_BIAS", + "PMU_AXI_CHANNEL_RD_MEM2MEM", + "****", + "****", + "****", + "PMU_AXI_CHANNEL_WR_OFM", + "PMU_AXI_CHANNEL_WR_MEM2MEM", +}; + +static const char *pmu_event_str[] = { + "PMU_EVENT_NO_EVENT", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_CYCLE", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_NPU_IDLE", + "PMU_EVENT_CC_STALLED_ON_BLOCKDEP", + "PMU_EVENT_CC_STALLED_ON_SHRAM_RECONFIG", + "PMU_EVENT_NPU_ACTIVE", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_MAC_ACTIVE", + "PMU_EVENT_MAC_ACTIVE_8BIT", + "PMU_EVENT_MAC_ACTIVE_16BIT", + "PMU_EVENT_MAC_DPU_ACTIVE", + "PMU_EVENT_MAC_STALLED_BY_WD_ACC", + "PMU_EVENT_MAC_STALLED_BY_WD", + "PMU_EVENT_MAC_STALLED_BY_ACC", + "PMU_EVENT_MAC_STALLED_BY_IB", + "PMU_EVENT_MAC_ACTIVE_32BIT", + "PMU_EVENT_MAC_STALLED_BY_INT_W", + "PMU_EVENT_MAC_STALLED_BY_INT_ACC", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_AO_ACTIVE", + "PMU_EVENT_AO_ACTIVE_8BIT", + "PMU_EVENT_AO_ACTIVE_16BIT", + "PMU_EVENT_AO_STALLED_BY_OFMP_OB", + "PMU_EVENT_AO_STALLED_BY_OFMP", + "PMU_EVENT_AO_STALLED_BY_OB", + "PMU_EVENT_AO_STALLED_BY_ACC_IB", + "PMU_EVENT_AO_STALLED_BY_ACC", + "PMU_EVENT_AO_STALLED_BY_IB", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_WD_ACTIVE", + "PMU_EVENT_WD_STALLED", + "PMU_EVENT_WD_STALLED_BY_WS", + "PMU_EVENT_WD_STALLED_BY_WD_BUF", + "PMU_EVENT_WD_PARSE_ACTIVE", + "PMU_EVENT_WD_PARSE_STALLED", + "PMU_EVENT_WD_PARSE_STALLED_IN", + "PMU_EVENT_WD_PARSE_STALLED_OUT", + "PMU_EVENT_WD_TRANS_WS", + "PMU_EVENT_WD_TRANS_WB", + "PMU_EVENT_WD_TRANS_DW0", + "PMU_EVENT_WD_TRANS_DW1", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_AXI0_RD_TRANS_ACCEPTED", + "PMU_EVENT_AXI0_RD_TRANS_COMPLETED", + "PMU_EVENT_AXI0_RD_DATA_BEAT_RECEIVED", + "PMU_EVENT_AXI0_RD_TRAN_REQ_STALLED", + "PMU_EVENT_AXI0_WR_TRANS_ACCEPTED", + "PMU_EVENT_AXI0_WR_TRANS_COMPLETED_M", + "PMU_EVENT_AXI0_WR_TRANS_COMPLETED_S", + "PMU_EVENT_AXI0_WR_DATA_BEAT_WRITTEN", + "PMU_EVENT_AXI0_WR_TRAN_REQ_STALLED", + "PMU_EVENT_AXI0_WR_DATA_BEAT_STALLED", + "****", + "****", + "PMU_EVENT_AXI0_ENABLED_CYCLES", + "****", + "PMU_EVENT_AXI0_RD_STALL_LIMIT", + "PMU_EVENT_AXI0_WR_STALL_LIMIT", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_AXI_LATENCY_ANY", + 
"PMU_EVENT_AXI_LATENCY_32", + "PMU_EVENT_AXI_LATENCY_64", + "PMU_EVENT_AXI_LATENCY_128", + "PMU_EVENT_AXI_LATENCY_256", + "PMU_EVENT_AXI_LATENCY_512", + "PMU_EVENT_AXI_LATENCY_1024", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_ECC_DMA", + "PMU_EVENT_ECC_SB0", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_AXI1_RD_TRANS_ACCEPTED", + "PMU_EVENT_AXI1_RD_TRANS_COMPLETED", + "PMU_EVENT_AXI1_RD_DATA_BEAT_RECEIVED", + "PMU_EVENT_AXI1_RD_TRAN_REQ_STALLED", + "PMU_EVENT_AXI1_WR_TRANS_ACCEPTED", + "PMU_EVENT_AXI1_WR_TRANS_COMPLETED_M", + "PMU_EVENT_AXI1_WR_TRANS_COMPLETED_S", + "PMU_EVENT_AXI1_WR_DATA_BEAT_WRITTEN", + "PMU_EVENT_AXI1_WR_TRAN_REQ_STALLED", + "PMU_EVENT_AXI1_WR_DATA_BEAT_STALLED", + "****", + "****", + "PMU_EVENT_AXI1_ENABLED_CYCLES", + "****", + "PMU_EVENT_AXI1_RD_STALL_LIMIT", + "PMU_EVENT_AXI1_WR_STALL_LIMIT", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "****", + "PMU_EVENT_ECC_SB1", +}; + +static const char *pooling_mode_str[] = { + "POOLING_MODE_MAX", + "POOLING_MODE_AVERAGE", + "POOLING_MODE_REDUCE_SUM", +}; + +static const char *privilege_level_str[] = { + "PRIVILEGE_LEVEL_USER", + "PRIVILEGE_LEVEL_PRIVILEGED", +}; + +static const char *round_mode_str[] = { + 
"ROUND_MODE_DBL", + "ROUND_MODE_TRUNCATE", + "ROUND_MODE_NATURAL", +}; + +static const char *security_level_str[] = { + "SECURITY_LEVEL_SECURE", + "SECURITY_LEVEL_NON_SECURE", +}; + +static const char *state_str[] = { + "STATE_STOPPED", + "STATE_RUNNING", +}; + +static const char *wd_core_slice_state_str[] = { + "WD_CORE_SLICE_STATE_HEADER", + "WD_CORE_SLICE_STATE_PALETTE", + "WD_CORE_SLICE_STATE_WEIGHTS", +}; + +static const char *wd_ctrl_state_str[] = { + "WD_CTRL_STATE_IDLE", + "WD_CTRL_STATE_DRAIN", + "WD_CTRL_STATE_OFD_INIT", + "WD_CTRL_STATE_OFD_RUN", +}; + +static const char *weight_order_str[] = { + "WEIGHT_ORDER_DEPTH_FIRST", + "WEIGHT_ORDER_PART_KERNEL_FIRST", +}; + +#endif + +// Register type structs +// id_r - ID register +struct id_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t version_status : 4; // This is the version of the product + uint32_t version_minor : 4; // This is the n for the P part of an RnPn release number + uint32_t version_major : 4; // This is the n for the R part of an RnPn release number + uint32_t product_major : 4; // Product major ID number (unique per base product) + uint32_t arch_patch_rev : 4; // This is the patch number of the architecture version a.b + uint32_t + arch_minor_rev : 8; // This is the minor architecture version number, b in the architecture version a.b + uint32_t + arch_major_rev : 4; // This is the major architecture version number, a in the architecture version a.b + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR id_r() : word0(268853249) {} + CONSTEXPR id_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + id_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_version_status() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 0); + return value; + } + uint32_t get_version_status() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR id_r &set_version_status(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & value) << 0); + return *this; + } + volatile id_r &set_version_status(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_version_minor() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 4); + return value; + } + uint32_t get_version_minor() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR id_r &set_version_minor(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & value) << 4); + return *this; + } + volatile id_r &set_version_minor(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_version_major() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 8); + return value; + } + uint32_t get_version_major() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR id_r &set_version_major(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 8) & word0) | ((((1U << 4) - 1) & value) << 8); + return *this; + } + volatile id_r &set_version_major(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 
8) & word0) | ((((1U << 4) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_product_major() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 12); + return value; + } + uint32_t get_product_major() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR id_r &set_product_major(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 12) & word0) | ((((1U << 4) - 1) & value) << 12); + return *this; + } + volatile id_r &set_product_major(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 12) & word0) | ((((1U << 4) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_arch_patch_rev() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 16); + return value; + } + uint32_t get_arch_patch_rev() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR id_r &set_arch_patch_rev(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 16) & word0) | ((((1U << 4) - 1) & value) << 16); + return *this; + } + volatile id_r &set_arch_patch_rev(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 16) & word0) | ((((1U << 4) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_arch_minor_rev() const + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 20); + return value; + } + uint32_t get_arch_minor_rev() const volatile + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 20); + return value; + } + CONSTEXPR id_r &set_arch_minor_rev(uint32_t value) + { + word0 = (((~((1U << 8) - 1)) << 20) & word0) | ((((1U << 8) - 1) & value) << 20); + return *this; + } + volatile id_r &set_arch_minor_rev(uint32_t value) volatile + { + word0 = (((~((1U << 8) - 1)) << 20) & word0) | ((((1U << 8) - 1) & value) << 20); + return *this; + } + CONSTEXPR uint32_t get_arch_major_rev() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 28); + return value; + } + uint32_t get_arch_major_rev() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 28); + return value; + } + CONSTEXPR id_r &set_arch_major_rev(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 28) & word0) | ((((1U << 4) - 1) & value) << 28); + return *this; + } + volatile id_r &set_arch_major_rev(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 28) & word0) | ((((1U << 4) - 1) & value) << 28); + return *this; + } +#endif +}; + +// status_r - Register describes the current operating status of the NPU +struct status_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t state : 1; // NPU state, 0 = Stopped, 1 = Running + uint32_t irq_raised : 1; // Raw IRQ status, 0 = IRQ not raised, 1 = IRQ raised. IRQ is cleared using command + // register bit 1 + uint32_t + bus_status : 1; // 0=OK, 1=Bus abort detected and processing halted (NPU will reach IDLE state and not + // to start process any more commands/AXI transactions). Can only be cleared by a reset + uint32_t reset_status : 1; // Reset is ongoing and only this register can be read (other registers read as 0 + // and writes are ignored.) A value of 0 means NPU is not being reset and can be + // accessed as normal + uint32_t + cmd_parse_error : 1; // 0=No error 1=Command stream parsing error detected. Can only be cleared by reset + uint32_t cmd_end_reached : 1; // 0=Not reached, 1=Reached. Cleared by writing QBASE or QSIZE when NPU is in + // stopped state + uint32_t pmu_irq_raised : 1; // 0=No PMU IRQ, 1=PMU IRQ raised. 
Cleared by using command register bit 1 + uint32_t wd_fault : 1; // Weight decoder state: 0=no fault 1=weight decoder decompression fault. Can only be + // cleared by reset + uint32_t ecc_fault : 1; // ECC state for internal RAMs: 0=no fault 1=ECC fault signalled. Can only be + // cleared by reset + uint32_t reserved0 : 2; + uint32_t faulting_interface : 1; // Faulting interface on bus abort + uint32_t faulting_channel : 4; // Faulting channel on a bus abort. Read: 0=Cmd 1=IFM 2=Weights 3=Scale+Bias + // 4=Mem2Mem; Write: 8=OFM 9=Mem2Mem + uint32_t irq_history_mask : 16; // IRQ History mask + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR status_r() : word0(8) {} + CONSTEXPR status_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + status_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::state get_state() const + { + NPU_NAMESPACE::state value = static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::state get_state() const volatile + { + NPU_NAMESPACE::state value = static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR status_r &set_state(NPU_NAMESPACE::state value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + volatile status_r &set_state(NPU_NAMESPACE::state value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR uint32_t get_irq_raised() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_irq_raised() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR status_r &set_irq_raised(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile status_r &set_irq_raised(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_bus_status() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_bus_status() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR status_r &set_bus_status(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile status_r &set_bus_status(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_reset_status() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_reset_status() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR status_r &set_reset_status(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile status_r &set_reset_status(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_cmd_parse_error() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + 
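/*
 * Editor's note (illustrative sketch, not part of the generated header): status_r is a
 * value wrapper around the 32-bit word read from the STATUS register (offset
 * NPU_REG_STATUS defined above), so a caller can snapshot the register once and then
 * query individual fields through these accessors. The base address NPU_BASE and the
 * volatile MMIO read below are assumptions for illustration only; outside this
 * namespace the enum would need NPU_NAMESPACE:: qualification:
 *
 *   volatile uint32_t *npu = reinterpret_cast<volatile uint32_t *>(NPU_BASE); // NPU_BASE is hypothetical
 *   status_r status(npu[NPU_REG_STATUS / 4]);                                 // snapshot the register
 *   bool busy  = status.get_state() == state::RUNNING;
 *   bool irq   = status.get_irq_raised() != 0;
 *   bool fault = status.get_bus_status() != 0 || status.get_cmd_parse_error() != 0;
 */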
uint32_t get_cmd_parse_error() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR status_r &set_cmd_parse_error(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile status_r &set_cmd_parse_error(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_cmd_end_reached() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_cmd_end_reached() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR status_r &set_cmd_end_reached(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile status_r &set_cmd_end_reached(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_pmu_irq_raised() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + uint32_t get_pmu_irq_raised() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR status_r &set_pmu_irq_raised(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile status_r &set_pmu_irq_raised(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_wd_fault() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_wd_fault() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR status_r &set_wd_fault(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile status_r &set_wd_fault(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_ecc_fault() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + uint32_t get_ecc_fault() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR status_r &set_ecc_fault(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + volatile status_r &set_ecc_fault(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_fault_src get_faulting_interface() const + { + NPU_NAMESPACE::dma_fault_src value = static_cast(((1U << 1) - 1) & (word0 >> 11)); + return value; + } + NPU_NAMESPACE::dma_fault_src get_faulting_interface() const volatile + { + NPU_NAMESPACE::dma_fault_src value = static_cast(((1U << 1) - 1) & (word0 >> 11)); + return value; + } + CONSTEXPR status_r &set_faulting_interface(NPU_NAMESPACE::dma_fault_src value) + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 11); + return *this; + } + volatile status_r &set_faulting_interface(NPU_NAMESPACE::dma_fault_src value) volatile + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 11); + return *this; + } + CONSTEXPR uint32_t get_faulting_channel() 
const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 12); + return value; + } + uint32_t get_faulting_channel() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR status_r &set_faulting_channel(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 12) & word0) | ((((1U << 4) - 1) & value) << 12); + return *this; + } + volatile status_r &set_faulting_channel(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 12) & word0) | ((((1U << 4) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_irq_history_mask() const + { + uint32_t value = ((1U << 16) - 1) & (word0 >> 16); + return value; + } + uint32_t get_irq_history_mask() const volatile + { + uint32_t value = ((1U << 16) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR status_r &set_irq_history_mask(uint32_t value) + { + word0 = (((~((1U << 16) - 1)) << 16) & word0) | ((((1U << 16) - 1) & value) << 16); + return *this; + } + volatile status_r &set_irq_history_mask(uint32_t value) volatile + { + word0 = (((~((1U << 16) - 1)) << 16) & word0) | ((((1U << 16) - 1) & value) << 16); + return *this; + } +#endif +}; + +// cmd_r - Command register, reads as last written command +struct cmd_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t transition_to_running_state : 1; // Write 1 to transition the NPU to running state. Writing 0 has + // no effect + uint32_t clear_irq : 1; // Write 1 to clear the IRQ status in the STATUS register. Writing 0 has no effect + uint32_t clock_q_enable : 1; // Write 1 to this bit to enable clock off using clock q-interface and enable + // the requester clock gate + uint32_t power_q_enable : 1; // Write 1 to this bit to enable power off using power q-interface + uint32_t + stop_request : 1; // Write 1 to this bit to request STOP after completing any already-started commands + uint32_t reserved0 : 11; + uint32_t clear_irq_history : 16; // Clears the IRQ history mask + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cmd_r() : word0(12) {} + CONSTEXPR cmd_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + cmd_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_transition_to_running_state() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_transition_to_running_state() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR cmd_r &set_transition_to_running_state(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile cmd_r &set_transition_to_running_state(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_clear_irq() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_clear_irq() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR cmd_r &set_clear_irq(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile cmd_r &set_clear_irq(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) 
- 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_clock_q_enable() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_clock_q_enable() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR cmd_r &set_clock_q_enable(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile cmd_r &set_clock_q_enable(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_power_q_enable() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_power_q_enable() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR cmd_r &set_power_q_enable(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile cmd_r &set_power_q_enable(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_stop_request() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_stop_request() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR cmd_r &set_stop_request(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile cmd_r &set_stop_request(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_clear_irq_history() const + { + uint32_t value = ((1U << 16) - 1) & (word0 >> 16); + return value; + } + uint32_t get_clear_irq_history() const volatile + { + uint32_t value = ((1U << 16) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR cmd_r &set_clear_irq_history(uint32_t value) + { + word0 = (((~((1U << 16) - 1)) << 16) & word0) | ((((1U << 16) - 1) & value) << 16); + return *this; + } + volatile cmd_r &set_clear_irq_history(uint32_t value) volatile + { + word0 = (((~((1U << 16) - 1)) << 16) & word0) | ((((1U << 16) - 1) & value) << 16); + return *this; + } +#endif +}; + +// reset_r - Request Reset and new security mode +struct reset_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t pending_CPL : 1; // Current privilege level 0=User 1=Privileged + uint32_t pending_CSL : 1; // Current security level 0=Secure 1=Non secure + uint32_t reserved0 : 30; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR reset_r() : word0(0) {} + CONSTEXPR reset_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + reset_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::privilege_level get_pending_CPL() const + { + NPU_NAMESPACE::privilege_level value = + static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::privilege_level get_pending_CPL() const volatile + { + NPU_NAMESPACE::privilege_level value = + static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR reset_r &set_pending_CPL(NPU_NAMESPACE::privilege_level value) + { + 
word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + volatile reset_r &set_pending_CPL(NPU_NAMESPACE::privilege_level value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::security_level get_pending_CSL() const + { + NPU_NAMESPACE::security_level value = + static_cast(((1U << 1) - 1) & (word0 >> 1)); + return value; + } + NPU_NAMESPACE::security_level get_pending_CSL() const volatile + { + NPU_NAMESPACE::security_level value = + static_cast(((1U << 1) - 1) & (word0 >> 1)); + return value; + } + CONSTEXPR reset_r &set_pending_CSL(NPU_NAMESPACE::security_level value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 1); + return *this; + } + volatile reset_r &set_pending_CSL(NPU_NAMESPACE::security_level value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 1); + return *this; + } +#endif +}; + +// qbase_r - Base address of the command stream in bytes +struct qbase_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR qbase_r() : word0(0), word1(0) {} + CONSTEXPR qbase_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + qbase_r copy() volatile + { + return *this; + } +#endif +}; + +// qread_r - Read offset in the command stream in bytes. Multiple of 4 in the range 0 to 16 MB +struct qread_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t QREAD : 32; // The read offset of the current command under execution + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR qread_r() : word0(0) {} + CONSTEXPR qread_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + qread_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_QREAD() const + { + uint32_t value = word0; + return value; + } + uint32_t get_QREAD() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR qread_r &set_QREAD(uint32_t value) + { + word0 = value; + return *this; + } + volatile qread_r &set_QREAD(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// qconfig_r - AXI configuration for the command stream in the range 0-3. 
Same encoding as for REGIONCFG +struct qconfig_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t cmd_region0 : 2; // Command region configuration + uint32_t reserved0 : 30; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR qconfig_r() : word0(0) {} + CONSTEXPR qconfig_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + qconfig_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_cmd_region0() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::mem_attr get_cmd_region0() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR qconfig_r &set_cmd_region0(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile qconfig_r &set_cmd_region0(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } +#endif +}; + +// qsize_r - Size of the command stream in bytes. Multiple of 4 in the range 0 to 16 MB +struct qsize_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t QSIZE : 32; // Size of the next command stream to be executed by the NPU + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR qsize_r() : word0(0) {} + CONSTEXPR qsize_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + qsize_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_QSIZE() const + { + uint32_t value = word0; + return value; + } + uint32_t get_QSIZE() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR qsize_r &set_QSIZE(uint32_t value) + { + word0 = value; + return *this; + } + volatile qsize_r &set_QSIZE(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// prot_r - Protection level configured for the NPU when acting as an AXI requester +struct prot_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t active_CPL : 1; // Current privilege level 0=User 1=Privileged + uint32_t active_CSL : 1; // Current security level 0=Secure 1=Non secure + uint32_t reserved0 : 30; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR prot_r() : word0(0) {} + CONSTEXPR prot_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + prot_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::privilege_level get_active_CPL() const + { + NPU_NAMESPACE::privilege_level value = + static_cast(((1U << 1) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::privilege_level get_active_CPL() const volatile + { + NPU_NAMESPACE::privilege_level value = + static_cast(((1U << 1) - 1) & (word0 
>> 0)); + return value; + } + CONSTEXPR prot_r &set_active_CPL(NPU_NAMESPACE::privilege_level value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + volatile prot_r &set_active_CPL(NPU_NAMESPACE::privilege_level value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::security_level get_active_CSL() const + { + NPU_NAMESPACE::security_level value = + static_cast(((1U << 1) - 1) & (word0 >> 1)); + return value; + } + NPU_NAMESPACE::security_level get_active_CSL() const volatile + { + NPU_NAMESPACE::security_level value = + static_cast(((1U << 1) - 1) & (word0 >> 1)); + return value; + } + CONSTEXPR prot_r &set_active_CSL(NPU_NAMESPACE::security_level value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 1); + return *this; + } + volatile prot_r &set_active_CSL(NPU_NAMESPACE::security_level value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 1); + return *this; + } +#endif +}; + +// config_r - RTL configuration +struct config_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t macs_per_cc : 4; // The log2(macs/clock cycle) + uint32_t cmd_stream_version : 4; // command stream version accepted by this NPU + uint32_t shram_size : 8; // Total size in KB of internal SHRAM + uint32_t reserved0 : 10; + uint32_t functional_safety : 1; // Functional safety configuration + uint32_t custom_dma : 1; // Custom DMA configuration + uint32_t product : 4; // Product configuration + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR config_r() : word0(268435456) {} + CONSTEXPR config_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + config_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_macs_per_cc() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 0); + return value; + } + uint32_t get_macs_per_cc() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR config_r &set_macs_per_cc(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & value) << 0); + return *this; + } + volatile config_r &set_macs_per_cc(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_cmd_stream_version() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 4); + return value; + } + uint32_t get_cmd_stream_version() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR config_r &set_cmd_stream_version(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & value) << 4); + return *this; + } + volatile config_r &set_cmd_stream_version(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_shram_size() const + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 8); + return value; + } + uint32_t get_shram_size() const volatile + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 8); + return value; + } + 
CONSTEXPR config_r &set_shram_size(uint32_t value) + { + word0 = (((~((1U << 8) - 1)) << 8) & word0) | ((((1U << 8) - 1) & value) << 8); + return *this; + } + volatile config_r &set_shram_size(uint32_t value) volatile + { + word0 = (((~((1U << 8) - 1)) << 8) & word0) | ((((1U << 8) - 1) & value) << 8); + return *this; + } + CONSTEXPR NPU_NAMESPACE::functional_safety get_functional_safety() const + { + NPU_NAMESPACE::functional_safety value = + static_cast(((1U << 1) - 1) & (word0 >> 26)); + return value; + } + NPU_NAMESPACE::functional_safety get_functional_safety() const volatile + { + NPU_NAMESPACE::functional_safety value = + static_cast(((1U << 1) - 1) & (word0 >> 26)); + return value; + } + CONSTEXPR config_r &set_functional_safety(NPU_NAMESPACE::functional_safety value) + { + word0 = (((~((1U << 1) - 1)) << 26) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 26); + return *this; + } + volatile config_r &set_functional_safety(NPU_NAMESPACE::functional_safety value) volatile + { + word0 = (((~((1U << 1) - 1)) << 26) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 26); + return *this; + } + CONSTEXPR NPU_NAMESPACE::custom_dma get_custom_dma() const + { + NPU_NAMESPACE::custom_dma value = static_cast(((1U << 1) - 1) & (word0 >> 27)); + return value; + } + NPU_NAMESPACE::custom_dma get_custom_dma() const volatile + { + NPU_NAMESPACE::custom_dma value = static_cast(((1U << 1) - 1) & (word0 >> 27)); + return value; + } + CONSTEXPR config_r &set_custom_dma(NPU_NAMESPACE::custom_dma value) + { + word0 = (((~((1U << 1) - 1)) << 27) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 27); + return *this; + } + volatile config_r &set_custom_dma(NPU_NAMESPACE::custom_dma value) volatile + { + word0 = (((~((1U << 1) - 1)) << 27) & word0) | ((((1U << 1) - 1) & static_cast(value)) << 27); + return *this; + } + CONSTEXPR uint32_t get_product() const + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 28); + return value; + } + uint32_t get_product() const volatile + { + uint32_t value = ((1U << 4) - 1) & (word0 >> 28); + return value; + } + CONSTEXPR config_r &set_product(uint32_t value) + { + word0 = (((~((1U << 4) - 1)) << 28) & word0) | ((((1U << 4) - 1) & value) << 28); + return *this; + } + volatile config_r &set_product(uint32_t value) volatile + { + word0 = (((~((1U << 4) - 1)) << 28) & word0) | ((((1U << 4) - 1) & value) << 28); + return *this; + } +#endif +}; + +// lock_r - Lock register. This register is designed for driver use and does not affect NPU functionality +struct lock_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t LOCK : 32; // 32 bit value for LOCK configuration + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR lock_r() : word0(0) {} + CONSTEXPR lock_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + lock_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_LOCK() const + { + uint32_t value = word0; + return value; + } + uint32_t get_LOCK() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR lock_r &set_LOCK(uint32_t value) + { + word0 = value; + return *this; + } + volatile lock_r &set_LOCK(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// regioncfg_r - Region memory type configuration. 
Bits[2*k+1:2*k] give the memory type for REGION[k] +struct regioncfg_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t region0 : 2; // Bits for Region0 Configuration + uint32_t region1 : 2; // Bits for Region1 Configuration + uint32_t region2 : 2; // Bits for Region2 Configuration + uint32_t region3 : 2; // Bits for Region3 Configuration + uint32_t region4 : 2; // Bits for Region4 Configuration + uint32_t region5 : 2; // Bits for Region5 Configuration + uint32_t region6 : 2; // Bits for Region6 Configuration + uint32_t region7 : 2; // Bits for Region7 Configuration + uint32_t reserved0 : 16; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR regioncfg_r() : word0(0) {} + CONSTEXPR regioncfg_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + regioncfg_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region0() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::mem_attr get_region0() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR regioncfg_r &set_region0(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile regioncfg_r &set_region0(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region1() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 2)); + return value; + } + NPU_NAMESPACE::mem_attr get_region1() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 2)); + return value; + } + CONSTEXPR regioncfg_r &set_region1(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 2) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 2); + return *this; + } + volatile regioncfg_r &set_region1(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 2) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 2); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region2() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::mem_attr get_region2() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR regioncfg_r &set_region2(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 4) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 4); + return *this; + } + volatile regioncfg_r &set_region2(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 4) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region3() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 6)); + return value; + } + NPU_NAMESPACE::mem_attr get_region3() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 6)); + return value; + } + CONSTEXPR regioncfg_r 
&set_region3(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 6) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 6); + return *this; + } + volatile regioncfg_r &set_region3(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 6) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 6); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region4() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 8)); + return value; + } + NPU_NAMESPACE::mem_attr get_region4() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 8)); + return value; + } + CONSTEXPR regioncfg_r &set_region4(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 8) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 8); + return *this; + } + volatile regioncfg_r &set_region4(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 8) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 8); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region5() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 10)); + return value; + } + NPU_NAMESPACE::mem_attr get_region5() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 10)); + return value; + } + CONSTEXPR regioncfg_r &set_region5(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 10) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 10); + return *this; + } + volatile regioncfg_r &set_region5(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 10) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 10); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region6() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 12)); + return value; + } + NPU_NAMESPACE::mem_attr get_region6() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 12)); + return value; + } + CONSTEXPR regioncfg_r &set_region6(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 12) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 12); + return *this; + } + volatile regioncfg_r &set_region6(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 12) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 12); + return *this; + } + CONSTEXPR NPU_NAMESPACE::mem_attr get_region7() const + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 14)); + return value; + } + NPU_NAMESPACE::mem_attr get_region7() const volatile + { + NPU_NAMESPACE::mem_attr value = static_cast(((1U << 2) - 1) & (word0 >> 14)); + return value; + } + CONSTEXPR regioncfg_r &set_region7(NPU_NAMESPACE::mem_attr value) + { + word0 = (((~((1U << 2) - 1)) << 14) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 14); + return *this; + } + volatile regioncfg_r &set_region7(NPU_NAMESPACE::mem_attr value) volatile + { + word0 = (((~((1U << 2) - 1)) << 14) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 14); + return *this; + } +#endif +}; + +// axi_limit0_r - AXI limits for port 0 counter 0 +struct axi_limit0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t max_beats : 2; // Burst split alignment + uint32_t reserved0 : 2; + uint32_t memtype : 4; // Memtype to be used to encode AxCACHE signals + uint32_t reserved1 : 8; + uint32_t + max_outstanding_read_m1 : 6; // Maximum number of 
outstanding AXI read transactions - 1 in range 0 to 63 + uint32_t reserved2 : 2; + uint32_t max_outstanding_write_m1 : 5; // Maximum number of outstanding AXI write transactions - 1 in range + // 0 to 31 + uint32_t reserved3 : 3; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR axi_limit0_r() : word0(0) {} + CONSTEXPR axi_limit0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + axi_limit0_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::max_beats get_max_beats() const + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::max_beats get_max_beats() const volatile + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR axi_limit0_r &set_max_beats(NPU_NAMESPACE::max_beats value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile axi_limit0_r &set_max_beats(NPU_NAMESPACE::max_beats value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::axi_mem_encoding get_memtype() const + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::axi_mem_encoding get_memtype() const volatile + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR axi_limit0_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + volatile axi_limit0_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_read_m1() const + { + uint32_t value = ((1U << 6) - 1) & (word0 >> 16); + return value; + } + uint32_t get_max_outstanding_read_m1() const volatile + { + uint32_t value = ((1U << 6) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR axi_limit0_r &set_max_outstanding_read_m1(uint32_t value) + { + word0 = (((~((1U << 6) - 1)) << 16) & word0) | ((((1U << 6) - 1) & value) << 16); + return *this; + } + volatile axi_limit0_r &set_max_outstanding_read_m1(uint32_t value) volatile + { + word0 = (((~((1U << 6) - 1)) << 16) & word0) | ((((1U << 6) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_write_m1() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 24); + return value; + } + uint32_t get_max_outstanding_write_m1() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR axi_limit0_r &set_max_outstanding_write_m1(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 24) & word0) | ((((1U << 5) - 1) & value) << 24); + return *this; + } + volatile axi_limit0_r &set_max_outstanding_write_m1(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 24) & word0) | ((((1U << 5) - 1) & value) << 24); + return *this; + } +#endif +}; + +// axi_limit1_r - AXI limits for port 0 counter 1 +struct axi_limit1_r +{ +#ifndef __cplusplus + union + 
{ + struct + { + uint32_t max_beats : 2; // Burst split alignment + uint32_t reserved0 : 2; + uint32_t memtype : 4; // Memtype to be used to encode AxCACHE signals + uint32_t reserved1 : 8; + uint32_t + max_outstanding_read_m1 : 6; // Maximum number of outstanding AXI read transactions - 1 in range 0 to 63 + uint32_t reserved2 : 2; + uint32_t max_outstanding_write_m1 : 5; // Maximum number of outstanding AXI write transactions - 1 in range + // 0 to 31 + uint32_t reserved3 : 3; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR axi_limit1_r() : word0(0) {} + CONSTEXPR axi_limit1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + axi_limit1_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::max_beats get_max_beats() const + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::max_beats get_max_beats() const volatile + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR axi_limit1_r &set_max_beats(NPU_NAMESPACE::max_beats value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile axi_limit1_r &set_max_beats(NPU_NAMESPACE::max_beats value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::axi_mem_encoding get_memtype() const + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::axi_mem_encoding get_memtype() const volatile + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR axi_limit1_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + volatile axi_limit1_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_read_m1() const + { + uint32_t value = ((1U << 6) - 1) & (word0 >> 16); + return value; + } + uint32_t get_max_outstanding_read_m1() const volatile + { + uint32_t value = ((1U << 6) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR axi_limit1_r &set_max_outstanding_read_m1(uint32_t value) + { + word0 = (((~((1U << 6) - 1)) << 16) & word0) | ((((1U << 6) - 1) & value) << 16); + return *this; + } + volatile axi_limit1_r &set_max_outstanding_read_m1(uint32_t value) volatile + { + word0 = (((~((1U << 6) - 1)) << 16) & word0) | ((((1U << 6) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_write_m1() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 24); + return value; + } + uint32_t get_max_outstanding_write_m1() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR axi_limit1_r &set_max_outstanding_write_m1(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 24) & word0) | ((((1U << 5) - 1) & value) << 24); + return *this; + } + volatile axi_limit1_r 
&set_max_outstanding_write_m1(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 24) & word0) | ((((1U << 5) - 1) & value) << 24); + return *this; + } +#endif +}; + +// axi_limit2_r - AXI limits for port 1 counter 2 +struct axi_limit2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t max_beats : 2; // Burst split alignment + uint32_t reserved0 : 2; + uint32_t memtype : 4; // Memtype to be used to encode AxCACHE signals + uint32_t reserved1 : 8; + uint32_t + max_outstanding_read_m1 : 6; // Maximum number of outstanding AXI read transactions - 1 in range 0 to 63 + uint32_t reserved2 : 2; + uint32_t max_outstanding_write_m1 : 5; // Maximum number of outstanding AXI write transactions - 1 in range + // 0 to 31 + uint32_t reserved3 : 3; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR axi_limit2_r() : word0(0) {} + CONSTEXPR axi_limit2_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + axi_limit2_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::max_beats get_max_beats() const + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::max_beats get_max_beats() const volatile + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR axi_limit2_r &set_max_beats(NPU_NAMESPACE::max_beats value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile axi_limit2_r &set_max_beats(NPU_NAMESPACE::max_beats value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::axi_mem_encoding get_memtype() const + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::axi_mem_encoding get_memtype() const volatile + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR axi_limit2_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + volatile axi_limit2_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_read_m1() const + { + uint32_t value = ((1U << 6) - 1) & (word0 >> 16); + return value; + } + uint32_t get_max_outstanding_read_m1() const volatile + { + uint32_t value = ((1U << 6) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR axi_limit2_r &set_max_outstanding_read_m1(uint32_t value) + { + word0 = (((~((1U << 6) - 1)) << 16) & word0) | ((((1U << 6) - 1) & value) << 16); + return *this; + } + volatile axi_limit2_r &set_max_outstanding_read_m1(uint32_t value) volatile + { + word0 = (((~((1U << 6) - 1)) << 16) & word0) | ((((1U << 6) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_write_m1() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 24); + return value; + } + uint32_t get_max_outstanding_write_m1() const volatile + { + uint32_t value = 
((1U << 5) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR axi_limit2_r &set_max_outstanding_write_m1(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 24) & word0) | ((((1U << 5) - 1) & value) << 24); + return *this; + } + volatile axi_limit2_r &set_max_outstanding_write_m1(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 24) & word0) | ((((1U << 5) - 1) & value) << 24); + return *this; + } +#endif +}; + +// axi_limit3_r - AXI limits for port 1 counter 3 +struct axi_limit3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t max_beats : 2; // Burst split alignment + uint32_t reserved0 : 2; + uint32_t memtype : 4; // Memtype to be used to encode AxCACHE signals + uint32_t reserved1 : 8; + uint32_t + max_outstanding_read_m1 : 6; // Maximum number of outstanding AXI read transactions - 1 in range 0 to 63 + uint32_t reserved2 : 2; + uint32_t max_outstanding_write_m1 : 5; // Maximum number of outstanding AXI write transactions - 1 in range + // 0 to 31 + uint32_t reserved3 : 3; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR axi_limit3_r() : word0(0) {} + CONSTEXPR axi_limit3_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + axi_limit3_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::max_beats get_max_beats() const + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::max_beats get_max_beats() const volatile + { + NPU_NAMESPACE::max_beats value = static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR axi_limit3_r &set_max_beats(NPU_NAMESPACE::max_beats value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile axi_limit3_r &set_max_beats(NPU_NAMESPACE::max_beats value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::axi_mem_encoding get_memtype() const + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + NPU_NAMESPACE::axi_mem_encoding get_memtype() const volatile + { + NPU_NAMESPACE::axi_mem_encoding value = + static_cast(((1U << 4) - 1) & (word0 >> 4)); + return value; + } + CONSTEXPR axi_limit3_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + volatile axi_limit3_r &set_memtype(NPU_NAMESPACE::axi_mem_encoding value) volatile + { + word0 = (((~((1U << 4) - 1)) << 4) & word0) | ((((1U << 4) - 1) & static_cast(value)) << 4); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_read_m1() const + { + uint32_t value = ((1U << 6) - 1) & (word0 >> 16); + return value; + } + uint32_t get_max_outstanding_read_m1() const volatile + { + uint32_t value = ((1U << 6) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR axi_limit3_r &set_max_outstanding_read_m1(uint32_t value) + { + word0 = (((~((1U << 6) - 1)) << 16) & word0) | ((((1U << 6) - 1) & value) << 16); + return *this; + } + volatile axi_limit3_r &set_max_outstanding_read_m1(uint32_t value) volatile + { + word0 = (((~((1U << 6) - 1)) << 16) & word0) | ((((1U << 6) - 
1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_max_outstanding_write_m1() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 24); + return value; + } + uint32_t get_max_outstanding_write_m1() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR axi_limit3_r &set_max_outstanding_write_m1(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 24) & word0) | ((((1U << 5) - 1) & value) << 24); + return *this; + } + volatile axi_limit3_r &set_max_outstanding_write_m1(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 24) & word0) | ((((1U << 5) - 1) & value) << 24); + return *this; + } +#endif +}; + +// basep_r - The driver can use this address to relocate the command stream on region 0. If the region contains data +// requiring A-byte alignment then the base must be a multiple of A +struct basep_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR basep_r() : word0(0), word1(0) {} + CONSTEXPR basep_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + basep_r copy() volatile + { + return *this; + } +#endif +}; + +// wd_status_r - WD_STATUS +struct wd_status_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t core_slice_state : 2; // WD core slice parser state + uint32_t core_idle : 1; // Core idle + uint32_t ctrl_state : 2; // WD control state + uint32_t ctrl_idle : 1; // All stripe jobs idle (all weights consumed) + uint32_t write_buf_index0 : 3; // current write index for next data from core + uint32_t write_buf_valid0 : 1; // write buf valid (full) + uint32_t write_buf_idle0 : 1; // write buf idle (empty) + uint32_t write_buf_index1 : 3; // current write index for next data from core + uint32_t write_buf_valid1 : 1; // write buf valid (full) + uint32_t write_buf_idle1 : 1; // write buf idle (empty) + uint32_t events : 12; // WD events mapped as appendix A + uint32_t reserved0 : 4; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR wd_status_r() : word0(0) {} + CONSTEXPR wd_status_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + wd_status_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::wd_core_slice_state get_core_slice_state() const + { + NPU_NAMESPACE::wd_core_slice_state value = + static_cast(((1U << 2) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::wd_core_slice_state get_core_slice_state() const volatile + { + NPU_NAMESPACE::wd_core_slice_state value = + static_cast(((1U << 2) 
- 1) & (word0 >> 0)); + return value; + } + CONSTEXPR wd_status_r &set_core_slice_state(NPU_NAMESPACE::wd_core_slice_state value) + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + volatile wd_status_r &set_core_slice_state(NPU_NAMESPACE::wd_core_slice_state value) volatile + { + word0 = (((~((1U << 2) - 1)) << 0) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 0); + return *this; + } + CONSTEXPR uint32_t get_core_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_core_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR wd_status_r &set_core_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile wd_status_r &set_core_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR NPU_NAMESPACE::wd_ctrl_state get_ctrl_state() const + { + NPU_NAMESPACE::wd_ctrl_state value = static_cast(((1U << 2) - 1) & (word0 >> 3)); + return value; + } + NPU_NAMESPACE::wd_ctrl_state get_ctrl_state() const volatile + { + NPU_NAMESPACE::wd_ctrl_state value = static_cast(((1U << 2) - 1) & (word0 >> 3)); + return value; + } + CONSTEXPR wd_status_r &set_ctrl_state(NPU_NAMESPACE::wd_ctrl_state value) + { + word0 = (((~((1U << 2) - 1)) << 3) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 3); + return *this; + } + volatile wd_status_r &set_ctrl_state(NPU_NAMESPACE::wd_ctrl_state value) volatile + { + word0 = (((~((1U << 2) - 1)) << 3) & word0) | ((((1U << 2) - 1) & static_cast(value)) << 3); + return *this; + } + CONSTEXPR uint32_t get_ctrl_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_ctrl_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR wd_status_r &set_ctrl_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile wd_status_r &set_ctrl_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_write_buf_index0() const + { + uint32_t value = ((1U << 3) - 1) & (word0 >> 6); + return value; + } + uint32_t get_write_buf_index0() const volatile + { + uint32_t value = ((1U << 3) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_index0(uint32_t value) + { + word0 = (((~((1U << 3) - 1)) << 6) & word0) | ((((1U << 3) - 1) & value) << 6); + return *this; + } + volatile wd_status_r &set_write_buf_index0(uint32_t value) volatile + { + word0 = (((~((1U << 3) - 1)) << 6) & word0) | ((((1U << 3) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_write_buf_valid0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + uint32_t get_write_buf_valid0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_valid0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + volatile wd_status_r &set_write_buf_valid0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + CONSTEXPR uint32_t 
get_write_buf_idle0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_write_buf_idle0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_idle0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile wd_status_r &set_write_buf_idle0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + CONSTEXPR uint32_t get_write_buf_index1() const + { + uint32_t value = ((1U << 3) - 1) & (word0 >> 11); + return value; + } + uint32_t get_write_buf_index1() const volatile + { + uint32_t value = ((1U << 3) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_index1(uint32_t value) + { + word0 = (((~((1U << 3) - 1)) << 11) & word0) | ((((1U << 3) - 1) & value) << 11); + return *this; + } + volatile wd_status_r &set_write_buf_index1(uint32_t value) volatile + { + word0 = (((~((1U << 3) - 1)) << 11) & word0) | ((((1U << 3) - 1) & value) << 11); + return *this; + } + CONSTEXPR uint32_t get_write_buf_valid1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + uint32_t get_write_buf_valid1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_valid1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + volatile wd_status_r &set_write_buf_valid1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + CONSTEXPR uint32_t get_write_buf_idle1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + uint32_t get_write_buf_idle1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + CONSTEXPR wd_status_r &set_write_buf_idle1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + volatile wd_status_r &set_write_buf_idle1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + CONSTEXPR uint32_t get_events() const + { + uint32_t value = ((1U << 12) - 1) & (word0 >> 16); + return value; + } + uint32_t get_events() const volatile + { + uint32_t value = ((1U << 12) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR wd_status_r &set_events(uint32_t value) + { + word0 = (((~((1U << 12) - 1)) << 16) & word0) | ((((1U << 12) - 1) & value) << 16); + return *this; + } + volatile wd_status_r &set_events(uint32_t value) volatile + { + word0 = (((~((1U << 12) - 1)) << 16) & word0) | ((((1U << 12) - 1) & value) << 16); + return *this; + } +#endif +}; + +// mac_status_r - MAC_STATUS +struct mac_status_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t block_cfg_valid : 1; // MAC has a valid block configuration + uint32_t trav_en : 1; // MAC is doing block traversal + uint32_t wait_for_ib : 1; // MAC is waiting for an Input Buffer to become available + uint32_t wait_for_acc_buf : 1; // MAC is waiting for an Accumulator Buffer to become available + uint32_t wait_for_weights : 1; // MAC is waiting for a Weight Block to become available + uint32_t stall_stripe : 1; // MAC is stalling between two stripes + uint32_t dw_sel : 1; // Currently used 
weight interface in MAC AI + uint32_t wait_for_dw0_ready : 1; // MAC AI is waiting for MAC DPU to send dw0_ready to WD + uint32_t wait_for_dw1_ready : 1; // MAC AI is waiting for MAC DPU to send dw1_ready to WD + uint32_t acc_buf_sel_ai : 1; // Currently used AccBuf interface in MAC AI + uint32_t wait_for_acc0_ready : 1; // MAC AI is waiting for acc0_ready from AO + uint32_t wait_for_acc1_ready : 1; // MAC AI is waiting for acc1_ready from AO + uint32_t acc_buf_sel_aa : 1; // Currently used AccBuf interface in MAC ADDER_ARRAY + uint32_t acc0_valid : 1; // MAC outgoing value of acc0_valid + uint32_t acc1_valid : 1; // MAC outgoing value of acc1_valid + uint32_t reserved0 : 1; + uint32_t events : 11; // Mapped to MAC events described in Appendix A + uint32_t reserved1 : 5; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR mac_status_r() : word0(0) {} + CONSTEXPR mac_status_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + mac_status_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_block_cfg_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_block_cfg_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR mac_status_r &set_block_cfg_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile mac_status_r &set_block_cfg_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_trav_en() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_trav_en() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR mac_status_r &set_trav_en(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile mac_status_r &set_trav_en(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_wait_for_ib() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_wait_for_ib() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_ib(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile mac_status_r &set_wait_for_ib(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_wait_for_acc_buf() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_wait_for_acc_buf() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_acc_buf(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile mac_status_r &set_wait_for_acc_buf(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 
3); + return *this; + } + CONSTEXPR uint32_t get_wait_for_weights() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_wait_for_weights() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_weights(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile mac_status_r &set_wait_for_weights(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_stall_stripe() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_stall_stripe() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR mac_status_r &set_stall_stripe(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile mac_status_r &set_stall_stripe(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_dw_sel() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + uint32_t get_dw_sel() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR mac_status_r &set_dw_sel(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile mac_status_r &set_dw_sel(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_wait_for_dw0_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_wait_for_dw0_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_dw0_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile mac_status_r &set_wait_for_dw0_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_wait_for_dw1_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + uint32_t get_wait_for_dw1_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_dw1_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + volatile mac_status_r &set_wait_for_dw1_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_acc_buf_sel_ai() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + uint32_t get_acc_buf_sel_ai() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + CONSTEXPR mac_status_r &set_acc_buf_sel_ai(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + volatile mac_status_r &set_acc_buf_sel_ai(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + 
CONSTEXPR uint32_t get_wait_for_acc0_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_wait_for_acc0_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_acc0_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile mac_status_r &set_wait_for_acc0_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + CONSTEXPR uint32_t get_wait_for_acc1_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + uint32_t get_wait_for_acc1_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR mac_status_r &set_wait_for_acc1_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + volatile mac_status_r &set_wait_for_acc1_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + CONSTEXPR uint32_t get_acc_buf_sel_aa() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + uint32_t get_acc_buf_sel_aa() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR mac_status_r &set_acc_buf_sel_aa(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + volatile mac_status_r &set_acc_buf_sel_aa(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_acc0_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + uint32_t get_acc0_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + CONSTEXPR mac_status_r &set_acc0_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + volatile mac_status_r &set_acc0_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + CONSTEXPR uint32_t get_acc1_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + uint32_t get_acc1_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + CONSTEXPR mac_status_r &set_acc1_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + volatile mac_status_r &set_acc1_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + CONSTEXPR uint32_t get_events() const + { + uint32_t value = ((1U << 11) - 1) & (word0 >> 16); + return value; + } + uint32_t get_events() const volatile + { + uint32_t value = ((1U << 11) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR mac_status_r &set_events(uint32_t value) + { + word0 = (((~((1U << 11) - 1)) << 16) & word0) | ((((1U << 11) - 1) & value) << 16); + return *this; + } + volatile mac_status_r &set_events(uint32_t value) volatile + { + word0 = (((~((1U << 11) - 1)) << 16) & word0) | ((((1U << 11) - 1) & value) << 16); + return *this; + } +#endif +}; + 
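A minimal usage sketch for the generated accessor structs above, assuming they ship in a generated NPU register header (the include name below is an assumption, and `raw` stands in for a value read from the memory-mapped MAC_STATUS register): construct the struct from the raw 32-bit word, then query fields through the get_* helpers, which mask and shift word0 exactly as defined in mac_status_r.

    #include <cstdint>
    #include <cstdio>

    #include "ethosu55_interface.h" // assumed name of the generated header containing these structs

    // Decode a raw MAC_STATUS word through the generated bit-field getters.
    // No hardware access happens here; the caller supplies the register value.
    void print_mac_status(uint32_t raw)
    {
        mac_status_r status(raw);
        std::printf("block_cfg_valid=%u trav_en=%u wait_for_ib=%u events=0x%03x\n",
                    static_cast<unsigned>(status.get_block_cfg_valid()),
                    static_cast<unsigned>(status.get_trav_en()),
                    static_cast<unsigned>(status.get_wait_for_ib()),
                    static_cast<unsigned>(status.get_events()));
    }

The set_* members return *this, so for the writable registers later in this header fields can be composed in a chain before the finished word is written back through the implicit uint32_t conversion.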
+// ao_status_r - AO_STATUS +struct ao_status_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t cmd_sbw_valid : 1; // Block command to shared buffer write module is valid + uint32_t cmd_act_valid : 1; // Block command to activation function module is valid + uint32_t cmd_ctl_valid : 1; // Block command to control module is valid + uint32_t cmd_scl_valid : 1; // Block command to scale module is valid + uint32_t cmd_sbr_valid : 1; // Block command to shared buffer read module is valid + uint32_t cmd_ofm_valid : 1; // Block command to ofm parameter module is valid + uint32_t blk_cmd_ready : 1; // Ready to accept block command + uint32_t blk_cmd_valid : 1; // Block command from CC is valid + uint32_t reserved0 : 8; + uint32_t events : 8; // Mapped to AO events described in Appendix A + uint32_t reserved1 : 8; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ao_status_r() : word0(0) {} + CONSTEXPR ao_status_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ao_status_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_cmd_sbw_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_cmd_sbw_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR ao_status_r &set_cmd_sbw_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile ao_status_r &set_cmd_sbw_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_cmd_act_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_cmd_act_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR ao_status_r &set_cmd_act_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile ao_status_r &set_cmd_act_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_cmd_ctl_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_cmd_ctl_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR ao_status_r &set_cmd_ctl_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile ao_status_r &set_cmd_ctl_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_cmd_scl_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_cmd_scl_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR ao_status_r &set_cmd_scl_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile ao_status_r &set_cmd_scl_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) 
<< 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_cmd_sbr_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_cmd_sbr_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR ao_status_r &set_cmd_sbr_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile ao_status_r &set_cmd_sbr_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_cmd_ofm_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_cmd_ofm_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR ao_status_r &set_cmd_ofm_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile ao_status_r &set_cmd_ofm_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_blk_cmd_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + uint32_t get_blk_cmd_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR ao_status_r &set_blk_cmd_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile ao_status_r &set_blk_cmd_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_blk_cmd_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_blk_cmd_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR ao_status_r &set_blk_cmd_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile ao_status_r &set_blk_cmd_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_events() const + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 16); + return value; + } + uint32_t get_events() const volatile + { + uint32_t value = ((1U << 8) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR ao_status_r &set_events(uint32_t value) + { + word0 = (((~((1U << 8) - 1)) << 16) & word0) | ((((1U << 8) - 1) & value) << 16); + return *this; + } + volatile ao_status_r &set_events(uint32_t value) volatile + { + word0 = (((~((1U << 8) - 1)) << 16) & word0) | ((((1U << 8) - 1) & value) << 16); + return *this; + } +#endif +}; + +// dma_status0_r - DMA_STATUS0 +struct dma_status0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t cmd_idle : 1; // When this bit is high means that the CMD block is not busy in generating addresses + // for a CMD job + uint32_t ifm_idle : 1; // When this bit is high means that there are no ongoing IFM jobs + uint32_t wgt_idle_c0 : 1; // When this bit is high means that the WGT block is not busy in generating + // addresses for a WGT job + uint32_t bas_idle_c0 : 1; // When this bit is high means that the BAS block is not busy in generating + // addresses for a BAS 
job + uint32_t m2m_idle : 1; // When this bit is high means that there are no ongoing M2M jobs + uint32_t ofm_idle : 1; // When this bit is high means that there are no ongoing OFM jobs + uint32_t halt_req : 1; // CPM has requested to HALT AXI bus before soft reset + uint32_t halt_ack : 1; // DMA is in condition to halt the AXI bus since there are no pending transactions + uint32_t pause_req : 1; // CC has requested to pause the AXI + uint32_t pause_ack : 1; // DMA is in condition to pause the AXI bus since there are no pending transactions + uint32_t ib0_ai_valid_c0 : 1; // Data for AI to be read in IFM input buffer 0 - Core 0 + uint32_t ib0_ai_ready_c0 : 1; // Data consumed from AI in IFM input buffer 0 - Core 0 + uint32_t ib1_ai_valid_c0 : 1; // Data for AI to be read in IFM input buffer 1 - Core 0 + uint32_t ib1_ai_ready_c0 : 1; // Data consumed from AI in IFM input buffer 1 - Core 0 + uint32_t ib0_ao_valid_c0 : 1; // Data for AO to be read in IFM input buffer 0 - Core 0 + uint32_t ib0_ao_ready_c0 : 1; // Data consumed from AO in IFM input buffer 0 - Core 0 + uint32_t ib1_ao_valid_c0 : 1; // Data for AO to be read in IFM input buffer 0 - Core 0 + uint32_t ib1_ao_ready_c0 : 1; // Data consumed from AO in IFM input buffer 1 - Core 0 + uint32_t ob0_valid_c0 : 1; // Data for DMA ready to be consumed in OFM output buffer 0 - Core 0 + uint32_t ob0_ready_c0 : 1; // Data consumed from DMA in OFM output buffer 0 - Core 0 + uint32_t ob1_valid_c0 : 1; // Data for DMA ready to be consumed in OFM output buffer 1 - Core 0 + uint32_t ob1_ready_c0 : 1; // Data consumed from DMA in OFM output buffer 1 - Core 0 + uint32_t cmd_valid : 1; // New command word for CC to be consumed + uint32_t cmd_ready : 1; // command word consumed by CC + uint32_t wd_bitstream_valid_c0 : 1; // New weight word for WD to be consumed - Core 0 + uint32_t wd_bitstream_ready_c0 : 1; // Weight word consumed by WD - Core 0 + uint32_t bs_bitstream_valid_c0 : 1; // New BaS word for AO to be consumed - Core 0 + uint32_t bs_bitstream_ready_c0 : 1; // BaS word consumed by AO - Core 0 + uint32_t axi0_ar_stalled : 1; // Read transfer request stalled on arready low AXI0 (due to memory system) + uint32_t axi0_rd_limit_stall : 1; // Read stalled due to one AXI0 limit counter being reached + uint32_t axi0_aw_stalled : 1; // Write transfer request stalled on awready low AXI0 (due to memory system) + uint32_t axi0_w_stalled : 1; // Write transfer stalled on awready low AXI0 (due to memory system) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_status0_r() : word0(0) {} + CONSTEXPR dma_status0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_status0_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_cmd_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_cmd_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR dma_status0_r &set_cmd_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile dma_status0_r &set_cmd_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t 
get_ifm_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_ifm_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR dma_status0_r &set_ifm_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile dma_status0_r &set_ifm_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_wgt_idle_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_wgt_idle_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR dma_status0_r &set_wgt_idle_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile dma_status0_r &set_wgt_idle_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_bas_idle_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_bas_idle_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR dma_status0_r &set_bas_idle_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile dma_status0_r &set_bas_idle_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_m2m_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_m2m_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR dma_status0_r &set_m2m_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile dma_status0_r &set_m2m_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_ofm_idle() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_ofm_idle() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR dma_status0_r &set_ofm_idle(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile dma_status0_r &set_ofm_idle(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_halt_req() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + uint32_t get_halt_req() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR dma_status0_r &set_halt_req(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile dma_status0_r &set_halt_req(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_halt_ack() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_halt_ack() const volatile + { 
+ uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR dma_status0_r &set_halt_ack(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile dma_status0_r &set_halt_ack(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_pause_req() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + uint32_t get_pause_req() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR dma_status0_r &set_pause_req(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + volatile dma_status0_r &set_pause_req(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_pause_ack() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + uint32_t get_pause_ack() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + CONSTEXPR dma_status0_r &set_pause_ack(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + volatile dma_status0_r &set_pause_ack(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + CONSTEXPR uint32_t get_ib0_ai_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_ib0_ai_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR dma_status0_r &set_ib0_ai_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile dma_status0_r &set_ib0_ai_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + CONSTEXPR uint32_t get_ib0_ai_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + uint32_t get_ib0_ai_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR dma_status0_r &set_ib0_ai_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + volatile dma_status0_r &set_ib0_ai_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + CONSTEXPR uint32_t get_ib1_ai_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + uint32_t get_ib1_ai_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR dma_status0_r &set_ib1_ai_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + volatile dma_status0_r &set_ib1_ai_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_ib1_ai_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + uint32_t get_ib1_ai_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & 
(word0 >> 13); + return value; + } + CONSTEXPR dma_status0_r &set_ib1_ai_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + volatile dma_status0_r &set_ib1_ai_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + CONSTEXPR uint32_t get_ib0_ao_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + uint32_t get_ib0_ao_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + CONSTEXPR dma_status0_r &set_ib0_ao_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + volatile dma_status0_r &set_ib0_ao_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + CONSTEXPR uint32_t get_ib0_ao_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + uint32_t get_ib0_ao_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + CONSTEXPR dma_status0_r &set_ib0_ao_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + volatile dma_status0_r &set_ib0_ao_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + CONSTEXPR uint32_t get_ib1_ao_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 16); + return value; + } + uint32_t get_ib1_ao_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR dma_status0_r &set_ib1_ao_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 16) & word0) | ((((1U << 1) - 1) & value) << 16); + return *this; + } + volatile dma_status0_r &set_ib1_ao_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 16) & word0) | ((((1U << 1) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_ib1_ao_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 17); + return value; + } + uint32_t get_ib1_ao_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 17); + return value; + } + CONSTEXPR dma_status0_r &set_ib1_ao_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 17) & word0) | ((((1U << 1) - 1) & value) << 17); + return *this; + } + volatile dma_status0_r &set_ib1_ao_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 17) & word0) | ((((1U << 1) - 1) & value) << 17); + return *this; + } + CONSTEXPR uint32_t get_ob0_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 18); + return value; + } + uint32_t get_ob0_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 18); + return value; + } + CONSTEXPR dma_status0_r &set_ob0_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 18) & word0) | ((((1U << 1) - 1) & value) << 18); + return *this; + } + volatile dma_status0_r &set_ob0_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 18) & word0) | ((((1U << 1) - 1) & value) << 18); + return *this; + } + CONSTEXPR uint32_t get_ob0_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 19); + return value; + } + uint32_t get_ob0_ready_c0() const volatile + { + uint32_t 
value = ((1U << 1) - 1) & (word0 >> 19); + return value; + } + CONSTEXPR dma_status0_r &set_ob0_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 19) & word0) | ((((1U << 1) - 1) & value) << 19); + return *this; + } + volatile dma_status0_r &set_ob0_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 19) & word0) | ((((1U << 1) - 1) & value) << 19); + return *this; + } + CONSTEXPR uint32_t get_ob1_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 20); + return value; + } + uint32_t get_ob1_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 20); + return value; + } + CONSTEXPR dma_status0_r &set_ob1_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 20) & word0) | ((((1U << 1) - 1) & value) << 20); + return *this; + } + volatile dma_status0_r &set_ob1_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 20) & word0) | ((((1U << 1) - 1) & value) << 20); + return *this; + } + CONSTEXPR uint32_t get_ob1_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 21); + return value; + } + uint32_t get_ob1_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 21); + return value; + } + CONSTEXPR dma_status0_r &set_ob1_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 21) & word0) | ((((1U << 1) - 1) & value) << 21); + return *this; + } + volatile dma_status0_r &set_ob1_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 21) & word0) | ((((1U << 1) - 1) & value) << 21); + return *this; + } + CONSTEXPR uint32_t get_cmd_valid() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 22); + return value; + } + uint32_t get_cmd_valid() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 22); + return value; + } + CONSTEXPR dma_status0_r &set_cmd_valid(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 22) & word0) | ((((1U << 1) - 1) & value) << 22); + return *this; + } + volatile dma_status0_r &set_cmd_valid(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 22) & word0) | ((((1U << 1) - 1) & value) << 22); + return *this; + } + CONSTEXPR uint32_t get_cmd_ready() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 23); + return value; + } + uint32_t get_cmd_ready() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 23); + return value; + } + CONSTEXPR dma_status0_r &set_cmd_ready(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 23) & word0) | ((((1U << 1) - 1) & value) << 23); + return *this; + } + volatile dma_status0_r &set_cmd_ready(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 23) & word0) | ((((1U << 1) - 1) & value) << 23); + return *this; + } + CONSTEXPR uint32_t get_wd_bitstream_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 24); + return value; + } + uint32_t get_wd_bitstream_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 24); + return value; + } + CONSTEXPR dma_status0_r &set_wd_bitstream_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 24) & word0) | ((((1U << 1) - 1) & value) << 24); + return *this; + } + volatile dma_status0_r &set_wd_bitstream_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 24) & word0) | ((((1U << 1) - 1) & value) << 24); + return *this; + } + CONSTEXPR uint32_t get_wd_bitstream_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 25); + return value; + } + uint32_t get_wd_bitstream_ready_c0() const volatile + { + uint32_t 
value = ((1U << 1) - 1) & (word0 >> 25); + return value; + } + CONSTEXPR dma_status0_r &set_wd_bitstream_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 25) & word0) | ((((1U << 1) - 1) & value) << 25); + return *this; + } + volatile dma_status0_r &set_wd_bitstream_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 25) & word0) | ((((1U << 1) - 1) & value) << 25); + return *this; + } + CONSTEXPR uint32_t get_bs_bitstream_valid_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 26); + return value; + } + uint32_t get_bs_bitstream_valid_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 26); + return value; + } + CONSTEXPR dma_status0_r &set_bs_bitstream_valid_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 26) & word0) | ((((1U << 1) - 1) & value) << 26); + return *this; + } + volatile dma_status0_r &set_bs_bitstream_valid_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 26) & word0) | ((((1U << 1) - 1) & value) << 26); + return *this; + } + CONSTEXPR uint32_t get_bs_bitstream_ready_c0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 27); + return value; + } + uint32_t get_bs_bitstream_ready_c0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 27); + return value; + } + CONSTEXPR dma_status0_r &set_bs_bitstream_ready_c0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 27) & word0) | ((((1U << 1) - 1) & value) << 27); + return *this; + } + volatile dma_status0_r &set_bs_bitstream_ready_c0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 27) & word0) | ((((1U << 1) - 1) & value) << 27); + return *this; + } + CONSTEXPR uint32_t get_axi0_ar_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 28); + return value; + } + uint32_t get_axi0_ar_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 28); + return value; + } + CONSTEXPR dma_status0_r &set_axi0_ar_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 28) & word0) | ((((1U << 1) - 1) & value) << 28); + return *this; + } + volatile dma_status0_r &set_axi0_ar_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 28) & word0) | ((((1U << 1) - 1) & value) << 28); + return *this; + } + CONSTEXPR uint32_t get_axi0_rd_limit_stall() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 29); + return value; + } + uint32_t get_axi0_rd_limit_stall() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 29); + return value; + } + CONSTEXPR dma_status0_r &set_axi0_rd_limit_stall(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 29) & word0) | ((((1U << 1) - 1) & value) << 29); + return *this; + } + volatile dma_status0_r &set_axi0_rd_limit_stall(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 29) & word0) | ((((1U << 1) - 1) & value) << 29); + return *this; + } + CONSTEXPR uint32_t get_axi0_aw_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 30); + return value; + } + uint32_t get_axi0_aw_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 30); + return value; + } + CONSTEXPR dma_status0_r &set_axi0_aw_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 30) & word0) | ((((1U << 1) - 1) & value) << 30); + return *this; + } + volatile dma_status0_r &set_axi0_aw_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 30) & word0) | ((((1U << 1) - 1) & value) << 30); + return *this; + } + CONSTEXPR uint32_t get_axi0_w_stalled() const + { + uint32_t value = 
((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_axi0_w_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR dma_status0_r &set_axi0_w_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile dma_status0_r &set_axi0_w_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// dma_status1_r - DMA_STATUS1 +struct dma_status1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t axi0_wr_limit_stall : 1; // Write stalled due to one AXI0 limit counter being reached + uint32_t axi1_ar_stalled : 1; // Read transfer request stalled on arready low AXI1 (due to memory system) + uint32_t axi1_rd_limit_stall : 1; // Read stalled due to one AXI1 limit counter being reached + uint32_t axi1_wr_stalled : 1; // Write transfer request stalled on awready low AXI1 (due to memory system) + uint32_t axi1_w_stalled : 1; // Write transfer stalled on wready low AXI1 (due to memory system) + uint32_t axi1_wr_limit_stall : 1; // Write stalled due to one AXI1 limit counter being reached + uint32_t wgt_idle_c1 : 1; // When this bit is high means that the WGT block is not busy in generating + // addresses for a WGT job + uint32_t bas_idle_c1 : 1; // When this bit is high means that the BAS block is not busy in generating + // addresses for a BAS job + uint32_t ib0_ai_valid_c1 : 1; // Data for AI to be read in IFM input buffer 0 - Core 1 + uint32_t ib0_ai_ready_c1 : 1; // Data consumed from AI in IFM input buffer 0 - Core 1 + uint32_t ib1_ai_valid_c1 : 1; // Data for AI to be read in IFM input buffer 1 - Core 1 + uint32_t ib1_ai_ready_c1 : 1; // Data consumed from AI in IFM input buffer 1 - Core 1 + uint32_t ib0_ao_valid_c1 : 1; // Data for AO to be read in IFM input buffer 0 - Core 1 + uint32_t ib0_ao_ready_c1 : 1; // Data consumed from AO in IFM input buffer 0 - Core 1 + uint32_t ib1_ao_valid_c1 : 1; // Data for AO to be read in IFM input buffer 0 - Core 1 + uint32_t ib1_ao_ready_c1 : 1; // Data consumed from AO in IFM input buffer 1 - Core 1 + uint32_t ob0_valid_c1 : 1; // Data for DMA ready to be consumed in OFM output buffer 0 - Core 1 + uint32_t ob0_ready_c1 : 1; // Data consumed from DMA in OFM output buffer 0 - Core 1 + uint32_t ob1_valid_c1 : 1; // Data for DMA ready to be consumed in OFM output buffer 1 - Core 1 + uint32_t ob1_ready_c1 : 1; // Data consumed from DMA in OFM output buffer 1 - Core 1 + uint32_t wd_bitstream_valid_c1 : 1; // New weight word for WD to be consumed - Core 1 + uint32_t wd_bitstream_ready_c1 : 1; // Weight word consumed by WD - Core 1 + uint32_t bs_bitstream_valid_c1 : 1; // New BaS word for AO to be consumed - Core 1 + uint32_t bs_bitstream_ready_c1 : 1; // BaS word consumed by AO - Core 1 + uint32_t reserved0 : 8; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_status1_r() : word0(0) {} + CONSTEXPR dma_status1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_status1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_axi0_wr_limit_stall() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + 
uint32_t get_axi0_wr_limit_stall() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR dma_status1_r &set_axi0_wr_limit_stall(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile dma_status1_r &set_axi0_wr_limit_stall(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_axi1_ar_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_axi1_ar_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_ar_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile dma_status1_r &set_axi1_ar_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_axi1_rd_limit_stall() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_axi1_rd_limit_stall() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_rd_limit_stall(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile dma_status1_r &set_axi1_rd_limit_stall(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_axi1_wr_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_axi1_wr_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_wr_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile dma_status1_r &set_axi1_wr_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_axi1_w_stalled() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_axi1_w_stalled() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_w_stalled(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile dma_status1_r &set_axi1_w_stalled(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_axi1_wr_limit_stall() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_axi1_wr_limit_stall() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR dma_status1_r &set_axi1_wr_limit_stall(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile dma_status1_r &set_axi1_wr_limit_stall(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + CONSTEXPR uint32_t get_wgt_idle_c1() const + { + uint32_t value = ((1U << 1) - 
1) & (word0 >> 6); + return value; + } + uint32_t get_wgt_idle_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 6); + return value; + } + CONSTEXPR dma_status1_r &set_wgt_idle_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + volatile dma_status1_r &set_wgt_idle_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 6) & word0) | ((((1U << 1) - 1) & value) << 6); + return *this; + } + CONSTEXPR uint32_t get_bas_idle_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + uint32_t get_bas_idle_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 7); + return value; + } + CONSTEXPR dma_status1_r &set_bas_idle_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + volatile dma_status1_r &set_bas_idle_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 7) & word0) | ((((1U << 1) - 1) & value) << 7); + return *this; + } + CONSTEXPR uint32_t get_ib0_ai_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + uint32_t get_ib0_ai_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR dma_status1_r &set_ib0_ai_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + volatile dma_status1_r &set_ib0_ai_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 8) & word0) | ((((1U << 1) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_ib0_ai_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + uint32_t get_ib0_ai_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 9); + return value; + } + CONSTEXPR dma_status1_r &set_ib0_ai_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + volatile dma_status1_r &set_ib0_ai_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 9) & word0) | ((((1U << 1) - 1) & value) << 9); + return *this; + } + CONSTEXPR uint32_t get_ib1_ai_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_ib1_ai_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR dma_status1_r &set_ib1_ai_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile dma_status1_r &set_ib1_ai_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + CONSTEXPR uint32_t get_ib1_ai_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + uint32_t get_ib1_ai_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR dma_status1_r &set_ib1_ai_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + volatile dma_status1_r &set_ib1_ai_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 11) & word0) | ((((1U << 1) - 1) & value) << 11); + return *this; + } + CONSTEXPR uint32_t get_ib0_ao_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 
>> 12); + return value; + } + uint32_t get_ib0_ao_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 12); + return value; + } + CONSTEXPR dma_status1_r &set_ib0_ao_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + volatile dma_status1_r &set_ib0_ao_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 12) & word0) | ((((1U << 1) - 1) & value) << 12); + return *this; + } + CONSTEXPR uint32_t get_ib0_ao_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + uint32_t get_ib0_ao_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 13); + return value; + } + CONSTEXPR dma_status1_r &set_ib0_ao_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + volatile dma_status1_r &set_ib0_ao_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 13) & word0) | ((((1U << 1) - 1) & value) << 13); + return *this; + } + CONSTEXPR uint32_t get_ib1_ao_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + uint32_t get_ib1_ao_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 14); + return value; + } + CONSTEXPR dma_status1_r &set_ib1_ao_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + volatile dma_status1_r &set_ib1_ao_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 14) & word0) | ((((1U << 1) - 1) & value) << 14); + return *this; + } + CONSTEXPR uint32_t get_ib1_ao_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + uint32_t get_ib1_ao_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 15); + return value; + } + CONSTEXPR dma_status1_r &set_ib1_ao_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + volatile dma_status1_r &set_ib1_ao_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 15) & word0) | ((((1U << 1) - 1) & value) << 15); + return *this; + } + CONSTEXPR uint32_t get_ob0_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 16); + return value; + } + uint32_t get_ob0_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 16); + return value; + } + CONSTEXPR dma_status1_r &set_ob0_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 16) & word0) | ((((1U << 1) - 1) & value) << 16); + return *this; + } + volatile dma_status1_r &set_ob0_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 16) & word0) | ((((1U << 1) - 1) & value) << 16); + return *this; + } + CONSTEXPR uint32_t get_ob0_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 17); + return value; + } + uint32_t get_ob0_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 17); + return value; + } + CONSTEXPR dma_status1_r &set_ob0_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 17) & word0) | ((((1U << 1) - 1) & value) << 17); + return *this; + } + volatile dma_status1_r &set_ob0_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 17) & word0) | ((((1U << 1) - 1) & value) << 17); + return *this; + } + CONSTEXPR uint32_t get_ob1_valid_c1() const + { + uint32_t value = ((1U << 1) - 
1) & (word0 >> 18); + return value; + } + uint32_t get_ob1_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 18); + return value; + } + CONSTEXPR dma_status1_r &set_ob1_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 18) & word0) | ((((1U << 1) - 1) & value) << 18); + return *this; + } + volatile dma_status1_r &set_ob1_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 18) & word0) | ((((1U << 1) - 1) & value) << 18); + return *this; + } + CONSTEXPR uint32_t get_ob1_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 19); + return value; + } + uint32_t get_ob1_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 19); + return value; + } + CONSTEXPR dma_status1_r &set_ob1_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 19) & word0) | ((((1U << 1) - 1) & value) << 19); + return *this; + } + volatile dma_status1_r &set_ob1_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 19) & word0) | ((((1U << 1) - 1) & value) << 19); + return *this; + } + CONSTEXPR uint32_t get_wd_bitstream_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 20); + return value; + } + uint32_t get_wd_bitstream_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 20); + return value; + } + CONSTEXPR dma_status1_r &set_wd_bitstream_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 20) & word0) | ((((1U << 1) - 1) & value) << 20); + return *this; + } + volatile dma_status1_r &set_wd_bitstream_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 20) & word0) | ((((1U << 1) - 1) & value) << 20); + return *this; + } + CONSTEXPR uint32_t get_wd_bitstream_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 21); + return value; + } + uint32_t get_wd_bitstream_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 21); + return value; + } + CONSTEXPR dma_status1_r &set_wd_bitstream_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 21) & word0) | ((((1U << 1) - 1) & value) << 21); + return *this; + } + volatile dma_status1_r &set_wd_bitstream_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 21) & word0) | ((((1U << 1) - 1) & value) << 21); + return *this; + } + CONSTEXPR uint32_t get_bs_bitstream_valid_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 22); + return value; + } + uint32_t get_bs_bitstream_valid_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 22); + return value; + } + CONSTEXPR dma_status1_r &set_bs_bitstream_valid_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 22) & word0) | ((((1U << 1) - 1) & value) << 22); + return *this; + } + volatile dma_status1_r &set_bs_bitstream_valid_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 22) & word0) | ((((1U << 1) - 1) & value) << 22); + return *this; + } + CONSTEXPR uint32_t get_bs_bitstream_ready_c1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 23); + return value; + } + uint32_t get_bs_bitstream_ready_c1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 23); + return value; + } + CONSTEXPR dma_status1_r &set_bs_bitstream_ready_c1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 23) & word0) | ((((1U << 1) - 1) & value) << 23); + return *this; + } + volatile dma_status1_r &set_bs_bitstream_ready_c1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 23) & word0) | ((((1U << 1) - 1) & 
value) << 23); + return *this; + } +#endif +}; + +// clkforce_r - Force clocks on for clock gating +struct clkforce_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t top_level_clk : 1; // set to 1 to force on TOP level clock + uint32_t cc_clk : 1; // set to 1 to force on CC clock + uint32_t dma_clk : 1; // set to 1 to force on DMA clock + uint32_t mac_clk : 1; // set to 1 to force on MAC clock + uint32_t ao_clk : 1; // set to 1 to force on AO clock + uint32_t wd_clk : 1; // set to 1 to force on WD clock + uint32_t reserved0 : 26; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR clkforce_r() : word0(0) {} + CONSTEXPR clkforce_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + clkforce_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_top_level_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_top_level_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR clkforce_r &set_top_level_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile clkforce_r &set_top_level_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_cc_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_cc_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR clkforce_r &set_cc_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile clkforce_r &set_cc_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_dma_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_dma_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR clkforce_r &set_dma_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile clkforce_r &set_dma_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_mac_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_mac_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR clkforce_r &set_mac_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile clkforce_r &set_mac_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_ao_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + uint32_t get_ao_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 4); + return value; + } + CONSTEXPR clkforce_r &set_ao_clk(uint32_t value) + { + word0 = 
(((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + volatile clkforce_r &set_ao_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 4) & word0) | ((((1U << 1) - 1) & value) << 4); + return *this; + } + CONSTEXPR uint32_t get_wd_clk() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + uint32_t get_wd_clk() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 5); + return value; + } + CONSTEXPR clkforce_r &set_wd_clk(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } + volatile clkforce_r &set_wd_clk(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 5) & word0) | ((((1U << 1) - 1) & value) << 5); + return *this; + } +#endif +}; + +// debug_address_r - Set debug address for register reads 0x400-0x7FF. The address must be 1KB aligned +struct debug_address_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t addr : 32; // Register address + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR debug_address_r() : word0(0) {} + CONSTEXPR debug_address_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + debug_address_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_addr() const + { + uint32_t value = word0; + return value; + } + uint32_t get_addr() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR debug_address_r &set_addr(uint32_t value) + { + word0 = value; + return *this; + } + volatile debug_address_r &set_addr(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// debug_misc_r - 32-bit read/write register for driver debug use. This does not affect NPU function +struct debug_misc_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t misc : 32; // Debug misc + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR debug_misc_r() : word0(0) {} + CONSTEXPR debug_misc_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + debug_misc_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_misc() const + { + uint32_t value = word0; + return value; + } + uint32_t get_misc() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR debug_misc_r &set_misc(uint32_t value) + { + word0 = value; + return *this; + } + volatile debug_misc_r &set_misc(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// debugcore_r - Select core number for debug registers (0x200-0x2FF) and RAM reads (0x400-0x7FF). 
Value is 0 or 1 +struct debugcore_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t core : 32; // Debug core + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR debugcore_r() : word0(0) {} + CONSTEXPR debugcore_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + debugcore_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_core() const + { + uint32_t value = word0; + return value; + } + uint32_t get_core() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR debugcore_r &set_core(uint32_t value) + { + word0 = value; + return *this; + } + volatile debugcore_r &set_core(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// debug_block_r - Set from which of four block banks the TSU registers are read. 0 = read from the current bank 256+n = +// force to read from bank n where n is in the range 0 to 3 +struct debug_block_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t block : 32; // Debug block + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR debug_block_r() : word0(0) {} + CONSTEXPR debug_block_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + debug_block_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_block() const + { + uint32_t value = word0; + return value; + } + uint32_t get_block() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR debug_block_r &set_block(uint32_t value) + { + word0 = value; + return *this; + } + volatile debug_block_r &set_block(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pmcr_r - PMU Register control +struct pmcr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t cnt_en : 1; // Enable counter + uint32_t event_cnt_rst : 1; // Reset event counter + uint32_t cycle_cnt_rst : 1; // Reset cycle counter + uint32_t mask_en : 1; // PMU can be enabled/disabled by command stream operation NPU_OP_PMU_MASK + uint32_t reserved0 : 7; + uint32_t num_event_cnt : 5; // Number of event counters + uint32_t reserved1 : 16; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmcr_r() : word0(8192) {} + CONSTEXPR pmcr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmcr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_cnt_en() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_cnt_en() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmcr_r &set_cnt_en(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmcr_r &set_cnt_en(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & 
value) << 0); + return *this; + } + CONSTEXPR uint32_t get_event_cnt_rst() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_event_cnt_rst() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmcr_r &set_event_cnt_rst(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmcr_r &set_event_cnt_rst(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_cycle_cnt_rst() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_cycle_cnt_rst() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmcr_r &set_cycle_cnt_rst(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmcr_r &set_cycle_cnt_rst(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_mask_en() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_mask_en() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmcr_r &set_mask_en(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmcr_r &set_mask_en(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_num_event_cnt() const + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 11); + return value; + } + uint32_t get_num_event_cnt() const volatile + { + uint32_t value = ((1U << 5) - 1) & (word0 >> 11); + return value; + } + CONSTEXPR pmcr_r &set_num_event_cnt(uint32_t value) + { + word0 = (((~((1U << 5) - 1)) << 11) & word0) | ((((1U << 5) - 1) & value) << 11); + return *this; + } + volatile pmcr_r &set_num_event_cnt(uint32_t value) volatile + { + word0 = (((~((1U << 5) - 1)) << 11) & word0) | ((((1U << 5) - 1) & value) << 11); + return *this; + } +#endif +}; + +// pmcntenset_r - Count enable set register +struct pmcntenset_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0 : 1; // Event counter enable bit for PMEVCNTR0 + uint32_t EVENT_CNT_1 : 1; // Event counter enable bit for PMEVCNTR1 + uint32_t EVENT_CNT_2 : 1; // Event counter enable bit for PMEVCNTR2 + uint32_t EVENT_CNT_3 : 1; // Event counter enable bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT : 1; // PMCCNTR enable bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmcntenset_r() : word0(0) {} + CONSTEXPR pmcntenset_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmcntenset_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmcntenset_r &set_EVENT_CNT_0(uint32_t value) 
+ { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmcntenset_r &set_EVENT_CNT_0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmcntenset_r &set_EVENT_CNT_1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmcntenset_r &set_EVENT_CNT_1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmcntenset_r &set_EVENT_CNT_2(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmcntenset_r &set_EVENT_CNT_2(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmcntenset_r &set_EVENT_CNT_3(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmcntenset_r &set_EVENT_CNT_3(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmcntenset_r &set_CYCLE_CNT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmcntenset_r &set_CYCLE_CNT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmcntenclr_r - Count enable clear register +struct pmcntenclr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0 : 1; // Event counter disable bit for PMEVCNTR0 + uint32_t EVENT_CNT_1 : 1; // Event counter disable bit for PMEVCNTR1 + uint32_t EVENT_CNT_2 : 1; // Event counter disable bit for PMEVCNTR2 + uint32_t EVENT_CNT_3 : 1; // Event counter disable bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT : 1; // PMCCNTR disable bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmcntenclr_r() : word0(0) {} + CONSTEXPR pmcntenclr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmcntenclr_r copy() volatile + { + return *this; + } + CONSTEXPR 
uint32_t get_EVENT_CNT_0() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmcntenclr_r &set_EVENT_CNT_0(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmcntenclr_r &set_EVENT_CNT_0(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmcntenclr_r &set_EVENT_CNT_1(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmcntenclr_r &set_EVENT_CNT_1(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmcntenclr_r &set_EVENT_CNT_2(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmcntenclr_r &set_EVENT_CNT_2(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmcntenclr_r &set_EVENT_CNT_3(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmcntenclr_r &set_EVENT_CNT_3(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmcntenclr_r &set_CYCLE_CNT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmcntenclr_r &set_CYCLE_CNT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmovsset_r - Overflow flag status set register +struct pmovsset_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0_OVF : 1; // Event counter overflow set bit for PMEVCNTR0 + uint32_t EVENT_CNT_1_OVF : 1; // Event counter overflow set bit for PMEVCNTR1 + uint32_t EVENT_CNT_2_OVF : 1; // Event counter overflow set bit for PMEVCNTR2 + uint32_t EVENT_CNT_3_OVF : 1; // Event counter overflow set bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT_OVF : 1; // PMCCNTR overflow set bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmovsset_r() : word0(0) {} + CONSTEXPR pmovsset_r(uint32_t init) : 
word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmovsset_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmovsset_r &set_EVENT_CNT_0_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmovsset_r &set_EVENT_CNT_0_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmovsset_r &set_EVENT_CNT_1_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmovsset_r &set_EVENT_CNT_1_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmovsset_r &set_EVENT_CNT_2_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmovsset_r &set_EVENT_CNT_2_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmovsset_r &set_EVENT_CNT_3_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmovsset_r &set_EVENT_CNT_3_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmovsset_r &set_CYCLE_CNT_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmovsset_r &set_CYCLE_CNT_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmovsclr_r - Overflow flag status clear register +struct pmovsclr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0_OVF : 1; // Event counter overflow clear bit for PMEVCNTR0 + uint32_t EVENT_CNT_1_OVF : 1; // Event counter overflow clear bit for PMEVCNTR1 + 
uint32_t EVENT_CNT_2_OVF : 1; // Event counter overflow clear bit for PMEVCNTR2 + uint32_t EVENT_CNT_3_OVF : 1; // Event counter overflow clear bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT_OVF : 1; // PMCCNTR overflow clear bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmovsclr_r() : word0(0) {} + CONSTEXPR pmovsclr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmovsclr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmovsclr_r &set_EVENT_CNT_0_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmovsclr_r &set_EVENT_CNT_0_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmovsclr_r &set_EVENT_CNT_1_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmovsclr_r &set_EVENT_CNT_1_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmovsclr_r &set_EVENT_CNT_2_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmovsclr_r &set_EVENT_CNT_2_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmovsclr_r &set_EVENT_CNT_3_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmovsclr_r &set_EVENT_CNT_3_OVF(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT_OVF() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT_OVF() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmovsclr_r &set_CYCLE_CNT_OVF(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmovsclr_r &set_CYCLE_CNT_OVF(uint32_t value) volatile + { + word0 
= (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmintset_r - Interrupt enable set register +struct pmintset_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0_INT : 1; // Event counter overflow interrupt request enable bit for PMEVCNTR0 + uint32_t EVENT_CNT_1_INT : 1; // Event counter overflow interrupt request enable bit for PMEVCNTR1 + uint32_t EVENT_CNT_2_INT : 1; // Event counter overflow interrupt request enable bit for PMEVCNTR2 + uint32_t EVENT_CNT_3_INT : 1; // Event counter overflow interrupt request enable bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT_INT : 1; // PMCCNTR overflow interrupt request enable bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmintset_r() : word0(0) {} + CONSTEXPR pmintset_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmintset_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmintset_r &set_EVENT_CNT_0_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmintset_r &set_EVENT_CNT_0_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmintset_r &set_EVENT_CNT_1_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmintset_r &set_EVENT_CNT_1_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmintset_r &set_EVENT_CNT_2_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmintset_r &set_EVENT_CNT_2_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + uint32_t get_EVENT_CNT_3_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmintset_r &set_EVENT_CNT_3_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmintset_r &set_EVENT_CNT_3_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + 
CONSTEXPR uint32_t get_CYCLE_CNT_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmintset_r &set_CYCLE_CNT_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmintset_r &set_CYCLE_CNT_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmintclr_r - Interrupt enable clear register +struct pmintclr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EVENT_CNT_0_INT : 1; // Event counter overflow interrupt request disable bit for PMEVCNTR0 + uint32_t EVENT_CNT_1_INT : 1; // Event counter overflow interrupt request disable bit for PMEVCNTR1 + uint32_t EVENT_CNT_2_INT : 1; // Event counter overflow interrupt request disable bit for PMEVCNTR2 + uint32_t EVENT_CNT_3_INT : 1; // Event counter overflow interrupt request disable bit for PMEVCNTR3 + uint32_t reserved0 : 27; + uint32_t CYCLE_CNT_INT : 1; // PMCCNTR overflow interrupt request disable bit + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmintclr_r() : word0(0) {} + CONSTEXPR pmintclr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmintclr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_0_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + uint32_t get_EVENT_CNT_0_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 0); + return value; + } + CONSTEXPR pmintclr_r &set_EVENT_CNT_0_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + volatile pmintclr_r &set_EVENT_CNT_0_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 0) & word0) | ((((1U << 1) - 1) & value) << 0); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_1_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + uint32_t get_EVENT_CNT_1_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 1); + return value; + } + CONSTEXPR pmintclr_r &set_EVENT_CNT_1_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + volatile pmintclr_r &set_EVENT_CNT_1_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 1) & word0) | ((((1U << 1) - 1) & value) << 1); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_2_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + uint32_t get_EVENT_CNT_2_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 2); + return value; + } + CONSTEXPR pmintclr_r &set_EVENT_CNT_2_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + volatile pmintclr_r &set_EVENT_CNT_2_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 2) & word0) | ((((1U << 1) - 1) & value) << 2); + return *this; + } + CONSTEXPR uint32_t get_EVENT_CNT_3_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 
>> 3); + return value; + } + uint32_t get_EVENT_CNT_3_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 3); + return value; + } + CONSTEXPR pmintclr_r &set_EVENT_CNT_3_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + volatile pmintclr_r &set_EVENT_CNT_3_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 3) & word0) | ((((1U << 1) - 1) & value) << 3); + return *this; + } + CONSTEXPR uint32_t get_CYCLE_CNT_INT() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + uint32_t get_CYCLE_CNT_INT() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 31); + return value; + } + CONSTEXPR pmintclr_r &set_CYCLE_CNT_INT(uint32_t value) + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } + volatile pmintclr_r &set_CYCLE_CNT_INT(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 31) & word0) | ((((1U << 1) - 1) & value) << 31); + return *this; + } +#endif +}; + +// pmccntr_r - Performance monitor cycle count register +struct pmccntr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CYCLE_CNT_LO : 32; // Cycle count - LSB + uint32_t CYCLE_CNT_HI : 16; // Cycle count - MSB + uint32_t reserved0 : 16; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR pmccntr_r() : word0(0), word1(0) {} + CONSTEXPR pmccntr_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + pmccntr_r copy() volatile + { + return *this; + } +#endif +}; + +// pmccntr_cfg_r - Set start/stop event on the cycle counter +struct pmccntr_cfg_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CYCLE_CNT_CFG_START : 10; // Cycle counter start event + uint32_t reserved0 : 6; + uint32_t CYCLE_CNT_CFG_STOP : 10; // Cycle counter stop event + uint32_t reserved1 : 6; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmccntr_cfg_r() : word0(0) {} + CONSTEXPR pmccntr_cfg_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmccntr_cfg_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::pmu_event get_CYCLE_CNT_CFG_START() const + { + NPU_NAMESPACE::pmu_event value = static_cast<NPU_NAMESPACE::pmu_event>(((1U << 10) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::pmu_event get_CYCLE_CNT_CFG_START() const volatile + { + NPU_NAMESPACE::pmu_event value = static_cast<NPU_NAMESPACE::pmu_event>(((1U << 10) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR pmccntr_cfg_r &set_CYCLE_CNT_CFG_START(NPU_NAMESPACE::pmu_event value) + { + word0 = (((~((1U << 10) - 1)) << 0) & word0) | ((((1U << 10) - 1) & 
static_cast<uint32_t>(value)) << 0); + return *this; + } + volatile pmccntr_cfg_r &set_CYCLE_CNT_CFG_START(NPU_NAMESPACE::pmu_event value) volatile + { + word0 = (((~((1U << 10) - 1)) << 0) & word0) | ((((1U << 10) - 1) & static_cast<uint32_t>(value)) << 0); + return *this; + } + CONSTEXPR NPU_NAMESPACE::pmu_event get_CYCLE_CNT_CFG_STOP() const + { + NPU_NAMESPACE::pmu_event value = static_cast<NPU_NAMESPACE::pmu_event>(((1U << 10) - 1) & (word0 >> 16)); + return value; + } + NPU_NAMESPACE::pmu_event get_CYCLE_CNT_CFG_STOP() const volatile + { + NPU_NAMESPACE::pmu_event value = static_cast<NPU_NAMESPACE::pmu_event>(((1U << 10) - 1) & (word0 >> 16)); + return value; + } + CONSTEXPR pmccntr_cfg_r &set_CYCLE_CNT_CFG_STOP(NPU_NAMESPACE::pmu_event value) + { + word0 = (((~((1U << 10) - 1)) << 16) & word0) | ((((1U << 10) - 1) & static_cast<uint32_t>(value)) << 16); + return *this; + } + volatile pmccntr_cfg_r &set_CYCLE_CNT_CFG_STOP(NPU_NAMESPACE::pmu_event value) volatile + { + word0 = (((~((1U << 10) - 1)) << 16) & word0) | ((((1U << 10) - 1) & static_cast<uint32_t>(value)) << 16); + return *this; + } +#endif +}; + +// pmcaxi_chan_r - Set which AXI channel to monitor for latency measurements in PMU +struct pmcaxi_chan_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CH_SEL : 4; // Channel select for latency measurements + uint32_t reserved0 : 4; + uint32_t AXI_CNT_SEL : 2; // AXI counter to monitor for latency measurements + uint32_t BW_CH_SEL_EN : 1; // Bandwidth channel selector + uint32_t reserved1 : 21; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmcaxi_chan_r() : word0(0) {} + CONSTEXPR pmcaxi_chan_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmcaxi_chan_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::pmu_axi_channel get_CH_SEL() const + { + NPU_NAMESPACE::pmu_axi_channel value = + static_cast<NPU_NAMESPACE::pmu_axi_channel>(((1U << 4) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::pmu_axi_channel get_CH_SEL() const volatile + { + NPU_NAMESPACE::pmu_axi_channel value = + static_cast<NPU_NAMESPACE::pmu_axi_channel>(((1U << 4) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR pmcaxi_chan_r &set_CH_SEL(NPU_NAMESPACE::pmu_axi_channel value) + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & static_cast<uint32_t>(value)) << 0); + return *this; + } + volatile pmcaxi_chan_r &set_CH_SEL(NPU_NAMESPACE::pmu_axi_channel value) volatile + { + word0 = (((~((1U << 4) - 1)) << 0) & word0) | ((((1U << 4) - 1) & static_cast<uint32_t>(value)) << 0); + return *this; + } + CONSTEXPR uint32_t get_AXI_CNT_SEL() const + { + uint32_t value = ((1U << 2) - 1) & (word0 >> 8); + return value; + } + uint32_t get_AXI_CNT_SEL() const volatile + { + uint32_t value = ((1U << 2) - 1) & (word0 >> 8); + return value; + } + CONSTEXPR pmcaxi_chan_r &set_AXI_CNT_SEL(uint32_t value) + { + word0 = (((~((1U << 2) - 1)) << 8) & word0) | ((((1U << 2) - 1) & value) << 8); + return *this; + } + volatile pmcaxi_chan_r &set_AXI_CNT_SEL(uint32_t value) volatile + { + word0 = (((~((1U << 2) - 1)) << 8) & word0) | ((((1U << 2) - 1) & value) << 8); + return *this; + } + CONSTEXPR uint32_t get_BW_CH_SEL_EN() const + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + uint32_t get_BW_CH_SEL_EN() const volatile + { + uint32_t value = ((1U << 1) - 1) & (word0 >> 10); + return value; + } + CONSTEXPR pmcaxi_chan_r &set_BW_CH_SEL_EN(uint32_t value) + 
{ + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } + volatile pmcaxi_chan_r &set_BW_CH_SEL_EN(uint32_t value) volatile + { + word0 = (((~((1U << 1) - 1)) << 10) & word0) | ((((1U << 1) - 1) & value) << 10); + return *this; + } +#endif +}; + +// kernel_x_r - Kernel X offset of in kernel decomposition +struct kernel_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_x_r() : word0(0) {} + CONSTEXPR kernel_x_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_x_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_x_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_x_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_y_r - Kernel Y offset of in kernel decomposition +struct kernel_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_y_r() : word0(0) {} + CONSTEXPR kernel_y_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_y_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_y_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_y_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_w_m1_r - Kernel (width-1) of current block +struct kernel_w_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_w_m1_r() : word0(0) {} + CONSTEXPR kernel_w_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_w_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_w_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_w_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_h_m1_r - Kernel (height-1) of current block +struct kernel_h_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register 
value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_h_m1_r() : word0(0) {} + CONSTEXPR kernel_h_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_h_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_h_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_h_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_cblk_width_m1_r - OFM current block (width-1) +struct ofm_cblk_width_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_cblk_width_m1_r() : word0(0) {} + CONSTEXPR ofm_cblk_width_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_cblk_width_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_cblk_width_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_cblk_width_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_cblk_height_m1_r - OFM current block (height-1) +struct ofm_cblk_height_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_cblk_height_m1_r() : word0(0) {} + CONSTEXPR ofm_cblk_height_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_cblk_height_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_cblk_height_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_cblk_height_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_cblk_depth_m1_r - OFM current block (depth-1) +struct ofm_cblk_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_cblk_depth_m1_r() : word0(0) {} + CONSTEXPR ofm_cblk_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator 
uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_cblk_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_cblk_depth_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_cblk_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_cblk_depth_m1_r - IFM current block (depth-1) +struct ifm_cblk_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_cblk_depth_m1_r() : word0(0) {} + CONSTEXPR ifm_cblk_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_cblk_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_cblk_depth_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_cblk_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_x_r - Block X coordinate in OFM +struct ofm_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_x_r() : word0(0) {} + CONSTEXPR ofm_x_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_x_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_x_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_x_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_y_r - Block Y coordinate in OFM +struct ofm_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_y_r() : word0(0) {} + CONSTEXPR ofm_y_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_y_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_y_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_y_r &set_value(uint32_t value) volatile + { + word0 
= value; + return *this; + } +#endif +}; + +// ofm_z_r - Block Z (channel) coordinate in OFM +struct ofm_z_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_z_r() : word0(0) {} + CONSTEXPR ofm_z_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_z_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_z_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_z_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_z_r - Block Z (channel) coordinate in IFM +struct ifm_z_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_z_r() : word0(0) {} + CONSTEXPR ifm_z_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_z_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_z_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_z_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pad_top_r - Block top pad +struct pad_top_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pad_top_r() : word0(0) {} + CONSTEXPR pad_top_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pad_top_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pad_top_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile pad_top_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pad_left_r - Block left pad +struct pad_left_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pad_left_r() : word0(0) {} + CONSTEXPR pad_left_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator 
uint32_t() volatile + { + return word0; + } + pad_left_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pad_left_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile pad_left_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_cblk_width_r - IFM current block derived width +struct ifm_cblk_width_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_cblk_width_r() : word0(0) {} + CONSTEXPR ifm_cblk_width_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_cblk_width_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_cblk_width_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_cblk_width_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_cblk_height_r - IFM current block derived height +struct ifm_cblk_height_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_cblk_height_r() : word0(0) {} + CONSTEXPR ifm_cblk_height_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_cblk_height_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_cblk_height_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_cblk_height_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_ifm_src_r - DMA IFM channel source position on AXI +struct dma_ifm_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_ifm_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_ifm_src_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + 
return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + dma_ifm_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_ifm_dst_r - DMA IFM channel destination position in SHRAM +struct dma_ifm_dst_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_ifm_dst_r() : word0(0) {} + CONSTEXPR dma_ifm_dst_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_ifm_dst_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma_ifm_dst_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma_ifm_dst_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_ofm_src_r - DMA OFM channel source position in SHRAM +struct dma_ofm_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_ofm_src_r() : word0(0) {} + CONSTEXPR dma_ofm_src_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_ofm_src_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma_ofm_src_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma_ofm_src_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_ofm_dst_r - DMA OFM channel destination position on AXI +struct dma_ofm_dst_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_ofm_dst_r() : word0(0), word1(0) {} + CONSTEXPR dma_ofm_dst_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + dma_ofm_dst_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_weight_src_r - DMA weight channel source position on AXI +struct dma_weight_src_r +{ +#ifndef 
__cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_weight_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_weight_src_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + dma_weight_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_cmd_src_r - DMA command channel source position on AXI +struct dma_cmd_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_cmd_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_cmd_src_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + dma_cmd_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_cmd_size_r - DMA command channel number of bytes buffered +struct dma_cmd_size_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma_cmd_size_r() : word0(0) {} + CONSTEXPR dma_cmd_size_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma_cmd_size_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma_cmd_size_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma_cmd_size_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_m2m_src_r - DMA memory to memory source position on AXI +struct dma_m2m_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; 
+ }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_m2m_src_r() : word0(0), word1(0) {} + CONSTEXPR dma_m2m_src_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + dma_m2m_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma_m2m_dst_r - DMA memory to memory destination position on AXI +struct dma_m2m_dst_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_m2m_dst_r() : word0(0), word1(0) {} + CONSTEXPR dma_m2m_dst_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + dma_m2m_dst_r copy() volatile + { + return *this; + } +#endif +}; + +// current_qread_r - QREAD position being issued (rather than completed) +struct current_qread_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR current_qread_r() : word0(0) {} + CONSTEXPR current_qread_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + current_qread_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR current_qread_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile current_qread_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma_scale_src_r - DMA scale and bias channel source position on AXI +struct dma_scale_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t offset_LO : 32; // Offset - LSB + uint32_t offset_HI : 8; // Offset - MSB + uint32_t reserved0 : 24; + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma_scale_src_r() : word0(0), word1(0) {} + CONSTEXPR 
dma_scale_src_r(uint64_t init) : + word0(static_cast<uint32_t>((init)&std::numeric_limits<uint32_t>::max())), + word1(static_cast<uint32_t>((init >> 32) & std::numeric_limits<uint32_t>::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast<uint32_t>((value)&std::numeric_limits<uint32_t>::max()); + word1 = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint32_t>::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast<uint64_t>(word1) << 32) | word0; + } + dma_scale_src_r copy() volatile + { + return *this; + } +#endif +}; + +// current_block_r - 0-3. Current block bank being executed by the TSU or last one executed if TSU is stopped +struct current_block_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR current_block_r() : word0(0) {} + CONSTEXPR current_block_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + current_block_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR current_block_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile current_block_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// current_op_r - Current NPU OP command being executed by the TSU +struct current_op_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR current_op_r() : word0(0) {} + CONSTEXPR current_op_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + current_op_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR current_op_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile current_op_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// current_cmd_r - Current 32-bit command being parsed by the command stream parser +struct current_cmd_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR current_cmd_r() : word0(0) {} + CONSTEXPR current_cmd_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + current_cmd_r copy() 
volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR current_cmd_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile current_cmd_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pmevcntr_r - Performance monitor event 0 count register +struct pmevcntr_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t count : 32; // Count word + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmevcntr_r() : word0(0) {} + CONSTEXPR pmevcntr_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmevcntr_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_count() const + { + uint32_t value = word0; + return value; + } + uint32_t get_count() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pmevcntr_r &set_count(uint32_t value) + { + word0 = value; + return *this; + } + volatile pmevcntr_r &set_count(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pmevtyper_r - Performance monitor event type register 0 +struct pmevtyper_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t EV_TYPE : 10; // Event Type + uint32_t reserved0 : 22; + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pmevtyper_r() : word0(0) {} + CONSTEXPR pmevtyper_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pmevtyper_r copy() volatile + { + return *this; + } + CONSTEXPR NPU_NAMESPACE::pmu_event get_EV_TYPE() const + { + NPU_NAMESPACE::pmu_event value = static_cast(((1U << 10) - 1) & (word0 >> 0)); + return value; + } + NPU_NAMESPACE::pmu_event get_EV_TYPE() const volatile + { + NPU_NAMESPACE::pmu_event value = static_cast(((1U << 10) - 1) & (word0 >> 0)); + return value; + } + CONSTEXPR pmevtyper_r &set_EV_TYPE(NPU_NAMESPACE::pmu_event value) + { + word0 = (((~((1U << 10) - 1)) << 0) & word0) | ((((1U << 10) - 1) & static_cast(value)) << 0); + return *this; + } + volatile pmevtyper_r &set_EV_TYPE(NPU_NAMESPACE::pmu_event value) volatile + { + word0 = (((~((1U << 10) - 1)) << 0) & word0) | ((((1U << 10) - 1) & static_cast(value)) << 0); + return *this; + } +#endif +}; + +// shared_buffer_r - Shared buffer debug access. 
Only valid in STOPPED state +struct shared_buffer_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t mem_word : 32; // Memory word + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR shared_buffer_r() : word0(0) {} + CONSTEXPR shared_buffer_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + shared_buffer_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_mem_word() const + { + uint32_t value = word0; + return value; + } + uint32_t get_mem_word() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR shared_buffer_r &set_mem_word(uint32_t value) + { + word0 = value; + return *this; + } + volatile shared_buffer_r &set_mem_word(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_pad_top_r - None +struct ifm_pad_top_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_pad_top_r() : word0(0) {} + CONSTEXPR ifm_pad_top_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_pad_top_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_pad_top_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_pad_top_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_pad_left_r - None +struct ifm_pad_left_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_pad_left_r() : word0(0) {} + CONSTEXPR ifm_pad_left_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_pad_left_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_pad_left_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_pad_left_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_pad_right_r - None +struct ifm_pad_right_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_pad_right_r() : word0(0) {} + CONSTEXPR ifm_pad_right_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + 
return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_pad_right_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_pad_right_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_pad_right_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_pad_bottom_r - None +struct ifm_pad_bottom_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_pad_bottom_r() : word0(0) {} + CONSTEXPR ifm_pad_bottom_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_pad_bottom_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_pad_bottom_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_pad_bottom_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_depth_m1_r - None +struct ifm_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_depth_m1_r() : word0(0) {} + CONSTEXPR ifm_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_depth_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_precision_r - None +struct ifm_precision_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_precision_r() : word0(0) {} + CONSTEXPR ifm_precision_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_precision_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_precision_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_precision_r &set_value(uint32_t value) volatile + { + word0 = value; 
+ return *this; + } +#endif +}; + +// ifm_upscale_r - None +struct ifm_upscale_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_upscale_r() : word0(0) {} + CONSTEXPR ifm_upscale_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_upscale_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_upscale_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_upscale_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_zero_point_r - None +struct ifm_zero_point_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_zero_point_r() : word0(0) {} + CONSTEXPR ifm_zero_point_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_zero_point_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_zero_point_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_zero_point_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_width0_m1_r - None +struct ifm_width0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_width0_m1_r() : word0(0) {} + CONSTEXPR ifm_width0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_width0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_width0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_width0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_height0_m1_r - None +struct ifm_height0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_height0_m1_r() : word0(0) {} + CONSTEXPR ifm_height0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; 
+ } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_height0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_height0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_height0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_height1_m1_r - None +struct ifm_height1_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_height1_m1_r() : word0(0) {} + CONSTEXPR ifm_height1_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_height1_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_height1_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_height1_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_ib_end_r - None +struct ifm_ib_end_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_ib_end_r() : word0(0) {} + CONSTEXPR ifm_ib_end_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_ib_end_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_ib_end_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_ib_end_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_region_r - None +struct ifm_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm_region_r() : word0(0) {} + CONSTEXPR ifm_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm_region_r &set_value(uint32_t value) volatile + { + word0 
= value; + return *this; + } +#endif +}; + +// ofm_width_m1_r - None +struct ofm_width_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_width_m1_r() : word0(0) {} + CONSTEXPR ofm_width_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_width_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_width_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_width_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_height_m1_r - None +struct ofm_height_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_height_m1_r() : word0(0) {} + CONSTEXPR ofm_height_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_height_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_height_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_height_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_depth_m1_r - None +struct ofm_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_depth_m1_r() : word0(0) {} + CONSTEXPR ofm_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_depth_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_precision_r - None +struct ofm_precision_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_precision_r() : word0(0) {} + CONSTEXPR ofm_precision_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + 
} + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_precision_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_precision_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_precision_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_blk_width_m1_r - None +struct ofm_blk_width_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_blk_width_m1_r() : word0(0) {} + CONSTEXPR ofm_blk_width_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_blk_width_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_blk_width_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_blk_width_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_blk_height_m1_r - None +struct ofm_blk_height_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_blk_height_m1_r() : word0(0) {} + CONSTEXPR ofm_blk_height_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_blk_height_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_blk_height_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_blk_height_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_blk_depth_m1_r - None +struct ofm_blk_depth_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_blk_depth_m1_r() : word0(0) {} + CONSTEXPR ofm_blk_depth_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_blk_depth_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_blk_depth_m1_r &set_value(uint32_t value) + { + word0 = 
value; + return *this; + } + volatile ofm_blk_depth_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_zero_point_r - None +struct ofm_zero_point_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_zero_point_r() : word0(0) {} + CONSTEXPR ofm_zero_point_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_zero_point_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_zero_point_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_zero_point_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_width0_m1_r - None +struct ofm_width0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_width0_m1_r() : word0(0) {} + CONSTEXPR ofm_width0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_width0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_width0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_width0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_height0_m1_r - None +struct ofm_height0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_height0_m1_r() : word0(0) {} + CONSTEXPR ofm_height0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_height0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_height0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_height0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_height1_m1_r - None +struct ofm_height1_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_height1_m1_r() : word0(0) {} + CONSTEXPR ofm_height1_m1_r(uint32_t init) : 
word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_height1_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_height1_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_height1_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_region_r - None +struct ofm_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_region_r() : word0(0) {} + CONSTEXPR ofm_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_width_m1_r - None +struct kernel_width_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_width_m1_r() : word0(0) {} + CONSTEXPR kernel_width_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_width_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_width_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_width_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_height_m1_r - None +struct kernel_height_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_height_m1_r() : word0(0) {} + CONSTEXPR kernel_height_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_height_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + 
return value; + } + CONSTEXPR kernel_height_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_height_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// kernel_stride_r - None +struct kernel_stride_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR kernel_stride_r() : word0(0) {} + CONSTEXPR kernel_stride_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + kernel_stride_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR kernel_stride_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile kernel_stride_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// parallel_mode_r - None +struct parallel_mode_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR parallel_mode_r() : word0(0) {} + CONSTEXPR parallel_mode_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + parallel_mode_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR parallel_mode_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile parallel_mode_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// acc_format_r - None +struct acc_format_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR acc_format_r() : word0(0) {} + CONSTEXPR acc_format_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + acc_format_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR acc_format_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile acc_format_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// activation_r - None +struct activation_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR activation_r() : word0(0) {} + CONSTEXPR 
activation_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + activation_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR activation_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile activation_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// activation_min_r - None +struct activation_min_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR activation_min_r() : word0(0) {} + CONSTEXPR activation_min_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + activation_min_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR activation_min_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile activation_min_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// activation_max_r - None +struct activation_max_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR activation_max_r() : word0(0) {} + CONSTEXPR activation_max_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + activation_max_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR activation_max_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile activation_max_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// weight_region_r - None +struct weight_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR weight_region_r() : word0(0) {} + CONSTEXPR weight_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + weight_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + 
uint32_t value = word0; + return value; + } + CONSTEXPR weight_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile weight_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// scale_region_r - None +struct scale_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR scale_region_r() : word0(0) {} + CONSTEXPR scale_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + scale_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR scale_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile scale_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ab_start_r - None +struct ab_start_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ab_start_r() : word0(0) {} + CONSTEXPR ab_start_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ab_start_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ab_start_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ab_start_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// blockdep_r - None +struct blockdep_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR blockdep_r() : word0(0) {} + CONSTEXPR blockdep_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + blockdep_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR blockdep_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile blockdep_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_src_region_r - None +struct dma0_src_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma0_src_region_r() : word0(0) {} + CONSTEXPR 
dma0_src_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma0_src_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma0_src_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma0_src_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_dst_region_r - None +struct dma0_dst_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma0_dst_region_r() : word0(0) {} + CONSTEXPR dma0_dst_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma0_dst_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma0_dst_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma0_dst_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_size0_r - None +struct dma0_size0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma0_size0_r() : word0(0) {} + CONSTEXPR dma0_size0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma0_size0_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR dma0_size0_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma0_size0_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_size1_r - None +struct dma0_size1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR dma0_size1_r() : word0(0) {} + CONSTEXPR dma0_size1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + dma0_size1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = 
word0; + return value; + } + CONSTEXPR dma0_size1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile dma0_size1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_broadcast_r - None +struct ifm2_broadcast_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_broadcast_r() : word0(0) {} + CONSTEXPR ifm2_broadcast_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_broadcast_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_broadcast_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_broadcast_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_scalar_r - None +struct ifm2_scalar_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_scalar_r() : word0(0) {} + CONSTEXPR ifm2_scalar_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_scalar_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_scalar_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_scalar_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_precision_r - None +struct ifm2_precision_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_precision_r() : word0(0) {} + CONSTEXPR ifm2_precision_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_precision_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_precision_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_precision_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_zero_point_r - None +struct ifm2_zero_point_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR 
ifm2_zero_point_r() : word0(0) {} + CONSTEXPR ifm2_zero_point_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_zero_point_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_zero_point_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_zero_point_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_width0_m1_r - None +struct ifm2_width0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_width0_m1_r() : word0(0) {} + CONSTEXPR ifm2_width0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_width0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_width0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_width0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_height0_m1_r - None +struct ifm2_height0_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_height0_m1_r() : word0(0) {} + CONSTEXPR ifm2_height0_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_height0_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_height0_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_height0_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_height1_m1_r - None +struct ifm2_height1_m1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_height1_m1_r() : word0(0) {} + CONSTEXPR ifm2_height1_m1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_height1_m1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + 
uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_height1_m1_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_height1_m1_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_ib_start_r - None +struct ifm2_ib_start_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_ib_start_r() : word0(0) {} + CONSTEXPR ifm2_ib_start_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_ib_start_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_ib_start_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_ib_start_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm2_region_r - None +struct ifm2_region_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ifm2_region_r() : word0(0) {} + CONSTEXPR ifm2_region_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ifm2_region_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ifm2_region_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ifm2_region_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ifm_base0_r - None +struct ifm_base0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_base0_r() : word0(0), word1(0) {} + CONSTEXPR ifm_base0_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_base0_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_base1_r - None +struct ifm_base1_r +{ +#ifndef 
__cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_base1_r() : word0(0), word1(0) {} + CONSTEXPR ifm_base1_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_base1_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_base2_r - None +struct ifm_base2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_base2_r() : word0(0), word1(0) {} + CONSTEXPR ifm_base2_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_base2_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_base3_r - None +struct ifm_base3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_base3_r() : word0(0), word1(0) {} + CONSTEXPR ifm_base3_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_base3_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_stride_x_r - None +struct ifm_stride_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t 
word0; + uint32_t word1; + + public: + CONSTEXPR ifm_stride_x_r() : word0(0), word1(0) {} + CONSTEXPR ifm_stride_x_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_stride_x_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_stride_y_r - None +struct ifm_stride_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_stride_y_r() : word0(0), word1(0) {} + CONSTEXPR ifm_stride_y_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_stride_y_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm_stride_c_r - None +struct ifm_stride_c_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm_stride_c_r() : word0(0), word1(0) {} + CONSTEXPR ifm_stride_c_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm_stride_c_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_base0_r - None +struct ofm_base0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_base0_r() : word0(0), word1(0) {} + CONSTEXPR ofm_base0_r(uint64_t init) : + 
word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_base0_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_base1_r - None +struct ofm_base1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_base1_r() : word0(0), word1(0) {} + CONSTEXPR ofm_base1_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_base1_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_base2_r - None +struct ofm_base2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_base2_r() : word0(0), word1(0) {} + CONSTEXPR ofm_base2_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_base2_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_base3_r - None +struct ofm_base3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_base3_r() : word0(0), word1(0) {} + CONSTEXPR ofm_base3_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = 
static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_base3_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_stride_x_r - None +struct ofm_stride_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_stride_x_r() : word0(0), word1(0) {} + CONSTEXPR ofm_stride_x_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_stride_x_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_stride_y_r - None +struct ofm_stride_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_stride_y_r() : word0(0), word1(0) {} + CONSTEXPR ofm_stride_y_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_stride_y_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_stride_c_r - None +struct ofm_stride_c_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ofm_stride_c_r() : word0(0), word1(0) {} + CONSTEXPR ofm_stride_c_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = 
static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ofm_stride_c_r copy() volatile + { + return *this; + } +#endif +}; + +// weight_base_r - None +struct weight_base_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR weight_base_r() : word0(0), word1(0) {} + CONSTEXPR weight_base_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + weight_base_r copy() volatile + { + return *this; + } +#endif +}; + +// weight_length_r - None +struct weight_length_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR weight_length_r() : word0(0), word1(0) {} + CONSTEXPR weight_length_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + weight_length_r copy() volatile + { + return *this; + } +#endif +}; + +// scale_base_r - None +struct scale_base_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR scale_base_r() : word0(0), word1(0) {} + CONSTEXPR scale_base_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return 
(static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + scale_base_r copy() volatile + { + return *this; + } +#endif +}; + +// scale_length_r - None +struct scale_length_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR scale_length_r() : word0(0), word1(0) {} + CONSTEXPR scale_length_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + scale_length_r copy() volatile + { + return *this; + } +#endif +}; + +// ofm_scale_r - None +struct ofm_scale_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_scale_r() : word0(0) {} + CONSTEXPR ofm_scale_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_scale_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_scale_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_scale_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// ofm_scale_shift_r - None +struct ofm_scale_shift_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR ofm_scale_shift_r() : word0(0) {} + CONSTEXPR ofm_scale_shift_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + ofm_scale_shift_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR ofm_scale_shift_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile ofm_scale_shift_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// opa_scale_r - None +struct opa_scale_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + 
+ public: + CONSTEXPR opa_scale_r() : word0(0) {} + CONSTEXPR opa_scale_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + opa_scale_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR opa_scale_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile opa_scale_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// opa_scale_shift_r - None +struct opa_scale_shift_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR opa_scale_shift_r() : word0(0) {} + CONSTEXPR opa_scale_shift_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + opa_scale_shift_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR opa_scale_shift_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile opa_scale_shift_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// opb_scale_r - None +struct opb_scale_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR opb_scale_r() : word0(0) {} + CONSTEXPR opb_scale_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + opb_scale_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR opb_scale_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile opb_scale_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// dma0_src_r - None +struct dma0_src_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma0_src_r() : word0(0), word1(0) {} + CONSTEXPR dma0_src_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) 
volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma0_src_r copy() volatile + { + return *this; + } +#endif +}; + +// dma0_dst_r - None +struct dma0_dst_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma0_dst_r() : word0(0), word1(0) {} + CONSTEXPR dma0_dst_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma0_dst_r copy() volatile + { + return *this; + } +#endif +}; + +// dma0_len_r - None +struct dma0_len_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma0_len_r() : word0(0), word1(0) {} + CONSTEXPR dma0_len_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma0_len_r copy() volatile + { + return *this; + } +#endif +}; + +// dma0_skip0_r - None +struct dma0_skip0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma0_skip0_r() : word0(0), word1(0) {} + CONSTEXPR dma0_skip0_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + 
} + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma0_skip0_r copy() volatile + { + return *this; + } +#endif +}; + +// dma0_skip1_r - None +struct dma0_skip1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR dma0_skip1_r() : word0(0), word1(0) {} + CONSTEXPR dma0_skip1_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + dma0_skip1_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_base0_r - None +struct ifm2_base0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_base0_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_base0_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_base0_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_base1_r - None +struct ifm2_base1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_base1_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_base1_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_base1_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_base2_r - None +struct ifm2_base2_r +{ 
+#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_base2_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_base2_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_base2_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_base3_r - None +struct ifm2_base3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_base3_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_base3_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_base3_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_stride_x_r - None +struct ifm2_stride_x_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_stride_x_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_stride_x_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_stride_x_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_stride_y_r - None +struct ifm2_stride_y_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t 
word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_stride_y_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_stride_y_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_stride_y_r copy() volatile + { + return *this; + } +#endif +}; + +// ifm2_stride_c_r - None +struct ifm2_stride_c_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR ifm2_stride_c_r() : word0(0), word1(0) {} + CONSTEXPR ifm2_stride_c_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + ifm2_stride_c_r copy() volatile + { + return *this; + } +#endif +}; + +// weight1_base_r - None +struct weight1_base_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR weight1_base_r() : word0(0), word1(0) {} + CONSTEXPR weight1_base_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + weight1_base_r copy() volatile + { + return *this; + } +#endif +}; + +// weight1_length_r - None +struct weight1_length_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR weight1_length_r() : word0(0), word1(0) {} + CONSTEXPR 
weight1_length_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + weight1_length_r copy() volatile + { + return *this; + } +#endif +}; + +// scale1_base_r - None +struct scale1_base_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR scale1_base_r() : word0(0), word1(0) {} + CONSTEXPR scale1_base_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + scale1_base_r copy() volatile + { + return *this; + } +#endif +}; + +// scale1_length_r - None +struct scale1_length_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value_LO : 32; // 64-bit register value - LSB + uint32_t value_HI : 32; // 64-bit register value - MSB + }; + uint32_t word[2]; + }; +#else + private: + uint32_t word0; + uint32_t word1; + + public: + CONSTEXPR scale1_length_r() : word0(0), word1(0) {} + CONSTEXPR scale1_length_r(uint64_t init) : + word0(static_cast((init)&std::numeric_limits::max())), + word1(static_cast((init >> 32) & std::numeric_limits::max())) + { + } + CONSTEXPR void operator=(uint64_t value) + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + void operator=(uint64_t value) volatile + { + word0 = static_cast((value)&std::numeric_limits::max()); + word1 = static_cast((value >> 32) & std::numeric_limits::max()); + } + CONSTEXPR operator uint64_t() + { + return (static_cast(word1) << 32) | word0; + } + operator uint64_t() volatile + { + return (static_cast(word1) << 32) | word0; + } + scale1_length_r copy() volatile + { + return *this; + } +#endif +}; + +// revision_r - Internal FPGA build revision: first 32-bits of the Ultan Git hash used for the build +struct revision_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t value : 32; // 32-bit register value + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR revision_r() : word0(0) {} + CONSTEXPR revision_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator 
uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + revision_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_value() const + { + uint32_t value = word0; + return value; + } + uint32_t get_value() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR revision_r &set_value(uint32_t value) + { + word0 = value; + return *this; + } + volatile revision_r &set_value(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid4_r - Peripheral ID byte 4 (Arm=code 4) +struct pid4_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID4 : 32; // Byte 4 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid4_r() : word0(4) {} + CONSTEXPR pid4_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid4_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID4() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID4() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid4_r &set_PID4(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid4_r &set_PID4(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid5_r - Peripheral ID byte 5 (reserved) +struct pid5_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID5 : 32; // Byte 5 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid5_r() : word0(0) {} + CONSTEXPR pid5_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid5_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID5() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID5() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid5_r &set_PID5(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid5_r &set_PID5(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid6_r - Peripheral ID byte 6 (reserved) +struct pid6_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID6 : 32; // Byte 6 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid6_r() : word0(0) {} + CONSTEXPR pid6_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid6_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID6() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID6() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid6_r &set_PID6(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid6_r &set_PID6(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid7_r - 
Peripheral ID byte 7 (reserved) +struct pid7_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID7 : 32; // Byte 7 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid7_r() : word0(0) {} + CONSTEXPR pid7_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid7_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID7() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID7() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid7_r &set_PID7(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid7_r &set_PID7(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid0_r - Peripheral ID byte 0. This is bits[7:0] of the part number +struct pid0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID0 : 32; // Byte 0 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid0_r() : word0(129) {} + CONSTEXPR pid0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid0_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID0() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID0() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid0_r &set_PID0(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid0_r &set_PID0(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid1_r - Peripheral ID byte 1. This is bits[11:8] of the part number in bits[3:0], and bits[3:0] of the Arm ID in +// bits[7:4] +struct pid1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID1 : 32; // Byte 1 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid1_r() : word0(181) {} + CONSTEXPR pid1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID1() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID1() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid1_r &set_PID1(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid1_r &set_PID1(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid2_r - Peripheral ID byte 2. 
This is bits[6:4] of the Arm ID in bits[2:0], and bit 3 indicates format B +struct pid2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID2 : 32; // Byte 2 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid2_r() : word0(11) {} + CONSTEXPR pid2_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid2_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID2() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID2() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid2_r &set_PID2(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid2_r &set_PID2(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// pid3_r - Peripheral ID byte 3 +struct pid3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t PID3 : 32; // Byte 1 of Peripheral ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR pid3_r() : word0(0) {} + CONSTEXPR pid3_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + pid3_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_PID3() const + { + uint32_t value = word0; + return value; + } + uint32_t get_PID3() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR pid3_r &set_PID3(uint32_t value) + { + word0 = value; + return *this; + } + volatile pid3_r &set_PID3(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// cid0_r - Component ID byte 0 +struct cid0_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CID0 : 32; // Byte 0 of Component ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cid0_r() : word0(13) {} + CONSTEXPR cid0_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + cid0_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_CID0() const + { + uint32_t value = word0; + return value; + } + uint32_t get_CID0() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR cid0_r &set_CID0(uint32_t value) + { + word0 = value; + return *this; + } + volatile cid0_r &set_CID0(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// cid1_r - Component ID byte 1 +struct cid1_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CID1 : 32; // Byte 1 of Component ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cid1_r() : word0(240) {} + CONSTEXPR cid1_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator 
uint32_t() volatile + { + return word0; + } + cid1_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_CID1() const + { + uint32_t value = word0; + return value; + } + uint32_t get_CID1() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR cid1_r &set_CID1(uint32_t value) + { + word0 = value; + return *this; + } + volatile cid1_r &set_CID1(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// cid2_r - Component ID byte 2 +struct cid2_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CID2 : 32; // Byte 2 of Component ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cid2_r() : word0(5) {} + CONSTEXPR cid2_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + cid2_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_CID2() const + { + uint32_t value = word0; + return value; + } + uint32_t get_CID2() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR cid2_r &set_CID2(uint32_t value) + { + word0 = value; + return *this; + } + volatile cid2_r &set_CID2(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +// cid3_r - Component ID byte 3 +struct cid3_r +{ +#ifndef __cplusplus + union + { + struct + { + uint32_t CID3 : 32; // Byte 3 of Component ID (Lower 8 bits valid) + }; + uint32_t word; + }; +#else + private: + uint32_t word0; + + public: + CONSTEXPR cid3_r() : word0(177) {} + CONSTEXPR cid3_r(uint32_t init) : word0(init) {} + CONSTEXPR void operator=(uint32_t value) + { + word0 = value; + } + void operator=(uint32_t value) volatile + { + word0 = value; + } + CONSTEXPR operator uint32_t() + { + return word0; + } + operator uint32_t() volatile + { + return word0; + } + cid3_r copy() volatile + { + return *this; + } + CONSTEXPR uint32_t get_CID3() const + { + uint32_t value = word0; + return value; + } + uint32_t get_CID3() const volatile + { + uint32_t value = word0; + return value; + } + CONSTEXPR cid3_r &set_CID3(uint32_t value) + { + word0 = value; + return *this; + } + volatile cid3_r &set_CID3(uint32_t value) volatile + { + word0 = value; + return *this; + } +#endif +}; + +struct NPU_REG +{ + STRUCT id_r ID; // 0x0000 + STRUCT status_r STATUS; // 0x0004 + STRUCT cmd_r CMD; // 0x0008 + STRUCT reset_r RESET; // 0x000C + STRUCT qbase_r QBASE; // 0x0010 + STRUCT qread_r QREAD; // 0x0018 + STRUCT qconfig_r QCONFIG; // 0x001C + STRUCT qsize_r QSIZE; // 0x0020 + STRUCT prot_r PROT; // 0x0024 + STRUCT config_r CONFIG; // 0x0028 + STRUCT lock_r LOCK; // 0x002C + uint32_t unused0[3]; + STRUCT regioncfg_r REGIONCFG; // 0x003C + STRUCT axi_limit0_r AXI_LIMIT0; // 0x0040 + STRUCT axi_limit1_r AXI_LIMIT1; // 0x0044 + STRUCT axi_limit2_r AXI_LIMIT2; // 0x0048 + STRUCT axi_limit3_r AXI_LIMIT3; // 0x004C + uint32_t unused1[12]; + STRUCT basep_r BASEP[8]; // 0x0080 + uint32_t unused2[16]; + STRUCT wd_status_r WD_STATUS; // 0x0100 + STRUCT mac_status_r MAC_STATUS; // 0x0104 + STRUCT ao_status_r AO_STATUS; // 0x0108 + uint32_t unused3[1]; + STRUCT dma_status0_r DMA_STATUS0; // 0x0110 + STRUCT dma_status1_r DMA_STATUS1; // 0x0114 + uint32_t unused4[10]; + STRUCT clkforce_r CLKFORCE; // 0x0140 + STRUCT debug_address_r DEBUG_ADDRESS; // 0x0144 + STRUCT debug_misc_r DEBUG_MISC; // 
0x0148 + STRUCT debugcore_r DEBUGCORE; // 0x014C + STRUCT debug_block_r DEBUG_BLOCK; // 0x0150 + uint32_t unused5[11]; + STRUCT pmcr_r PMCR; // 0x0180 + STRUCT pmcntenset_r PMCNTENSET; // 0x0184 + STRUCT pmcntenclr_r PMCNTENCLR; // 0x0188 + STRUCT pmovsset_r PMOVSSET; // 0x018C + STRUCT pmovsclr_r PMOVSCLR; // 0x0190 + STRUCT pmintset_r PMINTSET; // 0x0194 + STRUCT pmintclr_r PMINTCLR; // 0x0198 + uint32_t unused6[1]; + STRUCT pmccntr_r PMCCNTR; // 0x01A0 + STRUCT pmccntr_cfg_r PMCCNTR_CFG; // 0x01A8 + STRUCT pmcaxi_chan_r PMCAXI_CHAN; // 0x01AC + uint32_t unused7[20]; + STRUCT kernel_x_r KERNEL_X; // 0x0200 + STRUCT kernel_y_r KERNEL_Y; // 0x0204 + STRUCT kernel_w_m1_r KERNEL_W_M1; // 0x0208 + STRUCT kernel_h_m1_r KERNEL_H_M1; // 0x020C + STRUCT ofm_cblk_width_m1_r OFM_CBLK_WIDTH_M1; // 0x0210 + STRUCT ofm_cblk_height_m1_r OFM_CBLK_HEIGHT_M1; // 0x0214 + STRUCT ofm_cblk_depth_m1_r OFM_CBLK_DEPTH_M1; // 0x0218 + STRUCT ifm_cblk_depth_m1_r IFM_CBLK_DEPTH_M1; // 0x021C + STRUCT ofm_x_r OFM_X; // 0x0220 + STRUCT ofm_y_r OFM_Y; // 0x0224 + STRUCT ofm_z_r OFM_Z; // 0x0228 + STRUCT ifm_z_r IFM_Z; // 0x022C + STRUCT pad_top_r PAD_TOP; // 0x0230 + STRUCT pad_left_r PAD_LEFT; // 0x0234 + STRUCT ifm_cblk_width_r IFM_CBLK_WIDTH; // 0x0238 + STRUCT ifm_cblk_height_r IFM_CBLK_HEIGHT; // 0x023C + STRUCT dma_ifm_src_r DMA_IFM_SRC; // 0x0240 + STRUCT dma_ifm_dst_r DMA_IFM_DST; // 0x0248 + STRUCT dma_ofm_src_r DMA_OFM_SRC; // 0x024C + STRUCT dma_ofm_dst_r DMA_OFM_DST; // 0x0250 + STRUCT dma_weight_src_r DMA_WEIGHT_SRC; // 0x0258 + STRUCT dma_cmd_src_r DMA_CMD_SRC; // 0x0260 + STRUCT dma_cmd_size_r DMA_CMD_SIZE; // 0x0268 + STRUCT dma_m2m_src_r DMA_M2M_SRC; // 0x026C + STRUCT dma_m2m_dst_r DMA_M2M_DST; // 0x0274 + STRUCT current_qread_r CURRENT_QREAD; // 0x027C + STRUCT dma_scale_src_r DMA_SCALE_SRC; // 0x0280 + uint32_t unused8[11]; + STRUCT current_block_r CURRENT_BLOCK; // 0x02B4 + STRUCT current_op_r CURRENT_OP; // 0x02B8 + STRUCT current_cmd_r CURRENT_CMD; // 0x02BC + uint32_t unused9[16]; + STRUCT pmevcntr_r PMEVCNTR[4]; // 0x0300 + uint32_t unused10[28]; + STRUCT pmevtyper_r PMEVTYPER[4]; // 0x0380 + uint32_t unused11[28]; + STRUCT shared_buffer_r SHARED_BUFFER[256]; // 0x0400 + STRUCT ifm_pad_top_r IFM_PAD_TOP; // 0x0800 + STRUCT ifm_pad_left_r IFM_PAD_LEFT; // 0x0804 + STRUCT ifm_pad_right_r IFM_PAD_RIGHT; // 0x0808 + STRUCT ifm_pad_bottom_r IFM_PAD_BOTTOM; // 0x080C + STRUCT ifm_depth_m1_r IFM_DEPTH_M1; // 0x0810 + STRUCT ifm_precision_r IFM_PRECISION; // 0x0814 + uint32_t unused12[1]; + STRUCT ifm_upscale_r IFM_UPSCALE; // 0x081C + uint32_t unused13[1]; + STRUCT ifm_zero_point_r IFM_ZERO_POINT; // 0x0824 + STRUCT ifm_width0_m1_r IFM_WIDTH0_M1; // 0x0828 + STRUCT ifm_height0_m1_r IFM_HEIGHT0_M1; // 0x082C + STRUCT ifm_height1_m1_r IFM_HEIGHT1_M1; // 0x0830 + STRUCT ifm_ib_end_r IFM_IB_END; // 0x0834 + uint32_t unused14[1]; + STRUCT ifm_region_r IFM_REGION; // 0x083C + uint32_t unused15[1]; + STRUCT ofm_width_m1_r OFM_WIDTH_M1; // 0x0844 + STRUCT ofm_height_m1_r OFM_HEIGHT_M1; // 0x0848 + STRUCT ofm_depth_m1_r OFM_DEPTH_M1; // 0x084C + STRUCT ofm_precision_r OFM_PRECISION; // 0x0850 + STRUCT ofm_blk_width_m1_r OFM_BLK_WIDTH_M1; // 0x0854 + STRUCT ofm_blk_height_m1_r OFM_BLK_HEIGHT_M1; // 0x0858 + STRUCT ofm_blk_depth_m1_r OFM_BLK_DEPTH_M1; // 0x085C + STRUCT ofm_zero_point_r OFM_ZERO_POINT; // 0x0860 + uint32_t unused16[1]; + STRUCT ofm_width0_m1_r OFM_WIDTH0_M1; // 0x0868 + STRUCT ofm_height0_m1_r OFM_HEIGHT0_M1; // 0x086C + STRUCT ofm_height1_m1_r OFM_HEIGHT1_M1; // 0x0870 + uint32_t unused17[2]; 
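+    // Illustrative note, not from the generated header: NPU_REG mirrors the NPU's
+    // memory-mapped register file, with each member sitting at the byte offset
+    // given in its trailing comment. A typical (hypothetical) way to use it is to
+    // overlay the struct on the device base address, e.g.
+    //     volatile NPU_REG &npu = *reinterpret_cast<volatile NPU_REG *>(NPU_REG_BASE);
+    //     uint32_t id = npu.ID; // ID register at offset 0x0000
+    // where NPU_REG_BASE is an assumed, platform-specific base address.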
+ STRUCT ofm_region_r OFM_REGION; // 0x087C + STRUCT kernel_width_m1_r KERNEL_WIDTH_M1; // 0x0880 + STRUCT kernel_height_m1_r KERNEL_HEIGHT_M1; // 0x0884 + STRUCT kernel_stride_r KERNEL_STRIDE; // 0x0888 + STRUCT parallel_mode_r PARALLEL_MODE; // 0x088C + STRUCT acc_format_r ACC_FORMAT; // 0x0890 + STRUCT activation_r ACTIVATION; // 0x0894 + STRUCT activation_min_r ACTIVATION_MIN; // 0x0898 + STRUCT activation_max_r ACTIVATION_MAX; // 0x089C + STRUCT weight_region_r WEIGHT_REGION; // 0x08A0 + STRUCT scale_region_r SCALE_REGION; // 0x08A4 + uint32_t unused18[3]; + STRUCT ab_start_r AB_START; // 0x08B4 + uint32_t unused19[1]; + STRUCT blockdep_r BLOCKDEP; // 0x08BC + STRUCT dma0_src_region_r DMA0_SRC_REGION; // 0x08C0 + STRUCT dma0_dst_region_r DMA0_DST_REGION; // 0x08C4 + STRUCT dma0_size0_r DMA0_SIZE0; // 0x08C8 + STRUCT dma0_size1_r DMA0_SIZE1; // 0x08CC + uint32_t unused20[12]; + STRUCT ifm2_broadcast_r IFM2_BROADCAST; // 0x0900 + STRUCT ifm2_scalar_r IFM2_SCALAR; // 0x0904 + uint32_t unused21[3]; + STRUCT ifm2_precision_r IFM2_PRECISION; // 0x0914 + uint32_t unused22[3]; + STRUCT ifm2_zero_point_r IFM2_ZERO_POINT; // 0x0924 + STRUCT ifm2_width0_m1_r IFM2_WIDTH0_M1; // 0x0928 + STRUCT ifm2_height0_m1_r IFM2_HEIGHT0_M1; // 0x092C + STRUCT ifm2_height1_m1_r IFM2_HEIGHT1_M1; // 0x0930 + STRUCT ifm2_ib_start_r IFM2_IB_START; // 0x0934 + uint32_t unused23[1]; + STRUCT ifm2_region_r IFM2_REGION; // 0x093C + uint32_t unused24[48]; + STRUCT ifm_base0_r IFM_BASE0; // 0x0A00 + STRUCT ifm_base1_r IFM_BASE1; // 0x0A08 + STRUCT ifm_base2_r IFM_BASE2; // 0x0A10 + STRUCT ifm_base3_r IFM_BASE3; // 0x0A18 + STRUCT ifm_stride_x_r IFM_STRIDE_X; // 0x0A20 + STRUCT ifm_stride_y_r IFM_STRIDE_Y; // 0x0A28 + STRUCT ifm_stride_c_r IFM_STRIDE_C; // 0x0A30 + uint32_t unused25[2]; + STRUCT ofm_base0_r OFM_BASE0; // 0x0A40 + STRUCT ofm_base1_r OFM_BASE1; // 0x0A48 + STRUCT ofm_base2_r OFM_BASE2; // 0x0A50 + STRUCT ofm_base3_r OFM_BASE3; // 0x0A58 + STRUCT ofm_stride_x_r OFM_STRIDE_X; // 0x0A60 + STRUCT ofm_stride_y_r OFM_STRIDE_Y; // 0x0A68 + STRUCT ofm_stride_c_r OFM_STRIDE_C; // 0x0A70 + uint32_t unused26[2]; + STRUCT weight_base_r WEIGHT_BASE; // 0x0A80 + STRUCT weight_length_r WEIGHT_LENGTH; // 0x0A88 + STRUCT scale_base_r SCALE_BASE; // 0x0A90 + STRUCT scale_length_r SCALE_LENGTH; // 0x0A98 + STRUCT ofm_scale_r OFM_SCALE; // 0x0AA0 + STRUCT ofm_scale_shift_r OFM_SCALE_SHIFT; // 0x0AA4 + STRUCT opa_scale_r OPA_SCALE; // 0x0AA8 + STRUCT opa_scale_shift_r OPA_SCALE_SHIFT; // 0x0AAC + STRUCT opb_scale_r OPB_SCALE; // 0x0AB0 + uint32_t unused27[3]; + STRUCT dma0_src_r DMA0_SRC; // 0x0AC0 + STRUCT dma0_dst_r DMA0_DST; // 0x0AC8 + STRUCT dma0_len_r DMA0_LEN; // 0x0AD0 + STRUCT dma0_skip0_r DMA0_SKIP0; // 0x0AD8 + STRUCT dma0_skip1_r DMA0_SKIP1; // 0x0AE0 + uint32_t unused28[6]; + STRUCT ifm2_base0_r IFM2_BASE0; // 0x0B00 + STRUCT ifm2_base1_r IFM2_BASE1; // 0x0B08 + STRUCT ifm2_base2_r IFM2_BASE2; // 0x0B10 + STRUCT ifm2_base3_r IFM2_BASE3; // 0x0B18 + STRUCT ifm2_stride_x_r IFM2_STRIDE_X; // 0x0B20 + STRUCT ifm2_stride_y_r IFM2_STRIDE_Y; // 0x0B28 + STRUCT ifm2_stride_c_r IFM2_STRIDE_C; // 0x0B30 + uint32_t unused29[2]; + STRUCT weight1_base_r WEIGHT1_BASE; // 0x0B40 + STRUCT weight1_length_r WEIGHT1_LENGTH; // 0x0B48 + STRUCT scale1_base_r SCALE1_BASE; // 0x0B50 + STRUCT scale1_length_r SCALE1_LENGTH; // 0x0B58 + uint32_t unused30[280]; + STRUCT revision_r REVISION; // 0x0FC0 + uint32_t unused31[3]; + STRUCT pid4_r PID4; // 0x0FD0 + STRUCT pid5_r PID5; // 0x0FD4 + STRUCT pid6_r PID6; // 0x0FD8 + STRUCT pid7_r PID7; 
// 0x0FDC + STRUCT pid0_r PID0; // 0x0FE0 + STRUCT pid1_r PID1; // 0x0FE4 + STRUCT pid2_r PID2; // 0x0FE8 + STRUCT pid3_r PID3; // 0x0FEC + STRUCT cid0_r CID0; // 0x0FF0 + STRUCT cid1_r CID1; // 0x0FF4 + STRUCT cid2_r CID2; // 0x0FF8 + STRUCT cid3_r CID3; // 0x0FFC + +#ifdef __cplusplus + enum class access_type_t : uint8_t + { + RW, + RO, + WO + }; + NPU_REG() + { + reset(); + } + void reset() + { + ID = 268853249; + STATUS = 8; + CMD = 12; + RESET = 0; + QBASE = 0; + QREAD = 0; + QCONFIG = 0; + QSIZE = 0; + PROT = 0; + CONFIG = 268435456; + LOCK = 0; + REGIONCFG = 0; + AXI_LIMIT0 = 0; + AXI_LIMIT1 = 0; + AXI_LIMIT2 = 0; + AXI_LIMIT3 = 0; + for (size_t i = 0; i < (sizeof(BASEP) / sizeof(BASEP[0])); ++i) + BASEP[i] = 0; + WD_STATUS = 0; + MAC_STATUS = 0; + AO_STATUS = 0; + DMA_STATUS0 = 0; + DMA_STATUS1 = 0; + CLKFORCE = 0; + DEBUG_ADDRESS = 0; + DEBUG_MISC = 0; + DEBUGCORE = 0; + DEBUG_BLOCK = 0; + PMCR = 8192; + PMCNTENSET = 0; + PMCNTENCLR = 0; + PMOVSSET = 0; + PMOVSCLR = 0; + PMINTSET = 0; + PMINTCLR = 0; + PMCCNTR = 0; + PMCCNTR_CFG = 0; + PMCAXI_CHAN = 0; + KERNEL_X = 0; + KERNEL_Y = 0; + KERNEL_W_M1 = 0; + KERNEL_H_M1 = 0; + OFM_CBLK_WIDTH_M1 = 0; + OFM_CBLK_HEIGHT_M1 = 0; + OFM_CBLK_DEPTH_M1 = 0; + IFM_CBLK_DEPTH_M1 = 0; + OFM_X = 0; + OFM_Y = 0; + OFM_Z = 0; + IFM_Z = 0; + PAD_TOP = 0; + PAD_LEFT = 0; + IFM_CBLK_WIDTH = 0; + IFM_CBLK_HEIGHT = 0; + DMA_IFM_SRC = 0; + DMA_IFM_DST = 0; + DMA_OFM_SRC = 0; + DMA_OFM_DST = 0; + DMA_WEIGHT_SRC = 0; + DMA_CMD_SRC = 0; + DMA_CMD_SIZE = 0; + DMA_M2M_SRC = 0; + DMA_M2M_DST = 0; + CURRENT_QREAD = 0; + DMA_SCALE_SRC = 0; + CURRENT_BLOCK = 0; + CURRENT_OP = 0; + CURRENT_CMD = 0; + for (size_t i = 0; i < (sizeof(PMEVCNTR) / sizeof(PMEVCNTR[0])); ++i) + PMEVCNTR[i] = 0; + for (size_t i = 0; i < (sizeof(PMEVTYPER) / sizeof(PMEVTYPER[0])); ++i) + PMEVTYPER[i] = 0; + for (size_t i = 0; i < (sizeof(SHARED_BUFFER) / sizeof(SHARED_BUFFER[0])); ++i) + SHARED_BUFFER[i] = 0; + IFM_PAD_TOP = 0; + IFM_PAD_LEFT = 0; + IFM_PAD_RIGHT = 0; + IFM_PAD_BOTTOM = 0; + IFM_DEPTH_M1 = 0; + IFM_PRECISION = 0; + IFM_UPSCALE = 0; + IFM_ZERO_POINT = 0; + IFM_WIDTH0_M1 = 0; + IFM_HEIGHT0_M1 = 0; + IFM_HEIGHT1_M1 = 0; + IFM_IB_END = 0; + IFM_REGION = 0; + OFM_WIDTH_M1 = 0; + OFM_HEIGHT_M1 = 0; + OFM_DEPTH_M1 = 0; + OFM_PRECISION = 0; + OFM_BLK_WIDTH_M1 = 0; + OFM_BLK_HEIGHT_M1 = 0; + OFM_BLK_DEPTH_M1 = 0; + OFM_ZERO_POINT = 0; + OFM_WIDTH0_M1 = 0; + OFM_HEIGHT0_M1 = 0; + OFM_HEIGHT1_M1 = 0; + OFM_REGION = 0; + KERNEL_WIDTH_M1 = 0; + KERNEL_HEIGHT_M1 = 0; + KERNEL_STRIDE = 0; + PARALLEL_MODE = 0; + ACC_FORMAT = 0; + ACTIVATION = 0; + ACTIVATION_MIN = 0; + ACTIVATION_MAX = 0; + WEIGHT_REGION = 0; + SCALE_REGION = 0; + AB_START = 0; + BLOCKDEP = 0; + DMA0_SRC_REGION = 0; + DMA0_DST_REGION = 0; + DMA0_SIZE0 = 0; + DMA0_SIZE1 = 0; + IFM2_BROADCAST = 0; + IFM2_SCALAR = 0; + IFM2_PRECISION = 0; + IFM2_ZERO_POINT = 0; + IFM2_WIDTH0_M1 = 0; + IFM2_HEIGHT0_M1 = 0; + IFM2_HEIGHT1_M1 = 0; + IFM2_IB_START = 0; + IFM2_REGION = 0; + IFM_BASE0 = 0; + IFM_BASE1 = 0; + IFM_BASE2 = 0; + IFM_BASE3 = 0; + IFM_STRIDE_X = 0; + IFM_STRIDE_Y = 0; + IFM_STRIDE_C = 0; + OFM_BASE0 = 0; + OFM_BASE1 = 0; + OFM_BASE2 = 0; + OFM_BASE3 = 0; + OFM_STRIDE_X = 0; + OFM_STRIDE_Y = 0; + OFM_STRIDE_C = 0; + WEIGHT_BASE = 0; + WEIGHT_LENGTH = 0; + SCALE_BASE = 0; + SCALE_LENGTH = 0; + OFM_SCALE = 0; + OFM_SCALE_SHIFT = 0; + OPA_SCALE = 0; + OPA_SCALE_SHIFT = 0; + OPB_SCALE = 0; + DMA0_SRC = 0; + DMA0_DST = 0; + DMA0_LEN = 0; + DMA0_SKIP0 = 0; + DMA0_SKIP1 = 0; + IFM2_BASE0 = 0; + IFM2_BASE1 = 0; + IFM2_BASE2 = 0; 
+ IFM2_BASE3 = 0; + IFM2_STRIDE_X = 0; + IFM2_STRIDE_Y = 0; + IFM2_STRIDE_C = 0; + WEIGHT1_BASE = 0; + WEIGHT1_LENGTH = 0; + SCALE1_BASE = 0; + SCALE1_LENGTH = 0; + REVISION = 0; + PID4 = 4; + PID5 = 0; + PID6 = 0; + PID7 = 0; + PID0 = 129; + PID1 = 181; + PID2 = 11; + PID3 = 0; + CID0 = 13; + CID1 = 240; + CID2 = 5; + CID3 = 177; + } + uint32_t &operator[](const int addr_offset) + { + return reinterpret_cast(this)[addr_offset / 4]; + } + access_type_t get_access_type(uint32_t offset) + { + switch (offset) + { + case 0: + return access_type_t::RO; + case 4: + return access_type_t::RO; + case 8: + return access_type_t::RW; + case 12: + return access_type_t::RW; + case 16: + return access_type_t::RW; + case 24: + return access_type_t::RO; + case 28: + return access_type_t::RW; + case 32: + return access_type_t::RW; + case 36: + return access_type_t::RO; + case 40: + return access_type_t::RO; + case 44: + return access_type_t::RW; + case 60: + return access_type_t::RW; + case 64: + return access_type_t::RW; + case 68: + return access_type_t::RW; + case 72: + return access_type_t::RW; + case 76: + return access_type_t::RW; + case 128: + return access_type_t::RW; + case 136: + return access_type_t::RW; + case 144: + return access_type_t::RW; + case 152: + return access_type_t::RW; + case 160: + return access_type_t::RW; + case 168: + return access_type_t::RW; + case 176: + return access_type_t::RW; + case 184: + return access_type_t::RW; + case 256: + return access_type_t::RO; + case 260: + return access_type_t::RO; + case 264: + return access_type_t::RO; + case 272: + return access_type_t::RO; + case 276: + return access_type_t::RO; + case 320: + return access_type_t::RW; + case 324: + return access_type_t::RW; + case 328: + return access_type_t::RW; + case 332: + return access_type_t::RW; + case 336: + return access_type_t::RW; + case 384: + return access_type_t::RW; + case 388: + return access_type_t::RW; + case 392: + return access_type_t::RW; + case 396: + return access_type_t::RW; + case 400: + return access_type_t::RW; + case 404: + return access_type_t::RW; + case 408: + return access_type_t::RW; + case 416: + return access_type_t::RW; + case 424: + return access_type_t::RW; + case 428: + return access_type_t::RW; + case 512: + return access_type_t::RO; + case 516: + return access_type_t::RO; + case 520: + return access_type_t::RO; + case 524: + return access_type_t::RO; + case 528: + return access_type_t::RO; + case 532: + return access_type_t::RO; + case 536: + return access_type_t::RO; + case 540: + return access_type_t::RO; + case 544: + return access_type_t::RO; + case 548: + return access_type_t::RO; + case 552: + return access_type_t::RO; + case 556: + return access_type_t::RO; + case 560: + return access_type_t::RO; + case 564: + return access_type_t::RO; + case 568: + return access_type_t::RO; + case 572: + return access_type_t::RO; + case 576: + return access_type_t::RO; + case 584: + return access_type_t::RO; + case 588: + return access_type_t::RO; + case 592: + return access_type_t::RO; + case 600: + return access_type_t::RO; + case 608: + return access_type_t::RO; + case 616: + return access_type_t::RO; + case 620: + return access_type_t::RO; + case 628: + return access_type_t::RO; + case 636: + return access_type_t::RO; + case 640: + return access_type_t::RO; + case 692: + return access_type_t::RO; + case 696: + return access_type_t::RO; + case 700: + return access_type_t::RO; + case 768: + return access_type_t::RW; + case 772: + return access_type_t::RW; + case 776: + 
return access_type_t::RW; + case 780: + return access_type_t::RW; + case 896: + return access_type_t::RW; + case 900: + return access_type_t::RW; + case 904: + return access_type_t::RW; + case 908: + return access_type_t::RW; + case 1024: + return access_type_t::RW; + case 1028: + return access_type_t::RW; + case 1032: + return access_type_t::RW; + case 1036: + return access_type_t::RW; + case 1040: + return access_type_t::RW; + case 1044: + return access_type_t::RW; + case 1048: + return access_type_t::RW; + case 1052: + return access_type_t::RW; + case 1056: + return access_type_t::RW; + case 1060: + return access_type_t::RW; + case 1064: + return access_type_t::RW; + case 1068: + return access_type_t::RW; + case 1072: + return access_type_t::RW; + case 1076: + return access_type_t::RW; + case 1080: + return access_type_t::RW; + case 1084: + return access_type_t::RW; + case 1088: + return access_type_t::RW; + case 1092: + return access_type_t::RW; + case 1096: + return access_type_t::RW; + case 1100: + return access_type_t::RW; + case 1104: + return access_type_t::RW; + case 1108: + return access_type_t::RW; + case 1112: + return access_type_t::RW; + case 1116: + return access_type_t::RW; + case 1120: + return access_type_t::RW; + case 1124: + return access_type_t::RW; + case 1128: + return access_type_t::RW; + case 1132: + return access_type_t::RW; + case 1136: + return access_type_t::RW; + case 1140: + return access_type_t::RW; + case 1144: + return access_type_t::RW; + case 1148: + return access_type_t::RW; + case 1152: + return access_type_t::RW; + case 1156: + return access_type_t::RW; + case 1160: + return access_type_t::RW; + case 1164: + return access_type_t::RW; + case 1168: + return access_type_t::RW; + case 1172: + return access_type_t::RW; + case 1176: + return access_type_t::RW; + case 1180: + return access_type_t::RW; + case 1184: + return access_type_t::RW; + case 1188: + return access_type_t::RW; + case 1192: + return access_type_t::RW; + case 1196: + return access_type_t::RW; + case 1200: + return access_type_t::RW; + case 1204: + return access_type_t::RW; + case 1208: + return access_type_t::RW; + case 1212: + return access_type_t::RW; + case 1216: + return access_type_t::RW; + case 1220: + return access_type_t::RW; + case 1224: + return access_type_t::RW; + case 1228: + return access_type_t::RW; + case 1232: + return access_type_t::RW; + case 1236: + return access_type_t::RW; + case 1240: + return access_type_t::RW; + case 1244: + return access_type_t::RW; + case 1248: + return access_type_t::RW; + case 1252: + return access_type_t::RW; + case 1256: + return access_type_t::RW; + case 1260: + return access_type_t::RW; + case 1264: + return access_type_t::RW; + case 1268: + return access_type_t::RW; + case 1272: + return access_type_t::RW; + case 1276: + return access_type_t::RW; + case 1280: + return access_type_t::RW; + case 1284: + return access_type_t::RW; + case 1288: + return access_type_t::RW; + case 1292: + return access_type_t::RW; + case 1296: + return access_type_t::RW; + case 1300: + return access_type_t::RW; + case 1304: + return access_type_t::RW; + case 1308: + return access_type_t::RW; + case 1312: + return access_type_t::RW; + case 1316: + return access_type_t::RW; + case 1320: + return access_type_t::RW; + case 1324: + return access_type_t::RW; + case 1328: + return access_type_t::RW; + case 1332: + return access_type_t::RW; + case 1336: + return access_type_t::RW; + case 1340: + return access_type_t::RW; + case 1344: + return access_type_t::RW; + case 
1348: + return access_type_t::RW; + case 1352: + return access_type_t::RW; + case 1356: + return access_type_t::RW; + case 1360: + return access_type_t::RW; + case 1364: + return access_type_t::RW; + case 1368: + return access_type_t::RW; + case 1372: + return access_type_t::RW; + case 1376: + return access_type_t::RW; + case 1380: + return access_type_t::RW; + case 1384: + return access_type_t::RW; + case 1388: + return access_type_t::RW; + case 1392: + return access_type_t::RW; + case 1396: + return access_type_t::RW; + case 1400: + return access_type_t::RW; + case 1404: + return access_type_t::RW; + case 1408: + return access_type_t::RW; + case 1412: + return access_type_t::RW; + case 1416: + return access_type_t::RW; + case 1420: + return access_type_t::RW; + case 1424: + return access_type_t::RW; + case 1428: + return access_type_t::RW; + case 1432: + return access_type_t::RW; + case 1436: + return access_type_t::RW; + case 1440: + return access_type_t::RW; + case 1444: + return access_type_t::RW; + case 1448: + return access_type_t::RW; + case 1452: + return access_type_t::RW; + case 1456: + return access_type_t::RW; + case 1460: + return access_type_t::RW; + case 1464: + return access_type_t::RW; + case 1468: + return access_type_t::RW; + case 1472: + return access_type_t::RW; + case 1476: + return access_type_t::RW; + case 1480: + return access_type_t::RW; + case 1484: + return access_type_t::RW; + case 1488: + return access_type_t::RW; + case 1492: + return access_type_t::RW; + case 1496: + return access_type_t::RW; + case 1500: + return access_type_t::RW; + case 1504: + return access_type_t::RW; + case 1508: + return access_type_t::RW; + case 1512: + return access_type_t::RW; + case 1516: + return access_type_t::RW; + case 1520: + return access_type_t::RW; + case 1524: + return access_type_t::RW; + case 1528: + return access_type_t::RW; + case 1532: + return access_type_t::RW; + case 1536: + return access_type_t::RW; + case 1540: + return access_type_t::RW; + case 1544: + return access_type_t::RW; + case 1548: + return access_type_t::RW; + case 1552: + return access_type_t::RW; + case 1556: + return access_type_t::RW; + case 1560: + return access_type_t::RW; + case 1564: + return access_type_t::RW; + case 1568: + return access_type_t::RW; + case 1572: + return access_type_t::RW; + case 1576: + return access_type_t::RW; + case 1580: + return access_type_t::RW; + case 1584: + return access_type_t::RW; + case 1588: + return access_type_t::RW; + case 1592: + return access_type_t::RW; + case 1596: + return access_type_t::RW; + case 1600: + return access_type_t::RW; + case 1604: + return access_type_t::RW; + case 1608: + return access_type_t::RW; + case 1612: + return access_type_t::RW; + case 1616: + return access_type_t::RW; + case 1620: + return access_type_t::RW; + case 1624: + return access_type_t::RW; + case 1628: + return access_type_t::RW; + case 1632: + return access_type_t::RW; + case 1636: + return access_type_t::RW; + case 1640: + return access_type_t::RW; + case 1644: + return access_type_t::RW; + case 1648: + return access_type_t::RW; + case 1652: + return access_type_t::RW; + case 1656: + return access_type_t::RW; + case 1660: + return access_type_t::RW; + case 1664: + return access_type_t::RW; + case 1668: + return access_type_t::RW; + case 1672: + return access_type_t::RW; + case 1676: + return access_type_t::RW; + case 1680: + return access_type_t::RW; + case 1684: + return access_type_t::RW; + case 1688: + return access_type_t::RW; + case 1692: + return 
access_type_t::RW; + case 1696: + return access_type_t::RW; + case 1700: + return access_type_t::RW; + case 1704: + return access_type_t::RW; + case 1708: + return access_type_t::RW; + case 1712: + return access_type_t::RW; + case 1716: + return access_type_t::RW; + case 1720: + return access_type_t::RW; + case 1724: + return access_type_t::RW; + case 1728: + return access_type_t::RW; + case 1732: + return access_type_t::RW; + case 1736: + return access_type_t::RW; + case 1740: + return access_type_t::RW; + case 1744: + return access_type_t::RW; + case 1748: + return access_type_t::RW; + case 1752: + return access_type_t::RW; + case 1756: + return access_type_t::RW; + case 1760: + return access_type_t::RW; + case 1764: + return access_type_t::RW; + case 1768: + return access_type_t::RW; + case 1772: + return access_type_t::RW; + case 1776: + return access_type_t::RW; + case 1780: + return access_type_t::RW; + case 1784: + return access_type_t::RW; + case 1788: + return access_type_t::RW; + case 1792: + return access_type_t::RW; + case 1796: + return access_type_t::RW; + case 1800: + return access_type_t::RW; + case 1804: + return access_type_t::RW; + case 1808: + return access_type_t::RW; + case 1812: + return access_type_t::RW; + case 1816: + return access_type_t::RW; + case 1820: + return access_type_t::RW; + case 1824: + return access_type_t::RW; + case 1828: + return access_type_t::RW; + case 1832: + return access_type_t::RW; + case 1836: + return access_type_t::RW; + case 1840: + return access_type_t::RW; + case 1844: + return access_type_t::RW; + case 1848: + return access_type_t::RW; + case 1852: + return access_type_t::RW; + case 1856: + return access_type_t::RW; + case 1860: + return access_type_t::RW; + case 1864: + return access_type_t::RW; + case 1868: + return access_type_t::RW; + case 1872: + return access_type_t::RW; + case 1876: + return access_type_t::RW; + case 1880: + return access_type_t::RW; + case 1884: + return access_type_t::RW; + case 1888: + return access_type_t::RW; + case 1892: + return access_type_t::RW; + case 1896: + return access_type_t::RW; + case 1900: + return access_type_t::RW; + case 1904: + return access_type_t::RW; + case 1908: + return access_type_t::RW; + case 1912: + return access_type_t::RW; + case 1916: + return access_type_t::RW; + case 1920: + return access_type_t::RW; + case 1924: + return access_type_t::RW; + case 1928: + return access_type_t::RW; + case 1932: + return access_type_t::RW; + case 1936: + return access_type_t::RW; + case 1940: + return access_type_t::RW; + case 1944: + return access_type_t::RW; + case 1948: + return access_type_t::RW; + case 1952: + return access_type_t::RW; + case 1956: + return access_type_t::RW; + case 1960: + return access_type_t::RW; + case 1964: + return access_type_t::RW; + case 1968: + return access_type_t::RW; + case 1972: + return access_type_t::RW; + case 1976: + return access_type_t::RW; + case 1980: + return access_type_t::RW; + case 1984: + return access_type_t::RW; + case 1988: + return access_type_t::RW; + case 1992: + return access_type_t::RW; + case 1996: + return access_type_t::RW; + case 2000: + return access_type_t::RW; + case 2004: + return access_type_t::RW; + case 2008: + return access_type_t::RW; + case 2012: + return access_type_t::RW; + case 2016: + return access_type_t::RW; + case 2020: + return access_type_t::RW; + case 2024: + return access_type_t::RW; + case 2028: + return access_type_t::RW; + case 2032: + return access_type_t::RW; + case 2036: + return access_type_t::RW; + case 
2040: + return access_type_t::RW; + case 2044: + return access_type_t::RW; + case 2048: + return access_type_t::RW; + case 2052: + return access_type_t::RW; + case 2056: + return access_type_t::RW; + case 2060: + return access_type_t::RW; + case 2064: + return access_type_t::RW; + case 2068: + return access_type_t::RW; + case 2076: + return access_type_t::RW; + case 2084: + return access_type_t::RW; + case 2088: + return access_type_t::RW; + case 2092: + return access_type_t::RW; + case 2096: + return access_type_t::RW; + case 2100: + return access_type_t::RW; + case 2108: + return access_type_t::RW; + case 2116: + return access_type_t::RW; + case 2120: + return access_type_t::RW; + case 2124: + return access_type_t::RW; + case 2128: + return access_type_t::RW; + case 2132: + return access_type_t::RW; + case 2136: + return access_type_t::RW; + case 2140: + return access_type_t::RW; + case 2144: + return access_type_t::RW; + case 2152: + return access_type_t::RW; + case 2156: + return access_type_t::RW; + case 2160: + return access_type_t::RW; + case 2172: + return access_type_t::RW; + case 2176: + return access_type_t::RW; + case 2180: + return access_type_t::RW; + case 2184: + return access_type_t::RW; + case 2188: + return access_type_t::RW; + case 2192: + return access_type_t::RW; + case 2196: + return access_type_t::RW; + case 2200: + return access_type_t::RW; + case 2204: + return access_type_t::RW; + case 2208: + return access_type_t::RW; + case 2212: + return access_type_t::RW; + case 2228: + return access_type_t::RW; + case 2236: + return access_type_t::RW; + case 2240: + return access_type_t::RW; + case 2244: + return access_type_t::RW; + case 2248: + return access_type_t::RW; + case 2252: + return access_type_t::RW; + case 2304: + return access_type_t::RW; + case 2308: + return access_type_t::RW; + case 2324: + return access_type_t::RW; + case 2340: + return access_type_t::RW; + case 2344: + return access_type_t::RW; + case 2348: + return access_type_t::RW; + case 2352: + return access_type_t::RW; + case 2356: + return access_type_t::RW; + case 2364: + return access_type_t::RW; + case 2560: + return access_type_t::RW; + case 2568: + return access_type_t::RW; + case 2576: + return access_type_t::RW; + case 2584: + return access_type_t::RW; + case 2592: + return access_type_t::RW; + case 2600: + return access_type_t::RW; + case 2608: + return access_type_t::RW; + case 2624: + return access_type_t::RW; + case 2632: + return access_type_t::RW; + case 2640: + return access_type_t::RW; + case 2648: + return access_type_t::RW; + case 2656: + return access_type_t::RW; + case 2664: + return access_type_t::RW; + case 2672: + return access_type_t::RW; + case 2688: + return access_type_t::RW; + case 2696: + return access_type_t::RW; + case 2704: + return access_type_t::RW; + case 2712: + return access_type_t::RW; + case 2720: + return access_type_t::RW; + case 2724: + return access_type_t::RW; + case 2728: + return access_type_t::RW; + case 2732: + return access_type_t::RW; + case 2736: + return access_type_t::RW; + case 2752: + return access_type_t::RW; + case 2760: + return access_type_t::RW; + case 2768: + return access_type_t::RW; + case 2776: + return access_type_t::RW; + case 2784: + return access_type_t::RW; + case 2816: + return access_type_t::RW; + case 2824: + return access_type_t::RW; + case 2832: + return access_type_t::RW; + case 2840: + return access_type_t::RW; + case 2848: + return access_type_t::RW; + case 2856: + return access_type_t::RW; + case 2864: + return 
access_type_t::RW; + case 2880: + return access_type_t::RW; + case 2888: + return access_type_t::RW; + case 2896: + return access_type_t::RW; + case 2904: + return access_type_t::RW; + case 4032: + return access_type_t::RO; + case 4048: + return access_type_t::RO; + case 4052: + return access_type_t::RO; + case 4056: + return access_type_t::RO; + case 4060: + return access_type_t::RO; + case 4064: + return access_type_t::RO; + case 4068: + return access_type_t::RO; + case 4072: + return access_type_t::RO; + case 4076: + return access_type_t::RO; + case 4080: + return access_type_t::RO; + case 4084: + return access_type_t::RO; + case 4088: + return access_type_t::RO; + case 4092: + return access_type_t::RO; + default: + return access_type_t::RO; + } + } +#endif +}; + +#ifdef __cplusplus +struct isa +{ +#ifdef NPU_DISASSEMBLE + static int disassemble(const uint32_t *in, + std::string &op, + std::vector> &fields) + { + switch (*in & 0xffff) + { + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP): + { + const npu_op_stop_t &v = *reinterpret_cast(in); + op = "NPU_OP_STOP"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ): + { + const npu_op_irq_t &v = *reinterpret_cast(in); + op = "NPU_OP_IRQ"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_CONV): + { + const npu_op_conv_t &v = *reinterpret_cast(in); + op = "NPU_OP_CONV"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DEPTHWISE): + { + const npu_op_depthwise_t &v = *reinterpret_cast(in); + op = "NPU_OP_DEPTHWISE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL): + { + const npu_op_pool_t &v = *reinterpret_cast(in); + op = "NPU_OP_POOL"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE): + { + const npu_op_elementwise_t &v = *reinterpret_cast(in); + op = "NPU_OP_ELEMENTWISE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_START): + { + const npu_op_dma_start_t &v = *reinterpret_cast(in); + op = "NPU_OP_DMA_START"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT): + { + const npu_op_dma_wait_t &v = *reinterpret_cast(in); + op = "NPU_OP_DMA_WAIT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT): + { + const npu_op_kernel_wait_t &v = *reinterpret_cast(in); + op = "NPU_OP_KERNEL_WAIT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK): + { + const npu_op_pmu_mask_t &v = *reinterpret_cast(in); + op = "NPU_OP_PMU_MASK"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP): + { + const 
npu_set_ifm_pad_top_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PAD_TOP"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT): + { + const npu_set_ifm_pad_left_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PAD_LEFT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT): + { + const npu_set_ifm_pad_right_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PAD_RIGHT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM): + { + const npu_set_ifm_pad_bottom_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PAD_BOTTOM"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1): + { + const npu_set_ifm_depth_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_DEPTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION): + { + const npu_set_ifm_precision_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_PRECISION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE): + { + const npu_set_ifm_upscale_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_UPSCALE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT): + { + const npu_set_ifm_zero_point_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_ZERO_POINT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1): + { + const npu_set_ifm_width0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_WIDTH0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1): + { + const npu_set_ifm_height0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_HEIGHT0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1): + { + const npu_set_ifm_height1_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_HEIGHT1_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END): + { + const npu_set_ifm_ib_end_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_IB_END"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION): + { + const npu_set_ifm_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1): + { + const npu_set_ofm_width_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_WIDTH_M1"; + v.disassemble(fields); + break; + } + case 
(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1): + { + const npu_set_ofm_height_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_HEIGHT_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1): + { + const npu_set_ofm_depth_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_DEPTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION): + { + const npu_set_ofm_precision_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_PRECISION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1): + { + const npu_set_ofm_blk_width_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BLK_WIDTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1): + { + const npu_set_ofm_blk_height_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BLK_HEIGHT_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1): + { + const npu_set_ofm_blk_depth_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BLK_DEPTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT): + { + const npu_set_ofm_zero_point_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_ZERO_POINT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1): + { + const npu_set_ofm_width0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_WIDTH0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1): + { + const npu_set_ofm_height0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_HEIGHT0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1): + { + const npu_set_ofm_height1_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_HEIGHT1_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION): + { + const npu_set_ofm_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1): + { + const npu_set_kernel_width_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_KERNEL_WIDTH_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1): + { + const npu_set_kernel_height_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_KERNEL_HEIGHT_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + 
static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE): + { + const npu_set_kernel_stride_t &v = *reinterpret_cast(in); + op = "NPU_SET_KERNEL_STRIDE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_PARALLEL_MODE): + { + const npu_set_parallel_mode_t &v = *reinterpret_cast(in); + op = "NPU_SET_PARALLEL_MODE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT): + { + const npu_set_acc_format_t &v = *reinterpret_cast(in); + op = "NPU_SET_ACC_FORMAT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION): + { + const npu_set_activation_t &v = *reinterpret_cast(in); + op = "NPU_SET_ACTIVATION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN): + { + const npu_set_activation_min_t &v = *reinterpret_cast(in); + op = "NPU_SET_ACTIVATION_MIN"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX): + { + const npu_set_activation_max_t &v = *reinterpret_cast(in); + op = "NPU_SET_ACTIVATION_MAX"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION): + { + const npu_set_weight_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_WEIGHT_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION): + { + const npu_set_scale_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_SCALE_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START): + { + const npu_set_ab_start_t &v = *reinterpret_cast(in); + op = "NPU_SET_AB_START"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP): + { + const npu_set_blockdep_t &v = *reinterpret_cast(in); + op = "NPU_SET_BLOCKDEP"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION): + { + const npu_set_dma0_src_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SRC_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION): + { + const npu_set_dma0_dst_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_DST_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0): + { + const npu_set_dma0_size0_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SIZE0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1): + { + const npu_set_dma0_size1_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SIZE1"; 
+ v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST): + { + const npu_set_ifm2_broadcast_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BROADCAST"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR): + { + const npu_set_ifm2_scalar_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_SCALAR"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION): + { + const npu_set_ifm2_precision_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_PRECISION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT): + { + const npu_set_ifm2_zero_point_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_ZERO_POINT"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1): + { + const npu_set_ifm2_width0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_WIDTH0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1): + { + const npu_set_ifm2_height0_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_HEIGHT0_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1): + { + const npu_set_ifm2_height1_m1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_HEIGHT1_M1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START): + { + const npu_set_ifm2_ib_start_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_IB_START"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION): + { + const npu_set_ifm2_region_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_REGION"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0): + { + const npu_set_ifm_base0_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_BASE0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1): + { + const npu_set_ifm_base1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_BASE1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2): + { + const npu_set_ifm_base2_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_BASE2"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3): + { + const npu_set_ifm_base3_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_BASE3"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + 
static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X): + { + const npu_set_ifm_stride_x_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_STRIDE_X"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y): + { + const npu_set_ifm_stride_y_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_STRIDE_Y"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C): + { + const npu_set_ifm_stride_c_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM_STRIDE_C"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0): + { + const npu_set_ofm_base0_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BASE0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1): + { + const npu_set_ofm_base1_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BASE1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2): + { + const npu_set_ofm_base2_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BASE2"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3): + { + const npu_set_ofm_base3_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_BASE3"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X): + { + const npu_set_ofm_stride_x_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_STRIDE_X"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y): + { + const npu_set_ofm_stride_y_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_STRIDE_Y"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C): + { + const npu_set_ofm_stride_c_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_STRIDE_C"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE): + { + const npu_set_weight_base_t &v = *reinterpret_cast(in); + op = "NPU_SET_WEIGHT_BASE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH): + { + const npu_set_weight_length_t &v = *reinterpret_cast(in); + op = "NPU_SET_WEIGHT_LENGTH"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE): + { + const npu_set_scale_base_t &v = *reinterpret_cast(in); + op = "NPU_SET_SCALE_BASE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH): + { + const npu_set_scale_length_t &v = *reinterpret_cast(in); + op = "NPU_SET_SCALE_LENGTH"; + v.disassemble(fields); + break; 
+ } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE): + { + const npu_set_ofm_scale_t &v = *reinterpret_cast(in); + op = "NPU_SET_OFM_SCALE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE): + { + const npu_set_opa_scale_t &v = *reinterpret_cast(in); + op = "NPU_SET_OPA_SCALE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE): + { + const npu_set_opb_scale_t &v = *reinterpret_cast(in); + op = "NPU_SET_OPB_SCALE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC): + { + const npu_set_dma0_src_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SRC"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST): + { + const npu_set_dma0_dst_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_DST"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN): + { + const npu_set_dma0_len_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_LEN"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP0): + { + const npu_set_dma0_skip0_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SKIP0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP1): + { + const npu_set_dma0_skip1_t &v = *reinterpret_cast(in); + op = "NPU_SET_DMA0_SKIP1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0): + { + const npu_set_ifm2_base0_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BASE0"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1): + { + const npu_set_ifm2_base1_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BASE1"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2): + { + const npu_set_ifm2_base2_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BASE2"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3): + { + const npu_set_ifm2_base3_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_BASE3"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X): + { + const npu_set_ifm2_stride_x_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_STRIDE_X"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y): + { + const npu_set_ifm2_stride_y_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_STRIDE_Y"; + 
v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C): + { + const npu_set_ifm2_stride_c_t &v = *reinterpret_cast(in); + op = "NPU_SET_IFM2_STRIDE_C"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_BASE): + { + const npu_set_weight1_base_t &v = *reinterpret_cast(in); + op = "NPU_SET_WEIGHT1_BASE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_LENGTH): + { + const npu_set_weight1_length_t &v = *reinterpret_cast(in); + op = "NPU_SET_WEIGHT1_LENGTH"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_BASE): + { + const npu_set_scale1_base_t &v = *reinterpret_cast(in); + op = "NPU_SET_SCALE1_BASE"; + v.disassemble(fields); + break; + } + case (static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL) << 14) | + static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_LENGTH): + { + const npu_set_scale1_length_t &v = *reinterpret_cast(in); + op = "NPU_SET_SCALE1_LENGTH"; + v.disassemble(fields); + break; + } + } + return (*in & (3 << 14)) != 0 ? 2 : 1; + } +#endif +#endif + // Signal the end of command stream + struct npu_op_stop_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t mask : 16; // Encoding for 16-bit mask value +#ifdef __cplusplus + public: + npu_op_stop_t(uint32_t _mask) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mask(_mask & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_op_stop_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mask(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_STOP); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_stop_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_stop_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_mask() const + { + return static_cast(mask); + } + CONSTEXPR npu_op_stop_t &set_mask(uint32_t value) + { + mask = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("mask", std::to_string(mask))); + } +#endif +#endif + }; + // Raises an IRQ to the host + struct npu_op_irq_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t 
control : 2; // control + uint32_t mask : 16; // Encoding for 16-bit mask value +#ifdef __cplusplus + public: + npu_op_irq_t(uint32_t _mask) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mask(_mask & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_op_irq_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mask(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_IRQ); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_irq_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_irq_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_mask() const + { + return static_cast(mask); + } + CONSTEXPR npu_op_irq_t &set_mask(uint32_t value) + { + mask = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("mask", std::to_string(mask))); + } +#endif +#endif + }; + // 2D convolution + struct npu_op_conv_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; +#ifdef __cplusplus + public: + CONSTEXPR npu_op_conv_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_CONV)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_CONV) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_CONV); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_conv_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_conv_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const {} +#endif +#endif + }; + // Depth-wise 2D convolution + struct npu_op_depthwise_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; +#ifdef __cplusplus + public: + CONSTEXPR npu_op_depthwise_t() : + 
opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DEPTHWISE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DEPTHWISE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DEPTHWISE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_depthwise_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_depthwise_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const {} +#endif +#endif + }; + // Pooling + struct npu_op_pool_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t pooling_mode : 3; // Pooling mode + uint32_t reserved1 : 13; +#ifdef __cplusplus + public: + npu_op_pool_t(NPU_NAMESPACE::pooling_mode _pooling_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + pooling_mode(static_cast(_pooling_mode) & ((1U << 3) - 1)), reserved1(0) + { + } + CONSTEXPR npu_op_pool_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pooling_mode(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_POOL); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_pool_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_pool_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::pooling_mode get_pooling_mode() const + { + return static_cast(pooling_mode); + } + CONSTEXPR npu_op_pool_t &set_pooling_mode(NPU_NAMESPACE::pooling_mode value) + { + pooling_mode = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "pooling_mode", + (pooling_mode < (sizeof(pooling_mode_str) / sizeof(pooling_mode_str[0])) ? 
+ pooling_mode_str[pooling_mode] : + "****"))); + } +#endif +#endif + }; + // Elementwise operation + struct npu_op_elementwise_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t elementwise_mode : 6; // Elementwise mode + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_op_elementwise_t(NPU_NAMESPACE::elementwise_mode _elementwise_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + elementwise_mode(static_cast(_elementwise_mode) & ((1U << 6) - 1)), reserved1(0) + { + } + CONSTEXPR npu_op_elementwise_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), elementwise_mode(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_ELEMENTWISE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_elementwise_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_elementwise_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::elementwise_mode get_elementwise_mode() const + { + return static_cast(elementwise_mode); + } + CONSTEXPR npu_op_elementwise_t &set_elementwise_mode(NPU_NAMESPACE::elementwise_mode value) + { + elementwise_mode = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "elementwise_mode", + (elementwise_mode < (sizeof(elementwise_mode_str) / sizeof(elementwise_mode_str[0])) ? 
+ elementwise_mode_str[elementwise_mode] : + "****"))); + } +#endif +#endif + }; + // Queue new DMA for the given channel + struct npu_op_dma_start_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; +#ifdef __cplusplus + public: + CONSTEXPR npu_op_dma_start_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_START) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_START); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_dma_start_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_dma_start_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const {} +#endif +#endif + }; + // Wait for the DMA channel to have k or fewer active descriptors outstanding + struct npu_op_dma_wait_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t k : 4; // Number of outstanding descriptors + uint32_t reserved1 : 12; +#ifdef __cplusplus + public: + npu_op_dma_wait_t(uint32_t _k) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), k(_k & ((1U << 4) - 1)), reserved1(0) + { + } + CONSTEXPR npu_op_dma_wait_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), k(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_DMA_WAIT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_dma_wait_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_dma_wait_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_k() const + { + return static_cast(k); + } + CONSTEXPR npu_op_dma_wait_t &set_k(uint32_t value) + { + k = static_cast(value) & ((1U << 4) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) 
const + { + fields.push_back(std::make_pair("k", std::to_string(k))); + } +#endif +#endif + }; + // Wait for n or fewer kernel operations to be remaining + struct npu_op_kernel_wait_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t n : 2; // Number of kernel operations in range 0-3 + uint32_t reserved1 : 14; +#ifdef __cplusplus + public: + npu_op_kernel_wait_t(uint32_t _n) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), n(_n & ((1U << 2) - 1)), reserved1(0) + { + } + CONSTEXPR npu_op_kernel_wait_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), n(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_KERNEL_WAIT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_kernel_wait_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_kernel_wait_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_n() const + { + return static_cast(n); + } + CONSTEXPR npu_op_kernel_wait_t &set_n(uint32_t value) + { + n = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("n", std::to_string(n))); + } +#endif +#endif + }; + // Enable or disable PMU counting (debug feature only) + struct npu_op_pmu_mask_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t enable : 1; // Enable or disable PMU mask + uint32_t reserved1 : 15; +#ifdef __cplusplus + public: + npu_op_pmu_mask_t(uint32_t _enable) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), enable(_enable & ((1U << 1) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_op_pmu_mask_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), enable(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_OP_PMU_MASK); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_op_pmu_mask_t 
&set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_op_pmu_mask_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_enable() const + { + return static_cast(enable); + } + CONSTEXPR npu_op_pmu_mask_t &set_enable(uint32_t value) + { + enable = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("enable", std::to_string(enable))); + } +#endif +#endif + }; + // IFM top pad + struct npu_set_ifm_pad_top_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t pad : 7; // IFM top pad + uint32_t reserved1 : 9; +#ifdef __cplusplus + public: + npu_set_ifm_pad_top_t(uint32_t _pad) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(_pad & ((1U << 7) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_pad_top_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_TOP); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_pad_top_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_pad_top_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_pad() const + { + return static_cast(pad); + } + CONSTEXPR npu_set_ifm_pad_top_t &set_pad(uint32_t value) + { + pad = static_cast(value) & ((1U << 7) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("pad", std::to_string(pad))); + } +#endif +#endif + }; + // IFM left pad + struct npu_set_ifm_pad_left_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t pad : 7; // IFM left pad + uint32_t reserved1 : 9; +#ifdef __cplusplus + public: + npu_set_ifm_pad_left_t(uint32_t _pad) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(_pad & ((1U << 7) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_pad_left_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { 
+ return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_LEFT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_pad_left_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_pad_left_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_pad() const + { + return static_cast(pad); + } + CONSTEXPR npu_set_ifm_pad_left_t &set_pad(uint32_t value) + { + pad = static_cast(value) & ((1U << 7) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("pad", std::to_string(pad))); + } +#endif +#endif + }; + // IFM right pad + struct npu_set_ifm_pad_right_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t pad : 8; // IFM right pad. Max value is 128 + uint32_t reserved1 : 8; +#ifdef __cplusplus + public: + npu_set_ifm_pad_right_t(uint32_t _pad) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(_pad & ((1U << 8) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_pad_right_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_RIGHT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_pad_right_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_pad_right_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_pad() const + { + return static_cast(pad); + } + CONSTEXPR npu_set_ifm_pad_right_t &set_pad(uint32_t value) + { + pad = static_cast(value) & ((1U << 8) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("pad", std::to_string(pad))); + } +#endif +#endif + }; + // IFM bottom pad + struct npu_set_ifm_pad_bottom_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // 
control + uint32_t pad : 8; // IFM bottom pad. Max value is 128 + uint32_t reserved1 : 8; +#ifdef __cplusplus + public: + npu_set_ifm_pad_bottom_t(uint32_t _pad) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(_pad & ((1U << 8) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_pad_bottom_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), pad(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PAD_BOTTOM); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_pad_bottom_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_pad_bottom_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_pad() const + { + return static_cast(pad); + } + CONSTEXPR npu_set_ifm_pad_bottom_t &set_pad(uint32_t value) + { + pad = static_cast(value) & ((1U << 8) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("pad", std::to_string(pad))); + } +#endif +#endif + }; + // Number of input channels for convolution + struct npu_set_ifm_depth_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t depth_m1 : 16; // Number of input channels for convolution +#ifdef __cplusplus + public: + npu_set_ifm_depth_m1_t(uint32_t _depth_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(_depth_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_depth_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_DEPTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_depth_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_depth_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl 
value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_depth_m1() const + { + return static_cast(depth_m1); + } + CONSTEXPR npu_set_ifm_depth_m1_t &set_depth_m1(uint32_t value) + { + depth_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("depth_m1", std::to_string(depth_m1))); + } +#endif +#endif + }; + // IFM Precision + struct npu_set_ifm_precision_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t activation_type : 1; // IFM type + uint32_t reserved1 : 1; + uint32_t activation_precision : 2; // IFM precision + uint32_t reserved2 : 2; + uint32_t activation_format : 2; // IFM format + uint32_t scale_mode : 2; // IFM scale mode + uint32_t reserved3 : 4; + uint32_t round_mode : 2; // IFM round mode +#ifdef __cplusplus + public: + npu_set_ifm_precision_t(NPU_NAMESPACE::activation_type _activation_type, + NPU_NAMESPACE::activation_precision _activation_precision, + NPU_NAMESPACE::activation_format _activation_format, + NPU_NAMESPACE::ifm_scale_mode _scale_mode, + NPU_NAMESPACE::round_mode _round_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + activation_type(static_cast(_activation_type) & ((1U << 1) - 1)), reserved1(0), + activation_precision(static_cast(_activation_precision) & ((1U << 2) - 1)), reserved2(0), + activation_format(static_cast(_activation_format) & ((1U << 2) - 1)), + scale_mode(static_cast(_scale_mode) & ((1U << 2) - 1)), reserved3(0), + round_mode(static_cast(_round_mode) & ((1U << 2) - 1)) + { + } + CONSTEXPR npu_set_ifm_precision_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), activation_type(0), reserved1(0), + activation_precision(0), reserved2(0), activation_format(0), scale_mode(0), reserved3(0), round_mode(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_PRECISION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_precision_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_precision_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_type get_activation_type() const + { + return static_cast(activation_type); + } + CONSTEXPR npu_set_ifm_precision_t &set_activation_type(NPU_NAMESPACE::activation_type value) + { + activation_type = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_precision get_activation_precision() const + { + return static_cast(activation_precision); + } + 
CONSTEXPR npu_set_ifm_precision_t &set_activation_precision(NPU_NAMESPACE::activation_precision value) + { + activation_precision = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_format get_activation_format() const + { + return static_cast(activation_format); + } + CONSTEXPR npu_set_ifm_precision_t &set_activation_format(NPU_NAMESPACE::activation_format value) + { + activation_format = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::ifm_scale_mode get_scale_mode() const + { + return static_cast(scale_mode); + } + CONSTEXPR npu_set_ifm_precision_t &set_scale_mode(NPU_NAMESPACE::ifm_scale_mode value) + { + scale_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::round_mode get_round_mode() const + { + return static_cast(round_mode); + } + CONSTEXPR npu_set_ifm_precision_t &set_round_mode(NPU_NAMESPACE::round_mode value) + { + round_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "activation_type", + (activation_type < (sizeof(activation_type_str) / sizeof(activation_type_str[0])) ? + activation_type_str[activation_type] : + "****"))); + fields.push_back(std::make_pair( + "activation_precision", + (activation_precision < (sizeof(activation_precision_str) / sizeof(activation_precision_str[0])) ? + activation_precision_str[activation_precision] : + "****"))); + fields.push_back(std::make_pair( + "activation_format", + (activation_format < (sizeof(activation_format_str) / sizeof(activation_format_str[0])) ? + activation_format_str[activation_format] : + "****"))); + fields.push_back(std::make_pair( + "scale_mode", + (scale_mode < (sizeof(ifm_scale_mode_str) / sizeof(ifm_scale_mode_str[0])) ? + ifm_scale_mode_str[scale_mode] : + "****"))); + fields.push_back(std::make_pair( + "round_mode", + (round_mode < (sizeof(round_mode_str) / sizeof(round_mode_str[0])) ? 
round_mode_str[round_mode] : + "****"))); + } +#endif +#endif + }; + // IFM upscale mode + struct npu_set_ifm_upscale_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t mode : 2; // IFM upscale mode + uint32_t reserved1 : 14; +#ifdef __cplusplus + public: + npu_set_ifm_upscale_t(NPU_NAMESPACE::ifm_upscale_mode _mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + mode(static_cast(_mode) & ((1U << 2) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_ifm_upscale_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), mode(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_UPSCALE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_upscale_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_upscale_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::ifm_upscale_mode get_mode() const + { + return static_cast(mode); + } + CONSTEXPR npu_set_ifm_upscale_t &set_mode(NPU_NAMESPACE::ifm_upscale_mode value) + { + mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "mode", + (mode < (sizeof(ifm_upscale_mode_str) / sizeof(ifm_upscale_mode_str[0])) ? 
ifm_upscale_mode_str[mode] : + "****"))); + } +#endif +#endif + }; + // IFM zero point + struct npu_set_ifm_zero_point_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t zero_point : 16; // Zero point offset +#ifdef __cplusplus + public: + npu_set_ifm_zero_point_t(uint32_t _zero_point) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + zero_point(_zero_point & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_zero_point_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), zero_point(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_ZERO_POINT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_zero_point_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_zero_point_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_zero_point() const + { + return static_cast(zero_point); + } + CONSTEXPR npu_set_ifm_zero_point_t &set_zero_point(uint32_t value) + { + zero_point = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("zero_point", std::to_string(zero_point))); + } +#endif +#endif + }; + // IFM Tile 0 and tile 2 width + struct npu_set_ifm_width0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // IFM Tile 0 and tile 2 width +#ifdef __cplusplus + public: + npu_set_ifm_width0_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_width0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_WIDTH0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_width0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) 
+ { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_width0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ifm_width0_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // IFM Tile 0 height + struct npu_set_ifm_height0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // IFM Tile 0 height +#ifdef __cplusplus + public: + npu_set_ifm_height0_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_height0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_height0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_height0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ifm_height0_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // IFM Tile 1 height + struct npu_set_ifm_height1_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // IFM Tile 1 height +#ifdef __cplusplus + public: + npu_set_ifm_height1_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm_height1_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + 
CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_HEIGHT1_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_height1_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_height1_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ifm_height1_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // End of IB0,IB1 buffers + struct npu_set_ifm_ib_end_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t ib_end : 6; // End of IB0,IB1 buffers in the SHRAM in KB units. Multiple of 2 + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_set_ifm_ib_end_t(uint32_t _ib_end) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ib_end(_ib_end & ((1U << 6) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ifm_ib_end_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ib_end(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_IB_END); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_ib_end_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_ib_end_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_ib_end() const + { + return static_cast(ib_end); + } + CONSTEXPR npu_set_ifm_ib_end_t &set_ib_end(uint32_t value) + { + ib_end = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("ib_end", std::to_string(ib_end))); + } +#endif +#endif + }; + // Index n for IFM access + struct npu_set_ifm_region_t 
+ { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Region number n + uint32_t reserved1 : 13; +#ifdef __cplusplus + public: + npu_set_ifm_region_t(uint32_t _region) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ifm_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_ifm_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + } +#endif +#endif + }; + // Output feature map width + struct npu_set_ofm_width_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // Output feature map width +#ifdef __cplusplus + public: + npu_set_ofm_width_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_width_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_width_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); 
+ } + CONSTEXPR npu_set_ofm_width_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ofm_width_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // Output feature map height + struct npu_set_ofm_height_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // Output feature map height +#ifdef __cplusplus + public: + npu_set_ofm_height_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_height_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_height_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_height_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ofm_height_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // Output feature map depth + struct npu_set_ofm_depth_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t depth_m1 : 16; // Output feature map depth +#ifdef __cplusplus + public: + npu_set_ofm_depth_m1_t(uint32_t _depth_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(_depth_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_depth_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1) && + control == 
static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_DEPTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_depth_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_depth_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_depth_m1() const + { + return static_cast(depth_m1); + } + CONSTEXPR npu_set_ofm_depth_m1_t &set_depth_m1(uint32_t value) + { + depth_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("depth_m1", std::to_string(depth_m1))); + } +#endif +#endif + }; + // OFM Precision + struct npu_set_ofm_precision_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t activation_type : 1; // OFM type + uint32_t activation_precision : 2; // OFM precision + uint32_t reserved1 : 3; + uint32_t activation_format : 2; // OFM format + uint32_t scale_mode : 1; // OFM scale mode + uint32_t reserved2 : 5; + uint32_t round_mode : 2; // OFM round mode +#ifdef __cplusplus + public: + npu_set_ofm_precision_t(NPU_NAMESPACE::activation_type _activation_type, + NPU_NAMESPACE::activation_precision _activation_precision, + NPU_NAMESPACE::activation_format _activation_format, + NPU_NAMESPACE::ofm_scale_mode _scale_mode, + NPU_NAMESPACE::round_mode _round_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + activation_type(static_cast(_activation_type) & ((1U << 1) - 1)), + activation_precision(static_cast(_activation_precision) & ((1U << 2) - 1)), reserved1(0), + activation_format(static_cast(_activation_format) & ((1U << 2) - 1)), + scale_mode(static_cast(_scale_mode) & ((1U << 1) - 1)), reserved2(0), + round_mode(static_cast(_round_mode) & ((1U << 2) - 1)) + { + } + CONSTEXPR npu_set_ofm_precision_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), activation_type(0), + activation_precision(0), reserved1(0), activation_format(0), scale_mode(0), reserved2(0), round_mode(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_PRECISION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_precision_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + 
} + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_precision_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_type get_activation_type() const + { + return static_cast(activation_type); + } + CONSTEXPR npu_set_ofm_precision_t &set_activation_type(NPU_NAMESPACE::activation_type value) + { + activation_type = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_precision get_activation_precision() const + { + return static_cast(activation_precision); + } + CONSTEXPR npu_set_ofm_precision_t &set_activation_precision(NPU_NAMESPACE::activation_precision value) + { + activation_precision = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_format get_activation_format() const + { + return static_cast(activation_format); + } + CONSTEXPR npu_set_ofm_precision_t &set_activation_format(NPU_NAMESPACE::activation_format value) + { + activation_format = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::ofm_scale_mode get_scale_mode() const + { + return static_cast(scale_mode); + } + CONSTEXPR npu_set_ofm_precision_t &set_scale_mode(NPU_NAMESPACE::ofm_scale_mode value) + { + scale_mode = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::round_mode get_round_mode() const + { + return static_cast(round_mode); + } + CONSTEXPR npu_set_ofm_precision_t &set_round_mode(NPU_NAMESPACE::round_mode value) + { + round_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "activation_type", + (activation_type < (sizeof(activation_type_str) / sizeof(activation_type_str[0])) ? + activation_type_str[activation_type] : + "****"))); + fields.push_back(std::make_pair( + "activation_precision", + (activation_precision < (sizeof(activation_precision_str) / sizeof(activation_precision_str[0])) ? + activation_precision_str[activation_precision] : + "****"))); + fields.push_back(std::make_pair( + "activation_format", + (activation_format < (sizeof(activation_format_str) / sizeof(activation_format_str[0])) ? + activation_format_str[activation_format] : + "****"))); + fields.push_back(std::make_pair( + "scale_mode", + (scale_mode < (sizeof(ofm_scale_mode_str) / sizeof(ofm_scale_mode_str[0])) ? + ofm_scale_mode_str[scale_mode] : + "****"))); + fields.push_back(std::make_pair( + "round_mode", + (round_mode < (sizeof(round_mode_str) / sizeof(round_mode_str[0])) ? 
round_mode_str[round_mode] : + "****"))); + } +#endif +#endif + }; + // OFM block width + struct npu_set_ofm_blk_width_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 6; // OFM block width + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_set_ofm_blk_width_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 6) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ofm_blk_width_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_WIDTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_blk_width_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_blk_width_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ofm_blk_width_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // OFM block height + struct npu_set_ofm_blk_height_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 5; // OFM block height + uint32_t reserved1 : 11; +#ifdef __cplusplus + public: + npu_set_ofm_blk_height_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 5) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ofm_blk_height_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_HEIGHT_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { 
+ return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_blk_height_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_blk_height_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ofm_blk_height_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 5) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // OFM block depth + struct npu_set_ofm_blk_depth_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t depth_m1 : 7; // OFM block depth + uint32_t reserved1 : 9; +#ifdef __cplusplus + public: + npu_set_ofm_blk_depth_m1_t(uint32_t _depth_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(_depth_m1 & ((1U << 7) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ofm_blk_depth_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), depth_m1(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_BLK_DEPTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_blk_depth_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_blk_depth_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_depth_m1() const + { + return static_cast(depth_m1); + } + CONSTEXPR npu_set_ofm_blk_depth_m1_t &set_depth_m1(uint32_t value) + { + depth_m1 = static_cast(value) & ((1U << 7) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("depth_m1", std::to_string(depth_m1))); + } +#endif +#endif + }; + // OFM zero point + struct npu_set_ofm_zero_point_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t zero_point : 16; // Zero point offset +#ifdef __cplusplus + public: + npu_set_ofm_zero_point_t(uint32_t _zero_point) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + zero_point(_zero_point & ((1U << 16) - 1)) + { + } + CONSTEXPR 
npu_set_ofm_zero_point_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), zero_point(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_ZERO_POINT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_zero_point_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_zero_point_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_zero_point() const + { + return static_cast(zero_point); + } + CONSTEXPR npu_set_ofm_zero_point_t &set_zero_point(uint32_t value) + { + zero_point = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("zero_point", std::to_string(zero_point))); + } +#endif +#endif + }; + // OFM Tile 0 and tile 2 width + struct npu_set_ofm_width0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // OFM Tile 0 and tile 2 width +#ifdef __cplusplus + public: + npu_set_ofm_width0_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_width0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_WIDTH0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_width0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_width0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ofm_width0_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> 
&fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // OFM Tile 0 height + struct npu_set_ofm_height0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // OFM Tile 0 height +#ifdef __cplusplus + public: + npu_set_ofm_height0_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_height0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_height0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_height0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ofm_height0_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // OFM Tile 1 height + struct npu_set_ofm_height1_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // OFM Tile 1 height +#ifdef __cplusplus + public: + npu_set_ofm_height1_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ofm_height1_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_HEIGHT1_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_height1_m1_t 
&set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_height1_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ofm_height1_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // Index n for OFM access + struct npu_set_ofm_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Index n for OFM access + uint32_t reserved1 : 13; +#ifdef __cplusplus + public: + npu_set_ofm_region_t(uint32_t _region) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ofm_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_OFM_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_ofm_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + } +#endif +#endif + }; + // Kernel width + struct npu_set_kernel_width_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // Kernel width +#ifdef __cplusplus + public: + npu_set_kernel_width_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_kernel_width_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1)), reserved0(0), + 
control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_WIDTH_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_kernel_width_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_kernel_width_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_kernel_width_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // Kernel height + struct npu_set_kernel_height_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // Kernel height +#ifdef __cplusplus + public: + npu_set_kernel_height_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_kernel_height_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_HEIGHT_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_kernel_height_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_kernel_height_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_kernel_height_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + 
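+        // Usage sketch (illustrative only): kernel dimensions are likewise "minus one"
+        // encoded, so a hypothetical 3x3 kernel is programmed as
+        //   uint32_t kw = npu_set_kernel_width_m1_t(3 - 1);
+        //   uint32_t kh = npu_set_kernel_height_m1_t(3 - 1);
+        // The NPU_SET_KERNEL_STRIDE command below then splits (stride - 1) across its
+        // LSB/MSB bits, e.g. a stride of 2 sets stride_x_lsb = 1 and stride_x_msb = 0.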
}; + // Kernel stride + struct npu_set_kernel_stride_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t stride_x_lsb : 1; // Stride x LSB. (kernel_x_stride - 1)[0] + uint32_t stride_y_lsb : 1; // Stride y LSB. (kernel_y_stride - 1)[0] + uint32_t weight_order : 1; // Weight ordering mode + uint32_t dilation_x : 1; // Kernel x dilation + uint32_t dilation_y : 1; // Kernel y dilation + uint32_t decomposition : 1; // Kernel decomposition + uint32_t stride_x_msb : 1; // Stride x MSB. (kernel_x_stride - 1) >> 1 + uint32_t reserved1 : 2; + uint32_t stride_y_msb : 1; // Stride y MSB. (kernel_y_stride - 1) >> 1 + uint32_t reserved2 : 6; +#ifdef __cplusplus + public: + npu_set_kernel_stride_t(uint32_t _stride_x_lsb, + uint32_t _stride_y_lsb, + NPU_NAMESPACE::weight_order _weight_order, + NPU_NAMESPACE::kernel_dilation _dilation_x, + NPU_NAMESPACE::kernel_dilation _dilation_y, + NPU_NAMESPACE::kernel_decomposition _decomposition, + uint32_t _stride_x_msb, + uint32_t _stride_y_msb) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + stride_x_lsb(_stride_x_lsb & ((1U << 1) - 1)), stride_y_lsb(_stride_y_lsb & ((1U << 1) - 1)), + weight_order(static_cast(_weight_order) & ((1U << 1) - 1)), + dilation_x(static_cast(_dilation_x) & ((1U << 1) - 1)), + dilation_y(static_cast(_dilation_y) & ((1U << 1) - 1)), + decomposition(static_cast(_decomposition) & ((1U << 1) - 1)), + stride_x_msb(_stride_x_msb & ((1U << 1) - 1)), reserved1(0), stride_y_msb(_stride_y_msb & ((1U << 1) - 1)), + reserved2(0) + { + } + CONSTEXPR npu_set_kernel_stride_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), stride_x_lsb(0), stride_y_lsb(0), + weight_order(0), dilation_x(0), dilation_y(0), decomposition(0), stride_x_msb(0), reserved1(0), + stride_y_msb(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_KERNEL_STRIDE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_kernel_stride_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_kernel_stride_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_stride_x_lsb() const + { + return static_cast(stride_x_lsb); + } + CONSTEXPR npu_set_kernel_stride_t &set_stride_x_lsb(uint32_t value) + { + stride_x_lsb = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR uint32_t get_stride_y_lsb() const + { + return static_cast(stride_y_lsb); + } + CONSTEXPR npu_set_kernel_stride_t &set_stride_y_lsb(uint32_t value) + { + stride_y_lsb = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::weight_order 
get_weight_order() const + { + return static_cast(weight_order); + } + CONSTEXPR npu_set_kernel_stride_t &set_weight_order(NPU_NAMESPACE::weight_order value) + { + weight_order = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::kernel_dilation get_dilation_x() const + { + return static_cast(dilation_x); + } + CONSTEXPR npu_set_kernel_stride_t &set_dilation_x(NPU_NAMESPACE::kernel_dilation value) + { + dilation_x = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::kernel_dilation get_dilation_y() const + { + return static_cast(dilation_y); + } + CONSTEXPR npu_set_kernel_stride_t &set_dilation_y(NPU_NAMESPACE::kernel_dilation value) + { + dilation_y = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::kernel_decomposition get_decomposition() const + { + return static_cast(decomposition); + } + CONSTEXPR npu_set_kernel_stride_t &set_decomposition(NPU_NAMESPACE::kernel_decomposition value) + { + decomposition = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR uint32_t get_stride_x_msb() const + { + return static_cast(stride_x_msb); + } + CONSTEXPR npu_set_kernel_stride_t &set_stride_x_msb(uint32_t value) + { + stride_x_msb = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR uint32_t get_stride_y_msb() const + { + return static_cast(stride_y_msb); + } + CONSTEXPR npu_set_kernel_stride_t &set_stride_y_msb(uint32_t value) + { + stride_y_msb = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("stride_x_lsb", std::to_string(stride_x_lsb))); + fields.push_back(std::make_pair("stride_y_lsb", std::to_string(stride_y_lsb))); + fields.push_back(std::make_pair( + "weight_order", + (weight_order < (sizeof(weight_order_str) / sizeof(weight_order_str[0])) ? + weight_order_str[weight_order] : + "****"))); + fields.push_back(std::make_pair( + "dilation_x", + (dilation_x < (sizeof(kernel_dilation_str) / sizeof(kernel_dilation_str[0])) ? + kernel_dilation_str[dilation_x] : + "****"))); + fields.push_back(std::make_pair( + "dilation_y", + (dilation_y < (sizeof(kernel_dilation_str) / sizeof(kernel_dilation_str[0])) ? + kernel_dilation_str[dilation_y] : + "****"))); + fields.push_back(std::make_pair( + "decomposition", + (decomposition < (sizeof(kernel_decomposition_str) / sizeof(kernel_decomposition_str[0])) ? 
+ kernel_decomposition_str[decomposition] : + "****"))); + fields.push_back(std::make_pair("stride_x_msb", std::to_string(stride_x_msb))); + fields.push_back(std::make_pair("stride_y_msb", std::to_string(stride_y_msb))); + } +#endif +#endif + }; + // Multi-core parallel mode + struct npu_set_parallel_mode_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t parallel_mode : 1; // Multi-core parallel mode + uint32_t reserved1 : 15; +#ifdef __cplusplus + public: + npu_set_parallel_mode_t(NPU_NAMESPACE::parallel_mode _parallel_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_PARALLEL_MODE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + parallel_mode(static_cast(_parallel_mode) & ((1U << 1) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_parallel_mode_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_PARALLEL_MODE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), parallel_mode(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_PARALLEL_MODE) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_PARALLEL_MODE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_parallel_mode_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_parallel_mode_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::parallel_mode get_parallel_mode() const + { + return static_cast(parallel_mode); + } + CONSTEXPR npu_set_parallel_mode_t &set_parallel_mode(NPU_NAMESPACE::parallel_mode value) + { + parallel_mode = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "parallel_mode", + (parallel_mode < (sizeof(parallel_mode_str) / sizeof(parallel_mode_str[0])) ? 
+ parallel_mode_str[parallel_mode] : + "****"))); + } +#endif +#endif + }; + // Accumulator format + struct npu_set_acc_format_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t acc_format : 2; // Accumulator format + uint32_t reserved1 : 14; +#ifdef __cplusplus + public: + npu_set_acc_format_t(NPU_NAMESPACE::acc_format _acc_format) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + acc_format(static_cast(_acc_format) & ((1U << 2) - 1)), reserved1(0) + { + } + CONSTEXPR npu_set_acc_format_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), acc_format(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACC_FORMAT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_acc_format_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_acc_format_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::acc_format get_acc_format() const + { + return static_cast(acc_format); + } + CONSTEXPR npu_set_acc_format_t &set_acc_format(NPU_NAMESPACE::acc_format value) + { + acc_format = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "acc_format", + (acc_format < (sizeof(acc_format_str) / sizeof(acc_format_str[0])) ? acc_format_str[acc_format] : + "****"))); + } +#endif +#endif + }; + // Activation function and clip range + struct npu_set_activation_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t activation_function : 5; // Activation function (before table lookup) + uint32_t reserved1 : 7; + uint32_t activation_clip_range : 3; // Activation clip range. 
This must be set to 0 if table lookup is not used + uint32_t reserved2 : 1; +#ifdef __cplusplus + public: + npu_set_activation_t(NPU_NAMESPACE::activation_function _activation_function, + NPU_NAMESPACE::activation_clip_range _activation_clip_range) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + activation_function(static_cast(_activation_function) & ((1U << 5) - 1)), reserved1(0), + activation_clip_range(static_cast(_activation_clip_range) & ((1U << 3) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_activation_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), activation_function(0), reserved1(0), + activation_clip_range(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_activation_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_activation_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_function get_activation_function() const + { + return static_cast(activation_function); + } + CONSTEXPR npu_set_activation_t &set_activation_function(NPU_NAMESPACE::activation_function value) + { + activation_function = static_cast(value) & ((1U << 5) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_clip_range get_activation_clip_range() const + { + return static_cast(activation_clip_range); + } + CONSTEXPR npu_set_activation_t &set_activation_clip_range(NPU_NAMESPACE::activation_clip_range value) + { + activation_clip_range = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "activation_function", + (activation_function < (sizeof(activation_function_str) / sizeof(activation_function_str[0])) ? + activation_function_str[activation_function] : + "****"))); + fields.push_back(std::make_pair( + "activation_clip_range", + (activation_clip_range < (sizeof(activation_clip_range_str) / sizeof(activation_clip_range_str[0])) ? 
+ activation_clip_range_str[activation_clip_range] : + "****"))); + } +#endif +#endif + }; + // Lower bound clip + struct npu_set_activation_min_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t clip_boundary : 16; // Clip boundary for OFM activations +#ifdef __cplusplus + public: + npu_set_activation_min_t(uint32_t _clip_boundary) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + clip_boundary(_clip_boundary & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_activation_min_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), clip_boundary(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MIN); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_activation_min_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_activation_min_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_clip_boundary() const + { + return static_cast(clip_boundary); + } + CONSTEXPR npu_set_activation_min_t &set_clip_boundary(uint32_t value) + { + clip_boundary = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("clip_boundary", std::to_string(clip_boundary))); + } +#endif +#endif + }; + // Upper bound clip + struct npu_set_activation_max_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t clip_boundary : 16; // Clip boundary for OFM activations +#ifdef __cplusplus + public: + npu_set_activation_max_t(uint32_t _clip_boundary) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + clip_boundary(_clip_boundary & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_activation_max_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), clip_boundary(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_ACTIVATION_MAX); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return 
static_cast(opcode); + } + CONSTEXPR npu_set_activation_max_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_activation_max_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_clip_boundary() const + { + return static_cast(clip_boundary); + } + CONSTEXPR npu_set_activation_max_t &set_clip_boundary(uint32_t value) + { + clip_boundary = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("clip_boundary", std::to_string(clip_boundary))); + } +#endif +#endif + }; + // Index n for weight stream access + struct npu_set_weight_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Index n for weight stream access + uint32_t reserved1 : 13; +#ifdef __cplusplus + public: + npu_set_weight_region_t(uint32_t _region) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_weight_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_WEIGHT_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_weight_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_weight_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_weight_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + } +#endif +#endif + }; + // Index n for scale stream access + struct npu_set_scale_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Index n for scale stream access + uint32_t reserved1 : 13; +#ifdef __cplusplus + public: + npu_set_scale_region_t(uint32_t _region) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + 
reserved1(0) + { + } + CONSTEXPR npu_set_scale_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_SCALE_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_scale_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_scale_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_scale_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + } +#endif +#endif + }; + // Start of ACC0,ACC1 buffers + struct npu_set_ab_start_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t ab_start : 6; // Start of ACC0,ACC1 buffers in the SHRAM in KB units. 
Multiple of 2 + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_set_ab_start_t(uint32_t _ab_start) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ab_start(_ab_start & ((1U << 6) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ab_start_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ab_start(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_AB_START); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ab_start_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ab_start_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_ab_start() const + { + return static_cast(ab_start); + } + CONSTEXPR npu_set_ab_start_t &set_ab_start(uint32_t value) + { + ab_start = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("ab_start", std::to_string(ab_start))); + } +#endif +#endif + }; + // Block number of blocks dependency + struct npu_set_blockdep_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t blockdep : 2; // Block number of blocks dependency between kernel operations + uint32_t reserved1 : 14; +#ifdef __cplusplus + public: + npu_set_blockdep_t(uint32_t _blockdep) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), blockdep(_blockdep & ((1U << 2) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_blockdep_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), blockdep(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_BLOCKDEP); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_blockdep_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_blockdep_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = 
static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_blockdep() const + { + return static_cast(blockdep); + } + CONSTEXPR npu_set_blockdep_t &set_blockdep(uint32_t value) + { + blockdep = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("blockdep", std::to_string(blockdep))); + } +#endif +#endif + }; + // DMA0 source region + struct npu_set_dma0_src_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Region number + uint32_t reserved1 : 5; + uint32_t region_mode : 1; // Region mode + uint32_t stride_mode : 2; // Stride mode + uint32_t reserved2 : 5; +#ifdef __cplusplus + public: + npu_set_dma0_src_region_t(uint32_t _region, + NPU_NAMESPACE::dma_region_mode _region_mode, + NPU_NAMESPACE::dma_stride_mode _stride_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + region(_region & ((1U << 3) - 1)), reserved1(0), + region_mode(static_cast(_region_mode) & ((1U << 1) - 1)), + stride_mode(static_cast(_stride_mode) & ((1U << 2) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_dma0_src_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0), region_mode(0), + stride_mode(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SRC_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_src_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_src_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_dma0_src_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_region_mode get_region_mode() const + { + return static_cast(region_mode); + } + CONSTEXPR npu_set_dma0_src_region_t &set_region_mode(NPU_NAMESPACE::dma_region_mode value) + { + region_mode = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_stride_mode get_stride_mode() const + { + return static_cast(stride_mode); + } + CONSTEXPR npu_set_dma0_src_region_t &set_stride_mode(NPU_NAMESPACE::dma_stride_mode value) + { + stride_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + fields.push_back(std::make_pair( + "region_mode", 
+ (region_mode < (sizeof(dma_region_mode_str) / sizeof(dma_region_mode_str[0])) ? + dma_region_mode_str[region_mode] : + "****"))); + fields.push_back(std::make_pair( + "stride_mode", + (stride_mode < (sizeof(dma_stride_mode_str) / sizeof(dma_stride_mode_str[0])) ? + dma_stride_mode_str[stride_mode] : + "****"))); + } +#endif +#endif + }; + // DMA0 destination region + struct npu_set_dma0_dst_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Region number if region_mode is region_mode_external. Else core mask to write to (bit k + // set for core k=0,1) + uint32_t reserved1 : 5; + uint32_t region_mode : 1; // Region mode + uint32_t stride_mode : 2; // Stride mode + uint32_t reserved2 : 5; +#ifdef __cplusplus + public: + npu_set_dma0_dst_region_t(uint32_t _region, + NPU_NAMESPACE::dma_region_mode _region_mode, + NPU_NAMESPACE::dma_stride_mode _stride_mode) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + region(_region & ((1U << 3) - 1)), reserved1(0), + region_mode(static_cast(_region_mode) & ((1U << 1) - 1)), + stride_mode(static_cast(_stride_mode) & ((1U << 2) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_dma0_dst_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0), region_mode(0), + stride_mode(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_DST_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_region_mode get_region_mode() const + { + return static_cast(region_mode); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_region_mode(NPU_NAMESPACE::dma_region_mode value) + { + region_mode = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::dma_stride_mode get_stride_mode() const + { + return static_cast(stride_mode); + } + CONSTEXPR npu_set_dma0_dst_region_t &set_stride_mode(NPU_NAMESPACE::dma_stride_mode value) + { + stride_mode = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + fields.push_back(std::make_pair( + "region_mode", + 
(region_mode < (sizeof(dma_region_mode_str) / sizeof(dma_region_mode_str[0])) ? + dma_region_mode_str[region_mode] : + "****"))); + fields.push_back(std::make_pair( + "stride_mode", + (stride_mode < (sizeof(dma_stride_mode_str) / sizeof(dma_stride_mode_str[0])) ? + dma_stride_mode_str[stride_mode] : + "****"))); + } +#endif +#endif + }; + // Size of second dimension for 2D/3D transfers + struct npu_set_dma0_size0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t size : 16; // Size of second dimension for 2D/3D transfers +#ifdef __cplusplus + public: + npu_set_dma0_size0_t(uint32_t _size) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), size(_size & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_dma0_size0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), size(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_size0_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_size0_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_size() const + { + return static_cast(size); + } + CONSTEXPR npu_set_dma0_size0_t &set_size(uint32_t value) + { + size = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("size", std::to_string(size))); + } +#endif +#endif + }; + // Size of third dimension for 3D transfers + struct npu_set_dma0_size1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t size : 16; // Size of third dimension for 3D transfers +#ifdef __cplusplus + public: + npu_set_dma0_size1_t(uint32_t _size) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), size(_size & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_dma0_size1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), size(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_DMA0_SIZE1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, 
sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_dma0_size1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_dma0_size1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_size() const + { + return static_cast(size); + } + CONSTEXPR npu_set_dma0_size1_t &set_size(uint32_t value) + { + size = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("size", std::to_string(size))); + } +#endif +#endif + }; + // IFM2 broadcast configuration + struct npu_set_ifm2_broadcast_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t + broadcast_h : 1; // Broadcast H dimension (if set then any accesses to IFM2 sets y=0 and IFM2 height=1) + uint32_t broadcast_w : 1; // Broadcast W dimension (if set then any accesses to IFM2 sets x=0 and IFM2 width=1) + uint32_t broadcast_c : 1; // Broadcast C dimension (if set then any accesses to IFM2 sets c=0 and IFM2 depth=1) + uint32_t reserved1 : 3; + uint32_t operand_order : 1; // Operand order + uint32_t broadcast_constant : 1; // Broadcast constant given by NPU_SET_IFM2_SCALAR and so ignore BH, BW and BC + uint32_t reserved2 : 8; +#ifdef __cplusplus + public: + npu_set_ifm2_broadcast_t(NPU_NAMESPACE::broadcast_mode _broadcast_h, + NPU_NAMESPACE::broadcast_mode _broadcast_w, + NPU_NAMESPACE::broadcast_mode _broadcast_c, + NPU_NAMESPACE::ifm2_operand_order _operand_order, + NPU_NAMESPACE::broadcast_mode _broadcast_constant) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + broadcast_h(static_cast(_broadcast_h) & ((1U << 1) - 1)), + broadcast_w(static_cast(_broadcast_w) & ((1U << 1) - 1)), + broadcast_c(static_cast(_broadcast_c) & ((1U << 1) - 1)), reserved1(0), + operand_order(static_cast(_operand_order) & ((1U << 1) - 1)), + broadcast_constant(static_cast(_broadcast_constant) & ((1U << 1) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_ifm2_broadcast_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), broadcast_h(0), broadcast_w(0), + broadcast_c(0), reserved1(0), operand_order(0), broadcast_constant(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_BROADCAST); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { 
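+        // Illustrative note, not part of the generated header: per the field comments
+        // above, setting broadcast_h/w/c pins the corresponding IFM2 coordinate to 0
+        // with an extent of 1, while broadcast_constant ignores all three and takes the
+        // operand value from NPU_SET_IFM2_SCALAR instead (see npu_set_ifm2_scalar_t below).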
+ return static_cast(control); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::broadcast_mode get_broadcast_h() const + { + return static_cast(broadcast_h); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_broadcast_h(NPU_NAMESPACE::broadcast_mode value) + { + broadcast_h = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::broadcast_mode get_broadcast_w() const + { + return static_cast(broadcast_w); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_broadcast_w(NPU_NAMESPACE::broadcast_mode value) + { + broadcast_w = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::broadcast_mode get_broadcast_c() const + { + return static_cast(broadcast_c); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_broadcast_c(NPU_NAMESPACE::broadcast_mode value) + { + broadcast_c = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::ifm2_operand_order get_operand_order() const + { + return static_cast(operand_order); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_operand_order(NPU_NAMESPACE::ifm2_operand_order value) + { + operand_order = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::broadcast_mode get_broadcast_constant() const + { + return static_cast(broadcast_constant); + } + CONSTEXPR npu_set_ifm2_broadcast_t &set_broadcast_constant(NPU_NAMESPACE::broadcast_mode value) + { + broadcast_constant = static_cast(value) & ((1U << 1) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "broadcast_h", + (broadcast_h < (sizeof(broadcast_mode_str) / sizeof(broadcast_mode_str[0])) ? + broadcast_mode_str[broadcast_h] : + "****"))); + fields.push_back(std::make_pair( + "broadcast_w", + (broadcast_w < (sizeof(broadcast_mode_str) / sizeof(broadcast_mode_str[0])) ? + broadcast_mode_str[broadcast_w] : + "****"))); + fields.push_back(std::make_pair( + "broadcast_c", + (broadcast_c < (sizeof(broadcast_mode_str) / sizeof(broadcast_mode_str[0])) ? + broadcast_mode_str[broadcast_c] : + "****"))); + fields.push_back(std::make_pair( + "operand_order", + (operand_order < (sizeof(ifm2_operand_order_str) / sizeof(ifm2_operand_order_str[0])) ? + ifm2_operand_order_str[operand_order] : + "****"))); + fields.push_back(std::make_pair( + "broadcast_constant", + (broadcast_constant < (sizeof(broadcast_mode_str) / sizeof(broadcast_mode_str[0])) ? 
+ broadcast_mode_str[broadcast_constant] : + "****"))); + } +#endif +#endif + }; + // IFM2 scalar value + struct npu_set_ifm2_scalar_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t scalar : 16; // int16 or uint16 depending on ifm2_precision.type +#ifdef __cplusplus + public: + npu_set_ifm2_scalar_t(uint32_t _scalar) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), scalar(_scalar & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_scalar_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), scalar(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_SCALAR); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_scalar_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_scalar_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_scalar() const + { + return static_cast(scalar); + } + CONSTEXPR npu_set_ifm2_scalar_t &set_scalar(uint32_t value) + { + scalar = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("scalar", std::to_string(scalar))); + } +#endif +#endif + }; + // IFM2 Precision + struct npu_set_ifm2_precision_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t activation_type : 1; // IFM type - MUST MATCH IFM + uint32_t reserved1 : 1; + uint32_t activation_precision : 2; // IFM precision - MUST MATCH IFM + uint32_t reserved2 : 2; + uint32_t activation_format : 2; // IFM format + uint32_t reserved3 : 8; +#ifdef __cplusplus + public: + npu_set_ifm2_precision_t(NPU_NAMESPACE::activation_type _activation_type, + NPU_NAMESPACE::activation_precision _activation_precision, + NPU_NAMESPACE::activation_format _activation_format) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION)), + reserved0(0), control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + activation_type(static_cast(_activation_type) & ((1U << 1) - 1)), reserved1(0), + activation_precision(static_cast(_activation_precision) & ((1U << 2) - 1)), reserved2(0), + activation_format(static_cast(_activation_format) & ((1U << 2) - 1)), reserved3(0) + { + } + CONSTEXPR npu_set_ifm2_precision_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), activation_type(0), reserved1(0), + activation_precision(0), reserved2(0), activation_format(0), reserved3(0) + { + } + 
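// Illustrative sketch, not part of this header or patch: per the comment on the
// scalar field above, its 16 bits hold an int16 or uint16 pattern depending on the
// IFM2 precision. A hypothetical helper for packing a signed value before the
// npu_set_ifm2_scalar_t constructor masks it to 16 bits:

#include <cstdint>

inline uint32_t pack_ifm2_scalar_i16(int16_t value)
{
    // Two's-complement bits, zero-extended; e.g. -3 becomes 0xFFFD.
    return static_cast<uint16_t>(value);
}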
CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_PRECISION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_precision_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_precision_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_type get_activation_type() const + { + return static_cast(activation_type); + } + CONSTEXPR npu_set_ifm2_precision_t &set_activation_type(NPU_NAMESPACE::activation_type value) + { + activation_type = static_cast(value) & ((1U << 1) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_precision get_activation_precision() const + { + return static_cast(activation_precision); + } + CONSTEXPR npu_set_ifm2_precision_t &set_activation_precision(NPU_NAMESPACE::activation_precision value) + { + activation_precision = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::activation_format get_activation_format() const + { + return static_cast(activation_format); + } + CONSTEXPR npu_set_ifm2_precision_t &set_activation_format(NPU_NAMESPACE::activation_format value) + { + activation_format = static_cast(value) & ((1U << 2) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair( + "activation_type", + (activation_type < (sizeof(activation_type_str) / sizeof(activation_type_str[0])) ? + activation_type_str[activation_type] : + "****"))); + fields.push_back(std::make_pair( + "activation_precision", + (activation_precision < (sizeof(activation_precision_str) / sizeof(activation_precision_str[0])) ? + activation_precision_str[activation_precision] : + "****"))); + fields.push_back(std::make_pair( + "activation_format", + (activation_format < (sizeof(activation_format_str) / sizeof(activation_format_str[0])) ? 
+ activation_format_str[activation_format] : + "****"))); + } +#endif +#endif + }; + // IFM2 zero point + struct npu_set_ifm2_zero_point_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t zero_point : 16; // Zero point offset +#ifdef __cplusplus + public: + npu_set_ifm2_zero_point_t(uint32_t _zero_point) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), + zero_point(_zero_point & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_zero_point_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), zero_point(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_ZERO_POINT); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_zero_point_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_zero_point_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_zero_point() const + { + return static_cast(zero_point); + } + CONSTEXPR npu_set_ifm2_zero_point_t &set_zero_point(uint32_t value) + { + zero_point = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("zero_point", std::to_string(zero_point))); + } +#endif +#endif + }; + // IFM2 Tile 0 and tile 2 width + struct npu_set_ifm2_width0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t width_m1 : 16; // IFM2 Tile 0 and tile 2 width +#ifdef __cplusplus + public: + npu_set_ifm2_width0_m1_t(uint32_t _width_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(_width_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_width0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), width_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_WIDTH0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_width0_m1_t 
&set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_width0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_width_m1() const + { + return static_cast(width_m1); + } + CONSTEXPR npu_set_ifm2_width0_m1_t &set_width_m1(uint32_t value) + { + width_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("width_m1", std::to_string(width_m1))); + } +#endif +#endif + }; + // IFM2 Tile 0 height + struct npu_set_ifm2_height0_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // IFM2 Tile 0 height +#ifdef __cplusplus + public: + npu_set_ifm2_height0_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_height0_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT0_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_height0_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_height0_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ifm2_height0_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // IFM2 Tile 1 height + struct npu_set_ifm2_height1_m1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t height_m1 : 16; // IFM2 Tile 1 height +#ifdef __cplusplus + public: + npu_set_ifm2_height1_m1_t(uint32_t _height_m1) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(_height_m1 & ((1U << 16) - 1)) + { + } + CONSTEXPR npu_set_ifm2_height1_m1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1)), reserved0(0), + 
control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), height_m1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_HEIGHT1_M1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_height1_m1_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_height1_m1_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_height_m1() const + { + return static_cast(height_m1); + } + CONSTEXPR npu_set_ifm2_height1_m1_t &set_height_m1(uint32_t value) + { + height_m1 = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("height_m1", std::to_string(height_m1))); + } +#endif +#endif + }; + // Start of IB0,IB1 buffers for IFM2 + struct npu_set_ifm2_ib_start_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t ib_start : 6; // Start of IB0,IB1 buffers for IFM2 in the SHRAM in KB units. Multiple of 2 + uint32_t reserved1 : 10; +#ifdef __cplusplus + public: + npu_set_ifm2_ib_start_t(uint32_t _ib_start) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ib_start(_ib_start & ((1U << 6) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ifm2_ib_start_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), ib_start(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_IB_START); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_ib_start_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_ib_start_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_ib_start() const + { + return static_cast(ib_start); + } + CONSTEXPR npu_set_ifm2_ib_start_t &set_ib_start(uint32_t value) + { + ib_start = static_cast(value) & ((1U << 6) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const 
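// Illustrative sketch, not part of this header or patch: IFM2_IB_START is given in
// KB, must be a multiple of 2 and has to fit the 6-bit field above, so a
// command-stream generator might validate it up front (hypothetical helper):

#include <cstdint>
#include <stdexcept>

inline uint32_t checked_ifm2_ib_start(uint32_t start_kb)
{
    if ((start_kb % 2U) != 0U || start_kb > ((1U << 6) - 1))
        throw std::invalid_argument("IFM2 IB start must be an even KB offset that fits in 6 bits");
    return start_kb;
}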
+ { + fields.push_back(std::make_pair("ib_start", std::to_string(ib_start))); + } +#endif +#endif + }; + // Index n for IFM2 access + struct npu_set_ifm2_region_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t region : 3; // Index n for IFM2 access + uint32_t reserved1 : 13; +#ifdef __cplusplus + public: + npu_set_ifm2_region_t(uint32_t _region) : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(_region & ((1U << 3) - 1)), + reserved1(0) + { + } + CONSTEXPR npu_set_ifm2_region_t() : + opcode(static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL)), region(0), reserved1(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION) && + control == static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd0_opcode::NPU_SET_IFM2_REGION); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD0_CTRL); + } + operator uint32_t() + { + uint32_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd0_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ifm2_region_t &set_opcode(NPU_NAMESPACE::cmd0_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ifm2_region_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_region() const + { + return static_cast(region); + } + CONSTEXPR npu_set_ifm2_region_t &set_region(uint32_t value) + { + region = static_cast(value) & ((1U << 3) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("region", std::to_string(region))); + } +#endif +#endif + }; + // IFM Tile 0 address + struct npu_set_ifm_base0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_base0_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm_base0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() 
const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm_base0_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM Tile 1 address + struct npu_set_ifm_base1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_base1_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm_base1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm_base1_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM Tile 2 address + struct npu_set_ifm_base2_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_base2_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm_base2_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE2); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); 
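// Illustrative sketch, not part of this header or patch: the base-address commands
// carry a 40-bit byte offset split into an 8-bit addr_hi and a 32-bit addr_lo,
// mirroring the set_addr()/get_addr() pattern used here. A stand-alone round trip:

#include <cassert>
#include <cstdint>
#include <limits>

struct addr40
{
    uint32_t addr_hi; // offset bits [39:32]
    uint32_t addr_lo; // offset bits [31:0]

    void set(uint64_t value)
    {
        addr_lo = static_cast<uint32_t>(value & std::numeric_limits<uint32_t>::max());
        addr_hi = static_cast<uint32_t>((value >> 32) & std::numeric_limits<uint8_t>::max());
    }
    uint64_t get() const
    {
        return (static_cast<uint64_t>(addr_hi) << 32) | addr_lo;
    }
};

inline void addr40_round_trip_example()
{
    addr40 a{};
    a.set(0x123456789AULL);             // any 40-bit byte offset
    assert(a.get() == 0x123456789AULL); // the hi/lo split is lossless for 40-bit values
}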
+ } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm_base2_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM Tile 3 address + struct npu_set_ifm_base3_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_base3_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm_base3_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_BASE3); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm_base3_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM byte stride between horizontal values + struct npu_set_ifm_stride_x_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_stride_x_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm_stride_x_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X) && control >= 1 && 
+ control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_X); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm_stride_x_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM byte stride between vertical values + struct npu_set_ifm_stride_y_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_stride_y_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm_stride_y_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_Y); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm_stride_y_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM byte stride between channel blocks (of 16 bytes each block) + struct npu_set_ifm_stride_c_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm_stride_c_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm_stride_c_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C)), 
reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM_STRIDE_C); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm_stride_c_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM Tile 0 address + struct npu_set_ofm_base0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_base0_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ofm_base0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ofm_base0_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM Tile 1 address + struct npu_set_ofm_base1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_base1_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + 
addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ofm_base1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ofm_base1_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM Tile 2 address + struct npu_set_ofm_base2_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_base2_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ofm_base2_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE2); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ofm_base2_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM Tile 3 address + struct npu_set_ofm_base3_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_base3_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3)), reserved0(0), + 
control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ofm_base3_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_BASE3); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ofm_base3_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM byte stride between horizontal values + struct npu_set_ofm_stride_x_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_stride_x_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ofm_stride_x_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_X); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ofm_stride_x_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM byte stride between vertical values + struct npu_set_ofm_stride_y_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t 
reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_stride_y_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ofm_stride_y_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_Y); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ofm_stride_y_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // OFM byte stride between channel blocks (of 16 bytes each block) + struct npu_set_ofm_stride_c_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ofm_stride_c_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ofm_stride_c_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_STRIDE_C); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ofm_stride_c_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Weight 
stream byte offset in WEIGHT_REGION + struct npu_set_weight_base_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_weight_base_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_weight_base_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_BASE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_weight_base_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Weight stream byte length + struct npu_set_weight_length_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t length : 32; // Weight stream byte length +#ifdef __cplusplus + public: + npu_set_weight_length_t(uint32_t _length) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(_length) + { + } + CONSTEXPR npu_set_weight_length_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT_LENGTH); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_weight_length_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_weight_length_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + 
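// Illustrative sketch, not part of this header or patch: NPU_SET_WEIGHT_BASE carries
// a byte offset inside the region selected by WEIGHT_REGION and NPU_SET_WEIGHT_LENGTH
// the byte length of the weight stream, i.e. together they describe the range
// [base, base + length). A hypothetical bounds check a generator could apply before
// emitting the pair:

#include <cstdint>

inline bool weight_stream_fits(uint64_t base, uint32_t length, uint64_t region_size)
{
    return base <= region_size && length <= region_size - base;
}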
return *this; + } + CONSTEXPR uint32_t get_length() const + { + return static_cast(length); + } + CONSTEXPR npu_set_weight_length_t &set_length(uint32_t value) + { + length = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("length", std::to_string(length))); + } +#endif +#endif + }; + // Scale and bias stream input byte offset from SCALE_REGION + struct npu_set_scale_base_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_scale_base_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_scale_base_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_BASE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_scale_base_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Scale and bias stream input byte length + struct npu_set_scale_length_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t length : 20; // Scale and bias stream byte length + uint32_t reserved2 : 12; +#ifdef __cplusplus + public: + npu_set_scale_length_t(uint32_t _length) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), + length(_length & ((1U << 20) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_scale_length_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE_LENGTH); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + 
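// Illustrative sketch, not part of this header or patch: a CMD1 command such as the
// base/stride/length setters here occupies two consecutive 32-bit words in the
// command stream. On a little-endian host (which the memcpy-based conversion
// operators above also rely on for this word order), one hypothetical way to append
// a command to a word buffer is:

#include <cstdint>
#include <vector>

template <typename Cmd1>
void emit_cmd1(std::vector<uint32_t> &stream, Cmd1 cmd)
{
    const uint64_t word = static_cast<uint64_t>(cmd);            // uses the struct's operator uint64_t()
    stream.push_back(static_cast<uint32_t>(word & 0xFFFFFFFFu)); // word 0: opcode, control, high payload bits
    stream.push_back(static_cast<uint32_t>(word >> 32));         // word 1: 32-bit payload (addr_lo, length, scale, ...)
}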
CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_scale_length_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_scale_length_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_length() const + { + return static_cast(length); + } + CONSTEXPR npu_set_scale_length_t &set_length(uint32_t value) + { + length = value & ((1U << 20) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("length", std::to_string(length))); + } +#endif +#endif + }; + // OFM scale + struct npu_set_ofm_scale_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t shift : 6; // Shift + uint32_t reserved1 : 10; + uint32_t scale : 32; // Scale. Not applied for 32-bit operations +#ifdef __cplusplus + public: + npu_set_ofm_scale_t(uint32_t _shift, uint32_t _scale) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), shift(_shift & ((1U << 6) - 1)), + reserved1(0), scale(_scale) + { + } + CONSTEXPR npu_set_ofm_scale_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), shift(0), reserved1(0), scale(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OFM_SCALE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_ofm_scale_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_ofm_scale_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_shift() const + { + return static_cast(shift); + } + CONSTEXPR npu_set_ofm_scale_t &set_shift(uint32_t value) + { + shift = static_cast(value) & ((1U << 6) - 1); + return *this; + } + CONSTEXPR uint32_t get_scale() const + { + return static_cast(scale); + } + CONSTEXPR npu_set_ofm_scale_t &set_scale(uint32_t value) + { + scale = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("shift", std::to_string(shift))); + fields.push_back(std::make_pair("scale", std::to_string(scale))); + } +#endif +#endif + }; + // Input operand A scale + struct npu_set_opa_scale_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t shift : 6; // Shift. Ignored if IFM scale mode is 0 + uint32_t reserved1 : 10; + uint32_t scale : 32; // Scale. 
16-bit if IFM scale mode is 0 +#ifdef __cplusplus + public: + npu_set_opa_scale_t(uint32_t _shift, uint32_t _scale) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), shift(_shift & ((1U << 6) - 1)), + reserved1(0), scale(_scale) + { + } + CONSTEXPR npu_set_opa_scale_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), shift(0), reserved1(0), scale(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPA_SCALE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_opa_scale_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_opa_scale_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_shift() const + { + return static_cast(shift); + } + CONSTEXPR npu_set_opa_scale_t &set_shift(uint32_t value) + { + shift = static_cast(value) & ((1U << 6) - 1); + return *this; + } + CONSTEXPR uint32_t get_scale() const + { + return static_cast(scale); + } + CONSTEXPR npu_set_opa_scale_t &set_scale(uint32_t value) + { + scale = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("shift", std::to_string(shift))); + fields.push_back(std::make_pair("scale", std::to_string(scale))); + } +#endif +#endif + }; + // Input operand B scale + struct npu_set_opb_scale_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t scale : 16; // Scale. 
Not used if IFM scale mode is 1 or 2 + uint32_t reserved2 : 16; +#ifdef __cplusplus + public: + npu_set_opb_scale_t(uint32_t _scale) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), + scale(_scale & ((1U << 16) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_opb_scale_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), scale(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_OPB_SCALE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_opb_scale_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_opb_scale_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_scale() const + { + return static_cast(scale); + } + CONSTEXPR npu_set_opb_scale_t &set_scale(uint32_t value) + { + scale = static_cast(value) & ((1U << 16) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("scale", std::to_string(scale))); + } +#endif +#endif + }; + // DMA user channel 0 source byte offset from DMA0_SRC_REGION + struct npu_set_dma0_src_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_dma0_src_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_dma0_src_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SRC); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_dma0_src_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void 
disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // DMA user channel 0 destination byte offset from DMA0_DST_REGION + struct npu_set_dma0_dst_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_dma0_dst_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_dma0_dst_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_DST); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_dma0_dst_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // DMA user channel 0 transfer length in bytes for each 1D transfer + struct npu_set_dma0_len_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_dma0_len_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_dma0_len_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_LEN); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR 
npu_set_dma0_len_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // byte distance to skip after each inner (1D) transfer (2D/3D mode) (any alignment) + struct npu_set_dma0_skip0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_dma0_skip0_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_dma0_skip0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP0) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_dma0_skip0_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // byte distance to skip after each 2D transfer (3D mode) (any alignment) + struct npu_set_dma0_skip1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_dma0_skip1_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_dma0_skip1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP1) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_DMA0_SKIP1); + 
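+            // CMD1_CTRL below identifies the 8-byte (CMD1) command format; init()
+            // only re-stamps these identification bits and leaves the addr_hi/addr_lo
+            // payload untouched.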
control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_dma0_skip1_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 Tile 0 address + struct npu_set_ifm2_base0_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_base0_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm2_base0_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE0); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm2_base0_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 Tile 1 address + struct npu_set_ifm2_base1_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_base1_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm2_base1_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == 
static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE1); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm2_base1_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 Tile 2 address + struct npu_set_ifm2_base2_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_base2_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm2_base2_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE2); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm2_base2_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 Tile 3 address + struct npu_set_ifm2_base3_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_base3_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm2_base3_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3)), reserved0(0), + 
control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_BASE3); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm2_base3_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 byte stride between horizontal values + struct npu_set_ifm2_stride_x_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_stride_x_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm2_stride_x_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_X); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm2_stride_x_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 byte stride between vertical values + struct npu_set_ifm2_stride_y_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_stride_y_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & 
std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm2_stride_y_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_Y); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm2_stride_y_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // IFM2 byte stride between channel blocks (of 16 bytes each block) + struct npu_set_ifm2_stride_c_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_ifm2_stride_c_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_ifm2_stride_c_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_IFM2_STRIDE_C); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_ifm2_stride_c_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Weight stream byte offset in WEIGHT_REGION for core 1 + struct npu_set_weight1_base_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo 
: 32; // address offset +#ifdef __cplusplus + public: + npu_set_weight1_base_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_weight1_base_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_BASE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_BASE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_weight1_base_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Weight stream byte length for core 1 + struct npu_set_weight1_length_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t length : 32; // Weight stream byte length +#ifdef __cplusplus + public: + npu_set_weight1_length_t(uint32_t _length) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(_length) + { + } + CONSTEXPR npu_set_weight1_length_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_LENGTH) && + control >= 1 && control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_WEIGHT1_LENGTH); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_weight1_length_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_weight1_length_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_length() const + { + return static_cast(length); + } + CONSTEXPR npu_set_weight1_length_t &set_length(uint32_t value) + { + length = value; + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) 
const + { + fields.push_back(std::make_pair("length", std::to_string(length))); + } +#endif +#endif + }; + // Scale and bias stream input byte offset from SCALE_REGION for core 1 + struct npu_set_scale1_base_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t addr_hi : 8; // address extension + uint32_t reserved1 : 8; + uint32_t addr_lo : 32; // address offset +#ifdef __cplusplus + public: + npu_set_scale1_base_t(uint64_t _addr) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), + addr_hi(static_cast((_addr >> 32) & std::numeric_limits::max())), reserved1(0), + addr_lo(static_cast((_addr)&std::numeric_limits::max())) + { + } + CONSTEXPR npu_set_scale1_base_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_BASE)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), addr_hi(0), reserved1(0), addr_lo(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_BASE) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_BASE); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR uint64_t get_addr() const + { + return (static_cast(addr_hi) << 32) | addr_lo; + } + CONSTEXPR npu_set_scale1_base_t &set_addr(uint64_t value) + { + addr_lo = static_cast((value)&std::numeric_limits::max()); + addr_hi = static_cast((value >> 32) & std::numeric_limits::max()); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + std::stringstream saddr; + saddr << std::hex << "0x" << get_addr(); + fields.push_back(std::make_pair("addr", saddr.str())); + } +#endif +#endif + }; + // Scale and bias stream input byte length for core 1 + struct npu_set_scale1_length_t + { +#ifdef __cplusplus + private: +#endif + uint32_t opcode : 10; // opcode + uint32_t reserved0 : 4; + uint32_t control : 2; // control + uint32_t reserved1 : 16; + uint32_t length : 20; // Scale and bias stream byte length + uint32_t reserved2 : 12; +#ifdef __cplusplus + public: + npu_set_scale1_length_t(uint32_t _length) : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), + length(_length & ((1U << 20) - 1)), reserved2(0) + { + } + CONSTEXPR npu_set_scale1_length_t() : + opcode(static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_LENGTH)), reserved0(0), + control(static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL)), reserved1(0), length(0), reserved2(0) + { + } + CONSTEXPR bool valid() const + { + return opcode == static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_LENGTH) && control >= 1 && + control <= 2; + } + CONSTEXPR void init() + { + opcode = static_cast(NPU_NAMESPACE::cmd1_opcode::NPU_SET_SCALE1_LENGTH); + control = static_cast(NPU_NAMESPACE::cmd_ctrl::CMD1_CTRL); + } + operator uint64_t() + { + uint64_t word; + std::memcpy(&word, this, sizeof(word)); + return word; + } + CONSTEXPR NPU_NAMESPACE::cmd1_opcode get_opcode() const + { + return static_cast(opcode); + } + CONSTEXPR npu_set_scale1_length_t &set_opcode(NPU_NAMESPACE::cmd1_opcode value) + { + opcode = static_cast(value) & ((1U << 10) - 1); + 
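+            // The cast above is masked to the 10-bit opcode field so an
+            // out-of-range value cannot spill into the adjacent reserved and
+            // control bits.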
return *this; + } + CONSTEXPR NPU_NAMESPACE::cmd_ctrl get_control() const + { + return static_cast(control); + } + CONSTEXPR npu_set_scale1_length_t &set_control(NPU_NAMESPACE::cmd_ctrl value) + { + control = static_cast(value) & ((1U << 2) - 1); + return *this; + } + CONSTEXPR uint32_t get_length() const + { + return static_cast(length); + } + CONSTEXPR npu_set_scale1_length_t &set_length(uint32_t value) + { + length = value & ((1U << 20) - 1); + return *this; + } +#ifdef NPU_DISASSEMBLE + void disassemble(std::vector> &fields) const + { + fields.push_back(std::make_pair("length", std::to_string(length))); + } +#endif +#endif + }; +#ifdef __cplusplus +}; +#endif +#define NPU_OP_STRUCTS \ + NPU_OP_(stop) \ + NPU_OP_(irq) \ + NPU_OP_(conv) \ + NPU_OP_(depthwise) \ + NPU_OP_(pool) \ + NPU_OP_(elementwise) \ + NPU_OP_(dma_start) \ + NPU_OP_(dma_wait) \ + NPU_OP_(kernel_wait) \ + NPU_OP_(pmu_mask) + +#define NPU_SET_STRUCTS \ + NPU_SET_(ifm_pad_top) \ + NPU_SET_(ifm_pad_left) \ + NPU_SET_(ifm_pad_right) \ + NPU_SET_(ifm_pad_bottom) \ + NPU_SET_(ifm_depth_m1) \ + NPU_SET_(ifm_precision) \ + NPU_SET_(ifm_upscale) \ + NPU_SET_(ifm_zero_point) \ + NPU_SET_(ifm_width0_m1) \ + NPU_SET_(ifm_height0_m1) \ + NPU_SET_(ifm_height1_m1) \ + NPU_SET_(ifm_ib_end) \ + NPU_SET_(ifm_region) \ + NPU_SET_(ofm_width_m1) \ + NPU_SET_(ofm_height_m1) \ + NPU_SET_(ofm_depth_m1) \ + NPU_SET_(ofm_precision) \ + NPU_SET_(ofm_blk_width_m1) \ + NPU_SET_(ofm_blk_height_m1) \ + NPU_SET_(ofm_blk_depth_m1) \ + NPU_SET_(ofm_zero_point) \ + NPU_SET_(ofm_width0_m1) \ + NPU_SET_(ofm_height0_m1) \ + NPU_SET_(ofm_height1_m1) \ + NPU_SET_(ofm_region) \ + NPU_SET_(kernel_width_m1) \ + NPU_SET_(kernel_height_m1) \ + NPU_SET_(kernel_stride) \ + NPU_SET_(parallel_mode) \ + NPU_SET_(acc_format) \ + NPU_SET_(activation) \ + NPU_SET_(activation_min) \ + NPU_SET_(activation_max) \ + NPU_SET_(weight_region) \ + NPU_SET_(scale_region) \ + NPU_SET_(ab_start) \ + NPU_SET_(blockdep) \ + NPU_SET_(dma0_src_region) \ + NPU_SET_(dma0_dst_region) \ + NPU_SET_(dma0_size0) \ + NPU_SET_(dma0_size1) \ + NPU_SET_(ifm2_broadcast) \ + NPU_SET_(ifm2_scalar) \ + NPU_SET_(ifm2_precision) \ + NPU_SET_(ifm2_zero_point) \ + NPU_SET_(ifm2_width0_m1) \ + NPU_SET_(ifm2_height0_m1) \ + NPU_SET_(ifm2_height1_m1) \ + NPU_SET_(ifm2_ib_start) \ + NPU_SET_(ifm2_region) \ + NPU_SET_(ifm_base0) \ + NPU_SET_(ifm_base1) \ + NPU_SET_(ifm_base2) \ + NPU_SET_(ifm_base3) \ + NPU_SET_(ifm_stride_x) \ + NPU_SET_(ifm_stride_y) \ + NPU_SET_(ifm_stride_c) \ + NPU_SET_(ofm_base0) \ + NPU_SET_(ofm_base1) \ + NPU_SET_(ofm_base2) \ + NPU_SET_(ofm_base3) \ + NPU_SET_(ofm_stride_x) \ + NPU_SET_(ofm_stride_y) \ + NPU_SET_(ofm_stride_c) \ + NPU_SET_(weight_base) \ + NPU_SET_(weight_length) \ + NPU_SET_(scale_base) \ + NPU_SET_(scale_length) \ + NPU_SET_(ofm_scale) \ + NPU_SET_(opa_scale) \ + NPU_SET_(opb_scale) \ + NPU_SET_(dma0_src) \ + NPU_SET_(dma0_dst) \ + NPU_SET_(dma0_len) \ + NPU_SET_(dma0_skip0) \ + NPU_SET_(dma0_skip1) \ + NPU_SET_(ifm2_base0) \ + NPU_SET_(ifm2_base1) \ + NPU_SET_(ifm2_base2) \ + NPU_SET_(ifm2_base3) \ + NPU_SET_(ifm2_stride_x) \ + NPU_SET_(ifm2_stride_y) \ + NPU_SET_(ifm2_stride_c) \ + NPU_SET_(weight1_base) \ + NPU_SET_(weight1_length) \ + NPU_SET_(scale1_base) \ + NPU_SET_(scale1_length) + +#define EXPAND_ACC_FORMAT(FUNC, SEP) FUNC(acc_format, I32) SEP FUNC(acc_format, I40) SEP FUNC(acc_format, F16) + +#define EXPAND_ACTIVATION_CLIP_RANGE(FUNC, SEP) \ + FUNC(activation_clip_range, OFM_PRECISION) \ + SEP FUNC(activation_clip_range, FORCE_UINT8) SEP 
FUNC(activation_clip_range, FORCE_INT8) \ + SEP FUNC(activation_clip_range, FORCE_INT16) + +#define EXPAND_ACTIVATION_FORMAT(FUNC, SEP) FUNC(activation_format, NHWC) SEP FUNC(activation_format, NHCWB16) + +#define EXPAND_ACTIVATION_FUNCTION(FUNC, SEP) \ + FUNC(activation_function, RELU) \ + SEP FUNC(activation_function, TANH) SEP FUNC(activation_function, SIGMOID) SEP FUNC(activation_function, TABLE_0) \ + SEP FUNC(activation_function, TABLE_1) SEP FUNC(activation_function, TABLE_2) \ + SEP FUNC(activation_function, TABLE_3) SEP FUNC(activation_function, TABLE_4) \ + SEP FUNC(activation_function, TABLE_5) SEP FUNC(activation_function, TABLE_6) \ + SEP FUNC(activation_function, TABLE_7) + +#define EXPAND_ACTIVATION_PRECISION(FUNC, SEP) \ + FUNC(activation_precision, B8) \ + SEP FUNC(activation_precision, B16) SEP FUNC(activation_precision, B32) SEP FUNC(activation_precision, B64) + +#define EXPAND_ACTIVATION_TYPE(FUNC, SEP) FUNC(activation_type, UNSIGNED) SEP FUNC(activation_type, SIGNED) + +#define EXPAND_AXI_MEM_ENCODING(FUNC, SEP) \ + FUNC(axi_mem_encoding, DEVICE_NON_BUFFERABLE) \ + SEP FUNC(axi_mem_encoding, DEVICE_BUFFERABLE) SEP FUNC(axi_mem_encoding, NORMAL_NON_CACHEABLE_NON_BUFFERABLE) \ + SEP FUNC(axi_mem_encoding, NORMAL_NON_CACHEABLE_BUFFERABLE) \ + SEP FUNC(axi_mem_encoding, WRITE_THROUGH_NO_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_THROUGH_READ_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_THROUGH_WRITE_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_THROUGH_READ_AND_WRITE_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_BACK_NO_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_BACK_READ_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_BACK_WRITE_ALLOCATE) \ + SEP FUNC(axi_mem_encoding, WRITE_BACK_READ_AND_WRITE_ALLOCATE) + +#define EXPAND_BROADCAST_MODE(FUNC, SEP) FUNC(broadcast_mode, DISABLE) SEP FUNC(broadcast_mode, ENABLE) + +#define EXPAND_CMD0_OPCODE(FUNC, SEP) \ + FUNC(cmd0_opcode, NPU_OP_STOP) \ + SEP FUNC(cmd0_opcode, NPU_OP_IRQ) SEP FUNC(cmd0_opcode, NPU_OP_CONV) SEP FUNC( \ + cmd0_opcode, NPU_OP_DEPTHWISE) SEP FUNC(cmd0_opcode, NPU_OP_POOL) SEP FUNC(cmd0_opcode, NPU_OP_ELEMENTWISE) \ + SEP FUNC(cmd0_opcode, NPU_OP_DMA_START) SEP FUNC(cmd0_opcode, NPU_OP_DMA_WAIT) SEP FUNC( \ + cmd0_opcode, NPU_OP_KERNEL_WAIT) SEP FUNC(cmd0_opcode, NPU_OP_PMU_MASK) SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM_PAD_TOP) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM_PAD_LEFT) SEP FUNC(cmd0_opcode, NPU_SET_IFM_PAD_RIGHT) SEP FUNC( \ + cmd0_opcode, NPU_SET_IFM_PAD_BOTTOM) SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM_DEPTH_M1) SEP FUNC(cmd0_opcode, \ + NPU_SET_IFM_PRECISION) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM_UPSCALE) SEP FUNC(cmd0_opcode, NPU_SET_IFM_ZERO_POINT) SEP FUNC( \ + cmd0_opcode, NPU_SET_IFM_WIDTH0_M1) SEP FUNC(cmd0_opcode, NPU_SET_IFM_HEIGHT0_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_IFM_HEIGHT1_M1) SEP FUNC(cmd0_opcode, NPU_SET_IFM_IB_END) SEP FUNC( \ + cmd0_opcode, NPU_SET_IFM_REGION) SEP FUNC(cmd0_opcode, NPU_SET_OFM_WIDTH_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_OFM_HEIGHT_M1) SEP FUNC(cmd0_opcode, NPU_SET_OFM_DEPTH_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_OFM_PRECISION) SEP FUNC( \ + cmd0_opcode, NPU_SET_OFM_BLK_WIDTH_M1) SEP FUNC(cmd0_opcode, \ + NPU_SET_OFM_BLK_HEIGHT_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_OFM_BLK_DEPTH_M1) SEP FUNC( \ + cmd0_opcode, NPU_SET_OFM_ZERO_POINT) SEP FUNC(cmd0_opcode, NPU_SET_OFM_WIDTH0_M1) \ + SEP FUNC(cmd0_opcode, NPU_SET_OFM_HEIGHT0_M1) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_OFM_HEIGHT1_M1) SEP FUNC(cmd0_opcode, NPU_SET_OFM_REGION) \ + SEP FUNC(cmd0_opcode, 
NPU_SET_KERNEL_WIDTH_M1) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_KERNEL_HEIGHT_M1) SEP FUNC(cmd0_opcode, NPU_SET_KERNEL_STRIDE) \ + SEP FUNC(cmd0_opcode, NPU_SET_PARALLEL_MODE) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_ACC_FORMAT) SEP FUNC(cmd0_opcode, NPU_SET_ACTIVATION) \ + SEP FUNC(cmd0_opcode, \ + NPU_SET_ACTIVATION_MIN) SEP FUNC(cmd0_opcode, \ + NPU_SET_ACTIVATION_MAX) \ + SEP FUNC(cmd0_opcode, NPU_SET_WEIGHT_REGION) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_SCALE_REGION) SEP FUNC(cmd0_opcode, NPU_SET_AB_START) \ + SEP FUNC(cmd0_opcode, NPU_SET_BLOCKDEP) \ + SEP FUNC(cmd0_opcode, NPU_SET_DMA0_SRC_REGION) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_DMA0_DST_REGION) SEP FUNC(cmd0_opcode, \ + NPU_SET_DMA0_SIZE0) \ + SEP FUNC(cmd0_opcode, NPU_SET_DMA0_SIZE1) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_IFM2_BROADCAST) SEP \ + FUNC(cmd0_opcode, NPU_SET_IFM2_SCALAR) SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_IFM2_PRECISION) SEP \ + FUNC(cmd0_opcode, NPU_SET_IFM2_ZERO_POINT) SEP \ + FUNC(cmd0_opcode, \ + NPU_SET_IFM2_WIDTH0_M1) SEP \ + FUNC(cmd0_opcode, \ + NPU_SET_IFM2_HEIGHT0_M1) SEP \ + FUNC(cmd0_opcode, \ + NPU_SET_IFM2_HEIGHT1_M1) \ + SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_IFM2_IB_START) \ + SEP FUNC( \ + cmd0_opcode, \ + NPU_SET_IFM2_REGION) + +#define EXPAND_CMD1_OPCODE(FUNC, SEP) \ + FUNC(cmd1_opcode, NPU_SET_IFM_BASE0) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM_BASE1) SEP FUNC(cmd1_opcode, NPU_SET_IFM_BASE2) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM_BASE3) SEP FUNC(cmd1_opcode, NPU_SET_IFM_STRIDE_X) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM_STRIDE_Y) SEP FUNC(cmd1_opcode, NPU_SET_IFM_STRIDE_C) SEP FUNC( \ + cmd1_opcode, NPU_SET_OFM_BASE0) SEP FUNC(cmd1_opcode, NPU_SET_OFM_BASE1) \ + SEP FUNC(cmd1_opcode, NPU_SET_OFM_BASE2) SEP FUNC(cmd1_opcode, NPU_SET_OFM_BASE3) SEP FUNC( \ + cmd1_opcode, NPU_SET_OFM_STRIDE_X) SEP FUNC(cmd1_opcode, NPU_SET_OFM_STRIDE_Y) \ + SEP FUNC(cmd1_opcode, NPU_SET_OFM_STRIDE_C) SEP FUNC(cmd1_opcode, NPU_SET_WEIGHT_BASE) SEP FUNC( \ + cmd1_opcode, NPU_SET_WEIGHT_LENGTH) SEP FUNC(cmd1_opcode, NPU_SET_SCALE_BASE) \ + SEP FUNC(cmd1_opcode, NPU_SET_SCALE_LENGTH) SEP FUNC(cmd1_opcode, NPU_SET_OFM_SCALE) \ + SEP FUNC(cmd1_opcode, NPU_SET_OPA_SCALE) SEP FUNC(cmd1_opcode, NPU_SET_OPB_SCALE) \ + SEP FUNC(cmd1_opcode, NPU_SET_DMA0_SRC) SEP FUNC(cmd1_opcode, NPU_SET_DMA0_DST) \ + SEP FUNC(cmd1_opcode, NPU_SET_DMA0_LEN) SEP FUNC(cmd1_opcode, NPU_SET_DMA0_SKIP0) \ + SEP FUNC(cmd1_opcode, NPU_SET_DMA0_SKIP1) SEP FUNC( \ + cmd1_opcode, NPU_SET_IFM2_BASE0) SEP FUNC(cmd1_opcode, NPU_SET_IFM2_BASE1) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM2_BASE2) SEP FUNC(cmd1_opcode, \ + NPU_SET_IFM2_BASE3) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM2_STRIDE_X) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM2_STRIDE_Y) \ + SEP FUNC(cmd1_opcode, NPU_SET_IFM2_STRIDE_C) \ + SEP FUNC(cmd1_opcode, NPU_SET_WEIGHT1_BASE) \ + SEP FUNC(cmd1_opcode, NPU_SET_WEIGHT1_LENGTH) \ + SEP FUNC(cmd1_opcode, NPU_SET_SCALE1_BASE) \ + SEP FUNC(cmd1_opcode, NPU_SET_SCALE1_LENGTH) + +#define EXPAND_CMD_CTRL(FUNC, SEP) FUNC(cmd_ctrl, CMD0_CTRL) SEP FUNC(cmd_ctrl, CMD1_CTRL) + +#define EXPAND_CUSTOM_DMA(FUNC, SEP) FUNC(custom_dma, NOT_IMPLEMENTED) SEP FUNC(custom_dma, IMPLEMENTED) + +#define EXPAND_DMA_FAULT_SRC(FUNC, SEP) FUNC(dma_fault_src, AXI_M0) SEP FUNC(dma_fault_src, AXI_M1) + +#define EXPAND_DMA_REGION_MODE(FUNC, SEP) FUNC(dma_region_mode, EXTERNAL) SEP FUNC(dma_region_mode, INTERNAL) + +#define EXPAND_DMA_STRIDE_MODE(FUNC, SEP) \ + FUNC(dma_stride_mode, D1) SEP FUNC(dma_stride_mode, D2) SEP FUNC(dma_stride_mode, D3) + +#define EXPAND_ELEMENTWISE_MODE(FUNC, SEP) 
\ + FUNC(elementwise_mode, MUL) \ + SEP FUNC(elementwise_mode, ADD) SEP FUNC(elementwise_mode, SUB) SEP FUNC(elementwise_mode, MIN) \ + SEP FUNC(elementwise_mode, MAX) SEP FUNC(elementwise_mode, LRELU) SEP FUNC(elementwise_mode, ABS) \ + SEP FUNC(elementwise_mode, CLZ) SEP FUNC(elementwise_mode, SHR) SEP FUNC(elementwise_mode, SHL) + +#define EXPAND_FUNCTIONAL_SAFETY(FUNC, SEP) \ + FUNC(functional_safety, NOT_IMPLEMENTED) SEP FUNC(functional_safety, IMPLEMENTED) + +#define EXPAND_IFM2_OPERAND_ORDER(FUNC, SEP) FUNC(ifm2_operand_order, ORDER_B) SEP FUNC(ifm2_operand_order, ORDER_A) + +#define EXPAND_IFM_SCALE_MODE(FUNC, SEP) \ + FUNC(ifm_scale_mode, OPA_OPB_16) SEP FUNC(ifm_scale_mode, OPA_32) SEP FUNC(ifm_scale_mode, OPB_32) + +#define EXPAND_IFM_UPSCALE_MODE(FUNC, SEP) \ + FUNC(ifm_upscale_mode, NONE) SEP FUNC(ifm_upscale_mode, NEAREST) SEP FUNC(ifm_upscale_mode, ZEROS) + +#define EXPAND_KERNEL_DECOMPOSITION(FUNC, SEP) FUNC(kernel_decomposition, D8X8) SEP FUNC(kernel_decomposition, D4X4) + +#define EXPAND_KERNEL_DILATION(FUNC, SEP) FUNC(kernel_dilation, NONE) SEP FUNC(kernel_dilation, X2) + +#define EXPAND_MAX_BEATS(FUNC, SEP) FUNC(max_beats, B64) SEP FUNC(max_beats, B128) SEP FUNC(max_beats, B256) + +#define EXPAND_MEM_ATTR(FUNC, SEP) \ + FUNC(mem_attr, AXI0_OUTSTANDING_COUNTER0) \ + SEP FUNC(mem_attr, AXI0_OUTSTANDING_COUNTER1) SEP FUNC(mem_attr, AXI1_OUTSTANDING_COUNTER2) \ + SEP FUNC(mem_attr, AXI1_OUTSTANDING_COUNTER3) + +#define EXPAND_OFM_SCALE_MODE(FUNC, SEP) FUNC(ofm_scale_mode, PER_CHANNEL) SEP FUNC(ofm_scale_mode, GLOBAL) + +#define EXPAND_PARALLEL_MODE(FUNC, SEP) FUNC(parallel_mode, SINGLE_CORE) SEP FUNC(parallel_mode, DUAL_CORE_DEPTH) + +#define EXPAND_PMU_AXI_CHANNEL(FUNC, SEP) \ + FUNC(pmu_axi_channel, RD_CMD) \ + SEP FUNC(pmu_axi_channel, RD_IFM) SEP FUNC(pmu_axi_channel, RD_WEIGHTS) SEP FUNC(pmu_axi_channel, RD_SCALE_BIAS) \ + SEP FUNC(pmu_axi_channel, RD_MEM2MEM) SEP FUNC(pmu_axi_channel, WR_OFM) SEP FUNC(pmu_axi_channel, WR_MEM2MEM) + +#define EXPAND_PMU_EVENT(FUNC, SEP) \ + FUNC(pmu_event, NO_EVENT) \ + SEP FUNC(pmu_event, CYCLE) SEP FUNC(pmu_event, NPU_IDLE) SEP FUNC(pmu_event, CC_STALLED_ON_BLOCKDEP) SEP FUNC( \ + pmu_event, CC_STALLED_ON_SHRAM_RECONFIG) SEP FUNC(pmu_event, NPU_ACTIVE) SEP FUNC(pmu_event, MAC_ACTIVE) \ + SEP FUNC(pmu_event, MAC_ACTIVE_8BIT) SEP FUNC(pmu_event, MAC_ACTIVE_16BIT) SEP FUNC( \ + pmu_event, MAC_DPU_ACTIVE) SEP FUNC(pmu_event, MAC_STALLED_BY_WD_ACC) SEP FUNC(pmu_event, \ + MAC_STALLED_BY_WD) \ + SEP FUNC(pmu_event, MAC_STALLED_BY_ACC) SEP FUNC(pmu_event, MAC_STALLED_BY_IB) SEP FUNC( \ + pmu_event, \ + MAC_ACTIVE_32BIT) SEP FUNC(pmu_event, \ + MAC_STALLED_BY_INT_W) SEP FUNC(pmu_event, \ + MAC_STALLED_BY_INT_ACC) SEP FUNC(pmu_event, \ + AO_ACTIVE) \ + SEP FUNC(pmu_event, AO_ACTIVE_8BIT) SEP FUNC(pmu_event, AO_ACTIVE_16BIT) SEP FUNC( \ + pmu_event, AO_STALLED_BY_OFMP_OB) SEP FUNC(pmu_event, AO_STALLED_BY_OFMP) SEP \ + FUNC(pmu_event, AO_STALLED_BY_OB) SEP FUNC(pmu_event, AO_STALLED_BY_ACC_IB) SEP FUNC( \ + pmu_event, AO_STALLED_BY_ACC) SEP FUNC(pmu_event, AO_STALLED_BY_IB) SEP \ + FUNC(pmu_event, WD_ACTIVE) SEP FUNC(pmu_event, WD_STALLED) SEP FUNC(pmu_event, WD_STALLED_BY_WS) SEP FUNC( \ + pmu_event, WD_STALLED_BY_WD_BUF) SEP FUNC(pmu_event, \ + WD_PARSE_ACTIVE) SEP \ + FUNC(pmu_event, WD_PARSE_STALLED) SEP FUNC(pmu_event, WD_PARSE_STALLED_IN) SEP FUNC( \ + pmu_event, WD_PARSE_STALLED_OUT) SEP FUNC(pmu_event, \ + WD_TRANS_WS) SEP \ + FUNC(pmu_event, WD_TRANS_WB) SEP FUNC(pmu_event, WD_TRANS_DW0) SEP FUNC( \ + pmu_event, WD_TRANS_DW1) SEP 
FUNC(pmu_event, \ + AXI0_RD_TRANS_ACCEPTED) SEP \ + FUNC(pmu_event, AXI0_RD_TRANS_COMPLETED) SEP FUNC(pmu_event, AXI0_RD_DATA_BEAT_RECEIVED) SEP FUNC( \ + pmu_event, AXI0_RD_TRAN_REQ_STALLED) SEP FUNC(pmu_event, \ + AXI0_WR_TRANS_ACCEPTED) SEP \ + FUNC(pmu_event, AXI0_WR_TRANS_COMPLETED_M) SEP FUNC( \ + pmu_event, AXI0_WR_TRANS_COMPLETED_S) SEP \ + FUNC(pmu_event, AXI0_WR_DATA_BEAT_WRITTEN) SEP FUNC( \ + pmu_event, AXI0_WR_TRAN_REQ_STALLED) SEP \ + FUNC(pmu_event, AXI0_WR_DATA_BEAT_STALLED) SEP FUNC( \ + pmu_event, \ + AXI0_ENABLED_CYCLES) SEP FUNC(pmu_event, \ + AXI0_RD_STALL_LIMIT) SEP \ + FUNC(pmu_event, AXI0_WR_STALL_LIMIT) SEP FUNC( \ + pmu_event, \ + AXI_LATENCY_ANY) SEP FUNC(pmu_event, \ + AXI_LATENCY_32) SEP \ + FUNC(pmu_event, \ + AXI_LATENCY_64) SEP FUNC(pmu_event, \ + AXI_LATENCY_128) SEP \ + FUNC(pmu_event, AXI_LATENCY_256) SEP FUNC( \ + pmu_event, \ + AXI_LATENCY_512) SEP FUNC(pmu_event, \ + AXI_LATENCY_1024) SEP \ + FUNC(pmu_event, ECC_DMA) SEP FUNC( \ + pmu_event, \ + ECC_SB0) SEP FUNC(pmu_event, \ + AXI1_RD_TRANS_ACCEPTED) SEP \ + FUNC(pmu_event, AXI1_RD_TRANS_COMPLETED) SEP FUNC( \ + pmu_event, AXI1_RD_DATA_BEAT_RECEIVED) SEP \ + FUNC(pmu_event, AXI1_RD_TRAN_REQ_STALLED) SEP FUNC( \ + pmu_event, AXI1_WR_TRANS_ACCEPTED) SEP \ + FUNC(pmu_event, AXI1_WR_TRANS_COMPLETED_M) SEP FUNC( \ + pmu_event, \ + AXI1_WR_TRANS_COMPLETED_S) SEP \ + FUNC(pmu_event, \ + AXI1_WR_DATA_BEAT_WRITTEN) SEP \ + FUNC(pmu_event, \ + AXI1_WR_TRAN_REQ_STALLED) SEP \ + FUNC( \ + pmu_event, \ + AXI1_WR_DATA_BEAT_STALLED) SEP \ + FUNC( \ + pmu_event, \ + AXI1_ENABLED_CYCLES) SEP \ + FUNC( \ + pmu_event, \ + AXI1_RD_STALL_LIMIT) SEP \ + FUNC( \ + pmu_event, \ + AXI1_WR_STALL_LIMIT) \ + SEP FUNC( \ + pmu_event, \ + ECC_SB1) + +#define EXPAND_POOLING_MODE(FUNC, SEP) \ + FUNC(pooling_mode, MAX) SEP FUNC(pooling_mode, AVERAGE) SEP FUNC(pooling_mode, REDUCE_SUM) + +#define EXPAND_PRIVILEGE_LEVEL(FUNC, SEP) FUNC(privilege_level, USER) SEP FUNC(privilege_level, PRIVILEGED) + +#define EXPAND_ROUND_MODE(FUNC, SEP) FUNC(round_mode, DBL) SEP FUNC(round_mode, TRUNCATE) SEP FUNC(round_mode, NATURAL) + +#define EXPAND_SECURITY_LEVEL(FUNC, SEP) FUNC(security_level, SECURE) SEP FUNC(security_level, NON_SECURE) + +#define EXPAND_STATE(FUNC, SEP) FUNC(state, STOPPED) SEP FUNC(state, RUNNING) + +#define EXPAND_WD_CORE_SLICE_STATE(FUNC, SEP) \ + FUNC(wd_core_slice_state, HEADER) SEP FUNC(wd_core_slice_state, PALETTE) SEP FUNC(wd_core_slice_state, WEIGHTS) + +#define EXPAND_WD_CTRL_STATE(FUNC, SEP) \ + FUNC(wd_ctrl_state, IDLE) \ + SEP FUNC(wd_ctrl_state, DRAIN) SEP FUNC(wd_ctrl_state, OFD_INIT) SEP FUNC(wd_ctrl_state, OFD_RUN) + +#define EXPAND_WEIGHT_ORDER(FUNC, SEP) FUNC(weight_order, DEPTH_FIRST) SEP FUNC(weight_order, PART_KERNEL_FIRST) + +#ifdef __cplusplus +} +#endif +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_config_u55.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_config_u55.h new file mode 100644 index 0000000..9330bb1 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_config_u55.h @@ -0,0 +1,124 @@ +/* + * Copyright (c) 2019-2020,2022 Arm Limited. + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ETHOSU_CONFIG_H +#define ETHOSU_CONFIG_H + +/* Set default values if not manually overriden */ + +#ifndef NPU_QCONFIG +#define NPU_QCONFIG 2 +#endif + +#ifndef NPU_REGIONCFG_0 +#define NPU_REGIONCFG_0 3 +#endif + +#ifndef NPU_REGIONCFG_1 +#define NPU_REGIONCFG_1 0 +#endif + +#ifndef NPU_REGIONCFG_2 +#define NPU_REGIONCFG_2 1 +#endif + +#ifndef NPU_REGIONCFG_3 +#define NPU_REGIONCFG_3 1 +#endif + +#ifndef NPU_REGIONCFG_4 +#define NPU_REGIONCFG_4 1 +#endif + +#ifndef NPU_REGIONCFG_5 +#define NPU_REGIONCFG_5 1 +#endif + +#ifndef NPU_REGIONCFG_6 +#define NPU_REGIONCFG_6 1 +#endif + +#ifndef NPU_REGIONCFG_7 +#define NPU_REGIONCFG_7 1 +#endif + +#ifndef AXI_LIMIT0_MAX_BEATS_BYTES +#define AXI_LIMIT0_MAX_BEATS_BYTES 0x0 +#endif + +#ifndef AXI_LIMIT0_MEM_TYPE +#define AXI_LIMIT0_MEM_TYPE 0x0 +#endif + +#ifndef AXI_LIMIT0_MAX_OUTSTANDING_READS +#define AXI_LIMIT0_MAX_OUTSTANDING_READS 32 +#endif + +#ifndef AXI_LIMIT0_MAX_OUTSTANDING_WRITES +#define AXI_LIMIT0_MAX_OUTSTANDING_WRITES 16 +#endif + +#ifndef AXI_LIMIT1_MAX_BEATS_BYTES +#define AXI_LIMIT1_MAX_BEATS_BYTES 0x0 +#endif + +#ifndef AXI_LIMIT1_MEM_TYPE +#define AXI_LIMIT1_MEM_TYPE 0x0 +#endif + +#ifndef AXI_LIMIT1_MAX_OUTSTANDING_READS +#define AXI_LIMIT1_MAX_OUTSTANDING_READS 32 +#endif + +#ifndef AXI_LIMIT1_MAX_OUTSTANDING_WRITES +#define AXI_LIMIT1_MAX_OUTSTANDING_WRITES 16 +#endif + +#ifndef AXI_LIMIT2_MAX_BEATS_BYTES +#define AXI_LIMIT2_MAX_BEATS_BYTES 0x0 +#endif + +#ifndef AXI_LIMIT2_MEM_TYPE +#define AXI_LIMIT2_MEM_TYPE 0x0 +#endif + +#ifndef AXI_LIMIT2_MAX_OUTSTANDING_READS +#define AXI_LIMIT2_MAX_OUTSTANDING_READS 32 +#endif + +#ifndef AXI_LIMIT2_MAX_OUTSTANDING_WRITES +#define AXI_LIMIT2_MAX_OUTSTANDING_WRITES 16 +#endif + +#ifndef AXI_LIMIT3_MAX_BEATS_BYTES +#define AXI_LIMIT3_MAX_BEATS_BYTES 0x0 +#endif + +#ifndef AXI_LIMIT3_MEM_TYPE +#define AXI_LIMIT3_MEM_TYPE 0x0 +#endif + +#ifndef AXI_LIMIT3_MAX_OUTSTANDING_READS +#define AXI_LIMIT3_MAX_OUTSTANDING_READS 32 +#endif + +#ifndef AXI_LIMIT3_MAX_OUTSTANDING_WRITES +#define AXI_LIMIT3_MAX_OUTSTANDING_WRITES 16 +#endif + +#endif /* #ifndef ETHOSU_CONFIG_H */ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_device.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_device.h new file mode 100644 index 0000000..02942b1 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_device.h @@ -0,0 +1,142 @@ +/* + * SPDX-FileCopyrightText: Copyright 2019-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ETHOSU_DEVICE_H +#define ETHOSU_DEVICE_H + +/****************************************************************************** + * Includes + ******************************************************************************/ +#include "ethosu_types.h" + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +/****************************************************************************** + * Defines + ******************************************************************************/ + +// NOTE: Deprecated +#ifndef ETHOSU_PMU_NCOUNTERS +#define ETHOSU_PMU_NCOUNTERS 4 +#endif + +/****************************************************************************** + * Types + ******************************************************************************/ +struct NPU_REG; // Forward declare, to be implemented by each device + +struct ethosu_device +{ + volatile struct NPU_REG *reg; // Register map + uint32_t secure; + uint32_t privileged; +}; + +/****************************************************************************** + * Prototypes + ******************************************************************************/ + +/** + * Initialize the device. + */ +struct ethosu_device *ethosu_dev_init(void *const base_address, uint32_t secure_enable, uint32_t privilege_enable); + +/** + * Deinitialize the device. + */ +void ethosu_dev_deinit(struct ethosu_device *dev); + +/** + * Initialize AXI settings for device. + */ +enum ethosu_error_codes ethosu_dev_axi_init(struct ethosu_device *dev); + +/** + * Execute a given command stream on NPU. + * \param[in] cmd_stream_ptr Pointer to the command stream + * \param[in] cms_length Command stream length + * \param[in] base_addr Pointer to array of base addresses + * - 0: weight tensor + * - 1: scratch tensor + * - All input tensors + * - All output tensors + * \param[in] num_base_addr Number of base addresses. + */ +void ethosu_dev_run_command_stream(struct ethosu_device *dev, + const uint8_t *cmd_stream_ptr, + uint32_t cms_length, + const uint64_t *base_addr, + int num_base_addr); + +/** + * Print information on NPU error status + */ +void ethosu_dev_print_err_status(struct ethosu_device *dev); + +/** + * Interrupt handler on device layer + * \return true if NPU status is OK, otherwise false + */ +bool ethosu_dev_handle_interrupt(struct ethosu_device *dev); + +/** + * Get hardware information from NPU + * \param[out] hwinfo Pointer to the hardware info struct to be filled in. + */ +void ethosu_dev_get_hw_info(struct ethosu_device *dev, struct ethosu_hw_info *hwinfo); + +/** + * Verify that requested security state and privilege mode are active + * \return 32 bit status value + */ +bool ethosu_dev_verify_access_state(struct ethosu_device *dev); + +/** + * Performs a NPU soft reset and waits for the NPU to become ready + * \return \ref ethosu_error_codes + */ +enum ethosu_error_codes ethosu_dev_soft_reset(struct ethosu_device *dev); + +/** + * Enable/disable clock and power using clock/power q interface. + * \param[in] clock_q Clock q ENABLE/DISABLE \ref clock_q_request. + * \param[in] power_q Power q ENABLE/DISABLE \ref power_q_request. 
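+ *
+ * Illustrative usage sketch only; the ETHOSU_CLOCK_Q_ENABLE / ETHOSU_POWER_Q_ENABLE
+ * value names are assumed to come from ethosu_types.h and are not defined here:
+ *   (void)ethosu_dev_set_clock_and_power(dev, ETHOSU_CLOCK_Q_ENABLE, ETHOSU_POWER_Q_ENABLE);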
+ * \return \ref ethosu_error_codes + */ +enum ethosu_error_codes ethosu_dev_set_clock_and_power(struct ethosu_device *dev, + enum ethosu_clock_q_request clock_q, + enum ethosu_power_q_request power_q); + +/** + * Verifies that optimizer parameters from model are compatible with the hardware + * \param[in] cfg Config data from optimizer. + * \param[in] id Id data from optimizer. + * \return true if parameters match with hardware, false otherwise. + */ +bool ethosu_dev_verify_optimizer_config(struct ethosu_device *dev, uint32_t cfg_in, uint32_t id_in); + +#ifdef __cplusplus +} +#endif + +#endif // ETHOSU_DEVICE_H diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_device_u55_u65.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_device_u55_u65.c new file mode 100644 index 0000000..7de0daa --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_device_u55_u65.c @@ -0,0 +1,392 @@ +/* + * SPDX-FileCopyrightText: Copyright 2019-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +/****************************************************************************** + * Includes + ******************************************************************************/ +#if EI_ETHOS + +#include "ethosu_interface.h" + +#include "ethosu_device.h" +#include "ethosu_log.h" + +#ifdef ETHOSU55 +#include "ethosu_config_u55.h" +#else +#include "ethosu_config_u65.h" +#endif + +#include +#include +#include +#include +#include +#include + +/****************************************************************************** + * Defines + ******************************************************************************/ + +#define ETHOSU_PRODUCT_U55 0 +#define ETHOSU_PRODUCT_U65 1 + +#define BASEP_OFFSET 4 + +#ifdef ETHOSU65 +#define ADDRESS_BITS 40 +#else +#define ADDRESS_BITS 32 +#endif + +#define ADDRESS_MASK ((1ull << ADDRESS_BITS) - 1) + +#define NPU_CMD_PWR_CLK_MASK (0xC) + +/****************************************************************************** + * Functions + ******************************************************************************/ + +uint64_t __attribute__((weak)) ethosu_address_remap(uint64_t address, int index) +{ + (void)(index); + return address; +} + +struct ethosu_device *ethosu_dev_init(void *const base_address, uint32_t secure_enable, uint32_t privilege_enable) +{ + struct ethosu_device *dev = malloc(sizeof(struct ethosu_device)); + if (!dev) + { + LOG_ERR("Failed to allocate memory for Ethos-U device"); + return NULL; + } + + dev->reg = (volatile struct NPU_REG *)base_address; + dev->secure = secure_enable; + dev->privileged = privilege_enable; + +#ifdef ETHOSU55 + if (dev->reg->CONFIG.product != ETHOSU_PRODUCT_U55) +#else + if (dev->reg->CONFIG.product != ETHOSU_PRODUCT_U65) +#endif + { + LOG_ERR("Failed to initialize device. 
Driver has not been compiled for this product"); + goto err; + } + + // Make sure the NPU is in a known state + if (ethosu_dev_soft_reset(dev) != ETHOSU_SUCCESS) + { + goto err; + } + + return dev; + +err: + free(dev); + return NULL; +} + +void ethosu_dev_deinit(struct ethosu_device *dev) +{ + free(dev); +} + +enum ethosu_error_codes ethosu_dev_axi_init(struct ethosu_device *dev) +{ + struct regioncfg_r rcfg = {0}; + struct axi_limit0_r l0 = {0}; + struct axi_limit1_r l1 = {0}; + struct axi_limit2_r l2 = {0}; + struct axi_limit3_r l3 = {0}; + + dev->reg->QCONFIG.word = NPU_QCONFIG; + + rcfg.region0 = NPU_REGIONCFG_0; + rcfg.region1 = NPU_REGIONCFG_1; + rcfg.region2 = NPU_REGIONCFG_2; + rcfg.region3 = NPU_REGIONCFG_3; + rcfg.region4 = NPU_REGIONCFG_4; + rcfg.region5 = NPU_REGIONCFG_5; + rcfg.region6 = NPU_REGIONCFG_6; + rcfg.region7 = NPU_REGIONCFG_7; + dev->reg->REGIONCFG.word = rcfg.word; + + l0.max_beats = AXI_LIMIT0_MAX_BEATS_BYTES; + l0.memtype = AXI_LIMIT0_MEM_TYPE; + l0.max_outstanding_read_m1 = AXI_LIMIT0_MAX_OUTSTANDING_READS - 1; + l0.max_outstanding_write_m1 = AXI_LIMIT0_MAX_OUTSTANDING_WRITES - 1; + + l1.max_beats = AXI_LIMIT1_MAX_BEATS_BYTES; + l1.memtype = AXI_LIMIT1_MEM_TYPE; + l1.max_outstanding_read_m1 = AXI_LIMIT1_MAX_OUTSTANDING_READS - 1; + l1.max_outstanding_write_m1 = AXI_LIMIT1_MAX_OUTSTANDING_WRITES - 1; + + l2.max_beats = AXI_LIMIT2_MAX_BEATS_BYTES; + l2.memtype = AXI_LIMIT2_MEM_TYPE; + l2.max_outstanding_read_m1 = AXI_LIMIT2_MAX_OUTSTANDING_READS - 1; + l2.max_outstanding_write_m1 = AXI_LIMIT2_MAX_OUTSTANDING_WRITES - 1; + + l3.max_beats = AXI_LIMIT3_MAX_BEATS_BYTES; + l3.memtype = AXI_LIMIT3_MEM_TYPE; + l3.max_outstanding_read_m1 = AXI_LIMIT3_MAX_OUTSTANDING_READS - 1; + l3.max_outstanding_write_m1 = AXI_LIMIT3_MAX_OUTSTANDING_WRITES - 1; + + dev->reg->AXI_LIMIT0.word = l0.word; + dev->reg->AXI_LIMIT1.word = l1.word; + dev->reg->AXI_LIMIT2.word = l2.word; + dev->reg->AXI_LIMIT3.word = l3.word; + + return ETHOSU_SUCCESS; +} + +void ethosu_dev_run_command_stream(struct ethosu_device *dev, + const uint8_t *cmd_stream_ptr, + uint32_t cms_length, + const uint64_t *base_addr, + int num_base_addr) +{ + assert(num_base_addr <= NPU_REG_BASEP_ARRLEN); + + struct cmd_r cmd; + uint64_t qbase = ethosu_address_remap((uintptr_t)cmd_stream_ptr, -1); + assert(qbase <= ADDRESS_MASK); + LOG_DEBUG("QBASE=0x%016llx, QSIZE=%" PRIu32 ", cmd_stream_ptr=%p", qbase, cms_length, cmd_stream_ptr); + + dev->reg->QBASE.word[0] = qbase & 0xffffffff; +#ifdef ETHOSU65 + dev->reg->QBASE.word[1] = qbase >> 32; +#endif + dev->reg->QSIZE.word = cms_length; + + for (int i = 0; i < num_base_addr; i++) + { + uint64_t addr = ethosu_address_remap(base_addr[i], i); + assert(addr <= ADDRESS_MASK); + LOG_DEBUG("BASEP%d=0x%016llx", i, addr); + dev->reg->BASEP[i].word[0] = addr & 0xffffffff; +#ifdef ETHOSU65 + dev->reg->BASEP[i].word[1] = addr >> 32; +#endif + } + + cmd.word = dev->reg->CMD.word & NPU_CMD_PWR_CLK_MASK; + cmd.transition_to_running_state = 1; + + dev->reg->CMD.word = cmd.word; + LOG_DEBUG("CMD=0x%08" PRIx32, cmd.word); +} + +void ethosu_dev_print_err_status(struct ethosu_device *dev) +{ + LOG_ERR("NPU status=0x%08" PRIx32 ", qread=%" PRIu32 ", cmd_end_reached=%u", + dev->reg->STATUS.word, + dev->reg->QREAD.word, + dev->reg->STATUS.cmd_end_reached); +} + +bool ethosu_dev_handle_interrupt(struct ethosu_device *dev) +{ + struct cmd_r cmd; + + // Clear interrupt + cmd.word = dev->reg->CMD.word & NPU_CMD_PWR_CLK_MASK; + cmd.clear_irq = 1; + dev->reg->CMD.word = cmd.word; + + // If a fault has occured, 
the NPU needs to be reset + if (dev->reg->STATUS.bus_status || dev->reg->STATUS.cmd_parse_error || dev->reg->STATUS.wd_fault || + dev->reg->STATUS.ecc_fault || !dev->reg->STATUS.cmd_end_reached) + { + return false; + } + + return true; +} + +bool ethosu_dev_verify_access_state(struct ethosu_device *dev) +{ + if (dev->reg->PROT.active_CSL != (dev->secure ? SECURITY_LEVEL_SECURE : SECURITY_LEVEL_NON_SECURE) || + dev->reg->PROT.active_CPL != (dev->privileged ? PRIVILEGE_LEVEL_PRIVILEGED : PRIVILEGE_LEVEL_USER)) + { + return false; + } + return true; +} + +enum ethosu_error_codes ethosu_dev_soft_reset(struct ethosu_device *dev) +{ + // Note that after a soft-reset, the NPU is unconditionally + // powered until the next CMD gets written. + + struct reset_r reset; + + reset.word = 0; + reset.pending_CPL = dev->privileged ? PRIVILEGE_LEVEL_PRIVILEGED : PRIVILEGE_LEVEL_USER; + reset.pending_CSL = dev->secure ? SECURITY_LEVEL_SECURE : SECURITY_LEVEL_NON_SECURE; + + // Reset and set security level + LOG_INFO("Soft reset NPU"); + dev->reg->RESET.word = reset.word; + + // Wait until reset status indicates that reset has been completed + for (int i = 0; i < 100000 && dev->reg->STATUS.reset_status != 0; i++) + { + } + + if (dev->reg->STATUS.reset_status != 0) + { + LOG_ERR("Soft reset timed out"); + return ETHOSU_GENERIC_FAILURE; + } + + // Verify that NPU has switched security state and privilege level + if (ethosu_dev_verify_access_state(dev) != true) + { + LOG_ERR("Failed to switch security state and privilege level"); + return ETHOSU_GENERIC_FAILURE; + } + + // Reinitialize AXI settings + ethosu_dev_axi_init(dev); + + return ETHOSU_SUCCESS; +} + +void ethosu_dev_get_hw_info(struct ethosu_device *dev, struct ethosu_hw_info *hwinfo) +{ + struct config_r cfg; + struct id_r id; + + cfg.word = dev->reg->CONFIG.word; + id.word = dev->reg->ID.word; + + hwinfo->cfg.cmd_stream_version = cfg.cmd_stream_version; + hwinfo->cfg.custom_dma = cfg.custom_dma; + hwinfo->cfg.macs_per_cc = cfg.macs_per_cc; + + hwinfo->version.arch_major_rev = id.arch_major_rev; + hwinfo->version.arch_minor_rev = id.arch_minor_rev; + hwinfo->version.arch_patch_rev = id.arch_patch_rev; + hwinfo->version.product_major = id.product_major; + hwinfo->version.version_major = id.version_major; + hwinfo->version.version_minor = id.version_minor; + hwinfo->version.version_status = id.version_status; +} + +enum ethosu_error_codes ethosu_dev_set_clock_and_power(struct ethosu_device *dev, + enum ethosu_clock_q_request clock_q, + enum ethosu_power_q_request power_q) +{ + struct cmd_r cmd = {0}; + cmd.word = dev->reg->CMD.word & NPU_CMD_PWR_CLK_MASK; + + if (power_q != ETHOSU_POWER_Q_UNCHANGED) + { + cmd.power_q_enable = power_q == ETHOSU_POWER_Q_ENABLE ? 1 : 0; + } + if (clock_q != ETHOSU_CLOCK_Q_UNCHANGED) + { + cmd.clock_q_enable = clock_q == ETHOSU_CLOCK_Q_ENABLE ? 1 : 0; + } + + dev->reg->CMD.word = cmd.word; + LOG_DEBUG("CMD=0x%08" PRIx32, cmd.word); + + return ETHOSU_SUCCESS; +} + +bool ethosu_dev_verify_optimizer_config(struct ethosu_device *dev, uint32_t cfg_in, uint32_t id_in) +{ + struct config_r *opt_cfg = (struct config_r *)&cfg_in; + struct config_r hw_cfg; + struct id_r *opt_id = (struct id_r *)&id_in; + struct id_r hw_id; + bool ret = true; + + hw_cfg.word = dev->reg->CONFIG.word; + hw_id.word = dev->reg->ID.word; + + LOG_INFO("Optimizer config. 
product=%u, cmd_stream_version=%u, macs_per_cc=%u, shram_size=%u, custom_dma=%u", + opt_cfg->product, + opt_cfg->cmd_stream_version, + opt_cfg->macs_per_cc, + opt_cfg->shram_size, + opt_cfg->custom_dma); + LOG_INFO("Optimizer config. arch version: %u.%u.%u", + opt_id->arch_major_rev, + opt_id->arch_minor_rev, + opt_id->arch_patch_rev); + LOG_INFO("Ethos-U config. product=%u, cmd_stream_version=%u, macs_per_cc=%u, shram_size=%u, custom_dma=%u", + hw_cfg.product, + hw_cfg.cmd_stream_version, + hw_cfg.macs_per_cc, + hw_cfg.shram_size, + hw_cfg.custom_dma); + LOG_INFO("Ethos-U. arch version=%u.%u.%u", hw_id.arch_major_rev, hw_id.arch_minor_rev, hw_id.arch_patch_rev); + + if (opt_cfg->word != hw_cfg.word) + { + if (hw_cfg.product != opt_cfg->product) + { + LOG_ERR("NPU config mismatch. npu.product=%u, optimizer.product=%u", hw_cfg.product, opt_cfg->product); + ret = false; + } + + if (hw_cfg.macs_per_cc != opt_cfg->macs_per_cc) + { + LOG_ERR("NPU config mismatch. npu.macs_per_cc=%u, optimizer.macs_per_cc=%u", + hw_cfg.macs_per_cc, + opt_cfg->macs_per_cc); + ret = false; + } + + if (hw_cfg.cmd_stream_version != opt_cfg->cmd_stream_version) + { + LOG_ERR("NPU config mismatch. npu.cmd_stream_version=%u, optimizer.cmd_stream_version=%u", + hw_cfg.cmd_stream_version, + opt_cfg->cmd_stream_version); + ret = false; + } + + if (!hw_cfg.custom_dma && opt_cfg->custom_dma) + { + LOG_ERR("NPU config mismatch. npu.custom_dma=%u, optimizer.custom_dma=%u", + hw_cfg.custom_dma, + opt_cfg->custom_dma); + ret = false; + } + } + + if ((hw_id.arch_major_rev != opt_id->arch_major_rev) || (hw_id.arch_minor_rev < opt_id->arch_minor_rev)) + { + LOG_ERR("NPU arch mismatch. npu.arch=%u.%u.%u, optimizer.arch=%u.%u.%u", + hw_id.arch_major_rev, + hw_id.arch_minor_rev, + hw_id.arch_patch_rev, + opt_id->arch_major_rev, + opt_id->arch_minor_rev, + opt_id->arch_patch_rev); + ret = false; + } + + return ret; +} +#endif // EI_ETHOS diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_driver.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_driver.c new file mode 100644 index 0000000..ae038e3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_driver.c @@ -0,0 +1,765 @@ +/* + * SPDX-FileCopyrightText: Copyright 2019-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
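Editor's note: the device layer above funnels every QBASE and BASEP address through the weak ethosu_address_remap() hook declared near the top of ethosu_device_u55_u65.c, which defaults to an identity mapping. On a system where the NPU's AXI masters see memory at a different offset than the CPU, the application can supply a strong definition. A minimal sketch, with a hypothetical NPU_VIEW_OFFSET that is not part of this patch:

#include <stdint.h>

/* Hypothetical offset between the CPU's and the NPU's view of external memory. */
#define NPU_VIEW_OFFSET 0x20000000ull

/* Strong definition; overrides the weak identity remap in ethosu_device_u55_u65.c.
 * 'index' is the BASEP slot (-1 for the command stream) and is unused in this sketch. */
uint64_t ethosu_address_remap(uint64_t address, int index)
{
    (void)index;
    return address + NPU_VIEW_OFFSET;
}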
+ */ + +/****************************************************************************** + * Includes + ******************************************************************************/ +#if EI_ETHOS + +#include "ethosu_driver.h" +#include "ethosu_device.h" +#include "ethosu_log.h" + +#ifdef ETHOSU55 +#include "ethosu_config_u55.h" +#else +#include "ethosu_config_u65.h" +#endif + +#include +#include +#include +#include +#include +#include +#include + +/****************************************************************************** + * Defines + ******************************************************************************/ + +#define UNUSED(x) ((void)x) + +#define BYTES_IN_32_BITS 4 +#define MASK_16_BYTE_ALIGN (0xF) +#define OPTIMIZER_CONFIG_LENGTH_32_BIT_WORD 2 +#define DRIVER_ACTION_LENGTH_32_BIT_WORD 1 +#define ETHOSU_FOURCC ('1' << 24 | 'P' << 16 | 'O' << 8 | 'C') // "Custom Operator Payload 1" + +#define FAST_MEMORY_BASE_ADDR_INDEX 2 + +/****************************************************************************** + * Types + ******************************************************************************/ + +// Driver actions +enum DRIVER_ACTION_e +{ + RESERVED = 0, + OPTIMIZER_CONFIG = 1, + COMMAND_STREAM = 2, + NOP = 5, +}; + +// Custom operator payload data struct +struct cop_data_s +{ + union + { + // Driver action data + struct + { + uint8_t driver_action_command; // (valid values in DRIVER_ACTION_e) + uint8_t reserved; + + // Driver action data + union + { + // DA_CMD_OPT_CFG + struct + { + uint16_t rel_nbr : 4; + uint16_t patch_nbr : 4; + uint16_t opt_cfg_reserved : 8; + }; + + // DA_CMD_CMSTRM + struct + { + uint16_t length; + }; + + uint16_t driver_action_data; + }; + }; + + uint32_t word; + }; +}; + +// optimizer config struct +struct opt_cfg_s +{ + struct cop_data_s da_data; + uint32_t cfg; + uint32_t id; +}; + +/****************************************************************************** + * Variables + ******************************************************************************/ + +// Registered drivers linked list HEAD +static struct ethosu_driver *registered_drivers = NULL; + +/****************************************************************************** + * Weak functions - Cache + * + * Default NOP operations. Override if available on the targeted device. + ******************************************************************************/ + +/* + * Flush/clean the data cache by address and size. Passing NULL as p argument + * expects the whole cache to be flushed. + */ +void __attribute__((weak)) ethosu_flush_dcache(uint32_t *p, size_t bytes) +{ + UNUSED(p); + UNUSED(bytes); +} + +/* + * Invalidate the data cache by address and size. Passing NULL as p argument + * expects the whole cache to be invalidated. + */ +void __attribute__((weak)) ethosu_invalidate_dcache(uint32_t *p, size_t bytes) +{ + UNUSED(p); + UNUSED(bytes); +} + +/****************************************************************************** + * Weak functions - Semaphore/Mutex for multi NPU + * + * Following section handles the minimal sempahore and mutex implementation in + * case of baremetal applications. Weak symbols will be overridden by RTOS + * definitions and implement true thread-safety (in application layer). 
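Editor's note: an RTOS port would typically provide strong definitions for these hooks; a FreeRTOS-flavoured sketch for a single NPU is shown here as an assumption for illustration, not part of this patch (the bare-metal fallbacks follow below):

#include "FreeRTOS.h"
#include "semphr.h"

/* Strong definitions override the weak bare-metal hooks defined below in this file. */
void *ethosu_mutex_create(void)         { return xSemaphoreCreateMutex(); }
void  ethosu_mutex_destroy(void *m)     { vSemaphoreDelete((SemaphoreHandle_t)m); }
int   ethosu_mutex_lock(void *m)        { return xSemaphoreTake((SemaphoreHandle_t)m, portMAX_DELAY) == pdTRUE ? 0 : -1; }
int   ethosu_mutex_unlock(void *m)      { return xSemaphoreGive((SemaphoreHandle_t)m) == pdTRUE ? 0 : -1; }

void *ethosu_semaphore_create(void)     { return xSemaphoreCreateBinary(); } /* created empty, like count = 0 */
void  ethosu_semaphore_destroy(void *s) { vSemaphoreDelete((SemaphoreHandle_t)s); }
int   ethosu_semaphore_take(void *s)    { return xSemaphoreTake((SemaphoreHandle_t)s, portMAX_DELAY) == pdTRUE ? 0 : -1; }
/* Note: the driver gives this semaphore from ethosu_irq_handler(), so a production
 * port would use xSemaphoreGiveFromISR() when running in interrupt context. */
int   ethosu_semaphore_give(void *s)    { return xSemaphoreGive((SemaphoreHandle_t)s) == pdTRUE ? 0 : -1; }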
+ ******************************************************************************/ + +struct ethosu_semaphore_t +{ + uint8_t count; +}; + +static void *ethosu_mutex; +static void *ethosu_semaphore; + +void *__attribute__((weak)) ethosu_mutex_create(void) +{ + return NULL; +} + +void __attribute__((weak)) ethosu_mutex_destroy(void *mutex) +{ + UNUSED(mutex); +} + +int __attribute__((weak)) ethosu_mutex_lock(void *mutex) +{ + UNUSED(mutex); + return 0; +} + +int __attribute__((weak)) ethosu_mutex_unlock(void *mutex) +{ + UNUSED(mutex); + return 0; +} + +// Baremetal implementation of creating a semaphore +void *__attribute__((weak)) ethosu_semaphore_create(void) +{ + struct ethosu_semaphore_t *sem = malloc(sizeof(*sem)); + if (sem != NULL) + { + sem->count = 0; + } + return sem; +} + +void __attribute__((weak)) ethosu_semaphore_destroy(void *sem) +{ + free((struct ethosu_semaphore_t *)sem); +} + +// Baremetal simulation of waiting/sleeping for and then taking a semaphore using intrisics +int __attribute__((weak)) ethosu_semaphore_take(void *sem) +{ + struct ethosu_semaphore_t *s = sem; + while (s->count == 0) + { + __WFE(); + } + s->count--; + return 0; +} + +// Baremetal simulation of giving a semaphore and waking up processes using intrinsics +int __attribute__((weak)) ethosu_semaphore_give(void *sem) +{ + struct ethosu_semaphore_t *s = sem; + s->count++; + __SEV(); + return 0; +} + +/****************************************************************************** + * Weak functions - Inference begin/end callbacks + ******************************************************************************/ + +void __attribute__((weak)) ethosu_inference_begin(struct ethosu_driver *drv, void *user_arg) +{ + UNUSED(user_arg); + UNUSED(drv); +} + +void __attribute__((weak)) ethosu_inference_end(struct ethosu_driver *drv, void *user_arg) +{ + UNUSED(user_arg); + UNUSED(drv); +} + +/****************************************************************************** + * Static functions + ******************************************************************************/ +static void ethosu_register_driver(struct ethosu_driver *drv) +{ + ethosu_mutex_lock(ethosu_mutex); + drv->next = registered_drivers; + registered_drivers = drv; + ethosu_mutex_unlock(ethosu_mutex); + + ethosu_semaphore_give(ethosu_semaphore); + + LOG_INFO("New NPU driver registered (handle: 0x%p, NPU: 0x%p)", drv, drv->dev->reg); +} + +static int ethosu_deregister_driver(struct ethosu_driver *drv) +{ + struct ethosu_driver *curr; + struct ethosu_driver **prev; + + ethosu_mutex_lock(ethosu_mutex); + curr = registered_drivers; + prev = ®istered_drivers; + + while (curr != NULL) + { + if (curr == drv) + { + *prev = curr->next; + LOG_INFO("NPU driver handle %p deregistered.", drv); + ethosu_semaphore_take(ethosu_semaphore); + break; + } + + prev = &curr->next; + curr = curr->next; + } + + ethosu_mutex_unlock(ethosu_mutex); + + if (curr == NULL) + { + LOG_ERR("No NPU driver handle registered at address %p.", drv); + return -1; + } + + return 0; +} + +static void ethosu_reset_job(struct ethosu_driver *drv) +{ + memset(&drv->job, 0, sizeof(struct ethosu_job)); +} + +static int handle_optimizer_config(struct ethosu_driver *drv, struct opt_cfg_s const *opt_cfg_p) +{ + LOG_INFO("Optimizer release nbr: %u patch: %u", opt_cfg_p->da_data.rel_nbr, opt_cfg_p->da_data.patch_nbr); + + if (ethosu_dev_verify_optimizer_config(drv->dev, opt_cfg_p->cfg, opt_cfg_p->id) != true) + { + return -1; + } + + return 0; +} + +static int handle_command_stream(struct ethosu_driver 
*drv, const uint8_t *cmd_stream, const int cms_length) +{ + uint32_t cms_bytes = cms_length * BYTES_IN_32_BITS; + ptrdiff_t cmd_stream_ptr = (ptrdiff_t)cmd_stream; + + LOG_INFO("handle_command_stream: cmd_stream=%p, cms_length %d", cmd_stream, cms_length); + + if (0 != ((ptrdiff_t)cmd_stream & MASK_16_BYTE_ALIGN)) + { + LOG_ERR("Command stream addr %p not aligned to 16 bytes", cmd_stream); + return -1; + } + + // Verify 16 byte alignment for base address' + for (int i = 0; i < drv->job.num_base_addr; i++) + { + if (0 != (drv->job.base_addr[i] & MASK_16_BYTE_ALIGN)) + { + LOG_ERR("Base addr %d: 0x%llx not aligned to 16 bytes", i, drv->job.base_addr[i]); + return -1; + } + } + + // Flush the cache if available on CPU. + // The upcasting to uin32_t* is ok since the pointer never is dereferenced. + // The base_addr_size is null if invoking from prior to invoke_V2, in that case + // the whole cache is being flushed. + + if (drv->job.base_addr_size != NULL) + { + ethosu_flush_dcache((uint32_t *)cmd_stream_ptr, cms_bytes); + for (int i = 0; i < drv->job.num_base_addr; i++) + { + ethosu_flush_dcache((uint32_t *)(uintptr_t)drv->job.base_addr[i], drv->job.base_addr_size[i]); + } + } + else + { + ethosu_flush_dcache(NULL, 0); + } + + // Request power gating disabled during inference run + if (ethosu_request_power(drv)) + { + LOG_ERR("Failed to request power"); + return -1; + } + + drv->job.state = ETHOSU_JOB_RUNNING; + + // Inference begin callback + ethosu_inference_begin(drv, drv->job.user_arg); + + // Execute the command stream + ethosu_dev_run_command_stream(drv->dev, cmd_stream, cms_bytes, drv->job.base_addr, drv->job.num_base_addr); + + return 0; +} + +/****************************************************************************** + * Weak functions - Interrupt handler + ******************************************************************************/ +void __attribute__((weak)) ethosu_irq_handler(struct ethosu_driver *drv) +{ + LOG_DEBUG("Got interrupt from Ethos-U"); + + drv->job.state = ETHOSU_JOB_DONE; + if (!ethosu_dev_handle_interrupt(drv->dev)) + { + drv->status_error = true; + } + ethosu_semaphore_give(drv->semaphore); +} + +/****************************************************************************** + * Functions API + ******************************************************************************/ + +int ethosu_init(struct ethosu_driver *drv, + void *const base_address, + const void *fast_memory, + const size_t fast_memory_size, + uint32_t secure_enable, + uint32_t privilege_enable) +{ + LOG_INFO("Initializing NPU: base_address=%p, fast_memory=%p, fast_memory_size=%zu, secure=%" PRIu32 + ", privileged=%" PRIu32, + base_address, + fast_memory, + fast_memory_size, + secure_enable, + privilege_enable); + + if (!ethosu_mutex) + { + ethosu_mutex = ethosu_mutex_create(); + } + + if (!ethosu_semaphore) + { + ethosu_semaphore = ethosu_semaphore_create(); + if (!ethosu_semaphore) + { + LOG_ERR("Failed to create global driver semaphore"); + return -1; + } + } + + drv->fast_memory = (uint32_t)fast_memory; + drv->fast_memory_size = fast_memory_size; + drv->power_request_counter = 0; + + // Initialize the device and set requested security state and privilege mode + drv->dev = ethosu_dev_init(base_address, secure_enable, privilege_enable); + + if (drv->dev == NULL) + { + LOG_ERR("Failed to initialize Ethos-U device"); + return -1; + } + + drv->semaphore = ethosu_semaphore_create(); + if (!drv->semaphore) + { + LOG_ERR("Failed to create driver semaphore"); + ethosu_dev_deinit(drv->dev); + drv->dev = 
NULL; + return -1; + } + + drv->status_error = false; + + ethosu_reset_job(drv); + ethosu_register_driver(drv); + + return 0; +} + +void ethosu_deinit(struct ethosu_driver *drv) +{ + ethosu_deregister_driver(drv); + ethosu_semaphore_destroy(drv->semaphore); + ethosu_dev_deinit(drv->dev); + drv->dev = NULL; +} + +int ethosu_soft_reset(struct ethosu_driver *drv) +{ + // Soft reset the NPU + if (ethosu_dev_soft_reset(drv->dev) != ETHOSU_SUCCESS) + { + LOG_ERR("Failed to soft-reset NPU"); + return -1; + } + + // Update power and clock gating after the soft reset + ethosu_dev_set_clock_and_power(drv->dev, + drv->power_request_counter > 0 ? ETHOSU_CLOCK_Q_DISABLE : ETHOSU_CLOCK_Q_ENABLE, + drv->power_request_counter > 0 ? ETHOSU_POWER_Q_DISABLE : ETHOSU_POWER_Q_ENABLE); + + return 0; +} + +int ethosu_request_power(struct ethosu_driver *drv) +{ + // Check if this is the first power request, increase counter + if (drv->power_request_counter++ == 0) + { + // Always reset to a known state. Changes to requested + // security state/privilege mode if necessary. + if (ethosu_soft_reset(drv)) + { + LOG_ERR("Failed to request power for Ethos-U"); + drv->power_request_counter--; + return -1; + } + } + return 0; +} + +void ethosu_release_power(struct ethosu_driver *drv) +{ + if (drv->power_request_counter == 0) + { + LOG_WARN("No power request left to release, reference counter is 0"); + } + else + { + // Decrement ref counter and enable power gating if no requests remain + if (--drv->power_request_counter == 0) + { + ethosu_dev_set_clock_and_power(drv->dev, ETHOSU_CLOCK_Q_ENABLE, ETHOSU_POWER_Q_ENABLE); + } + } +} + +void ethosu_get_driver_version(struct ethosu_driver_version *ver) +{ + assert(ver != NULL); + ver->major = ETHOSU_DRIVER_VERSION_MAJOR; + ver->minor = ETHOSU_DRIVER_VERSION_MINOR; + ver->patch = ETHOSU_DRIVER_VERSION_PATCH; +} + +void ethosu_get_hw_info(struct ethosu_driver *drv, struct ethosu_hw_info *hw) +{ + assert(hw != NULL); + ethosu_dev_get_hw_info(drv->dev, hw); +} + +int ethosu_wait(struct ethosu_driver *drv, bool block) +{ + int ret = 0; + + switch (drv->job.state) + { + case ETHOSU_JOB_IDLE: + LOG_ERR("Inference job not running..."); + ret = -2; + break; + case ETHOSU_JOB_RUNNING: + if (!block) + { + // Inference still running, do not block + ret = 1; + break; + } + // fall through + case ETHOSU_JOB_DONE: + // Wait for interrupt in blocking mode. 
In non-blocking mode + // the interrupt has already triggered + ethosu_semaphore_take(drv->semaphore); + + // Inference done callback + ethosu_inference_end(drv, drv->job.user_arg); + + // Relase power gating disabled requirement + ethosu_release_power(drv); + + // Check NPU and interrupt status + if (drv->status_error) + { + LOG_ERR("NPU error(s) occured during inference."); + ethosu_dev_print_err_status(drv->dev); + + // Reset the NPU + (void)ethosu_soft_reset(drv); + // NPU is no longer in error state + drv->status_error = false; + + ret = -1; + } + + if (ret == 0) + { + // Invalidate cache + if (drv->job.base_addr_size != NULL) + { + for (int i = 0; i < drv->job.num_base_addr; i++) + { + ethosu_invalidate_dcache((uint32_t *)(uintptr_t)drv->job.base_addr[i], drv->job.base_addr_size[i]); + } + } + else + { + ethosu_invalidate_dcache(NULL, 0); + } + + LOG_DEBUG("Inference finished successfully..."); + } + + // Reset internal job (state resets to IDLE) + ethosu_reset_job(drv); + break; + + default: + LOG_ERR("Unexpected job state"); + ethosu_reset_job(drv); + ret = -1; + break; + } + + // Return inference job status + return ret; +} + +int ethosu_invoke_async(struct ethosu_driver *drv, + const void *custom_data_ptr, + const int custom_data_size, + uint64_t *const base_addr, + const size_t *base_addr_size, + const int num_base_addr, + void *user_arg) +{ + + const struct cop_data_s *data_ptr = custom_data_ptr; + const struct cop_data_s *data_end = (struct cop_data_s *)((ptrdiff_t)custom_data_ptr + custom_data_size); + + // Make sure an inference is not already running + if (drv->job.state != ETHOSU_JOB_IDLE) + { + LOG_ERR("Inference already running, or waiting to be cleared..."); + return -1; + } + + drv->job.state = ETHOSU_JOB_IDLE; + drv->job.custom_data_ptr = custom_data_ptr; + drv->job.custom_data_size = custom_data_size; + drv->job.base_addr = base_addr; + drv->job.base_addr_size = base_addr_size; + drv->job.num_base_addr = num_base_addr; + drv->job.user_arg = user_arg; + + // First word in custom_data_ptr should contain "Custom Operator Payload 1" + if (data_ptr->word != ETHOSU_FOURCC) + { + LOG_ERR("Custom Operator Payload: %" PRIu32 " is not correct, expected %x", data_ptr->word, ETHOSU_FOURCC); + goto err; + } + + // Custom data length must be a multiple of 32 bits + if ((custom_data_size % BYTES_IN_32_BITS) != 0) + { + LOG_ERR("custom_data_size=0x%x not a multiple of 4", (unsigned)custom_data_size); + goto err; + } + + data_ptr++; + + // Adjust base address to fast memory area + if (drv->fast_memory != 0 && num_base_addr >= FAST_MEMORY_BASE_ADDR_INDEX) + { + + if (base_addr_size != NULL && base_addr_size[FAST_MEMORY_BASE_ADDR_INDEX] > drv->fast_memory_size) + { + LOG_ERR("Fast memory area too small. 
fast_memory_size=%u, base_addr_size=%u", + drv->fast_memory_size, + base_addr_size[FAST_MEMORY_BASE_ADDR_INDEX]); + goto err; + } + + base_addr[FAST_MEMORY_BASE_ADDR_INDEX] = drv->fast_memory; + } + + drv->status_error = false; + + // Parse Custom Operator Payload data + while (data_ptr < data_end) + { + switch (data_ptr->driver_action_command) + { + case OPTIMIZER_CONFIG: + LOG_DEBUG("OPTIMIZER_CONFIG"); + struct opt_cfg_s const *opt_cfg_p = (const struct opt_cfg_s *)data_ptr; + + if (handle_optimizer_config(drv, opt_cfg_p) < 0) + { + goto err; + } + data_ptr += DRIVER_ACTION_LENGTH_32_BIT_WORD + OPTIMIZER_CONFIG_LENGTH_32_BIT_WORD; + break; + case COMMAND_STREAM: + // Vela only supports putting one COMMAND_STREAM per op + LOG_DEBUG("COMMAND_STREAM"); + const uint8_t *command_stream = (const uint8_t *)(data_ptr + 1); + int cms_length = (data_ptr->reserved << 16) | data_ptr->length; + + if (handle_command_stream(drv, command_stream, cms_length) < 0) + { + goto err; + } + data_ptr += DRIVER_ACTION_LENGTH_32_BIT_WORD + cms_length; + break; + case NOP: + LOG_DEBUG("NOP"); + data_ptr += DRIVER_ACTION_LENGTH_32_BIT_WORD; + break; + default: + LOG_ERR("UNSUPPORTED driver_action_command: %u", data_ptr->driver_action_command); + goto err; + break; + } + } + + return 0; +err: + LOG_ERR("Failed to invoke inference."); + ethosu_reset_job(drv); + return -1; +} + +int ethosu_invoke_v3(struct ethosu_driver *drv, + const void *custom_data_ptr, + const int custom_data_size, + uint64_t *const base_addr, + const size_t *base_addr_size, + const int num_base_addr, + void *user_arg) +{ + if (ethosu_invoke_async( + drv, custom_data_ptr, custom_data_size, base_addr, base_addr_size, num_base_addr, user_arg) < 0) + { + return -1; + } + + return ethosu_wait(drv, true); +} + +struct ethosu_driver *ethosu_reserve_driver(void) +{ + struct ethosu_driver *drv = NULL; + + LOG_INFO("Acquiring NPU driver handle"); + ethosu_semaphore_take(ethosu_semaphore); // This is meant to block until available + + ethosu_mutex_lock(ethosu_mutex); + drv = registered_drivers; + + while (drv != NULL) + { + if (!drv->reserved) + { + drv->reserved = true; + LOG_DEBUG("NPU driver handle %p reserved", drv); + break; + } + drv = drv->next; + } + ethosu_mutex_unlock(ethosu_mutex); + + if (!drv) + { + LOG_ERR("No NPU driver handle available, but semaphore taken"); + } + + return drv; +} + +void ethosu_release_driver(struct ethosu_driver *drv) +{ + ethosu_mutex_lock(ethosu_mutex); + if (drv != NULL && drv->reserved) + { + if (drv->job.state == ETHOSU_JOB_RUNNING || drv->job.state == ETHOSU_JOB_DONE) + { + // Give the inference one shot to complete or force kill the job + if (ethosu_wait(drv, false) == 1) + { + // Still running, soft reset the NPU and reset driver + drv->power_request_counter = 0; + ethosu_soft_reset(drv); + ethosu_reset_job(drv); + drv->status_error = false; + } + } + + drv->reserved = false; + LOG_DEBUG("NPU driver handle %p released", drv); + ethosu_semaphore_give(ethosu_semaphore); + } + ethosu_mutex_unlock(ethosu_mutex); +} +#endif // EI_ETHOS \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/BayesFunctionsF16.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_interface.h similarity index 51% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/BayesFunctionsF16.c rename to 
firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_interface.h index 0c95392..2409cb4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/CMSIS/DSP/Source/BayesFunctions/BayesFunctionsF16.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_interface.h @@ -1,15 +1,5 @@ -#include "edge-impulse-sdk/dsp/config.hpp" -#if EIDSP_LOAD_CMSIS_DSP_SOURCES -/* ---------------------------------------------------------------------- - * Project: CMSIS DSP Library - * Title: BayesFunctions.c - * Description: Combination of all bayes function f16 source files. - * - * - * Target Processor: Cortex-M cores - * -------------------------------------------------------------------- */ /* - * Copyright (C) 2020 ARM Limited or its affiliates. All rights reserved. + * Copyright (c) 2020-2021 Arm Limited. All rights reserved. * * SPDX-License-Identifier: Apache-2.0 * @@ -25,7 +15,18 @@ * See the License for the specific language governing permissions and * limitations under the License. */ +// clang-format off +#ifndef ETHOSU_INTERFACE_WRAPPER_ +#define ETHOSU_INTERFACE_WRAPPER_ + +#define xstr(a) str(a) +#define str(a) #a + +#define catm(a, b) catm_(a, b) +#define catm_(a, b) a##b + +#define ETHOSU_INTERFACE_FILE xstr(catm(ethos, ETHOSU_ARCH)_interface.h) -#include "arm_gaussian_naive_bayes_predict_f16.c" +#include ETHOSU_INTERFACE_FILE -#endif // EIDSP_LOAD_CMSIS_DSP_SOURCES +#endif // ETHOSU_INTERFACE_WRAPPER_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_log.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_log.h new file mode 100644 index 0000000..582b91d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_log.h @@ -0,0 +1,72 @@ +/* + * SPDX-FileCopyrightText: Copyright 2021-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef ETHOSU_LOG_H +#define ETHOSU_LOG_H + +/****************************************************************************** + * Includes + ******************************************************************************/ + +#include +#include + +/****************************************************************************** + * Defines + ******************************************************************************/ + +// Log severity levels +#define ETHOSU_LOG_ERR 0 +#define ETHOSU_LOG_WARN 1 +#define ETHOSU_LOG_INFO 2 +#define ETHOSU_LOG_DEBUG 3 + +// Define default log severity +#ifndef ETHOSU_LOG_SEVERITY +#define ETHOSU_LOG_SEVERITY ETHOSU_LOG_WARN +#endif + +// Log formatting +#define LOG(f, ...) (void)fprintf(stdout, f, ##__VA_ARGS__) + +#if ETHOSU_LOG_SEVERITY >= ETHOSU_LOG_ERR +#define LOG_ERR(f, ...) 
\ + (void)fprintf(stderr, "E: " f " (%s:%d)\n", ##__VA_ARGS__, strrchr("/" __FILE__, '/') + 1, __LINE__) +#else +#define LOG_ERR(f, ...) +#endif + +#if ETHOSU_LOG_SEVERITY >= ETHOSU_LOG_WARN +#define LOG_WARN(f, ...) (void)fprintf(stdout, "W: " f "\n", ##__VA_ARGS__) +#else +#define LOG_WARN(f, ...) +#endif + +#if ETHOSU_LOG_SEVERITY >= ETHOSU_LOG_INFO +#define LOG_INFO(f, ...) (void)fprintf(stdout, "I: " f "\n", ##__VA_ARGS__) +#else +#define LOG_INFO(f, ...) +#endif + +#if ETHOSU_LOG_SEVERITY >= ETHOSU_LOG_DEBUG +#define LOG_DEBUG(f, ...) (void)fprintf(stdout, "D: %s(): " f "\n", __FUNCTION__, ##__VA_ARGS__) +#else +#define LOG_DEBUG(f, ...) +#endif + +#endif \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_pmu.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_pmu.c new file mode 100644 index 0000000..6832005 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/src/ethosu_pmu.c @@ -0,0 +1,304 @@ +/* + * SPDX-FileCopyrightText: Copyright 2019-2023 Arm Limited and/or its affiliates + * + * SPDX-License-Identifier: Apache-2.0 + * + * Licensed under the Apache License, Version 2.0 (the License); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an AS IS BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
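Editor's note: because ETHOSU_LOG_SEVERITY above is itself guarded by #ifndef, the driver's verbosity can be changed per build with a single define; an illustrative sketch:

/* Illustrative compiler flags -- pick one:
 *   -DETHOSU_LOG_SEVERITY=ETHOSU_LOG_DEBUG   enables LOG_DEBUG/INFO/WARN/ERR
 *   -DETHOSU_LOG_SEVERITY=ETHOSU_LOG_ERR     errors only
 * LOG_* calls above the configured severity expand to nothing, so they add no runtime cost.
 */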
+ */ + +/***************************************************************************** + * Includes + *****************************************************************************/ +#if EI_ETHOS + +#include "ethosu_device.h" +#include "ethosu_driver.h" +#include "ethosu_interface.h" +#include "ethosu_log.h" +#include "pmu_ethosu.h" + +#include +#include +#include + +/***************************************************************************** + * Defines + *****************************************************************************/ + +#define MASK_0_31_BITS (0xFFFFFFFF) +#define MASK_32_47_BITS (0xFFFF00000000) + +#define COMMA , +#define SEMICOLON ; + +#define EVTYPE(A, name) \ + case PMU_EVENT_##name: \ + return ETHOSU_PMU_##name + +#define EVID(A, name) (PMU_EVENT_##name) + +/***************************************************************************** + * Variables + *****************************************************************************/ + +static const enum pmu_event eventbyid[] = {EXPAND_PMU_EVENT(EVID, COMMA)}; + +/***************************************************************************** + * Static functions + *****************************************************************************/ + +static enum ethosu_pmu_event_type pmu_event_type(uint32_t id) +{ + switch (id) + { + EXPAND_PMU_EVENT(EVTYPE, SEMICOLON); + default: + LOG_ERR("Unknown PMU event id: 0x%" PRIx32, id); + } + + return ETHOSU_PMU_SENTINEL; +} + +static uint32_t pmu_event_value(enum ethosu_pmu_event_type event) +{ + int a = event; + if ((a < ETHOSU_PMU_SENTINEL) && (a >= ETHOSU_PMU_NO_EVENT)) + { + return eventbyid[event]; + } + else + { + return (uint32_t)(-1); + } +} + +/***************************************************************************** + * Functions + *****************************************************************************/ + +void ETHOSU_PMU_Enable(struct ethosu_driver *drv) +{ + LOG_DEBUG("Enable PMU"); + struct pmcr_r pmcr = {0}; + pmcr.cnt_en = 1; + ethosu_request_power(drv); + drv->dev->reg->PMCR.word = pmcr.word; +} + +void ETHOSU_PMU_Disable(struct ethosu_driver *drv) +{ + LOG_DEBUG("Disable PMU"); + drv->dev->reg->PMCR.word = 0; + ethosu_release_power(drv); +} + +uint32_t ETHOSU_PMU_Get_NumEventCounters(void) +{ + return NPU_REG_PMEVCNTR_ARRLEN; +} + +void ETHOSU_PMU_Set_EVTYPER(struct ethosu_driver *drv, uint32_t num, enum ethosu_pmu_event_type type) +{ + assert(num < ETHOSU_PMU_NCOUNTERS); + uint32_t val = pmu_event_value(type); + LOG_DEBUG("num=%" PRIu32 ", type=%d, val=%" PRIu32, num, type, val); + drv->dev->reg->PMEVTYPER[num].word = val; +} + +enum ethosu_pmu_event_type ETHOSU_PMU_Get_EVTYPER(struct ethosu_driver *drv, uint32_t num) +{ + assert(num < ETHOSU_PMU_NCOUNTERS); + uint32_t val = drv->dev->reg->PMEVTYPER[num].word; + enum ethosu_pmu_event_type type = pmu_event_type(val); + LOG_DEBUG("num=%" PRIu32 ", type=%d, val=%" PRIu32, num, type, val); + return type; +} + +void ETHOSU_PMU_CYCCNT_Reset(struct ethosu_driver *drv) +{ + LOG_DEBUG("Reset PMU cycle counter"); + struct pmcr_r pmcr; + pmcr.word = drv->dev->reg->PMCR.word; + pmcr.cycle_cnt_rst = 1; + drv->dev->reg->PMCR.word = pmcr.word; +} + +void ETHOSU_PMU_EVCNTR_ALL_Reset(struct ethosu_driver *drv) +{ + LOG_DEBUG("Reset all events"); + struct pmcr_r pmcr; + pmcr.word = drv->dev->reg->PMCR.word; + pmcr.event_cnt_rst = 1; + drv->dev->reg->PMCR.word = pmcr.word; +} + +void ETHOSU_PMU_CNTR_Enable(struct ethosu_driver *drv, uint32_t mask) +{ + LOG_DEBUG("mask=0x%08" PRIx32, mask); + drv->dev->reg->PMCNTENSET.word = 
mask; +} + +void ETHOSU_PMU_CNTR_Disable(struct ethosu_driver *drv, uint32_t mask) +{ + LOG_DEBUG("mask=0x%08" PRIx32, mask); + drv->dev->reg->PMCNTENCLR.word = mask; +} + +uint32_t ETHOSU_PMU_CNTR_Status(struct ethosu_driver *drv) +{ + uint32_t pmcntenset = drv->dev->reg->PMCNTENSET.word; + LOG_DEBUG("mask=0x%08" PRIx32, pmcntenset); + return pmcntenset; +} + +uint64_t ETHOSU_PMU_Get_CCNTR(struct ethosu_driver *drv) +{ + uint32_t val_lo = drv->dev->reg->PMCCNTR.CYCLE_CNT_LO; + uint32_t val_hi = drv->dev->reg->PMCCNTR.CYCLE_CNT_HI; + uint64_t val = ((uint64_t)val_hi << 32) | val_lo; + + LOG_DEBUG("val=%" PRIu64, val); + return val; +} + +void ETHOSU_PMU_Set_CCNTR(struct ethosu_driver *drv, uint64_t val) +{ + uint32_t active = ETHOSU_PMU_CNTR_Status(drv) & ETHOSU_PMU_CCNT_Msk; + + LOG_DEBUG("val=%llu", val); + + if (active) + { + ETHOSU_PMU_CNTR_Disable(drv, ETHOSU_PMU_CCNT_Msk); + } + + drv->dev->reg->PMCCNTR.CYCLE_CNT_LO = val & MASK_0_31_BITS; + drv->dev->reg->PMCCNTR.CYCLE_CNT_HI = (val & MASK_32_47_BITS) >> 32; + + if (active) + { + ETHOSU_PMU_CNTR_Enable(drv, ETHOSU_PMU_CCNT_Msk); + } +} + +uint32_t ETHOSU_PMU_Get_EVCNTR(struct ethosu_driver *drv, uint32_t num) +{ + assert(num < ETHOSU_PMU_NCOUNTERS); + uint32_t val = drv->dev->reg->PMEVCNTR[num].word; + LOG_DEBUG("num=%" PRIu32 ", val=%" PRIu32, num, val); + + return val; +} + +void ETHOSU_PMU_Set_EVCNTR(struct ethosu_driver *drv, uint32_t num, uint32_t val) +{ + assert(num < ETHOSU_PMU_NCOUNTERS); + LOG_DEBUG("num=%" PRIu32 ", val=%" PRIu32, num, val); + drv->dev->reg->PMEVCNTR[num].word = val; +} + +uint32_t ETHOSU_PMU_Get_CNTR_OVS(struct ethosu_driver *drv) +{ + LOG_DEBUG(""); + return drv->dev->reg->PMOVSSET.word; +} + +void ETHOSU_PMU_Set_CNTR_OVS(struct ethosu_driver *drv, uint32_t mask) +{ + LOG_DEBUG(""); + drv->dev->reg->PMOVSCLR.word = mask; +} + +void ETHOSU_PMU_Set_CNTR_IRQ_Enable(struct ethosu_driver *drv, uint32_t mask) +{ + LOG_DEBUG("mask=0x%08" PRIx32, mask); + drv->dev->reg->PMINTSET.word = mask; +} + +void ETHOSU_PMU_Set_CNTR_IRQ_Disable(struct ethosu_driver *drv, uint32_t mask) +{ + LOG_DEBUG("mask=0x%08" PRIx32, mask); + drv->dev->reg->PMINTCLR.word = mask; +} + +uint32_t ETHOSU_PMU_Get_IRQ_Enable(struct ethosu_driver *drv) +{ + uint32_t pmint = drv->dev->reg->PMINTSET.word; + LOG_DEBUG("mask=0x%08" PRIx32, pmint); + return pmint; +} + +void ETHOSU_PMU_CNTR_Increment(struct ethosu_driver *drv, uint32_t mask) +{ + LOG_DEBUG(""); + uint32_t cntrs_active = ETHOSU_PMU_CNTR_Status(drv); + + // Disable counters + ETHOSU_PMU_CNTR_Disable(drv, mask); + + // Increment cycle counter + if (mask & ETHOSU_PMU_CCNT_Msk) + { + uint64_t val = ETHOSU_PMU_Get_CCNTR(drv) + 1; + drv->dev->reg->PMCCNTR.CYCLE_CNT_LO = val & MASK_0_31_BITS; + drv->dev->reg->PMCCNTR.CYCLE_CNT_HI = (val & MASK_32_47_BITS) >> 32; + } + + for (int i = 0; i < ETHOSU_PMU_NCOUNTERS; i++) + { + if (mask & (1u << i)) + { + uint32_t val = ETHOSU_PMU_Get_EVCNTR(drv, i); + drv->dev->reg->PMEVCNTR[i].word = val + 1; + } + } + + // Reenable the active counters + ETHOSU_PMU_CNTR_Enable(drv, cntrs_active); +} + +void ETHOSU_PMU_PMCCNTR_CFG_Set_Start_Event(struct ethosu_driver *drv, enum ethosu_pmu_event_type start_event) +{ + LOG_DEBUG("start_event=%u", start_event); + uint32_t val = pmu_event_value(start_event); + struct pmccntr_cfg_r cfg; + cfg.word = drv->dev->reg->PMCCNTR_CFG.word; + cfg.CYCLE_CNT_CFG_START = val; + drv->dev->reg->PMCCNTR_CFG.word = cfg.word; +} + +void ETHOSU_PMU_PMCCNTR_CFG_Set_Stop_Event(struct ethosu_driver *drv, enum ethosu_pmu_event_type 
stop_event) +{ + LOG_DEBUG("stop_event=%u", stop_event); + uint32_t val = pmu_event_value(stop_event); + struct pmccntr_cfg_r cfg; + cfg.word = drv->dev->reg->PMCCNTR_CFG.word; + cfg.CYCLE_CNT_CFG_STOP = val; + drv->dev->reg->PMCCNTR_CFG.word = cfg.word; +} + +uint32_t ETHOSU_PMU_Get_QREAD(struct ethosu_driver *drv) +{ + uint32_t val = drv->dev->reg->QREAD.word; + LOG_DEBUG("qread=%" PRIu32, val); + return val; +} + +uint32_t ETHOSU_PMU_Get_STATUS(struct ethosu_driver *drv) +{ + uint32_t val = drv->dev->reg->STATUS.word; + LOG_DEBUG("status=0x%" PRIx32, val); + return val; +} + +#endif // EI_ETHOS \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/version.txt b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/version.txt new file mode 100644 index 0000000..5656be6 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ethos-core-driver/version.txt @@ -0,0 +1 @@ +v1.23.2 \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax-we2/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax-we2/debug_log.cpp new file mode 100644 index 0000000..3dd5d13 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax-we2/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_HIMAX_WE2 == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_HIMAX_WE2 == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax-we2/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax-we2/ei_classifier_porting.cpp new file mode 100644 index 0000000..8c291d3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax-we2/ei_classifier_porting.cpp @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
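Editor's note: combined with the inference entry points added earlier in this patch, the PMU interface above allows simple cycle accounting around a run. A hedged sketch (error handling elided; drv, the custom-operator payload, and the base-address arrays are assumed to be prepared exactly as for any other ethosu_invoke_v3() call):

#include <stddef.h>
#include <stdint.h>
#include "ethosu_driver.h"
#include "pmu_ethosu.h"

/* Count NPU cycles spent in one inference (sketch only). */
uint64_t run_and_count_cycles(struct ethosu_driver *drv,
                              const void *custom_data, int custom_data_size,
                              uint64_t *bases, const size_t *base_sizes, int num_bases)
{
    ETHOSU_PMU_Enable(drv);                           /* also holds a power request */
    ETHOSU_PMU_CYCCNT_Reset(drv);
    ETHOSU_PMU_CNTR_Enable(drv, ETHOSU_PMU_CCNT_Msk); /* start the cycle counter */

    (void)ethosu_invoke_v3(drv, custom_data, custom_data_size,
                           bases, base_sizes, num_bases, NULL);

    uint64_t cycles = ETHOSU_PMU_Get_CCNTR(drv);
    ETHOSU_PMU_CNTR_Disable(drv, ETHOSU_PMU_CCNT_Msk);
    ETHOSU_PMU_Disable(drv);
    return cycles;
}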
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_HIMAX_WE2 == 1 + +/* Include ----------------------------------------------------------------- */ +#include +#include +#include +#include +#include "xprintf.h" +extern "C" { + #include "timer_interface.h" +}; + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + hx_drv_timer_cm55x_delay_ms(time_ms, TIMER_STATE_DC); + + return EI_IMPULSE_OK; +} + +// Should be called at least once every ~10.7 seconds +uint64_t ei_read_timer_ms() +{ + uint32_t tick, loop_cnt; + SystemGetTick(&tick, &loop_cnt); + + // tick is counting down, so we need to add elapsed ticks to the total tick count + uint64_t elapsed_ms = (uint64_t)loop_cnt * (uint64_t)(SysTick_LOAD_RELOAD_Msk+1) + (SysTick_LOAD_RELOAD_Msk + 1 - tick); + // convert ticks to ms knowing the CPU frequency + elapsed_ms = elapsed_ms / (SystemCoreClock / 1000); + + return elapsed_ms; +} + +uint64_t ei_read_timer_us() +{ + uint32_t tick, loop_cnt; + SystemGetTick(&tick, &loop_cnt); + + // tick is counting down, so we need to add elapsed ticks to the total tick count + uint64_t elapsed_us = (uint64_t)loop_cnt * (uint64_t)(SysTick_LOAD_RELOAD_Msk+1) + (SysTick_LOAD_RELOAD_Msk + 1 - tick); + // convert ticks to ms knowing the CPU frequency + elapsed_us = elapsed_us / (SystemCoreClock / 1000000); + + return elapsed_us; +} + +void ei_serial_set_baudrate(int baudrate) +{ + // hx_drv_uart_initial((HX_DRV_UART_BAUDRATE_E)baudrate); +} + +void ei_putchar(char c) +{ + /* Send char to serial output */ + xputc(c); +} + +__attribute__((weak)) void ei_printf(const char *format, ...) { + va_list args; + va_start(args, format); + xvprintf(format, args); + va_end(args); +} + +__attribute__((weak)) void ei_printf_float(float f) { + float n = f; + + static double PRECISION = 0.00001; + static int MAX_NUMBER_STRING_SIZE = 32; + + char s[MAX_NUMBER_STRING_SIZE]; + + if (n == 0.0) { + ei_printf("0.00000"); + } else { + int digit, m; //, m1; + char *c = s; + int neg = (n < 0); + if (neg) { + n = -n; + } + // calculate magnitude + m = log10(n); + if (neg) { + *(c++) = '-'; + } + if (m < 1.0) { + m = 0; + } + // convert the number + while (n > PRECISION || m >= 0) { + double weight = pow(10.0, m); + if (weight > 0 && !isinf(weight)) { + digit = floor(n / weight); + n -= (digit * weight); + *(c++) = '0' + digit; + } + if (m == 0 && n > 0) { + *(c++) = '.'; + } + m--; + } + *(c) = '\0'; + ei_printf("%s", s); + } +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // #if EI_PORTING_HIMAX_WE2 == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax/debug_log.cpp new file mode 100644 index 0000000..9022698 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. 
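Editor's note: the "at least once every ~10.7 seconds" comment above (repeated in the plain Himax port later in this patch) follows from 32-bit tick wrap-around: at the 400 MHz timer clock declared as HIMAX_TIMER_CLK_FREQ_HZ in that port, a 32-bit counter rolls over after 2^32 / 400e6 ≈ 10.74 s, so the timer-read function must run at least that often to observe every wrap. A quick check, assuming that clock:

#include <stdint.h>

/* 2^32 ticks at an assumed 400 MHz tick clock -> ~10.74 s between 32-bit wrap-arounds. */
static const double himax_tick_wrap_seconds = 4294967296.0 / 400000000.0; /* ~= 10.74 */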
+ * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_HIMAX == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_HIMAX == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax/ei_classifier_porting.cpp new file mode 100644 index 0000000..1b62b56 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/himax/ei_classifier_porting.cpp @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_HIMAX == 1 + +/* Include ----------------------------------------------------------------- */ +#include +#include +#include +#include "hx_drv_tflm.h" +#include + + +/* Constants ---------------------------------------------------------------- */ +#define HIMAX_TIMER_CLK_FREQ_HZ 400000000 +#define HIMAX_TIMER_TICK_1SEC (HIMAX_TIMER_CLK_FREQ_HZ/1) +#define HIMAX_TIMER_TICK_1MSEC (HIMAX_TIMER_TICK_1SEC/1000) + +extern "C" void print_out(const char *format, va_list args); + +/* Private variables -------------------------------------------------------- */ +static uint64_t system_time_ms = 0; +static uint32_t prev_tick_us = 0; + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + uint64_t end_delay, cur_time = 0; + + end_delay = (uint64_t)time_ms + ei_read_timer_ms(); + + do { + cur_time = ei_read_timer_ms(); + } while (cur_time < end_delay); + + return EI_IMPULSE_OK; +} + +// Should be called at least once every ~10.7 seconds +uint64_t ei_read_timer_ms() +{ + uint32_t tick_us, diff_tick_us, elapsed_time_ms; + + // handles 32-bit overflows + hx_drv_tick_get(&tick_us); + diff_tick_us = (uint32_t)(tick_us - prev_tick_us); + + // integer number of ms elapsed + elapsed_time_ms = diff_tick_us / HIMAX_TIMER_TICK_1MSEC; + + // update system time and previous tick reference + if (elapsed_time_ms > 0) { + system_time_ms += elapsed_time_ms; + + // use the remainder of ms elapsed + // handles 32-bit overflows + prev_tick_us = (uint32_t)(tick_us - (diff_tick_us % HIMAX_TIMER_TICK_1MSEC)); + } + + return system_time_ms; +} + +uint64_t ei_read_timer_us() +{ + return ei_read_timer_ms() * 1000; +} + +void ei_serial_set_baudrate(int baudrate) +{ + hx_drv_uart_initial((HX_DRV_UART_BAUDRATE_E)baudrate); +} + +void ei_putchar(char c) +{ + /* Send char to serial output */ + hx_drv_uart_print("%c", c); +} + +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + va_list args; + va_start(args, format); + print_out(format, args); + va_end(args); +} + +__attribute__((weak)) void ei_printf_float(float f) { + float n = f; + + static double PRECISION = 0.00001; + static int MAX_NUMBER_STRING_SIZE = 32; + + char s[MAX_NUMBER_STRING_SIZE]; + + if (n == 0.0) { + ei_printf("0.00000"); + } else { + int digit, m; //, m1; + char *c = s; + int neg = (n < 0); + if (neg) { + n = -n; + } + // calculate magnitude + m = log10(n); + if (neg) { + *(c++) = '-'; + } + if (m < 1.0) { + m = 0; + } + // convert the number + while (n > PRECISION || m >= 0) { + double weight = pow(10.0, m); + if (weight > 0 && !isinf(weight)) { + digit = floor(n / weight); + n -= (digit * weight); + *(c++) = '0' + digit; + } + if (m == 0 && n > 0) { + *(c++) = '.'; + } + m--; + } + *(c) = '\0'; + ei_printf("%s", s); + } +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // #if EI_PORTING_HIMAX == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/iar/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/iar/debug_log.cpp new file mode 100644 index 0000000..ae378e4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/iar/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_IAR == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_IAR == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/iar/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/iar/ei_classifier_porting.cpp new file mode 100644 index 0000000..3d450f0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/iar/ei_classifier_porting.cpp @@ -0,0 +1,89 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_IAR == 1 + +#include +#include +#include + +#include "main.h" +#include "stm32f4xx_hal.h" + + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + + HAL_Delay(time_ms); + + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + + return HAL_GetTick(); +} + +uint64_t ei_read_timer_us() { + + return HAL_GetTick() * 1000; +} + +__attribute__((weak)) void ei_printf(const char *format, ...) { + + va_list myargs; + va_start(myargs, format); + vprintf(format, myargs); + va_end(myargs); +} + +__attribute__((weak)) void ei_printf_float(float f) { + ei_printf("%f", f); +} + +__attribute__((weak)) void ei_putchar(char data) +{ + putchar(data); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_IAR == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/infineon-psoc62/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/infineon-psoc62/debug_log.cpp new file mode 100644 index 0000000..47dfb2f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/infineon-psoc62/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_INFINEONPSOC62 == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_INFINEONPSOC62 == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/infineon-psoc62/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/infineon-psoc62/ei_classifier_porting.cpp new file mode 100644 index 0000000..23e5b27 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/infineon-psoc62/ei_classifier_porting.cpp @@ -0,0 +1,158 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_INFINEONPSOC62 == 1 + +#include +#include +#include +#include "unistd.h" +#include "cyhal.h" +#ifdef FREERTOS_ENABLED +#include +#include +#include +#else /* bare-metal */ +#include "cyhal_lptimer.h" + +static bool timer_init = false; +static volatile uint64_t tick = 0; + +static void systick_isr(void) +{ + tick++; +} +#endif + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +#ifdef FREERTOS_ENABLED +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + vTaskDelay(time_ms / portTICK_PERIOD_MS); + + return EI_IMPULSE_OK; +} + +__attribute__((weak)) uint64_t ei_read_timer_ms() { + + return xTaskGetTickCount(); +} + +__attribute__((weak)) uint64_t ei_read_timer_us() { + + return xTaskGetTickCount()*1000; +} +#else /* Bare-metal */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + cyhal_system_delay_ms(time_ms); + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + if(timer_init == false) { + cyhal_clock_t clock; + uint32_t freq; + + // get IMO clock frequency + cyhal_clock_reserve(&clock, &CYHAL_CLOCK_IMO); + freq = cyhal_clock_get_frequency(&clock); + cyhal_clock_free(&clock); + + // set SysTick to 1 ms + Cy_SysTick_Init(CY_SYSTICK_CLOCK_SOURCE_CLK_IMO, (freq / 1000) - 1); + Cy_SysTick_SetCallback(0, systick_isr); + timer_init = true; + return 0; + } + return tick; +} + +uint64_t ei_read_timer_us() { + return ei_read_timer_ms() * 1000; +} +#endif /* FREERTOS_ENABLED */ + +void ei_putchar(char c) +{ + putchar(c); +} + +__attribute__((weak)) char ei_getchar(void) +{ + return getchar(); +} + +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + + char buffer[256]; + va_list myargs; + va_start(myargs, format); + vsnprintf(buffer, 256, format, myargs); + va_end(myargs); + + printf("%s", buffer); +} + +__attribute__((weak)) void ei_printf_float(float f) { + ei_printf("%f", f); +} + +#ifdef FREERTOS_ENABLED +__attribute__((weak)) void *ei_malloc(size_t size) { + return pvPortMalloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + void *mem = NULL; + + /* Infineon port of FreeRTOS does not support pvPortCalloc */ + mem = pvPortMalloc(nitems * size); + if (mem) { + /* zero the memory */ + memset(mem, 0, nitems * size); + } + return mem; +} + +__attribute__((weak)) void ei_free(void *ptr) { + vPortFree(ptr); +} +#else +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} +#endif + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_INFINEONPSOC62 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/lib/at_base64_lib.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/lib/at_base64_lib.h deleted file mode 100644 index 49cd424..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/lib/at_base64_lib.h +++ /dev/null @@ -1,81 +0,0 @@ -#ifndef _EDGE_IMPULSE_SDK_BASE64_H_ -#define _EDGE_IMPULSE_SDK_BASE64_H_ - -/* - base64.cpp and base64.h - - Copyright (C) 2004-2008 René Nyffenegger - - This source code is provided 'as-is', without any express or implied - warranty. In no event will the author be held liable for any damages - arising from the use of this software. - - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this source code must not be misrepresented; you must not - claim that you wrote the original source code. If you use this source code - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original source code. - - 3. This notice may not be removed or altered from any source distribution. 
- - René Nyffenegger rene.nyffenegger@adp-gmbh.ch - -*/ - -#include -#include -#include - -static const char *base64_chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz" - "0123456789+/"; - -static void base64_encode(const char *input, size_t input_size, void (*putc_f)(char)) -{ - int i = 0; - int j = 0; - unsigned char char_array_3[3]; - unsigned char char_array_4[4]; - - while (input_size--) { - char_array_3[i++] = *(input++); - if (i == 3) { - char_array_4[0] = (char_array_3[0] & 0xfc) >> 2; - char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4); - char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6); - char_array_4[3] = char_array_3[2] & 0x3f; - - for (i = 0; (i < 4); i++) { - putc_f(base64_chars[char_array_4[i]]); - } - i = 0; - } - } - - if (i) { - for (j = i; j < 3; j++) { - char_array_3[j] = '\0'; - } - - char_array_4[0] = (char_array_3[0] & 0xfc) >> 2; - char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4); - char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6); - char_array_4[3] = char_array_3[2] & 0x3f; - - for (j = 0; (j < i + 1); j++) { - putc_f(base64_chars[char_array_4[j]]); - } - - while ((i++ < 3)) { - putc_f('='); - } - } -} - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mbed/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mbed/debug_log.cpp new file mode 100644 index 0000000..a7ac637 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mbed/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_MBED == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// On mbed platforms, we set up a serial port and write to it for debug logging. +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_MBED == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mbed/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mbed/ei_classifier_porting.cpp new file mode 100644 index 0000000..02a1431 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mbed/ei_classifier_porting.cpp @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_MBED == 1 + +#include "mbed.h" +#include +#include +#include "us_ticker_api.h" + +#define EI_WEAK_FN __attribute__((weak)) + +EI_WEAK_FN EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +EI_WEAK_FN EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { +#if MBED_VERSION >= MBED_ENCODE_VERSION((5), (11), (0)) + rtos::ThisThread::sleep_for(time_ms); +#else + wait_ms(time_ms); +#endif // MBED_VERSION >= MBED_ENCODE_VERSION((5), (11), (0)) + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { +#if DEVICE_USTICKER + return us_ticker_read() / 1000L; +#elif DEVICE_LPTICKER + return ei_read_timer_us() / 1000L; +#else + #error "Target does not have DEVICE_LPTICKER nor DEVICE_USTICKER" +#endif +} + +uint64_t ei_read_timer_us() { +#if DEVICE_USTICKER + return us_ticker_read(); +#elif DEVICE_LPTICKER + const ticker_info_t *info = lp_ticker_get_info(); + uint32_t n_ticks = lp_ticker_read(); + return (uint64_t)n_ticks * (1000000UL / info->frequency); +#else + #error "Target does not have DEVICE_LPTICKER nor DEVICE_USTICKER" +#endif +} + +__attribute__((weak)) void ei_printf(const char *format, ...) { + va_list myargs; + va_start(myargs, format); + vprintf(format, myargs); + va_end(myargs); +} + +__attribute__((weak)) void ei_printf_float(float f) { + ei_printf("%f", f); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_MBED == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mingw32/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mingw32/debug_log.cpp new file mode 100644 index 0000000..3b57943 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mingw32/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_MINGW32 == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_MINGW32 == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mingw32/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mingw32/ei_classifier_porting.cpp new file mode 100644 index 0000000..6223e17 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/mingw32/ei_classifier_porting.cpp @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_MINGW32 == 1 + +#include +#include +#include +#include +#include +#include +#include +#include + +EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + usleep(time_ms * 1000); + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + auto now = std::chrono::system_clock::now(); + auto duration = now.time_since_epoch(); + auto millis = std::chrono::duration_cast(duration).count(); + return static_cast(millis); +} + +uint64_t ei_read_timer_us() { + auto now = std::chrono::system_clock::now(); + auto duration = now.time_since_epoch(); + auto micros = std::chrono::duration_cast(duration).count(); + return static_cast(micros); +} + +void ei_printf(const char *format, ...) { + va_list myargs; + va_start(myargs, format); + vprintf(format, myargs); + va_end(myargs); +} + +void ei_printf_float(float f) { + ei_printf("%f", f); +} + +void *ei_malloc(size_t size) { + return malloc(size); +} + +void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +void ei_free(void *ptr) { + free(ptr); +} + +#endif // EI_PORTING_MINGW32 == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/particle/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/particle/debug_log.cpp new file mode 100644 index 0000000..58b697b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/particle/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_PARTICLE == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// On mbed platforms, we set up a serial port and write to it for debug logging. +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_PARTICLE diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/particle/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/particle/ei_classifier_porting.cpp new file mode 100644 index 0000000..b280d33 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/particle/ei_classifier_porting.cpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2023 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_PARTICLE == 1 + +#include +#include +#include + +#define EI_WEAK_FN __attribute__((weak)) + +EI_WEAK_FN EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +EI_WEAK_FN EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + delay(time_ms); + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + return millis(); +} + +uint64_t ei_read_timer_us() { + return micros(); +} + +void ei_serial_set_baudrate(int baudrate) +{ + +} + +EI_WEAK_FN void ei_putchar(char c) +{ + Serial.write(c); +} + +EI_WEAK_FN char ei_getchar() +{ + char ch = 0; + if (Serial.available() > 0) { + ch = Serial.read(); + } + return ch; +} + +/** + * Printf function uses vsnprintf and output using Arduino Serial + */ +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + static char print_buf[1024] = { 0 }; + + va_list args; + va_start(args, format); + int r = vsnprintf(print_buf, sizeof(print_buf), format, args); + va_end(args); + + if (r > 0) { + Serial.write(print_buf); + } +} + +__attribute__((weak)) void ei_printf_float(float f) { + Serial.print(f, 6); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_PARTICLE == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/posix/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/posix/debug_log.cpp new file mode 100644 index 0000000..6f7164a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/posix/debug_log.cpp @@ -0,0 +1,32 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_POSIX == 1 + +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_POSIX == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/posix/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/posix/ei_classifier_porting.cpp new file mode 100644 index 0000000..fe7a60d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/posix/ei_classifier_porting.cpp @@ -0,0 +1,102 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "edge-impulse-sdk/porting/ei_classifier_porting.h" +#if EI_PORTING_POSIX == 1 + +#include +#include +#include +#include +#include +#include +#include + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + usleep(time_ms * 1000); + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + return ei_read_timer_us() / 1000; +} + +uint64_t ei_read_timer_us() { + uint64_t us; // Milliseconds + uint64_t s; // Seconds + struct timespec spec; + + clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec); + + s = spec.tv_sec; + us = round(spec.tv_nsec / 1.0e3); // Convert nanoseconds to micros + if (us > 999999) { + s++; + us = 0; + } + + return (s * 1000000) + us; +} + +__attribute__((weak)) void ei_printf(const char *format, ...) { + va_list myargs; + va_start(myargs, format); + vprintf(format, myargs); + va_end(myargs); +} + +__attribute__((weak)) void ei_printf_float(float f) { + ei_printf("%f", f); +} + +__attribute__((weak)) void ei_putchar(char data) +{ + putchar(data); +} + +__attribute__((weak)) char ei_getchar(void) +{ + return getchar(); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_POSIX == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/raspberry/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/raspberry/ei_classifier_porting.cpp new file mode 100644 index 0000000..8b699fb --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/raspberry/ei_classifier_porting.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_RASPBERRY == 1 + +#include "pico/stdlib.h" +#include +#include +#include +#include + +#ifdef FREERTOS_ENABLED +// Include FreeRTOS for delay +#include +#include +#endif + +#define EI_WEAK_FN __attribute__((weak)) + +EI_WEAK_FN EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +EI_WEAK_FN EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { +#ifdef FREERTOS_ENABLED + vTaskDelay(time_ms / portTICK_PERIOD_MS); +#else + sleep_ms(time_ms); +#endif + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + return to_ms_since_boot(get_absolute_time()); +} + +uint64_t ei_read_timer_us() { + return to_us_since_boot(get_absolute_time()); +} + +void ei_putchar(char c) +{ + /* Send char to serial output */ + ei_printf("%c", c); +} + +/** + * Printf function uses vsnprintf and output using USB Serial + */ +__attribute__((weak)) void ei_printf(const char *format, ...) { + static char print_buf[1024] = { 0 }; + + va_list args; + va_start(args, format); + int r = vsnprintf(print_buf, sizeof(print_buf), format, args); + va_end(args); + + if (r > 0) { + printf(print_buf); + } +} + +__attribute__((weak)) void ei_printf_float(float f) { + printf("%f", f); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { +#ifdef FREERTOS_ENABLED + return pvPortMalloc(size); +#else + return malloc(size); +#endif +} + +#ifdef FREERTOS_ENABLED +void *pvPortCalloc(size_t sNb, size_t sSize) +{ + void *vPtr = NULL; + if (sSize > 0) { + vPtr = pvPortMalloc(sSize * sNb); // Call FreeRTOS or other standard API + if(vPtr) + memset(vPtr, 0, (sSize * sNb)); // Must required + } + return vPtr; +} +#endif + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { +#ifdef FREERTOS_ENABLED + return pvPortCalloc(nitems, size); +#else + return calloc(nitems, size); +#endif +} + +__attribute__((weak)) void ei_free(void *ptr) { +#ifdef FREERTOS_ENABLED + vPortFree(ptr); +#else + free(ptr); +#endif +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_RASPBERRY == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/renesas-ra/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/renesas-ra/debug_log.cpp new file mode 100644 index 0000000..47c6847 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/renesas-ra/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if ((EI_PORTING_RENESASRA65 == 1) || (EI_PORTING_RENESASRA8D1 == 1)) + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_RENESASRA65 == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/renesas-ra/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/renesas-ra/ei_classifier_porting.cpp new file mode 100644 index 0000000..0ef561f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/renesas-ra/ei_classifier_porting.cpp @@ -0,0 +1,327 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +/* Includes */ +#include "../ei_classifier_porting.h" + +#if ((EI_PORTING_RENESASRA65 == 1) || (EI_PORTING_RENESASRA8D1 == 1)) + +#include +#include +#include +#include "unistd.h" +#include "peripheral/uart_ep.h" +#include + +extern "C" uint32_t timer_get_ms(void); +extern "C" uint32_t timer_get_us(void); + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + + uint64_t start_time = ei_read_timer_ms(); + + while(start_time + time_ms > ei_read_timer_ms()){}; + + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + + return timer_get_ms(); +} + +uint64_t ei_read_timer_us() { + + return timer_get_us(); +} + +__attribute__((weak)) char ei_getchar() +{ + // dummy implementation + char ch = 0; + return ch; +} + +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + + char buffer[1024] = {0}; + int length; + va_list myargs; + va_start(myargs, format); + length = vsnprintf(buffer, sizeof(buffer), format, myargs); + va_end(myargs); + + if (length > 0) { + uart_print_user_msg((uint8_t *)buffer, length); + } +} + +__attribute__((weak)) void ei_printf_float(float f) { + float n = f; + + static double PRECISION = 0.00001; + static int MAX_NUMBER_STRING_SIZE = 32; + + char s[MAX_NUMBER_STRING_SIZE]; + + if (n == 0.0) { + strcpy(s, "0"); + } + else { + int digit, m; + char *c = s; + int neg = (n < 0); + if (neg) { + n = -n; + } + // calculate magnitude + m = log10(n); + if (neg) { + *(c++) = '-'; + } + if (m < 1.0) { + m = 0; + } + // convert the number + while (n > PRECISION || m >= 0) { + double weight = pow(10.0, m); + if (weight > 0 && !isinf(weight)) { + digit = floor(n / weight); + n -= (digit * weight); + *(c++) = '0' + digit; + } + if (m == 0 && n > 0) { + *(c++) = '.'; + } + m--; + } + *(c) = '\0'; + } + + ei_printf("%s", s); +} + +/** + * + * @param c + */ +void ei_putchar(char c) +{ + uart_putc(c); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#elif EI_PORTING_RENESASRA8D1_FREERTOS == 1 + +#include +#include +#include +#include "unistd.h" +#include "peripheral/uart.h" +#include "peripheral/usb/usb.h" +#include + +#include "FreeRTOS.h" +#include "task.h" +#include "stream_buffer.h" +#include "common_data.h" + +extern "C" uint32_t timer_get_ms(void); +extern "C" uint32_t timer_get_us(void); + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + + vTaskDelay(time_ms / portTICK_PERIOD_MS); + + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + + return timer_get_ms(); +} + +uint64_t ei_read_timer_us() { + + return timer_get_us(); +} + +__attribute__((weak)) char ei_getchar() +{ + // dummy implementation + char ch = 0; + return ch; +} + +#include + +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + + char buffer[1024] = {0}; + int length; + va_list myargs; + va_start(myargs, format); + length = vsnprintf(buffer, sizeof(buffer), format, myargs); + va_end(myargs); + + if (length > 0) { + //uart_print_user_msg((uint8_t *)buffer, length); + //xStreamBufferSend(g_uart_buffer, buffer, length, 0); + //uart_print_to_console((uint8_t *)buffer, length); + comms_send((uint8_t *)buffer, length, 1000); + } +} + +__attribute__((weak)) void ei_printf_float(float f) { + float n = f; + + static double PRECISION = 0.00001; + static int MAX_NUMBER_STRING_SIZE = 32; + + char s[MAX_NUMBER_STRING_SIZE]; + + if (n == 0.0) { + strcpy(s, "0"); + } + else { + int digit, m; + char *c = s; + int neg = (n < 0); + if (neg) { + n = -n; + } + // calculate magnitude + m = log10(n); + if (neg) { + *(c++) = '-'; + } + if (m < 1.0) { + m = 0; + } + // convert the number + while (n > PRECISION || m >= 0) { + double weight = pow(10.0, m); + if (weight > 0 && !isinf(weight)) { + digit = floor(n / weight); + n -= (digit * weight); + *(c++) = '0' + digit; + } + if (m == 0 && n > 0) { + *(c++) = '.'; + } + m--; + } + *(c) = '\0'; + } + + ei_printf("%s", s); +} + +/** + * + * @param c + */ +void ei_putchar(char c) +{ + ei_printf("%c", c); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + if (size > 0){ + return pvPortMalloc(size); + } + else { + return NULL; + } +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + if ((size*nitems) > 0) { + return pvPortCalloc(nitems, size); + } + else { + return NULL; + } +} + +__attribute__((weak)) void ei_free(void *ptr) { + vPortFree(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +void * operator new( size_t size ) +{ + return pvPortMalloc( size ); +} + +void * operator new[]( size_t size ) +{ + return pvPortMalloc(size); +} + +void operator delete( void * ptr ) +{ + vPortFree ( ptr ); +} + +void operator delete[]( void * ptr ) +{ + vPortFree ( ptr ); +} + +#endif // EI_PORTING_RENESASRA8D1_FREERTOS == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/seeed-vision-ai/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/seeed-vision-ai/debug_log.cpp new file mode 100644 index 0000000..9022698 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/seeed-vision-ai/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_HIMAX == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_HIMAX == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/seeed-vision-ai/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/seeed-vision-ai/ei_classifier_porting.cpp new file mode 100644 index 0000000..39145da --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/seeed-vision-ai/ei_classifier_porting.cpp @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_SEEED_VISION_AI == 1 + +/* Include ----------------------------------------------------------------- */ +#include +#include +#include +// #include "hx_drv_tflm.h" +#include "hx_drv_timer.h" +#include + +#include "embARC_debug.h" + + +/* Constants ---------------------------------------------------------------- */ +#define HIMAX_TIMER_CLK_FREQ_HZ 400000000 +#define HIMAX_TIMER_TICK_1SEC (HIMAX_TIMER_CLK_FREQ_HZ/1) +#define HIMAX_TIMER_TICK_1MSEC (HIMAX_TIMER_TICK_1SEC/1000) + +/* Private variables -------------------------------------------------------- */ + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + + board_delay_ms(time_ms); + + return EI_IMPULSE_OK; +} + +// Should be called at least once every ~10.7 seconds +uint64_t ei_read_timer_ms() +{ + static uint64_t system_time_ms = 0; + static uint64_t prev_tick_us = 0; + uint64_t tick_us; + int64_t diff_tick_us, elapsed_time_ms; + + tick_us = board_get_cur_us(); + diff_tick_us = tick_us - prev_tick_us; + elapsed_time_ms = diff_tick_us / 1000; + + // update system time and previous tick reference + if (elapsed_time_ms > 0) { + system_time_ms += elapsed_time_ms; + prev_tick_us = tick_us; + } + + return system_time_ms; +} + +uint64_t ei_read_timer_us() +{ + return board_get_cur_us(); +} + +void ei_serial_set_baudrate(int baudrate) +{ + // hx_drv_uart_initial((HX_DRV_UART_BAUDRATE_E)baudrate); +} + +void ei_putchar(char c) +{ + /* Send char to serial output */ + ei_printf("%c", c); +} + +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + va_list args; + va_start(args, format); + // print_out(format, args); + xvprintf(format, args); + va_end(args); +} + +__attribute__((weak)) void ei_printf_float(float f) { + float n = f; + + static double PRECISION = 0.00001; + static int MAX_NUMBER_STRING_SIZE = 32; + + char s[MAX_NUMBER_STRING_SIZE]; + + if (n == 0.0) { + ei_printf("0.00000"); + } else { + int digit, m; //, m1; + char *c = s; + int neg = (n < 0); + if (neg) { + n = -n; + } + // calculate magnitude + m = log10(n); + if (neg) { + *(c++) = '-'; + } + if (m < 1.0) { + m = 0; + } + // convert the number + while (n > PRECISION || m >= 0) { + double weight = pow(10.0, m); + if (weight > 0 && !isinf(weight)) { + digit = floor(n / weight); + n -= (digit * weight); + *(c++) = '0' + digit; + } + if (m == 0 && n > 0) { + *(c++) = '.'; + } + m--; + } + *(c) = '\0'; + ei_printf("%s", s); + } +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // #if EI_PORTING_SEEED_VISION_AI == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/silabs/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/silabs/debug_log.cpp new file mode 100644 index 0000000..1cc01e3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/silabs/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_SILABS == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_SILABS == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/silabs/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/silabs/ei_classifier_porting.cpp new file mode 100644 index 0000000..42903a1 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/silabs/ei_classifier_porting.cpp @@ -0,0 +1,106 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_SILABS == 1 + +/* Include ----------------------------------------------------------------- */ +#include +#include +#include +#include "sl_sleeptimer.h" +#include "sl_stdio.h" + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + sl_sleeptimer_delay_millisecond(time_ms); + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() +{ + return (uint32_t)sl_sleeptimer_tick_to_ms(sl_sleeptimer_get_tick_count()); +} + +uint64_t ei_read_timer_us() +{ + return ei_read_timer_ms() * 1000; +} + +void ei_serial_set_baudrate(int baudrate) +{ +} + +void ei_putchar(char c) +{ + sl_putchar(c); +} + +__attribute__((weak)) char ei_getchar() +{ + char ch = 0; + + if(sl_getchar(&ch) == SL_STATUS_OK) { + return ch; + } + else { + return 0; + } +} + +__attribute__((weak)) void ei_printf(const char *format, ...) { + va_list myargs; + va_start(myargs, format); + vprintf(format, myargs); + va_end(myargs); +} + +__attribute__((weak)) void ei_printf_float(float f) { + ei_printf("%f", f); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +__attribute__((weak)) void ei_putc(char c) +{ + sl_putchar(c); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_SILABS == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/sony/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/sony/debug_log.cpp new file mode 100644 index 0000000..51cc138 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/sony/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_SONY_SPRESENSE == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_SONY_SPRESENSE == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/sony/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/sony/ei_classifier_porting.cpp new file mode 100644 index 0000000..fe0f5be --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/sony/ei_classifier_porting.cpp @@ -0,0 +1,119 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_SONY_SPRESENSE == 1 + +#include +#include +#include + +extern "C" void spresense_time_cb(uint32_t *sec, uint32_t *nano); +extern "C" void spresense_putchar(char cChar); +extern "C" char spresense_getchar(void); + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + + uint64_t end_ms = ei_read_timer_ms() + time_ms; + + while(end_ms > ei_read_timer_ms()){}; + + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + + uint64_t time_ms; + uint32_t seconds, nano_seconds; + + spresense_time_cb(&seconds, &nano_seconds); + + time_ms = (seconds * 1000) + (nano_seconds / 1000000); + return time_ms; +} + +uint64_t ei_read_timer_us() { + + uint64_t time_us; + uint32_t seconds, nano_seconds; + + spresense_time_cb(&seconds, &nano_seconds); + + time_us = (seconds * 1000000) + (nano_seconds / 1000); + return time_us; +} + +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + + char buffer[256]; + int length; + va_list myargs; + va_start(myargs, format); + length = vsprintf(buffer, format, myargs); + va_end(myargs); + + for(int i = 0; i < length; i++) { + spresense_putchar(buffer[i]); + } +} + +__attribute__((weak)) void ei_printf_float(float f) { + ei_printf("%f", f); +} + +/** + * @brief Write single character to serial output + * + * @param[in] cChar The character + */ +__attribute__((weak)) void ei_putchar(char cChar) +{ + spresense_putchar(cChar); +} + +__attribute__((weak)) char ei_getchar(void) +{ + return spresense_getchar(); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_SONY_SPRESENSE == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/stm32-cubeai/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/stm32-cubeai/debug_log.cpp new file mode 100644 index 0000000..35408b7 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/stm32-cubeai/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_STM32_CUBEAI == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_STM32_CUBEAI == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/stm32-cubeai/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/stm32-cubeai/ei_classifier_porting.cpp new file mode 100644 index 0000000..c626f30 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/stm32-cubeai/ei_classifier_porting.cpp @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_STM32_CUBEAI == 1 + +#include "main.h" +#include +#include +#include +#include +#include + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + HAL_Delay(time_ms); + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + return HAL_GetTick(); +} + +uint64_t ei_read_timer_us() { + return HAL_GetTick() * 1000; +} + +__attribute__((weak)) void ei_printf(const char *format, ...) { + va_list myargs; + va_start(myargs, format); + vprintf(format, myargs); + va_end(myargs); +} + +__attribute__((weak)) void ei_printf_float(float f) { + float n = f; + + static double PRECISION = 0.00001; + static int MAX_NUMBER_STRING_SIZE = 32; + + char s[MAX_NUMBER_STRING_SIZE]; + + if (n == 0.0) { + strcpy(s, "0"); + } + else { + int digit, m; + char *c = s; + int neg = (n < 0); + if (neg) { + n = -n; + } + // calculate magnitude + m = log10(n); + if (neg) { + *(c++) = '-'; + } + if (m < 1.0) { + m = 0; + } + // convert the number + while (n > PRECISION || m >= 0) { + double weight = pow(10.0, m); + if (weight > 0 && !isinf(weight)) { + digit = floor(n / weight); + n -= (digit * weight); + *(c++) = '0' + digit; + } + if (m == 0 && n > 0) { + *(c++) = '.'; + } + m--; + } + *(c) = '\0'; + } + + + ei_printf("%s", s); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_STM32_CUBEAI diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/synaptics/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/synaptics/debug_log.cpp new file mode 100644 index 0000000..381c82b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/synaptics/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_SYNAPTICS == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_SYNAPTICS == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/synaptics/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/synaptics/ei_classifier_porting.cpp new file mode 100644 index 0000000..bc5ac26 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/synaptics/ei_classifier_porting.cpp @@ -0,0 +1,132 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_SYNAPTICS == 1 + +#include +#include +#include +#include + +#include "mcu.h" +#include "uart_drv.h" + +extern "C" void *os_Malloc(unsigned long); +extern "C" int os_Free(void *); +extern "C" uint64_t get_time_ms(void); +extern void print_out(const char *format, va_list args); + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + + os_TaskSleep(time_ms); + + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + + return get_time_ms(); +} + +uint64_t ei_read_timer_us() { + + return get_time_ms() * 1000; +} + +__attribute__((weak)) void ei_printf(const char *format, ...) 
{ + va_list args; + va_start(args, format); + print_out(format, args); + va_end(args); +} + + +__attribute__((weak)) void ei_putchar(char c) { + uart_putchar(c); +} + +__attribute__((weak)) void ei_printf_float(float f) { + float n = f; + + static double PRECISION = 0.00001; + static int MAX_NUMBER_STRING_SIZE = 32; + + char s[MAX_NUMBER_STRING_SIZE]; + + if (n == 0.0) { + ei_printf("0.00000"); + } else { + int digit, m; //, m1; + char *c = s; + int neg = (n < 0); + if (neg) { + n = -n; + } + // calculate magnitude + m = log10(n); + if (neg) { + *(c++) = '-'; + } + if (m < 1.0) { + m = 0; + } + // convert the number + while (n > PRECISION || m >= 0) { + double weight = pow(10.0, m); + if (weight > 0 && !isinf(weight)) { + digit = floor(n / weight); + n -= (digit * weight); + *(c++) = '0' + digit; + } + if (m == 0 && n > 0) { + *(c++) = '.'; + } + m--; + } + *(c) = '\0'; + ei_printf("%s", s); + } +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return os_Malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return os_Malloc(nitems * size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + os_Free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_SYNAPTICS == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ti/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ti/debug_log.cpp new file mode 100644 index 0000000..4f8ee4a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ti/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_TI == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Redirect TFLite DebugLog to ei_printf +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_TI == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ti/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ti/ei_classifier_porting.cpp new file mode 100644 index 0000000..b0e38f4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/ti/ei_classifier_porting.cpp @@ -0,0 +1,96 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_TI == 1 + +#include + +#include +#include +#include +#include "unistd.h" + +extern "C" void Serial_Out(char *string, int length); +extern "C" uint64_t Timer_getMs(void); + +__attribute__((weak)) EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +/** + * Cancelable sleep, can be triggered with signal from other thread + */ +__attribute__((weak)) EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + + usleep(time_ms * 1000); + + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + + return Timer_getMs(); +} + +uint64_t ei_read_timer_us() { + + /* TI board hangs when trying to call callback function each micro second */ + return Timer_getMs() * 1000; +} + +__attribute__((weak)) void ei_printf(const char *format, ...) { + + char buffer[256]; + int length; + va_list myargs; + va_start(myargs, format); + length = vsnprintf(buffer, 256, format, myargs); + va_end(myargs); + + Serial_Out(buffer, length); +} + +__attribute__((weak)) void ei_printf_float(float f) { + ei_printf("%f", f); +} + +__attribute__((weak)) void ei_putchar(char data) +{ + Serial_Out(&data, 1); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // EI_PORTING_TI == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/zephyr/debug_log.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/zephyr/debug_log.cpp new file mode 100644 index 0000000..53e4c46 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/zephyr/debug_log.cpp @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_ZEPHYR == 1 + +#include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" +#include +#include + +// Route back to `ei_printf` +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif // defined(__cplusplus) && EI_C_LINKAGE == 1 +void DebugLog(const char* s) { + ei_printf("%s", s); +} + +#endif // #if EI_PORTING_ZEPHYR == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/zephyr/ei_classifier_porting.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/zephyr/ei_classifier_porting.cpp new file mode 100644 index 0000000..0ba58b8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/porting/zephyr/ei_classifier_porting.cpp @@ -0,0 +1,104 @@ +/* + * Copyright (c) 2022 EdgeImpulse Inc. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an "AS + * IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either + * express or implied. See the License for the specific language + * governing permissions and limitations under the License. + * + * SPDX-License-Identifier: Apache-2.0 + */ + +#include "../ei_classifier_porting.h" +#if EI_PORTING_ZEPHYR == 1 + +#include +// Zephyr 3.1.x and newer uses a different include scheme +#if (KERNEL_VERSION_MAJOR > 3) || ((KERNEL_VERSION_MAJOR == 3) && (KERNEL_VERSION_MINOR >= 1)) +#include +#include +#else +#include +#include +#endif +#include +#include + +extern const struct device *uart; + +#define EI_WEAK_FN __attribute__((weak)) + +EI_WEAK_FN EI_IMPULSE_ERROR ei_run_impulse_check_canceled() { + return EI_IMPULSE_OK; +} + +EI_WEAK_FN EI_IMPULSE_ERROR ei_sleep(int32_t time_ms) { + k_msleep(time_ms); + return EI_IMPULSE_OK; +} + +uint64_t ei_read_timer_ms() { + return k_uptime_get(); +} + +uint64_t ei_read_timer_us() { + return k_uptime_get() * 1000; +} + +EI_WEAK_FN char ei_getchar() +{ + uint8_t rcv_char = 0; + if(uart_fifo_read(uart, &rcv_char, 1) == 1) { + return rcv_char; + } + else { + return 0; + } +} + +/** + * Printf function uses vsnprintf and outputs via printf + */ +__attribute__((weak)) void ei_printf(const char *format, ...)
{ + static char print_buf[1024] = { 0 }; + + va_list args; + va_start(args, format); + int r = vsnprintf(print_buf, sizeof(print_buf), format, args); + va_end(args); + + if(r > 0) { + printf("%s", print_buf); + } +} + +__attribute__((weak)) void ei_printf_float(float f) { + printf("%f", f); +} + +__attribute__((weak)) void *ei_malloc(size_t size) { + return malloc(size); +} + +__attribute__((weak)) void *ei_calloc(size_t nitems, size_t size) { + return calloc(nitems, size); +} + +__attribute__((weak)) void ei_free(void *ptr) { + free(ptr); +} + +#if defined(__cplusplus) && EI_C_LINKAGE == 1 +extern "C" +#endif +__attribute__((weak)) void DebugLog(const char* s) { + printf("%s", s); +} + +#endif // #if EI_PORTING_ZEPHYR == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/scripts/leak-detection.js b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/scripts/leak-detection.js deleted file mode 100644 index c1bfd78..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/scripts/leak-detection.js +++ /dev/null @@ -1,152 +0,0 @@ -// How to use this script: -// 1. Enable the 'EIDSP_TRACK_ALLOCATIONS' macro in dsp/config.hpp -// 2. Run the application, and paste the full output below (in the report variable) -// 3. Run the script via: -// $ node leak-detection.js -// 4. You'll see exactly if there's any leaks - -const report = `alloc matrix 2 1 x 21 = 84 bytes (in_use=84, peak=84) (ei_matrix@edge-impulse-sdk/dsp/numpy_types.h:117) 0x7fd8bc4058a0 -alloc matrix 2 200 x 3 = 2400 bytes (in_use=2484, peak=2484) (ei_matrix@edge-impulse-sdk/dsp/numpy_types.h:117) 0x7fd8bc80da00 -alloc matrix 2 3 x 200 = 2400 bytes (in_use=4884, peak=4884) (transpose@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:324) 0x7fd8bc80e400 -free matrix 3 x 200 = 2400 bytes (in_use=2484, peak=4884) (transpose@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:324) 0x7fd8bc80e400 -alloc matrix 2 64 x 1 = 256 bytes (in_use=2740, peak=4884) (ei_matrix@edge-impulse-sdk/dsp/numpy_types.h:117) 0x7fd8bc4059a0 -alloc matrix 2 3 x 1 = 12 bytes (in_use=2752, peak=4884) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:85) 0x7fd8bc405900 -alloc matrix 2 3 x 1 = 12 bytes (in_use=2764, peak=4884) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:114) 0x7fd8bc405960 -alloc matrix 2 3 x 2 = 24 bytes (in_use=2788, peak=4884) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:121) 0x7fd8bc405970 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3048, peak=4884) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:130) 0x7fd8bc405aa0 -alloc matrix 2 1 x 128 = 512 bytes (in_use=3560, peak=4884) (rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:1316) 0x7fd8bc405ca0 -alloc 520 bytes (in_use=4080, peak=4884) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2045) 0x7fd8bc405ea0 -alloc 1568 bytes (in_use=5648, peak=5648) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2059) 0x7fd8bd008200 -free 1568 bytes (in_use=4080, peak=5648) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2069) 0x7fd8bd008200 -free 520 bytes (in_use=3560, peak=5648) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2070) 0x7fd8bc405ea0 -free matrix 1 x 128 = 512 bytes (in_use=3048, peak=5648) (rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:1316) 0x7fd8bc405ca0 -alloc matrix 2 1 x 2 = 8 bytes (in_use=3056, peak=5648) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:140) 0x7fd8bc405990 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3316, 
peak=5648) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:380) 0x7fd8bc405bb0 -alloc matrix 2 10 x 1 = 40 bytes (in_use=3356, peak=5648) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:386) 0x7fd8bc405910 -free matrix 10 x 1 = 40 bytes (in_use=3316, peak=5648) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:386) 0x7fd8bc405910 -free matrix 1 x 65 = 260 bytes (in_use=3056, peak=5648) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:380) 0x7fd8bc405bb0 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3316, peak=5648) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:148) 0x7fd8bc405bb0 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3576, peak=5648) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:149) 0x7fd8bc405cc0 -alloc matrix 2 1 x 128 = 512 bytes (in_use=4088, peak=5648) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:766) 0x7fd8bc405dd0 -alloc matrix 2 1 x 1 = 4 bytes (in_use=4092, peak=5648) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:780) 0x7fd8bc405940 -alloc 520 bytes (in_use=4612, peak=5648) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:791) 0x7fd8bc4060b0 -alloc 1568 bytes (in_use=6180, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2085) 0x7fd8bc80ae00 -free 1568 bytes (in_use=4612, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2090) 0x7fd8bc80ae00 -free 520 bytes (in_use=4092, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:814) 0x7fd8bc4060b0 -free matrix 1 x 1 = 4 bytes (in_use=4088, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:780) 0x7fd8bc405940 -free matrix 1 x 128 = 512 bytes (in_use=3576, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:766) 0x7fd8bc405dd0 -alloc matrix 2 4 x 1 = 16 bytes (in_use=3592, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:156) 0x7fd8bc405940 -alloc matrix 2 1 x 4 = 16 bytes (in_use=3608, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:625) 0x7fd8bc405950 -alloc matrix 2 1 x 4 = 16 bytes (in_use=3624, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:626) 0x7fd8bc504080 -free matrix 1 x 4 = 16 bytes (in_use=3608, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:626) 0x7fd8bc504080 -free matrix 1 x 4 = 16 bytes (in_use=3592, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:625) 0x7fd8bc405950 -free matrix 4 x 1 = 16 bytes (in_use=3576, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:156) 0x7fd8bc405940 -free matrix 1 x 65 = 260 bytes (in_use=3316, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:149) 0x7fd8bc405cc0 -free matrix 1 x 65 = 260 bytes (in_use=3056, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:148) 0x7fd8bc405bb0 -free matrix 1 x 2 = 8 bytes (in_use=3048, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:140) 0x7fd8bc405990 -free matrix 1 x 65 = 260 bytes (in_use=2788, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:130) 0x7fd8bc405aa0 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3048, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:130) 0x7fd8bc604080 -alloc matrix 2 1 x 128 = 512 bytes (in_use=3560, peak=6180) (rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:1316) 0x7fd8bc604190 -alloc 520 bytes 
(in_use=4080, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2045) 0x7fd8bc604390 -alloc 1568 bytes (in_use=5648, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2059) 0x7fd8bd008200 -free 1568 bytes (in_use=4080, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2069) 0x7fd8bd008200 -free 520 bytes (in_use=3560, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2070) 0x7fd8bc604390 -free matrix 1 x 128 = 512 bytes (in_use=3048, peak=6180) (rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:1316) 0x7fd8bc604190 -alloc matrix 2 1 x 2 = 8 bytes (in_use=3056, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:140) 0x7fd8bc604190 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3316, peak=6180) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:380) 0x7fd8bc6041a0 -alloc matrix 2 10 x 1 = 40 bytes (in_use=3356, peak=6180) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:386) 0x7fd8bc6042b0 -free matrix 10 x 1 = 40 bytes (in_use=3316, peak=6180) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:386) 0x7fd8bc6042b0 -free matrix 1 x 65 = 260 bytes (in_use=3056, peak=6180) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:380) 0x7fd8bc6041a0 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3316, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:148) 0x7fd8bc704080 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3576, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:149) 0x7fd8bc704190 -alloc matrix 2 1 x 128 = 512 bytes (in_use=4088, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:766) 0x7fd8bc7042a0 -alloc matrix 2 1 x 1 = 4 bytes (in_use=4092, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:780) 0x7fd8bc7044a0 -alloc 520 bytes (in_use=4612, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:791) 0x7fd8bc7044b0 -alloc 1568 bytes (in_use=6180, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2085) 0x7fd8bd808200 -free 1568 bytes (in_use=4612, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2090) 0x7fd8bd808200 -free 520 bytes (in_use=4092, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:814) 0x7fd8bc7044b0 -free matrix 1 x 1 = 4 bytes (in_use=4088, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:780) 0x7fd8bc7044a0 -free matrix 1 x 128 = 512 bytes (in_use=3576, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:766) 0x7fd8bc7042a0 -alloc matrix 2 4 x 1 = 16 bytes (in_use=3592, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:156) 0x7fd8bc7044a0 -alloc matrix 2 1 x 4 = 16 bytes (in_use=3608, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:625) 0x7fd8bc7042a0 -alloc matrix 2 1 x 4 = 16 bytes (in_use=3624, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:626) 0x7fd8bc7042b0 -free matrix 1 x 4 = 16 bytes (in_use=3608, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:626) 0x7fd8bc7042b0 -free matrix 1 x 4 = 16 bytes (in_use=3592, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:625) 0x7fd8bc7042a0 -free matrix 4 x 1 = 16 bytes (in_use=3576, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:156) 0x7fd8bc7044a0 -free matrix 1 x 65 = 260 bytes (in_use=3316, peak=6180) 
(spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:149) 0x7fd8bc704190 -free matrix 1 x 65 = 260 bytes (in_use=3056, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:148) 0x7fd8bc704080 -free matrix 1 x 2 = 8 bytes (in_use=3048, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:140) 0x7fd8bc604190 -free matrix 1 x 65 = 260 bytes (in_use=2788, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:130) 0x7fd8bc604080 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3048, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:130) 0x7fd8be004080 -alloc matrix 2 1 x 128 = 512 bytes (in_use=3560, peak=6180) (rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:1316) 0x7fd8be004190 -alloc 520 bytes (in_use=4080, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2045) 0x7fd8be004390 -alloc 1568 bytes (in_use=5648, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2059) 0x7fd8be808200 -free 1568 bytes (in_use=4080, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2069) 0x7fd8be808200 -free 520 bytes (in_use=3560, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2070) 0x7fd8be004390 -free matrix 1 x 128 = 512 bytes (in_use=3048, peak=6180) (rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:1316) 0x7fd8be004190 -alloc matrix 2 1 x 2 = 8 bytes (in_use=3056, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:140) 0x7fd8be004190 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3316, peak=6180) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:380) 0x7fd8be0041a0 -alloc matrix 2 10 x 1 = 40 bytes (in_use=3356, peak=6180) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:386) 0x7fd8be0042b0 -free matrix 10 x 1 = 40 bytes (in_use=3316, peak=6180) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:386) 0x7fd8be0042b0 -free matrix 1 x 65 = 260 bytes (in_use=3056, peak=6180) (find_fft_peaks@./edge-impulse-sdk/dsp/spectral/processing.hpp:380) 0x7fd8be0041a0 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3316, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:148) 0x7fd8be0041a0 -alloc matrix 2 1 x 65 = 260 bytes (in_use=3576, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:149) 0x7fd8be0042e0 -alloc matrix 2 1 x 128 = 512 bytes (in_use=4088, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:766) 0x7fd8be0045a0 -alloc matrix 2 1 x 1 = 4 bytes (in_use=4092, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:780) 0x7fd8be0043f0 -alloc 520 bytes (in_use=4612, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:791) 0x7fd8be0047a0 -alloc 1568 bytes (in_use=6180, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2085) 0x7fd8be808200 -free 1568 bytes (in_use=4612, peak=6180) (software_rfft@./edge-impulse-sdk/dsp/spectral/../numpy.hpp:2090) 0x7fd8be808200 -free 520 bytes (in_use=4092, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:814) 0x7fd8be0047a0 -free matrix 1 x 1 = 4 bytes (in_use=4088, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:780) 0x7fd8be0043f0 -free matrix 1 x 128 = 512 bytes (in_use=3576, peak=6180) (periodogram@./edge-impulse-sdk/dsp/spectral/processing.hpp:766) 0x7fd8be0045a0 -alloc matrix 2 4 x 1 = 16 bytes (in_use=3592, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:156) 
0x7fd8be0043f0 -alloc matrix 2 1 x 4 = 16 bytes (in_use=3608, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:625) 0x7fd8be0042b0 -alloc matrix 2 1 x 4 = 16 bytes (in_use=3624, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:626) 0x7fd8be0042c0 -free matrix 1 x 4 = 16 bytes (in_use=3608, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:626) 0x7fd8be0042c0 -free matrix 1 x 4 = 16 bytes (in_use=3592, peak=6180) (spectral_power_edges@./edge-impulse-sdk/dsp/spectral/processing.hpp:625) 0x7fd8be0042b0 -free matrix 4 x 1 = 16 bytes (in_use=3576, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:156) 0x7fd8be0043f0 -free matrix 1 x 65 = 260 bytes (in_use=3316, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:149) 0x7fd8be0042e0 -free matrix 1 x 65 = 260 bytes (in_use=3056, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:148) 0x7fd8be0041a0 -free matrix 1 x 2 = 8 bytes (in_use=3048, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:140) 0x7fd8be004190 -free matrix 1 x 65 = 260 bytes (in_use=2788, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:130) 0x7fd8be004080 -free matrix 3 x 2 = 24 bytes (in_use=2764, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:121) 0x7fd8bc405970 -free matrix 3 x 1 = 12 bytes (in_use=2752, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:114) 0x7fd8bc405960 -free matrix 3 x 1 = 12 bytes (in_use=2740, peak=6180) (spectral_analysis@./edge-impulse-sdk/dsp/spectral/feature.hpp:85) 0x7fd8bc405900 -free matrix 5 x 1 = 20 bytes (in_use=2720, peak=6180) (~ei_matrix@edge-impulse-sdk/dsp/numpy_types.h:132) 0x7fd8bc4059a0 -free matrix 3 x 200 = 2400 bytes (in_use=320, peak=6180) (~ei_matrix@edge-impulse-sdk/dsp/numpy_types.h:132) 0x7fd8bc80da00 -Features (2 ms.): 0.190703 2.380952 0.147790 0.000000 0.000872 0.000080 0.000395 1.033266 0.793651 1.161580 0.000000 0.086353 0.034655 0.002571 0.510732 0.793651 0.648773 0.000000 0.026938 0.004146 0.001570 -Running neural network... -Predictions (time: 0 ms.): -updown: 0.996094 -free matrix 1 x 21 = 84 bytes (in_use=236, peak=6180) (~ei_matrix@edge-impulse-sdk/dsp/numpy_types.h:132) 0x7fd8bc4058a0`; - -let allocated = []; - -for (let line of report.split('\n')) { - let s = line.split(' '); - let ptr = s[s.length - 1]; - - if (line.startsWith('alloc')) { - let splitted = line.split(' bytes')[0].split(' '); - let alloc = Number(splitted[splitted.length - 1]); - // console.log('alloc', alloc); - allocated.push({ - ptr: ptr, - size: alloc - }); - } - else if (line.startsWith('free')) { - let splitted = line.split(' bytes')[0].split(' '); - let free = Number(splitted[splitted.length - 1]); - - let p = allocated.find(x => x.ptr === ptr && x.size === free); - if (!p) { - console.warn('Could not find ptr', ptr, free); - } - else { - allocated.splice(allocated.indexOf(p), 1); - } - } -} - -console.log('dangling', allocated); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/LICENSE b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/LICENSE index fb26962..d645695 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/LICENSE +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/LICENSE @@ -1,4 +1,3 @@ -Copyright 2019 The TensorFlow Authors. All rights reserved. 
Apache License Version 2.0, January 2004 @@ -201,48 +200,3 @@ Copyright 2019 The TensorFlow Authors. All rights reserved. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. - -MIT License - -Copyright (c) 2017-2021 Arm Limited - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -LICENSE - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND - ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR - ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/builtin_op_data.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/builtin_op_data.h new file mode 100644 index 0000000..b512ba7 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/builtin_op_data.h @@ -0,0 +1,22 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +// Compatibility shim for new location of interface definitions. + +#ifndef TENSORFLOW_LITE_BUILTIN_OP_DATA_H_ +#define TENSORFLOW_LITE_BUILTIN_OP_DATA_H_ + +#include "edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h" + +#endif // TENSORFLOW_LITE_BUILTIN_OP_DATA_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/builtin_ops.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/builtin_ops.h new file mode 100644 index 0000000..3370730 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/builtin_ops.h @@ -0,0 +1,194 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_BUILTIN_OPS_H_ +#define TENSORFLOW_LITE_BUILTIN_OPS_H_ + +// DO NOT EDIT MANUALLY: This file is automatically generated by +// `schema/builtin_ops_header/generator.cc`. + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// The enum for builtin operators. +// Note: CUSTOM, DELEGATE, and PLACEHOLDER_FOR_GREATER_OP_CODES are 3 special +// ops which are not real built-in ops. 
+typedef enum { + kTfLiteBuiltinAdd = 0, + kTfLiteBuiltinAveragePool2d = 1, + kTfLiteBuiltinConcatenation = 2, + kTfLiteBuiltinConv2d = 3, + kTfLiteBuiltinDepthwiseConv2d = 4, + kTfLiteBuiltinDepthToSpace = 5, + kTfLiteBuiltinDequantize = 6, + kTfLiteBuiltinEmbeddingLookup = 7, + kTfLiteBuiltinFloor = 8, + kTfLiteBuiltinFullyConnected = 9, + kTfLiteBuiltinHashtableLookup = 10, + kTfLiteBuiltinL2Normalization = 11, + kTfLiteBuiltinL2Pool2d = 12, + kTfLiteBuiltinLocalResponseNormalization = 13, + kTfLiteBuiltinLogistic = 14, + kTfLiteBuiltinLshProjection = 15, + kTfLiteBuiltinLstm = 16, + kTfLiteBuiltinMaxPool2d = 17, + kTfLiteBuiltinMul = 18, + kTfLiteBuiltinRelu = 19, + kTfLiteBuiltinReluN1To1 = 20, + kTfLiteBuiltinRelu6 = 21, + kTfLiteBuiltinReshape = 22, + kTfLiteBuiltinResizeBilinear = 23, + kTfLiteBuiltinRnn = 24, + kTfLiteBuiltinSoftmax = 25, + kTfLiteBuiltinSpaceToDepth = 26, + kTfLiteBuiltinSvdf = 27, + kTfLiteBuiltinTanh = 28, + kTfLiteBuiltinConcatEmbeddings = 29, + kTfLiteBuiltinSkipGram = 30, + kTfLiteBuiltinCall = 31, + kTfLiteBuiltinCustom = 32, + kTfLiteBuiltinEmbeddingLookupSparse = 33, + kTfLiteBuiltinPad = 34, + kTfLiteBuiltinUnidirectionalSequenceRnn = 35, + kTfLiteBuiltinGather = 36, + kTfLiteBuiltinBatchToSpaceNd = 37, + kTfLiteBuiltinSpaceToBatchNd = 38, + kTfLiteBuiltinTranspose = 39, + kTfLiteBuiltinMean = 40, + kTfLiteBuiltinSub = 41, + kTfLiteBuiltinDiv = 42, + kTfLiteBuiltinSqueeze = 43, + kTfLiteBuiltinUnidirectionalSequenceLstm = 44, + kTfLiteBuiltinStridedSlice = 45, + kTfLiteBuiltinBidirectionalSequenceRnn = 46, + kTfLiteBuiltinExp = 47, + kTfLiteBuiltinTopkV2 = 48, + kTfLiteBuiltinSplit = 49, + kTfLiteBuiltinLogSoftmax = 50, + kTfLiteBuiltinDelegate = 51, + kTfLiteBuiltinBidirectionalSequenceLstm = 52, + kTfLiteBuiltinCast = 53, + kTfLiteBuiltinPrelu = 54, + kTfLiteBuiltinMaximum = 55, + kTfLiteBuiltinArgMax = 56, + kTfLiteBuiltinMinimum = 57, + kTfLiteBuiltinLess = 58, + kTfLiteBuiltinNeg = 59, + kTfLiteBuiltinPadv2 = 60, + kTfLiteBuiltinGreater = 61, + kTfLiteBuiltinGreaterEqual = 62, + kTfLiteBuiltinLessEqual = 63, + kTfLiteBuiltinSelect = 64, + kTfLiteBuiltinSlice = 65, + kTfLiteBuiltinSin = 66, + kTfLiteBuiltinTransposeConv = 67, + kTfLiteBuiltinSparseToDense = 68, + kTfLiteBuiltinTile = 69, + kTfLiteBuiltinExpandDims = 70, + kTfLiteBuiltinEqual = 71, + kTfLiteBuiltinNotEqual = 72, + kTfLiteBuiltinLog = 73, + kTfLiteBuiltinSum = 74, + kTfLiteBuiltinSqrt = 75, + kTfLiteBuiltinRsqrt = 76, + kTfLiteBuiltinShape = 77, + kTfLiteBuiltinPow = 78, + kTfLiteBuiltinArgMin = 79, + kTfLiteBuiltinFakeQuant = 80, + kTfLiteBuiltinReduceProd = 81, + kTfLiteBuiltinReduceMax = 82, + kTfLiteBuiltinPack = 83, + kTfLiteBuiltinLogicalOr = 84, + kTfLiteBuiltinOneHot = 85, + kTfLiteBuiltinLogicalAnd = 86, + kTfLiteBuiltinLogicalNot = 87, + kTfLiteBuiltinUnpack = 88, + kTfLiteBuiltinReduceMin = 89, + kTfLiteBuiltinFloorDiv = 90, + kTfLiteBuiltinReduceAny = 91, + kTfLiteBuiltinSquare = 92, + kTfLiteBuiltinZerosLike = 93, + kTfLiteBuiltinFill = 94, + kTfLiteBuiltinFloorMod = 95, + kTfLiteBuiltinRange = 96, + kTfLiteBuiltinResizeNearestNeighbor = 97, + kTfLiteBuiltinLeakyRelu = 98, + kTfLiteBuiltinSquaredDifference = 99, + kTfLiteBuiltinMirrorPad = 100, + kTfLiteBuiltinAbs = 101, + kTfLiteBuiltinSplitV = 102, + kTfLiteBuiltinUnique = 103, + kTfLiteBuiltinCeil = 104, + kTfLiteBuiltinReverseV2 = 105, + kTfLiteBuiltinAddN = 106, + kTfLiteBuiltinGatherNd = 107, + kTfLiteBuiltinCos = 108, + kTfLiteBuiltinWhere = 109, + kTfLiteBuiltinRank = 110, + kTfLiteBuiltinElu = 111, + 
kTfLiteBuiltinReverseSequence = 112, + kTfLiteBuiltinMatrixDiag = 113, + kTfLiteBuiltinQuantize = 114, + kTfLiteBuiltinMatrixSetDiag = 115, + kTfLiteBuiltinRound = 116, + kTfLiteBuiltinHardSwish = 117, + kTfLiteBuiltinIf = 118, + kTfLiteBuiltinWhile = 119, + kTfLiteBuiltinNonMaxSuppressionV4 = 120, + kTfLiteBuiltinNonMaxSuppressionV5 = 121, + kTfLiteBuiltinScatterNd = 122, + kTfLiteBuiltinSelectV2 = 123, + kTfLiteBuiltinDensify = 124, + kTfLiteBuiltinSegmentSum = 125, + kTfLiteBuiltinBatchMatmul = 126, + kTfLiteBuiltinPlaceholderForGreaterOpCodes = 127, + kTfLiteBuiltinCumsum = 128, + kTfLiteBuiltinCallOnce = 129, + kTfLiteBuiltinBroadcastTo = 130, + kTfLiteBuiltinRfft2d = 131, + kTfLiteBuiltinConv3d = 132, + kTfLiteBuiltinImag = 133, + kTfLiteBuiltinReal = 134, + kTfLiteBuiltinComplexAbs = 135, + kTfLiteBuiltinHashtable = 136, + kTfLiteBuiltinHashtableFind = 137, + kTfLiteBuiltinHashtableImport = 138, + kTfLiteBuiltinHashtableSize = 139, + kTfLiteBuiltinReduceAll = 140, + kTfLiteBuiltinConv3dTranspose = 141, + kTfLiteBuiltinVarHandle = 142, + kTfLiteBuiltinReadVariable = 143, + kTfLiteBuiltinAssignVariable = 144, + kTfLiteBuiltinBroadcastArgs = 145, + kTfLiteBuiltinRandomStandardNormal = 146, + kTfLiteBuiltinBucketize = 147, + kTfLiteBuiltinRandomUniform = 148, + kTfLiteBuiltinMultinomial = 149, + kTfLiteBuiltinGelu = 150, + kTfLiteBuiltinDynamicUpdateSlice = 151, + kTfLiteBuiltinRelu0To1 = 152, + kTfLiteBuiltinUnsortedSegmentProd = 153, + kTfLiteBuiltinUnsortedSegmentMax = 154, + kTfLiteBuiltinUnsortedSegmentSum = 155, + kTfLiteBuiltinAtan2 = 156, + kTfLiteBuiltinUnsortedSegmentMin = 157, + kTfLiteBuiltinSign = 158, +} TfLiteBuiltinOperator; + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus +#endif // TENSORFLOW_LITE_BUILTIN_OPS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h index 29bdd7e..f1e511a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h @@ -15,488 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ #define TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ -#include - -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -// TfLiteReshapeParams can't have dynamic data so we fix the maximum possible -// number of dimensions. -#define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8 - -// TODO(aselle): Consider using "if this then that" for testing. - -// Useful placeholder to put in otherwise empty structs to avoid size warnings. -typedef struct { - char dummy; -} EmptyStructPlaceholder; - -// IMPORTANT: All new members of structs must be added at the end to ensure -// backwards compatibility. - -// Possible padding types (for convolutions) -typedef enum { - kTfLitePaddingUnknown = 0, - kTfLitePaddingSame, - kTfLitePaddingValid, -} TfLitePadding; - -typedef enum { - kTfLiteMirrorPaddingUnknown = 0, - kTfLiteMirrorPaddingReflect, - kTfLiteMirrorPaddingSymmetric, -} TfLiteMirrorPaddingMode; - -// TODO(b/130259536): We should move this out of builtin_op_data. 
-typedef struct { - int width; - int height; - int width_offset; - int height_offset; -} TfLitePaddingValues; - -typedef struct { - TfLiteMirrorPaddingMode mode; -} TfLiteMirrorPaddingParams; - -// Possible fused activation functions. -// TODO(aselle): rename to TfLiteActivation -typedef enum { - kTfLiteActNone = 0, - kTfLiteActRelu, - kTfLiteActReluN1To1, // min(max(-1, x), 1) - kTfLiteActRelu6, // min(max(0, x), 6) - kTfLiteActTanh, - kTfLiteActSignBit, - kTfLiteActSigmoid, -} TfLiteFusedActivation; - -typedef struct { - // Parameters for CONV_2D version 1. - TfLitePadding padding; - int stride_width; - int stride_height; - TfLiteFusedActivation activation; - - // Parameters for CONV_2D version 2. - // Note: Version 2 supports dilation values not equal to 1. - int dilation_width_factor; - int dilation_height_factor; -} TfLiteConvParams; - -typedef struct { - TfLitePadding padding; - int stride_width; - int stride_height; - int stride_depth; - int dilation_width_factor; - int dilation_height_factor; - int dilation_depth_factor; - TfLiteFusedActivation activation; -} TfLiteConv3DParams; - -typedef struct { - TfLitePadding padding; - int stride_width; - int stride_height; - int filter_width; - int filter_height; - TfLiteFusedActivation activation; - struct { - TfLitePaddingValues padding; - } computed; -} TfLitePoolParams; - -typedef struct { - // Parameters for DepthwiseConv version 1 or above. - TfLitePadding padding; - int stride_width; - int stride_height; - // `depth_multiplier` is redundant. It's used by CPU kernels in - // TensorFlow 2.0 or below, but ignored in versions above. - // - // The information can be deduced from the shape of input and the shape of - // weights. Since the TFLiteConverter toolchain doesn't support partially - // specified shapes, relying on `depth_multiplier` stops us from supporting - // graphs with dynamic shape tensors. - // - // Note: Some of the delegates (e.g. NNAPI, GPU) are still relying on this - // field. - int depth_multiplier; - TfLiteFusedActivation activation; - // Parameters for DepthwiseConv version 2 or above. - int dilation_width_factor; - int dilation_height_factor; -} TfLiteDepthwiseConvParams; - -typedef struct { - int rank; - TfLiteFusedActivation activation; - - // Parameter for SVDF version 4. - bool asymmetric_quantize_inputs; -} TfLiteSVDFParams; - -typedef struct { - TfLiteFusedActivation activation; - - // Parameter for RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteRNNParams; - -typedef struct { - bool time_major; - TfLiteFusedActivation activation; - - // Parameter for Sequence RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteSequenceRNNParams; - -typedef struct { - bool time_major; - TfLiteFusedActivation activation; - bool merge_outputs; - - // Parameter for Bidirectional RNN verison 3. - bool asymmetric_quantize_inputs; -} TfLiteBidirectionalSequenceRNNParams; - -typedef enum { - kTfLiteFullyConnectedWeightsFormatDefault = 0, - kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1, -} TfLiteFullyConnectedWeightsFormat; - -typedef struct { - // Parameters for FullyConnected version 1 or above. - TfLiteFusedActivation activation; - - // Parameters for FullyConnected version 2 or above. - TfLiteFullyConnectedWeightsFormat weights_format; - - // Parameters for FullyConnected version 5 or above. - // If set to true, then the number of dimensions in the input and the output - // tensors are the same. Furthermore, all but the last dimension of the input - // and output shapes will be equal. 
- bool keep_num_dims; - - // Parameters for FullyConnected version 7 or above. - // If set to true and the weights are quantized, then non constant inputs - // are quantized at evaluation time with asymmetric quantization. - bool asymmetric_quantize_inputs; -} TfLiteFullyConnectedParams; - -typedef enum { - kTfLiteLshProjectionUnknown = 0, - kTfLiteLshProjectionSparse = 1, - kTfLiteLshProjectionDense = 2, -} TfLiteLSHProjectionType; - -typedef struct { - TfLiteLSHProjectionType type; -} TfLiteLSHProjectionParams; - -typedef struct { - float beta; -} TfLiteSoftmaxParams; - -typedef struct { - int axis; - TfLiteFusedActivation activation; -} TfLiteConcatenationParams; - -typedef struct { - TfLiteFusedActivation activation; - // Parameter added for the version 4. - bool pot_scale_int16; -} TfLiteAddParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteSpaceToBatchNDParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteBatchToSpaceNDParams; - -typedef struct { - bool adj_x; - bool adj_y; - // Parameters for BatchMatMul version 4 or above. - // If set to true and the weights are quantized, then non constant inputs - // are quantized at evaluation time with asymmetric quantization. - bool asymmetric_quantize_inputs; -} TfLiteBatchMatMulParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteMulParams; - -typedef struct { - TfLiteFusedActivation activation; - // Parameter added for the version 5. - bool pot_scale_int16; -} TfLiteSubParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteDivParams; - -typedef struct { - TfLiteFusedActivation activation; -} TfLiteL2NormParams; - -typedef struct { - int radius; - float bias; - float alpha; - float beta; -} TfLiteLocalResponseNormParams; - -typedef enum { - kTfLiteLSTMFullKernel = 0, - kTfLiteLSTMBasicKernel -} TfLiteLSTMKernelType; - -typedef struct { - // Parameters for LSTM version 1. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // Parameters for LSTM version 2. - // kTfLiteLSTMBasicKernel is only supported in version 2 or above. - TfLiteLSTMKernelType kernel_type; - - // Parameters for LSTM version 4. - bool asymmetric_quantize_inputs; -} TfLiteLSTMParams; - -typedef struct { - // Parameters needed for the underlying LSTM. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // If set to true then the first dimension is time, otherwise batch. - bool time_major; - - // Parameter for unidirectional sequence RNN version 3. - bool asymmetric_quantize_inputs; -} TfLiteUnidirectionalSequenceLSTMParams; - -typedef struct { - // Parameters supported by version 1: - // Parameters inherited for the LSTM kernel. - TfLiteFusedActivation activation; - float cell_clip; - float proj_clip; - - // If true, store the outputs of both directions in the first output. - bool merge_outputs; - - // Parameters supported by version 2: - // If set to true then the first dimension is time, otherwise batch. - bool time_major; - - // Parameters supported by version 4: - // If set to true, then hybrid ops use asymmetric quantization for inputs. - bool asymmetric_quantize_inputs; -} TfLiteBidirectionalSequenceLSTMParams; - -typedef struct { - bool align_corners; - // half_pixel_centers assumes pixels are of half the actual dimensions, and - // yields more accurate resizes. Corresponds to the same argument for the - // original TensorFlow op in TF2.0. 
- bool half_pixel_centers; -} TfLiteResizeBilinearParams; - -typedef struct { - bool align_corners; - bool half_pixel_centers; -} TfLiteResizeNearestNeighborParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLitePadParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLitePadV2Params; - -typedef struct { - // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. - // For now we will fix the maximum possible number of dimensions. - int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT]; - int num_dimensions; -} TfLiteReshapeParams; - -typedef struct { - int ngram_size; - int max_skip_size; - bool include_all_ngrams; -} TfLiteSkipGramParams; - -typedef struct { - int block_size; -} TfLiteSpaceToDepthParams; - -typedef struct { - int block_size; -} TfLiteDepthToSpaceParams; - -typedef struct { - TfLiteType in_data_type; - TfLiteType out_data_type; -} TfLiteCastParams; - -typedef enum { - kTfLiteCombinerTypeSum = 0, - kTfLiteCombinerTypeMean = 1, - kTfLiteCombinerTypeSqrtn = 2, -} TfLiteCombinerType; - -typedef struct { - TfLiteCombinerType combiner; -} TfLiteEmbeddingLookupSparseParams; - -typedef struct { - int axis; - int batch_dims; -} TfLiteGatherParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteTransposeParams; - -typedef struct { - bool keep_dims; -} TfLiteReducerParams; - -typedef struct { - int num_splits; -} TfLiteSplitParams; - -typedef struct { - int num_splits; -} TfLiteSplitVParams; - -typedef struct { - // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. - // For now we will fix the maximum possible number of dimensions. - int squeeze_dims[8]; - int num_squeeze_dims; -} TfLiteSqueezeParams; - -typedef struct { - int begin_mask; - int end_mask; - int ellipsis_mask; - int new_axis_mask; - int shrink_axis_mask; -} TfLiteStridedSliceParams; - -typedef struct { - TfLiteType output_type; -} TfLiteArgMaxParams; - -typedef struct { - TfLiteType output_type; -} TfLiteArgMinParams; - -typedef struct { - TfLitePadding padding; - int stride_width; - int stride_height; -} TfLiteTransposeConvParams; - -typedef struct { - bool validate_indices; -} TfLiteSparseToDenseParams; - -typedef struct { - TfLiteType out_type; -} TfLiteShapeParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteRankParams; - -typedef struct { - // Parameters supported by version 1: - float min; - float max; - int num_bits; - - // Parameters supported by version 2: - bool narrow_range; -} TfLiteFakeQuantParams; - -typedef struct { - int values_count; - int axis; -} TfLitePackParams; - -typedef struct { - int axis; -} TfLiteOneHotParams; - -typedef struct { - int num; - int axis; -} TfLiteUnpackParams; - -typedef struct { - float alpha; -} TfLiteLeakyReluParams; - -typedef struct { - TfLiteType index_out_type; -} TfLiteUniqueParams; - -typedef struct { - int seq_dim; - int batch_dim; -} TfLiteReverseSequenceParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteMatrixDiagParams; - -typedef struct { - EmptyStructPlaceholder placeholder; -} TfLiteMatrixSetDiagParams; - -typedef struct { - int then_subgraph_index; - int else_subgraph_index; -} TfLiteIfParams; - -typedef struct { - int cond_subgraph_index; - int body_subgraph_index; -} TfLiteWhileParams; - -typedef struct { - bool exclusive; - bool reverse; -} TfLiteCumsumParams; - -typedef struct { - int init_subgraph_index; -} TfLiteCallOnceParams; - -typedef struct { - int table_id; - TfLiteType key_dtype; - TfLiteType 
value_dtype; -} TfLiteHashtableParams; - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus +/// For documentation, see +/// third_party/tensorflow/lite/core/c/builtin_op_data.h. +#include "edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h" // IWYU pragma: export #endif // TENSORFLOW_LITE_C_BUILTIN_OP_DATA_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/c_api_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/c_api_types.h index 0128477..4d3fab2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/c_api_types.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/c_api_types.h @@ -19,77 +19,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_C_C_API_TYPES_H_ #define TENSORFLOW_LITE_C_C_API_TYPES_H_ -#include +/// For documentation, see +/// third_party/tensorflow/lite/core/c/c_api_types.h. +#include "edge-impulse-sdk/tensorflow/lite/core/c/c_api_types.h" // IWYU pragma: export -#ifdef __cplusplus -extern "C" { -#endif - -// Define TFL_CAPI_EXPORT macro to export a function properly with a shared -// library. -#ifdef SWIG -#define TFL_CAPI_EXPORT -#else -#if defined(_WIN32) -#ifdef TFL_COMPILE_LIBRARY -#define TFL_CAPI_EXPORT __declspec(dllexport) -#else -#define TFL_CAPI_EXPORT __declspec(dllimport) -#endif // TFL_COMPILE_LIBRARY -#else -#define TFL_CAPI_EXPORT __attribute__((visibility("default"))) -#endif // _WIN32 -#endif // SWIG - -typedef enum TfLiteStatus { - kTfLiteOk = 0, - - // Generally referring to an error in the runtime (i.e. interpreter) - kTfLiteError = 1, - - // Generally referring to an error from a TfLiteDelegate itself. - kTfLiteDelegateError = 2, - - // Generally referring to an error in applying a delegate due to - // incompatibility between runtime and delegate, e.g., this error is returned - // when trying to apply a TfLite delegate onto a model graph that's already - // immutable. - kTfLiteApplicationError = 3 -} TfLiteStatus; - -// Types supported by tensor -typedef enum { - kTfLiteNoType = 0, - kTfLiteFloat32 = 1, - kTfLiteInt32 = 2, - kTfLiteUInt8 = 3, - kTfLiteInt64 = 4, - kTfLiteString = 5, - kTfLiteBool = 6, - kTfLiteInt16 = 7, - kTfLiteComplex64 = 8, - kTfLiteInt8 = 9, - kTfLiteFloat16 = 10, - kTfLiteFloat64 = 11, - kTfLiteComplex128 = 12, - kTfLiteUInt64 = 13, - kTfLiteResource = 14, - kTfLiteVariant = 15, - kTfLiteUInt32 = 16, -} TfLiteType; - -// Legacy. Will be deprecated in favor of TfLiteAffineQuantization. -// If per-layer quantization is specified this field will still be populated in -// addition to TfLiteAffineQuantization. -// Parameters for asymmetric quantization. Quantized values can be converted -// back to float using: -// real_value = scale * (quantized_value - zero_point) -typedef struct TfLiteQuantizationParams { - float scale; - int32_t zero_point; -} TfLiteQuantizationParams; - -#ifdef __cplusplus -} // extern C -#endif #endif // TENSORFLOW_LITE_C_C_API_TYPES_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/common.c b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/common.c index e141d66..9efcd3a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/common.c +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/common.c @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -12,233 +12,6 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +// Dummy file for backwards compatibility. +// See core/api/common.cc -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" - -#ifndef TF_LITE_STATIC_MEMORY -#include -#include -#endif // TF_LITE_STATIC_MEMORY - -int TfLiteIntArrayGetSizeInBytes(int size) { - static TfLiteIntArray dummy; - return sizeof(dummy) + sizeof(dummy.data[0]) * size; -} - -int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) { - if (a == b) return 1; - if (a == NULL || b == NULL) return 0; - return TfLiteIntArrayEqualsArray(a, b->size, b->data); -} - -int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, - const int b_data[]) { - if (a == NULL) return (b_size == 0); - if (a->size != b_size) return 0; - int i = 0; - for (; i < a->size; i++) - if (a->data[i] != b_data[i]) return 0; - return 1; -} - -#ifndef TF_LITE_STATIC_MEMORY - -TfLiteIntArray* TfLiteIntArrayCreate(int size) { - int alloc_size = TfLiteIntArrayGetSizeInBytes(size); - if (alloc_size <= 0) return NULL; - TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); - if (!ret) return ret; - ret->size = size; - return ret; -} - -TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) { - if (!src) return NULL; - TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size); - if (ret) { - memcpy(ret->data, src->data, src->size * sizeof(int)); - } - return ret; -} - -void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); } - -#endif // TF_LITE_STATIC_MEMORY - -int TfLiteFloatArrayGetSizeInBytes(int size) { - static TfLiteFloatArray dummy; - return sizeof(dummy) + sizeof(dummy.data[0]) * size; -} - -#ifndef TF_LITE_STATIC_MEMORY - -TfLiteFloatArray* TfLiteFloatArrayCreate(int size) { - TfLiteFloatArray* ret = - (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size)); - ret->size = size; - return ret; -} - -void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); } - -void TfLiteTensorDataFree(TfLiteTensor* t) { - if (t->allocation_type == kTfLiteDynamic || - t->allocation_type == kTfLitePersistentRo) { - free(t->data.raw); - } - t->data.raw = NULL; -} - -void TfLiteQuantizationFree(TfLiteQuantization* quantization) { - if (quantization->type == kTfLiteAffineQuantization) { - TfLiteAffineQuantization* q_params = - (TfLiteAffineQuantization*)(quantization->params); - if (q_params->scale) { - TfLiteFloatArrayFree(q_params->scale); - q_params->scale = NULL; - } - if (q_params->zero_point) { - TfLiteIntArrayFree(q_params->zero_point); - q_params->zero_point = NULL; - } - free(q_params); - } - quantization->params = NULL; - quantization->type = kTfLiteNoQuantization; -} - -void TfLiteSparsityFree(TfLiteSparsity* sparsity) { - if (sparsity == NULL) { - return; - } - - if (sparsity->traversal_order) { - TfLiteIntArrayFree(sparsity->traversal_order); - sparsity->traversal_order = NULL; - } - - if (sparsity->block_map) { - TfLiteIntArrayFree(sparsity->block_map); - sparsity->block_map = NULL; - } - - if (sparsity->dim_metadata) { - int i = 0; - for (; i < sparsity->dim_metadata_size; i++) { - 
TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i]; - if (metadata.format == kTfLiteDimSparseCSR) { - TfLiteIntArrayFree(metadata.array_segments); - metadata.array_segments = NULL; - TfLiteIntArrayFree(metadata.array_indices); - metadata.array_indices = NULL; - } - } - free(sparsity->dim_metadata); - sparsity->dim_metadata = NULL; - } - - free(sparsity); -} - -void TfLiteTensorFree(TfLiteTensor* t) { - TfLiteTensorDataFree(t); - if (t->dims) TfLiteIntArrayFree(t->dims); - t->dims = NULL; - - if (t->dims_signature) { - TfLiteIntArrayFree((TfLiteIntArray *) t->dims_signature); - } - t->dims_signature = NULL; - - TfLiteQuantizationFree(&t->quantization); - TfLiteSparsityFree(t->sparsity); - t->sparsity = NULL; -} - -void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, - TfLiteQuantizationParams quantization, char* buffer, - size_t size, TfLiteAllocationType allocation_type, - const void* allocation, bool is_variable, - TfLiteTensor* tensor) { - TfLiteTensorFree(tensor); - tensor->type = type; - tensor->name = name; - tensor->dims = dims; - tensor->params = quantization; - tensor->data.raw = buffer; - tensor->bytes = size; - tensor->allocation_type = allocation_type; - tensor->allocation = allocation; - tensor->is_variable = is_variable; - - tensor->quantization.type = kTfLiteNoQuantization; - tensor->quantization.params = NULL; -} - -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) { - if (tensor->allocation_type != kTfLiteDynamic && - tensor->allocation_type != kTfLitePersistentRo) { - return; - } - // TODO(b/145340303): Tensor data should be aligned. - if (!tensor->data.raw) { - tensor->data.raw = malloc(num_bytes); - } else if (num_bytes > tensor->bytes) { - tensor->data.raw = realloc(tensor->data.raw, num_bytes); - } - tensor->bytes = num_bytes; -} -#endif // TF_LITE_STATIC_MEMORY - -const char* TfLiteTypeGetName(TfLiteType type) { - switch (type) { - case kTfLiteNoType: - return "NOTYPE"; - case kTfLiteFloat32: - return "FLOAT32"; - case kTfLiteInt16: - return "INT16"; - case kTfLiteInt32: - return "INT32"; - case kTfLiteUInt32: - return "UINT32"; - case kTfLiteUInt8: - return "UINT8"; - case kTfLiteInt8: - return "INT8"; - case kTfLiteInt64: - return "INT64"; - case kTfLiteUInt64: - return "UINT64"; - case kTfLiteBool: - return "BOOL"; - case kTfLiteComplex64: - return "COMPLEX64"; - case kTfLiteComplex128: - return "COMPLEX128"; - case kTfLiteString: - return "STRING"; - case kTfLiteFloat16: - return "FLOAT16"; - case kTfLiteFloat64: - return "FLOAT64"; - case kTfLiteResource: - return "RESOURCE"; - case kTfLiteVariant: - return "VARIANT"; - } - return "Unknown type"; -} - -TfLiteDelegate TfLiteDelegateCreate() { - TfLiteDelegate d = { - .data_ = NULL, - .Prepare = NULL, - .CopyFromBufferHandle = NULL, - .CopyToBufferHandle = NULL, - .FreeBufferHandle = NULL, - .flags = kTfLiteDelegateFlagsNone, - }; - return d; -} diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/common.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/common.h index b0a029e..00c3768 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/common.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/c/common.h @@ -36,891 +36,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_C_COMMON_H_ #define TENSORFLOW_LITE_C_COMMON_H_ -#include -#include -#include +/// For documentation, see +/// third_party/tensorflow/lite/core/c/common.h. +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" // IWYU pragma: export -#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" // IWYU pragma: export - -#ifdef __cplusplus -extern "C" { -#endif // __cplusplus - -// The list of external context types known to TF Lite. This list exists solely -// to avoid conflicts and to ensure ops can share the external contexts they -// need. Access to the external contexts is controlled by one of the -// corresponding support files. -typedef enum TfLiteExternalContextType { - kTfLiteEigenContext = 0, // include eigen_support.h to use. - kTfLiteGemmLowpContext = 1, // include gemm_support.h to use. - kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support. - kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use. - kTfLiteMaxExternalContexts = 4 -} TfLiteExternalContextType; - -// Forward declare so dependent structs and methods can reference these types -// prior to the struct definitions. -struct TfLiteContext; -struct TfLiteDelegate; -struct TfLiteRegistration; - -// An external context is a collection of information unrelated to the TF Lite -// framework, but useful to a subset of the ops. TF Lite knows very little -// about the actual contexts, but it keeps a list of them, and is able to -// refresh them if configurations like the number of recommended threads -// change. -typedef struct TfLiteExternalContext { - TfLiteExternalContextType type; - TfLiteStatus (*Refresh)(struct TfLiteContext* context); -} TfLiteExternalContext; - -#define kTfLiteOptionalTensor (-1) - -// Fixed size list of integers. Used for dimensions and inputs/outputs tensor -// indices -typedef struct TfLiteIntArray { - int size; -// gcc 6.1+ have a bug where flexible members aren't properly handled -// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c -#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ - __GNUC_MINOR__ >= 1) || \ - defined(HEXAGON) || \ - (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) - int data[0]; -#else - int data[]; -#endif -} TfLiteIntArray; - -// Given the size (number of elements) in a TfLiteIntArray, calculate its size -// in bytes. -int TfLiteIntArrayGetSizeInBytes(int size); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteIntArrayFree(). -TfLiteIntArray* TfLiteIntArrayCreate(int size); -#endif - -// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise. -int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b); - -// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. -int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, - const int b_data[]); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a copy of an array passed as `src`. -// You are expected to free memory with TfLiteIntArrayFree -TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src); - -// Free memory of array `a`. -void TfLiteIntArrayFree(TfLiteIntArray* a); -#endif // TF_LITE_STATIC_MEMORY - -// Fixed size list of floats. Used for per-channel quantization. 
-typedef struct TfLiteFloatArray { - int size; -// gcc 6.1+ have a bug where flexible members aren't properly handled -// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c -// This also applies to the toolchain used for Qualcomm Hexagon DSPs. -#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ - __GNUC_MINOR__ >= 1 - float data[0]; -#else - float data[]; -#endif -} TfLiteFloatArray; - -// Given the size (number of elements) in a TfLiteFloatArray, calculate its size -// in bytes. -int TfLiteFloatArrayGetSizeInBytes(int size); - -#ifndef TF_LITE_STATIC_MEMORY -// Create a array of a given `size` (uninitialized entries). -// This returns a pointer, that you must free using TfLiteFloatArrayFree(). -TfLiteFloatArray* TfLiteFloatArrayCreate(int size); - -// Free memory of array `a`. -void TfLiteFloatArrayFree(TfLiteFloatArray* a); -#endif // TF_LITE_STATIC_MEMORY - -// Since we must not depend on any libraries, define a minimal subset of -// error macros while avoiding names that have pre-conceived meanings like -// assert and check. - -// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than -// calling the context->ReportError function directly, so that message strings -// can be stripped out if the binary size needs to be severely optimized. -#ifndef TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_KERNEL_LOG(context, ...) \ - do { \ - (context)->ReportError((context), __VA_ARGS__); \ - } while (false) - -#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) \ - do { \ - if ((context) != nullptr) { \ - (context)->ReportError((context), __VA_ARGS__); \ - } \ - } while (false) -#else // TF_LITE_STRIP_ERROR_STRINGS -#define TF_LITE_KERNEL_LOG(context, ...) -#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) -#endif // TF_LITE_STRIP_ERROR_STRINGS - -// Check whether value is true, and if not return kTfLiteError from -// the current function (and report the error string msg). -#define TF_LITE_ENSURE_MSG(context, value, msg) \ - do { \ - if (!(value)) { \ - TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \ - return kTfLiteError; \ - } \ - } while (0) - -// Check whether the value `a` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -#define TF_LITE_ENSURE(context, a) \ - do { \ - if (!(a)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \ - __LINE__, #a); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_STATUS(a) \ - do { \ - const TfLiteStatus s = (a); \ - if (s != kTfLiteOk) { \ - return s; \ - } \ - } while (0) - -// Check whether the value `a == b` is true, and if not return kTfLiteError from -// the current function, while also reporting the location of the error. -// `a` and `b` may be evaluated more than once, so no side effects or -// extremely expensive computations should be done. -// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. 
-#define TF_LITE_ENSURE_EQ(context, a, b) \ - do { \ - if ((a) != (b)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \ - __LINE__, #a, #b, (a), (b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \ - do { \ - if ((a) != (b)) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \ - __LINE__, #a, #b, TfLiteTypeGetName(a), \ - TfLiteTypeGetName(b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \ - do { \ - auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a)); \ - if (delta > epsilon) { \ - TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \ - __FILE__, __LINE__, #a, #b, static_cast(a), \ - static_cast(b)); \ - return kTfLiteError; \ - } \ - } while (0) - -#define TF_LITE_ENSURE_OK(context, status) \ - do { \ - const TfLiteStatus s = (status); \ - if ((s) != kTfLiteOk) { \ - return s; \ - } \ - } while (0) - -// Single-precision complex data type compatible with the C99 definition. -typedef struct TfLiteComplex64 { - float re, im; // real and imaginary parts, respectively. -} TfLiteComplex64; - -// Double-precision complex data type compatible with the C99 definition. -typedef struct TfLiteComplex128 { - double re, im; // real and imaginary parts, respectively. -} TfLiteComplex128; - -// Half precision data type compatible with the C99 definition. -typedef struct TfLiteFloat16 { - uint16_t data; -} TfLiteFloat16; - -// Return the name of a given type, for error reporting purposes. -const char* TfLiteTypeGetName(TfLiteType type); - -// SupportedQuantizationTypes. -typedef enum TfLiteQuantizationType { - // No quantization. - kTfLiteNoQuantization = 0, - // Affine quantization (with support for per-channel quantization). - // Corresponds to TfLiteAffineQuantization. - kTfLiteAffineQuantization = 1, -} TfLiteQuantizationType; - -// Structure specifying the quantization used by the tensor, if-any. -typedef struct TfLiteQuantization { - // The type of quantization held by params. - TfLiteQuantizationType type; - // Holds an optional reference to a quantization param structure. The actual - // type depends on the value of the `type` field (see the comment there for - // the values and corresponding types). - void* params; -} TfLiteQuantization; - -// Parameters for asymmetric quantization across a dimension (i.e per output -// channel quantization). -// quantized_dimension specifies which dimension the scales and zero_points -// correspond to. -// For a particular value in quantized_dimension, quantized values can be -// converted back to float using: -// real_value = scale * (quantized_value - zero_point) -typedef struct TfLiteAffineQuantization { - TfLiteFloatArray* scale; - TfLiteIntArray* zero_point; - int32_t quantized_dimension; -} TfLiteAffineQuantization; - -/* A union of pointers that points to memory for a given tensor. */ -typedef union TfLitePtrUnion { - /* Do not access these members directly, if possible, use - * GetTensorData(tensor) instead, otherwise only access .data, as other - * members are deprecated. */ - int32_t* i32; - uint32_t* u32; - int64_t* i64; - uint64_t* u64; - float* f; - TfLiteFloat16* f16; - double* f64; - char* raw; - const char* raw_const; - uint8_t* uint8; - bool* b; - int16_t* i16; - TfLiteComplex64* c64; - TfLiteComplex128* c128; - int8_t* int8; - /* Only use this member. */ - void* data; -} TfLitePtrUnion; - -// Memory allocation strategies. 
-// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated. -// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence, -// and available during eval. -// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and -// only available during eval. -// * kTfLiteDynamic: Allocated during eval, or for string tensors. -// * kTfLitePersistentRo: Allocated and populated during prepare. This is -// useful for tensors that can be computed during prepare and treated -// as constant inputs for downstream ops (also in prepare). -// * kTfLiteCustom: Custom memory allocation provided by the user. See -// TfLiteCustomAllocation below. -typedef enum TfLiteAllocationType { - kTfLiteMemNone = 0, - kTfLiteMmapRo, - kTfLiteArenaRw, - kTfLiteArenaRwPersistent, - kTfLiteDynamic, - kTfLitePersistentRo, - kTfLiteCustom, -} TfLiteAllocationType; - -// The delegates should use zero or positive integers to represent handles. -// -1 is reserved from unallocated status. -typedef int TfLiteBufferHandle; -enum { - kTfLiteNullBufferHandle = -1, -}; - -// Storage format of each dimension in a sparse tensor. -typedef enum TfLiteDimensionType { - kTfLiteDimDense = 0, - kTfLiteDimSparseCSR, -} TfLiteDimensionType; - -// Metadata to encode each dimension in a sparse tensor. -typedef struct TfLiteDimensionMetadata { - TfLiteDimensionType format; - int dense_size; - TfLiteIntArray* array_segments; - TfLiteIntArray* array_indices; -} TfLiteDimensionMetadata; - -// Parameters used to encode a sparse tensor. For detailed explanation of each -// field please refer to lite/schema/schema.fbs. -typedef struct TfLiteSparsity { - TfLiteIntArray* traversal_order; - TfLiteIntArray* block_map; - TfLiteDimensionMetadata* dim_metadata; - int dim_metadata_size; -} TfLiteSparsity; - -// Defines a custom memory allocation not owned by the runtime. -// `data` should be aligned to kDefaultTensorAlignment defined in -// lite/util.h. (Currently 64 bytes) -// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage. -typedef struct TfLiteCustomAllocation { - void* data; - size_t bytes; -} TfLiteCustomAllocation; - -// The flags used in `Interpreter::SetCustomAllocationForTensor`. -// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. -typedef enum TfLiteCustomAllocationFlags { - kTfLiteCustomAllocationFlagsNone = 0, - // Skips checking whether allocation.data points to an aligned buffer as - // expected by the TFLite runtime. - // NOTE: Setting this flag can cause crashes when calling Invoke(). - // Use with caution. - kTfLiteCustomAllocationFlagsSkipAlignCheck = 1, -} TfLiteCustomAllocationFlags; - -// A tensor in the interpreter system which is a wrapper around a buffer of -// data including a dimensionality (or NULL if not currently defined). -#ifndef TF_LITE_STATIC_MEMORY -typedef struct TfLiteTensor { - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. - TfLiteIntArray* dims; - // Quantization information. - TfLiteQuantizationParams params; - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. 
weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). - TfLiteAllocationType allocation_type; - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. - size_t bytes; - - // An opaque pointer to a tflite::MMapAllocation - const void* allocation; - - // Null-terminated name of this tensor. - const char* name; - - // The delegate which knows how to handle `buffer_handle`. - // WARNING: This is an experimental interface that is subject to change. - struct TfLiteDelegate* delegate; - - // An integer buffer handle that can be handled by `delegate`. - // The value is valid only when delegate is not null. - // WARNING: This is an experimental interface that is subject to change. - TfLiteBufferHandle buffer_handle; - - // If the delegate uses its own buffer (e.g. GPU memory), the delegate is - // responsible to set data_is_stale to true. - // `delegate->CopyFromBufferHandle` can be called to copy the data from - // delegate buffer. - // WARNING: This is an // experimental interface that is subject to change. - bool data_is_stale; - - // True if the tensor is a variable. - bool is_variable; - - // Quantization information. Replaces params field above. - TfLiteQuantization quantization; - - // Parameters used to encode a sparse tensor. - // This is optional. The field is NULL if a tensor is dense. - // WARNING: This is an experimental interface that is subject to change. - TfLiteSparsity* sparsity; - - // Optional. Encodes shapes with unknown dimensions with -1. This field is - // only populated when unknown dimensions exist in a read-write tensor (i.e. - // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and - // `dims_signature` contains [1, -1, -1, 3]). - const TfLiteIntArray* dims_signature; -} TfLiteTensor; - -// A structure representing an instance of a node. -// This structure only exhibits the inputs, outputs and user defined data, not -// other features like the type. -typedef struct TfLiteNode { - // Inputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* inputs; - - // Outputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* outputs; - - // intermediate tensors to this node expressed as indices into the simulator's - // tensors. - TfLiteIntArray* intermediates; - - // Temporary tensors uses during the computations. This usually contains no - // tensors, but ops are allowed to change that if they need scratch space of - // any sort. - TfLiteIntArray* temporaries; - - // Opaque data provided by the node implementer through `Registration.init`. - void* user_data; - - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h - void* builtin_data; - - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. - const void* custom_initial_data; - int custom_initial_data_size; - - // The pointer to the delegate. This is non-null only when the node is - // created by calling `interpreter.ModifyGraphWithDelegate`. - // WARNING: This is an experimental interface that is subject to change. - struct TfLiteDelegate* delegate; -} TfLiteNode; -#else // defined(TF_LITE_STATIC_MEMORY)? -// NOTE: This flag is opt-in only at compile time. 
-// -// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct -// contains only the minimum fields required to initialize and prepare a micro -// inference graph. The fields in this struct have been ordered from -// largest-to-smallest for optimal struct sizeof. -// -// This struct does not use: -// - allocation -// - buffer_handle -// - data_is_stale -// - delegate -// - dims_signature -// - name -// - sparsity -typedef struct TfLiteTensor { - // TODO(b/155784997): Consider consolidating these quantization fields: - // Quantization information. Replaces params field above. - TfLiteQuantization quantization; - - // Quantization information. - TfLiteQuantizationParams params; - - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. NOTE: the product of elements of `dims` - // and the element datatype size should be equal to `bytes` below. - TfLiteIntArray* dims; - - // The number of bytes required to store the data of this Tensor. I.e. - // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if - // type is kTfLiteFloat32 and dims = {3, 2} then - // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. - size_t bytes; - - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. - TfLiteType type; - - // How memory is mapped - // kTfLiteMmapRo: Memory mapped read only. - // i.e. weights - // kTfLiteArenaRw: Arena allocated read write memory - // (i.e. temporaries, outputs). - TfLiteAllocationType allocation_type; - - // True if the tensor is a variable. - bool is_variable; -} TfLiteTensor; - -// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains -// only the minimum fields required to represent a node. -// -// This struct does not use: -// - delegate -// - intermediates -// - temporaries -typedef struct TfLiteNode { - // Inputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* inputs; - - // Outputs to this node expressed as indices into the simulator's tensors. - TfLiteIntArray* outputs; - - // Opaque data provided by the node implementer through `Registration.init`. - void* user_data; - - // Opaque data provided to the node if the node is a builtin. This is usually - // a structure defined in builtin_op_data.h - void* builtin_data; - - // Custom initial data. This is the opaque data provided in the flatbuffer. - // WARNING: This is an experimental interface that is subject to change. - const void* custom_initial_data; - int custom_initial_data_size; -} TfLiteNode; -#endif // TF_LITE_STATIC_MEMORY - -// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount -// of information required for a kernel to run during TfLiteRegistration::Eval. -// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM -// builds with this flag by default internally. -typedef struct TfLiteEvalTensor { - // A union of data pointers. The appropriate type should be used for a typed - // tensor based on `type`. - TfLitePtrUnion data; - - // A pointer to a structure representing the dimensionality interpretation - // that the buffer should have. - TfLiteIntArray* dims; - - // The data type specification for data stored in `data`. This affects - // what member of `data` union should be used. 
- TfLiteType type; -} TfLiteEvalTensor; - -#ifndef TF_LITE_STATIC_MEMORY -// Free data memory of tensor `t`. -void TfLiteTensorDataFree(TfLiteTensor* t); - -// Free quantization data. -void TfLiteQuantizationFree(TfLiteQuantization* quantization); - -// Free sparsity parameters. -void TfLiteSparsityFree(TfLiteSparsity* sparsity); - -// Free memory of tensor `t`. -void TfLiteTensorFree(TfLiteTensor* t); - -// Set all of a tensor's fields (and free any previously allocated data). -void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, - TfLiteQuantizationParams quantization, char* buffer, - size_t size, TfLiteAllocationType allocation_type, - const void* allocation, bool is_variable, - TfLiteTensor* tensor); - -// Resize the allocated data of a (dynamic) tensor. Tensors with allocation -// types other than kTfLiteDynamic will be ignored. -void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor); -#endif // TF_LITE_STATIC_MEMORY - -// WARNING: This is an experimental interface that is subject to change. -// -// Currently, TfLiteDelegateParams has to be allocated in a way that it's -// trivially destructable. It will be stored as `builtin_data` field in -// `TfLiteNode` of the delegate node. -// -// See also the `CreateDelegateParams` function in `interpreter.cc` details. -typedef struct TfLiteDelegateParams { - struct TfLiteDelegate* delegate; - TfLiteIntArray* nodes_to_replace; - TfLiteIntArray* input_tensors; - TfLiteIntArray* output_tensors; -} TfLiteDelegateParams; - -typedef struct TfLiteContext { - // Number of tensors in the context. - size_t tensors_size; - - // The execution plan contains a list of the node indices in execution - // order. execution_plan->size is the current number of nodes. And, - // execution_plan->data[0] is the first node that needs to be run. - // TfLiteDelegates can traverse the current execution plan by iterating - // through each member of this array and using GetNodeAndRegistration() to - // access details about a node. i.e. - // TfLiteIntArray* execution_plan; - // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan)); - // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) { - // int node_index = execution_plan->data[exec_index]; - // TfLiteNode* node; - // TfLiteRegistration* reg; - // context->GetNodeAndRegistration(context, node_index, &node, &reg); - // } - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context, - TfLiteIntArray** execution_plan); - - // An array of tensors in the interpreter context (of length `tensors_size`) - TfLiteTensor* tensors; - - // opaque full context ptr (an opaque c++ data structure) - void* impl_; - - // Request memory pointer be resized. Updates dimensions on the tensor. - // NOTE: ResizeTensor takes ownership of newSize. - TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor, - TfLiteIntArray* new_size); - // Request that an error be reported with format string msg. - void (*ReportError)(struct TfLiteContext*, const char* msg, ...); - - // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If - // non-null, the value pointed to by `first_new_tensor_index` will be set to - // the index of the first new tensor. - TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add, - int* first_new_tensor_index); - - // Get a Tensor node by node_index. - // WARNING: This is an experimental interface that is subject to change.
- TfLiteStatus (*GetNodeAndRegistration)( - struct TfLiteContext*, int node_index, TfLiteNode** node, - struct TfLiteRegistration** registration); - - // Replace ops with one or more stub delegate operations. This function - // does not take ownership of `nodes_to_replace`. - TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)( - struct TfLiteContext*, struct TfLiteRegistration registration, - const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate); - - // Number of threads that are recommended to subsystems like gemmlowp and - // eigen. - int recommended_num_threads; - - // Access external contexts by type. - // WARNING: This is an experimental interface that is subject to change. - TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*, - TfLiteExternalContextType); - // Set the value of a external context. Does not take ownership of the - // pointer. - // WARNING: This is an experimental interface that is subject to change. - void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType, - TfLiteExternalContext*); - - // Flag for allowing float16 precision for FP32 calculation. - // default: false. - // WARNING: This is an experimental API and subject to change. - bool allow_fp32_relax_to_fp16; - - // Pointer to the op-level profiler, if set; nullptr otherwise. - void* profiler; - - // Allocate persistent buffer which has the same life time as the interpreter. - // Returns nullptr on failure. - // The memory is allocated from heap for TFL, and from tail in TFLM. - // This method is only available in Init or Prepare stage. - // WARNING: This is an experimental interface that is subject to change. - void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes); - - // Allocate a buffer which will be deallocated right after invoke phase. - // The memory is allocated from heap in TFL, and from volatile arena in TFLM. - // This method is only available in invoke stage. - // NOTE: If possible use RequestScratchBufferInArena method to avoid memory - // allocation during inference time. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes, - void** ptr); - - // Request a scratch buffer in the arena through static memory planning. - // This method is only available in Prepare stage and the buffer is allocated - // by the interpreter between Prepare and Eval stage. In Eval stage, - // GetScratchBuffer API can be used to fetch the address. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx, - size_t bytes, int* buffer_idx); - - // Get the scratch buffer pointer. - // This method is only available in Eval stage. - // WARNING: This is an experimental interface that is subject to change. - void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx); - - // Resize the memory pointer of the `tensor`. This method behaves the same as - // `ResizeTensor`, except that it makes a copy of the shape array internally - // so the shape array could be deallocated right afterwards. - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx, - TfLiteTensor* tensor, int dims, - const int* shape); - - // This method provides a preview of post-delegation partitioning. Each - // TfLiteDelegateParams in the referenced array corresponds to one instance of - // the delegate kernel. 
- // Example usage: - // - // TfLiteIntArray* nodes_to_replace = ...; - // TfLiteDelegateParams* params_array; - // int num_partitions = 0; - // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( - // context, delegate, nodes_to_replace, &params_array, &num_partitions)); - // for (int idx = 0; idx < num_partitions; idx++) { - // const auto& partition_params = params_array[idx]; - // ... - // } - // - // NOTE: The context owns the memory referenced by partition_params_array. It - // will be cleared with another call to PreviewDelegateParitioning, or after - // TfLiteDelegateParams::Prepare returns. - // - // WARNING: This is an experimental interface that is subject to change. - TfLiteStatus (*PreviewDelegatePartitioning)( - struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace, - TfLiteDelegateParams** partition_params_array, int* num_partitions); - - // Returns a TfLiteTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. - TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context, - int tensor_idx); - - // Returns a TfLiteEvalTensor struct for a given index. - // WARNING: This is an experimental interface that is subject to change. - // WARNING: This method may not be available on all platforms. - TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context, - int tensor_idx); -} TfLiteContext; - -typedef struct TfLiteRegistration { - // Initializes the op from serialized data. - // If a built-in op: - // `buffer` is the op's params data (TfLiteLSTMParams*). - // `length` is zero. - // If custom op: - // `buffer` is the op's `custom_options`. - // `length` is the size of the buffer. - // - // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer - // or an instance of a struct). - // - // The returned pointer will be stored with the node in the `user_data` field, - // accessible within prepare and invoke functions below. - // NOTE: if the data is already in the desired format, simply implement this - // function to return `nullptr` and implement the free function to be a no-op. - void* (*init)(TfLiteContext* context, const char* buffer, size_t length); - - // The pointer `buffer` is the data previously returned by an init invocation. - void (*free)(TfLiteContext* context, void* buffer); - - // prepare is called when the inputs this node depends on have been resized. - // context->ResizeTensor() can be called to request output tensors to be - // resized. - // - // Returns kTfLiteOk on success. - TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); - - // Execute the node (should read node->inputs and output to node->outputs). - // Returns kTfLiteOk on success. - TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); - - // profiling_string is called during summarization of profiling information - // in order to group executions together. Providing a value here will cause a - // given op to appear multiple times is the profiling report. This is - // particularly useful for custom ops that can perform significantly - // different calculations depending on their `user-data`. - const char* (*profiling_string)(const TfLiteContext* context, - const TfLiteNode* node); - - // Builtin codes. If this kernel refers to a builtin this is the code - // of the builtin. This is so we can do marshaling to other frameworks like - // NN API.
- // Note: It is the responsibility of the registration binder to set this - // properly. - int32_t builtin_code; - - // Custom op name. If the op is a builtin, this will be null. - // Note: It is the responsibility of the registration binder to set this - // properly. - // WARNING: This is an experimental interface that is subject to change. - const char* custom_name; - - // The version of the op. - // Note: It is the responsibility of the registration binder to set this - // properly. - int version; -} TfLiteRegistration; - -// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the -// values should be 1, 2, 4, 8, ...etc. -typedef enum TfLiteDelegateFlags { - kTfLiteDelegateFlagsNone = 0, - // The flag is set if the delegate can handle dynamic sized tensors. - // For example, the output shape of a `Resize` op with non-constant shape - // can only be inferred when the op is invoked. - // In this case, the Delegate is responsible for calling - // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling - // `ResizeTensor` when invoking the op. - // - // If the delegate isn't capable to handle dynamic tensors, this flag need - // to be set to false. - kTfLiteDelegateFlagsAllowDynamicTensors = 1, - - // This flag can be used by delegates (that allow dynamic tensors) to ensure - // applicable tensor shapes are automatically propagated in the case of tensor - // resizing. - // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors - // of a delegate kernel will have correct shapes before its Prepare() method - // is called. The runtime leverages TFLite builtin ops in the original - // execution plan to propagate shapes. - // - // A few points to note: - // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is - // false, this one is redundant since the delegate kernels are re-initialized - // every time tensors are resized. - // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra - // work is required to prepare the original execution plan. - // 3. This flag requires that the original execution plan only have ops with - // valid registrations (and not 'dummy' custom ops like with Flex). - // WARNING: This feature is experimental and subject to change. - kTfLiteDelegateFlagsRequirePropagatedShapes = 2 -} TfLiteDelegateFlags; - -// WARNING: This is an experimental interface that is subject to change. -typedef struct TfLiteDelegate { - // Data that delegate needs to identify itself. This data is owned by the - // delegate. The delegate is owned in the user code, so the delegate is - // responsible for doing this when it is destroyed. - void* data_; - - // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the - // delegate a view of the current graph through TfLiteContext*. It typically - // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() - // to ask the TensorFlow lite runtime to create macro-nodes to represent - // delegated subgraphs of the original graph. - TfLiteStatus (*Prepare)(TfLiteContext* context, - struct TfLiteDelegate* delegate); - - // Copy the data from delegate buffer handle into raw memory of the given - // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as - // long as it follows the rules for kTfLiteDynamic tensors, in which case this - // cannot be null. 
- TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle buffer_handle, - TfLiteTensor* tensor); - - // Copy the data from raw memory of the given 'tensor' to delegate buffer - // handle. This can be null if the delegate doesn't use its own buffer. - TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle buffer_handle, - TfLiteTensor* tensor); - - // Free the Delegate Buffer Handle. Note: This only frees the handle, but - // this doesn't release the underlying resource (e.g. textures). The - // resources are either owned by application layer or the delegate. - // This can be null if the delegate doesn't use its own buffer. - void (*FreeBufferHandle)(TfLiteContext* context, - struct TfLiteDelegate* delegate, - TfLiteBufferHandle* handle); - - // Bitmask flags. See the comments in `TfLiteDelegateFlags`. - int64_t flags; -} TfLiteDelegate; - -// Build a 'null' delegate, with all the fields properly set to their default -// values. -TfLiteDelegate TfLiteDelegateCreate(); - -#ifdef __cplusplus -} // extern "C" -#endif // __cplusplus #endif // TENSORFLOW_LITE_C_COMMON_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/context_util.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/context_util.h new file mode 100644 index 0000000..8c97a8d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/context_util.h @@ -0,0 +1,54 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +/// \file +/// +/// This provides a few C++ helpers that are useful for manipulating C +/// structures in C++. +#ifndef TENSORFLOW_LITE_CONTEXT_UTIL_H_ +#define TENSORFLOW_LITE_CONTEXT_UTIL_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" + +namespace tflite { + +/// Provides a range iterable wrapper for TfLiteIntArray* (C lists) that TfLite +/// C api uses. +// Can't use the google array_view, since we can't depend on even +// absl for embedded device reasons. +class TfLiteIntArrayView { + public: + /// Construct a view of a TfLiteIntArray*. Note, `int_array` should be + /// non-null and this view does not take ownership of it. 
+ explicit TfLiteIntArrayView(const TfLiteIntArray* int_array) + : int_array_(int_array) {} + + TfLiteIntArrayView(const TfLiteIntArrayView&) = default; + TfLiteIntArrayView& operator=(const TfLiteIntArrayView& rhs) = default; + + typedef const int* const_iterator; + const_iterator begin() const { return int_array_->data; } + const_iterator end() const { return &int_array_->data[int_array_->size]; } + size_t size() const { return end() - begin(); } + int operator[](size_t pos) const { return int_array_->data[pos]; } + + private: + const TfLiteIntArray* int_array_; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_CONTEXT_UTIL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/common.cc new file mode 100644 index 0000000..67b8c6c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/common.cc @@ -0,0 +1,354 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" + +#include "edge-impulse-sdk/tensorflow/lite/core/c/c_api_types.h" +#ifdef TF_LITE_TENSORFLOW_PROFILER +#include "edge-impulse-sdk/tensorflow/lite/tensorflow_profiler_logger.h" +#endif + +#ifndef TF_LITE_STATIC_MEMORY +#include <stdlib.h> +#include <string.h> +#endif // TF_LITE_STATIC_MEMORY + +extern "C" { + +size_t TfLiteIntArrayGetSizeInBytes(int size) { + static TfLiteIntArray dummy; + + size_t computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size; +#if defined(_MSC_VER) + // Context for why this is needed is in http://b/189926408#comment21 + computed_size -= sizeof(dummy.data[0]); +#endif + return computed_size; +} + +int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) { + if (a == b) return 1; + if (a == nullptr || b == nullptr) return 0; + return TfLiteIntArrayEqualsArray(a, b->size, b->data); +} + +int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, + const int b_data[]) { + if (a == nullptr) return (b_size == 0); + if (a->size != b_size) return 0; + int i = 0; + for (; i < a->size; i++) + if (a->data[i] != b_data[i]) return 0; + return 1; +} + +#ifndef TF_LITE_STATIC_MEMORY + +TfLiteIntArray* TfLiteIntArrayCreate(int size) { + size_t alloc_size = TfLiteIntArrayGetSizeInBytes(size); + if (alloc_size <= 0) return nullptr; + TfLiteIntArray* ret = (TfLiteIntArray*)malloc(alloc_size); + if (!ret) return ret; + ret->size = size; + return ret; +} + +TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) { + if (!src) return nullptr; + TfLiteIntArray* ret = TfLiteIntArrayCreate(src->size); + if (ret) { + memcpy(ret->data, src->data, src->size * sizeof(int)); + } + return ret; +} + +void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); } + +#endif // TF_LITE_STATIC_MEMORY + +int TfLiteFloatArrayGetSizeInBytes(int size) {
+ static TfLiteFloatArray dummy; + + int computed_size = sizeof(dummy) + sizeof(dummy.data[0]) * size; +#if defined(_MSC_VER) + // Context for why this is needed is in http://b/189926408#comment21 + computed_size -= sizeof(dummy.data[0]); +#endif + return computed_size; +} + +#ifndef TF_LITE_STATIC_MEMORY + +TfLiteFloatArray* TfLiteFloatArrayCreate(int size) { + TfLiteFloatArray* ret = + (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size)); + ret->size = size; + return ret; +} + +void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); } + +void TfLiteTensorDataFree(TfLiteTensor* t) { + if (t->allocation_type == kTfLiteDynamic || + t->allocation_type == kTfLitePersistentRo) { + if (t->data.raw) { +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::PauseHeapMonitoring(/*pause=*/true); + tflite::OnTfLiteTensorDealloc(t); +#endif + free(t->data.raw); +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::PauseHeapMonitoring(/*pause=*/false); +#endif + } + } + t->data.raw = nullptr; +} + +void TfLiteQuantizationFree(TfLiteQuantization* quantization) { + if (quantization->type == kTfLiteAffineQuantization) { + TfLiteAffineQuantization* q_params = + (TfLiteAffineQuantization*)(quantization->params); + if (q_params->scale) { + TfLiteFloatArrayFree(q_params->scale); + q_params->scale = nullptr; + } + if (q_params->zero_point) { + TfLiteIntArrayFree(q_params->zero_point); + q_params->zero_point = nullptr; + } + free(q_params); + } + quantization->params = nullptr; + quantization->type = kTfLiteNoQuantization; +} + +void TfLiteSparsityFree(TfLiteSparsity* sparsity) { + if (sparsity == nullptr) { + return; + } + + if (sparsity->traversal_order) { + TfLiteIntArrayFree(sparsity->traversal_order); + sparsity->traversal_order = nullptr; + } + + if (sparsity->block_map) { + TfLiteIntArrayFree(sparsity->block_map); + sparsity->block_map = nullptr; + } + + if (sparsity->dim_metadata) { + int i = 0; + for (; i < sparsity->dim_metadata_size; i++) { + TfLiteDimensionMetadata metadata = sparsity->dim_metadata[i]; + if (metadata.format == kTfLiteDimSparseCSR) { + TfLiteIntArrayFree(metadata.array_segments); + metadata.array_segments = nullptr; + TfLiteIntArrayFree(metadata.array_indices); + metadata.array_indices = nullptr; + } + } + free(sparsity->dim_metadata); + sparsity->dim_metadata = nullptr; + } + + free(sparsity); +} + +void TfLiteTensorFree(TfLiteTensor* t) { + TfLiteTensorDataFree(t); + if (t->dims) TfLiteIntArrayFree(t->dims); + t->dims = nullptr; + + if (t->dims_signature) { + TfLiteIntArrayFree((TfLiteIntArray*)t->dims_signature); + } + t->dims_signature = nullptr; + + TfLiteQuantizationFree(&t->quantization); + TfLiteSparsityFree(t->sparsity); + t->sparsity = nullptr; +} + +void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, + TfLiteQuantizationParams quantization, char* buffer, + size_t size, TfLiteAllocationType allocation_type, + const void* allocation, bool is_variable, + TfLiteTensor* tensor) { + TfLiteTensorFree(tensor); + tensor->type = type; + tensor->name = name; + tensor->dims = dims; + tensor->params = quantization; + tensor->data.raw = buffer; + tensor->bytes = size; + tensor->allocation_type = allocation_type; + tensor->allocation = allocation; + tensor->is_variable = is_variable; + + tensor->quantization.type = kTfLiteNoQuantization; + tensor->quantization.params = nullptr; +} + +TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst) { + if (!src || !dst) return kTfLiteOk; + if (src->bytes != dst->bytes) return kTfLiteError; + if (src == 
dst) return kTfLiteOk; + + dst->type = src->type; + if (dst->dims) TfLiteIntArrayFree(dst->dims); + dst->dims = TfLiteIntArrayCopy(src->dims); + memcpy(dst->data.raw, src->data.raw, src->bytes); + dst->buffer_handle = src->buffer_handle; + dst->data_is_stale = src->data_is_stale; + dst->delegate = src->delegate; + + return kTfLiteOk; +} + +TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor, + bool preserve_data) { + if (tensor->allocation_type != kTfLiteDynamic && + tensor->allocation_type != kTfLitePersistentRo) { + return kTfLiteOk; + } +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::PauseHeapMonitoring(/*pause=*/true); +#endif + size_t alloc_bytes = num_bytes; + // TODO(b/145340303): Tensor data should be aligned. +#ifdef TFLITE_KERNEL_USE_XNNPACK + alloc_bytes += 16; // XNNPACK_EXTRA_BYTES = 16 +#endif + if (!tensor->data.data) { + tensor->data.data = (char*)malloc(alloc_bytes); +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::OnTfLiteTensorAlloc(tensor, alloc_bytes); +#endif + } else if (num_bytes > tensor->bytes) { +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::OnTfLiteTensorDealloc(tensor); +#endif + if (preserve_data) { + tensor->data.data = (char*)realloc(tensor->data.data, alloc_bytes); + } else { + // Calling free and malloc can be more efficient as it avoids needlessly + // copying the data when it is not required. + free(tensor->data.data); + tensor->data.data = (char*)malloc(alloc_bytes); + } +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::OnTfLiteTensorAlloc(tensor, alloc_bytes); +#endif + } +#ifdef TF_LITE_TENSORFLOW_PROFILER + tflite::PauseHeapMonitoring(/*pause=*/false); +#endif + tensor->bytes = num_bytes; + if (tensor->data.data == nullptr && num_bytes != 0) { + // We are done allocating but tensor is pointing to null and a valid size + // was requested, so we error. 
+ return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) { + return TfLiteTensorResizeMaybeCopy(num_bytes, tensor, true); +} +#endif // TF_LITE_STATIC_MEMORY + +const char* TfLiteTypeGetName(TfLiteType type) { + switch (type) { + case kTfLiteNoType: + return "NOTYPE"; + case kTfLiteFloat32: + return "FLOAT32"; + case kTfLiteUInt16: + return "UINT16"; + case kTfLiteInt16: + return "INT16"; + case kTfLiteInt32: + return "INT32"; + case kTfLiteUInt32: + return "UINT32"; + case kTfLiteUInt8: + return "UINT8"; + case kTfLiteInt8: + return "INT8"; + case kTfLiteInt64: + return "INT64"; + case kTfLiteUInt64: + return "UINT64"; + case kTfLiteBool: + return "BOOL"; + case kTfLiteComplex64: + return "COMPLEX64"; + case kTfLiteComplex128: + return "COMPLEX128"; + case kTfLiteString: + return "STRING"; + case kTfLiteFloat16: + return "FLOAT16"; + case kTfLiteFloat64: + return "FLOAT64"; + case kTfLiteResource: + return "RESOURCE"; + case kTfLiteVariant: + return "VARIANT"; + case kTfLiteInt4: + return "INT4"; + } + return "Unknown type"; +} + +TfLiteDelegate TfLiteDelegateCreate() { return TfLiteDelegate{}; } + +TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate( + const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder) { + if (!opaque_delegate_builder) return nullptr; + + TfLiteDelegate* result = new TfLiteDelegate{}; + result->opaque_delegate_builder = new TfLiteOpaqueDelegateBuilder{}; + *(result->opaque_delegate_builder) = *opaque_delegate_builder; + + return reinterpret_cast(result); +} + +void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* opaque_delegate) { + if (!opaque_delegate) return; + + const TfLiteDelegate* tflite_delegate = + reinterpret_cast(opaque_delegate); + delete tflite_delegate->opaque_delegate_builder; + delete tflite_delegate; +} + +void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate) { + if (!delegate) return nullptr; + + // The following cast is safe only because this code is part of the + // TF Lite runtime implementation. Apps using TF Lite should not rely on + // 'TfLiteOpaqueDelegate' and 'TfLiteDelegate' being equivalent. + const auto* tflite_delegate = + reinterpret_cast(delegate); + + if (!tflite_delegate->opaque_delegate_builder) return tflite_delegate->data_; + + return tflite_delegate->opaque_delegate_builder->data; +} + +} // extern "C" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.cc similarity index 100% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.cc diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h index 05839a6..99ab8cf 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h @@ -34,9 +34,22 @@ namespace tflite { /// that drives a GUI error log box. 
class ErrorReporter { public: - virtual ~ErrorReporter() {} + virtual ~ErrorReporter() = default; + /// Converts `args` to character equivalents according to `format` string, + /// constructs the error string and report it. + /// Returns number of characters written or zero on success, and negative + /// number on error. virtual int Report(const char* format, va_list args) = 0; + + /// Converts arguments to character equivalents according to `format` string, + /// constructs the error string and report it. + /// Returns number of characters written or zero on success, and negative + /// number on error. int Report(const char* format, ...); + + /// Equivalent to `Report` above. The additional `void*` parameter is unused. + /// This method is for compatibility with macros that takes `TfLiteContext`, + /// like TF_LITE_ENSURE and related macros. int ReportError(void*, const char* format, ...); }; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cc similarity index 82% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cc index 3916605..31d4af9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,12 +19,13 @@ limitations under the License. #include #include -#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers #include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h" namespace tflite { @@ -131,6 +132,17 @@ TfLitePadding ConvertPadding(Padding padding) { return kTfLitePaddingUnknown; } +// Converts the flatbuffer mirror padding enum to what is used at runtime. 
+TfLiteMirrorPaddingMode ConvertMirrorPadding(MirrorPadMode padding) { + switch (padding) { + case MirrorPadMode_REFLECT: + return kTfLiteMirrorPaddingReflect; + case MirrorPadMode_SYMMETRIC: + return kTfLiteMirrorPaddingSymmetric; + } + return kTfLiteMirrorPaddingUnknown; +} + #ifndef TF_LITE_STATIC_MEMORY TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, ErrorReporter* error_reporter, @@ -181,6 +193,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseArgMin(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_ASSIGN_VARIABLE: { + return ParseAssignVariable(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_AVERAGE_POOL_2D: { return ParsePool(op, error_reporter, allocator, builtin_data); } @@ -193,6 +209,18 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_BROADCAST_ARGS: { + return ParseBroadcastArgs(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_BROADCAST_TO: { + return ParseBroadcastTo(op, error_reporter, allocator, builtin_data); + } + + case BuiltinOperator_CALL_ONCE: { + return ParseCallOnce(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_CEIL: { return ParseCeil(op, error_reporter, allocator, builtin_data); } @@ -317,6 +345,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseLogSoftmax(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_LSTM: { + return ParseLSTM(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_MAXIMUM: { return ParseMaximum(op, error_reporter, allocator, builtin_data); } @@ -325,6 +357,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParsePool(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_MIRROR_PAD: { + return ParseMirrorPad(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_MEAN: { return ParseReducer(op, error_reporter, allocator, builtin_data); } @@ -369,10 +405,18 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseQuantize(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_READ_VARIABLE: { + return ParseReadVariable(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_REDUCE_ANY: { return ParseReducer(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_REDUCE_ALL: { + return ParseReducer(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_REDUCE_MAX: { return ParseReducer(op, error_reporter, allocator, builtin_data); } @@ -414,6 +458,10 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseRsqrt(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_SELECT_V2: { + return ParseSelectV2(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_SHAPE: { return ParseShape(op, error_reporter, allocator, builtin_data); } @@ -450,6 +498,11 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseSquare(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_SQUARED_DIFFERENCE: { + return ParseSquaredDifference(op, error_reporter, allocator, + builtin_data); + } + case BuiltinOperator_SQUEEZE: { return ParseSqueeze(op, error_reporter, allocator, builtin_data); } @@ -482,6 +535,10 @@ 
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return ParseUnpack(op, error_reporter, allocator, builtin_data); } + case BuiltinOperator_VAR_HANDLE: { + return ParseVarHandle(op, error_reporter, allocator, builtin_data); + } + case BuiltinOperator_ZEROS_LIKE: { return ParseZerosLike(op, error_reporter, allocator, builtin_data); } @@ -570,53 +627,9 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_LSTM: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) { - params->activation = - ConvertActivation(lstm_params->fused_activation_function()); - params->cell_clip = lstm_params->cell_clip(); - params->proj_clip = lstm_params->proj_clip(); - switch (lstm_params->kernel_type()) { - case LSTMKernelType_FULL: - params->kernel_type = kTfLiteLSTMFullKernel; - break; - case LSTMKernelType_BASIC: - params->kernel_type = kTfLiteLSTMBasicKernel; - break; - default: - TF_LITE_REPORT_ERROR(error_reporter, - "Unhandled LSTM kernel type: %d", - lstm_params->kernel_type()); - return kTfLiteError; - } - params->asymmetric_quantize_inputs = - lstm_params->asymmetric_quantize_inputs(); - } else { - TF_LITE_REPORT_ERROR(error_reporter, - "No valid LSTM builtin options exist"); - return kTfLiteError; - } - *builtin_data = params.release(); - return kTfLiteOk; - } case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: { - auto params = - safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* seq_lstm_params = - op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) { - params->activation = - ConvertActivation(seq_lstm_params->fused_activation_function()); - params->cell_clip = seq_lstm_params->cell_clip(); - params->proj_clip = seq_lstm_params->proj_clip(); - params->time_major = seq_lstm_params->time_major(); - params->asymmetric_quantize_inputs = - seq_lstm_params->asymmetric_quantize_inputs(); - } - *builtin_data = params.release(); - return kTfLiteOk; + return ParseUnidirectionalSequenceLSTM(op, error_reporter, allocator, + builtin_data); } case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: { auto params = @@ -663,7 +676,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, return kTfLiteOk; } case BuiltinOperator_DELEGATE: { - // TODO(ycling): Revisit when supporting saving delegated models. TF_LITE_REPORT_ERROR(error_reporter, "DELEGATE op shouldn't exist in model."); return kTfLiteError; @@ -690,19 +702,6 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_MIRROR_PAD: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions(); - if (mirror_pad_params != nullptr) { - params->mode = - mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT - ? 
TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect - : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric; - } - *builtin_data = params.release(); - return kTfLiteOk; - } case BuiltinOperator_UNIQUE: { auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); @@ -747,17 +746,8 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } - case BuiltinOperator_CALL_ONCE: { - auto params = safe_allocator.Allocate(); - TF_LITE_ENSURE(error_reporter, params != nullptr); - if (const auto* call_once_params = - op->builtin_options_as_CallOnceOptions()) { - params->init_subgraph_index = call_once_params->init_subgraph_index(); - } - *builtin_data = params.release(); - return kTfLiteOk; - } - case BuiltinOperator_CONV_3D: { + case BuiltinOperator_CONV_3D: + case BuiltinOperator_CONV_3D_TRANSPOSE: { auto params = safe_allocator.Allocate(); TF_LITE_ENSURE(error_reporter, params != nullptr); if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) { @@ -789,42 +779,114 @@ TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type, *builtin_data = params.release(); return kTfLiteOk; } + case BuiltinOperator_MULTINOMIAL: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* multinomial_params = + op->builtin_options_as_RandomOptions()) { + params->seed = multinomial_params->seed(); + params->seed2 = multinomial_params->seed2(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_RANDOM_STANDARD_NORMAL: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* random_std_normal_params = + op->builtin_options_as_RandomOptions()) { + params->seed = random_std_normal_params->seed(); + params->seed2 = random_std_normal_params->seed2(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_BUCKETIZE: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* bucketize_params = + op->builtin_options_as_BucketizeOptions()) { + const flatbuffers::Vector* boundaries = + bucketize_params->boundaries(); + if (boundaries == nullptr) { + TF_LITE_REPORT_ERROR( + error_reporter, + "boundaries array not provided for operation 'bucketize'.\n"); + return kTfLiteError; + } + params->num_boundaries = boundaries->size(); + if (boundaries->data() == nullptr) { + TF_LITE_REPORT_ERROR(error_reporter, + "boundaries.data() returned nullptr for " + "operation 'bucketize'.\n"); + return kTfLiteError; + } + params->boundaries = boundaries->data(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_RANDOM_UNIFORM: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* random_uniform_params = + op->builtin_options_as_RandomOptions()) { + params->seed = random_uniform_params->seed(); + params->seed2 = random_uniform_params->seed2(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } + case BuiltinOperator_GELU: { + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* gelu_params = op->builtin_options_as_GeluOptions()) { + params->approximate = gelu_params->approximate(); + } + *builtin_data = params.release(); + return kTfLiteOk; + } // Below are the ops with no builtin_data structure. 
// TODO(aselle): Implement call in BuiltinOptions, but nullptrs are // ok for now, since there is no call implementation either. case BuiltinOperator_CALL: + case BuiltinOperator_COMPLEX_ABS: case BuiltinOperator_CONCAT_EMBEDDINGS: case BuiltinOperator_COS: case BuiltinOperator_CUSTOM: + case BuiltinOperator_DENSIFY: + case BuiltinOperator_DYNAMIC_UPDATE_SLICE: case BuiltinOperator_EMBEDDING_LOOKUP: case BuiltinOperator_EQUAL: + case BuiltinOperator_HASHTABLE_FIND: + case BuiltinOperator_HASHTABLE_IMPORT: + case BuiltinOperator_HASHTABLE_SIZE: + case BuiltinOperator_IMAG: case BuiltinOperator_MATRIX_DIAG: case BuiltinOperator_MATRIX_SET_DIAG: + case BuiltinOperator_NON_MAX_SUPPRESSION_V4: + case BuiltinOperator_NON_MAX_SUPPRESSION_V5: case BuiltinOperator_RELU_N1_TO_1: + case BuiltinOperator_RELU_0_TO_1: + case BuiltinOperator_SCATTER_ND: case BuiltinOperator_SELECT: - case BuiltinOperator_SELECT_V2: case BuiltinOperator_SLICE: case BuiltinOperator_TILE: case BuiltinOperator_TOPK_V2: case BuiltinOperator_TRANSPOSE: case BuiltinOperator_RANGE: - case BuiltinOperator_SQUARED_DIFFERENCE: - case BuiltinOperator_REVERSE_V2: - case BuiltinOperator_WHERE: case BuiltinOperator_RANK: - case BuiltinOperator_NON_MAX_SUPPRESSION_V4: - case BuiltinOperator_NON_MAX_SUPPRESSION_V5: - case BuiltinOperator_SCATTER_ND: - case BuiltinOperator_DENSIFY: - case BuiltinOperator_SEGMENT_SUM: - case BuiltinOperator_BROADCAST_TO: - case BuiltinOperator_RFFT2D: - case BuiltinOperator_IMAG: case BuiltinOperator_REAL: - case BuiltinOperator_COMPLEX_ABS: - case BuiltinOperator_HASHTABLE_FIND: - case BuiltinOperator_HASHTABLE_IMPORT: - case BuiltinOperator_HASHTABLE_SIZE: + case BuiltinOperator_RFFT2D: + case BuiltinOperator_SEGMENT_SUM: + case BuiltinOperator_REVERSE_V2: + case BuiltinOperator_UNSORTED_SEGMENT_MAX: + case BuiltinOperator_UNSORTED_SEGMENT_MIN: + case BuiltinOperator_UNSORTED_SEGMENT_PROD: + case BuiltinOperator_UNSORTED_SEGMENT_SUM: + case BuiltinOperator_ATAN2: + case BuiltinOperator_SIGN: + case BuiltinOperator_WHERE: return kTfLiteOk; case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES: return kTfLiteError; @@ -849,6 +911,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_INT16: *type = kTfLiteInt16; return kTfLiteOk; + case TensorType_UINT16: + *type = kTfLiteUInt16; + return kTfLiteOk; case TensorType_INT32: *type = kTfLiteInt32; return kTfLiteOk; @@ -885,6 +950,9 @@ TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type, case TensorType_VARIANT: *type = kTfLiteVariant; return kTfLiteOk; + case TensorType_INT4: + *type = kTfLiteInt4; + return kTfLiteOk; default: *type = kTfLiteNoType; TF_LITE_REPORT_ERROR(error_reporter, @@ -981,6 +1049,14 @@ TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseAssignVariable(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. 
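These hunks replace inline option handling in ParseOpDataTfLite with standalone per-op helpers (ParseAssignVariable, ParseBroadcastArgs, ParseCallOnce, and so on) that all share one signature. Per the recurring comment, the motivation is selective registration: a micro build references only the parsers for the ops its model actually uses and lets the linker drop the rest. The sketch below illustrates that idea only; the ParserEntry table and FindParser helper are hypothetical names for this note, not APIs provided by the SDK, and the include paths assume the vendored edge-impulse-sdk layout used elsewhere in this patch.

#include "edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h"
#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace example {

// All per-op parse helpers declared above share this signature.
using BuiltinParser = TfLiteStatus (*)(const Operator*, ErrorReporter*,
                                       BuiltinDataAllocator*, void**);

struct ParserEntry {
  BuiltinOperator op;
  BuiltinParser parser;
};

// Hypothetical table: list only the ops a given model needs, so every
// unreferenced parser can be discarded at link time.
constexpr ParserEntry kParsers[] = {
    {BuiltinOperator_ASSIGN_VARIABLE, ParseAssignVariable},
    {BuiltinOperator_BATCH_TO_SPACE_ND, ParseBatchToSpaceNd},
};

// Returns the registered parser for `op`, or nullptr if it was not listed.
inline BuiltinParser FindParser(BuiltinOperator op) {
  for (const ParserEntry& entry : kParsers) {
    if (entry.op == op) return entry.parser;
  }
  return nullptr;
}

}  // namespace example
}  // namespace tflite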
@@ -1010,6 +1086,49 @@ TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseBroadcastArgs(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseBroadcastTo(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const CallOnceOptions* schema_params = + op->builtin_options_as_CallOnceOptions(); + + if (schema_params != nullptr) { + params->init_subgraph_index = schema_params->init_subgraph_index(); + + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1068,6 +1187,14 @@ TfLiteStatus ParseConcatenation(const Operator* op, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseComplexAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { CheckParsePointerParams(op, error_reporter, allocator, builtin_data); @@ -1372,6 +1499,38 @@ TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseImag(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const IfOptions* schema_params = op->builtin_options_as_IfOptions(); + + if (schema_params != nullptr) { + params->then_subgraph_index = schema_params->then_subgraph_index(); + params->else_subgraph_index = schema_params->else_subgraph_index(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + TfLiteStatus ParseL2Normalization(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -1479,6 +1638,40 @@ TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*, return kTfLiteOk; } +TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) { + params->activation = + ConvertActivation(lstm_params->fused_activation_function()); + params->cell_clip = lstm_params->cell_clip(); + params->proj_clip = lstm_params->proj_clip(); + switch (lstm_params->kernel_type()) { + case LSTMKernelType_FULL: + params->kernel_type = kTfLiteLSTMFullKernel; + break; + case LSTMKernelType_BASIC: + params->kernel_type = kTfLiteLSTMBasicKernel; + break; + default: + TF_LITE_REPORT_ERROR(error_reporter, "Unhandled LSTM kernel type: %d", + lstm_params->kernel_type()); + return kTfLiteError; + } + params->asymmetric_quantize_inputs = + lstm_params->asymmetric_quantize_inputs(); + } else { + TF_LITE_REPORT_ERROR(error_reporter, "No valid LSTM builtin options exist"); + return kTfLiteError; + } + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1495,6 +1688,32 @@ TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*, return kTfLiteOk; } +TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const MirrorPadOptions* schema_params = + op->builtin_options_as_MirrorPadOptions(); + + if (schema_params != nullptr) { + params->mode = ConvertMirrorPadding(schema_params->mode()); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. 
We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { CheckParsePointerParams(op, error_reporter, allocator, builtin_data); @@ -1630,6 +1849,22 @@ TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseReal(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseReadVariable(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { @@ -1763,6 +1998,14 @@ TfLiteStatus ParseResizeNearestNeighbor(const Operator* op, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseRfft2D(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. @@ -1779,6 +2022,22 @@ TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseSelect(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseSelectV2(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { SafeBuiltinDataAllocator safe_allocator(allocator); @@ -1810,6 +2069,14 @@ TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. 
+TfLiteStatus ParseSlice(const Operator*, ErrorReporter*, BuiltinDataAllocator*, + void**) { + return kTfLiteOk; +} + TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { @@ -1916,6 +2183,31 @@ TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + SafeBuiltinDataAllocator safe_allocator(allocator); + auto params = + safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + if (const auto* seq_lstm_params = + op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) { + params->activation = + ConvertActivation(seq_lstm_params->fused_activation_function()); + params->cell_clip = seq_lstm_params->cell_clip(); + params->proj_clip = seq_lstm_params->proj_clip(); + params->time_major = seq_lstm_params->time_major(); + params->asymmetric_quantize_inputs = + seq_lstm_params->asymmetric_quantize_inputs(); + params->diagonal_recurrent_tensors = + seq_lstm_params->diagonal_recurrent_tensors(); + } + *builtin_data = params.release(); + return kTfLiteOk; +} + TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data) { @@ -1965,6 +2257,14 @@ TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*, return kTfLiteOk; } +// We have this parse function instead of directly returning kTfLiteOk from the +// switch-case in ParseOpData because this function is used as part of the +// selective registration for the OpResolver implementation in micro. +TfLiteStatus ParseSquaredDifference(const Operator*, ErrorReporter*, + BuiltinDataAllocator*, void**) { + return kTfLiteOk; +} + TfLiteStatus ParseStridedSlice(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -2081,6 +2381,9 @@ TfLiteStatus ParseTransposeConv(const Operator* op, params->padding = ConvertPadding(transpose_conv_params->padding()); params->stride_width = transpose_conv_params->stride_w(); params->stride_height = transpose_conv_params->stride_h(); + + params->activation = + ConvertActivation(transpose_conv_params->fused_activation_function()); } else { // TODO(b/157480169): We should either return kTfLiteError or fill in some // reasonable defaults in the params struct. We are not doing so until we @@ -2115,6 +2418,62 @@ TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, return kTfLiteOk; } +TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const VarHandleOptions* schema_params = + op->builtin_options_as_VarHandleOptions(); + + if (schema_params != nullptr) { + if (schema_params->container()) { + params->container = schema_params->container()->c_str(); + } + if (schema_params->shared_name()) { + params->shared_name = schema_params->shared_name()->c_str(); + } + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. 
We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + +TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data) { + CheckParsePointerParams(op, error_reporter, allocator, builtin_data); + + SafeBuiltinDataAllocator safe_allocator(allocator); + std::unique_ptr + params = safe_allocator.Allocate(); + TF_LITE_ENSURE(error_reporter, params != nullptr); + + const WhileOptions* schema_params = op->builtin_options_as_WhileOptions(); + + if (schema_params != nullptr) { + params->cond_subgraph_index = schema_params->cond_subgraph_index(); + params->body_subgraph_index = schema_params->body_subgraph_index(); + } else { + // TODO(b/157480169): We should either return kTfLiteError or fill in some + // reasonable defaults in the params struct. We are not doing so until we + // better undertand the ramifications of changing the legacy behavior. + } + + *builtin_data = params.release(); + return kTfLiteOk; +} + // We have this parse function instead of directly returning kTfLiteOk from the // switch-case in ParseOpData because this function is used as part of the // selective registration for the OpResolver implementation in micro. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h index b1370e0..b8e6019 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,8 +23,8 @@ limitations under the License. 
#include #include -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -84,6 +84,11 @@ TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseAssignVariable(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -93,12 +98,28 @@ TfLiteStatus ParseBatchToSpaceNd(const Operator* op, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseBroadcastArgs(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseBroadcastTo(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseCallOnce(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseComplexAbs(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseConcatenation(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -181,6 +202,12 @@ TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseImag(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseIf(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseL2Normalization(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -220,12 +247,19 @@ TfLiteStatus ParseLogSoftmax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseLSTM(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseMirrorPad(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -258,6 +292,14 @@ TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseReal(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus 
ParseReadVariable(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -280,18 +322,31 @@ TfLiteStatus ParseResizeNearestNeighbor(const Operator* op, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseRfft2D(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSelect(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +TfLiteStatus ParseSelectV2(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSlice(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); @@ -320,6 +375,11 @@ TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter, TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseSquaredDifference(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + TfLiteStatus ParseStridedSlice(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, @@ -346,6 +406,18 @@ TfLiteStatus ParseTransposeConv(const Operator* op, TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); +TfLiteStatus ParseUnidirectionalSequenceLSTM(const Operator* op, + ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseVarHandle(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, + void** builtin_data); + +TfLiteStatus ParseWhile(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter, BuiltinDataAllocator* allocator, void** builtin_data); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.cc similarity index 89% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.cc index 528adc3..bb2e080 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.cpp +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.cc @@ -16,8 +16,8 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h" #include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_utils.h" namespace tflite { @@ -30,8 +30,7 @@ TfLiteStatus GetRegistrationFromOpCode( auto builtin_code = GetBuiltinCode(opcode); int version = opcode->version(); - if (builtin_code > BuiltinOperator_MAX || - builtin_code < BuiltinOperator_MIN) { + if (builtin_code > BuiltinOperator_MAX) { TF_LITE_REPORT_ERROR( error_reporter, "Op builtin_code out of range: %d. Are you using old TFLite binary " @@ -44,8 +43,8 @@ TfLiteStatus GetRegistrationFromOpCode( TF_LITE_REPORT_ERROR( error_reporter, "Didn't find op for builtin opcode '%s' version '%d'. " - "An older version of this builtin might be supported. " - "Are you using an old TFLite binary with a newer model?\n", + "This model is not supported by EON Compiler of TensorFlow Lite Micro,", + "but is in full TFLite (e.g. on Linux).\n", EnumNameBuiltinOperator(builtin_code), version); status = kTfLiteError; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h index b87548d..75fc5d0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h @@ -15,11 +15,12 @@ limitations under the License. #ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ #define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_ +#include #include #include -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -36,16 +37,83 @@ class OpResolver { virtual const TfLiteRegistration* FindOp(const char* op, int version) const = 0; + // Represents a sequence of delegates. + using TfLiteDelegatePtrVector = + std::vector>; + // Returns optional delegates for resolving and handling ops in the flatbuffer // model. This may be used in addition to the standard TfLiteRegistration // lookup for graph resolution. - using TfLiteDelegatePtrVector = - std::vector>; + // WARNING: This API is deprecated, GetDelegateCreators is preferred. virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const { - return TfLiteDelegatePtrVector(); + return {}; + } + + // Represents a function that creates a TfLite delegate instance. + using TfLiteDelegateCreator = + std::function( + TfLiteContext* /*context*/)>; + + // Represents a sequence of delegate creator functions. + using TfLiteDelegateCreators = std::vector; + + // Returns a vector of delegate creators to create optional delegates for + // resolving and handling ops in the flatbuffer model. This may be used in + // addition to the standard TfLiteRegistration lookup for graph resolution. 
+ // + // Note that this method is not used (will not be called) if you are using + // TF Lite in Google Play Services; the GetOpaqueDelegateCreators method + // (see below) is used for that case. + virtual TfLiteDelegateCreators GetDelegateCreators() const { return {}; } + + // TODO(b/202712825): it would be nice if we could avoid the need for separate + // "opaque" types & methods for use only with TF Lite in Google Play Services. + + // Represents an opaque delegate instance. + // WARNING: Experimental interface, subject to change. + using TfLiteOpaqueDelegatePtr = + std::unique_ptr; + + // Represents a function that creates an opaque delegate instance. + // WARNING: Experimental interface, subject to change. + using TfLiteOpaqueDelegateCreator = + std::function; + + // Represents a sequence of opaque delegate creator functions. + // WARNING: Experimental interface, subject to change. + using TfLiteOpaqueDelegateCreators = std::vector; + + // Returns a vector of opaque delegate creators to create optional opaque + // delegates for resolving and handling ops in the flatbuffer model. This may + // be used in addition to the standard TfLiteRegistration lookup for graph + // resolution. + // + // Note that this method will be called only if you are using TF Lite in + // Google Play Services; if you are using regular TF Lite, GetDelegateCreators + // (see above) is used instead. + // + // WARNING: Experimental interface, subject to change. + virtual TfLiteOpaqueDelegateCreators GetOpaqueDelegateCreators() const { + return {}; } virtual ~OpResolver() {} + + private: + /// Returns true if this OpResolver may contain any "user defined" ops. + /// By "user defined" ops, we mean any op definitions other than those + /// contained in tflite::ops::builtin::BuiltinOpResolver. + /// + /// If this method returns true, it doesn't necessarily mean that the + /// OpResolver contains a user-defined op, just that the absence of + /// user-defined ops can't be guaranteed. + /// + /// Note that "user-defined" ops are not the same as "custom" ops; + /// BuiltinOpResolver may support certain "custom" ops, in addition to + /// "builtin" ops, and may not support all of the "builtin" op enum values. + virtual bool MayContainUserDefinedOps() const { return true; } + + friend class OpResolverInternal; }; // Handles the logic for converting between an OperatorCode structure extracted diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.cc similarity index 96% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.cc index 4288daf..b62d50c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.cc @@ -17,7 +17,7 @@ limitations under the License. 
#include -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" namespace tflite { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.h index 76d7545..608128a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.h @@ -16,7 +16,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ #define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_ -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" namespace tflite { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h new file mode 100644 index 0000000..3a1ee0e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h @@ -0,0 +1,537 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/builtin_op_data.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. +#ifndef TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_ +#define TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// TfLiteReshapeParams can't have dynamic data so we fix the maximum possible +// number of dimensions. +#define TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT 8 + +// TODO(aselle): Consider using "if this then that" for testing. + +// Useful placeholder to put in otherwise empty structs to avoid size warnings. +typedef struct { + char dummy; +} EmptyStructPlaceholder; + +// IMPORTANT: All new members of structs must be added at the end to ensure +// backwards compatibility. + +// Possible padding types (for convolutions) +typedef enum { + kTfLitePaddingUnknown = 0, + kTfLitePaddingSame, + kTfLitePaddingValid, +} TfLitePadding; + +typedef enum { + kTfLiteMirrorPaddingUnknown = 0, + kTfLiteMirrorPaddingReflect, + kTfLiteMirrorPaddingSymmetric, +} TfLiteMirrorPaddingMode; + +// TODO(b/130259536): We should move this out of builtin_op_data. +typedef struct { + int width; + int height; + int width_offset; + int height_offset; +} TfLitePaddingValues; + +typedef struct { + TfLiteMirrorPaddingMode mode; +} TfLiteMirrorPaddingParams; + +// Possible fused activation functions. 
+typedef enum { + kTfLiteActNone = 0, + kTfLiteActRelu, + kTfLiteActReluN1To1, // min(max(-1, x), 1) + kTfLiteActRelu6, // min(max(0, x), 6) + kTfLiteActTanh, + kTfLiteActSignBit, + kTfLiteActSigmoid, +} TfLiteFusedActivation; + +typedef struct { + // Parameters for CONV_2D version 1. + TfLitePadding padding; + int stride_width; + int stride_height; + TfLiteFusedActivation activation; + + // Parameters for CONV_2D version 2. + // Note: Version 2 supports dilation values not equal to 1. + int dilation_width_factor; + int dilation_height_factor; +} TfLiteConvParams; + +typedef struct { + TfLitePadding padding; + int stride_width; + int stride_height; + int stride_depth; + int dilation_width_factor; + int dilation_height_factor; + int dilation_depth_factor; + TfLiteFusedActivation activation; +} TfLiteConv3DParams; + +typedef TfLiteConv3DParams TfLiteConv3DTransposeParams; + +typedef struct { + TfLitePadding padding; + int stride_width; + int stride_height; + int filter_width; + int filter_height; + TfLiteFusedActivation activation; + struct { + TfLitePaddingValues padding; + } computed; +} TfLitePoolParams; + +typedef struct { + // Parameters for DepthwiseConv version 1 or above. + TfLitePadding padding; + int stride_width; + int stride_height; + // `depth_multiplier` is redundant. It's used by CPU kernels in + // TensorFlow 2.0 or below, but ignored in versions above. + // + // The information can be deduced from the shape of input and the shape of + // weights. Since the TFLiteConverter toolchain doesn't support partially + // specified shapes, relying on `depth_multiplier` stops us from supporting + // graphs with dynamic shape tensors. + // + // Note: Some of the delegates (e.g. NNAPI, GPU) are still relying on this + // field. + int depth_multiplier; + TfLiteFusedActivation activation; + // Parameters for DepthwiseConv version 2 or above. + int dilation_width_factor; + int dilation_height_factor; +} TfLiteDepthwiseConvParams; + +typedef struct { + int rank; + TfLiteFusedActivation activation; + + // Parameter for SVDF version 4. + bool asymmetric_quantize_inputs; +} TfLiteSVDFParams; + +typedef struct { + TfLiteFusedActivation activation; + + // Parameter for RNN version 3. + bool asymmetric_quantize_inputs; +} TfLiteRNNParams; + +typedef struct { + bool time_major; + TfLiteFusedActivation activation; + + // Parameter for Sequence RNN version 3. + bool asymmetric_quantize_inputs; +} TfLiteSequenceRNNParams; + +typedef struct { + bool time_major; + TfLiteFusedActivation activation; + bool merge_outputs; + + // Parameter for Bidirectional RNN verison 3. + bool asymmetric_quantize_inputs; +} TfLiteBidirectionalSequenceRNNParams; + +typedef enum { + kTfLiteFullyConnectedWeightsFormatDefault = 0, + kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8 = 1, +} TfLiteFullyConnectedWeightsFormat; + +typedef struct { + // Parameters for FullyConnected version 1 or above. + TfLiteFusedActivation activation; + + // Parameters for FullyConnected version 2 or above. + TfLiteFullyConnectedWeightsFormat weights_format; + + // Parameters for FullyConnected version 5 or above. + // If set to true, then the number of dimensions in the input and the output + // tensors are the same. Furthermore, all but the last dimension of the input + // and output shapes will be equal. + bool keep_num_dims; + + // Parameters for FullyConnected version 7 or above. + // If set to true and the weights are quantized, then non constant inputs + // are quantized at evaluation time with asymmetric quantization. 
+ bool asymmetric_quantize_inputs; +} TfLiteFullyConnectedParams; + +typedef enum { + kTfLiteLshProjectionUnknown = 0, + kTfLiteLshProjectionSparse = 1, + kTfLiteLshProjectionDense = 2, +} TfLiteLSHProjectionType; + +typedef struct { + TfLiteLSHProjectionType type; +} TfLiteLSHProjectionParams; + +typedef struct { + float beta; +} TfLiteSoftmaxParams; + +typedef struct { + int axis; + TfLiteFusedActivation activation; +} TfLiteConcatenationParams; + +typedef struct { + TfLiteFusedActivation activation; + // Parameter added for the version 4. + bool pot_scale_int16; +} TfLiteAddParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteSpaceToBatchNDParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteBatchToSpaceNDParams; + +typedef struct { + bool adj_x; + bool adj_y; + // Parameters for BatchMatMul version 4 or above. + // If set to true and the weights are quantized, then non constant inputs + // are quantized at evaluation time with asymmetric quantization. + bool asymmetric_quantize_inputs; +} TfLiteBatchMatMulParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteMulParams; + +typedef struct { + TfLiteFusedActivation activation; + // Parameter added for the version 5. + bool pot_scale_int16; +} TfLiteSubParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteDivParams; + +typedef struct { + TfLiteFusedActivation activation; +} TfLiteL2NormParams; + +typedef struct { + int radius; + float bias; + float alpha; + float beta; +} TfLiteLocalResponseNormParams; + +typedef enum { + kTfLiteLSTMFullKernel = 0, + kTfLiteLSTMBasicKernel +} TfLiteLSTMKernelType; + +typedef struct { + // Parameters for LSTM version 1. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // Parameters for LSTM version 2. + // kTfLiteLSTMBasicKernel is only supported in version 2 or above. + TfLiteLSTMKernelType kernel_type; + + // Parameters for LSTM version 4. + bool asymmetric_quantize_inputs; +} TfLiteLSTMParams; + +typedef struct { + // Parameters needed for the underlying LSTM. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // If set to true then the first dimension is time, otherwise batch. + bool time_major; + + // Parameter for unidirectional sequence RNN version 3. + bool asymmetric_quantize_inputs; + + // Parameter for unidirectional sequence RNN version 4. + bool diagonal_recurrent_tensors; +} TfLiteUnidirectionalSequenceLSTMParams; + +typedef struct { + // Parameters supported by version 1: + // Parameters inherited for the LSTM kernel. + TfLiteFusedActivation activation; + float cell_clip; + float proj_clip; + + // If true, store the outputs of both directions in the first output. + bool merge_outputs; + + // Parameters supported by version 2: + // If set to true then the first dimension is time, otherwise batch. + bool time_major; + + // Parameters supported by version 3: + // If set to true, then hybrid ops use asymmetric quantization for inputs. + bool asymmetric_quantize_inputs; +} TfLiteBidirectionalSequenceLSTMParams; + +typedef struct { + bool align_corners; + // half_pixel_centers assumes pixels are of half the actual dimensions, and + // yields more accurate resizes. Corresponds to the same argument for the + // original TensorFlow op in TF2.0. 
+ bool half_pixel_centers; +} TfLiteResizeBilinearParams; + +typedef struct { + bool align_corners; + bool half_pixel_centers; +} TfLiteResizeNearestNeighborParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLitePadParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLitePadV2Params; + +typedef struct { + // These fields are only used in old models for backward compatibility. + // In the current implementation, we use the 2nd input of the op as the shape, + // and these fields are unused. + int shape[TFLITE_RESHAPE_PARAMS_MAX_DIMENSION_COUNT]; + int num_dimensions; +} TfLiteReshapeParams; + +typedef struct { + int ngram_size; + int max_skip_size; + bool include_all_ngrams; +} TfLiteSkipGramParams; + +typedef struct { + int block_size; +} TfLiteSpaceToDepthParams; + +typedef struct { + int block_size; +} TfLiteDepthToSpaceParams; + +typedef struct { + TfLiteType in_data_type; + TfLiteType out_data_type; +} TfLiteCastParams; + +typedef enum { + kTfLiteCombinerTypeSum = 0, + kTfLiteCombinerTypeMean = 1, + kTfLiteCombinerTypeSqrtn = 2, +} TfLiteCombinerType; + +typedef struct { + TfLiteCombinerType combiner; +} TfLiteEmbeddingLookupSparseParams; + +typedef struct { + int axis; + int batch_dims; +} TfLiteGatherParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteTransposeParams; + +typedef struct { + bool keep_dims; +} TfLiteReducerParams; + +typedef struct { + int num_splits; +} TfLiteSplitParams; + +typedef struct { + int num_splits; +} TfLiteSplitVParams; + +typedef struct { + // TODO(ahentz): We can't have dynamic data in this struct, at least not yet. + // For now we will fix the maximum possible number of dimensions. + int squeeze_dims[8]; + int num_squeeze_dims; +} TfLiteSqueezeParams; + +typedef struct { + int begin_mask; + int end_mask; + int ellipsis_mask; + int new_axis_mask; + int shrink_axis_mask; +} TfLiteStridedSliceParams; + +typedef struct { + TfLiteType output_type; +} TfLiteArgMaxParams; + +typedef struct { + TfLiteType output_type; +} TfLiteArgMinParams; + +typedef struct { + // Parameters supported by version 1: + TfLitePadding padding; + int stride_width; + int stride_height; + + // Parameters supported by version 4: + TfLiteFusedActivation activation; +} TfLiteTransposeConvParams; + +typedef struct { + bool validate_indices; +} TfLiteSparseToDenseParams; + +typedef struct { + TfLiteType out_type; +} TfLiteShapeParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteRankParams; + +typedef struct { + // Parameters supported by version 1: + float min; + float max; + int num_bits; + + // Parameters supported by version 2: + bool narrow_range; +} TfLiteFakeQuantParams; + +typedef struct { + int values_count; + int axis; +} TfLitePackParams; + +typedef struct { + int axis; +} TfLiteOneHotParams; + +typedef struct { + int num; + int axis; +} TfLiteUnpackParams; + +typedef struct { + float alpha; +} TfLiteLeakyReluParams; + +typedef struct { + TfLiteType index_out_type; +} TfLiteUniqueParams; + +typedef struct { + int seq_dim; + int batch_dim; +} TfLiteReverseSequenceParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteMatrixDiagParams; + +typedef struct { + EmptyStructPlaceholder placeholder; +} TfLiteMatrixSetDiagParams; + +typedef struct { + int then_subgraph_index; + int else_subgraph_index; +} TfLiteIfParams; + +typedef struct { + int cond_subgraph_index; + int body_subgraph_index; +} TfLiteWhileParams; + +typedef struct { + bool exclusive; + bool reverse; +} 
TfLiteCumsumParams; + +typedef struct { + int init_subgraph_index; +} TfLiteCallOnceParams; + +typedef struct { + int table_id; + TfLiteType key_dtype; + TfLiteType value_dtype; +} TfLiteHashtableParams; + +typedef struct { + const char* container; + const char* shared_name; +} TfLiteVarHandleParams; + +typedef struct { + int seed; + int seed2; +} TfLiteRandomParams; + +typedef struct { + int num_boundaries; + // This points to the memory stored in the model (flatbuffer), + // and is not owned. + const float* boundaries; +} TfLiteBucketizeParams; + +typedef struct { + bool approximate; +} TfLiteGeluParams; + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus + +#endif // TENSORFLOW_LITE_CORE_C_BUILTIN_OP_DATA_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/c_api_types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/c_api_types.h new file mode 100644 index 0000000..3aab43f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/c_api_types.h @@ -0,0 +1,168 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This file declares types used by the pure C inference API defined in c_api.h, +// some of which are also used in the C++ and C kernel and interpreter APIs. + +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/c_api_types.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. + +#ifndef TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ +#define TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ + +#include + +#ifdef __cplusplus +extern "C" { +#endif + +// Define TFL_CAPI_EXPORT macro to export a function properly with a shared +// library. +#ifdef SWIG +#define TFL_CAPI_EXPORT +#elif defined(TFL_STATIC_LIBRARY_BUILD) +#define TFL_CAPI_EXPORT +#else // not definded TFL_STATIC_LIBRARY_BUILD +#if defined(_WIN32) +#ifdef TFL_COMPILE_LIBRARY +#define TFL_CAPI_EXPORT __declspec(dllexport) +#else +#define TFL_CAPI_EXPORT __declspec(dllimport) +#endif // TFL_COMPILE_LIBRARY +#else +#define TFL_CAPI_EXPORT __attribute__((visibility("default"))) +#endif // _WIN32 +#endif // SWIG + +// Note that new error status values may be added in future in order to +// indicate more fine-grained internal states, therefore, applications should +// not rely on status values being members of the enum. +typedef enum TfLiteStatus { + kTfLiteOk = 0, + + // Generally referring to an error in the runtime (i.e. interpreter) + kTfLiteError = 1, + + // Generally referring to an error from a TfLiteDelegate itself. 
+ kTfLiteDelegateError = 2, + + // Generally referring to an error in applying a delegate due to + // incompatibility between runtime and delegate, e.g., this error is returned + // when trying to apply a TF Lite delegate onto a model graph that's already + // immutable. + kTfLiteApplicationError = 3, + + // Generally referring to serialized delegate data not being found. + // See tflite::delegates::Serialization. + kTfLiteDelegateDataNotFound = 4, + + // Generally referring to data-writing issues in delegate serialization. + // See tflite::delegates::Serialization. + kTfLiteDelegateDataWriteError = 5, + + // Generally referring to data-reading issues in delegate serialization. + // See tflite::delegates::Serialization. + kTfLiteDelegateDataReadError = 6, + + // Generally referring to issues when the TF Lite model has ops that cannot be + // resolved at runtime. This could happen when the specific op is not + // registered or built with the TF Lite framework. + kTfLiteUnresolvedOps = 7, + + // Generally referring to invocation cancelled by the user. + // See `interpreter::Cancel`. + // TODO(b/194915839): Implement `interpreter::Cancel`. + // TODO(b/250636993): Cancellation triggered by `SetCancellationFunction` + // should also return this status code. + kTfLiteCancelled = 8, +} TfLiteStatus; + +// Types supported by tensor +typedef enum { + kTfLiteNoType = 0, + kTfLiteFloat32 = 1, + kTfLiteInt32 = 2, + kTfLiteUInt8 = 3, + kTfLiteInt64 = 4, + kTfLiteString = 5, + kTfLiteBool = 6, + kTfLiteInt16 = 7, + kTfLiteComplex64 = 8, + kTfLiteInt8 = 9, + kTfLiteFloat16 = 10, + kTfLiteFloat64 = 11, + kTfLiteComplex128 = 12, + kTfLiteUInt64 = 13, + kTfLiteResource = 14, + kTfLiteVariant = 15, + kTfLiteUInt32 = 16, + kTfLiteUInt16 = 17, + kTfLiteInt4 = 18, +} TfLiteType; + +// Legacy. Will be deprecated in favor of TfLiteAffineQuantization. +// If per-layer quantization is specified this field will still be populated in +// addition to TfLiteAffineQuantization. +// Parameters for asymmetric quantization. Quantized values can be converted +// back to float using: +// real_value = scale * (quantized_value - zero_point) +typedef struct TfLiteQuantizationParams { + float scale; + int32_t zero_point; +} TfLiteQuantizationParams; + +// -------------------------------------------------------------------------- +// Opaque types used by c_api.h, c_api_opaque.h and common.h. + +// TfLiteOpaqueContext is an opaque version of TfLiteContext; +typedef struct TfLiteOpaqueContext TfLiteOpaqueContext; + +// TfLiteOpaqueNode is an opaque version of TfLiteNode; +typedef struct TfLiteOpaqueNode TfLiteOpaqueNode; + +// TfLiteOpaqueTensor is an opaque version of TfLiteTensor; +typedef struct TfLiteOpaqueTensor TfLiteOpaqueTensor; + +// TfLiteDelegate: allows delegation of nodes to alternative backends. +// Forward declaration of concrete type declared in common.h. +typedef struct TfLiteDelegate TfLiteDelegate; + +// TfLiteOpaqueDelegateStruct: unconditionally opaque version of +// TfLiteDelegate; allows delegation of nodes to alternative backends. +// +// This is an abstract type that is intended to have the same +// role as TfLiteDelegate, but without exposing the implementation +// details of how delegates are implemented. +// WARNING: This is an experimental type and subject to change. +typedef struct TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegateStruct; + +// TfLiteOpaqueDelegate: conditionally opaque version of +// TfLiteDelegate; allows delegation of nodes to alternative backends. 
+// For TF Lite in Play Services, this is an opaque type, +// but for regular TF Lite, this is just a typedef for TfLiteDelegate. +// WARNING: This is an experimental type and subject to change. +#if TFLITE_WITH_STABLE_ABI || TFLITE_USE_OPAQUE_DELEGATE +typedef TfLiteOpaqueDelegateStruct TfLiteOpaqueDelegate; +#else +typedef TfLiteDelegate TfLiteOpaqueDelegate; +#endif + +#ifdef __cplusplus +} // extern C +#endif +#endif // TENSORFLOW_LITE_CORE_C_C_API_TYPES_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/common.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/common.h new file mode 100644 index 0000000..83b4a31 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/core/c/common.h @@ -0,0 +1,1170 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This file defines common C types and APIs for implementing operations, +// delegates and other constructs in TensorFlow Lite. The actual operations and +// delegates can be defined using C++, but the interface between the interpreter +// and the operations are C. +// +// Summary of abstractions +// TF_LITE_ENSURE - Self-sufficient error checking +// TfLiteStatus - Status reporting +// TfLiteIntArray - stores tensor shapes (dims), +// TfLiteContext - allows an op to access the tensors +// TfLiteTensor - tensor (a multidimensional array) +// TfLiteNode - a single node or operation +// TfLiteRegistration - the implementation of a conceptual operation. +// TfLiteDelegate - allows delegation of nodes to alternative backends. +// +// Some abstractions in this file are created and managed by Interpreter. +// +// NOTE: The order of values in these structs are "semi-ABI stable". New values +// should be added only to the end of structs and never reordered. + +/// WARNING: Users of TensorFlow Lite should not include this file directly, +/// but should instead include +/// "third_party/tensorflow/lite/c/common.h". +/// Only the TensorFlow Lite implementation itself should include this +/// file directly. + +#ifndef TENSORFLOW_LITE_CORE_C_COMMON_H_ +#define TENSORFLOW_LITE_CORE_C_COMMON_H_ + +#include +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/core/c/c_api_types.h" // IWYU pragma: export + +#ifdef __cplusplus +extern "C" { +#endif // __cplusplus + +// The list of external context types known to TF Lite. This list exists solely +// to avoid conflicts and to ensure ops can share the external contexts they +// need. Access to the external contexts is controlled by one of the +// corresponding support files. +typedef enum TfLiteExternalContextType { + kTfLiteEigenContext = 0, // include eigen_support.h to use. + kTfLiteGemmLowpContext = 1, // include gemm_support.h to use. + kTfLiteEdgeTpuContext = 2, // Placeholder for Edge TPU support. 
+ kTfLiteCpuBackendContext = 3, // include cpu_backend_context.h to use. + kTfLiteMaxExternalContexts = 4 +} TfLiteExternalContextType; + +// Forward declare so dependent structs and methods can reference these types +// prior to the struct definitions. +struct TfLiteContext; +struct TfLiteDelegate; +struct TfLiteRegistration; +struct TfLiteOpaqueDelegateBuilder; + +// An external context is a collection of information unrelated to the TF Lite +// framework, but useful to a subset of the ops. TF Lite knows very little +// about the actual contexts, but it keeps a list of them, and is able to +// refresh them if configurations like the number of recommended threads +// change. +typedef struct TfLiteExternalContext { + TfLiteExternalContextType type; + TfLiteStatus (*Refresh)(struct TfLiteContext* context); +} TfLiteExternalContext; + +#define kTfLiteOptionalTensor (-1) + +// Fixed size list of integers. Used for dimensions and inputs/outputs tensor +// indices +typedef struct TfLiteIntArray { + int size; + +#if defined(_MSC_VER) + // Context for why this is needed is in http://b/189926408#comment21 + int data[1]; +#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ + __GNUC_MINOR__ >= 1) || \ + defined(HEXAGON) || \ + (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) + // gcc 6.1+ have a bug where flexible members aren't properly handled + // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c + int data[0]; +#else + int data[]; +#endif +} TfLiteIntArray; + +// Given the size (number of elements) in a TfLiteIntArray, calculate its size +// in bytes. +size_t TfLiteIntArrayGetSizeInBytes(int size); + +#ifndef TF_LITE_STATIC_MEMORY +// Create a array of a given `size` (uninitialized entries). +// This returns a pointer, that you must free using TfLiteIntArrayFree(). +TfLiteIntArray* TfLiteIntArrayCreate(int size); +#endif + +// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise. +int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b); + +// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise. +int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size, + const int b_data[]); + +#ifndef TF_LITE_STATIC_MEMORY +// Create a copy of an array passed as `src`. +// You are expected to free memory with TfLiteIntArrayFree +TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src); + +// Free memory of array `a`. +void TfLiteIntArrayFree(TfLiteIntArray* a); +#endif // TF_LITE_STATIC_MEMORY + +// Fixed size list of floats. Used for per-channel quantization. +typedef struct TfLiteFloatArray { + int size; +#if defined(_MSC_VER) + // Context for why this is needed is in http://b/189926408#comment21 + float data[1]; +#elif (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \ + __GNUC_MINOR__ >= 1) || \ + defined(HEXAGON) || \ + (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1) + // gcc 6.1+ have a bug where flexible members aren't properly handled + // https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c + float data[0]; +#else + float data[]; +#endif +} TfLiteFloatArray; + +// Given the size (number of elements) in a TfLiteFloatArray, calculate its size +// in bytes. +int TfLiteFloatArrayGetSizeInBytes(int size); + +#ifndef TF_LITE_STATIC_MEMORY +// Create a array of a given `size` (uninitialized entries). +// This returns a pointer, that you must free using TfLiteFloatArrayFree(). 
+TfLiteFloatArray* TfLiteFloatArrayCreate(int size); + +// Free memory of array `a`. +void TfLiteFloatArrayFree(TfLiteFloatArray* a); +#endif // TF_LITE_STATIC_MEMORY + +// Since we must not depend on any libraries, define a minimal subset of +// error macros while avoiding names that have pre-conceived meanings like +// assert and check. + +// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than +// calling the context->ReportError function directly, so that message strings +// can be stripped out if the binary size needs to be severely optimized. +#ifndef TF_LITE_STRIP_ERROR_STRINGS +#ifdef TF_LITE_LOG_FILE_NAME +#define TF_LITE_KERNEL_LOG(context, ...) \ + do { \ + (context)->ReportError((context), __FILE__ " " __VA_ARGS__); \ + } while (false) + +#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) \ + do { \ + if ((context) != nullptr) { \ + (context)->ReportError((context), __FILE__ " " __VA_ARGS__); \ + } \ + } while (false) +#else // TF_LITE_LOG_FILE_NAME +#define TF_LITE_KERNEL_LOG(context, ...) \ + do { \ + (context)->ReportError((context), __VA_ARGS__); \ + } while (false) + +#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) \ + do { \ + if ((context) != nullptr) { \ + (context)->ReportError((context), __VA_ARGS__); \ + } \ + } while (false) +#endif // TF_LITE_LOG_FILE_NAME +#else // TF_LITE_STRIP_ERROR_STRINGS +#define ARGS_UNUSED(...) (void)sizeof(#__VA_ARGS__) +#define TF_LITE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) +#define TF_LITE_MAYBE_KERNEL_LOG(context, ...) ARGS_UNUSED(__VA_ARGS__) +#endif // TF_LITE_STRIP_ERROR_STRINGS + +// Check whether value is true, and if not return kTfLiteError from +// the current function (and report the error string msg). +#define TF_LITE_ENSURE_MSG(context, value, msg) \ + do { \ + if (!(value)) { \ + TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \ + return kTfLiteError; \ + } \ + } while (0) + +// Check whether the value `a` is true, and if not return kTfLiteError from +// the current function, while also reporting the location of the error. +#define TF_LITE_ENSURE(context, a) \ + do { \ + if (!(a)) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \ + __LINE__, #a); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_STATUS(a) \ + do { \ + const TfLiteStatus s = (a); \ + if (s != kTfLiteOk) { \ + return s; \ + } \ + } while (0) + +// Check whether the value `a == b` is true, and if not return kTfLiteError from +// the current function, while also reporting the location of the error. +// `a` and `b` may be evaluated more than once, so no side effects or +// extremely expensive computations should be done. +// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes. +#define TF_LITE_ENSURE_EQ(context, a, b) \ + do { \ + if ((a) != (b)) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \ + __LINE__, #a, #b, (a), (b)); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_TYPES_EQ(context, a, b) \ + do { \ + if ((a) != (b)) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \ + __LINE__, #a, #b, TfLiteTypeGetName(a), \ + TfLiteTypeGetName(b)); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon) \ + do { \ + auto delta = ((a) > (b)) ? 
((a) - (b)) : ((b) - (a)); \ + if (delta > epsilon) { \ + TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)", \ + __FILE__, __LINE__, #a, #b, static_cast(a), \ + static_cast(b)); \ + return kTfLiteError; \ + } \ + } while (0) + +#define TF_LITE_ENSURE_OK(context, status) \ + do { \ + const TfLiteStatus s = (status); \ + if ((s) != kTfLiteOk) { \ + return s; \ + } \ + } while (0) + +// Single-precision complex data type compatible with the C99 definition. +typedef struct TfLiteComplex64 { + float re, im; // real and imaginary parts, respectively. +} TfLiteComplex64; + +// Double-precision complex data type compatible with the C99 definition. +typedef struct TfLiteComplex128 { + double re, im; // real and imaginary parts, respectively. +} TfLiteComplex128; + +// Half precision data type compatible with the C99 definition. +typedef struct TfLiteFloat16 { + uint16_t data; +} TfLiteFloat16; + +// Return the name of a given type, for error reporting purposes. +const char* TfLiteTypeGetName(TfLiteType type); + +// SupportedQuantizationTypes. +typedef enum TfLiteQuantizationType { + // No quantization. + kTfLiteNoQuantization = 0, + // Affine quantization (with support for per-channel quantization). + // Corresponds to TfLiteAffineQuantization. + kTfLiteAffineQuantization = 1, +} TfLiteQuantizationType; + +// Structure specifying the quantization used by the tensor, if-any. +typedef struct TfLiteQuantization { + // The type of quantization held by params. + TfLiteQuantizationType type; + // Holds an optional reference to a quantization param structure. The actual + // type depends on the value of the `type` field (see the comment there for + // the values and corresponding types). + void* params; +} TfLiteQuantization; + +// Parameters for asymmetric quantization across a dimension (i.e per output +// channel quantization). +// quantized_dimension specifies which dimension the scales and zero_points +// correspond to. +// For a particular value in quantized_dimension, quantized values can be +// converted back to float using: +// real_value = scale * (quantized_value - zero_point) +typedef struct TfLiteAffineQuantization { + TfLiteFloatArray* scale; + TfLiteIntArray* zero_point; + int32_t quantized_dimension; +} TfLiteAffineQuantization; + +/* A union of pointers that points to memory for a given tensor. */ +typedef union TfLitePtrUnion { + /* Do not access these members directly, if possible, use + * GetTensorData(tensor) instead, otherwise only access .data, as other + * members are deprecated. */ + int32_t* i32; + uint32_t* u32; + int64_t* i64; + uint64_t* u64; + float* f; + TfLiteFloat16* f16; + double* f64; + char* raw; + const char* raw_const; + uint8_t* uint8; + bool* b; + int16_t* i16; + uint16_t* ui16; + TfLiteComplex64* c64; + TfLiteComplex128* c128; + int8_t* int8; + /* Only use this member. */ + void* data; +} TfLitePtrUnion; + +// Memory allocation strategies. +// * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated. +// * kTfLiteArenaRw: Arena allocated with no guarantees about persistence, +// and available during eval. +// * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and +// only available during eval. +// * kTfLiteDynamic: Allocated during eval, or for string tensors. +// * kTfLitePersistentRo: Allocated and populated during prepare. This is +// useful for tensors that can be computed during prepare and treated +// as constant inputs for downstream ops (also in prepare). 
+// * kTfLiteCustom: Custom memory allocation provided by the user. See +// TfLiteCustomAllocation below. +typedef enum TfLiteAllocationType { + kTfLiteMemNone = 0, + kTfLiteMmapRo, + kTfLiteArenaRw, + kTfLiteArenaRwPersistent, + kTfLiteDynamic, + kTfLitePersistentRo, + kTfLiteCustom, +} TfLiteAllocationType; + +// The delegates should use zero or positive integers to represent handles. +// -1 is reserved from unallocated status. +typedef int TfLiteBufferHandle; +enum { + kTfLiteNullBufferHandle = -1, +}; + +// Storage format of each dimension in a sparse tensor. +typedef enum TfLiteDimensionType { + kTfLiteDimDense = 0, + kTfLiteDimSparseCSR, +} TfLiteDimensionType; + +// Metadata to encode each dimension in a sparse tensor. +typedef struct TfLiteDimensionMetadata { + TfLiteDimensionType format; + int dense_size; + TfLiteIntArray* array_segments; + TfLiteIntArray* array_indices; +} TfLiteDimensionMetadata; + +// Parameters used to encode a sparse tensor. For detailed explanation of each +// field please refer to lite/schema/schema.fbs. +typedef struct TfLiteSparsity { + TfLiteIntArray* traversal_order; + TfLiteIntArray* block_map; + TfLiteDimensionMetadata* dim_metadata; + int dim_metadata_size; +} TfLiteSparsity; + +// Defines a custom memory allocation not owned by the runtime. +// `data` should be aligned to kDefaultTensorAlignment defined in +// lite/util.h. (Currently 64 bytes) +// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage. +typedef struct TfLiteCustomAllocation { + void* data; + size_t bytes; +} TfLiteCustomAllocation; + +// The flags used in `Interpreter::SetCustomAllocationForTensor`. +// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc. +typedef enum TfLiteCustomAllocationFlags { + kTfLiteCustomAllocationFlagsNone = 0, + // Skips checking whether allocation.data points to an aligned buffer as + // expected by the TFLite runtime. + // NOTE: Setting this flag can cause crashes when calling Invoke(). + // Use with caution. + kTfLiteCustomAllocationFlagsSkipAlignCheck = 1, +} TfLiteCustomAllocationFlags; + +// A tensor in the interpreter system which is a wrapper around a buffer of +// data including a dimensionality (or NULL if not currently defined). +#ifndef TF_LITE_STATIC_MEMORY +typedef struct TfLiteTensor { + // The data type specification for data stored in `data`. This affects + // what member of `data` union should be used. + TfLiteType type; + // A union of data pointers. The appropriate type should be used for a typed + // tensor based on `type`. + TfLitePtrUnion data; + // A pointer to a structure representing the dimensionality interpretation + // that the buffer should have. NOTE: the product of elements of `dims` + // and the element datatype size should be equal to `bytes` below. + TfLiteIntArray* dims; + // Quantization information. + TfLiteQuantizationParams params; + // How memory is mapped + // kTfLiteMmapRo: Memory mapped read only. + // i.e. weights + // kTfLiteArenaRw: Arena allocated read write memory + // (i.e. temporaries, outputs). + TfLiteAllocationType allocation_type; + // The number of bytes required to store the data of this Tensor. I.e. + // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if + // type is kTfLiteFloat32 and dims = {3, 2} then + // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. + size_t bytes; + + // An opaque pointer to a tflite::MMapAllocation + const void* allocation; + + // Null-terminated name of this tensor. 
+ const char* name; + + // The delegate which knows how to handle `buffer_handle`. + // WARNING: This is an experimental interface that is subject to change. + struct TfLiteDelegate* delegate; + + // An integer buffer handle that can be handled by `delegate`. + // The value is valid only when delegate is not null. + // WARNING: This is an experimental interface that is subject to change. + TfLiteBufferHandle buffer_handle; + + // If the delegate uses its own buffer (e.g. GPU memory), the delegate is + // responsible to set data_is_stale to true. + // `delegate->CopyFromBufferHandle` can be called to copy the data from + // delegate buffer. + // WARNING: This is an // experimental interface that is subject to change. + bool data_is_stale; + + // True if the tensor is a variable. + bool is_variable; + + // Quantization information. Replaces params field above. + TfLiteQuantization quantization; + + // Parameters used to encode a sparse tensor. + // This is optional. The field is NULL if a tensor is dense. + // WARNING: This is an experimental interface that is subject to change. + TfLiteSparsity* sparsity; + + // Optional. Encodes shapes with unknown dimensions with -1. This field is + // only populated when unknown dimensions exist in a read-write tensor (i.e. + // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and + // `dims_signature` contains [1, -1, -1, 3]). If no unknown dimensions exist + // then `dims_signature` is either null, or set to an empty array. Note that + // this field only exists when TF_LITE_STATIC_MEMORY is not defined. + const TfLiteIntArray* dims_signature; +} TfLiteTensor; + +// A structure representing an instance of a node. +// This structure only exhibits the inputs, outputs, user defined data and some +// node properties (like statefulness), not other features like the type. +typedef struct TfLiteNode { + // Inputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* inputs; + + // Outputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* outputs; + + // intermediate tensors to this node expressed as indices into the simulator's + // tensors. + TfLiteIntArray* intermediates; + + // Temporary tensors uses during the computations. This usually contains no + // tensors, but ops are allowed to change that if they need scratch space of + // any sort. + TfLiteIntArray* temporaries; + + // Opaque data provided by the node implementer through `Registration.init`. + void* user_data; + + // Opaque data provided to the node if the node is a builtin. This is usually + // a structure defined in builtin_op_data.h + void* builtin_data; + + // Custom initial data. This is the opaque data provided in the flatbuffer. + // WARNING: This is an experimental interface that is subject to change. + const void* custom_initial_data; + int custom_initial_data_size; + + // The pointer to the delegate. This is non-null only when the node is + // created by calling `interpreter.ModifyGraphWithDelegate`. + // WARNING: This is an experimental interface that is subject to change. + struct TfLiteDelegate* delegate; + + // Whether this op might have side effect (e.g. stateful op). + bool might_have_side_effect; +} TfLiteNode; +#else // defined(TF_LITE_STATIC_MEMORY)? +// NOTE: This flag is opt-in only at compile time. +// +// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct +// contains only the minimum fields required to initialize and prepare a micro +// inference graph. 
The fields in this struct have been ordered from +// largest-to-smallest for optimal struct sizeof. +// +// This struct does not use: +// - allocation +// - buffer_handle +// - data_is_stale +// - delegate +// - dims_signature +// - name +// - sparsity +typedef struct TfLiteTensor { + // TODO(b/155784997): Consider consolidating these quantization fields: + // Quantization information. Replaces params field above. + TfLiteQuantization quantization; + + // Quantization information. + TfLiteQuantizationParams params; + + // A union of data pointers. The appropriate type should be used for a typed + // tensor based on `type`. + TfLitePtrUnion data; + + // A pointer to a structure representing the dimensionality interpretation + // that the buffer should have. NOTE: the product of elements of `dims` + // and the element datatype size should be equal to `bytes` below. + TfLiteIntArray* dims; + + // The number of bytes required to store the data of this Tensor. I.e. + // (bytes of each element) * dims[0] * ... * dims[n-1]. For example, if + // type is kTfLiteFloat32 and dims = {3, 2} then + // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24. + size_t bytes; + + // The data type specification for data stored in `data`. This affects + // what member of `data` union should be used. + TfLiteType type; + + // How memory is mapped + // kTfLiteMmapRo: Memory mapped read only. + // i.e. weights + // kTfLiteArenaRw: Arena allocated read write memory + // (i.e. temporaries, outputs). + TfLiteAllocationType allocation_type; + + // True if the tensor is a variable. + bool is_variable; +} TfLiteTensor; + +// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains +// only the minimum fields required to represent a node. +// +// This struct does not use: +// - delegate +// - intermediates +// - temporaries +typedef struct TfLiteNode { + // Inputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* inputs; + + // Outputs to this node expressed as indices into the simulator's tensors. + TfLiteIntArray* outputs; + + // intermediate tensors to this node expressed as indices into the simulator's + // tensors. + TfLiteIntArray* intermediates; + + // Opaque data provided by the node implementer through `Registration.init`. + void* user_data; + + // Opaque data provided to the node if the node is a builtin. This is usually + // a structure defined in builtin_op_data.h + void* builtin_data; + + // Custom initial data. This is the opaque data provided in the flatbuffer. + // WARNING: This is an experimental interface that is subject to change. + const void* custom_initial_data; + int custom_initial_data_size; +} TfLiteNode; +#endif // TF_LITE_STATIC_MEMORY + +// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount +// of information required for a kernel to run during TfLiteRegistration::Eval. +// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM +// builds with this flag by default internally. +typedef struct TfLiteEvalTensor { + // A union of data pointers. The appropriate type should be used for a typed + // tensor based on `type`. + TfLitePtrUnion data; + + // A pointer to a structure representing the dimensionality interpretation + // that the buffer should have. + TfLiteIntArray* dims; + + // The data type specification for data stored in `data`. This affects + // what member of `data` union should be used. + TfLiteType type; +} TfLiteEvalTensor; + +#ifndef TF_LITE_STATIC_MEMORY +// Free data memory of tensor `t`. 
+void TfLiteTensorDataFree(TfLiteTensor* t); + +// Free quantization data. +void TfLiteQuantizationFree(TfLiteQuantization* quantization); + +// Free sparsity parameters. +void TfLiteSparsityFree(TfLiteSparsity* sparsity); + +// Free memory of tensor `t`. +void TfLiteTensorFree(TfLiteTensor* t); + +// Set all of a tensor's fields (and free any previously allocated data). +void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims, + TfLiteQuantizationParams quantization, char* buffer, + size_t size, TfLiteAllocationType allocation_type, + const void* allocation, bool is_variable, + TfLiteTensor* tensor); + +// Copies the contents of 'src' in 'dst'. +// Function does nothing if either 'src' or 'dst' is passed as nullptr and +// return kTfLiteOk. +// Returns kTfLiteError if 'src' and 'dst' doesn't have matching data size. +// Note function copies contents, so it won't create new data pointer +// or change allocation type. +// All Tensor related properties will be copied from 'src' to 'dst' like +// quantization, sparsity, ... +TfLiteStatus TfLiteTensorCopy(const TfLiteTensor* src, TfLiteTensor* dst); + +// Change the size of the memory block owned by `tensor` to `num_bytes`. +// Tensors with allocation types other than `kTfLiteDynamic` will be ignored and +// a kTfLiteOk will be returned. +// `tensor`'s internal data buffer will be assigned a pointer +// which can safely be passed to free or realloc if `num_bytes` is zero. +// If `preserve_data` is true, tensor data will be unchanged in the range from +// the start of the region up to the minimum of the old and new sizes. In the +// case of NULL tensor, or an error allocating new memory, returns +// `kTfLiteError`. +TfLiteStatus TfLiteTensorResizeMaybeCopy(size_t num_bytes, TfLiteTensor* tensor, + bool preserve_data); + +// Change the size of the memory block owned by `tensor` to `num_bytes`. +// Tensors with allocation types other than kTfLiteDynamic will be ignored and +// a kTfLiteOk will be returned. +// `tensor`'s internal data buffer will be assigned a pointer +// which can safely be passed to free or realloc if `num_bytes` is zero. +// Tensor data will be unchanged in the range from the start of the region up to +// the minimum of the old and new sizes. In the case +// of NULL tensor, or an error allocating new memory, returns `kTfLiteError`. +TfLiteStatus TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor); +#endif // TF_LITE_STATIC_MEMORY + +// WARNING: This is an experimental interface that is subject to change. +// +// Currently, TfLiteDelegateParams has to be allocated in a way that it's +// trivially destructable. It will be stored as `builtin_data` field in +// `TfLiteNode` of the delegate node. +// +// See also the `CreateDelegateParams` function in `interpreter.cc` details. +typedef struct TfLiteDelegateParams { + struct TfLiteDelegate* delegate; + TfLiteIntArray* nodes_to_replace; + TfLiteIntArray* input_tensors; + TfLiteIntArray* output_tensors; +} TfLiteDelegateParams; + +// WARNING: This is an experimental interface that is subject to change. +// +// Currently, TfLiteOpaqueDelegateParams has to be allocated in a way that it's +// trivially destructable. It will be stored as `builtin_data` field in +// `TfLiteNode` of the delegate node. +// +// See also the `CreateOpaqueDelegateParams` function in `subgraph.cc` +// details. 
+typedef struct TfLiteOpaqueDelegateParams { + TfLiteOpaqueDelegate* delegate; + void* delegate_data; + TfLiteIntArray* nodes_to_replace; + TfLiteIntArray* input_tensors; + TfLiteIntArray* output_tensors; +} TfLiteOpaqueDelegateParams; + +typedef struct TfLiteContext { + // Number of tensors in the context. + size_t tensors_size; + + // The execution plan contains a list of the node indices in execution + // order. execution_plan->size is the current number of nodes. And, + // execution_plan->data[0] is the first node that needs to be run. + // TfLiteDelegates can traverse the current execution plan by iterating + // through each member of this array and using GetNodeAndRegistration() to + // access details about a node. i.e. + // + // TfLiteIntArray* execution_plan; + // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan)); + // for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) { + // int node_index = execution_plan->data[exec_index]; + // TfLiteNode* node; + // TfLiteRegistration* reg; + // context->GetNodeAndRegistration(context, node_index, &node, ®); + // } + // Note: the memory pointed by '`*execution_plan` is OWNED by TfLite runtime. + // Future calls to GetExecutionPlan invalidates earlier outputs. The following + // code snippet shows the issue of such an invocation pattern. After calling + // CheckNode, subsequent access to `plan_1st` is undefined. + // + // void CheckNode(const TfLiteNode* node) { + // ... + // TfLiteIntArray* plan_2nd; + // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd)); + // ... + // } + // + // TfLiteIntArray* plan_1st; + // TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st)); + // for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) { + // int node_index = plan_1st->data[exec_index]; + // TfLiteNode* node; + // TfLiteRegistration* reg; + // context->GetNodeAndRegistration(context, node_index, &node, ®); + // CheckNode(node); + // } + // + // WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context, + TfLiteIntArray** execution_plan); + + // opaque full context ptr (an opaque c++ data structure) + void* impl_; + + // Request memory pointer be resized. Updates dimensions on the tensor. + // NOTE: ResizeTensor takes ownership of newSize. + TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor, + TfLiteIntArray* new_size); + // Request that an error be reported with format string msg. + void (*ReportError)(struct TfLiteContext*, const char* msg, ...); + + // Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If + // non-null, the value pointed to by `first_new_tensor_index` will be set to + // the index of the first new tensor. + TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add, + int* first_new_tensor_index); + + // Get a Tensor node by node_index. + // WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*GetNodeAndRegistration)( + struct TfLiteContext*, int node_index, TfLiteNode** node, + struct TfLiteRegistration** registration); + + // Replace ops with one or more stub delegate operations. This function + // does not take ownership of `nodes_to_replace`. 
+ TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)( + struct TfLiteContext*, struct TfLiteRegistration registration, + const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate); + + // Number of threads that are recommended to subsystems like gemmlowp and + // eigen. + int recommended_num_threads; + + // Access external contexts by type. + // WARNING: This is an experimental interface that is subject to change. + TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*, + TfLiteExternalContextType); + // Set the value of a external context. Does not take ownership of the + // pointer. + // WARNING: This is an experimental interface that is subject to change. + void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType, + TfLiteExternalContext*); + + // Flag for allowing float16 precision for FP32 calculation. + // default: false. + // WARNING: This is an experimental API and subject to change. + bool allow_fp32_relax_to_fp16; + + // Pointer to the op-level profiler, if set; nullptr otherwise. + void* profiler; + + // Allocate persistent buffer which has the same life time as the interpreter. + // Returns nullptr on failure. + // The memory is allocated from heap for TFL, and from tail in TFLM. + // This method is only available in Init or Prepare stage. + // WARNING: This is an experimental interface that is subject to change. + void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes); + + // Allocate a buffer which will be deallocated right after invoke phase. + // The memory is allocated from heap in TFL, and from volatile arena in TFLM. + // This method is only available in invoke stage. + // NOTE: If possible use RequestScratchBufferInArena method to avoid memory + // allocation during inference time. + // WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes, + void** ptr); + + // Request a scratch buffer in the arena through static memory planning. + // This method is only available in Prepare stage and the buffer is allocated + // by the interpreter between Prepare and Eval stage. In Eval stage, + // GetScratchBuffer API can be used to fetch the address. + // WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx, + size_t bytes, int* buffer_idx); + + // Get the scratch buffer pointer. + // This method is only available in Eval stage. + // WARNING: This is an experimental interface that is subject to change. + void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx); + + // Resize the memory pointer of the `tensor`. This method behaves the same as + // `ResizeTensor`, except that it makes a copy of the shape array internally + // so the shape array could be deallocated right afterwards. + // WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx, + TfLiteTensor* tensor, int dims, + const int* shape); + + // This method provides a preview of post-delegation partitioning. Each + // TfLiteDelegateParams in the referenced array corresponds to one instance of + // the delegate kernel. 
+ // Example usage: + // + // TfLiteIntArray* nodes_to_replace = ...; + // TfLiteDelegateParams* params_array; + // int num_partitions = 0; + // TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning( + // context, delegate, nodes_to_replace, ¶ms_array, &num_partitions)); + // for (int idx = 0; idx < num_partitions; idx++) { + // const auto& partition_params = params_array[idx]; + // ... + // } + // + // NOTE: The context owns the memory referenced by partition_params_array. It + // will be cleared with another call to PreviewDelegateParitioning, or after + // TfLiteDelegateParams::Prepare returns. + // + // WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*PreviewDelegatePartitioning)( + struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace, + TfLiteDelegateParams** partition_params_array, int* num_partitions); + + // Returns a TfLiteTensor struct for a given index. + // WARNING: This is an experimental interface that is subject to change. + // WARNING: This method may not be available on all platforms. + TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context, + int tensor_idx); + + // Returns a TfLiteEvalTensor struct for a given index. + // WARNING: This is an experimental interface that is subject to change. + // WARNING: This method may not be available on all platforms. + TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context, + int tensor_idx); + + // Retrieves named metadata buffer from the TFLite model. + // Returns kTfLiteOk if metadata is successfully obtained from the flatbuffer + // Model: that is, there exists a `metadata` entry with given `name` string. + // (see TFLite's schema.fbs). + // The corresponding `buffer` information is populated in `ptr` & `bytes`. + // The data from `ptr` is valid for the lifetime of the Interpreter. + // + // WARNING: This is an experimental interface that is subject to change. + TfLiteStatus (*GetModelMetadata)(const struct TfLiteContext* context, + const char* name, const char** ptr, + size_t* bytes); +} TfLiteContext; + +// `TfLiteRegistrationExternal` is an external version of `TfLiteRegistration` +// for C API which doesn't use internal types (such as `TfLiteContext`) but only +// uses stable API types (such as `TfLiteOpaqueContext`). The purpose of each +// field is the exactly the same as with `TfLiteRegistration`. +typedef struct TfLiteRegistrationExternal TfLiteRegistrationExternal; + +typedef struct TfLiteRegistration { + // Initializes the op from serialized data. + // Called only *once* for the lifetime of the op, so any one-time allocations + // should be made here (unless they depend on tensor sizes). + // + // If a built-in op: + // `buffer` is the op's params data (TfLiteLSTMParams*). + // `length` is zero. + // If custom op: + // `buffer` is the op's `custom_options`. + // `length` is the size of the buffer. + // + // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer + // or an instance of a struct). + // + // The returned pointer will be stored with the node in the `user_data` field, + // accessible within prepare and invoke functions below. + // NOTE: if the data is already in the desired format, simply implement this + // function to return `nullptr` and implement the free function to be a no-op. + void* (*init)(TfLiteContext* context, const char* buffer, size_t length); + + // The pointer `buffer` is the data previously returned by an init invocation. 
+ void (*free)(TfLiteContext* context, void* buffer); + + // prepare is called when the inputs this node depends on have been resized. + // context->ResizeTensor() can be called to request output tensors to be + // resized. + // Can be called multiple times for the lifetime of the op. + // + // Returns kTfLiteOk on success. + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); + + // Execute the node (should read node->inputs and output to node->outputs). + // Returns kTfLiteOk on success. + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); + + // profiling_string is called during summarization of profiling information + // in order to group executions together. Providing a value here will cause a + // given op to appear multiple times is the profiling report. This is + // particularly useful for custom ops that can perform significantly + // different calculations depending on their `user-data`. + const char* (*profiling_string)(const TfLiteContext* context, + const TfLiteNode* node); + + // Builtin codes. If this kernel refers to a builtin this is the code + // of the builtin. This is so we can do marshaling to other frameworks like + // NN API. + // Note: It is the responsibility of the registration binder to set this + // properly. + int32_t builtin_code; + + // Custom op name. If the op is a builtin, this will be null. + // Note: It is the responsibility of the registration binder to set this + // properly. + // WARNING: This is an experimental interface that is subject to change. + const char* custom_name; + + // The version of the op. + // Note: It is the responsibility of the registration binder to set this + // properly. + int version; + + // The external version of `TfLiteRegistration`. Since we can't use internal + // types (such as `TfLiteContext`) for C API to maintain ABI stability. + // C API user will provide `TfLiteRegistrationExternal` to implement custom + // ops. We keep it inside of `TfLiteRegistration` and use it to route + // callbacks properly. + TfLiteRegistrationExternal* registration_external; +} TfLiteRegistration; + +// Old version of `TfLiteRegistration` to maintain binary backward +// compatibility. +// WARNING: This structure is deprecated / not an official part of the API. +// It should be only used for binary backward compatibility. +typedef struct TfLiteRegistration_V1 { + void* (*init)(TfLiteContext* context, const char* buffer, size_t length); + void (*free)(TfLiteContext* context, void* buffer); + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node); + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node); + const char* (*profiling_string)(const TfLiteContext* context, + const TfLiteNode* node); + int32_t builtin_code; + const char* custom_name; + int version; +} TfLiteRegistration_V1; + +// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the +// values should be 1, 2, 4, 8, ...etc. +typedef enum TfLiteDelegateFlags { + kTfLiteDelegateFlagsNone = 0, + // The flag is set if the delegate can handle dynamic sized tensors. + // For example, the output shape of a `Resize` op with non-constant shape + // can only be inferred when the op is invoked. + // In this case, the Delegate is responsible for calling + // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling + // `ResizeTensor` when invoking the op. + // + // If the delegate isn't capable to handle dynamic tensors, this flag need + // to be set to false. 
+ kTfLiteDelegateFlagsAllowDynamicTensors = 1, + + // This flag can be used by delegates (that allow dynamic tensors) to ensure + // applicable tensor shapes are automatically propagated in the case of tensor + // resizing. + // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors + // of a delegate kernel will have correct shapes before its Prepare() method + // is called. The runtime leverages TFLite builtin ops in the original + // execution plan to propagate shapes. + // + // A few points to note: + // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is + // false, this one is redundant since the delegate kernels are re-initialized + // every time tensors are resized. + // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra + // work is required to prepare the original execution plan. + // 3. This flag requires that the original execution plan only have ops with + // valid registrations (and not 'dummy' custom ops like with Flex). + // WARNING: This feature is experimental and subject to change. + kTfLiteDelegateFlagsRequirePropagatedShapes = 2, + + // This flag can be used by delegates to request per-operator profiling. If a + // node is a delegate node, this flag will be checked before profiling. If + // set, then the node will not be profiled. The delegate will then add per + // operator information using Profiler::EventType::OPERATOR_INVOKE_EVENT and + // the results will appear in the operator-wise Profiling section and not in + // the Delegate internal section. + kTfLiteDelegateFlagsPerOperatorProfiling = 4 +} TfLiteDelegateFlags; + +// WARNING: This is an experimental interface that is subject to change. +typedef struct TfLiteDelegate { + // Data that delegate needs to identify itself. This data is owned by the + // delegate. The delegate is owned in the user code, so the delegate is + // responsible for deallocating this when it is destroyed. + void* data_; + + // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the + // delegate a view of the current graph through TfLiteContext*. It typically + // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() + // to ask the TensorFlow lite runtime to create macro-nodes to represent + // delegated subgraphs of the original graph. + TfLiteStatus (*Prepare)(TfLiteContext* context, + struct TfLiteDelegate* delegate); + + // Copy the data from delegate buffer handle into raw memory of the given + // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as + // long as it follows the rules for kTfLiteDynamic tensors, in which case this + // cannot be null. + TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context, + struct TfLiteDelegate* delegate, + TfLiteBufferHandle buffer_handle, + TfLiteTensor* tensor); + + // Copy the data from raw memory of the given 'tensor' to delegate buffer + // handle. This can be null if the delegate doesn't use its own buffer. + TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context, + struct TfLiteDelegate* delegate, + TfLiteBufferHandle buffer_handle, + TfLiteTensor* tensor); + + // Free the Delegate Buffer Handle. Note: This only frees the handle, but + // this doesn't release the underlying resource (e.g. textures). The + // resources are either owned by application layer or the delegate. + // This can be null if the delegate doesn't use its own buffer. 
+ void (*FreeBufferHandle)(TfLiteContext* context, + struct TfLiteDelegate* delegate, + TfLiteBufferHandle* handle); + + // Bitmask flags. See the comments in `TfLiteDelegateFlags`. + int64_t flags; + + // The opaque delegate builder associated with this object. If set then the + // TF Lite runtime will give precedence to this field. E.g. instead of + // invoking 'Prepare' via the function pointer inside the 'TfLiteDelegate' + // object, the runtime will first check if the corresponding function + // pointer inside 'opaque_delegate_builder' is set and if so invoke that. + // + // If this field is non-null, then the 'Prepare' field (of the + // 'TfLiteDelegate') should be null. + struct TfLiteOpaqueDelegateBuilder* opaque_delegate_builder; +} TfLiteDelegate; + +// Build a 'null' delegate, with all the fields properly set to their default +// values. +TfLiteDelegate TfLiteDelegateCreate(void); + +// `TfLiteOpaqueDelegateBuilder` is used for constructing +// `TfLiteOpaqueDelegate`, see `TfLiteOpaqueDelegateCreate` below. Note: +// This struct is not ABI stable. +// +// For forward source compatibility `TfLiteOpaqueDelegateBuilder` objects should +// be brace-initialized, so that all fields (including any that might be added +// in the future) get zero-initialized. The purpose of each field is exactly +// the same as with `TfLiteDelegate`. +// +// WARNING: This is an experimental interface that is subject to change. +typedef struct TfLiteOpaqueDelegateBuilder { + // Data that delegate needs to identify itself. This data is owned by the + // delegate. The delegate is owned in the user code, so the delegate is + // responsible for deallocating this when it is destroyed. + void* data; + // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the + // delegate a view of the current graph through TfLiteContext*. It typically + // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels() + // to ask the TensorFlow lite runtime to create macro-nodes to represent + // delegated subgraphs of the original graph. + TfLiteStatus (*Prepare)(TfLiteOpaqueContext* context, // NOLINT + TfLiteOpaqueDelegate* delegate, void* data); + // Copies the data from delegate buffer handle into raw memory of the given + // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as + // long as it follows the rules for kTfLiteDynamic tensors, in which case this + // cannot be null. + TfLiteStatus (*CopyFromBufferHandle)( // NOLINT + TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); + // Copies the data from raw memory of the given 'tensor' to delegate buffer + // handle. This can be null if the delegate doesn't use its own buffer. + TfLiteStatus (*CopyToBufferHandle)( // NOLINT + TfLiteOpaqueContext* context, TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle buffer_handle, TfLiteOpaqueTensor* tensor); + // Frees the Delegate Buffer Handle. Note: This only frees the handle, but + // this doesn't release the underlying resource (e.g. textures). The + // resources are either owned by application layer or the delegate. + // This can be null if the delegate doesn't use its own buffer. + void (*FreeBufferHandle)(TfLiteOpaqueContext* context, // NOLINT + TfLiteOpaqueDelegate* delegate, void* data, + TfLiteBufferHandle* handle); + // Bitmask flags. See the comments in `TfLiteDelegateFlags`. 
+ int64_t flags; +} TfLiteOpaqueDelegateBuilder; + +// Creates an opaque delegate and returns its address. The opaque delegate will +// behave according to the provided 'opaque_delegate_builder'. The lifetime of +// the objects pointed to by any of the fields within the +// 'opaque_delegate_builder' must outlive the returned +// 'TfLiteOpaqueDelegate' and any 'TfLiteInterpreter', +// 'TfLiteInterpreterOptions', 'tflite::Interpreter', or +// 'tflite::InterpreterBuilder' that the delegate is added to. The returned +// address should be passed to 'TfLiteOpaqueDelegateDelete' for deletion. If +// 'opaque_delegate_builder' is a null pointer, then a null pointer will be +// returned. +TfLiteOpaqueDelegate* TfLiteOpaqueDelegateCreate( + const TfLiteOpaqueDelegateBuilder* opaque_delegate_builder); + +// Deletes the provided opaque 'delegate'. This function has no effect if the +// 'delegate' is a null pointer. +void TfLiteOpaqueDelegateDelete(TfLiteOpaqueDelegate* delegate); + +// Returns a pointer to the data associated with the provided opaque 'delegate'. +// +// A null pointer will be returned when: +// - The 'delegate' is null. +// - The 'data' field of the 'TfLiteOpaqueDelegateBuilder' used to construct the +// 'delegate' was null. +// - Or in case of any other error. +// - The 'delegate' has been constructed via a 'TfLiteOpaqueDelegateBuilder', +// but the 'data' field of the 'TfLiteOpaqueDelegateBuilder' is null. +// +// The data_ field of 'delegate' will be returned if the +// 'opaque_delegate_builder' field is null. +void* TfLiteOpaqueDelegateGetData(const TfLiteOpaqueDelegate* delegate); + +#ifdef __cplusplus +} // extern "C" +#endif // __cplusplus +#endif // TENSORFLOW_LITE_CORE_C_COMMON_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.cc new file mode 100644 index 0000000..dd733f4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.cc @@ -0,0 +1,192 @@ +/* Copyright 2023 Edge Impulse Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/
+
+#define FLATBUFFERS_LOCALE_INDEPENDENT 0
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+
+#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flexbuffers.h"  // from @flatbuffers
+#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
+
+#define FEATURE_TYPE float
+
+namespace tflite {
+namespace ops {
+namespace custom {
+namespace tree_ensemble_classifier {
+
+struct OpDataTree {
+  uint32_t num_leaf_nodes;
+  uint32_t num_internal_nodes;
+  uint32_t num_trees;
+  const uint16_t* nodes_modes;
+  const uint16_t* nodes_featureids;
+  const float* nodes_values;
+  const uint16_t* nodes_truenodeids;
+  const uint16_t* nodes_falsenodeids;
+  const float* nodes_weights;
+  const uint8_t* nodes_classids;
+  const uint16_t* tree_root_ids;
+  const uint8_t* buffer_t;
+  size_t buffer_length;
+};
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+
+  const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
+  const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
+
+  auto* data = new OpDataTree;
+
+  data->buffer_t = buffer_t;
+  data->buffer_length = length;
+
+  data->num_leaf_nodes = m["num_leaf_nodes"].AsUInt32();
+  data->num_internal_nodes = m["num_internal_nodes"].AsUInt32();
+  data->num_trees = m["num_trees"].AsUInt32();
+
+  data->nodes_modes = (uint16_t*)(m["nodes_modes"].AsBlob().data());
+  data->nodes_featureids = (uint16_t*)(m["nodes_featureids"].AsBlob().data());
+  data->nodes_values = (float*)(m["nodes_values"].AsBlob().data());
+  data->nodes_truenodeids = (uint16_t*)(m["nodes_truenodeids"].AsBlob().data());
+  data->nodes_falsenodeids = (uint16_t*)(m["nodes_falsenodeids"].AsBlob().data());
+  data->nodes_weights = (float*)(m["nodes_weights"].AsBlob().data());
+  data->nodes_classids = (uint8_t*)(m["nodes_classids"].AsBlob().data());
+  data->tree_root_ids = (uint16_t*)(m["tree_root_ids"].AsBlob().data());
+
+  return data;
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+
+  const OpDataTree* data = static_cast<const OpDataTree*>(node->user_data);
+  const flexbuffers::Map& m = flexbuffers::GetRoot(data->buffer_t, data->buffer_length).AsMap();
+
+  // The OOB checks below are very important to prevent vulnerabilities where an adversary sends
+  // us a malicious TFLite model, similar to: https://nvd.nist.gov/vuln/detail/CVE-2022-23560
+
+  int num_nodes = data->num_leaf_nodes + data->num_internal_nodes;
+
+  // Check that the tree root ids are valid.
+  for (uint32_t i = 0; i < data->num_trees; i++) {
+    TF_LITE_ENSURE_EQ(context, data->tree_root_ids[i] < num_nodes, true);
+    TF_LITE_ENSURE_EQ(context, data->tree_root_ids[i] >= 0, true);
+  }
+
+  // Check that all node indices are valid
+  for (uint32_t i = 0; i < data->num_internal_nodes; i++) {
+    TF_LITE_ENSURE_EQ(context, data->nodes_truenodeids[i] < num_nodes, true);
+    TF_LITE_ENSURE_EQ(context, data->nodes_truenodeids[i] >= 0, true);
+    TF_LITE_ENSURE_EQ(context, data->nodes_falsenodeids[i] < num_nodes, true);
+    TF_LITE_ENSURE_EQ(context, data->nodes_falsenodeids[i] >= 0, true);
+  }
+
+  // Check all node arrays have the same length
+  TF_LITE_ENSURE_EQ(context, data->num_internal_nodes, m["nodes_featureids"].AsBlob().size());
+  TF_LITE_ENSURE_EQ(context, data->num_internal_nodes, m["nodes_values"].AsBlob().size());
+  TF_LITE_ENSURE_EQ(context, data->num_internal_nodes, m["nodes_truenodeids"].AsBlob().size());
+  TF_LITE_ENSURE_EQ(context, data->num_internal_nodes, m["nodes_falsenodeids"].AsBlob().size());
+  TF_LITE_ENSURE_EQ(context, data->num_leaf_nodes, m["nodes_weights"].AsBlob().size());
+  TF_LITE_ENSURE_EQ(context, data->num_leaf_nodes, m["nodes_classids"].AsBlob().size());
+
+  // Check data types are supported. Currently we only support one combination.
+  TF_LITE_ENSURE_EQ(context, strncmp(m["tree_index_type"].AsString().c_str(), "uint16", 6), 0);
+  TF_LITE_ENSURE_EQ(context, strncmp(m["node_value_type"].AsString().c_str(), "float32", 7), 0);
+  TF_LITE_ENSURE_EQ(context, strncmp(m["class_index_type"].AsString().c_str(), "uint8", 5), 0);
+  TF_LITE_ENSURE_EQ(context, strncmp(m["class_weight_type"].AsString().c_str(), "float32", 7), 0);
+  TF_LITE_ENSURE_EQ(context, strncmp(m["equality_operator"].AsString().c_str(), "leq", 3), 0);
+
+  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
+  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
+  const TfLiteTensor* input = GetInput(context, node, 0);
+  TF_LITE_ENSURE(context, input != nullptr);
+  TF_LITE_ENSURE(context, NumDimensions(input) == 2);
+  TfLiteTensor* output = GetOutput(context, node, 0);
+  TF_LITE_ENSURE(context, output != nullptr);
+
+  int input_width = SizeOfDimension(input, 1);
+  int output_width = SizeOfDimension(output, 1);
+
+  // Check that all indices into the input/output tensor are valid
+  for (uint32_t i = 0; i < data->num_internal_nodes; i++) {
+    TF_LITE_ENSURE(context, data->nodes_featureids[i] < input_width);
+    TF_LITE_ENSURE(context, data->nodes_featureids[i] >= 0);
+    if (data->nodes_modes[i] == 0) {
+      TF_LITE_ENSURE(context, data->nodes_classids[i] < output_width);
+      TF_LITE_ENSURE(context, data->nodes_classids[i] >= 0);
+    }
+  }
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+
+  const OpDataTree* data = static_cast<const OpDataTree*>(node->user_data);
+  const TfLiteTensor* input;
+  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
+  TfLiteTensor* output;
+  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
+
+  float* output_data = GetTensorData<float>(output);
+  memset(output_data, 0, GetTensorShape(output).FlatSize() * sizeof(float));
+
+  for (uint32_t i = 0; i < data->num_trees; i++) {
+    uint16_t ix = data->tree_root_ids[i];
+    while (ix < data->num_internal_nodes) {
+      if (input->data.f[data->nodes_featureids[ix]] <= data->nodes_values[ix]) {
+        ix = data->nodes_truenodeids[ix];
+      } else {
+        ix = data->nodes_falsenodeids[ix];
+      }
+    }
+    ix -= data->num_internal_nodes;
+    output->data.f[data->nodes_classids[ix]] += data->nodes_weights[ix];
+  }
+
+  return kTfLiteOk;
+}
+
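The Eval() loop above walks each tree over the flattened node arrays: indices below num_internal_nodes are split nodes, anything at or above it is a leaf, and the leaf-only arrays (nodes_weights, nodes_classids) are indexed after subtracting num_internal_nodes, which is also why Prepare() bounds-checks every id against both ranges. A minimal standalone sketch of that traversal, using made-up values for a single one-split tree (not data from this patch), looks like this:

// Standalone illustration of the traversal performed by Eval() above.
// The arrays mirror the flexbuffer fields; the values are invented.
#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t num_internal_nodes = 1;
  const uint32_t num_trees = 1;
  const uint16_t tree_root_ids[]      = {0};
  const uint16_t nodes_featureids[]   = {0};     // split node 0 tests feature 0
  const float    nodes_values[]       = {0.5f};  // threshold
  const uint16_t nodes_truenodeids[]  = {1};     // feature <= threshold -> node 1 (leaf)
  const uint16_t nodes_falsenodeids[] = {2};     // otherwise            -> node 2 (leaf)
  // Leaf arrays are indexed by (node index - num_internal_nodes).
  const uint8_t  nodes_classids[]     = {0, 1};
  const float    nodes_weights[]      = {1.0f, 1.0f};

  const float input[] = {0.3f};       // one sample, one feature
  float output[2] = {0.0f, 0.0f};

  for (uint32_t t = 0; t < num_trees; ++t) {
    uint16_t ix = tree_root_ids[t];
    while (ix < num_internal_nodes) {  // split nodes come first in the numbering
      ix = (input[nodes_featureids[ix]] <= nodes_values[ix])
               ? nodes_truenodeids[ix]
               : nodes_falsenodeids[ix];
    }
    ix -= num_internal_nodes;          // convert to a leaf index
    output[nodes_classids[ix]] += nodes_weights[ix];
  }

  printf("class 0: %f, class 1: %f\n", output[0], output[1]);
  return 0;
}

Running this prints 1 for class 0 and 0 for class 1, since the input feature 0.3 falls on the "<=" (true) branch of the only split.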
+} // namespace + +TfLiteRegistration* Register_TREE_ENSEMBLE_CLASSIFIER() { + static TfLiteRegistration r = { + tree_ensemble_classifier::Init, + nullptr, + tree_ensemble_classifier::Prepare, + tree_ensemble_classifier::Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; + return &r; +} + +TfLiteRegistration* Register_TFLITE_TREE_ENSEMBLE_CLASSIFIER() { + return Register_TREE_ENSEMBLE_CLASSIFIER(); +} + +} // namespace custom +} // namespace ops +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h new file mode 100644 index 0000000..fcdc98a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/custom/tree_ensemble_classifier.h @@ -0,0 +1,31 @@ +/* Copyright 2023 Edge Impulse Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_TREE_ENSEMBLE_CLASSIFIER_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_TREE_ENSEMBLE_CLASSIFIER_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { +namespace ops { +namespace custom { + +TfLiteRegistration* Register_TREE_ENSEMBLE_CLASSIFIER(); + +} +} +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_TREE_ENSEMBLE_CLASSIFIER_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h index e72ebdc..05af6fd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h @@ -15,12 +15,14 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_ +#include #ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK #ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK #define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK #endif #endif +#include #include #include "edge-impulse-sdk/third_party/gemmlowp/fixedpoint/fixedpoint.h" @@ -75,6 +77,7 @@ float ActivationFunction(float x) { inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, const float* bias_data, int array_size, float* array_data) { + if (bias_size == 0) return; // Note: see b/132215220: in May 2019 we thought it would be OK to replace // this with the Eigen one-liner: // return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max). 
@@ -138,6 +141,100 @@ inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size, #endif } +// Single-rounding MultiplyByQuantizedMultiplier +#if TFLITE_SINGLE_ROUNDING +inline int32_t MultiplyByQuantizedMultiplier(int32_t x, + int32_t quantized_multiplier, + int shift) { + TFLITE_DCHECK(quantized_multiplier >= 0); + TFLITE_DCHECK(shift >= -31 && shift <= 30); + + const int64_t total_shift = 31 - shift; + const int64_t round = static_cast(1) << (total_shift - 1); + int64_t result = x * static_cast(quantized_multiplier) + round; + result = result >> total_shift; + + TFLITE_DCHECK(result >= std::numeric_limits::min() && + result <= std::numeric_limits::max()); + return static_cast(result); +} + +inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp( + int32_t x, int32_t quantized_multiplier, int shift) { + TFLITE_DCHECK_LE(shift, 0); + return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); +} + +inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne( + int32_t x, int32_t quantized_multiplier, int shift) { + TFLITE_DCHECK_GE(shift, 0); + return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); +} + +inline int32_t MultiplyByQuantizedMultiplier(int64_t x, + int32_t quantized_multiplier, + int shift) { + // Inputs: + // - quantized_multiplier has fixed point at bit 31 + // - shift is -31 to +7 (negative for right shift) + // + // Assumptions: The following input ranges are assumed + // - quantize_scale>=0 (the usual range is (1<<30) to (1>>31)-1) + // - scaling is chosen so final scaled result fits in int32_t + // - input x is in the range -(1<<47) <= x < (1<<47) + TFLITE_DCHECK(quantized_multiplier >= 0); + TFLITE_DCHECK(shift >= -31 && shift < 8); + TFLITE_DCHECK(x >= -(static_cast(1) << 47) && + x < (static_cast(1) << 47)); + + const int32_t reduced_multiplier = + (quantized_multiplier < 0x7FFF0000) + ? 
((quantized_multiplier + (1 << 15)) >> 16) + : 0x7FFF; + const int64_t total_shift = 15 - shift; + const int64_t round = static_cast(1) << (total_shift - 1); + int64_t result = x * static_cast(reduced_multiplier) + round; + result = result >> total_shift; + + TFLITE_DCHECK(result >= std::numeric_limits::min() && + result <= std::numeric_limits::max()); + return static_cast(result); +} + +#ifdef USE_NEON +inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows( + int32x4x4_t input_val, int32_t quantized_multiplier, int shift) { + TFLITE_DCHECK(quantized_multiplier >= 0); + + const int right_shift = std::min(-1, shift); + const int left_shift = shift - right_shift; + + const int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier); + const int32x4_t left_shift_dup = vdupq_n_s32(left_shift); + const int32x4_t right_shift_dup = vdupq_n_s32(right_shift); + + int32x4x4_t result; + result.val[0] = vrshlq_s32( + vqdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup), multiplier_dup), + right_shift_dup); + + result.val[1] = vrshlq_s32( + vqdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup), multiplier_dup), + right_shift_dup); + + result.val[2] = vrshlq_s32( + vqdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup), multiplier_dup), + right_shift_dup); + + result.val[3] = vrshlq_s32( + vqdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup), multiplier_dup), + right_shift_dup); + + return result; +} +#endif // USE_NEON +// Double-rounding MultiplyByQuantizedMultiplier +#else inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp( int32_t x, int32_t quantized_multiplier, int left_shift) { using gemmlowp::RoundingDivideByPOT; @@ -224,7 +321,8 @@ inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows( return result; } -#endif +#endif // USE_NEON +#endif // TFLITE_SINGLE_ROUNDING template int CountLeadingZeros(T integer_input) { @@ -279,81 +377,216 @@ inline Integer FloorLog2(Integer n) { } } -// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in -// softmax -// func - the function to build the LUT for (e.g exp(x)) -// min,max - table limits -// table - pointer to buffer -// num - number of elements in the LUT -inline void gen_lut(double (*func)(double), double min, double max, - int16_t* table, const int num) { - // size of table should equal to num + 1 - // last element only for slope calculation - double step = (max - min) / (num - 1); - double half_step = step / 2.0; - for (int i = 0; i < num - 1; i++) { - double sample_val = TfLiteRound(func(min + i * step) * 32768.0); - double midpoint_interp_val = - TfLiteRound((func(min + (i + 1) * step) * 32768.0 + - TfLiteRound(func(min + i * step) * 32768.0)) / - 2.0); - double midpoint_val = - TfLiteRound(func(min + i * step + half_step) * 32768.0); - double midpoint_err = midpoint_interp_val - midpoint_val; - double bias = TfLiteRound(midpoint_err / 2.0); - table[i] = std::min(std::max(sample_val - bias, -32768.0), - 32767.0); +namespace detail { + +// LUTPopulate takes an optional type-erased transform_params to allow passing +// extra parameters to the transform function pointer. 
const void* is used +// instead of std::function to be compatible with TFLite Micro +template +inline typename std::enable_if::value, + FloatT>::type +LUTTransform(Func transform, const void* /*transform_params*/, FloatT value) { + static_assert(std::is_floating_point::value, + "FloatT must be a floating-point type."); + return transform(value); +} + +template +inline typename std::enable_if< + std::is_same::value, FloatT>::type +LUTTransform(Func transform, const void* transform_params, FloatT value) { + static_assert(std::is_floating_point::value, + "FloatT must be a floating-point type."); + return transform(value, transform_params); +} + +// Use the same LUT generation code for both uint8_t and int8_t. Int8_t indexes +// will be directly casted to uint8_t, the int8 LUT will thus be ordered as [0, +// 1, ..., 127, -128, ..., -2, -1] instead of [-128, -127, ..., -1, 0, 1, ..., +// 126, 127]. +template +inline void LUTPopulateInt8(float input_scale, int32_t input_zero_point, + float output_scale, int32_t output_zero_point, + Func transform, const void* transform_params, + T* lut) { + static_assert( + std::is_same::value || std::is_same::value, + "T must be an uint8 or int8 type."); + uint8_t* lut_uint8 = reinterpret_cast(lut); + const float inverse_scale = 1 / output_scale; + int32_t maxval = std::numeric_limits::max(); + int32_t minval = std::numeric_limits::min(); + for (int32_t val = minval; val <= maxval; ++val) { + const float dequantized = input_scale * (val - input_zero_point); + const float transformed = + LUTTransform(transform, transform_params, dequantized); + const float rescaled = TfLiteRound(transformed * inverse_scale); + const int32_t quantized = + static_cast(rescaled + output_zero_point); + lut_uint8[static_cast(static_cast(val))] = static_cast( + static_cast(std::max(std::min(maxval, quantized), minval))); } - table[num - 1] = std::min( - std::max(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0); } -// generate INT16 LUT for function(), e.g., table exp(x) and 1/(1+x) used in -// softmax -// func - the function to build the LUT for (e.g exp(x)) -// min,max - table limits -// table - pointer to buffer -// num - number of elements in the LUT -inline void gen_lut(float (*func)(float), float min, float max, int16_t* table, - const int num) { - // size of table should equal to num + 1 - // last element only for slope calculation - float step = (max - min) / (num - 1); - float half_step = step / 2.0f; - for (int i = 0; i < num - 1; i++) { - float sample_val = TfLiteRound(func(min + i * step) * 32768.0f); - float midpoint_interp_val = - TfLiteRound((func(min + (i + 1) * step) * 32768.0f + - TfLiteRound(func(min + i * step) * 32768.0f)) / - 2.0f); - float midpoint_val = - TfLiteRound(func(min + i * step + half_step) * 32768.0f); - float midpoint_err = midpoint_interp_val - midpoint_val; - float bias = TfLiteRound(midpoint_err / 2.0f); - table[i] = std::min(std::max(sample_val - bias, -32768.0f), - 32767.0f); +// Keep floating-point type configurable for backward compatibility. float +// should be used for FloatT by default. 
+template +inline void LUTPopulateInt16(FloatT input_scale, int32_t input_zero_point, + FloatT output_scale, int32_t output_zero_point, + Func transform, const void* transform_params, + int16_t* lut) { + static_assert(std::is_floating_point::value, + "FloatT must be a floating-point type."); + const FloatT input_min = + input_scale * (std::numeric_limits::min() - input_zero_point); + const FloatT input_max = + input_scale * (std::numeric_limits::max() - input_zero_point); + const FloatT output_min = + output_scale * (std::numeric_limits::min() - output_zero_point); + const FloatT output_max = + output_scale * (std::numeric_limits::max() - output_zero_point); + + const int nb_steps = 512; + const FloatT step = (input_max - input_min) / nb_steps; + const FloatT half_step = step / 2; + const FloatT output_scaling_inv = + static_cast(std::numeric_limits::max() - + std::numeric_limits::min() + 1) / + (output_max - output_min); + const FloatT table_min = + static_cast(std::numeric_limits::min()); + const FloatT table_max = + static_cast(std::numeric_limits::max()); + + for (int i = 0; i < nb_steps; i++) { + const FloatT val = + LUTTransform(transform, transform_params, input_min + i * step); + const FloatT val_midpoint = LUTTransform( + transform, transform_params, input_min + i * step + half_step); + const FloatT val_next = LUTTransform(transform, transform_params, + input_min + (i + 1) * step); + + const FloatT sample_val = TfLiteRound(val * output_scaling_inv); + const FloatT midpoint_interp_val = + TfLiteRound((val_next * output_scaling_inv + + TfLiteRound(val * output_scaling_inv)) / + 2); + const FloatT midpoint_val = TfLiteRound(val_midpoint * output_scaling_inv); + const FloatT midpoint_err = midpoint_interp_val - midpoint_val; + const FloatT bias = TfLiteRound(midpoint_err / 2); + + lut[i] = static_cast(std::min( + std::max(sample_val - bias, table_min), table_max)); } - table[num - 1] = std::min( - std::max(TfLiteRound(func(max) * 32768.0f), -32768.0f), 32767.0f); + + lut[nb_steps] = static_cast(std::min( + std::max(TfLiteRound(LUTTransform( + transform, transform_params, input_max) * + output_scaling_inv), + table_min), + table_max)); } -// int16_t func table lookup, e.g., lookup exp() and 1/(1+x) used in softmax -inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) { - // 512 base value, lut[513] only for calculate slope - uint16_t index = static_cast(256 + (value >> 7)); +} // namespace detail + +template +inline typename std::enable_if::value || + std::is_same::value, + void>::type +LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale, + int32_t output_zero_point, float (*transform)(float), T* lut) { + detail::LUTPopulateInt8(input_scale, input_zero_point, output_scale, + output_zero_point, transform, nullptr, lut); +} + +template +inline typename std::enable_if::value || + std::is_same::value, + void>::type +LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale, + int32_t output_zero_point, float (*transform)(float, const void*), + const void* transform_params, T* lut) { + detail::LUTPopulateInt8(input_scale, input_zero_point, output_scale, + output_zero_point, transform, transform_params, lut); +} + +template +inline typename std::enable_if::value, void>::type +LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale, + int32_t output_zero_point, float (*transform)(float), T* lut) { + detail::LUTPopulateInt16(input_scale, input_zero_point, output_scale, + output_zero_point, transform, nullptr, 
lut); +} + +template +inline typename std::enable_if::value, void>::type +LUTPopulate(float input_scale, int32_t input_zero_point, float output_scale, + int32_t output_zero_point, float (*transform)(float, const void*), + const void* transform_params, T* lut) { + detail::LUTPopulateInt16(input_scale, input_zero_point, output_scale, + output_zero_point, transform, + transform_params, lut); +} + +// Deprecated, avoid usage and prefer the float version. Kept for +// backward-compatiblity. +template +inline typename std::enable_if::value, void>::type +LUTPopulate(double input_scale, int32_t input_zero_point, double output_scale, + int32_t output_zero_point, double (*transform)(double), T* lut) { + detail::LUTPopulateInt16(input_scale, input_zero_point, output_scale, + output_zero_point, transform, nullptr, lut); +} + +// The size of the LUT depends on the type of input. For uint8 and int8 inputs a +// simple 256 entries LUT is used. For int16 inputs the high 9 bits are used for +// indexing and the 7 remaining bits are used for interpolation. We thus use a +// 513-entries LUT for int16 cases, 512 for the 9-bit indexing and 1 extra entry +// to interpolate the last value. +template +constexpr int LUTSize() { + static_assert(std::is_same::value || + std::is_same::value || + std::is_same::value, + "Only LUTs with uint8, int8 or int16 inputs are supported."); + // As per c++11: constexpr methods cannot have more than one return statement. + return (std::is_same::value || std::is_same::value) + ? 256 + : 513; +} + +// int16_t -> int16_t table lookup with interpolation +// LUT must have 513 values +inline int16_t LUTLookup(int16_t value, const int16_t* lut) { + // 512 base values, lut[513] is only used to calculate the slope + const uint16_t index = static_cast(256 + (value >> 7)); assert(index < 512 && "LUT index out of range."); - int16_t offset = value & 0x7f; + const int16_t offset = value & 0x7f; - // base and slope are Q0.15 - int16_t base = lut[index]; - int16_t slope = lut[index + 1] - lut[index]; + // Base and slope are Q0.x + const int16_t base = lut[index]; + const int16_t slope = lut[index + 1] - lut[index]; - // Q0.15 * Q0.7 = Q0.22 - // Round and convert from Q0.22 to Q0.15 - int32_t delta = (static_cast(slope) * offset + 64) >> 7; + // Q0.x * Q0.7 = Q0.(x + 7) + // Round and convert from Q0.(x + 7) to Q0.x + const int delta = (slope * offset + 64) >> 7; // Q0.15 + Q0.15 - return base + delta; + return static_cast(base + delta); +} + +// int8_t -> int8_t table lookup without interpolation +// LUT must have 256 values +// LUTPopulate has ordered the LUT so that indexing it with an +// int8_t is just done by casting it to an uint8_t. +inline int8_t LUTLookup(int8_t value, const int8_t* lut) { + return lut[static_cast(value)]; +} + +// uint8_t -> uint8_t table lookup without interpolation +// LUT must have 256 values +inline uint8_t LUTLookup(uint8_t value, const uint8_t* lut) { + return lut[value]; } // Table of sigmoid(i/24) at 0.16 format - 256 elements. @@ -575,7 +808,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl( // InputIntegerBits - z_b_headroom - 0.25); const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp( FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam( - InputIntegerBits - z_a_headroom_plus_1, 31 - kAccumIntegerBits)), + static_cast(InputIntegerBits - z_a_headroom_plus_1), + 31 - kAccumIntegerBits)), shifted_quarter); // z_b is treated like z_a, but premultiplying by sqrt(0.5). 
@@ -585,7 +819,8 @@ log_x_for_x_greater_than_or_equal_to_1_impl( SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom); const FixedPointAccum z_b_pow_2_adj = SaturatingSub( FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam( - InputIntegerBits - z_b_headroom, 31 - kAccumIntegerBits)), + static_cast(InputIntegerBits - z_b_headroom), + 31 - kAccumIntegerBits)), shifted_quarter); const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw)); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h index 99b7e41..ede9cd6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h @@ -16,7 +16,6 @@ limitations under the License. #define TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_ #include -#include "edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" @@ -87,6 +86,16 @@ using int32 = std::int32_t; using uint32 = std::uint32_t; #endif // !defined(TF_LITE_STATIC_MEMORY) +// Allow for cross-compiler usage of function signatures - currently used for +// specifying named RUY profiler regions in templated methods. +#if defined(_MSC_VER) +#define TFLITE_PRETTY_FUNCTION __FUNCSIG__ +#elif defined(__GNUC__) +#define TFLITE_PRETTY_FUNCTION __PRETTY_FUNCTION__ +#else +#define TFLITE_PRETTY_FUNCTION __func__ +#endif + // TFLITE_DEPRECATED() // // Duplicated from absl/base/macros.h to avoid pulling in that library. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h index 5a32774..c97cc31 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h @@ -19,9 +19,8 @@ limitations under the License. namespace tflite { -#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ - (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \ - defined(__ZEPHYR__) +#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \ + (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(__ZEPHYR__) #define TF_LITE_GLOBAL_STD_PREFIX #else #define TF_LITE_GLOBAL_STD_PREFIX std diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/optimized/neon_check.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/optimized/neon_check.h index bbf745c..7df1129 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/optimized/neon_check.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/optimized/neon_check.h @@ -15,26 +15,6 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ -#if defined(__ARM_NEON__) || defined(__ARM_NEON) -#define USE_NEON -#include -#endif - -#if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON -#define USE_NEON -#include "NEON_2_SSE.h" -#endif - -// NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is -// defined, PortableSomeFunc(args) otherwise. -#ifdef USE_NEON -// Always use Neon code -#define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__) - -#else -// No NEON available: Use Portable code -#define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__) - -#endif // defined(USE_NEON) +// TFLM does not need to utilize any Neon optimizations. #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor.h index 667e918..a03e502 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor.h @@ -17,7 +17,7 @@ limitations under the License. #include -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" @@ -42,7 +42,7 @@ class VectorOfTensors { all_shape_ptr_.reserve(num_tensors); for (int i = 0; i < num_tensors; ++i) { - TfLiteTensor* t = &context.tensors[tensor_list.data[i]]; + TfLiteTensor* t = context.GetTensor(&context, tensor_list.data[i]); all_data_.push_back(GetTensorData(t)); all_shape_.push_back(GetTensorShape(t)); } @@ -81,7 +81,7 @@ class VectorOfQuantizedTensors : public VectorOfTensors { const TfLiteIntArray& tensor_list) : VectorOfTensors(context, tensor_list) { for (int i = 0; i < tensor_list.size; ++i) { - TfLiteTensor* t = &context.tensors[tensor_list.data[i]]; + TfLiteTensor* t = context.GetTensor(&context, tensor_list.data[i]); zero_point_.push_back(t->params.zero_point); scale_.push_back(t->params.scale); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.cc new file mode 100644 index 0000000..ec7ad76 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.cc @@ -0,0 +1,86 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_UTILS_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_UTILS_H_ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { + +// Not all backends support CpuBackendContext usage, so forward declare to avoid +// pulling in its implementation. Use of CpuBackendContext in method +// implementations is purely optional. +class CpuBackendContext; + +namespace tensor_utils { + +// Apply Rectified Linear to elements of a vector. +void ApplyReluToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::max(0.0f, vector[v]); + } +} + +// Apply Rectified Linear 1 (cap to [-1;1]) to elements of a vector +void ApplyRelu1ToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::max(-1.0f, std::min(vector[v], 1.0f)); + } +} + +// Apply Rectified Linear 6 (cap to [0;6]) to elements of a vector +void ApplyRelu6ToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::max(0.0f, std::min(vector[v], 6.0f)); + } +} + +// Apply signbit to elements of a vector +void ApplySignbitToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::signbit(vector[v]); + } +} + +void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements, + int8_t* dst_buffer) { + for (int i = 0; i < num_elements; i += 2) { + // Shift left first so that sign is properly extended when shifted right + dst_buffer[i] = static_cast(src_buffer[i / 2] << 4) >> 4; + // Break early if the tensor has odd length and the higher nibble should be + // ignored. + if (i + 1 == num_elements) break; + dst_buffer[i + 1] = static_cast(src_buffer[i / 2]) >> 4; + } +} + +} // namespace tensor_utils +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_UTILS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h new file mode 100644 index 0000000..5674e2e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h @@ -0,0 +1,623 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_ + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { + +// Not all backends support CpuBackendContext usage, so forward declare to avoid +// pulling in its implementation. Use of CpuBackendContext in method +// implementations is purely optional. +class CpuBackendContext; + +namespace tensor_utils { + +// Multiplies a matrix with a scalar and reduce the result on each row to a +// scalar. +// Parameters: +// - matrix: matrix of size n_row * n_col +// - scalar: the scalar that is multiplied to each element in the matrix +// - n_row: the row count of the matrix +// - n_col: the column count of the matrix +// - output: the 32bit output +// Note: We do not need saturation because the int8 * int8 is safe from overflow +// in (2^31-1) / (2^14) = 131072, which is bigger than the n_row. Non-zero +// initial output value is not exceptionally large. +void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar, + int32_t n_row, int32_t n_col, + int32_t* output); + +// Add another vector for each batch in the batch vector. +template +void VectorBatchVectorAdd(const T* vector, int v_size, int n_batch, + T* batch_vector) { + for (int b = 0; b < n_batch; b++) { + for (int i = 0; i < v_size; ++i) { + batch_vector[i] += vector[i]; + } + batch_vector += v_size; + } +} + +// Cwise product of two vectors. +template +inline void VectorVectorCwiseProduct(const T* vector1, const T* vector2, + int v_size, T* result) { + for (int v = 0; v < v_size; v++) { + *result++ = *vector1++ * *vector2++; + } +} + +// Cwise product of a vector and a batch-vector. +template +inline void VectorBatchVectorCwiseProduct(const T* vector, int v_size, + const T* batch_vector, int n_batch, + T* result) { + for (int b = 0; b < n_batch; b++) { + VectorVectorCwiseProduct(vector, batch_vector, v_size, result); + // Update the pointers. + result += v_size; + batch_vector += v_size; + } +} + +// Cwise product and accumulate of two vectors. Since it's a MAC operation, the +// assumption here is that result array is initialized to valid values. +template +inline void VectorVectorCwiseProductAccumulate(const T* __restrict__ vector1, + const T* __restrict__ vector2, + int v_size, + T* __restrict__ result) { + for (int v = 0; v < v_size; v++) { + *result++ += *vector1++ * *vector2++; + } +} + +// Cwise product and accumulate of a vector and a batch-vector. Since it's a MAC +// operation, the assumption here is that result array is initialized to valid +// values. +template +inline void VectorBatchVectorCwiseProductAccumulate(const T* vector, int v_size, + const T* batch_vector, + int n_batch, T* result) { + for (int b = 0; b < n_batch; b++) { + VectorVectorCwiseProductAccumulate(vector, batch_vector, v_size, result); + // Update the pointers. + result += v_size; + batch_vector += v_size; + } +} + +// Batch vector initialization with another vector. +template +void VectorBatchVectorAssign(const T* vector, int v_size, int n_batch, + T* batch_vector) { + for (int b = 0; b < n_batch; b++) { + std::copy_n(vector, v_size, batch_vector + b * v_size); + } +} + +// Checks if all entries of vector are zero for float. 
+bool IsZeroVector(const float* vector, int v_size); + +// Checks if all entries of vector are zero for int8. +bool IsZeroVector(const int8_t* vector, int v_size); + +// Quantizes a buffer of floating point values using a symmetric quantization +// (i.e. linear quantization without an offset) to 8-bit signed integers. +// It also outputs the range (min, max) of the floating point buffer, and the +// scaling factor used to quantize the values. +void SymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* min_value, + float* max_value, float* scaling_factor); + +// Quantizes a buffer of floating point values using a symmetric quantization +// (i.e. linear quantization without an offset) to 8-bit signed integers. +// It uses the range (min, max) provided to the function to calculate the +// appropriate scaling factor to quantize the values. +void SymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float min_value, + float max_value, float* scaling_factor); + +void AsymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* scaling_factor, + int32_t* offset); + +// Helper function to quantize floats. +// float_data_ptr input float vectors +// n_batch number of input vectors +// n_data size of a single input vector +// quantized_data_ptr (out) vector with quantized data +// scaling_factors (out) scaling factors (one per vector) +// zero_points (out) zero points (one per vector) +// do_asymmetric controls if the quantization should be asymmetric. +inline void BatchQuantizeFloats(const float* float_data_ptr, int n_batch, + int n_data, int8_t* quantized_data_ptr, + float* scaling_factors, int32_t* zero_points, + bool do_asymmetric) { + for (int b = 0; b < n_batch; ++b) { + const int offset = b * n_data; + if (do_asymmetric) { + tensor_utils::AsymmetricQuantizeFloats( + float_data_ptr + offset, n_data, quantized_data_ptr + offset, + &scaling_factors[b], &zero_points[b]); + } else { + float unused_min, unused_max; + tensor_utils::SymmetricQuantizeFloats( + float_data_ptr + offset, n_data, quantized_data_ptr + offset, + &unused_min, &unused_max, &scaling_factors[b]); + } + } +} + +// Multiplies a matrix by a "batched" vector (i.e. a matrix with a batch +// dimension composed by input vectors independent from each other). The result +// of the multiplication is accumulated to the passed result buffer. +// More specifically, for a matrix M of shape [n, i] and a batched-vector +// of shape [i, batch] it will first compute the product of shape [n, batch]. +// This product will be accumulated to the result buffer. +void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows, + int m_cols, const float* vector, + int n_batch, float* result); + +// Same as the function above, but the matrix is a sparse tensor with block +// pattern 1x4. +// This function assumes that m_cols is a multiple of the block size (4 in this +// case) so that there's no incomplete block. +void SparseMatrixBatchVectorMultiplyAccumulate1x4( + const float* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const float* __restrict__ vector, int n_batch, float* __restrict__ result); + +// Same as the function above, but the matrix is stored in block compressed +// sparse row format with block pattern 1x16 which consists of two arrays: +// 1. A matrix array stores non-zero blocks of the matrix in row major. +// 2. 
A ledger array stores nrows groups, one group per row. Each group starts +// with an integer representing the number of non-zero blocks for the +// corresponding row and follows with column indexes of the first element +// of each non-zero block. +// This function assumes that +// 1. m_cols is a multiple of 16 so that all blocks are full blocks. +// 2. m_cols < 254 * 16 so that block index can be represented by uint8. +void SparseMatrixBatchVectorMultiplyAccumulate( + const float* __restrict__ matrix, const uint8_t* __restrict__ ledger, + int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, + float* __restrict__ result); + +// Same as the function above, but for values quantized using symmetric +// quantization (e.g. by calling SymmetricQuantizeFloats). +// The passed scaling factors is a buffer of the quantization scaling factors +// that will be used to dequentize the products into the final result buffer. +// These scaling factors are the multiplication of the matrix scaling factor +// by the vector's scaling factor, one per batch (i.e. this allows quantizing +// each batch in the batch-vector matrix independently). +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, + const float* __restrict__ scaling_factors, int n_batch, + float* __restrict__ result); + +// Same as the function above except that vector values +// are quantized with asymmetric quantization per-batch and the matrix +// is quantized per row. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, + const float* __restrict__ scaling_factors, int n_batch, + float* __restrict__ result, const float* __restrict__ per_channel_scale, + const int32_t* __restrict__ input_offset); + +// Same as the function above, but the matrix is a sparse tensor with block +// pattern 1x16. +// This function assumes that m_cols is a multiple of the block size (16 in this +// case) so that there's no incomplete block. Also, it assumes all offsets of +// input, output and filter are zero. +void SparseMatrixBatchVectorMultiplyAccumulate1x16( + const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector, + int n_batch, const int32_t input_offset, const int32_t output_multiplier, + const int32_t output_shift, const int32_t output_offset, + const int32_t output_activation_min, const int32_t output_activation_max, + int8_t* __restrict__ result); + +// Same as the function above, but the matrix is stored in block compressed +// sparse row format with block pattern 1x16 which consists of two arrays: +// 1. A matrix array stores non-zero blocks of the matrix in row major. +// 2. A ledger array stores nrows groups, one group per row. Each group starts +// with an integer representing the number of non-zero blocks for the +// corresponding row followed by column index of the first element of +// each non-zero block. +// This function assumes that +// 1. m_cols is a multiple of 16 so that all blocks are full blocks. +// 2. m_cols < 254 * 16 so that block index can be represented by uint8. 
+void SparseMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const uint8_t* __restrict__ ledger, + const int m_rows, const int m_cols, const int8_t* __restrict__ vectors, + const float* __restrict__ scaling_factors, int n_batch, + float* __restrict__ result); + +// Same as the above 8, 8, 8 integer matmul except for the presence of zero +// point and non-accumulative. +// TODO(b/148688698): remove this function by folding zero point calculation in +// prepare() function. +void MatrixBatchVectorMultiply(const int8_t* input, int32_t input_zeropoint, + const int8_t* input_to_gate_weights, + int32_t input_to_gate_effective_scale_a, + int32_t input_to_gate_effective_scale_b, + int32_t n_batch, int32_t n_input, int32_t n_cell, + int8_t* gate_output, int8_t gate_output_zp); + +// Same as above but has 16 bit and 8 bit input and 8 bit output. +// Used in projection when hidden is 16bit. +void MatrixBatchVectorMultiply(const int16_t* hidden, + const int8_t* hidden_to_output_weights, + int32_t proj_effective_scale_a, + int32_t proj_effective_scale_b, + const int32_t* gate_bias, int32_t n_batch, + int32_t n_hidden, int32_t n_output, + int32_t output_zp, int8_t* proj_output); + +// Apply Layer Normalization (https://arxiv.org/abs/1607.06450) to a Quantized +// vector. +// Parameters: +// - input: batch vector of size n_batch * n_input; 16 bit. +// - layer_norm_weights: the quantized layer normalization weights. +// - bias: the bias for the layer normalization. +// - layer_norm_scale_a: multiplier for scale factor. +// - layer_norm_scale_b: shift for scale factor. +// - variance_limit: the guard to make sure the inverse does not overflow. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 16 bit output +void ApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights, + const int32_t* bias, int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, int32_t variance_limit, + int n_batch, int n_input, int16_t* output); + +// Same as above but the internal calculation is done in float. +void ApplyLayerNormFloat(const int16_t* input, + const int16_t* layer_norm_weights, + int32_t layer_norm_scale_a, int32_t layer_norm_scale_b, + const int32_t* bias, int n_batch, int n_input, + int16_t* output); + +// Apply Sigmoid to a quantized vector. +// Parameters: +// - input: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 16 bit output +// The input is in Q3.12 format and the output is in Q0.15 format. +void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input, + int16_t* output); + +// Same as above but the internal calcualtion is float. +void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input, + int16_t* output); + +// Apply Tanh to a quantized vector. +// Parameters: +// - integer_bits: the integer bits of the input. +// Currently supports 0, 1, 2, 3, 4, 5, 6. +// - input: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 16 bit output +// The input is in Qm.15-m format and the output is in Q0.15 format. +void ApplyTanh(int32_t intger_bits, const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output); + +// Apply Tanh to a quantized vector. Tbe internal calculation is in float. +// - Input has 2^(integer_bits) as scale. +// - Output has Q0.15 as scale. 
+void ApplyTanhFloat(const int16_t* input, int32_t n_batch, int32_t n_input, + int32_t integer_bits, int16_t* output); + +// Element-wise multiplication of two quantized vectors. +// Parameters: +// - input_1: batch vector of size n_batch * n_input; 16 bit. +// - input_2: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - shift: the shift needed to produce the output. +// - output: the 16 bit output of size n_batch * n_input. +// Output does not need to be initialized. +void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int shift, int16_t* output); + +// Element-wise multiplication of two quantized vectors. +// Parameters: +// - input_1: batch vector of size n_batch * n_input; 16 bit. +// - input_2: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - shift: the shift needed to produce the output. +// - output: the 8 bit output of size n_batch * n_input. +// Output does not need to be initialized. +void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int shift, int8_t* output); + +// Element-wise multiplication of two quantized vectors with rescaling. +// Parameters: +// - input_1: batch vector of size n_batch * n_input; 16 bit. +// - input_2: batch vector of size n_batch * n_input; 16 bit. +// - multiplier: the multiplier part of scale. +// - shift: the shift part of scale. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 8 bit output of size n_batch * n_input. +// - output_zp: the zero point of output. +// Output does not need to be initialized. +// Multiplier ("m") and shift ("s") are connected to scale ("s") with s = m * +// 2^(s - 31). +void CwiseMul(const int16_t* input_1, const int16_t* input_2, + int32_t multiplier, int32_t shift, int32_t n_batch, + int32_t n_input, int32_t output_zp, int8_t* output); + +// Element-wise saturating addition of two quantized vectors without rescaling. +// Parameters: +// - input_1: batch vector of size n_batch * n_input; 16 bit. +// - input_2: batch vector of size n_batch * n_input; 16 bit. +// - n_batch: the number of batches. +// - n_input: the size for input and output. +// - output: the 8 bit output of size n_batch * n_input. +// Output does not need to be initialized. +void CwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int16_t* output); + +// Element-wise in-place clipping of a vector. Overloaded for float, int16_t, +// int8_t. Parameters: +// - vector: vector of size v_size. +// - v_size: the size of the vector. +// - clipping_value: the value used for clipping. +void CwiseClipping(float* vector, const int v_size, const float clipping_value); +void CwiseClipping(int16_t* vector, const int v_size, + const int16_t clipping_value); +void CwiseClipping(int8_t* vector, const int v_size, + const int8_t clipping_value); + +// Dot product of two vectors. +float VectorVectorDotProduct(const float* vector1, const float* vector2, + int v_size); + +// Dot product of two batch vectors of size n_batch * v_size: +// vector1 = [x_1_1, x_1_2, ..., x_1_vsize, +// x_2_1, x_2_2, ..., x_2_vsize, +// ... +// x_nbatch_1,..., x_nbatch_vsize] +// vector2 = [y_1_1, y_1_2, ..., y_1_vsize, +// y_2_1, y_2_2, ..., y_2_vsize, +// ... 
+// y_nbatch_1,..., y_nbatch_vsize] +// Then result will be a vector of n_batch size starting from 'result': +// [x_1_1 * y_1_1 + x_1_2 * y_1_2 + ... + x_1_vsize * y_1_vsize, +// x_2_1 * y_2_1 + x_2_2 * y_2_2 + ... + x_2_vsize * y_2_vsize, +// ... +// x_nbatch_1 * y_nbatch_1 + ... + x_nbatch_vsize * y_nbatch_vsize] +template +inline void BatchVectorBatchVectorDotProduct(const T* vector1, const T* vector2, + int v_size, int n_batch, + T* result) { + for (int b = 0; b < n_batch; b++) { + result[b] = VectorVectorDotProduct(vector1, vector2, v_size); + vector1 += v_size; + vector2 += v_size; + } +} + +// Same as above but input is 16bit and output is 32bit. +void BatchVectorBatchVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, int v_size, + int n_batch, int32_t* result); + +// Same as above, but inputs are 16bit integer and output is 16bit integer. +void VectorBatchVectorCwiseProductAccumulate(const int16_t* vector, int v_size, + const int16_t* batch_vector, + int n_batch, int32_t multiplier, + int shift, int16_t* result); + +// Compute "1.0f - elements of vector" (used in CIFG). +void Sub1Vector(const float* vector, int v_size, float* result); + +// Compute "1.0f - elements of vector" (used in CIFG) for int16 input. +// "vector" has range [0, 32767] because it is the output of sigmoid function. +void Sub1Vector(const int16_t* vector, int v_size, int16_t* result); + +// Reduce-sum on a float input vector: +// input_vector: float pointer to input vector. +// output_vector: float pointer to vector. +// output_size: output vector size. +// reduction_size: number of consecutive elements from input vector which are +// added to get one element of output. +void ReductionSumVector(const float* input_vector, float* output_vector, + int output_size, int reduction_size); + +// Same as above but input/output is 32 bit integer. +void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector, + int output_size, int reduction_size); + +// Same as above but input is 8 bit integer. +void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector, + int output_size, int reduction_size); + +// Multiply all elements of vector with a scalar. +void VectorScalarMultiply(const int8_t* vector, int v_size, float scale, + float* result); + +// Layer norm for each batch. +void MeanStddevNormalization(const float* input_vector, float* output_vector, + int v_size, int n_batch); + +// Saturate Add with rescale on both inputs. +void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, + const int8_t* recurrent, int8_t recurrent_zp, + int32_t input_effective_scale_a, + int32_t input_effective_scale_b, + int32_t recurrent_effective_scale_a, + int32_t recurrent_effective_scale_b, int32_t n_batch, + int32_t n_cell, int16_t* output); + +// Same as the function above, but provide a scratch buffer for the +// int8 x int8 -> int32 and a CpuBackendContext for the accumulator +// computation. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, + const float* __restrict__ scaling_factors, int n_batch, + int32_t* __restrict__ scratch, float* __restrict__ result, + CpuBackendContext* __restrict__ context); + +// Same as the function above except that can make use of cached row sums. 
+void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, CpuBackendContext* context); + +// Same as the function above, but provides separate scaling factor for the +// matrix and the vectors. The scaling factors are multiplied in the +// scaling_factor_scratch buffer. +inline void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float matrix_scaling_factor, + const float* vector_scaling_factors, int n_batch, + float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, float* scaling_factor_scratch, + CpuBackendContext* context) { + for (int b = 0; b < n_batch; ++b) { + scaling_factor_scratch[b] = + vector_scaling_factors[b] * matrix_scaling_factor; + } + MatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors, + scaling_factor_scratch, n_batch, result, + per_channel_scale, input_offset, scratch, + row_sums, compute_row_sums, context); +} + +// Multiplies a matrix by a "batched" vector (i.e. a matrix with a batch +// dimension composed by input vectors independent from each other). The result +// of the multiplication is accumulated to the passed result buffer. +// More specifically, for a matrix M of shape [n, i] and a batched-vector +// of shape [i, batch] it will first compute the product of shape [n, batch]. +// This product will be accumulated to the result buffer, +// Parameters: +// - input: batch vector of size n_batch * n_input +// - bias: vector of size b_input +// - input_to_gate_weights: matrix of size n_input * n_output +// - multiplier: scalar +// - shift: scalar +// - n_batch: the batch size +// - n_input: the input size +// - n_output: the output size +// - output_zp: the zero point of the output. +// - scratch: batch vector of size n_batch * n_output +// - output: the 16 bit output +// Notes: +// - this is used for gate matmul: for non-cifg it is for input, forget, +// cell, output gates; for cifg, it is for forget, cell, output gates. +// - multiplier and shift combined gives the scale. +// - assumes input zero point is 0. +// - scratch is created for optimization purpose only. +// TODO(b/152066492): this can be removed if some future optimization +// work makes it unnecessary. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int16_t* output, CpuBackendContext* context); + +// Multiplies a matrix by a "batched" vector (i.e. a matrix with a batch +// dimension composed by input vectors independent from each other). The result +// of the multiplication is accumulated to the passed result buffer. +// More specifically, for a matrix M of shape [n, i] and a batched-vector +// of shape [i, batch] it will first compute the product of shape [n, batch]. 
+// This product will be accumulated to the result buffer, +// Parameters: +// - input: batch vector of size n_batch * n_input +// - bias: vector of size b_input +// - input_to_gate_weights: matrix of size n_input * n_output +// - multiplier: scalar +// - shift: scalar +// - n_batch: the batch size +// - n_input: the input size +// - n_output: the output size +// - output_zp: the zero point of the output. +// - scratch: batch vector of size n_batch * n_output +// - output: the 8 bit output +// Notes: +// - this is used for projection matmul. +// - multiplier and shift combined gives the scale. +// - assumes input zero point is 0. +// - scratch is created for optimization purpose only. +// TODO(b/152066492): this can be removed if some future optimization +// work makes it unnecessary. +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int8_t* output, CpuBackendContext* context); + +// Apply Rectified Linear to elements of a vector. +void ApplyReluToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result); + +// Apply Rectified Linear 1 (cap to [-1;1]) to elements of a vector +void ApplyRelu1ToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result); + +// Apply Rectified Linear 6 (cap to [0;6]) to elements of a vector +void ApplyRelu6ToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result); + +// Apply signbit to elements of a vector +void ApplySignbitToVector(const float* __restrict__ vector, int v_size, + float* __restrict__ result); + +// Unpack or inflate `src_buffer` by taking each element and splitting it as +// two elements into `dst_buffer`. +// Parameters: +// src_buffer : Densely packed buffer containing int4 values +// num_elements : Number of elements stored in the buffer. Note that this can +// be smaller than the size of `src_buffer` by 1 if it's odd, +// in which case the last nibble in `src_buffer` is ignored. +// This should be equal to the size of `dst_buffer`. +// dst_buffer : Buffer to unpack into. Should be allocated by the caller. +// Size should be at least `num_elements`. +// Notes: +// For example, given `src_buffer = {0x12, 0x34};`, calling this function +// will return `dst_buffer = {0x02, 0x01, 0x04, 0x03}`. 
+void UnpackDenseInt4IntoInt8(const int8_t* src_buffer, int num_elements, + int8_t* dst_buffer); + +} // namespace tensor_utils + +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_UTILS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.cc similarity index 93% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.cc index 88285f4..efd57db 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.cc @@ -52,6 +52,11 @@ constexpr uint32_t kFractionRoundingThreshold = 0x00200000; void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, int* shift) { +#if TFLITE_SINGLE_ROUNDING + // Single-rounding MultiplyByQuantizedMultiplier only supports positive + // multipliers. + // TFLITE_DCHECK(double_multiplier >= 0); +#endif if (double_multiplier == 0.) { *quantized_multiplier = 0; *shift = 0; @@ -65,10 +70,10 @@ void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, int64_t q_fixed = IntegerFrExp(double_multiplier, shift); #else // TFLITE_EMULATE_FLOAT const double q = std::frexp(double_multiplier, shift); - auto q_fixed = static_cast(TfLiteRound(q * (1ll << 31))); + auto q_fixed = static_cast(TfLiteRound(q * (1LL << 31))); #endif // TFLITE_EMULATE_FLOAT - TFLITE_CHECK(q_fixed <= (1ll << 31)); - if (q_fixed == (1ll << 31)) { + TFLITE_CHECK(q_fixed <= (1LL << 31)); + if (q_fixed == (1LL << 31)) { q_fixed /= 2; ++*shift; } @@ -87,6 +92,14 @@ void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier, *shift = 0; q_fixed = 0; } +#if TFLITE_SINGLE_ROUNDING + // Single-rounding MultiplyByQuantizedMultiplier doesn't support a shift > 30, + // saturate it. + if (*shift > 30) { + *shift = 30; + q_fixed = (1LL << 31) - 1; + } +#endif *quantized_multiplier = static_cast(q_fixed); } @@ -278,6 +291,12 @@ void PreprocessSoftmaxScaling(double beta, double input_scale, // result is double equivalent of Q0.31 (actually with more precision). Thus // this generates a Q(input_integer_bits).(31-input_integer_bits) // representation. 
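+  // Worked example (editor's illustration, values assumed, not from the
+  // original source): with beta = 1.0, input_scale = 1/256 and
+  // input_integer_bits = 5, the real multiplier is
+  //   1.0 * (1/256) * 2^(31 - 5) = 2^18 = 262144,
+  // far below the cap chosen just below, so it is passed on to
+  // QuantizeMultiplierGreaterThanOne unchanged.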
+#if TFLITE_SINGLE_ROUNDING + const double max_real_multiplier = (1LL << 30) - 1.0; +#else + const double max_real_multiplier = (1LL << 31) - 1.0; +#endif + #ifdef TFLITE_EMULATE_FLOAT const double input_beta = IntegerDoubleMultiply(beta, input_scale); int shift; @@ -285,12 +304,14 @@ void PreprocessSoftmaxScaling(double beta, double input_scale, shift += (31 - input_integer_bits); double input_beta_real_multiplier = DoubleFromFractionAndShift(fraction, shift); - if (IntegerDoubleCompare(input_beta_real_multiplier, (1ll << 31) - 1.0) > 0) { - input_beta_real_multiplier = (1ll << 31) - 1.0; + if (IntegerDoubleCompare(input_beta_real_multiplier, max_real_multiplier) > + 0) { + input_beta_real_multiplier = max_real_multiplier; } #else // TFLITE_EMULATE_FLOAT - const double input_beta_real_multiplier = std::min( - beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0); + const double input_beta_real_multiplier = + std::min(beta * input_scale * (1 << (31 - input_integer_bits)), + max_real_multiplier); #endif // TFLITE_EMULATE_FLOAT QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier, @@ -324,8 +345,8 @@ int CalculateInputRadius(int input_integer_bits, int input_left_shift, #else // TFLITE_EMULATE_FLOAT const double max_input_rescaled = 1.0 * ((1 << input_integer_bits) - 1) * - (1ll << (total_signed_bits - input_integer_bits)) / - (1ll << input_left_shift); + (1LL << (total_signed_bits - input_integer_bits)) / + (1LL << input_left_shift); // Tighten bound using floor. Suppose that we could use the exact value. // After scaling the difference, the result would be at the maximum. Thus we // must ensure that our value has lower magnitude. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h index ef664be..ada6696 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h @@ -15,6 +15,9 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_ +#include +#include + #include "edge-impulse-sdk/third_party/gemmlowp/fixedpoint/fixedpoint.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" @@ -27,25 +30,14 @@ inline void Add(const ArithmeticParams& params, const RuntimeShape& input1_shape, const T* input1_data, const RuntimeShape& input2_shape, const T* input2_data, const RuntimeShape& output_shape, T* output_data) { - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - output_data[i] = ActivationFunctionWithMinMax( - input1_data[i] + input2_data[i], params.quantized_activation_min, - params.quantized_activation_max); - } -} + T activation_min, activation_max; + GetActivationParams(params, &activation_min, &activation_max); -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const float* input1_data, - const RuntimeShape& input2_shape, const float* input2_data, - const RuntimeShape& output_shape, float* output_data) { const int flat_size = MatchingElementsSize(input1_shape, input2_shape, output_shape); - for (int i = 0; i < flat_size; i++) { - auto x = input1_data[i] + input2_data[i]; + for (int i = 0; i < flat_size; ++i) { output_data[i] = ActivationFunctionWithMinMax( - x, params.float_activation_min, params.float_activation_max); + input1_data[i] + input2_data[i], activation_min, activation_max); } } @@ -202,13 +194,12 @@ inline void Add(const ArithmeticParams& params, } } -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const float* input1_data, - const RuntimeShape& input2_shape, - const float* input2_data, - const RuntimeShape& output_shape, - float* output_data) { +template +inline typename std::enable_if::value, void>::type +BroadcastAdd4DSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const T* input1_data, + const RuntimeShape& input2_shape, const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { NdArrayDesc<4> desc1; NdArrayDesc<4> desc2; NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, @@ -216,45 +207,8 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params, const RuntimeShape extended_output_shape = RuntimeShape::ExtendedShape(4, output_shape); - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( - input1_data[SubscriptToIndex(desc1, b, y, x, c)] + - input2_data[SubscriptToIndex(desc2, b, y, x, c)], - params.float_activation_min, params.float_activation_max); - } - } - } - } -} - -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int32_t* input1_data, - const RuntimeShape& input2_shape, - const int32_t* input2_data, - const RuntimeShape& output_shape, - int32_t* output_data) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); + T activation_min, activation_max; + GetActivationParams(params, &activation_min, &activation_max); // In Tensorflow, the dimensions are canonically named (batch_number, row, // col, channel), with extents (batches, height, width, depth), with the @@ -272,11 +226,10 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params, for (int x = 0; x < extended_output_shape.Dims(2); ++x) { for (int c = 0; c < extended_output_shape.Dims(3); ++c) { output_data[Offset(extended_output_shape, b, y, x, c)] = - ActivationFunctionWithMinMax( + ActivationFunctionWithMinMax( input1_data[SubscriptToIndex(desc1, b, y, x, c)] + input2_data[SubscriptToIndex(desc2, b, y, x, c)], - params.quantized_activation_min, - params.quantized_activation_max); + activation_min, activation_max); } } } @@ -287,10 +240,11 @@ inline void BroadcastAdd4DSlow(const ArithmeticParams& params, // is 32-bit for both cases. The overflow does not happen due to the // choice of the shift (20 or 15, accordingly - see add.cc for more comments). template -inline void BroadcastAdd4DSlow( - const ArithmeticParams& params, const RuntimeShape& input1_shape, - const T* input1_data, const RuntimeShape& input2_shape, - const T* input2_data, const RuntimeShape& output_shape, T* output_data) { +inline typename std::enable_if::value, void>::type +BroadcastAdd4DSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const T* input1_data, + const RuntimeShape& input2_shape, const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { NdArrayDesc<4> desc1; NdArrayDesc<4> desc2; NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add_n.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add_n.h index dde1501..7b5424c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add_n.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add_n.h @@ -15,7 +15,10 @@ limitations under the License. 
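The add.h hunk above folds the separate float and int32 Add/BroadcastAdd4DSlow paths into one template whose only type-specific piece is fetching the activation bounds (GetActivationParams in the SDK). A standalone sketch of the same pattern, using my own minimal Params struct and GetBounds overloads rather than the SDK's types:

#include <algorithm>
#include <cstdio>

// Minimal stand-ins for the SDK's ArithmeticParams / activation helpers.
struct Params {
  float float_activation_min, float_activation_max;
  int quantized_activation_min, quantized_activation_max;
};
inline void GetBounds(const Params& p, float* lo, float* hi) {
  *lo = p.float_activation_min; *hi = p.float_activation_max;
}
inline void GetBounds(const Params& p, int* lo, int* hi) {
  *lo = p.quantized_activation_min; *hi = p.quantized_activation_max;
}

// One templated Add covers both element types: the only difference is which
// pair of bounds is used for the final clamp.
template <typename T>
void AddSketch(const Params& p, const T* a, const T* b, T* out, int n) {
  T lo, hi;
  GetBounds(p, &lo, &hi);
  for (int i = 0; i < n; ++i) {
    out[i] = std::min(hi, std::max(lo, static_cast<T>(a[i] + b[i])));
  }
}

int main() {
  Params p{0.f, 6.f, -128, 127};
  const float fa[] = {1.5f, 10.f}, fb[] = {2.f, 1.f};
  float fo[2];
  AddSketch(p, fa, fb, fo, 2);
  std::printf("%g %g\n", fo[0], fo[1]);  // 3.5 6 (second value clamped to ReLU6 range)
}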
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_ -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { namespace reference_ops { @@ -36,6 +39,47 @@ inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs, } } +inline void AddN(const ArithmeticParams& params, + const RuntimeShape& input_shape, const size_t num_inputs, + const int8_t* const* input_data, int8_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + // Input offset is negative input zero point. Activation tensors are + // asymmetric quantized so they span the full int8 range. + // All inputs should have same zero-point and scale, this is checked during + // Prepare stage. + TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits::min()); + TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits::max()); + + // All inputs and output should have the same shape, this is checked during + // Prepare stage. + const size_t size = input_shape.FlatSize(); + for (size_t i = 0; i < size; ++i) { + // accumulate in scaled_x before clamping to avoid overflow + const int32_t x = params.input1_offset; // x = 0 + const int32_t shifted_x = x * (1 << params.left_shift); + int32_t scaled_x = MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_x, params.input1_multiplier, params.input1_shift); + + for (size_t j = 0; j < num_inputs; ++j) { + const int32_t y = params.input1_offset + input_data[j][i]; + const int32_t shifted_y = y * (1 << params.left_shift); + int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_y, params.input1_multiplier, params.input1_shift); + scaled_x += scaled_y; + } + + const int32_t raw_output = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + scaled_x, params.output_multiplier, params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + output_data[i] = static_cast(clamped_output); + } +} + } // namespace reference_ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/batch_matmul.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/batch_matmul.h new file mode 100644 index 0000000..3695bad --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/batch_matmul.h @@ -0,0 +1,275 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
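The quantized AddN added above rescales every operand into a shared higher-precision domain, accumulates, then requantizes and clamps. The standalone sketch below shows the same flow in floating point (made-up scales and zero points, my own helper name); the SDK does the equivalent in fixed point via MultiplyByQuantizedMultiplierSmallerThanOneExp.

#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <vector>

// Each int8 tensor represents real = scale * (q - zero_point). To add N
// tensors that share one input quantization and emit a differently quantized
// output: dequantize, accumulate, requantize, clamp.
int8_t AddNElementSketch(const std::vector<int8_t>& q_inputs,
                         float in_scale, int in_zero_point,
                         float out_scale, int out_zero_point) {
  float acc = 0.f;
  for (int8_t q : q_inputs) acc += in_scale * (q - in_zero_point);
  const int32_t raw =
      static_cast<int32_t>(std::lround(acc / out_scale)) + out_zero_point;
  return static_cast<int8_t>(std::min(127, std::max(-128, raw)));
}

int main() {
  // Three inputs quantized with scale 0.1 and zero point -5; output scale 0.25.
  const int8_t r = AddNElementSketch({15, 25, 5}, 0.1f, -5, 0.25f, 0);
  std::printf("%d\n", r);  // real sum = 0.1 * (20 + 30 + 10) = 6.0 -> 6.0 / 0.25 = 24
}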
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { +namespace batch_matmul { + +// Determine which dimension is the broadcast dimension. +inline int broadcast_dim(int lhs_dim, int rhs_dim) { + if (lhs_dim == rhs_dim) return lhs_dim; + if (lhs_dim == 1) return rhs_dim; + TFLITE_DCHECK_EQ(rhs_dim, 1); + return lhs_dim; +} + +// Compute the "extent" for iterating on this dimension. +// If we are broadcasting, then don't advance (i.e return 0). +inline int extent(const RuntimeShape& shape, int x) { + if (shape.Dims(x) == 1) { + return 0; + } + int prod = 1; + for (int i = x + 1; i < shape.DimensionsCount(); ++i) { + prod *= shape.Dims(i); + } + return prod; +} + +} // namespace batch_matmul + +template +inline void BatchMatMul(const RuntimeShape& lhs_shape, const Ta* lhs_data, + const RuntimeShape& rhs_shape, const Tb* rhs_data, + const RuntimeShape& output_shape, Tout* output_data) { + const RuntimeShape extended_lhs_shape = + RuntimeShape::ExtendedShape(5, lhs_shape); + const RuntimeShape extended_rhs_shape = + RuntimeShape::ExtendedShape(5, rhs_shape); + + const int batch_dim0 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0)); + const int batch_dim1 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1)); + const int batch_dim2 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2)); + + const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0); + const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1); + const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2); + const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0); + const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1); + const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2); + + // Set params for each matrix multiply. 
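A small standalone sketch of the broadcasting helpers defined near the top of batch_matmul.h above: broadcast_dim picks the non-1 extent, and extent returns 0 for a broadcast dimension so the corresponding data pointer simply stops advancing. Shape3, BroadcastDim, and Extent are my own stand-ins for the SDK's RuntimeShape-based helpers.

#include <cassert>
#include <cstdio>

// A plain 3-element batch shape, outside the SDK's RuntimeShape type.
struct Shape3 { int dims[3]; };

int BroadcastDim(int lhs, int rhs) {
  if (lhs == rhs) return lhs;
  if (lhs == 1) return rhs;
  assert(rhs == 1);
  return lhs;
}

// Stride to advance per step of dimension x; 0 means "reuse the same data",
// i.e. this operand is broadcast along x.
int Extent(const Shape3& s, int x) {
  if (s.dims[x] == 1) return 0;
  int prod = 1;
  for (int i = x + 1; i < 3; ++i) prod *= s.dims[i];
  return prod;
}

int main() {
  Shape3 lhs{{2, 1, 4}}, rhs{{1, 3, 4}};
  std::printf("batch dims: %d %d %d\n",
              BroadcastDim(lhs.dims[0], rhs.dims[0]),   // 2
              BroadcastDim(lhs.dims[1], rhs.dims[1]),   // 3
              BroadcastDim(lhs.dims[2], rhs.dims[2]));  // 4
  std::printf("lhs extents: %d %d %d\n",
              Extent(lhs, 0), Extent(lhs, 1), Extent(lhs, 2));
  // lhs extents: 4 0 1 -> dimension 1 is broadcast, so its pointer never moves.
}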
+ const int lhs_rows = extended_lhs_shape.Dims(3); + const int rhs_cols = extended_rhs_shape.Dims(4); + const int accum_depth = extended_lhs_shape.Dims(4); + + for (int b0 = 0; b0 < batch_dim0; ++b0) { + const Ta* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); + const Tb* rhs_ptr0 = rhs_data + (b0 * rhs_ext0); + for (int b1 = 0; b1 < batch_dim1; ++b1) { + const Ta* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1; + const Tb* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1; + for (int b2 = 0; b2 < batch_dim2; ++b2) { + const Ta* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2; + const Tb* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2; + Tout* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) + + b1 * batch_dim2 + b2) * + lhs_rows * rhs_cols; + for (int j = 0; j < rhs_cols; ++j) { + for (int i = 0; i < lhs_rows; ++i) { + Tout total = 0; + for (int k = 0; k < accum_depth; ++k) { + total += static_cast(lhs_ptr2[accum_depth * i + k]) * + static_cast(rhs_ptr2[j * accum_depth + k]); + } + int idx = lhs_rows * j + i; + out_ptr[idx] = total; + } + } + } + } + } +} + +inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data, + const RuntimeShape& rhs_shape, const int8_t* rhs_data, + const float* scaling_factors, + const int32_t* input_offset, int32_t* row_sums, + const RuntimeShape& output_shape, float* output_data, + bool* compute_row_sums) { + const RuntimeShape extended_lhs_shape = + RuntimeShape::ExtendedShape(5, lhs_shape); + const RuntimeShape extended_rhs_shape = + RuntimeShape::ExtendedShape(5, rhs_shape); + + const int batch_dim0 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0)); + const int batch_dim1 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1)); + const int batch_dim2 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2)); + + const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0); + const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1); + const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2); + const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0); + const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1); + const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2); + + // Set params for each matrix multiply. + const int lhs_rows = extended_lhs_shape.Dims(3); + const int rhs_cols = extended_rhs_shape.Dims(4); + const int accum_depth = extended_lhs_shape.Dims(4); + + const int ioff_ext0 = rhs_ext0 == 0 ? 0 : rhs_cols; + const int ioff_ext1 = rhs_ext1 == 0 ? 0 : rhs_cols; + const int ioff_ext2 = rhs_ext2 == 0 ? 0 : rhs_cols; + const int woff_ext0 = lhs_ext0 == 0 ? 0 : lhs_rows; + const int woff_ext1 = lhs_ext1 == 0 ? 0 : lhs_rows; + const int woff_ext2 = lhs_ext2 == 0 ? 
0 : lhs_rows; + + if (!compute_row_sums || *compute_row_sums) { + int num_weights_matrices = 1; + for (int i = 1; i < extended_lhs_shape.DimensionsCount() - 2; ++i) { + num_weights_matrices *= extended_lhs_shape.Dims(i); + } + tensor_utils::ReductionSumVector( + lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth); + if (compute_row_sums) { + *compute_row_sums = false; + } + } + + for (int b0 = 0; b0 < batch_dim0; ++b0) { + const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); + const int8_t* rhs_ptr0 = rhs_data + (b0 * rhs_ext0); + const int32_t* ioff_ptr0 = input_offset + (b0 * ioff_ext0); + const float* scale_ptr0 = scaling_factors + (b0 * ioff_ext0); + const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0); + for (int b1 = 0; b1 < batch_dim1; ++b1) { + const int8_t* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1; + const int8_t* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1; + const int32_t* ioff_ptr1 = ioff_ptr0 + (b1 * ioff_ext1); + const float* scale_ptr1 = scale_ptr0 + (b1 * ioff_ext1); + const int32_t* woff_ptr1 = woff_ptr0 + (b1 * woff_ext1); + for (int b2 = 0; b2 < batch_dim2; ++b2) { + const int8_t* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2; + const int8_t* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2; + const int32_t* ioff_ptr2 = ioff_ptr1 + (b2 * ioff_ext2); + const float* scale_ptr2 = scale_ptr1 + (b2 * ioff_ext2); + const int32_t* woff_ptr2 = woff_ptr1 + (b2 * woff_ext2); + float* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) + + b1 * batch_dim2 + b2) * + lhs_rows * rhs_cols; + for (int j = 0; j < rhs_cols; ++j) { + const float batch_scaling_factor = scale_ptr2[j]; + const float batch_offset = static_cast(ioff_ptr2[j]); + for (int i = 0; i < lhs_rows; ++i) { + int32_t total = 0; + for (int k = 0; k < accum_depth; ++k) { + total += + lhs_ptr2[accum_depth * i + k] * rhs_ptr2[j * accum_depth + k]; + } + int32_t row_sum = woff_ptr2[i]; + total -= row_sum * batch_offset; + int idx = lhs_rows * j + i; + out_ptr[idx] += batch_scaling_factor * total; + } + } + } + } + } +} + +template +inline void BatchMatMul(const FullyConnectedParams& params, + const RuntimeShape& lhs_shape, const T* lhs_data, + const RuntimeShape& rhs_shape, const T* rhs_data, + const RuntimeShape& output_shape, T* output_data) { + const RuntimeShape extended_lhs_shape = + RuntimeShape::ExtendedShape(5, lhs_shape); + const RuntimeShape extended_rhs_shape = + RuntimeShape::ExtendedShape(5, rhs_shape); + + const int batch_dim0 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0)); + const int batch_dim1 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1)); + const int batch_dim2 = batch_matmul::broadcast_dim( + extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2)); + + const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0); + const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1); + const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2); + const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0); + const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1); + const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2); + + // Set params for each matrix multiply. 
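The hybrid int8 BatchMatMul overload above precomputes row sums of the weights so the activation zero point can be folded out once per dot product instead of once per element. Exact sign conventions vary between kernels; the standalone sketch below only demonstrates the underlying identity sum_k w_k * (x_k - offset) = sum_k w_k * x_k - offset * sum_k w_k.

#include <cstdint>
#include <cstdio>

int main() {
  const int8_t lhs[4] = {1, -2, 3, 4};      // weights row (zero point 0)
  const int8_t rhs[4] = {10, 20, 30, 40};   // quantized activations
  const int32_t offset = 5;                 // activation zero point

  int32_t direct = 0, raw = 0, row_sum = 0;
  for (int k = 0; k < 4; ++k) {
    direct += lhs[k] * (rhs[k] - offset);   // element-wise correction
    raw += lhs[k] * rhs[k];                 // plain dot product
    row_sum += lhs[k];                      // precomputable once per weights row
  }
  const int32_t corrected = raw - offset * row_sum;
  std::printf("%d == %d\n", direct, corrected);  // both print 190
}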
+ const int lhs_rows = extended_lhs_shape.Dims(3); + const int rhs_cols = extended_rhs_shape.Dims(4); + const int accum_depth = extended_lhs_shape.Dims(4); + + const int32_t input_offset = params.input_offset; + const int32_t filter_offset = params.weights_offset; + const int32_t output_offset = params.output_offset; + const int32_t output_multiplier = params.output_multiplier; + const int output_shift = params.output_shift; + const int32_t output_activation_min = params.quantized_activation_min; + const int32_t output_activation_max = params.quantized_activation_max; + TFLITE_DCHECK_LE(output_activation_min, output_activation_max); + + for (int b0 = 0; b0 < batch_dim0; ++b0) { + const T* lhs_ptr0 = lhs_data + (b0 * lhs_ext0); + const T* rhs_ptr0 = rhs_data + (b0 * rhs_ext0); + for (int b1 = 0; b1 < batch_dim1; ++b1) { + const T* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1; + const T* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1; + for (int b2 = 0; b2 < batch_dim2; ++b2) { + const T* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2; + const T* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2; + T* out_ptr = output_data + + ((b0 * batch_dim1 * batch_dim2) + b1 * batch_dim2 + b2) * + lhs_rows * rhs_cols; + + for (int j = 0; j < rhs_cols; ++j) { + for (int i = 0; i < lhs_rows; ++i) { + AccumT total = 0; + for (int k = 0; k < accum_depth; ++k) { + AccumT lhs_val = lhs_ptr2[accum_depth * i + k]; + AccumT rhs_val = rhs_ptr2[accum_depth * j + k]; + total += (lhs_val + filter_offset) * (rhs_val + input_offset); + } + int32_t total_scaled = MultiplyByQuantizedMultiplier( + total, output_multiplier, output_shift); + total_scaled += output_offset; + total_scaled = std::max(total_scaled, output_activation_min); + total_scaled = std::min(total_scaled, output_activation_max); + const int idx = lhs_rows * j + i; + out_ptr[idx] = static_cast(total_scaled); + } + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/binary_function.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/binary_function.h index a747931..66101d9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/binary_function.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/binary_function.h @@ -43,16 +43,27 @@ inline void BroadcastBinaryFunction4DSlow( NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, unextended_input2_shape, &desc1, &desc2); + const int* dims_data = + reinterpret_cast(output_shape.DimsDataUpTo5D()); for (int b = 0; b < output_shape.Dims(0); ++b) { + int out_idx_b = b * dims_data[1]; + int in_idx1_b = desc1.strides[0] * b; + int in_idx2_b = desc2.strides[0] * b; for (int y = 0; y < output_shape.Dims(1); ++y) { + int out_idx_y = (out_idx_b + y) * dims_data[2]; + int in_idx1_y = in_idx1_b + desc1.strides[1] * y; + int in_idx2_y = in_idx2_b + desc2.strides[1] * y; for (int x = 0; x < output_shape.Dims(2); ++x) { + int out_idx_x = (out_idx_y + x) * dims_data[3]; + int in1_idx = in_idx1_y + desc1.strides[2] * x; + int in2_idx = in_idx2_y + desc2.strides[2] * x; for (int c = 0; c < output_shape.Dims(3); ++c) { - auto out_idx = Offset(output_shape, b, y, x, c); - auto in1_idx = SubscriptToIndex(desc1, b, y, x, c); - auto in2_idx = SubscriptToIndex(desc2, b, y, x, c); + auto 
out_idx = out_idx_x + c; auto in1_val = input1_data[in1_idx]; auto in2_val = input2_data[in2_idx]; output_data[out_idx] = func(in1_val, in2_val); + in1_idx += desc1.strides[3]; + in2_idx += desc2.strides[3]; } } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_args.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_args.h new file mode 100644 index 0000000..341c418 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_args.h @@ -0,0 +1,56 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +void BroadcastArgs(const RuntimeShape& input1_shape, const T* input1_data, + const RuntimeShape& input2_shape, const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { + // Gets data at the backward index i of the shape tensor. Returns 1 if the + // index is out of range. + auto get_shape_data = [](const RuntimeShape& shape, const T* data, + int backward_idx) -> T { + int forward_idx = shape.FlatSize() - 1 - backward_idx; + if (forward_idx < 0) return 1; + return data[forward_idx]; + }; + + int output_num_elements = output_shape.FlatSize(); + for (int i = 0; i < output_num_elements; ++i) { + int backward_i = output_num_elements - 1 - i; + int shape1_i = get_shape_data(input1_shape, input1_data, i); + int shape2_i = get_shape_data(input2_shape, input2_data, i); + if (shape1_i == 1) { + output_data[backward_i] = shape2_i; + } else if (shape2_i == 1) { + output_data[backward_i] = shape1_i; + } else { + TFLITE_CHECK_EQ(shape1_i, shape2_i); + output_data[backward_i] = shape1_i; + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_ARGS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_to.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_to.h new file mode 100644 index 0000000..79756cb --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_to.h @@ -0,0 +1,97 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
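The new broadcast_args.h above computes the broadcast output shape from two shape tensors, walking both from the trailing dimension and treating missing leading dimensions as 1. A standalone sketch with std::vector shapes (the SDK version reads RuntimeShape-backed tensors and writes the result back to front); BroadcastShapes is my own name.

#include <algorithm>
#include <cassert>
#include <cstdio>
#include <vector>

// NumPy-style shape broadcasting: compare dimensions right-aligned; a 1 (or a
// missing leading dim) takes the other side's extent, otherwise they must match.
std::vector<int> BroadcastShapes(const std::vector<int>& a, const std::vector<int>& b) {
  const size_t n = std::max(a.size(), b.size());
  std::vector<int> out(n, 1);
  for (size_t back = 0; back < n; ++back) {
    const int da = back < a.size() ? a[a.size() - 1 - back] : 1;
    const int db = back < b.size() ? b[b.size() - 1 - back] : 1;
    if (da == 1) out[n - 1 - back] = db;
    else if (db == 1) out[n - 1 - back] = da;
    else { assert(da == db); out[n - 1 - back] = da; }
  }
  return out;
}

int main() {
  const auto shape = BroadcastShapes({8, 1, 6, 1}, {7, 1, 5});
  for (int d : shape) std::printf("%d ", d);  // 8 7 6 5
  std::printf("\n");
}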
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" + +namespace tflite { +namespace reference_ops { +template +void BroadcastImpl(const NdArrayDesc& input_desc, const char* input_data, + const NdArrayDesc& output_desc, char* output_data, + int indexes[N], int dim, const int last_broadcasting_dim, + const int type_size) { + // Copy data from input to output. + if (dim == last_broadcasting_dim) { + int copy_size = output_desc.strides[dim] * type_size; + const char* data_src = + input_data + SubscriptToIndex(input_desc, indexes) * type_size; + char* data_dst = + output_data + SubscriptToIndex(output_desc, indexes) * type_size; + for (int i = 0; i < output_desc.extents[dim]; ++i, data_dst += copy_size) { + memcpy(data_dst, data_src, copy_size); + } + return; + } + + // Recursive call to find the next broadcasting. + for (indexes[dim] = 0; indexes[dim] < input_desc.extents[dim]; + ++indexes[dim]) { + BroadcastImpl(input_desc, input_data, output_desc, output_data, indexes, + dim + 1, last_broadcasting_dim, type_size); + } + + // Duplicate data in output tensor. + indexes[dim] = 0; + if (input_desc.extents[dim] != output_desc.extents[dim]) { + int copy_size = output_desc.strides[dim] * type_size; + char* data_src = + output_data + SubscriptToIndex(output_desc, indexes) * type_size; + char* data_dst = data_src + copy_size; + for (int i = 1; i < output_desc.extents[dim]; ++i, data_dst += copy_size) { + memcpy(data_dst, data_src, copy_size); + } + } +} + +template +inline void BroadcastTo(const RuntimeShape& unextended_input_shape, + const char* input_data, + const RuntimeShape& unextended_output_shape, + char* output_data, TfLiteType data_type) { + NdArrayDesc input_desc; + NdArrayDesc output_desc; + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_input_shape), + &input_desc); + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape), + &output_desc); + + // Get the last dimension has broadcasting. At this dimension, the data is + // copied from input tensor to output tensor. + int last_broadcast_dim = -1; + for (int i = N - 1; i >= 0; --i) { + if (input_desc.extents[i] != output_desc.extents[i]) { + last_broadcast_dim = i; + break; + } + } + + // If non-broadcasting, just copy data from input to output tensor. + if (last_broadcast_dim == -1) { + memcpy(output_data, input_data, + unextended_input_shape.FlatSize() * TfLiteTypeGetSize(data_type)); + return; + } + + // Broadcasting using memcpy. 
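The BroadcastTo code above locates the innermost dimension that actually broadcasts and then duplicates whole contiguous runs with memcpy instead of copying element by element. A compact standalone sketch of that idea for a 1x4 row broadcast into a 3x4 output:

#include <cstdio>
#include <cstring>

int main() {
  // Broadcasting a 1x4 row into 3x4: the last broadcasting dimension is dim 0,
  // so each copy moves a whole contiguous row of 4 elements at once.
  const float row[4] = {1.f, 2.f, 3.f, 4.f};
  float out[3][4];

  std::memcpy(out[0], row, sizeof(row));       // first copy from the input
  for (int i = 1; i < 3; ++i) {
    std::memcpy(out[i], out[0], sizeof(row));  // then duplicate inside the output
  }

  for (int i = 0; i < 3; ++i) {
    for (int j = 0; j < 4; ++j) std::printf("%g ", out[i][j]);
    std::printf("\n");
  }
}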
+ int indexes[N] = {0}; + BroadcastImpl(input_desc, input_data, output_desc, output_data, indexes, 0, + last_broadcast_dim, TfLiteTypeGetSize(data_type)); +} +} // namespace reference_ops +} // namespace tflite +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/comparisons.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/comparisons.h index cd2c741..f3d6bcc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/comparisons.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/comparisons.h @@ -15,7 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_ -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/concatenation.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/concatenation.h index 4f3637e..9d03523 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/concatenation.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/concatenation.h @@ -16,6 +16,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h index a9b73d2..a244ec0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" @@ -43,7 +45,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, (void)im2col_data; // only used in optimized code. (void)im2col_shape; // only used in optimized code. 
const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -52,14 +54,20 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); + for (int batch = 0; batch < batches; ++batch) { for (int out_y = 0; out_y < output_height; ++out_y) { const int in_y_origin = (out_y * stride_height) - pad_height; for (int out_x = 0; out_x < output_width; ++out_x) { const int in_x_origin = (out_x * stride_width) - pad_width; for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + auto group = out_channel / filters_per_group; float total = 0.f; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + dilation_height_factor * filter_y; @@ -74,10 +82,11 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, if (!is_point_inside_image) { continue; } - - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - float input_value = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { + float input_value = + input_data[Offset(input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; float filter_value = filter_data[Offset( filter_shape, out_channel, filter_y, filter_x, in_channel)]; total += (input_value * filter_value); @@ -126,7 +135,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -135,6 +144,10 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); for (int batch = 0; batch < batches; ++batch) { @@ -143,6 +156,7 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, for (int out_x = 0; out_x < output_width; ++out_x) { const int in_x_origin = (out_x * stride_width) - pad_width; for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + auto group = out_channel / filters_per_group; int32_t acc = 0; for (int 
filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + dilation_height_factor * filter_y; @@ -158,9 +172,11 @@ inline void Conv(const ConvParams& params, const RuntimeShape& input_shape, continue; } - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { + int32_t input_val = + input_data[Offset(input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; int32_t filter_val = filter_data[Offset( filter_shape, out_channel, filter_y, filter_x, in_channel)]; acc += @@ -206,7 +222,7 @@ inline void HybridConvPerChannel( TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -215,18 +231,24 @@ inline void HybridConvPerChannel( const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); for (int batch = 0; batch < batches; ++batch) { for (int out_y = 0; out_y < output_height; ++out_y) { for (int out_x = 0; out_x < output_width; ++out_x) { for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + auto group = out_channel / filters_per_group; const int in_x_origin = (out_x * stride_width) - pad_width; const int in_y_origin = (out_y * stride_height) - pad_height; int32_t acc = 0; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { for (int filter_x = 0; filter_x < filter_width; ++filter_x) { - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { const int in_x = in_x_origin + dilation_width_factor * filter_x; const int in_y = in_y_origin + dilation_height_factor * filter_y; @@ -235,7 +257,8 @@ inline void HybridConvPerChannel( if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) && (in_y < input_height)) { int32_t input_val = input_data[Offset( - input_shape, batch, in_y, in_x, in_channel)]; + input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; int32_t filter_val = filter_data[Offset(filter_shape, out_channel, filter_y, filter_x, in_channel)]; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/cumsum.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/cumsum.h new file mode 100644 index 0000000..56698a0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/cumsum.h @@ -0,0 +1,175 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
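Regarding the grouped-convolution changes to reference/conv.h above: the filter's input depth may now be a fraction of the tensor's input depth, out_channel / filters_per_group selects the group, and input channels are read at group * filter_input_depth + in_channel. A standalone sketch of just that index bookkeeping (example sizes are made up):

#include <cassert>
#include <cstdio>

int main() {
  // Example: 8 input channels, filters carry 4 input channels each -> 2 groups.
  const int input_depth = 8;
  const int filter_input_depth = 4;
  const int output_depth = 6;  // 3 output channels per group

  assert(input_depth % filter_input_depth == 0);
  const int groups = input_depth / filter_input_depth;  // 2
  const int filters_per_group = output_depth / groups;  // 3

  for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
    const int group = out_channel / filters_per_group;
    const int first_in = group * filter_input_depth;
    std::printf("out_channel %d -> group %d reads input channels [%d, %d)\n",
                out_channel, group, first_in, first_in + filter_input_depth);
  }
  // Standard (non-grouped) convolution is the groups == 1 special case.
}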
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_ + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" + +namespace tflite { +namespace reference_ops { + +template +inline void CumSum(const T* input_data, const RuntimeShape& shape, int32_t axis, + bool exclusive, bool reverse, T* output_data) { + const int32_t rank = shape.DimensionsCount(); + TFLITE_DCHECK_GE(rank, 1); + TFLITE_DCHECK_GE(axis, 0); + TFLITE_DCHECK_LT(axis, rank); + + size_t inner = 1; + size_t outer = 1; + size_t depth = 1; + for (int32_t i = 0; i < rank; i++) { + if (i < axis) + inner *= shape.Dims(i); + else if (i > axis) + outer *= shape.Dims(i); + else + depth = shape.Dims(i); + } + + for (size_t outer_index = 0; outer_index < outer; outer_index++) { + size_t outer_index_adj; + if (reverse) + outer_index_adj = (outer - 1) - outer_index; + else + outer_index_adj = outer_index; + for (size_t inner_index = 0; inner_index < inner; inner_index++) { + T accumulator = 0; + size_t inner_index_adj; + if (reverse) + inner_index_adj = (inner - 1) - inner_index; + else + inner_index_adj = inner_index; + for (size_t depth_index = 0; depth_index < depth; depth_index++) { + size_t depth_index_adj; + if (reverse) + depth_index_adj = (depth - 1) - depth_index; + else + depth_index_adj = depth_index; + + size_t index = outer_index_adj; + index += inner_index_adj * depth * outer; + index += depth_index_adj * outer; + + if (exclusive) { + output_data[index] = accumulator; + accumulator += input_data[index]; + } else { + accumulator += input_data[index]; + output_data[index] = accumulator; + } + } + } + } +} + +// +// Quantized INT8 CUMSUM +// +inline void CumSum(const ArithmeticParams& params, const int8_t* input_data, + const RuntimeShape& shape, int32_t axis, bool exclusive, + bool reverse, int8_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + // Input offset is negative input zero point. Activation tensors are + // asymmetric quantized so they span the full int8 range. + // All inputs should have same zero-point and scale, this is checked during + // Prepare stage. 
+ TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits::min()); + TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits::max()); + + const int32_t rank = shape.DimensionsCount(); + TFLITE_DCHECK_GE(rank, 1); + TFLITE_DCHECK_GE(axis, 0); + TFLITE_DCHECK_LT(axis, rank); + + size_t inner = 1; + size_t outer = 1; + size_t depth = 1; + for (int32_t i = 0; i < rank; i++) { + if (i < axis) + inner *= shape.Dims(i); + else if (i > axis) + outer *= shape.Dims(i); + else + depth = shape.Dims(i); + } + + for (size_t outer_index = 0; outer_index < outer; outer_index++) { + size_t outer_index_adj; + if (reverse) + outer_index_adj = (outer - 1) - outer_index; + else + outer_index_adj = outer_index; + for (size_t inner_index = 0; inner_index < inner; inner_index++) { + int32_t accumulator = params.input1_offset; // accumulator = 0 + accumulator *= (1 << params.left_shift); + accumulator = MultiplyByQuantizedMultiplierSmallerThanOneExp( + accumulator, params.input1_multiplier, params.input1_shift); + + size_t inner_index_adj; + if (reverse) + inner_index_adj = (inner - 1) - inner_index; + else + inner_index_adj = inner_index; + + for (size_t depth_index = 0; depth_index < depth; depth_index++) { + size_t depth_index_adj; + if (reverse) + depth_index_adj = (depth - 1) - depth_index; + else + depth_index_adj = depth_index; + + size_t index = outer_index_adj; + index += inner_index_adj * depth * outer; + index += depth_index_adj * outer; + + const int32_t y = params.input1_offset + input_data[index]; + const int32_t shifted_y = y * (1 << params.left_shift); + const int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_y, params.input1_multiplier, params.input1_shift); + + int32_t scaled_output; + if (exclusive) { + scaled_output = accumulator; + accumulator += scaled_y; + } else { + accumulator += scaled_y; + scaled_output = accumulator; + } + + const int32_t raw_output = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + scaled_output, params.output_multiplier, params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + output_data[index] = static_cast(clamped_output); + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depth_to_space.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depth_to_space.h new file mode 100644 index 0000000..41b2679 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depth_to_space.h @@ -0,0 +1,79 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
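For the CumSum kernels added above, a standalone sketch of how the exclusive and reverse flags change a 1-D scan; the quantized overload layers the same rescale-and-clamp steps seen in the AddN hunk on top of this. CumSum1D is my own reduced helper, not the SDK's N-D version.

#include <cstdio>

// 1-D cumulative sum with the same exclusive / reverse semantics as the
// reference kernel: exclusive emits the sum *before* adding the current
// element, reverse walks from the end of the axis.
void CumSum1D(const float* in, int n, bool exclusive, bool reverse, float* out) {
  float acc = 0.f;
  for (int step = 0; step < n; ++step) {
    const int i = reverse ? (n - 1 - step) : step;
    if (exclusive) { out[i] = acc; acc += in[i]; }
    else           { acc += in[i]; out[i] = acc; }
  }
}

int main() {
  const float x[4] = {1, 2, 3, 4};
  float y[4];
  CumSum1D(x, 4, /*exclusive=*/false, /*reverse=*/false, y);  // 1 3 6 10
  CumSum1D(x, 4, /*exclusive=*/true,  /*reverse=*/false, y);  // 0 1 3 6
  CumSum1D(x, 4, /*exclusive=*/false, /*reverse=*/true,  y);  // 10 9 7 4
  for (float v : y) std::printf("%g ", v);
  std::printf("\n");
}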
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params, + const RuntimeShape& unextended_input_shape, + const T* input_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + const int input_depth = input_shape.Dims(3); + const int input_width = input_shape.Dims(2); + const int input_height = input_shape.Dims(1); + const int input_batch = input_shape.Dims(0); + + const int output_depth = output_shape.Dims(3); + const int output_width = output_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_batch = output_shape.Dims(0); + + const int32_t block_size = op_params.block_size; + + TFLITE_DCHECK_EQ(input_width * block_size, output_width); + TFLITE_DCHECK_EQ(input_height * block_size, output_height); + TFLITE_DCHECK_EQ(input_depth, output_depth * block_size * block_size); + TFLITE_DCHECK_EQ(input_batch, output_batch); + + for (int out_b = 0; out_b < output_batch; ++out_b) { + for (int out_h = 0; out_h < output_height; ++out_h) { + for (int out_w = 0; out_w < output_width; ++out_w) { + for (int out_d = 0; out_d < output_depth; ++out_d) { + const int in_d = + out_d + ((out_h % block_size) * block_size + out_w % block_size) * + output_depth; + + const int in_w = out_w / block_size; + const int in_h = out_h / block_size; + const int in_b = out_b; + + const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d); + const int output_index = + Offset(output_shape, out_b, out_h, out_w, out_d); + + output_data[output_index] = input_data[input_index]; + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h index dd418ce..4dc5245 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h @@ -68,6 +68,27 @@ inline int32_t DepthwiseConvRound(int32_t x, int32_t quantized_multiplier, return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); } +// Single-rounding MultiplyByQuantizedMultiplier +#if TFLITE_SINGLE_ROUNDING +template <> +inline int32_t DepthwiseConvRound( + int32_t x, int32_t quantized_multiplier, int shift) { + using gemmlowp::RoundingDivideByPOT; + using gemmlowp::SaturatingRoundingDoublingHighMul; + int left_shift = shift > 0 ? shift : 0; + int right_shift = shift > 0 ? 
0 : -shift; + return RoundingDivideByPOT(SaturatingRoundingDoublingHighMul( + x * (1 << left_shift), quantized_multiplier), + right_shift); +} + +template <> +inline int32_t DepthwiseConvRound( + int32_t x, int32_t quantized_multiplier, int shift) { + return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift); +} +// Double-rounding MultiplyByQuantizedMultiplier +#else template <> inline int32_t DepthwiseConvRound( int32_t x, int32_t quantized_multiplier, int shift) { @@ -86,6 +107,7 @@ inline int32_t DepthwiseConvRound( rounding_offset) >> right_shift; } +#endif // TFLITE_SINGLE_ROUNDING template struct DepthwiseConvBasicKernel { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/div.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/div.h index a38a503..71bbeaf 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/div.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/div.h @@ -48,13 +48,17 @@ inline void DivElementwise(int size, const ArithmeticParams& params, DivCheckArithmeticParams(params); for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; + int32_t input1_val = params.input1_offset + input1_data[i]; + int32_t input2_val = params.input2_offset + input2_data[i]; TFLITE_DCHECK_NE(input2_val, 0); + if (input2_val < 0) { + // Invert signs to avoid a negative input2_val as input2_inv needs to be + // positive to be used as multiplier of MultiplyByQuantizedMultiplier. + input1_val = -input1_val; + input2_val = -input2_val; + } int recip_shift; - const int32_t input2_inv = - (input2_val > 0) ? GetReciprocal(input2_val, 31, &recip_shift) - : -GetReciprocal(-input2_val, 31, &recip_shift); + const int32_t input2_inv = GetReciprocal(input2_val, 31, &recip_shift); const int headroom = CountLeadingSignBits(input1_val); const int32_t unscaled_quotient = MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv, @@ -116,15 +120,19 @@ inline void BroadcastDivSlowQuantized( DivCheckArithmeticParams(params); auto div_func = [&](int indexes[N]) { - const int32_t input1_val = + int32_t input1_val = params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; - const int32_t input2_val = + int32_t input2_val = params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; TFLITE_DCHECK_NE(input2_val, 0); + if (input2_val < 0) { + // Invert signs to avoid a negative input2_val as input2_inv needs to be + // positive to be used as multiplier of MultiplyByQuantizedMultiplier. + input1_val = -input1_val; + input2_val = -input2_val; + } int recip_shift; - const int32_t input2_inv = - (input2_val > 0) ? 
GetReciprocal(input2_val, 31, &recip_shift) - : -GetReciprocal(-input2_val, 31, &recip_shift); + const int32_t input2_inv = GetReciprocal(input2_val, 31, &recip_shift); const int headroom = CountLeadingSignBits(input1_val); const int32_t unscaled_quotient = MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_div.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_div.h new file mode 100644 index 0000000..dbda3f8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_div.h @@ -0,0 +1,35 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +T FloorDiv(T input1, T input2) { + return std::floor(std::divides()(static_cast(input1), + static_cast(input2))); +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_mod.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_mod.h new file mode 100644 index 0000000..20ce18b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_mod.h @@ -0,0 +1,44 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
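floor_div.h above defines FloorDiv as std::floor of the true quotient, which differs from C++'s truncating integer division when the operands have opposite signs. A small standalone sketch of the difference (FloorDivSketch is my own name):

#include <cmath>
#include <cstdio>

// Floor division: round the true quotient toward negative infinity, matching
// the reference FloorDiv template, instead of C++'s round-toward-zero '/'.
int FloorDivSketch(int a, int b) {
  return static_cast<int>(std::floor(static_cast<double>(a) / static_cast<double>(b)));
}

int main() {
  std::printf("%d %d\n", -7 / 2, FloorDivSketch(-7, 2));  // -3 vs -4
  std::printf("%d %d\n",  7 / 2, FloorDivSketch(7, 2));   //  3 vs  3
}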
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_ + +#include +#include + +namespace tflite { + +namespace reference_ops { + +template +T FloorMod(T input1, T input2) { + struct FloatMod { + float operator()(const float lhs, const float rhs) const { + return std::fmod(lhs, rhs); + } + }; + using ModFunc = typename std::conditional::value, + std::modulus, FloatMod>::type; + ModFunc mod_func; + T trunc_mod = mod_func(input1, input2); + return (trunc_mod != 0) && ((input2 < 0) != (trunc_mod < 0)) + ? (trunc_mod + input2) + : trunc_mod; +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h index adb4ea8..6cd8f66 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h @@ -15,6 +15,9 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_ +#include + +#include "edge-impulse-sdk/third_party/ruy/ruy/profiler/instrumentation.h" // from @ruy #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/hard_swish.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/hard_swish.h index 30e18af..c427205 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/hard_swish.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/hard_swish.h @@ -12,8 +12,10 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. 
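Likewise, the FloorMod template above fixes up the truncated remainder so the result takes the divisor's sign, matching Python-style %. A worked standalone sketch (FloorModSketch is my own name):

#include <cstdio>

// Same adjustment the FloorMod template applies: if the truncated remainder is
// nonzero and its sign disagrees with the divisor's, shift it by one divisor.
int FloorModSketch(int a, int b) {
  const int trunc_mod = a % b;  // C++ truncated remainder
  return (trunc_mod != 0 && ((b < 0) != (trunc_mod < 0))) ? trunc_mod + b : trunc_mod;
}

int main() {
  std::printf("%d %d\n", -7 % 3, FloorModSketch(-7, 3));   // -1 vs  2
  std::printf("%d %d\n",  7 % -3, FloorModSketch(7, -3));  //  1 vs -2
}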
==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ + +#include #include "edge-impulse-sdk/third_party/ruy/ruy/profiler/instrumentation.h" // from @ruy #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" @@ -23,9 +25,9 @@ namespace tflite { namespace reference_ops { inline int16_t SaturatingLeftShift(int16_t value, int amount) { - int32_t result = static_cast(value) * (1 << amount); - result = std::min(result, std::numeric_limits::max()); - result = std::max(result, std::numeric_limits::min()); + int64_t result = static_cast(value) * (1 << amount); + result = std::min(result, std::numeric_limits::max()); + result = std::max(result, std::numeric_limits::min()); return result; } @@ -163,4 +165,4 @@ inline void HardSwish(const HardSwishParams& params, } // namespace reference_ops } // namespace tflite -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_ +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_HARD_SWISH_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h index c334fe4..12064e3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h @@ -15,6 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ +#include #include #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h'' b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h'' deleted file mode 100644 index c334fe4..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h'' +++ /dev/null @@ -1,144 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
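The hard_swish.h hunk above renames the include guard and, more importantly, widens SaturatingLeftShift's intermediate product to 64 bits before clamping to the int16 range, since a 32-bit product of an int16 value and (1 << amount) can overflow for large shift amounts. A hedged sketch of that saturating shift (SaturatingLeftShiftSketch is an illustrative name, not the patched function):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <limits>

// Compute the shifted product in 64 bits so it cannot overflow, then clamp to
// the int16 range before narrowing back down.
inline int16_t SaturatingLeftShiftSketch(int16_t value, int amount) {
  int64_t result = static_cast<int64_t>(value) * (int64_t{1} << amount);
  result = std::min<int64_t>(result, std::numeric_limits<int16_t>::max());
  result = std::max<int64_t>(result, std::numeric_limits<int16_t>::min());
  return static_cast<int16_t>(result);
}

int main() {
  assert(SaturatingLeftShiftSketch(3, 2) == 12);           // no saturation
  assert(SaturatingLeftShiftSketch(30000, 4) == 32767);    // clamps high
  assert(SaturatingLeftShiftSketch(-30000, 4) == -32768);  // clamps low
  // 30000 * (1 << 17) already exceeds INT32_MAX; the 64-bit intermediate
  // keeps this well defined and the result still saturates.
  assert(SaturatingLeftShiftSketch(30000, 17) == 32767);
  return 0;
}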
-==============================================================================*/ -#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ -#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ - -#include - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" - -namespace tflite { -namespace reference_integer_ops { - -inline void CheckArithmeticParams(const ArithmeticParams& params) { - TFLITE_DCHECK_LE(params.quantized_activation_min, - params.quantized_activation_max); - // Input offset is negative input zero point. Activation tensors are - // asymmetric quantized so they span the full int8 range. - TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits::min()); - TFLITE_DCHECK_GE(-params.input2_offset, std::numeric_limits::min()); - TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits::max()); - TFLITE_DCHECK_LE(-params.input2_offset, std::numeric_limits::max()); -} - -inline void ElementWise( - int size, const ArithmeticParams& params, const int8_t* input1_data, - const int8_t* input2_data, int8_t* output_data, - void (*check_arithmetic_params)(const ArithmeticParams&), - int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) { - CheckArithmeticParams(params); - for (int i = 0; i < size; ++i) { - output_data[i] = binary_func(input1_data[i], input2_data[i], params); - } -} - -inline void BroadcastBinaryFunction4DSlow( - const ArithmeticParams& params, const RuntimeShape& input1_shape, - const int8_t* input1_data, const RuntimeShape& input2_shape, - const int8_t* input2_data, const RuntimeShape& output_shape, - int8_t* output_data, - void (*check_arithmetic_params)(const ArithmeticParams&), - int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) { - NdArrayDesc<4> desc1; - NdArrayDesc<4> desc2; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - const RuntimeShape extended_output_shape = - RuntimeShape::ExtendedShape(4, output_shape); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- for (int b = 0; b < extended_output_shape.Dims(0); ++b) { - for (int y = 0; y < extended_output_shape.Dims(1); ++y) { - for (int x = 0; x < extended_output_shape.Dims(2); ++x) { - for (int c = 0; c < extended_output_shape.Dims(3); ++c) { - output_data[Offset(extended_output_shape, b, y, x, c)] = binary_func( - input1_data[SubscriptToIndex(desc1, b, y, x, c)], - input2_data[SubscriptToIndex(desc2, b, y, x, c)], params); - } - } - } - } -} - -inline int8_t AddFunc(int8_t x, int8_t y, const ArithmeticParams& params) { - const int32_t input1_val = params.input1_offset + x; - const int32_t input2_val = params.input2_offset + y; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sum = scaled_input1_val + scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sum, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - return static_cast(clamped_output); -} - -// Element-wise add that can often be used for inner loop of broadcast add as -// well as the non-broadcast add. -inline void AddElementwise(int size, const ArithmeticParams& params, - const int8_t* input1_data, const int8_t* input2_data, - int8_t* output_data) { - ElementWise(size, params, input1_data, input2_data, output_data, - CheckArithmeticParams, AddFunc); -} - -inline void Add(const ArithmeticParams& params, - const RuntimeShape& input1_shape, const int8_t* input1_data, - const RuntimeShape& input2_shape, const int8_t* input2_data, - const RuntimeShape& output_shape, int8_t* output_data) { - CheckArithmeticParams(params); - - const int flat_size = - MatchingElementsSize(input1_shape, input2_shape, output_shape); - - AddElementwise(flat_size, params, input1_data, input2_data, output_data); -} - -inline void BroadcastAdd4DSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int8_t* input1_data, - const RuntimeShape& input2_shape, - const int8_t* input2_data, - const RuntimeShape& output_shape, - int8_t* output_data) { - BroadcastBinaryFunction4DSlow(params, input1_shape, input1_data, input2_shape, - input2_data, output_shape, output_data, - CheckArithmeticParams, AddFunc); -} - -} // namespace reference_integer_ops -} // namespace tflite - -#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h index 413e5da..3b9adcb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h @@ -15,6 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { @@ -48,7 +50,7 @@ inline void ConvPerChannel( TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -59,6 +61,10 @@ inline void ConvPerChannel( const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); for (int batch = 0; batch < batches; ++batch) { @@ -67,6 +73,7 @@ inline void ConvPerChannel( for (int out_x = 0; out_x < output_width; ++out_x) { const int in_x_origin = (out_x * stride_width) - pad_width; for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + auto group = out_channel / filters_per_group; int32_t acc = 0; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + dilation_height_factor * filter_y; @@ -82,9 +89,11 @@ inline void ConvPerChannel( continue; } - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { + int32_t input_val = + input_data[Offset(input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; int32_t filter_val = filter_data[Offset( filter_shape, out_channel, filter_y, filter_x, in_channel)]; // Accumulate with 32 bits accumulator. @@ -124,14 +133,16 @@ inline void ConvPerChannel( } } + // Fixed-point per-channel-quantization convolution reference kernel. // 16-bit data and 8-bit filter +template inline void ConvPerChannel( const ConvParams& params, const int32_t* output_multiplier, const int32_t* output_shift, const RuntimeShape& input_shape, const int16_t* input_data, const RuntimeShape& filter_shape, const int8_t* filter_data, const RuntimeShape& bias_shape, - const std::int64_t* bias_data, const RuntimeShape& output_shape, + const AccumScalar* bias_data, const RuntimeShape& output_shape, int16_t* output_data) { // Get parameters. 
const int stride_width = params.stride_width; @@ -151,7 +162,7 @@ inline void ConvPerChannel( TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); const int batches = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int input_depth = input_shape.Dims(3); const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); @@ -162,6 +173,10 @@ inline void ConvPerChannel( const int input_width = input_shape.Dims(2); const int filter_height = filter_shape.Dims(1); const int filter_width = filter_shape.Dims(2); + const int filter_input_depth = filter_shape.Dims(3); + const int groups = input_depth / filter_input_depth; + TFLITE_DCHECK_EQ(input_depth % filter_input_depth, 0); + const int filters_per_group = output_depth / groups; const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); for (int batch = 0; batch < batches; ++batch) { @@ -170,7 +185,8 @@ inline void ConvPerChannel( for (int out_x = 0; out_x < output_width; ++out_x) { const int in_x_origin = (out_x * stride_width) - pad_width; for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - std::int64_t acc = 0; + auto group = out_channel / filters_per_group; + AccumScalar acc = 0; for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + dilation_height_factor * filter_y; for (int filter_x = 0; filter_x < filter_width; ++filter_x) { @@ -185,9 +201,11 @@ inline void ConvPerChannel( continue; } - for (int in_channel = 0; in_channel < input_depth; ++in_channel) { - int32_t input_val = input_data[Offset(input_shape, batch, in_y, - in_x, in_channel)]; + for (int in_channel = 0; in_channel < filter_input_depth; + ++in_channel) { + int32_t input_val = + input_data[Offset(input_shape, batch, in_y, in_x, + in_channel + group * filter_input_depth)]; int32_t filter_val = filter_data[Offset( filter_shape, out_channel, filter_y, filter_x, in_channel)]; // Accumulate with 64 bits accumulator. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h index 42d2536..95e7337 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h @@ -15,6 +15,8 @@ limitations under the License. 
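The two ConvPerChannel hunks above (int8 and int16 inputs) add grouped-convolution support: when the filter's input depth is smaller than the tensor's input depth, output channels are partitioned into groups and each group only reads its own contiguous slice of input channels. A minimal sketch of just that index mapping, with illustrative names and shapes:

#include <cassert>

// Map an output channel plus a filter-channel index to the input channel the
// kernel actually reads, following the same arithmetic as the hunks above.
inline int GroupedInputChannel(int input_depth, int filter_input_depth,
                               int output_depth, int out_channel,
                               int in_channel) {
  const int groups = input_depth / filter_input_depth;
  const int filters_per_group = output_depth / groups;
  const int group = out_channel / filters_per_group;
  return in_channel + group * filter_input_depth;
}

int main() {
  // input_depth = 8, filter_input_depth = 4, output_depth = 6 -> 2 groups:
  // output channels 0..2 read input channels 0..3, channels 3..5 read 4..7.
  assert(GroupedInputChannel(8, 4, 6, 0, 0) == 0);
  assert(GroupedInputChannel(8, 4, 6, 2, 3) == 3);
  assert(GroupedInputChannel(8, 4, 6, 3, 0) == 4);
  assert(GroupedInputChannel(8, 4, 6, 5, 3) == 7);
  // With filter_input_depth == input_depth there is a single group and the
  // mapping reduces to the original ungrouped behaviour.
  assert(GroupedInputChannel(8, 8, 6, 5, 7) == 7);
  return 0;
}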
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h index de21e14..4be7987 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h @@ -15,22 +15,30 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { namespace reference_integer_ops { -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const int8_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int32_t* bias_data, const RuntimeShape& output_shape, - int8_t* output_data) { +// For per-channel functions, since it is defined in quantization spec that +// weights are symmetric +// (https://www.tensorflow.org/lite/performance/quantization_spec#symmetric_vs_asymmetric), +// zero_point (params.weights_offset) is always 0. +// However, for per-tensor functions, params.weights_offset is still applied for +// backward compatibility. 
+template +void FullyConnectedPerChannel( + const FullyConnectedParams& params, const int32_t* output_multiplier, + const int* output_shift, const RuntimeShape& input_shape, + const InputType* input_data, const RuntimeShape& filter_shape, + const WeightType* filter_data, const RuntimeShape& bias_shape, + const BiasType* bias_data, const RuntimeShape& output_shape, + OutputType* output_data) { const int32_t input_offset = params.input_offset; - const int32_t filter_offset = params.weights_offset; const int32_t output_offset = params.output_offset; - const int32_t output_multiplier = params.output_multiplier; - const int output_shift = params.output_shift; const int32_t output_activation_min = params.quantized_activation_min; const int32_t output_activation_max = params.quantized_activation_max; TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); @@ -44,60 +52,70 @@ inline void FullyConnected( const int accum_depth = filter_shape.Dims(filter_dim_count - 1); for (int b = 0; b < batches; ++b) { for (int out_c = 0; out_c < output_depth; ++out_c) { - int32_t acc = 0; + BiasType acc = 0; for (int d = 0; d < accum_depth; ++d) { int32_t input_val = input_data[b * accum_depth + d]; int32_t filter_val = filter_data[out_c * accum_depth + d]; - acc += (filter_val + filter_offset) * (input_val + input_offset); + acc += filter_val * (input_val + input_offset); } if (bias_data) { acc += bias_data[out_c]; } - acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); - acc += output_offset; - acc = std::max(acc, output_activation_min); - acc = std::min(acc, output_activation_max); - output_data[out_c + output_depth * b] = static_cast(acc); + int32_t acc_scaled = MultiplyByQuantizedMultiplier( + acc, output_multiplier[out_c], output_shift[out_c]); + acc_scaled += output_offset; + acc_scaled = std::max(acc_scaled, output_activation_min); + acc_scaled = std::min(acc_scaled, output_activation_max); + output_data[out_c + output_depth * b] = + static_cast(acc_scaled); } } } -inline void FullyConnected( - const FullyConnectedParams& params, const RuntimeShape& input_shape, - const int16_t* input_data, const RuntimeShape& filter_shape, - const int8_t* filter_data, const RuntimeShape& bias_shape, - const int64_t* bias_data, const RuntimeShape& output_shape, - int16_t* output_data) { +template +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, + const InputType* input_data, + const RuntimeShape& filter_shape, + const WeightType* filter_data, + const RuntimeShape& bias_shape, const BiasType* bias_data, + const RuntimeShape& output_shape, OutputType* output_data) { + const int32_t input_offset = params.input_offset; const int32_t filter_offset = params.weights_offset; + const int32_t output_offset = params.output_offset; const int32_t output_multiplier = params.output_multiplier; const int output_shift = params.output_shift; const int32_t output_activation_min = params.quantized_activation_min; const int32_t output_activation_max = params.quantized_activation_max; TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); + TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1); TFLITE_DCHECK_LE(output_activation_min, output_activation_max); const int filter_dim_count = filter_shape.DimensionsCount(); - const int batches = output_shape.Dims(0); - const int output_depth = output_shape.Dims(1); + const int output_dim_count = output_shape.DimensionsCount(); + const int batches = FlatSizeSkipDim(output_shape, 
output_dim_count - 1); + const int output_depth = output_shape.Dims(output_dim_count - 1); TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2)); const int accum_depth = filter_shape.Dims(filter_dim_count - 1); for (int b = 0; b < batches; ++b) { for (int out_c = 0; out_c < output_depth; ++out_c) { - int64_t acc = 0; + BiasType acc = 0; for (int d = 0; d < accum_depth; ++d) { int32_t input_val = input_data[b * accum_depth + d]; int32_t filter_val = filter_data[out_c * accum_depth + d]; - acc += (filter_val + filter_offset) * input_val; + acc += (filter_val + filter_offset) * (input_val + input_offset); } if (bias_data) { acc += bias_data[out_c]; } int32_t acc_scaled = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift); + acc_scaled += output_offset; acc_scaled = std::max(acc_scaled, output_activation_min); acc_scaled = std::min(acc_scaled, output_activation_max); - output_data[out_c + output_depth * b] = static_cast(acc_scaled); + output_data[out_c + output_depth * b] = + static_cast(acc_scaled); } } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h index c5fb00e..582713b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h index b53bfd9..2119103 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h @@ -15,7 +15,9 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_ +#include #include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h index f30e586..0ba0f66 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h @@ -15,6 +15,8 @@ limitations under the License. 
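The integer_ops/fully_connected.h changes above introduce a templated FullyConnectedPerChannel that assumes symmetric weights (no weights_offset) and applies a per-output-channel multiplier and shift, while the per-tensor FullyConnected stays generic over the input/weight/bias/output types. A hedged sketch of the per-channel data flow for a single output element; RequantizeSketch is a double-based stand-in for the fixed-point MultiplyByQuantizedMultiplier and the other names are illustrative:

#include <algorithm>
#include <cmath>
#include <cstdint>

// Double-based stand-in for the fixed-point requantization step, used only to
// show the order of operations; the real kernel stays in integer arithmetic.
inline int32_t RequantizeSketch(int64_t acc, double per_channel_scale) {
  return static_cast<int32_t>(std::lround(acc * per_channel_scale));
}

// One (batch, out_channel) output: accumulate symmetric int8 weights against
// offset-corrected inputs, add the bias, rescale with this channel's scale,
// then apply the output offset and activation clamp.
inline int8_t FullyConnectedPerChannelSketch(
    const int8_t* input, const int8_t* weights, int accum_depth,
    int32_t input_offset, int32_t bias, double per_channel_scale,
    int32_t output_offset, int32_t act_min, int32_t act_max) {
  int64_t acc = 0;
  for (int d = 0; d < accum_depth; ++d) {
    acc += static_cast<int32_t>(weights[d]) *
           (static_cast<int32_t>(input[d]) + input_offset);
  }
  acc += bias;
  int32_t scaled = RequantizeSketch(acc, per_channel_scale) + output_offset;
  scaled = std::max(scaled, act_min);
  scaled = std::min(scaled, act_max);
  return static_cast<int8_t>(scaled);
}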
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h index 3c809db..168e3ae 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_ +#include + #include "edge-impulse-sdk/third_party/gemmlowp/fixedpoint/fixedpoint.h" #include "edge-impulse-sdk/third_party/ruy/ruy/profiler/instrumentation.h" // from @ruy #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" @@ -22,10 +24,10 @@ limitations under the License. namespace tflite { namespace reference_integer_ops { -template -inline void MulElementwise(int size, const ArithmeticParams& params, - const T* input1_data, const T* input2_data, - T* output_data) { +template +void MulElementwise(int size, const ArithmeticParams& params, + const InputType* input1_data, const InputType* input2_data, + OutputType* output_data) { for (int i = 0; i < size; ++i) { const int32_t input1_val = params.input1_offset + input1_data[i]; const int32_t input2_val = params.input2_offset + input2_data[i]; @@ -37,7 +39,7 @@ inline void MulElementwise(int size, const ArithmeticParams& params, const int32_t clamped_output = std::min(params.quantized_activation_max, std::max(params.quantized_activation_min, unclamped_result)); - output_data[i] = static_cast(clamped_output); + output_data[i] = static_cast(clamped_output); } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h index f4eedc6..ee026fd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h @@ -15,13 +15,15 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_ +#include #include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { namespace reference_integer_ops { -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int8_t* input_data, const RuntimeShape& output_shape, int8_t* output_data) { @@ -66,6 +68,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; // Round to the closest integer value. acc = acc > 0 ? 
(acc + filter_count / 2) / filter_count : (acc - filter_count / 2) / filter_count; @@ -77,6 +80,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, @@ -136,7 +140,7 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, } } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const int16_t* input_data, const RuntimeShape& output_shape, @@ -182,6 +186,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; // Round to the closest integer value. acc = acc > 0 ? (acc + filter_count / 2) / filter_count : (acc - filter_count / 2) / filter_count; @@ -193,6 +198,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h index 2dc2ad4..d7feb45 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h @@ -15,6 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_ +#include #include #include "edge-impulse-sdk/third_party/gemmlowp/fixedpoint/fixedpoint.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h index 57622ba..8ce1cb7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h @@ -15,6 +15,8 @@ limitations under the License. 
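The integer_ops/pooling.h hunks above change both AveragePool overloads from void to bool so that an empty pooling window (filter_count == 0, which can happen with aggressive padding) is reported as a failure instead of causing a division by zero. A small sketch of that guard and of the rounding used by the reference kernel; the names are illustrative:

#include <cstdio>

// Average one pooling window, reporting failure when the window is empty,
// and rounding the integer average to the closest value as the kernel does.
inline bool AveragePoolWindowSketch(const int* window, int count, int* out) {
  if (count == 0) return false;  // mirrors `if (filter_count == 0) return false;`
  int acc = 0;
  for (int i = 0; i < count; ++i) acc += window[i];
  *out = acc > 0 ? (acc + count / 2) / count : (acc - count / 2) / count;
  return true;
}

int main() {
  const int window[] = {1, 2, 4};
  int avg = 0;
  if (!AveragePoolWindowSketch(window, 3, &avg)) return 1;
  std::printf("avg = %d\n", avg);  // 7/3 rounded to nearest -> 2
  return 0;
}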
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { @@ -53,8 +55,8 @@ inline void TransposeConv( const int output_width = output_shape.Dims(2); const int32_t input_offset = params.input_offset; const int32_t output_offset = params.output_offset; - const int32_t output_activation_min = std::numeric_limits::min(); - const int32_t output_activation_max = std::numeric_limits::max(); + const int32_t output_activation_min = params.quantized_activation_min; + const int32_t output_activation_max = params.quantized_activation_max; TFLITE_DCHECK_LE(output_activation_min, output_activation_max); const int num_elements = output_shape.FlatSize(); @@ -119,15 +121,16 @@ inline void TransposeConv( } } -// int16_t input (zero_point=0), int8_t filter, int64 accumulator +// int16_t input (zero_point=0), int8_t filter, int32 or int64 accumulator +template inline void TransposeConv( const ConvParams& params, const int32_t* output_multiplier, const int32_t* output_shift, const RuntimeShape& input_shape, const int16_t* input_data, const RuntimeShape& filter_shape, const int8_t* filter_data, const RuntimeShape& bias_shape, - const std::int64_t* bias_data, const RuntimeShape& output_shape, + const Scalar* bias_data, const RuntimeShape& output_shape, int16_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data, - std::int64_t* scratch_buffer) { + Scalar* scratch_buffer) { const int stride_width = params.stride_width; const int stride_height = params.stride_height; const int pad_width = params.padding_values.width; @@ -150,14 +153,14 @@ inline void TransposeConv( const int filter_width = filter_shape.Dims(2); const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); - const int32_t output_activation_min = std::numeric_limits::min(); - const int32_t output_activation_max = std::numeric_limits::max(); + const int32_t output_activation_min = params.quantized_activation_min; + const int32_t output_activation_max = params.quantized_activation_max; TFLITE_DCHECK_LE(output_activation_min, output_activation_max); const int num_elements = output_shape.FlatSize(); // We need to initialize scratch_buffer to all 0s, as we apply the same // 'scatter' based trick as in float version. - memset(scratch_buffer, 0, num_elements * sizeof(std::int64_t)); + memset(scratch_buffer, 0, num_elements * sizeof(Scalar)); // Loop through input elements one at a time. 
for (int batch = 0; batch < batches; ++batch) { @@ -198,8 +201,8 @@ inline void TransposeConv( for (int out_y = 0; out_y < output_height; ++out_y) { for (int out_x = 0; out_x < output_width; ++out_x) { for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - std::int64_t acc = scratch_buffer[Offset(output_shape, batch, out_y, - out_x, out_channel)]; + Scalar acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x, + out_channel)]; if (bias_data) { acc += bias_data[out_channel]; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/l2normalization.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/l2normalization.h index af83de9..cf32ea5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/l2normalization.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/l2normalization.h @@ -18,7 +18,7 @@ limitations under the License. #include #include -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/log_softmax.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/log_softmax.h new file mode 100644 index 0000000..af55755 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/log_softmax.h @@ -0,0 +1,256 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
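The integer_ops/transpose_conv.h changes above make the 16-bit kernel generic over the accumulator/bias type (int32 or int64), size the scratch buffer with sizeof(Scalar) accordingly, and take the activation clamp from params instead of the full int16 range, so fused activations clamp correctly. A brief sketch of the final store for one output element, with illustrative names; `acc` is assumed to be the already rescaled accumulator:

#include <algorithm>
#include <cstdint>

// Clamp the rescaled accumulator to the activation range carried in the conv
// params, then narrow to the int16 output type.
inline int16_t StoreTransposeConvOutputSketch(int32_t acc,
                                              int32_t activation_min,
                                              int32_t activation_max) {
  acc = std::max(acc, activation_min);
  acc = std::min(acc, activation_max);
  return static_cast<int16_t>(acc);
}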
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_ + +#include +#include +#include + +#include "edge-impulse-sdk/third_party/gemmlowp/fixedpoint/fixedpoint.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" + +namespace tflite { +namespace reference_ops { + +inline void LogSoftmax(const SoftmaxParams& params, + const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int outer_size = + MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int depth = + MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + + for (int i = 0; i < outer_size; ++i) { + // Find max element value which we'll use to ensure numerical stability + // taking advantage of the following equality: + // log(exp(x[i])/sum(exp(x[i]))) == log(exp(x[i]+C)/sum(exp(x[i]+C))) + float max = std::numeric_limits::lowest(); + for (int c = 0; c < depth; ++c) { + max = std::max(max, input_data[i * depth + c]); + } + + // Compute sum. + float sum = 0.f; + for (int c = 0; c < depth; ++c) { + sum += std::exp(input_data[i * depth + c] - max); + } + + // Compute result. + const float log_sum = std::log(sum); + for (int c = 0; c < depth; ++c) { + output_data[i * depth + c] = input_data[i * depth + c] - max - log_sum; + } + } +} + +inline void LogSoftmax(const SoftmaxParams& params, + const RuntimeShape& input_shape, + const uint8_t* input_data, + const RuntimeShape& output_shape, uint8_t* output_data) { + const int32_t input_multiplier = params.input_multiplier; + const int32_t input_left_shift = params.input_left_shift; + const int32_t reverse_scaling_divisor = params.reverse_scaling_divisor; + const int32_t reverse_scaling_right_shift = + params.reverse_scaling_right_shift; + const int diff_min = params.diff_min; + // The representation chosen for the input to the exp() function is Q5.26. + // We need to leave extra space since values that we skip might be as large + // as -32 before multiplying by input_beta_multiplier, and therefore as + // large as -16 afterwards. Note that exp(-8) is definitely not + // insignificant to accumulation, but exp(-16) definitely is. 
+ static constexpr int kScaledDiffIntegerBits = 5; + static constexpr int kAccumulationIntegerBits = 12; + static constexpr int kOutputIntegerBits = 4; + using FixedPointScaledDiff = + gemmlowp::FixedPoint; + using FixedPointAccum = + gemmlowp::FixedPoint; + + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int outer_size = + MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int depth = + MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + + for (int i = 0; i < outer_size; ++i) { + uint8_t max_in_row = 0; + for (int c = 0; c < depth; ++c) { + max_in_row = std::max(max_in_row, input_data[i * depth + c]); + } + + FixedPointAccum sum_of_exps = FixedPointAccum::Zero(); + for (int c = 0; c < depth; ++c) { + int32_t input_diff = + static_cast(input_data[i * depth + c]) - max_in_row; + if (input_diff >= diff_min) { + const int32_t input_diff_rescaled = + MultiplyByQuantizedMultiplierGreaterThanOne( + input_diff, input_multiplier, input_left_shift); + const FixedPointScaledDiff scaled_diff_f8 = + FixedPointScaledDiff::FromRaw(input_diff_rescaled); + sum_of_exps = sum_of_exps + gemmlowp::Rescale( + exp_on_negative_values(scaled_diff_f8)); + } + } + + const int32_t fixed_log_sum_of_exps = + log_x_for_x_greater_than_or_equal_to_1( + sum_of_exps) + .raw(); + + // rescaled_diff_min is smallest representable in + // Q(kScaledDiffIntegerBits).(31-kScaledDiffIntegerBits) plus the + // log-sub-exps that will be subtracted in the loop. + // + // The thresholds diff_min, etc are negative. + const int rescaled_diff_min = + fixed_log_sum_of_exps + std::numeric_limits::lowest(); + const int adjusted_diff_min = + std::max(static_cast( + diff_min - 1), // Note use of > below instead of >= above. + MultiplyByQuantizedMultiplierSmallerThanOneExp( + rescaled_diff_min, reverse_scaling_divisor, + -reverse_scaling_right_shift)); + + for (int c = 0; c < depth; ++c) { + int32_t input_diff = + static_cast(input_data[i * depth + c]) - max_in_row; + if (input_diff > adjusted_diff_min) { + const int32_t input_diff_rescaled = + MultiplyByQuantizedMultiplierGreaterThanOne( + input_diff, input_multiplier, input_left_shift); + int32_t unsat_output = + gemmlowp::RoundingDivideByPOT( + (input_diff_rescaled - fixed_log_sum_of_exps), + 31 - kScaledDiffIntegerBits - kOutputIntegerBits) + + 255; + + output_data[i * depth + c] = static_cast( + std::max(std::min(unsat_output, static_cast(255)), + static_cast(0))); + } else { + // Set output to smallest value. + output_data[i * depth + c] = 0; + } + } + } +} + +template +inline void LogSoftmaxQuantized(const SoftmaxParams& params, + const size_t outer_size, const size_t depth, + const RuntimeShape& input_shape, + const T* input_data, + const RuntimeShape& output_shape, + T* output_data) { + const int32_t input_multiplier = params.input_multiplier; + const int32_t input_left_shift = params.input_left_shift; + const int32_t reverse_scaling_divisor = params.reverse_scaling_divisor; + const int32_t reverse_scaling_right_shift = + params.reverse_scaling_right_shift; + const int diff_min = params.diff_min; + + static constexpr T kMinT8 = std::numeric_limits::min(); + static constexpr T kMaxT8 = std::numeric_limits::max(); + static constexpr int32_t kMinInt32 = std::numeric_limits::min(); + + // All IntegerBits must agree with Prepare function. + // Input is chosen as Q5.26 so exp(-1 * 2^5 * 2^-1) = exp(-16) is negligible. 
+ static constexpr int kInputIntegerBits = 5; + static constexpr int kAccumulationIntegerBits = 12; + static constexpr int kOutputIntegerBits = 4; + using F5 = gemmlowp::FixedPoint; + using F12 = gemmlowp::FixedPoint; + + for (size_t outer_index = 0; outer_index < outer_size; ++outer_index) { + T max_in_row = kMinT8; + for (size_t inner_index = 0; inner_index < depth; ++inner_index) { + max_in_row = + std::max(max_in_row, input_data[outer_index * depth + inner_index]); + } + + // Accumulator "sum_of_exps_in_q12" is safe from overflowing in 2^12 steps. + F12 sum_of_exps_in_q12 = F12::FromRaw(0); + for (size_t inner_index = 0; inner_index < depth; ++inner_index) { + int32_t input_diff = + static_cast(input_data[outer_index * depth + inner_index]) - + max_in_row; + if (input_diff >= diff_min) { + const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier( + input_diff, input_multiplier, input_left_shift); + sum_of_exps_in_q12 = + sum_of_exps_in_q12 + + gemmlowp::Rescale( + exp_on_negative_values(F5::FromRaw(input_diff_in_q5))); + } + } + + const int32_t log_sum_of_exps_in_q5 = + log_x_for_x_greater_than_or_equal_to_1( + sum_of_exps_in_q12) + .raw(); + + // Potentially reduced the valid range. shifted_log_sum_of_exps_in_q5 is + // smallest representable in Q5.26 plus the log_sum_of_exps. + const int32_t shifted_log_sum_of_exps_in_q5 = + log_sum_of_exps_in_q5 + kMinInt32; + const int32_t adjusted_diff_min = + std::max(static_cast(diff_min - 1), + MultiplyByQuantizedMultiplier(shifted_log_sum_of_exps_in_q5, + reverse_scaling_divisor, + -reverse_scaling_right_shift)); + + for (size_t inner_index = 0; inner_index < depth; ++inner_index) { + int32_t input_diff = + static_cast(input_data[outer_index * depth + inner_index]) - + max_in_row; + // Note use of > below instead of >= above. + if (input_diff > adjusted_diff_min) { + const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier( + input_diff, input_multiplier, input_left_shift); + + // Rescale and downcast. + int32_t output_in_q27 = + gemmlowp::RoundingDivideByPOT( + (input_diff_in_q5 - log_sum_of_exps_in_q5), + 31 - kInputIntegerBits - kOutputIntegerBits) + + kMaxT8; + + output_in_q27 = + std::max(std::min(output_in_q27, static_cast(kMaxT8)), + static_cast(kMinT8)); + output_data[outer_index * depth + inner_index] = + static_cast(output_in_q27); + } else { + output_data[outer_index * depth + inner_index] = kMinT8; + } + } + } +} + +inline void LogSoftmax(const SoftmaxParams& params, const size_t outer_size, + const size_t depth, const RuntimeShape& input_shape, + const int8_t* input_data, + const RuntimeShape& output_shape, int8_t* output_data) { + LogSoftmaxQuantized(params, outer_size, depth, input_shape, input_data, + output_shape, output_data); +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/lstm_cell.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/lstm_cell.h new file mode 100644 index 0000000..de1c485 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/lstm_cell.h @@ -0,0 +1,422 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
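The float LogSoftmax in the new log_softmax.h above relies on the identity log(exp(x_c) / sum_j exp(x_j)) = (x_c - max) - log(sum_j exp(x_j - max)); subtracting the row maximum keeps every exponential in (0, 1] so the sum cannot overflow. A standalone sketch of that computation for a single row, with illustrative names:

#include <algorithm>
#include <cmath>
#include <cstdio>

// Numerically stable log-softmax over one row: subtract the max, accumulate
// the exponentials, then subtract the log of their sum.
void LogSoftmaxRowSketch(const float* input, int depth, float* output) {
  float max_val = input[0];
  for (int c = 1; c < depth; ++c) max_val = std::max(max_val, input[c]);
  float sum = 0.f;
  for (int c = 0; c < depth; ++c) sum += std::exp(input[c] - max_val);
  const float log_sum = std::log(sum);
  for (int c = 0; c < depth; ++c) output[c] = input[c] - max_val - log_sum;
}

int main() {
  const float in[3] = {1.f, 2.f, 3.f};
  float out[3];
  LogSoftmaxRowSketch(in, 3, out);
  // exp(out) is the softmax of the inputs (about 0.09, 0.24, 0.67), summing to 1.
  std::printf("%f %f %f\n", out[0], out[1], out[2]);
  return 0;
}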
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LSTM_CELL_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LSTM_CELL_H_ + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/concatenation.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +inline void LstmCell( + const LstmCellParams& params, const RuntimeShape& unextended_input_shape, + const float* input_data, const RuntimeShape& unextended_prev_activ_shape, + const float* prev_activ_data, const RuntimeShape& weights_shape, + const float* weights_data, const RuntimeShape& unextended_bias_shape, + const float* bias_data, const RuntimeShape& unextended_prev_state_shape, + const float* prev_state_data, + const RuntimeShape& unextended_output_state_shape, float* output_state_data, + const RuntimeShape& unextended_output_activ_shape, float* output_activ_data, + const RuntimeShape& unextended_concat_temp_shape, float* concat_temp_data, + const RuntimeShape& unextended_activ_temp_shape, float* activ_temp_data) { + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape prev_activ_shape = + RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape); + const RuntimeShape bias_shape = + RuntimeShape::ExtendedShape(4, unextended_bias_shape); + const RuntimeShape prev_state_shape = + RuntimeShape::ExtendedShape(4, unextended_prev_state_shape); + const RuntimeShape output_state_shape = + RuntimeShape::ExtendedShape(4, unextended_output_state_shape); + const RuntimeShape output_activ_shape = + RuntimeShape::ExtendedShape(4, unextended_output_activ_shape); + const RuntimeShape concat_temp_shape = + RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape); + const RuntimeShape activ_temp_shape = + RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape); + TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2); + + const int weights_dim_count = weights_shape.DimensionsCount(); + const int batches = + MatchingDim(input_shape, 0, prev_activ_shape, 0, prev_state_shape, 0, + output_state_shape, 0, output_activ_shape, 0); + const int height = + MatchingDim(input_shape, 1, prev_activ_shape, 1, prev_state_shape, 1, + output_state_shape, 1, output_activ_shape, 1); + const int width = + 
MatchingDim(input_shape, 2, prev_activ_shape, 2, prev_state_shape, 2, + output_state_shape, 2, output_activ_shape, 2); + const int input_depth = input_shape.Dims(3); + const int prev_activ_depth = prev_activ_shape.Dims(3); + const int total_input_depth = prev_activ_depth + input_depth; + TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1), + total_input_depth); + TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1); + const int intern_activ_depth = + MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3); + TFLITE_DCHECK_EQ(weights_shape.FlatSize(), + intern_activ_depth * total_input_depth); + TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0); + const int output_depth = + MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape, + 3, output_activ_shape, 3); + TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4); + + // Concatenate prev_activ and input data together + float const* concat_input_arrays_data[2] = {input_data, prev_activ_data}; + const RuntimeShape* concat_input_arrays_shapes[2] = {&input_shape, + &prev_activ_shape}; + tflite::ConcatenationParams concat_params; + concat_params.axis = 3; + concat_params.inputs_count = 2; + Concatenation(concat_params, concat_input_arrays_shapes, + concat_input_arrays_data, concat_temp_shape, concat_temp_data); + + // Fully connected + tflite::FullyConnectedParams fc_params; + fc_params.float_activation_min = std::numeric_limits::lowest(); + fc_params.float_activation_max = std::numeric_limits::max(); + FullyConnected(fc_params, concat_temp_shape, concat_temp_data, weights_shape, + weights_data, bias_shape, bias_data, activ_temp_shape, + activ_temp_data); + + // Memory state update (the LSTM "guts") + for (int b = 0; b < batches; ++b) { + for (int w = 0; w < width; ++w) { + for (int h = 0; h < height; ++h) { + for (int c = 0; c < output_depth; ++c) { + const float input_gate = + 1.f / + (1.f + std::exp(-activ_temp_data[Offset(activ_temp_shape, b, h, w, + 0 * output_depth + c)])); + const float new_input = std::tanh(activ_temp_data[Offset( + activ_temp_shape, b, h, w, 1 * output_depth + c)]); + const float forget_gate = + 1.f / + (1.f + std::exp(-activ_temp_data[Offset(activ_temp_shape, b, h, w, + 2 * output_depth + c)])); + const float output_gate = + 1.f / + (1.f + std::exp(-activ_temp_data[Offset(activ_temp_shape, b, h, w, + 3 * output_depth + c)])); + const float new_state = + input_gate * new_input + + forget_gate * + prev_state_data[Offset(prev_state_shape, b, h, w, c)]; + output_state_data[Offset(output_state_shape, b, h, w, c)] = new_state; + output_activ_data[Offset(output_activ_shape, b, h, w, c)] = + output_gate * std::tanh(new_state); + } + } + } + } +} + +// Quantized LSTM cell implementation. +// The quantization of the input, output arrays is as follows: +// - The input activations are quantized as uint8 on the interval +// [-1, 127/128]. +// The rationale for that is that is the natural interval for output +// activations (see next point) and these need to be concatenated together. +// We could accommodate different ranges by re-scaling, but we empirically +// found that setting the input activations range to be [-1, 127/128] in the +// first place, removing the need for re-scaling, greatly improves accuracy. +// - The output activations are quantized as uint8 on the interval +// [-1, 127/128]. +// The rationale for that is that the definition of a LSTM cell makes them +// intrinsically constrained in [-1, 1]; tweaking that to [-1, 127/128] +// makes for simpler, more accurate fixed-point arithmetic. 
+// - The output-at-previous-timestep state array is obviously quantized as +// the output activations. +// - The internal LSTM memory (not the output-at-previous-timestep, the other +// internal state array) is int16-quantized and may use any power-of-two, +// symmetric range i.e. [-2^N, 2^N * 32767/32768] for any N, which we call +// StateIntegerBits below, see the below discussion of that template +// parameter ("The StateIntegerBits template parameter"). +// - The output of the internal fully-connected node is int16-quantized +// on the interval [-8, 8 * 32767/32768], the rationale for which is +// explained just below ("Why [-8, 8] for fully-connected output?"). +// +// +// === The StateIntegerBits template parameter === +// +// The StateIntegerBits template parameter controls the fixed-point format used +// to represent the internal memory of the LSTM cell (not the +// output-at-previous-timestep, the other internal state array). It's currently +// a template parameter so that the model can control that. The most typical +// value for StateIntegerBits is 4. Other plausible values are anywhere between +// 3 and 5. We might eventually standardize on a single supported value, e.g. 4, +// and drop that template parameter. The reason why it can't be a runtime +// parameter is that this controls the fixed-point format used, i.e. we need to +// generate actually different code based on it. In particular, we generate code +// for a fixed-point tanh() implementation for that format, which internally +// uses a fixed-point exp() implementation, which internally uses a +// barrel-shifter with a number of steps that depends on StateIntegerBits. +// Another consequence of that is that a higher value of StateIntegerBits +// results in a more expensive implementation (more barrel shifter steps +// needed). +// +// +// === Why [-8, 8] for fully-connected output? === +// +// This array is only fed to Logistic and Tanh functions, for which +// the quantized implementation will want to use fixed-point arithmetic, +// requiring a power-of-two representation interval. Thus, we should right +// away quantize this array to a power-of-two interval; otherwise, +// implementation will need to rescale that, losing any benefit that a tighter +// representation interval might otherwise yield, while introducing some +// numerical error and computational overhead. +// +// Now, Logistic and Tanh +// are nearly constant (nearly equal to their horizontal asymptotes) +// outside of a small bounded interval around 0: +// +// Logistic(4) = 1 - 1.8e-2 Tanh(4) = 1 - 6.7e-4 +// Logistic(8) = 1 - 3.4e-4 Tanh(8) = 1 - 2.3e-7 +// Logistic(16) = 1 - 1.1e-7 Tanh(16) = 1 - 2.5e-14 +// +// From this, we see that clamping to [-4, 4] would be too inaccurate +// (the error of 1.8e-2 on Logistic would be felt even in 8bit precision) +// while clamping to [-16, 16] would make no difference even in float32. +// However, for a fixed-point implementation in 16-bit integers, using 5 +// integer bits to represent the [-16, 16] range would leave only 11 +// fractional bits, giving an increment of 2^-11 = 4.9e-4 between consecutive +// representable values. Notice that is higher than the +// worst-case clamping error with clamping to [-8, 8]: 3.4e-4 for Logistic. +// Using [-8, 8] thus seems like the better compromise overall, enjoying +// an increment of 2.4e-4 between representable values and a worst-case +// clamping error of 3.4e-4, both better than the increment of 4.9e-4 with +// [-16, 16]. 
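The comment block above pins the [-8, 8] choice on concrete numbers: a 2.4e-4 step and a 3.4e-4 worst-case Logistic clamping error for [-8, 8], versus a 4.9e-4 step for [-16, 16]. Those figures are easy to reproduce with a short check that assumes only the standard library:

#include <cmath>
#include <cstdio>

// Reproduce the quantization steps and logistic clamping errors quoted in the
// comment above for 16-bit fixed point on the ranges [-8, 8] and [-16, 16].
int main() {
  const double step_range_8 = 16.0 / 65536.0;   // range width / 2^16 values
  const double step_range_16 = 32.0 / 65536.0;
  const double logistic_err_at_8 = 1.0 - 1.0 / (1.0 + std::exp(-8.0));
  const double logistic_err_at_16 = 1.0 - 1.0 / (1.0 + std::exp(-16.0));
  std::printf("step on [-8, 8]:   %.2e\n", step_range_8);        // ~2.4e-04
  std::printf("step on [-16, 16]: %.2e\n", step_range_16);       // ~4.9e-04
  std::printf("1 - Logistic(8):   %.2e\n", logistic_err_at_8);   // ~3.4e-04
  std::printf("1 - Logistic(16):  %.2e\n", logistic_err_at_16);  // ~1.1e-07
  return 0;
}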
+// +// Moreover, all other things being equal, it is nice to choose the narrower +// representation range, as that makes the implementation of fixed-point +// math functions a little cheaper (each integer bit requires an additional +// barrel-shifter atep in the implementation of exp(-x)). That is further +// reason to prefer [-8, 8] over [-16, 16]. The choice of [-16, 16] would make +// sense for 32-bit float or 32-bit fixed-point quantization, but we are +// aiming for 16-bit fixed-point quantization of these internal nodes here. +// +template +inline void LstmCell(const LstmCellParams& params, + const RuntimeShape& unextended_input_shape, + const uint8_t* input_data_uint8, + const RuntimeShape& unextended_prev_activ_shape, + const uint8_t* prev_activ_data_uint8, + const RuntimeShape& weights_shape, + const uint8_t* weights_data_uint8, + const RuntimeShape& unextended_bias_shape, + const int32_t* bias_data_int32, + const RuntimeShape& unextended_prev_state_shape, + const int16_t* prev_state_data_int16, + const RuntimeShape& unextended_output_state_shape, + int16_t* output_state_data_int16, + const RuntimeShape& unextended_output_activ_shape, + uint8_t* output_activ_data_uint8, + const RuntimeShape& unextended_concat_temp_shape, + uint8_t* concat_temp_data_uint8, + const RuntimeShape& unextended_activ_temp_shape, + int16_t* activ_temp_data_int16, void* gemmlowp_context) { + (void)gemmlowp_context; // only used in optimized code. + int32_t weights_zero_point = params.weights_zero_point; + int32_t accum_multiplier = params.accum_multiplier; + int accum_shift = params.accum_shift; + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape prev_activ_shape = + RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape); + const RuntimeShape bias_shape = + RuntimeShape::ExtendedShape(4, unextended_bias_shape); + const RuntimeShape prev_state_shape = + RuntimeShape::ExtendedShape(4, unextended_prev_state_shape); + const RuntimeShape output_state_shape = + RuntimeShape::ExtendedShape(4, unextended_output_state_shape); + const RuntimeShape output_activ_shape = + RuntimeShape::ExtendedShape(4, unextended_output_activ_shape); + const RuntimeShape concat_temp_shape = + RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape); + const RuntimeShape activ_temp_shape = + RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape); + TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2); + + // Gather dimensions information, and perform consistency checks. 
+ const int weights_dim_count = weights_shape.DimensionsCount(); + const int outer_size = MatchingFlatSizeSkipDim( + input_shape, 3, prev_activ_shape, prev_state_shape, output_state_shape, + output_activ_shape); + const int input_depth = input_shape.Dims(3); + const int prev_activ_depth = prev_activ_shape.Dims(3); + const int total_input_depth = prev_activ_depth + input_depth; + TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1), + total_input_depth); + const int intern_activ_depth = + MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3); + TFLITE_DCHECK_EQ(weights_shape.FlatSize(), + intern_activ_depth * total_input_depth); + TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1); + TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0); + const int output_depth = + MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape, + 3, output_activ_shape, 3); + TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4); + const int fc_batches = FlatSizeSkipDim(activ_temp_shape, 3); + const int fc_output_depth = + MatchingDim(weights_shape, weights_dim_count - 2, activ_temp_shape, 3); + const int fc_accum_depth = total_input_depth; + TFLITE_DCHECK_EQ(fc_output_depth, 4 * output_depth); + + // Depth-concatenate prev_activ and input data together. + uint8_t const* concat_input_arrays_data[2] = {input_data_uint8, + prev_activ_data_uint8}; + const RuntimeShape* concat_input_arrays_shapes[2] = {&input_shape, + &prev_activ_shape}; + tflite::ConcatenationParams concat_params; + concat_params.axis = 3; + concat_params.inputs_count = 2; + Concatenation(concat_params, concat_input_arrays_shapes, + concat_input_arrays_data, concat_temp_shape, + concat_temp_data_uint8); + + // Implementation of the fully connected node inside the LSTM cell. + // The operands are 8-bit integers, the accumulators are internally 32bit + // integers, and the output is 16-bit fixed-point with 3 integer bits so + // the output range is [-2^3, 2^3] == [-8, 8]. The rationale for that + // is explained in the function comment above. + for (int b = 0; b < fc_batches; ++b) { + for (int out_c = 0; out_c < fc_output_depth; ++out_c) { + // Internal accumulation. + // Initialize accumulator with the bias-value. + int32_t accum = bias_data_int32[out_c]; + // Accumulation loop. + for (int d = 0; d < fc_accum_depth; ++d) { + int16_t input_val = + concat_temp_data_uint8[b * fc_accum_depth + d] - 128; + int16_t weights_val = + weights_data_uint8[out_c * fc_accum_depth + d] - weights_zero_point; + accum += input_val * weights_val; + } + // Down-scale the final int32 accumulator to the scale used by our + // (16-bit, using 3 integer bits) fixed-point format. The quantized + // multiplier and shift here have been pre-computed offline + // (e.g. by toco). + accum = + MultiplyByQuantizedMultiplier(accum, accum_multiplier, accum_shift); + // Saturate, cast to int16, and store to the temporary activations array. + accum = std::max(-32768, std::min(32767, accum)); + activ_temp_data_int16[out_c + fc_output_depth * b] = accum; + } + } + + // Rest of the LSTM cell: tanh and logistic math functions, and some adds + // and muls, all done in 16-bit fixed-point. + for (int b = 0; b < outer_size; ++b) { + for (int c = 0; c < output_depth; ++c) { + // Define the fixed-point data types that we will use here. All use + // int16 as the underlying integer type i.e. all are 16-bit fixed-point. + // They only differ by the number of integral vs. fractional bits, + // determining the range of values that they can represent. 
+ // + // F0 uses 0 integer bits, range [-1, 1]. + // This is the return type of math functions such as tanh, logistic, + // whose range is in [-1, 1]. + using F0 = gemmlowp::FixedPoint; + // F3 uses 3 integer bits, range [-8, 8]. + // This is the range of the previous fully-connected node's output, + // which is our input here. + using F3 = gemmlowp::FixedPoint; + // FS uses StateIntegerBits integer bits, range [-2^StateIntegerBits, + // 2^StateIntegerBits]. It's used to represent the internal state, whose + // number of integer bits is currently dictated by the model. See comment + // on the StateIntegerBits template parameter above. + using FS = gemmlowp::FixedPoint; + // Implementation of input gate, using fixed-point logistic function. + F3 input_gate_input = F3::FromRaw( + activ_temp_data_int16[b * fc_output_depth + 0 * output_depth + c]); + F0 input_gate_output = gemmlowp::logistic(input_gate_input); + // Implementation of input modulation gate, using fixed-point tanh + // function. + F3 input_modulation_gate_input = F3::FromRaw( + activ_temp_data_int16[b * fc_output_depth + 1 * output_depth + c]); + F0 input_modulation_gate_output = + gemmlowp::tanh(input_modulation_gate_input); + // Implementation of forget gate, using fixed-point logistic function. + F3 forget_gate_input = F3::FromRaw( + activ_temp_data_int16[b * fc_output_depth + 2 * output_depth + c]); + F0 forget_gate_output = gemmlowp::logistic(forget_gate_input); + // Implementation of output gate, using fixed-point logistic function. + F3 output_gate_input = F3::FromRaw( + activ_temp_data_int16[b * fc_output_depth + 3 * output_depth + c]); + F0 output_gate_output = gemmlowp::logistic(output_gate_input); + // Implementation of internal multiplication nodes, still in fixed-point. + F0 input_times_input_modulation = + input_gate_output * input_modulation_gate_output; + FS prev_state = FS::FromRaw(prev_state_data_int16[b * output_depth + c]); + FS prev_state_times_forget_state = forget_gate_output * prev_state; + // Implementation of internal addition node, saturating. + FS new_state = gemmlowp::SaturatingAdd( + gemmlowp::Rescale(input_times_input_modulation), + prev_state_times_forget_state); + // Implementation of last internal Tanh node, still in fixed-point. + // Since a Tanh fixed-point implementation is specialized for a given + // number or integer bits, and each specialization can have a substantial + // code size, and we already used above a Tanh on an input with 3 integer + // bits, and per the table in the above function comment there is no + // significant accuracy to be lost by clamping to [-8, +8] for a + // 3-integer-bits representation, let us just do that. This helps people + // porting this to targets where code footprint must be minimized. + F3 new_state_f3 = gemmlowp::Rescale<3>(new_state); + F0 output_activ_int16 = output_gate_output * gemmlowp::tanh(new_state_f3); + // Store the new internal state back to memory, as 16-bit integers. + // Note: here we store the original value with StateIntegerBits, not + // the rescaled 3-integer-bits value fed to tanh. + output_state_data_int16[b * output_depth + c] = new_state.raw(); + // Down-scale the output activations to 8-bit integers, saturating, + // and store back to memory. 
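// (Before that down-scaling step, a plain-float restatement of the cell update
// computed above may help; this is an illustrative sketch with an ad hoc name,
// not part of the kernel.)
#include <cmath>
inline float LstmCellFloatReference(float a_input, float a_modulation,
                                    float a_forget, float a_output,
                                    float prev_state, float* new_state) {
  const auto logistic = [](float x) { return 1.0f / (1.0f + std::exp(-x)); };
  // new_state = i * g + f * c_prev; output = o * tanh(new_state).
  *new_state = logistic(a_input) * std::tanh(a_modulation) +
               logistic(a_forget) * prev_state;
  return logistic(a_output) * std::tanh(*new_state);
}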
+ int16_t rescaled_output_activ = + gemmlowp::RoundingDivideByPOT(output_activ_int16.raw(), 8); + int16_t clamped_output_activ = std::max( + -128, std::min(127, rescaled_output_activ)); + output_activ_data_uint8[b * output_depth + c] = + 128 + clamped_output_activ; + } + } +} + +} // namespace reference_ops +} // namespace tflite +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LSTM_CELL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h index d6aaf8b..63ece01 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h @@ -15,6 +15,9 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_ +#include +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" namespace tflite { @@ -51,7 +54,7 @@ inline void Mul(const ArithmeticParams& params, GetActivationParams(params, &output_activation_min, &output_activation_max); const int flat_size = - MatchingFlatSize(input1_shape, input2_shape, output_shape); + MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); for (int i = 0; i < flat_size; ++i) { output_data[i] = ActivationFunctionWithMinMax( input1_data[i] * input2_data[i], output_activation_min, @@ -59,6 +62,20 @@ inline void Mul(const ArithmeticParams& params, } } +inline void Mul(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const std::complex* input1_data, + const RuntimeShape& input2_shape, + const std::complex* input2_data, + const RuntimeShape& output_shape, + std::complex* output_data) { + const int flat_size = + MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + output_data[i] = input1_data[i] * input2_data[i]; + } +} + inline void Mul(const ArithmeticParams& params, const RuntimeShape& input1_shape, const uint8_t* input1_data, const RuntimeShape& input2_shape, const uint8_t* input2_data, @@ -66,7 +83,7 @@ inline void Mul(const ArithmeticParams& params, TFLITE_DCHECK_LE(params.quantized_activation_min, params.quantized_activation_max); const int flat_size = - MatchingFlatSize(input1_shape, input2_shape, output_shape); + MatchingExtendedShapeFlatSize(input1_shape, input2_shape, output_shape); MulElementwise(flat_size, params, input1_data, input2_data, output_data); } @@ -160,6 +177,37 @@ void BroadcastMul4DSlow(const ArithmeticParams& params, } } +inline void BroadcastMul4DSlow(const ArithmeticParams& params, + const RuntimeShape& unextended_input1_shape, + const std::complex* input1_data, + const RuntimeShape& unextended_input2_shape, + const std::complex* input2_data, + const RuntimeShape& unextended_output_shape, + std::complex* output_data) { + TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + NdArrayDesc<4> desc1; + NdArrayDesc<4> desc2; + NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, + unextended_input2_shape, &desc1, &desc2); + + for (int b = 
0; b < output_shape.Dims(0); ++b) { + for (int y = 0; y < output_shape.Dims(1); ++y) { + for (int x = 0; x < output_shape.Dims(2); ++x) { + for (int c = 0; c < output_shape.Dims(3); ++c) { + output_data[Offset(output_shape, b, y, x, c)] = + input1_data[SubscriptToIndex(desc1, b, y, x, c)] * + input2_data[SubscriptToIndex(desc2, b, y, x, c)]; + } + } + } + } +} + } // namespace reference_ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pad.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pad.h index fe1b8f4..b4b2a75 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pad.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pad.h @@ -24,8 +24,8 @@ namespace tflite { namespace reference_ops { -// TFLite Pad supports activation tensors with up to 4 dimensions. -constexpr int PadKernelMaxDimensionCount() { return 4; } +// TFLite Pad supports activation tensors with up to 5 dimensions. +constexpr int PadKernelMaxDimensionCount() { return 5; } // There are two versions of pad: Pad and PadV2. In PadV2 there is a second // scalar input that provides the padding value. Therefore pad_value_ptr can be @@ -46,8 +46,8 @@ inline void PadImpl(const tflite::PadParams& op_params, TFLITE_DCHECK_LE(op_params.left_padding_count, PadKernelMaxDimensionCount()); TFLITE_DCHECK_LE(op_params.right_padding_count, PadKernelMaxDimensionCount()); - // Runtime calls are currently fixed at 4 dimensions. Copy inputs so we can - // pad them to 4 dims (yes, we are "padding the padding"). + // Runtime calls are currently fixed at 5 dimensions. Copy inputs so we can + // pad them to 5 dims (yes, we are "padding the padding"). 
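// A minimal standalone sketch of that front-padding step ("padding the
// padding"), assuming the 5-D kernel layout; the helper name is ad hoc and not
// from the patch:
#include <array>
inline std::array<int, 5> FrontPadSpecTo5D(const int* spec, int count) {
  std::array<int, 5> padded{};  // implicit leading dimensions get zero padding
  for (int i = 0; i < count; ++i) {
    padded[5 - count + i] = spec[i];
  }
  return padded;
}
// e.g. a 4-D left-padding spec {1, 2, 0, 3} becomes {0, 1, 2, 0, 3}.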
int left_padding_copy[PadKernelMaxDimensionCount()]; for (int i = 0; i < PadKernelMaxDimensionCount(); i++) { left_padding_copy[i] = 0; @@ -67,39 +67,46 @@ inline void PadImpl(const tflite::PadParams& op_params, } const int output_batch = ext_output_shape.Dims(0); - const int output_height = ext_output_shape.Dims(1); - const int output_width = ext_output_shape.Dims(2); - const int output_depth = ext_output_shape.Dims(3); + const int output_plane = ext_output_shape.Dims(1); + const int output_height = ext_output_shape.Dims(2); + const int output_width = ext_output_shape.Dims(3); + const int output_depth = ext_output_shape.Dims(4); const int left_b_padding = left_padding_copy[0]; - const int left_h_padding = left_padding_copy[1]; - const int left_w_padding = left_padding_copy[2]; - const int left_d_padding = left_padding_copy[3]; + const int left_p_padding = left_padding_copy[1]; + const int left_h_padding = left_padding_copy[2]; + const int left_w_padding = left_padding_copy[3]; + const int left_d_padding = left_padding_copy[4]; const int right_b_padding = right_padding_copy[0]; - const int right_h_padding = right_padding_copy[1]; - const int right_w_padding = right_padding_copy[2]; - const int right_d_padding = right_padding_copy[3]; + const int right_p_padding = right_padding_copy[1]; + const int right_h_padding = right_padding_copy[2]; + const int right_w_padding = right_padding_copy[3]; + const int right_d_padding = right_padding_copy[4]; const T pad_value = *pad_value_ptr; const T* in_ptr = input_data; T* out_ptr = output_data; for (int out_b = 0; out_b < output_batch; ++out_b) { - for (int out_h = 0; out_h < output_height; ++out_h) { - for (int out_w = 0; out_w < output_width; ++out_w) { - for (int out_d = 0; out_d < output_depth; ++out_d) { - if (out_b < left_b_padding || - out_b >= output_batch - right_b_padding || - out_h < left_h_padding || - out_h >= output_height - right_h_padding || - out_w < left_w_padding || - out_w >= output_width - right_w_padding || - out_d < left_d_padding || - out_d >= output_depth - right_d_padding) { - *out_ptr++ = pad_value; - } else { - *out_ptr++ = *in_ptr++; + for (int out_p = 0; out_p < output_plane; ++out_p) { + for (int out_h = 0; out_h < output_height; ++out_h) { + for (int out_w = 0; out_w < output_width; ++out_w) { + for (int out_d = 0; out_d < output_depth; ++out_d) { + if (out_b < left_b_padding || + out_b >= output_batch - right_b_padding || + out_p < left_p_padding || + out_p >= output_plane - right_p_padding || + out_h < left_h_padding || + out_h >= output_height - right_h_padding || + out_w < left_w_padding || + out_w >= output_width - right_w_padding || + out_d < left_d_padding || + out_d >= output_depth - right_d_padding) { + *out_ptr++ = pad_value; + } else { + *out_ptr++ = *in_ptr++; + } } } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h index 904372a..3657ffd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h @@ -15,6 +15,8 @@ limitations under the License. 
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" @@ -23,7 +25,7 @@ limitations under the License. namespace tflite { namespace reference_ops { -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const float* input_data, const RuntimeShape& output_shape, float* output_data) { @@ -66,6 +68,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; const float average = total / filter_count; output_data[Offset(output_shape, batch, out_y, out_x, channel)] = ActivationFunctionWithMinMax(average, params.float_activation_min, @@ -74,9 +77,10 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } -inline void AveragePool(const PoolParams& params, +inline bool AveragePool(const PoolParams& params, const RuntimeShape& input_shape, const uint8_t* input_data, const RuntimeShape& output_shape, @@ -122,6 +126,7 @@ inline void AveragePool(const PoolParams& params, filter_count++; } } + if (filter_count == 0) return false; acc = (acc + filter_count / 2) / filter_count; acc = std::max(acc, params.quantized_activation_min); acc = std::min(acc, params.quantized_activation_max); @@ -131,6 +136,7 @@ inline void AveragePool(const PoolParams& params, } } } + return true; } inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape, diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/prelu.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/prelu.h index b66af02..6d1dbe0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/prelu.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/prelu.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PRELU_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h index 01dceec..760f54d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h @@ -15,6 +15,8 @@ limitations under the License. 
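// Why the AveragePool routines above can now fail: with enough padding a
// filter window may not overlap the input at all, leaving filter_count == 0.
// A standalone sketch of that degenerate case (ad hoc helper, illustrative
// numbers only):
#include <algorithm>
inline bool PoolWindowIsEmpty(int in_y_origin, int filter_height,
                              int input_height) {
  const int filter_y_start = std::max(0, -in_y_origin);
  const int filter_y_end = std::min(filter_height, input_height - in_y_origin);
  return filter_y_end <= filter_y_start;  // no valid rows in the window
}
// PoolWindowIsEmpty(-3, 2, 8) is true: a 2-row window placed at y = -3 never
// reaches the 8-row input, so the bool return lets the caller report an error
// instead of dividing by zero.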
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PROCESS_BROADCAST_SHAPES_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" namespace tflite { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/quantize.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/quantize.h index 0c561fd..b791413 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/quantize.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/quantize.h @@ -17,6 +17,7 @@ limitations under the License. #include #include +#include #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" @@ -49,6 +50,39 @@ inline void AffineQuantize(const tflite::QuantizationParams& op_params, } } +// Quantizes per-channel. +template +inline void PerChannelQuantize( + const tflite::PerChannelQuantizationParams& op_params, + const RuntimeShape& input_shape, const InputT* input_data, + const RuntimeShape& output_shape, OutputT* output_data) { + // Ensure flat size is same. + MatchingFlatSize(input_shape, output_shape); + + const int32_t* zero_point = op_params.zero_point; + const float* scale = op_params.scale; + const int32_t quantized_dimension = op_params.quantized_dimension; + const int32_t num_dims = input_shape.DimensionsCount(); + const int32_t* dims_data = input_shape.DimsData(); + std::vector current_dim(num_dims, 0); + static constexpr int32_t min_val = std::numeric_limits::min(); + static constexpr int32_t max_val = std::numeric_limits::max(); + + do { + size_t offset = + ReducedOutputOffset(num_dims, reinterpret_cast(dims_data), + current_dim.data(), 0, nullptr); + const InputT val = input_data[offset]; + const int channel = current_dim[quantized_dimension]; + int32_t unclamped = static_cast(TfLiteRound( + val / static_cast(scale[channel]))) + + zero_point[channel]; + int32_t clamped = std::min(std::max(unclamped, min_val), max_val); + output_data[offset] = static_cast(clamped); + } while (NextIndex(num_dims, reinterpret_cast(dims_data), + current_dim.data())); +} + } // namespace reference_ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/reduce.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/reduce.h index b90a4d0..54f24f4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/reduce.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/reduce.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REDUCE_H_ +#include + #include "edge-impulse-sdk/third_party/ruy/ruy/profiler/instrumentation.h" // from @ruy #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" @@ -23,6 +25,25 @@ limitations under the License. 
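// Worked example of the per-channel formula in PerChannelQuantize above (a
// sketch that quantizes a single value; the tensor-walking machinery is
// omitted and the numbers are illustrative):
#include <algorithm>
#include <cmath>
#include <cstdint>
inline int8_t QuantizeOneValuePerChannel(float val, float channel_scale,
                                         int32_t channel_zero_point) {
  const int32_t unclamped =
      static_cast<int32_t>(std::round(val / channel_scale)) +
      channel_zero_point;
  return static_cast<int8_t>(
      std::min<int32_t>(std::max<int32_t>(unclamped, -128), 127));
}
// With channel_scale = 0.05f and channel_zero_point = -10, an input of 1.0f
// maps to round(1.0 / 0.05) - 10 = 10.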
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +// Check if the reduction at index is the first one along the dimensions given +// in axis. +inline bool IsFirstReduction(const int* index, const int num_axis, + const int* axis) { + if (num_axis == 0) { + return true; + } + + TFLITE_DCHECK(index != nullptr); + TFLITE_DCHECK(axis != nullptr); + for (int axis_idx = 0; axis_idx < num_axis; ++axis_idx) { + if (index[axis[axis_idx]] != 0) { + return false; + } + } + + return true; +} + namespace tflite { namespace reference_ops { @@ -35,8 +56,7 @@ inline bool Reduce(const In* input_data, const int* input_dims, const int* output_dims, const int input_num_dims, const int output_num_dims, const int* axis, const int num_axis, int* input_iter, - Out reducer(const Out current, const In in), - Out* output_data) { + Out reducer(Out current, const In in), Out* output_data) { // Reset input iterator. for (int idx = 0; idx < input_num_dims; ++idx) { input_iter[idx] = 0; @@ -53,6 +73,37 @@ inline bool Reduce(const In* input_data, const int* input_dims, return true; } +// Similar to above Reduce function but takes two reducer functions. +// The 'reducer_first' is called with the first value of the reduction, +// 'reducer_next' is then called for all the others. +template +inline bool Reduce(const In* input_data, const int* input_dims, + const int* output_dims, const int input_num_dims, + const int output_num_dims, const int* axis, + const int num_axis, int* input_iter, + const std::function& reducer_first, + const std::function& reducer_next, + Out* output_data) { + // Reset input iterator. + for (int idx = 0; idx < input_num_dims; ++idx) { + input_iter[idx] = 0; + } + // Iterate through input_data. + do { + size_t input_offset = + ReducedOutputOffset(input_num_dims, input_dims, input_iter, 0, nullptr); + size_t output_offset = ReducedOutputOffset(input_num_dims, input_dims, + input_iter, num_axis, axis); + if (IsFirstReduction(input_iter, num_axis, axis)) { + output_data[output_offset] = reducer_first(input_data[input_offset]); + } else { + output_data[output_offset] = + reducer_next(output_data[output_offset], input_data[input_offset]); + } + } while (NextIndex(input_num_dims, input_dims, input_iter)); + return true; +} + // This method parses the input 'axis' to remove duplicates and handle negative // values, and returns a valid 'out_axis' inline bool ResolveAxis(const int num_dims, const int* axis, @@ -111,7 +162,8 @@ inline bool InitTensorDataForReduce(const int* dims, const int num_dims, for (int idx = 0; idx < num_dims; ++idx) { size_t current = static_cast(dims[idx]); // Overflow prevention. - if (num_elements > std::numeric_limits::max() / current) { + if (current > 0 && + num_elements > std::numeric_limits::max() / current) { return false; } num_elements *= current; @@ -132,17 +184,20 @@ inline bool ReduceGeneric(const T* input_data, const int* input_dims, bool keep_dims, int* temp_index, int* resolved_axis, T init_value, T reducer(const T current, const T in)) { - // Return early when input shape has zero dim. - for (int i = 0; i < input_num_dims; ++i) { - if (input_dims[i] == 0) return true; - } - // Reset output data. if (!InitTensorDataForReduce(output_dims, output_num_dims, init_value, output_data)) { return false; } + // Return early when input shape has zero dim. 
This is done after initializing + // data for output tensor because there are cases that the input tensor is + // empty but output tensor is not. In that case, output tensor should be + // filled with init_value. + for (int i = 0; i < input_num_dims; ++i) { + if (input_dims[i] == 0) return true; + } + // Resolve axis. int num_resolved_axis = 0; if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis, @@ -290,9 +345,9 @@ inline void Mean(const tflite::MeanParams& op_params, constexpr int32_t kMinValue = std::numeric_limits::min(); constexpr int32_t kMaxValue = std::numeric_limits::max(); - int32_t bias = - output_zero_point - - static_cast(input_zero_point * input_scale / output_scale); + float temp = input_zero_point * input_scale / output_scale; + temp = temp > 0 ? temp + 0.5f : temp - 0.5f; + int32_t bias = output_zero_point - static_cast(temp); double real_scale = static_cast(input_scale / (num_elements_in_axis * output_scale)); @@ -353,6 +408,14 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point, temp_sum[idx] = U(); } + // Return early when input shape has zero dim. This is done after initializing + // data for output tensor because there are cases that the input tensor is + // empty but output tensor is not. In that case, output tensor should be + // filled with init_value. + for (int i = 0; i < input_num_dims; ++i) { + if (input_dims[i] == 0) return true; + } + // Resolve axis. int num_resolved_axis = 0; if (!ResolveAxis(input_num_dims, axis, num_axis_dimensions, resolved_axis, @@ -405,6 +468,73 @@ inline bool QuantizedMeanOrSum(const T* input_data, int32_t input_zero_point, return true; } +template +inline bool QuantizedMeanOrSumExtraArgs( + const T* input_data, int32_t input_zero_point, float input_scale, + const int* input_dims, const int input_num_dims, T* output_data, + float output_scale, int32_t output_multiplier, int output_shift, + int32_t output_zero_point, const int* output_dims, + const int output_num_dims, const int* axis, const int num_axis_dimensions, + bool keep_dims, int* temp_index, int* resolved_axis, U* temp_sum, + bool compute_sum) { + return QuantizedMeanOrSum( + input_data, input_zero_point, input_scale, input_dims, input_num_dims, + output_data, output_zero_point, output_scale, output_dims, + output_num_dims, axis, num_axis_dimensions, keep_dims, temp_index, + resolved_axis, temp_sum, compute_sum); +} + +template +inline bool QuantizedReduceProd(const T* input_data, int32_t input_zero_point, + const RuntimeShape& input_shape, T* output_data, + int32_t output_zero_point, + const RuntimeShape& output_shape, + const int* axis, + const int64_t num_axis_dimensions, + bool keep_dims, int* temp_index, + int* resolved_axis, int32_t* temp_prod, + int32_t scaling_multiplier, int scaling_shift) { + const int32_t kMinValue = std::numeric_limits::min(); + const int32_t kMaxValue = std::numeric_limits::max(); + + // Resolve axis. + int num_resolved_axis = 0; + if (!ResolveAxis(input_shape.DimensionsCount(), axis, num_axis_dimensions, + resolved_axis, &num_resolved_axis)) { + return false; + } + + // Calculate the reduced product by rescaling each multiplication step to + // avoid an overflow. 
+ auto reducer_first = [&](T in) -> int32_t { return in - input_zero_point; }; + + auto reducer_next = [&](int32_t current, T in) -> int32_t { + const int64_t result = + static_cast(current) * (in - input_zero_point); + return MultiplyByQuantizedMultiplier(result, scaling_multiplier, + scaling_shift); + }; + + if (!Reduce( + input_data, input_shape.DimsData(), output_shape.DimsData(), + input_shape.DimensionsCount(), output_shape.DimensionsCount(), + resolved_axis, num_resolved_axis, temp_index, reducer_first, + reducer_next, temp_prod)) { + return false; + } + + for (int i = 0; i < output_shape.FlatSize(); i++) { + int32_t result = + MultiplyByQuantizedMultiplier(static_cast(temp_prod[i]), + scaling_multiplier, scaling_shift) + + output_zero_point; + result = std::min(std::max(result, kMinValue), kMaxValue); + output_data[i] = static_cast(result); + } + + return true; +} + } // namespace reference_ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/requantize.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/requantize.h index 5d7e3b1..662046f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/requantize.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/requantize.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_REQUANTIZE_H_ +#include + #include "edge-impulse-sdk/third_party/ruy/ruy/profiler/instrumentation.h" // from @ruy #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_bilinear.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_bilinear.h new file mode 100644 index 0000000..ec8ec26 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_bilinear.h @@ -0,0 +1,228 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_ + +#include +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +inline void ComputeInterpolationValues(const float value, const float scale, + const bool half_pixel_centers, + int32_t input_size, float* scaled_value, + int32_t* lower_bound, + int32_t* upper_bound) { + if (half_pixel_centers) { + *scaled_value = (value + 0.5f) * scale - 0.5f; + } else { + *scaled_value = value * scale; + } + float scaled_value_floor = std::floor(*scaled_value); + *lower_bound = std::max(static_cast(scaled_value_floor), + static_cast(0)); + *upper_bound = + std::min(static_cast(std::ceil(*scaled_value)), input_size - 1); +} + +template +inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params, + const RuntimeShape& unextended_input_shape, + const T* input_data, + const RuntimeShape& unextended_output_size_shape, + const int32_t* output_size_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + // If half_pixel_centers is True, align_corners must be False. + TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners); + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_size_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape output_size_shape = + RuntimeShape::ExtendedShape(4, unextended_output_size_shape); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + int32_t batches = MatchingDim(input_shape, 0, output_shape, 0); + int32_t input_height = input_shape.Dims(1); + int32_t input_width = input_shape.Dims(2); + int32_t depth = MatchingDim(input_shape, 3, output_shape, 3); + + TFLITE_DCHECK_EQ(output_size_shape.Dims(0), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(1), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(2), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(3), 2); + int32_t output_height = + output_size_data[Offset(output_size_shape, 0, 0, 0, 0)]; + int32_t output_width = + output_size_data[Offset(output_size_shape, 0, 0, 0, 1)]; + + float height_scale = static_cast(input_height) / output_height; + float width_scale = static_cast(input_width) / output_width; + if (op_params.align_corners && output_height > 1) { + height_scale = static_cast(input_height - 1) / (output_height - 1); + } + if (op_params.align_corners && output_width > 1) { + width_scale = static_cast(input_width - 1) / (output_width - 1); + } + const float rounding_offset = std::numeric_limits::is_integer ? 
.5f : .0f; + + for (int b = 0; b < batches; ++b) { + for (int y = 0; y < output_height; ++y) { + float input_y; + int32_t y0, y1; + ComputeInterpolationValues(y, height_scale, op_params.half_pixel_centers, + input_height, &input_y, &y0, &y1); + for (int x = 0; x < output_width; ++x) { + float input_x; + int32_t x0, x1; + ComputeInterpolationValues(x, width_scale, op_params.half_pixel_centers, + input_width, &input_x, &x0, &x1); + for (int c = 0; c < depth; ++c) { + T interpolation = + static_cast(input_data[Offset(input_shape, b, y0, x0, c)] * + (1 - (input_y - y0)) * (1 - (input_x - x0)) + + input_data[Offset(input_shape, b, y1, x0, c)] * + (input_y - y0) * (1 - (input_x - x0)) + + input_data[Offset(input_shape, b, y0, x1, c)] * + (1 - (input_y - y0)) * (input_x - x0) + + input_data[Offset(input_shape, b, y1, x1, c)] * + (input_y - y0) * (input_x - x0) + + rounding_offset); + output_data[Offset(output_shape, b, y, x, c)] = interpolation; + } + } + } + } +} + +inline void ComputeInterpolationValuesInteger( + const int32_t value, const int32_t scale_10, const bool half_pixel_centers, + int32_t input_size, int32_t* scaled_value, int32_t* lower_bound, + int32_t* upper_bound) { + if (half_pixel_centers) { + *scaled_value = value * scale_10 + scale_10 / 2 - (1 << 9); + } else { + *scaled_value = value * scale_10; + } + constexpr int32_t zero = 0; + *lower_bound = std::max(*scaled_value / (1 << 10), zero); + *upper_bound = + std::min((*scaled_value + (1 << 10) - 1) / (1 << 10), input_size - 1); +} + +// Same as above but doesn't use any floating-point for the resize +template +inline void ResizeBilinearInteger( + const tflite::ResizeBilinearParams& op_params, + const RuntimeShape& unextended_input_shape, const T* input_data, + const RuntimeShape& unextended_output_size_shape, + const int32_t* output_size_data, + const RuntimeShape& unextended_output_shape, T* output_data) { + // If half_pixel_centers is True, align_corners must be False. 
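// Worked example of the float interpolation weights used just above
// (illustrative numbers): for input_y = 1.25 (y0 = 1, y1 = 2) and
// input_x = 3.5 (x0 = 3, x1 = 4) the four corner weights are
//   (1 - 0.25) * (1 - 0.5) = 0.375  for (y0, x0)
//        0.25  * (1 - 0.5) = 0.125  for (y1, x0)
//   (1 - 0.25) *       0.5 = 0.375  for (y0, x1)
//        0.25  *       0.5 = 0.125  for (y1, x1)
// They sum to 1, so the interpolated value stays within the range spanned by
// the four input pixels (plus the 0.5 rounding offset for integer types).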
+ TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners); + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_size_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape output_size_shape = + RuntimeShape::ExtendedShape(4, unextended_output_size_shape); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + const int32_t batches = MatchingDim(input_shape, 0, output_shape, 0); + const int32_t input_height = input_shape.Dims(1); + const int32_t input_width = input_shape.Dims(2); + const int32_t depth = MatchingDim(input_shape, 3, output_shape, 3); + + TFLITE_DCHECK_EQ(output_size_shape.Dims(0), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(1), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(2), 1); + TFLITE_DCHECK_EQ(output_size_shape.Dims(3), 2); + const int32_t output_height = + output_size_data[Offset(output_size_shape, 0, 0, 0, 0)]; + const int32_t output_width = + output_size_data[Offset(output_size_shape, 0, 0, 0, 1)]; + + int32_t height_scale_10 = + ((1 << 10) * input_height + output_height / 2) / output_height; + int32_t width_scale_10 = + ((1 << 10) * input_width + output_width / 2) / output_width; + if (op_params.align_corners && output_height > 1) { + height_scale_10 = + ((1 << 10) * (input_height - 1) + (output_height - 1) / 2) / + (output_height - 1); + } + if (op_params.align_corners && output_width > 1) { + width_scale_10 = ((1 << 10) * (input_width - 1) + (output_width - 1) / 2) / + (output_width - 1); + } + + for (int b = 0; b < batches; ++b) { + for (int y = 0; y < output_height; ++y) { + int32_t input_y, y0, y1; + ComputeInterpolationValuesInteger(y, height_scale_10, + op_params.half_pixel_centers, + input_height, &input_y, &y0, &y1); + for (int x = 0; x < output_width; ++x) { + int32_t input_x, x0, x1; + ComputeInterpolationValuesInteger(x, width_scale_10, + op_params.half_pixel_centers, + input_width, &input_x, &x0, &x1); + for (int c = 0; c < depth; ++c) { + const int64_t output_20_ll = + static_cast( + input_data[Offset(input_shape, b, y0, x0, c)]) * + ((1 << 10) - (input_y - (1 << 10) * y0)) * + ((1 << 10) - (input_x - (1 << 10) * x0)); + const int64_t output_20_lu = + static_cast( + input_data[Offset(input_shape, b, y1, x0, c)]) * + (input_y - (1 << 10) * y0) * + ((1 << 10) - (input_x - (1 << 10) * x0)); + const int64_t output_20_rl = + static_cast( + input_data[Offset(input_shape, b, y0, x1, c)]) * + ((1 << 10) - (input_y - (1 << 10) * y0)) * + (input_x - (1 << 10) * x0); + const int64_t output_20_ru = + static_cast( + input_data[Offset(input_shape, b, y1, x1, c)]) * + (input_y - (1 << 10) * y0) * (input_x - (1 << 10) * x0); + const int64_t output_20 = + output_20_ll + output_20_lu + output_20_rl + output_20_ru; + const int64_t round = (output_20 > 0) ? 
(1 << 19) : -(1 << 19); + const T interpolation = + static_cast((output_20 + round) / (1 << 20)); + output_data[Offset(output_shape, b, y, x, c)] = interpolation; + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_BILINEAR_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h index 0fd7f01..bbed46a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h @@ -15,6 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_RESIZE_NEAREST_NEIGHBOR_H_ +#include #include #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/select.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/select.h new file mode 100644 index 0000000..2230c96 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/select.h @@ -0,0 +1,151 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ + +#include + +#include "edge-impulse-sdk/third_party/ruy/ruy/profiler/instrumentation.h" // from @ruy +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +void Select(const RuntimeShape& input_condition_shape, + const D* input_condition_data, const RuntimeShape& input_x_shape, + const T* input_x_data, const RuntimeShape& input_y_shape, + const T* input_y_data, const RuntimeShape& output_shape, + T* output_data) { + ruy::profiler::ScopeLabel label("Select"); + int64_t flatsize; + // Allow select operator executions on mixed scalar tensors and one element + // tensors. + if (input_condition_shape.FlatSize() == 1 && input_x_shape.FlatSize() == 1 && + input_y_shape.FlatSize() == 1 && output_shape.FlatSize() == 1) { + flatsize = 1; + } else { + flatsize = MatchingFlatSize(input_condition_shape, input_x_shape, + input_y_shape, output_shape); + } + for (int64_t i = 0; i < flatsize; ++i) { + output_data[i] = + input_condition_data[i] ? 
input_x_data[i] : input_y_data[i]; + } +} + +template +void RankOneSelect(const RuntimeShape& input_condition_shape, + const D* input_condition_data, + const RuntimeShape& input_x_shape, const T* input_x_data, + const RuntimeShape& input_y_shape, const T* input_y_data, + const RuntimeShape& output_shape, T* output_data) { + ruy::profiler::ScopeLabel label("Select/RankOneSelect"); + const int64_t outer_size = input_condition_shape.FlatSize(); + int64_t inner_size; + if (input_condition_shape.DimensionsCount() == 0) { + inner_size = MatchingFlatSize(input_x_shape, input_y_shape, output_shape); + } else { + TFLITE_DCHECK_EQ( + MatchingDim(input_x_shape, 0, input_y_shape, 0, output_shape, 0), + outer_size); + inner_size = + MatchingFlatSizeSkipDim(input_x_shape, 0, input_y_shape, output_shape); + } + + int64_t offset = 0; + for (int64_t i = 0; i < outer_size; i++) { + const T* input_data = input_condition_data[i] ? input_x_data : input_y_data; + memcpy(output_data + offset, input_data + offset, inner_size * sizeof(T)); + offset += inner_size; + } +} + +template +void BroadcastSelect5DSlow(const RuntimeShape& input_condition_shape, + const D* input_condition_data, + const RuntimeShape& input_x_shape, + const T* input_x_data, + const RuntimeShape& input_y_shape, + const T* input_y_data, + const RuntimeShape& output_shape, T* output_data) { + ruy::profiler::ScopeLabel label("Select/BroadcastSelectSlow"); + TFLITE_DCHECK_LE(input_condition_shape.DimensionsCount(), 5); + TFLITE_DCHECK_LE(input_x_shape.DimensionsCount(), 5); + TFLITE_DCHECK_LE(input_y_shape.DimensionsCount(), 5); + TFLITE_DCHECK_LE(output_shape.DimensionsCount(), 5); + + NdArrayDesc<5> desc_condition; + NdArrayDesc<5> desc_x; + NdArrayDesc<5> desc_y; + NdArrayDesc<5> desc_output; + const RuntimeShape extended_output_shape = + RuntimeShape::ExtendedShape(5, output_shape); + CopyDimsToDesc(extended_output_shape, &desc_output); + NdArrayDescsForElementwiseBroadcast(input_condition_shape, input_x_shape, + input_y_shape, &desc_condition, &desc_x, + &desc_y); + + // In Tensorflow, the dimensions are canonically named (batch_number, row, + // col, channel), with extents (batches, height, width, depth), with the + // trailing dimension changing most rapidly (channels has the smallest + // stride, typically 1 element). + // + // In generated C code, we store arrays with the dimensions reversed. The + // first dimension has smallest stride. + // + // We name our variables by their Tensorflow convention, but generate C code + // nesting loops such that the innermost loop has the smallest stride for + // the best cache behavior. 
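// Small self-contained illustration of the offset bookkeeping used in the
// loops below (ad hoc helper, not from the patch): for a dense 5-D array the
// flat offset of (n, b, y, x, c) follows the same "multiply by the next
// extent, then add" pattern, so the innermost index advances the offset by
// exactly one element.
inline int DenseOffset5D(const int extents[5], int n, int b, int y, int x,
                         int c) {
  return (((n * extents[1] + b) * extents[2] + y) * extents[3] + x) *
             extents[4] +
         c;
}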
+ for (int n = 0; n < desc_output.extents[0]; ++n) { + int out_idx_n = desc_output.extents[1] * n; + int cond_idx_n = desc_condition.strides[0] * n; + int in_idx1_n = desc_x.strides[0] * n; + int in_idx2_n = desc_y.strides[0] * n; + for (int b = 0; b < desc_output.extents[1]; ++b) { + int out_idx_b = (out_idx_n + b) * desc_output.extents[2]; + int cond_idx_b = cond_idx_n + desc_condition.strides[1] * b; + int in_idx1_b = in_idx1_n + desc_x.strides[1] * b; + int in_idx2_b = in_idx2_n + desc_y.strides[1] * b; + for (int y = 0; y < desc_output.extents[2]; ++y) { + int out_idx_y = (out_idx_b + y) * desc_output.extents[3]; + int cond_idx_y = cond_idx_b + desc_condition.strides[2] * y; + int in_idx1_y = in_idx1_b + desc_x.strides[2] * y; + int in_idx2_y = in_idx2_b + desc_y.strides[2] * y; + for (int x = 0; x < desc_output.extents[3]; ++x) { + int out_idx = (out_idx_y + x) * desc_output.extents[4]; + int cond_idx = cond_idx_y + desc_condition.strides[3] * x; + int in_idx1 = in_idx1_y + desc_x.strides[3] * x; + int in_idx2 = in_idx2_y + desc_y.strides[3] * x; + for (int c = 0; c < desc_output.extents[4]; ++c) { + output_data[out_idx] = input_condition_data[cond_idx] + ? input_x_data[in_idx1] + : input_y_data[in_idx2]; + out_idx++; + cond_idx += desc_condition.strides[4]; + in_idx1 += desc_x.strides[4]; + in_idx2 += desc_y.strides[4]; + } + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SELECT_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/slice.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/slice.h new file mode 100644 index 0000000..8214269 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/slice.h @@ -0,0 +1,80 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +namespace reference_ops { + +template +inline void Slice(const tflite::SliceParams& op_params, + const RuntimeShape& input_shape, + const RuntimeShape& output_shape, + SequentialTensorWriter* writer) { + const RuntimeShape ext_shape = RuntimeShape::ExtendedShape(5, input_shape); + TFLITE_DCHECK_LE(op_params.begin_count, 5); + TFLITE_DCHECK_LE(op_params.size_count, 5); + const int begin_count = op_params.begin_count; + const int size_count = op_params.size_count; + // We front-pad the begin and size vectors. + int start[5]; + int stop[5]; + for (int i = 0; i < 5; ++i) { + int padded_i = 5 - i; + start[i] = + begin_count < padded_i ? 
0 : op_params.begin[begin_count - padded_i]; + stop[i] = + (size_count < padded_i || op_params.size[size_count - padded_i] == -1) + ? ext_shape.Dims(i) + : start[i] + op_params.size[size_count - padded_i]; + } + + for (int i0 = start[0]; i0 < stop[0]; ++i0) { + for (int i1 = start[1]; i1 < stop[1]; ++i1) { + for (int i2 = start[2]; i2 < stop[2]; ++i2) { + for (int i3 = start[3]; i3 < stop[3]; ++i3) { + for (int i4 = start[4]; i4 < stop[4]; ++i4) { + writer->Write(Offset(ext_shape, i0, i1, i2, i3, i4)); + } + } + } + } + } +} + +template +inline void Slice(const tflite::SliceParams& op_params, + const RuntimeShape& input_shape, const T* input_data, + const RuntimeShape& output_shape, T* output_data) { + SequentialTensorWriter writer(input_data, output_data); + return Slice(op_params, input_shape, output_shape, &writer); +} + +template +inline void Slice(const tflite::SliceParams& op_params, + const RuntimeShape& input_shape, const TfLiteTensor* input, + const RuntimeShape& output_shape, TfLiteTensor* output) { + SequentialTensorWriter writer(input, output); + return Slice(op_params, input_shape, output_shape, &writer); +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SLICE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h index 25a1b45..1c6c0b9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h @@ -15,6 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SOFTMAX_H_ +#include #include #include "edge-impulse-sdk/third_party/gemmlowp/fixedpoint/fixedpoint.h" @@ -159,7 +160,7 @@ inline int16_t SoftMaxCalculateExp(const SoftmaxParams& params, std::min(std::max(sym_scaled_diff, static_cast(-32768)), static_cast(32767)); // apply the exp() LUT activation function - return generic_int16_table_lookup(sat_sym_scaled_diff, params.exp_lut); + return LUTLookup(sat_sym_scaled_diff, params.exp_lut); } // Quantized softmax with int16_t input and int16_t output. inline void SoftmaxInt16(const SoftmaxParams& params, @@ -207,8 +208,8 @@ inline void SoftmaxInt16(const SoftmaxParams& params, std::min(std::max(sym_shifted_sum, static_cast(-32768)), static_cast(32767))); // apply 1/(1 + x) LUT activation function - int16_t reciprocal_scale_Q015 = generic_int16_table_lookup( - sat_sym_shifted_sum, params.one_over_one_plus_x_lut); + int16_t reciprocal_scale_Q015 = + LUTLookup(sat_sym_shifted_sum, params.one_over_one_plus_x_lut); // Rescale the exp_result with reciprocal // range of output is [0, 32767] correspond to [0.0, 1.0] diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/space_to_depth.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/space_to_depth.h new file mode 100644 index 0000000..53260ae --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/space_to_depth.h @@ -0,0 +1,80 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace reference_ops { + +template +inline void SpaceToDepth(const tflite::SpaceToDepthParams& op_params, + const RuntimeShape& unextended_input_shape, + const T* input_data, + const RuntimeShape& unextended_output_shape, + T* output_data) { + TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); + const RuntimeShape input_shape = + RuntimeShape::ExtendedShape(4, unextended_input_shape); + const RuntimeShape output_shape = + RuntimeShape::ExtendedShape(4, unextended_output_shape); + + const int input_depth = input_shape.Dims(3); + const int input_width = input_shape.Dims(2); + const int input_height = input_shape.Dims(1); + const int input_batch = input_shape.Dims(0); + + const int output_depth = output_shape.Dims(3); + const int output_width = output_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_batch = output_shape.Dims(0); + + const int32_t block_size = op_params.block_size; + + TFLITE_DCHECK_EQ(input_width, output_width * block_size); + TFLITE_DCHECK_EQ(input_height, output_height * block_size); + TFLITE_DCHECK_EQ(input_depth * block_size * block_size, output_depth); + TFLITE_DCHECK_EQ(input_batch, output_batch); + + for (int in_b = 0; in_b < input_batch; ++in_b) { + for (int in_h = 0; in_h < input_height; ++in_h) { + for (int in_w = 0; in_w < input_width; ++in_w) { + for (int in_d = 0; in_d < input_depth; ++in_d) { + const int out_d = + in_d + ((in_h % block_size) * block_size + in_w % block_size) * + input_depth; + const int out_w = in_w / block_size; + const int out_h = in_h / block_size; + const int out_b = in_b; + + const int input_index = Offset(input_shape, in_b, in_h, in_w, in_d); + const int output_index = + Offset(output_shape, out_b, out_h, out_w, out_d); + + output_data[output_index] = input_data[input_index]; + } + } + } + } +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_SPACE_TO_DEPTH_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/strided_slice.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/strided_slice.h index 7d111d0..493d8f3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/strided_slice.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/strided_slice.h @@ -31,10 +31,6 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params, const RuntimeShape& unextended_input_shape, 
const RuntimeShape& unextended_output_shape, SequentialTensorWriter* writer) { - using strided_slice::LoopCondition; - using strided_slice::StartForAxis; - using strided_slice::StopForAxis; - ruy::profiler::ScopeLabel label("StridedSlice"); // Note that the output_shape is not used herein. @@ -51,41 +47,71 @@ inline void StridedSlice(const tflite::StridedSliceParams& op_params, // requires (ie. all shapes must be 5D and are given backwards). strided_slice::StridedSlicePadIndices(¶ms_copy, 5); - const int start_0 = StartForAxis(params_copy, input_shape, 0); - const int stop_0 = StopForAxis(params_copy, input_shape, 0, start_0); - const int start_1 = StartForAxis(params_copy, input_shape, 1); - const int stop_1 = StopForAxis(params_copy, input_shape, 1, start_1); - const int start_2 = StartForAxis(params_copy, input_shape, 2); - const int stop_2 = StopForAxis(params_copy, input_shape, 2, start_2); - const int start_3 = StartForAxis(params_copy, input_shape, 3); - const int stop_3 = StopForAxis(params_copy, input_shape, 3, start_3); - const int start_4 = StartForAxis(params_copy, input_shape, 4); - const int stop_4 = StopForAxis(params_copy, input_shape, 4, start_4); - - for (int offset_0 = start_0 * input_shape.Dims(1), - end_0 = stop_0 * input_shape.Dims(1), - step_0 = params_copy.strides[0] * input_shape.Dims(1); - !LoopCondition(offset_0, end_0, params_copy.strides[0]); - offset_0 += step_0) { - for (int offset_1 = (offset_0 + start_1) * input_shape.Dims(2), - end_1 = (offset_0 + stop_1) * input_shape.Dims(2), - step_1 = params_copy.strides[1] * input_shape.Dims(2); - !LoopCondition(offset_1, end_1, params_copy.strides[1]); - offset_1 += step_1) { - for (int offset_2 = (offset_1 + start_2) * input_shape.Dims(3), - end_2 = (offset_1 + stop_2) * input_shape.Dims(3), - step_2 = params_copy.strides[2] * input_shape.Dims(3); - !LoopCondition(offset_2, end_2, params_copy.strides[2]); - offset_2 += step_2) { - for (int offset_3 = (offset_2 + start_3) * input_shape.Dims(4), - end_3 = (offset_2 + stop_3) * input_shape.Dims(4), - step_3 = params_copy.strides[3] * input_shape.Dims(4); - !LoopCondition(offset_3, end_3, params_copy.strides[3]); - offset_3 += step_3) { - for (int offset_4 = offset_3 + start_4, end_4 = offset_3 + stop_4; - !LoopCondition(offset_4, end_4, params_copy.strides[4]); - offset_4 += params_copy.strides[4]) { - writer->Write(offset_4); + const int start_0 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 0); + const int stop_0 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 0, start_0); + const int start_1 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 1); + const int stop_1 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 1, start_1); + const int start_2 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 2); + const int stop_2 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 2, start_2); + const int start_3 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 3); + const int stop_3 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 3, start_3); + const int start_4 = + strided_slice::StridedSliceStartForAxis(params_copy, input_shape, 4); + const int stop_4 = strided_slice::StridedSliceEndForAxis( + params_copy, input_shape, 4, start_4); + + auto lc = [&](int end, int stride, int index) { + if (stride < 0) { + return index > end; + } else { + return index < end; + } + }; + // With a static_cast it is not possible to 
initialize + // a variable of type 'const int *' + // with an rvalue of type 'const int32_t *' (aka 'const long *'). + // reinterpret_cast is required to handle this casting. + const int* shape = reinterpret_cast(input_shape.DimsData()); + const int* stride = reinterpret_cast(params_copy.strides); + const bool inner_stride_is_1 = params_copy.strides[4] == 1; + + for (int offset_0 = start_0; lc(stop_0, stride[0], offset_0); + offset_0 += stride[0]) { + for (int offset_1 = start_1; lc(stop_1, stride[1], offset_1); + offset_1 += stride[1]) { + for (int offset_2 = start_2; lc(stop_2, stride[2], offset_2); + offset_2 += stride[2]) { + for (int offset_3 = start_3; lc(stop_3, stride[3], offset_3); + offset_3 += stride[3]) { + // When the stride is 1, the inner loop is equivalent to the + // optimized slice inner loop. Otherwise, it is identical to the + // strided_slice reference implementation inner loop. + if (inner_stride_is_1) { + const int len = stop_4 - start_4; + int index = start_4 + offset_3 * shape[4] + + offset_2 * shape[3] * shape[4] + + offset_1 * shape[2] * shape[3] * shape[4] + + offset_0 * shape[1] * shape[2] * shape[3] * shape[4]; + if (len > 0) { + writer->WriteN(index, len); + } + } else { + for (int offset_4 = start_4; lc(stop_4, stride[4], offset_4); + offset_4 += stride[4]) { + int index = offset_4 + offset_3 * shape[4] + + offset_2 * shape[3] * shape[4] + + offset_1 * shape[2] * shape[3] * shape[4] + + offset_0 * shape[1] * shape[2] * shape[3] * shape[4]; + writer->Write(index); + } } } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/sub.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/sub.h index 7c66b63..44718a8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/sub.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/sub.h @@ -105,63 +105,6 @@ inline void BroadcastSubSlow(const ArithmeticParams& params, NDOpsHelper(output_desc, sub_func); } -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const uint8_t* input1_data, - const RuntimeShape& input2_shape, - const uint8_t* input2_data, - const RuntimeShape& output_shape, - uint8_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/uint8_t"); - TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); - TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- auto sub_func = [&](int indexes[N]) { - const int32_t input1_val = - params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; - const int32_t input2_val = - params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[SubscriptToIndex(output_desc, indexes)] = - static_cast(clamped_output); - }; - NDOpsHelper(output_desc, sub_func); -} - template inline void BroadcastSubSlow(const ArithmeticParams& params, const RuntimeShape& input1_shape, @@ -202,60 +145,6 @@ inline void BroadcastSubSlow(const ArithmeticParams& params, NDOpsHelper(output_desc, sub_func); } -template -inline void BroadcastSubSlow(const ArithmeticParams& params, - const RuntimeShape& input1_shape, - const int8_t* input1_data, - const RuntimeShape& input2_shape, - const int8_t* input2_data, - const RuntimeShape& output_shape, - int8_t* output_data) { - ruy::profiler::ScopeLabel label("BroadcastSubSlow/int8_t"); - NdArrayDesc desc1; - NdArrayDesc desc2; - NdArrayDesc output_desc; - NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, - &desc2); - CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - - // In Tensorflow, the dimensions are canonically named (batch_number, row, - // col, channel), with extents (batches, height, width, depth), with the - // trailing dimension changing most rapidly (channels has the smallest stride, - // typically 1 element). - // - // In generated C code, we store arrays with the dimensions reversed. The - // first dimension has smallest stride. - // - // We name our variables by their Tensorflow convention, but generate C code - // nesting loops such that the innermost loop has the smallest stride for the - // best cache behavior. 
- auto sub_func = [&](int indexes[N]) { - const int32_t input1_val = - params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; - const int32_t input2_val = - params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; - const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); - const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); - const int32_t scaled_input1_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input1_val, params.input1_multiplier, params.input1_shift); - const int32_t scaled_input2_val = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - shifted_input2_val, params.input2_multiplier, params.input2_shift); - const int32_t raw_sub = scaled_input1_val - scaled_input2_val; - const int32_t raw_output = - MultiplyByQuantizedMultiplierSmallerThanOneExp( - raw_sub, params.output_multiplier, params.output_shift) + - params.output_offset; - const int32_t clamped_output = - std::min(params.quantized_activation_max, - std::max(params.quantized_activation_min, raw_output)); - output_data[SubscriptToIndex(output_desc, indexes)] = - static_cast(clamped_output); - }; - NDOpsHelper(output_desc, sub_func); -} - template void BroadcastSubSlow(const ArithmeticParams& params, const RuntimeShape& input1_shape, @@ -376,19 +265,40 @@ inline void BroadcastSub16POTSlow(const ArithmeticParams& params, NDOpsHelper(output_desc, sub_func); } -// Element-wise Sub that can often be used for inner loop of broadcast sub as -// well as the non-broadcast sub. -inline void SubElementwise(int size, const ArithmeticParams& params, - const uint8_t* input1_data, - const uint8_t* input2_data, uint8_t* output_data) { - TFLITE_DCHECK_GT(params.input1_offset, -256); - TFLITE_DCHECK_GT(params.input2_offset, -256); - TFLITE_DCHECK_LT(params.input1_offset, 256); - TFLITE_DCHECK_LT(params.input2_offset, 256); +template +void BroadcastQuantSubSlow(const ArithmeticParams& params, + const RuntimeShape& input1_shape, + const T* input1_data, + const RuntimeShape& input2_shape, + const T* input2_data, + const RuntimeShape& output_shape, T* output_data) { + ruy::profiler::ScopeLabel label("BroadcastQuantSubSlow/T"); + TFLITE_DCHECK_LE(input1_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(input2_shape.DimensionsCount(), N); + TFLITE_DCHECK_LE(output_shape.DimensionsCount(), N); + NdArrayDesc desc1; + NdArrayDesc desc2; + NdArrayDesc output_desc; + NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1, + &desc2); + CopyDimsToDesc(RuntimeShape::ExtendedShape(N, output_shape), &output_desc); - for (int i = 0; i < size; ++i) { - const int32_t input1_val = params.input1_offset + input1_data[i]; - const int32_t input2_val = params.input2_offset + input2_data[i]; + // In Tensorflow, the dimensions are canonically named (batch_number, row, + // col, channel), with extents (batches, height, width, depth), with the + // trailing dimension changing most rapidly (channels has the smallest stride, + // typically 1 element). + // + // In generated C code, we store arrays with the dimensions reversed. The + // first dimension has smallest stride. + // + // We name our variables by their Tensorflow convention, but generate C code + // nesting loops such that the innermost loop has the smallest stride for the + // best cache behavior. 
+ auto sub_func = [&](int indexes[N]) { + const int32_t input1_val = + params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)]; + const int32_t input2_val = + params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)]; const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); const int32_t scaled_input1_val = @@ -405,21 +315,18 @@ inline void SubElementwise(int size, const ArithmeticParams& params, const int32_t clamped_output = std::min(params.quantized_activation_max, std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); - } + output_data[SubscriptToIndex(output_desc, indexes)] = + static_cast(clamped_output); + }; + NDOpsHelper(output_desc, sub_func); } // Element-wise add that can often be used for inner loop of broadcast add as // well as the non-broadcast add. +template inline void SubElementwise(int size, const ArithmeticParams& params, - const int8_t* input1_data, const int8_t* input2_data, - int8_t* output_data) { - const int32_t int8_max_value = std::numeric_limits::max(); - TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value); - TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value); - TFLITE_DCHECK_LE(params.input1_offset, int8_max_value); - TFLITE_DCHECK_LE(params.input2_offset, int8_max_value); - + const T* input1_data, const T* input2_data, + T* output_data) { for (int i = 0; i < size; ++i) { const int32_t input1_val = params.input1_offset + input1_data[i]; const int32_t input2_val = params.input2_offset + input2_data[i]; @@ -439,7 +346,7 @@ inline void SubElementwise(int size, const ArithmeticParams& params, const int32_t clamped_output = std::min(params.quantized_activation_max, std::max(params.quantized_activation_min, raw_output)); - output_data[i] = static_cast(clamped_output); + output_data[i] = static_cast(clamped_output); } } @@ -469,11 +376,27 @@ inline void Sub(const ArithmeticParams& params, const int flat_size = MatchingElementsSize(input1_shape, input2_shape, output_shape); - const int32_t int8_max_value = std::numeric_limits::max(); - TFLITE_DCHECK_GE(params.input1_offset, -1 * int8_max_value); - TFLITE_DCHECK_GE(params.input2_offset, -1 * int8_max_value); - TFLITE_DCHECK_LE(params.input1_offset, int8_max_value); - TFLITE_DCHECK_LE(params.input2_offset, int8_max_value); + TFLITE_DCHECK_GE(params.input1_offset, -128); + TFLITE_DCHECK_GE(params.input2_offset, -128); + // offset = -quantization_params.zero_point in PrepareGeneralSubOp(). + // So it's maximum can be 128 not 127. 
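+  // Roughly, in the dequantized view (real = scale * (q - zero_point)) this
+  // kernel computes real_out = real1 - real2 per element, with the
+  // multipliers and shifts in params pre-computed from the three scales.
+  // Example: an int8 input with zero_point = -128 gives offset = 128, which
+  // is exactly the upper bound checked below, while zero_point = 127 gives
+  // offset = -127, safely above the -128 lower bound checked above.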
+ TFLITE_DCHECK_LE(params.input1_offset, 128); + TFLITE_DCHECK_LE(params.input2_offset, 128); + SubElementwise(flat_size, params, input1_data, input2_data, output_data); +} + +inline void Sub(const ArithmeticParams& params, + const RuntimeShape& input1_shape, const int16_t* input1_data, + const RuntimeShape& input2_shape, const int16_t* input2_data, + const RuntimeShape& output_shape, int16_t* output_data) { + TFLITE_DCHECK_LE(params.quantized_activation_min, + params.quantized_activation_max); + + const int flat_size = + MatchingElementsSize(input1_shape, input2_shape, output_shape); + + TFLITE_DCHECK_EQ(params.input1_offset, 0); + TFLITE_DCHECK_EQ(params.input2_offset, 0); SubElementwise(flat_size, params, input1_data, input2_data, output_data); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose.h new file mode 100644 index 0000000..d236420 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose.h @@ -0,0 +1,203 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +namespace reference_ops { + +namespace transpose_internal { + +// Recursively explores all the dimensions of the output tensor and writes the +// corresponding input tensor data. +// +// - depth: the current depth of the recursion. +// - dims: tensor dimension count, also `perm` size. +// - perm: permutation array. +// - input_data: Running input data pointer. If depth == num_dims-1, this points +// to the first element of the last dimension to traverse. +// - input_stride: Reverse partial product of input shapes. +// - output_data: Running output data pointer. If depth == num_dims-1, this +// points to the first element of the last dimension to traverse. +// - output_stride: Reverse partial product of output shapes. +// - output_shape: Shape of the output tensor. +// +// ## Algorithm explanation +// +// Assume a 3D tensor T with a shape of [I, J, K] stored in row major order. +// T[i, j, k] is at position `i*J*K + j*K + k` in the tensor buffer. +// +// If we want to go through the whole tensor iteratively, we can use loops. +// +// ``` +// for(i = 0; i < I; ++i) { +// for(j = 0; j < J; ++j) { +// for(k = 0; k < K; ++k) { +// T.data[i*J*K + j*K + k] = ... +// } +// } +// } +// ``` +// +// We can also compute the offset as we go through the loops. 
+// +// ``` +// stride_i = K * J; +// stride_j = K; +// stride_k = 1; +// for(i = 0; i < I; ++i) { +// offset_i = i * stride_i; +// offset_j = 0; +// for(j = 0; j < J; ++j) { +// offset_j += stride_j; +// offset_k = 0; +// for(k = 0; k < K; ++k) { +// offset_k += stride_k; +// T.data[offset_i + offset_j + offset_k] = ... +// } +// } +// } +// ``` +// +// This nicely extends to a recursive version which is the base of this +// algorithm and supports any number of dimensions. +// +// ``` +// shape = [I, J, K] +// strides = [K*J, K, 1] +// void recurse(T* data, shape, strides, depth = 0) { +// if(depth == shape.size) { +// *data = ... +// } else { +// for(a = 0; a < shape[depth]; ++a) { +// recurse(data, shape, strides, depth+1); +// data += strides[depth]; +// } +// } +// } +// ``` +template +void TransposeImpl(const int depth, const int dims, const int32_t* perm, + const T* input_data, const int* input_stride, T* output_data, + const int* output_stride, const int32_t* output_shape) { + const int dimension_size = output_shape[depth]; + if (depth == dims - 1) { + const int loop_stride = input_stride[perm[depth]]; + for (int i = 0; i < dimension_size; ++i) { + output_data[i] = *input_data; + input_data += loop_stride; + } + } else { + for (int i = 0; i < dimension_size; ++i) { + TransposeImpl(depth + 1, dims, perm, input_data, input_stride, + output_data, output_stride, output_shape); + + input_data += input_stride[perm[depth]]; + output_data += output_stride[depth]; + } + } +} + +// Compile-time switch to get the storage type of the transposition. +template +struct TransposeStorageType; + +template <> +struct TransposeStorageType<1> { + using type = int8_t; +}; + +template <> +struct TransposeStorageType<2> { + using type = int16_t; +}; + +template <> +struct TransposeStorageType<4> { + using type = int32_t; +}; + +template <> +struct TransposeStorageType<8> { + using type = int64_t; +}; + +// Sets up the stride arrays for the recursive transpose algorithm. +// +// Implementation notes: +// +// This is a reverse partial product. We could use standard algorithms to +// implement this but the result is not a readable and is tricky to get right +// because the first element must be set to 1, which leads to offset +// shenanigans: +// +// ``` +// stride[dims - 1] = 1; +// std::partial_sum(std::make_reverse_iterator(shape + dims), +// std::make_reverse_iterator(shape + 1), +// stride.rend() - input_rank + 1, std::multiplies()); +// ``` +// +// Note that Abseil isn't used in kernels implementation. That would make the +// above solution more readable. +inline void SetupTransposeStrides( + std::array& stride, const int32_t* shape, + const int dims) { + stride[dims - 1] = 1; + for (int i = dims - 2; i >= 0; --i) { + stride[i] = stride[i + 1] * shape[i + 1]; + } +} + +} // namespace transpose_internal + +// Copies a tensor to an other buffer and permutes its dimensions. +// +// Note: template parameter N is not used anymore. It is kept for API +// compatibility with TFLite micro. +template +void Transpose(const TransposeParams& params, const RuntimeShape& input_shape, + const T* input_data, const RuntimeShape& output_shape, + T* output_data) { + using transpose_internal::SetupTransposeStrides; + using transpose_internal::TransposeImpl; + using transpose_internal::TransposeStorageType; + // Transpose kernel only does rearranging values not numeric evaluations on + // each cell. It's safe to implement per size of scalar type and this trick + // keeps the total code size in a reasonable range. 
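+  // For instance, a float tensor is shuffled through int32_t storage, since
+  // sizeof(float) == sizeof(int32_t) == 4 and only raw bytes are moved.
+  // Example (arbitrary shape): for an input shape of [2, 3, 4],
+  // SetupTransposeStrides below yields strides of [12, 4, 1]; the output
+  // strides are the analogous reverse partial products of the output shape.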
+ using StorageType = typename TransposeStorageType::type; + const StorageType* const input_data_storage = + reinterpret_cast(input_data); + StorageType* const output_data_storage = + reinterpret_cast(output_data); + + const int dims = input_shape.DimensionsCount(); + std::array input_stride, output_stride; + SetupTransposeStrides(input_stride, input_shape.DimsData(), dims); + SetupTransposeStrides(output_stride, output_shape.DimsData(), dims); + TransposeImpl(0, dims, ¶ms.perm[0], input_data_storage, + input_stride.data(), output_data_storage, output_stride.data(), + output_shape.DimsData()); +} + +} // namespace reference_ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose_conv.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose_conv.h index ca8a6e9..55fae7d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose_conv.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose_conv.h @@ -15,6 +15,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_TRANSPOSE_CONV_H_ +#include + #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" @@ -47,6 +49,8 @@ inline void TransposeConv( const int filter_width = filter_shape.Dims(2); const int output_height = output_shape.Dims(1); const int output_width = output_shape.Dims(2); + const float output_activation_min = params.float_activation_min; + const float output_activation_max = params.float_activation_max; if (bias_data) { TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); } @@ -97,14 +101,18 @@ inline void TransposeConv( } } } - if (bias_data) { - for (int batch = 0; batch < batches; ++batch) { - for (int out_y = 0; out_y < output_height; ++out_y) { - for (int out_x = 0; out_x < output_width; ++out_x) { - for (int out_channel = 0; out_channel < output_depth; ++out_channel) { - output_data[Offset(output_shape, batch, out_y, out_x, - out_channel)] += bias_data[out_channel]; - } + + for (int batch = 0; batch < batches; ++batch) { + for (int out_y = 0; out_y < output_height; ++out_y) { + for (int out_x = 0; out_x < output_width; ++out_x) { + for (int out_channel = 0; out_channel < output_depth; ++out_channel) { + float acc = output_data[Offset(output_shape, batch, out_y, out_x, + out_channel)]; + if (bias_data) acc += bias_data[out_channel]; + + output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] = + ActivationFunctionWithMinMax(acc, output_activation_min, + output_activation_max); } } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.cc new file mode 100644 index 0000000..6ae01b8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.cc @@ -0,0 +1,809 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include +#include +#include +#include +#include +#include + +#include "edge-impulse-sdk/third_party/gemmlowp/fixedpoint/fixedpoint.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils_impl.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { +namespace tensor_utils { + +namespace { +const int32_t kInt16Max = std::numeric_limits::max(); +const int32_t kInt16Min = std::numeric_limits::min(); +} // namespace + +void PortableSymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* min_value, + float* max_value, float* scaling_factor) { + auto minmax = std::minmax_element(values, values + size); + *min_value = *minmax.first; + *max_value = *minmax.second; + + PortableSymmetricQuantizeFloats(values, size, quantized_values, *min_value, + *max_value, scaling_factor); +} + +void PortableSymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float min_value, + float max_value, float* scaling_factor) { + const int32_t kScale = 127; + const float range = std::max(std::abs(min_value), std::abs(max_value)); + if (range == 0) { + memset(quantized_values, 0, size * sizeof(int8_t)); + *scaling_factor = 1; + return; + } + *scaling_factor = range / kScale; + const float scaling_factor_inv = kScale / range; + for (int i = 0; i < size; ++i) { + const int32_t quantized_value = + static_cast(TfLiteRound(values[i] * scaling_factor_inv)); + // Clamp: just in case some odd numeric offset. 
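+    // Worked example (arbitrary numbers): with a range of 6.35,
+    // scaling_factor = 6.35 / 127 = 0.05 and scaling_factor_inv = 20, so a
+    // value of 1.27 maps to round(25.4) = 25, well inside [-127, 127].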
+ quantized_values[i] = static_cast( + std::min(kScale, std::max(-kScale, quantized_value))); + } +} + +void PortableAsymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, + float* scaling_factor, int32_t* offset) { + const int32_t kMinScale = -128; + const int32_t kMaxScale = 127; + const double qmin_double = kMinScale; + const double qmax_double = kMaxScale; + const auto minmax = std::minmax_element(values, values + size); + const double rmin = static_cast(std::min(0.0f, *minmax.first)); + const double rmax = static_cast(std::max(0.0f, *minmax.second)); + if (rmin == rmax) { + memset(quantized_values, 0, size * sizeof(int8_t)); + *scaling_factor = 1; + *offset = 0; + return; + } else { + double scale = (rmax - rmin) / (qmax_double - qmin_double); + const double zero_point_from_min = qmin_double - rmin / scale; + const double zero_point_from_max = qmax_double - rmax / scale; + const double zero_point_from_min_error = + std::abs(qmin_double) + std::abs(rmin / scale); + const double zero_point_from_max_error = + std::abs(qmax_double) + std::abs(rmax / scale); + const double zero_point_double = + zero_point_from_min_error < zero_point_from_max_error + ? zero_point_from_min + : zero_point_from_max; + int8_t nudged_zero_point = 0; + if (zero_point_double <= qmin_double) { + nudged_zero_point = kMinScale; + } else if (zero_point_double >= qmax_double) { + nudged_zero_point = kMaxScale; + } else { + nudged_zero_point = static_cast(round(zero_point_double)); + } + *scaling_factor = scale; + *offset = nudged_zero_point; + } + const float scaling_factor_inv = 1.0f / *scaling_factor; + for (int i = 0; i < size; ++i) { + const int32_t quantized_value = static_cast( + TfLiteRound(*offset + values[i] * scaling_factor_inv)); + quantized_values[i] = + std::min(kMaxScale, std::max(kMinScale, quantized_value)); + } +} + +void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix, + int m_rows, int m_cols, + const float* vector, + int n_batch, float* result) { + float* result_in_batch = result; + for (int b = 0; b < n_batch; b++) { + const float* matrix_ptr = matrix; + for (int r = 0; r < m_rows; r++) { + float dot_prod = 0.0f; + const float* vector_in_batch = vector + b * m_cols; + for (int c = 0; c < m_cols; c++) { + dot_prod += *matrix_ptr++ * *vector_in_batch++; + } + *result_in_batch += dot_prod; + ++result_in_batch; + } + } +} + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result) { + for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) { + const float batch_scaling_factor = scaling_factors[batch]; + // Get the address of the first row. + const int8_t* row_ptr = matrix; + for (int row = 0; row < m_rows; ++row) { + // Initialize the dot product sum for the row to 0. + int32_t dotprod = 0; +#if defined(__GNUC__) + // Prefetch the row to cache. 
+ __builtin_prefetch(row_ptr, 0 /* prefetch for read */, + 3 /* temporal locality */); +#endif + for (int col = 0; col < m_cols; ++col, ++row_ptr) { + dotprod += (*row_ptr) * (vectors[col]); + } // for col + *result += dotprod * batch_scaling_factor; + ++result; + } // for row + } // for batch +} + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, CpuBackendContext* context) { + if (input_offset == nullptr) { + PortableMatrixBatchVectorMultiplyAccumulate( + matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result); + return; + } + if (!compute_row_sums || *compute_row_sums) { + PortableReductionSumVector(matrix, row_sums, m_rows, m_cols); + if (compute_row_sums) { + *compute_row_sums = false; + } + } + + for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) { + const float batch_scaling_factor = scaling_factors[batch]; + const int32_t batch_offset = input_offset[batch]; + const int8_t* row_ptr = matrix; + for (int row = 0; row < m_rows; ++row) { + int32_t dotprod = 0; + float scale = batch_scaling_factor; + if (per_channel_scale) { + scale *= per_channel_scale[row]; + } +#if defined(__GNUC__) + // Prefetch the row to cache. + __builtin_prefetch(row_ptr, 0 /* prefetch for read */, + 3 /* temporal locality */); +#endif + for (int col = 0; col < m_cols; ++col, ++row_ptr) { + dotprod += (*row_ptr) * vectors[col]; + } // for col + dotprod -= row_sums[row] * batch_offset; + *result += dotprod * scale; + ++result; + } // for row + } // for batch +} + +void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4( + const float* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const float* __restrict__ vector, int n_batch, float* __restrict__ result) { + const int kBlockSize = 4; + TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0); + for (int batch = 0; batch < n_batch; batch++) { + const float* matrix_ptr = matrix; + for (int row = 0; row < m_rows; row++) { + float dot_prod = 0.0f; + const float* vector_in_batch = vector + batch * m_cols; + for (int i = segments[row]; i < segments[row + 1]; i++) { + const int block_start_index = indices[i] * kBlockSize; + const float* vector_block_in_batch_ptr = + vector_in_batch + block_start_index; + for (int c = 0; c < kBlockSize; c++) { + dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++; + } + } + result[batch * m_rows + row] += dot_prod; + } + } +} + +void PortableSparseMatrixBatchVectorMultiplyAccumulate1x16( + const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector, + int n_batch, const int32_t input_offset, const int32_t output_multiplier, + const int32_t output_shift, const int32_t output_offset, + const int32_t output_activation_min, const int32_t output_activation_max, + int8_t* __restrict__ result) { + const int kBlockSize = 16; + TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0); + for (int batch = 0; batch < n_batch; ++batch) { + const int8_t* matrix_ptr = matrix; + for (int row = 0; row < m_rows; ++row) { + int32_t dot_prod = 0; + const int8_t* vector_in_batch = vector + batch * m_cols; + for (int i = 
segments[row]; i < segments[row + 1]; ++i) { + const int block_start_index = indices[i] * kBlockSize; + const int8_t* vector_block_in_batch_ptr = + vector_in_batch + block_start_index; + for (int c = 0; c < kBlockSize; c++) { + dot_prod += *matrix_ptr * *vector_block_in_batch_ptr++; + dot_prod += *matrix_ptr++ * input_offset; + } + } + const int32_t bias_value = bias_vector != nullptr ? bias_vector[row] : 0; + dot_prod = MultiplyByQuantizedMultiplier(dot_prod + bias_value, + output_multiplier, output_shift); + dot_prod += output_offset; + result[batch * m_rows + row] = + static_cast(ActivationFunctionWithMinMax( + dot_prod, output_activation_min, output_activation_max)); + } + } +} + +void PortableSparseMatrixBatchVectorMultiplyAccumulate( + const float* __restrict__ matrix, const uint8_t* __restrict__ ledger, + int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, + float* __restrict__ result) { + const int kBlockSize = 16; + TFLITE_DCHECK_EQ( // NOLINT + m_cols % kBlockSize, 0); + for (int batch = 0; batch < n_batch; batch++) { + const float* matrix_ptr = matrix; + const uint8_t* ledger_ptr = ledger; + for (int row = 0; row < m_rows; row++) { + float dot_prod = 0.0f; + int num_nonzero_blocks = *ledger_ptr++; + if (num_nonzero_blocks > 0) { + const float* vector_in_batch = vector + batch * m_cols; + for (int i = 0; i < num_nonzero_blocks; i++) { + const int block_start_index = *ledger_ptr++ * kBlockSize; + const float* vector_block_in_batch_ptr = + vector_in_batch + block_start_index; + for (int c = 0; c < kBlockSize; c++) { + dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++; + } + } + } + result[batch * m_rows + row] += dot_prod; + } + } +} + +void PortableSparseMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows, + const int m_cols, const int8_t* __restrict__ vectors, + const float* scaling_factors, int n_batch, float* __restrict__ result) { + static const int kBlockSize = 16; + TFLITE_DCHECK_EQ( // NOLINT + m_cols % kBlockSize, 0); + for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) { + const float batch_scaling_factor = scaling_factors[batch]; + const uint8_t* ledger_ptr = ledger; + // Get the address of the first row. + const int8_t* row_ptr = matrix; + for (int row = 0; row < m_rows; ++row) { + // Initialize the dot product sum for the row to 0. + int32_t dotprod = 0; +#if defined(__GNUC__) + // Prefetch the row to cache. 
+ __builtin_prefetch(row_ptr, 0 /* prefetch for read */, + 3 /* temporal locality */); +#endif + int num_nonzero_blocks = *ledger_ptr++; + for (int i = 0; i < num_nonzero_blocks; i++) { + const int block_start_index = *ledger_ptr++ * kBlockSize; + const int8_t* vector_block_ptr = vectors + block_start_index; + for (int c = 0; c < kBlockSize; c++) { + dotprod += (*row_ptr++) * (*vector_block_ptr++); + } // for block + } // for num_nonzero_blocks + result[batch * m_rows + row] += dotprod * batch_scaling_factor; + } // for row + } // for batch +} + +template +void PortableMatrixBatchVectorMultiplyAccumulateImpl( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + T* output) { + const int16_t output_max = std::numeric_limits::max(); + const int16_t output_min = std::numeric_limits::min(); + for (int batch = 0; batch < n_batch; ++batch) { + for (int row = 0; row < n_output; ++row) { + int32_t acc = bias[row]; + for (int col = 0; col < n_input; ++col) { + int8_t input_val = input[batch * n_input + col]; + int8_t weights_val = input_to_gate_weights[row * n_input + col]; + acc += input_val * weights_val; + } + acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift); + acc += output_zp; + acc += output[batch * n_output + row]; + if (acc > output_max) { + acc = output_max; + } + if (acc < output_min) { + acc = output_min; + } + output[batch * n_output + row] = static_cast(acc); + } + } +} + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int16_t* output, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulateImpl( + input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, + n_output, output_zp, output); +} + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int8_t* output, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulateImpl( + input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, + n_output, output_zp, output); +} + +void PortableMatrixBatchVectorMultiply(const int8_t* input, + int32_t input_zeropoint, + const int8_t* input_to_gate_weights, + int32_t input_to_gate_effective_scale_a, + int32_t input_to_gate_effective_scale_b, + int32_t n_batch, int32_t n_input, + int32_t n_cell, int8_t* gate_output, + int8_t gate_output_zp) { + const int32_t int8_max = std::numeric_limits::max(); + const int32_t int8_min = std::numeric_limits::min(); + for (int batch = 0; batch < n_batch; ++batch) { + for (int row = 0; row < n_cell; ++row) { + int32_t acc = 0; + for (int col = 0; col < n_input; ++col) { + int32_t input_val = input[batch * n_input + col]; + int8_t weights_val = input_to_gate_weights[row * n_input + col]; + acc += (input_val - input_zeropoint) * weights_val; + } + acc = MultiplyByQuantizedMultiplier(acc, input_to_gate_effective_scale_a, + input_to_gate_effective_scale_b); + acc += gate_output_zp; + if (acc > int8_max) { + acc = int8_max; + } + if (acc < int8_min) { + acc = int8_min; + } + gate_output[batch * n_cell + row] = static_cast(acc); + } + } +} + +void 
PortableMatrixBatchVectorMultiply( + const int16_t* hidden, const int8_t* hidden_to_output_weights, + int32_t proj_effective_scale_a, int32_t proj_effective_scale_b, + const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden, + int32_t n_output, int32_t output_zp, int8_t* proj_output) { + const int16_t int8_max = std::numeric_limits::max(); + const int16_t int8_min = std::numeric_limits::min(); + for (int batch = 0; batch < n_batch; ++batch) { + for (int row = 0; row < n_output; ++row) { + int64_t acc = gate_bias[row]; + for (int col = 0; col < n_hidden; ++col) { + int16_t input_val = hidden[batch * n_hidden + col]; + int8_t weights_val = hidden_to_output_weights[row * n_hidden + col]; + int64_t curr = acc; + acc += input_val * weights_val; + if (input_val * weights_val > 0 && acc < curr) { + acc = std::numeric_limits::max(); + } + if (input_val * weights_val < 0 && acc > curr) { + acc = std::numeric_limits::min(); + } + } + acc = MultiplyByQuantizedMultiplier(acc, proj_effective_scale_a, + proj_effective_scale_b); + acc += output_zp; + if (acc > int8_max) { + acc = int8_max; + } + if (acc < int8_min) { + acc = int8_min; + } + proj_output[batch * n_output + row] = acc; + } + } +} + +void PortableApplyLayerNorm(const int16_t* input, + const int16_t* layer_norm_weights, + const int32_t* bias, int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, int32_t variance_limit, + int n_batch, int n_input, int16_t* output) { + // The square of std::pow(2, 10), which is the extra factor that makes sure + // normalized values has enough resolution. + static const int kTwoToPower20 = 1 << 20; + for (int i = 0; i < n_batch; ++i) { + int64_t sum = 0; + int64_t sum_sq = 0; + for (int j = 0; j < n_input; ++j) { + const int32_t index = i * n_input + j; + int32_t val = static_cast(input[index]); + sum += val; + sum_sq += val * val; + } + int32_t mean = + static_cast(static_cast(sum) * 1024 / n_input); + // TODO(b/173994730): Avoids overflow but only works for POT n_input. + int32_t temp = kTwoToPower20 / n_input; + int64_t variance = + sum_sq * temp - static_cast(mean) * static_cast(mean); + int32_t variance2 = static_cast(variance / kTwoToPower20); + if (variance2 < 1) { + variance2 = variance_limit; + } + int32_t stddev_inverse_a; + int stddev_inverse_b; + GetInvSqrtQuantizedMultiplierExp(variance2, /*reverse_shift*/ -1, + &stddev_inverse_a, &stddev_inverse_b); + + for (int j = 0; j < n_input; ++j) { + const int32_t index = i * n_input + j; + int32_t val = static_cast(input[index]); + int32_t shifted = 1024 * val - mean; + int32_t rescaled = MultiplyByQuantizedMultiplier( + shifted, stddev_inverse_a, stddev_inverse_b); + // TODO(jianlijianli): Saturate this. + int64_t val3 = rescaled * layer_norm_weights[j] + bias[j]; + int32_t val4 = + static_cast((val3 > 0 ? 
val3 + 512 : val3 - 512) / 1024); + int32_t val5 = MultiplyByQuantizedMultiplier(val4, layer_norm_scale_a, + layer_norm_scale_b + 12); + val5 = std::min(std::max(kInt16Min, val5), kInt16Max); + output[index] = static_cast(val5); + } + } +} + +void PortableApplyLayerNormFloat(const int16_t* input, + const int16_t* layer_norm_weights, + int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, + const int32_t* bias, int n_batch, int n_input, + int16_t* output) { + const int32_t int16_max = std::numeric_limits::max(); + const int32_t int16_min = std::numeric_limits::min(); + const float layer_norm_scale = + layer_norm_scale_a * + std::pow(2.0, static_cast(layer_norm_scale_b - 31)); + const float bias_scale = + static_cast(std::pow(2.0, -10)) * layer_norm_scale; + + for (int batch = 0; batch < n_batch; ++batch) { + float sum = 0.0f; + float sum_sq = 0.0f; + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const float value = static_cast(input[index]); + sum += value; + sum_sq += value * value; + } + const float mean = sum / n_input; + float stddev_inv = 0.0f; + const float variance = sum_sq / n_input - mean * mean; + if (variance == 0) { + stddev_inv = 1.0f / std::sqrt(1e-8f); + } else { + stddev_inv = 1.0f / std::sqrt(variance); + } + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const float normalized_value = + (static_cast(input[index]) - mean) * stddev_inv; + const float weighted_normalized_value = + normalized_value * layer_norm_weights[i] * layer_norm_scale + + bias[i] * bias_scale; + const int32_t quant_output = static_cast(round( + weighted_normalized_value * static_cast(std::pow(2, 12)))); + output[index] = std::min(int16_max, std::max(int16_min, quant_output)); + } + } +} + +void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix, + int32_t scalar, int32_t n_row, + int32_t n_col, int32_t* output) { + for (int i = 0; i < n_row; ++i) { + int32_t row_sum = 0; + for (int j = 0; j < n_col; ++j) { + row_sum += *matrix++; + } + output[i] += row_sum * scalar; + } +} + +void PortableApplySigmoid(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int c = 0; c < n_input; c++) { + using F3 = gemmlowp::FixedPoint; + using F0 = gemmlowp::FixedPoint; + const int index = batch * n_input + c; + F3 sigmoid_input = F3::FromRaw(input[index]); + F0 sigmoid_output = gemmlowp::logistic(sigmoid_input); + output[index] = sigmoid_output.raw(); + } + } +} + +void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output) { + const int32_t int16_max = std::numeric_limits::max(); + const int32_t int16_min = std::numeric_limits::min(); + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const float float_input = + input[index] * static_cast(std::pow(2, -12)); + const float float_output = 1.0f / (1.0f + std::exp(-float_input)); + const int32_t quant_output = static_cast( + float_output * static_cast(std::pow(2, 15))); + const int32_t quant_output_clamped = + std::min(int16_max, std::max(int16_min, quant_output)); + output[index] = static_cast(quant_output_clamped); + } + } +} + +template +void PortableApplyTanhImpl(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output) { + using FX = gemmlowp::FixedPoint; + using F0 = gemmlowp::FixedPoint; + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) 
{ + const int index = batch * n_input + i; + FX tanh_input = FX::FromRaw(input[index]); + F0 tanh_output = gemmlowp::tanh(tanh_input); + output[index] = tanh_output.raw(); + } + } +} + +void PortableApplyTanh(int32_t integer_bits, const int16_t* input, + int32_t n_batch, int32_t n_input, int16_t* output) { + assert(integer_bits <= 6); +#define DISPATCH_TANH(i) \ + case i: \ + PortableApplyTanhImpl(input, n_batch, n_input, output); \ + break; + switch (integer_bits) { + DISPATCH_TANH(0); + DISPATCH_TANH(1); + DISPATCH_TANH(2); + DISPATCH_TANH(3); + DISPATCH_TANH(4); + DISPATCH_TANH(5); + DISPATCH_TANH(6); + default: + return; + } +#undef DISPATCH_TANH +} + +void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch, + int32_t n_input, int32_t integer_bits, + int16_t* output) { + const int32_t int16_max = std::numeric_limits::max(); + const int32_t int16_min = std::numeric_limits::min(); + const double two = 2.0; + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const float float_input = + input[index] * std::pow(two, static_cast(integer_bits)); + const float float_output = std::tanh(float_input); + const int32_t quant_output = static_cast( + float_output * static_cast(std::pow(2, 15))); + const int32_t quant_output_clamped = + std::min(int16_max, std::max(int16_min, quant_output)); + output[index] = static_cast(quant_output_clamped); + } + } +} + +void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2, + int n_batch, int n_input, int shift, int16_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const int16_t a = input_1[index]; + const int16_t b = input_2[index]; + const int32_t value = static_cast(a) * static_cast(b); + output[index] = + static_cast(gemmlowp::RoundingDivideByPOT(value, shift)); + } + } +} + +void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2, + int32_t multiplier, int32_t shift, int32_t n_batch, + int32_t n_input, int32_t output_zp, int8_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + const int16_t a = input_1[index]; + const int16_t b = input_2[index]; + int32_t value = static_cast(a) * static_cast(b); + value = MultiplyByQuantizedMultiplier(value, multiplier, shift); + value += output_zp; + value = std::min(std::max(static_cast(-128), value), + static_cast(127)); + + output[index] = static_cast(value); + } + } +} + +void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2, + int n_batch, int n_input, int16_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + int32_t sum = input_1[index] + input_2[index]; + const int32_t sum_clamped = std::min(kInt16Max, std::max(kInt16Min, sum)); + output[index] = static_cast(sum_clamped); + } + } +} + +float PortableVectorVectorDotProduct(const float* vector1, const float* vector2, + int v_size) { + float result = 0.0; + for (int v = 0; v < v_size; v++) { + result += *vector1++ * *vector2++; + } + return result; +} + +namespace { +inline int32_t VectorVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, int v_size) { + int32_t result = 0; + for (int v = 0; v < v_size; v++) { + result += *vector1++ * *vector2++; + } + return result; +} +} // namespace + +void PortableBatchVectorBatchVectorDotProduct(const int16_t* 
vector1, + const int16_t* vector2, + int v_size, int n_batch, + int32_t* result) { + for (int b = 0; b < n_batch; b++) { + result[b] = VectorVectorDotProduct(vector1, vector2, v_size); + vector1 += v_size; + vector2 += v_size; + } +} + +void PortableVectorBatchVectorCwiseProductAccumulate( + const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch, + int32_t multiplier, int shift, int16_t* result) { + for (int b = 0; b < n_batch; b++) { + for (int v = 0; v < v_size; v++) { + int32_t prod = vector[v] * *batch_vector++; + prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift); + int32_t output = prod + *result; + output = std::max(std::min(static_cast(32767), output), + static_cast(-32768)); + *result++ = output; + } + } +} + +void PortableSub1Vector(const float* vector, int v_size, float* result) { + for (int v = 0; v < v_size; v++) { + *result++ = 1.0f - *vector++; + } +} + +void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result) { + static const int16_t kOne = 32767; + for (int v = 0; v < v_size; v++) { + *result++ = kOne - *vector++; + } +} + +void PortableVectorScalarMultiply(const int8_t* vector, const int v_size, + const float scale, float* result) { + for (int v = 0; v < v_size; ++v) { + *result++ = scale * *vector++; + } +} + +void PortableMeanStddevNormalization(const float* __restrict__ input_vector, + float* __restrict__ output_vector, + int v_size, int n_batch) { + for (int batch = 0; batch < n_batch; ++batch) { + float sum = 0.0f; + for (int i = 0; i < v_size; ++i) { + sum += input_vector[i]; + } + const float mean = sum / v_size; + float sum_diff_sq = 0.0f; + for (int i = 0; i < v_size; ++i) { + const float diff = input_vector[i] - mean; + sum_diff_sq += diff * diff; + } + const float variance = sum_diff_sq / v_size; + constexpr float kNormalizationConstant = 1e-8f; + const float stddev_inv = + 1.0f / std::sqrt(variance + kNormalizationConstant); + for (int i = 0; i < v_size; ++i) { + output_vector[i] = (input_vector[i] - mean) * stddev_inv; + } + input_vector += v_size; + output_vector += v_size; + } +} + +void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, + const int8_t* recurrent, int8_t recurrent_zp, + int32_t input_effective_scale_a, + int32_t input_effective_scale_b, + int32_t recurrent_effective_scale_a, + int32_t recurrent_effective_scale_b, + int32_t n_batch, int32_t n_cell, + int16_t* output) { + const int32_t int16_max = std::numeric_limits::max(); + const int32_t int16_min = std::numeric_limits::min(); + for (int i = 0; i < n_batch * n_cell; ++i) { + int32_t x = static_cast(input[i]) - static_cast(input_zp); + int32_t h = + static_cast(recurrent[i]) - static_cast(recurrent_zp); + int32_t x_scaled = MultiplyByQuantizedMultiplier(x, input_effective_scale_a, + input_effective_scale_b); + int32_t h_scaled = MultiplyByQuantizedMultiplier( + h, recurrent_effective_scale_a, recurrent_effective_scale_b); + int32_t y = h_scaled + x_scaled; + if (y > int16_max) { + y = int16_max; + } + if (y < int16_min) { + y = int16_min; + } + output[i] = static_cast(y); + } +} + +} // namespace tensor_utils +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.h new file mode 100644 index 0000000..06c867c --- /dev/null +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.h @@ -0,0 +1,333 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_H_ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils_impl.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { +namespace tensor_utils { + +// Check if all entries of a vector are zero for float. +bool IsZeroVector(const float* vector, int v_size) { + return PortableIsZeroVector(vector, v_size); +} + +// Check if all entries of a vector are zero for int8_t. +bool IsZeroVector(const int8_t* vector, int v_size) { + return PortableIsZeroVector(vector, v_size); +} + +void SymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* min, float* max, + float* scaling_factor) { + PortableSymmetricQuantizeFloats(values, size, quantized_values, min, max, + scaling_factor); +} + +void SymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float min_value, + float max_value, float* scaling_factor) { + PortableSymmetricQuantizeFloats(values, size, quantized_values, min_value, + max_value, scaling_factor); +} + +void AsymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* scaling_factor, + int32_t* offset) { + PortableAsymmetricQuantizeFloats(values, size, quantized_values, + scaling_factor, offset); +} + +void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows, + int m_cols, const float* vector, + int n_batch, float* result) { + PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, + n_batch, result); +} + +void MatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix, + const int m_rows, const int m_cols, + const int8_t* __restrict__ vector, + const float* scaling_factors, + int n_batch, + float* __restrict__ result) { + PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, + scaling_factors, n_batch, result); +} + +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulate( + matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result, + per_channel_scale, input_offset, scratch, row_sums, compute_row_sums, + context); +} + +void MatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix, + const int 
m_rows, const int m_cols, + const int8_t* __restrict__ vector, + const float* scaling_factors, + int n_batch, int32_t* scratch, + float* __restrict__ result, + CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vector, + scaling_factors, n_batch, result); +} + +void SparseMatrixBatchVectorMultiplyAccumulate1x4( + const float* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const float* __restrict__ vector, int n_batch, float* __restrict__ result) { + PortableSparseMatrixBatchVectorMultiplyAccumulate1x4( + matrix, segments, indices, m_rows, m_cols, vector, n_batch, result); +} + +void SparseMatrixBatchVectorMultiplyAccumulate( + const float* __restrict__ matrix, const uint8_t* __restrict__ ledger, + int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, + float* __restrict__ result) { + PortableSparseMatrixBatchVectorMultiplyAccumulate( + matrix, ledger, m_rows, m_cols, vector, n_batch, result); +} + +void SparseMatrixBatchVectorMultiplyAccumulate1x16( + const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector, + int n_batch, const int32_t input_offset, const int32_t output_multiplier, + const int32_t output_shift, const int32_t output_offset, + const int32_t output_activation_min, const int32_t output_activation_max, + + int8_t* __restrict__ result) { + PortableSparseMatrixBatchVectorMultiplyAccumulate1x16( + matrix, segments, indices, m_rows, m_cols, vector, bias_vector, n_batch, + input_offset, output_multiplier, output_shift, output_offset, + output_activation_min, output_activation_max, result); +} + +void SparseMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows, + const int m_cols, const int8_t* __restrict__ vectors, + const float* scaling_factors, int n_batch, float* __restrict__ result) { + PortableSparseMatrixBatchVectorMultiplyAccumulate( + matrix, ledger, m_rows, m_cols, vectors, scaling_factors, n_batch, + result); +} + +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int16_t* output, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulate( + input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, + n_output, output_zp, scratch, output, context); +} + +void MatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int8_t* output, CpuBackendContext* context) { + PortableMatrixBatchVectorMultiplyAccumulate( + input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input, + n_output, output_zp, scratch, output, context); +} + +void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar, + int32_t n_row, int32_t n_col, + int32_t* output) { + PortableMatrixScalarMultiplyAccumulate(matrix, scalar, n_row, n_col, output); +} + +void MatrixBatchVectorMultiply(const int8_t* input, int32_t input_zeropoint, + const int8_t* input_to_gate_weights, + int32_t input_to_gate_effective_scale_a, + 
int32_t input_to_gate_effective_scale_b, + int32_t n_batch, int32_t n_input, int32_t n_cell, + int8_t* gate_output, int8_t gate_output_zp) { + PortableMatrixBatchVectorMultiply( + input, input_zeropoint, input_to_gate_weights, + input_to_gate_effective_scale_a, input_to_gate_effective_scale_b, n_batch, + n_input, n_cell, gate_output, gate_output_zp); +} + +void MatrixBatchVectorMultiply(const int16_t* hidden, + const int8_t* hidden_to_output_weights, + int32_t proj_effective_scale_a, + int32_t proj_effective_scale_b, + const int32_t* gate_bias, int32_t n_batch, + int32_t n_hidden, int32_t n_output, + int32_t output_zp, int8_t* proj_output) { + PortableMatrixBatchVectorMultiply(hidden, hidden_to_output_weights, + proj_effective_scale_a, + proj_effective_scale_b, gate_bias, n_batch, + n_hidden, n_output, output_zp, proj_output); +} + +void ApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights, + const int32_t* bias, int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, int32_t variance_limit, + int n_batch, int n_input, int16_t* output) { + PortableApplyLayerNorm(input, layer_norm_weights, bias, layer_norm_scale_a, + layer_norm_scale_b, variance_limit, n_batch, n_input, + output); +} + +void ApplyLayerNormFloat(const int16_t* input, + const int16_t* layer_norm_weights, + int32_t layer_norm_scale_a, int32_t layer_norm_scale_b, + const int32_t* bias, int n_batch, int n_input, + int16_t* output) { + PortableApplyLayerNormFloat(input, layer_norm_weights, layer_norm_scale_a, + layer_norm_scale_b, bias, n_batch, n_input, + output); +} + +void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input, + int16_t* output) { + PortableApplySigmoid(input, n_batch, n_input, output); +} + +void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input, + int16_t* output) { + PortableApplySigmoidFloat(input, n_batch, n_input, output); +} + +void ApplyTanh(int32_t integer_bits, const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output) { + PortableApplyTanh(integer_bits, input, n_batch, n_input, output); +} + +void ApplyTanhFloat(const int16_t* input, int32_t n_batch, int32_t n_input, + int32_t integer_bits, int16_t* output) { + PortableApplyTanhFloat(input, n_batch, n_input, integer_bits, output); +} + +void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int shift, int16_t* output) { + PortableCwiseMul(input_1, input_2, n_batch, n_input, shift, output); +} + +void CwiseMul(const int16_t* input_1, const int16_t* input_2, + int32_t multiplier, int32_t shift, int32_t n_batch, + int32_t n_input, int32_t output_zp, int8_t* output) { + PortableCwiseMul(input_1, input_2, multiplier, shift, n_batch, n_input, + output_zp, output); +} + +void CwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int16_t* output) { + PortableCwiseAdd(input_1, input_2, n_batch, n_input, output); +} + +void CwiseClipping(float* vector, const int v_size, + const float clipping_value) { + PortableCwiseClipping(vector, v_size, clipping_value); +} + +void CwiseClipping(int16_t* vector, const int v_size, + const int16_t clipping_value) { + PortableCwiseClipping(vector, v_size, clipping_value); +} + +void CwiseClipping(int8_t* vector, const int v_size, + const int8_t clipping_value) { + PortableCwiseClipping(vector, v_size, clipping_value); +} + +void VectorBatchVectorCwiseProductAccumulate(const int16_t* vector, int v_size, + const int16_t* batch_vector, + int n_batch, int32_t multiplier, + int shift, int16_t* 
result) { + PortableVectorBatchVectorCwiseProductAccumulate( + vector, v_size, batch_vector, n_batch, multiplier, shift, result); +} + +float VectorVectorDotProduct(const float* vector1, const float* vector2, + int v_size) { + return PortableVectorVectorDotProduct(vector1, vector2, v_size); +} + +void BatchVectorBatchVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, int v_size, + int n_batch, int32_t* result) { + PortableBatchVectorBatchVectorDotProduct(vector1, vector2, v_size, n_batch, + result); +} + +void Sub1Vector(const float* vector, int v_size, float* result) { + PortableSub1Vector(vector, v_size, result); +} + +void Sub1Vector(const int16_t* vector, int v_size, int16_t* result) { + PortableSub1Vector(vector, v_size, result); +} + +// Multiply all elements of vector with a scalar. +void VectorScalarMultiply(const int8_t* vector, int v_size, float scale, + float* result) { + PortableVectorScalarMultiply(vector, v_size, scale, result); +} + +void ReductionSumVector(const float* input_vector, float* output_vector, + int output_size, int reduction_size) { + PortableReductionSumVector(input_vector, output_vector, output_size, + reduction_size); +} + +void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector, + int output_size, int reduction_size) { + PortableReductionSumVector(input_vector, output_vector, output_size, + reduction_size); +} + +void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector, + int output_size, int reduction_size) { + PortableReductionSumVector(input_vector, output_vector, output_size, + reduction_size); +} + +void MeanStddevNormalization(const float* input_vector, float* output_vector, + int v_size, int n_batch) { + PortableMeanStddevNormalization(input_vector, output_vector, v_size, n_batch); +} + +void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, + const int8_t* recurrent, int8_t recurrent_zp, + int32_t input_effective_scale_a, + int32_t input_effective_scale_b, + int32_t recurrent_effective_scale_a, + int32_t recurrent_effective_scale_b, int32_t n_batch, + int32_t n_cell, int16_t* output) { + PortableTwoGateSaturatingAdd( + input, input_zp, recurrent, recurrent_zp, input_effective_scale_a, + input_effective_scale_b, recurrent_effective_scale_a, + recurrent_effective_scale_b, n_batch, n_cell, output); +} + +} // namespace tensor_utils +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils_impl.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils_impl.h new file mode 100644 index 0000000..6c404d5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils_impl.h @@ -0,0 +1,244 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_ + +#include +#include + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { + +// Not all backends support CpuBackendContext usage, so forward declare to avoid +// pulling in its implementation. +class CpuBackendContext; + +namespace tensor_utils { + +template +bool PortableIsZeroVector(const T* vector, int v_size) { + for (int i = 0; i < v_size; ++i) { + if (vector[i] != 0) { + return false; + } + } + return true; +} + +void PortableSymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float* min_value, + float* max_value, float* scaling_factor); + +void PortableSymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, float min_value, + float max_value, float* scaling_factor); + +void PortableAsymmetricQuantizeFloats(const float* values, const int size, + int8_t* quantized_values, + float* scaling_factor, int32_t* offset); + +// Multiply a matrix by a batch vector, and store results in a batch-size +// vector. +void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix, + int m_rows, int m_cols, + const float* vector, + int n_batch, float* result); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vectors, const float* scaling_factors, + int n_batch, float* __restrict__ result, const float* per_channel_scale, + const int32_t* input_offset, int32_t* scratch, int32_t* row_sums, + bool* compute_row_sums, CpuBackendContext* context); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* __restrict__ matrix, const int m_rows, const int m_cols, + const int8_t* __restrict__ vector, const float* scaling_factors, + int n_batch, int32_t* scratch, float* __restrict__ result, + CpuBackendContext* context); + +void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4( + const float* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const float* __restrict__ vector, int n_batch, float* __restrict__ result); + +void PortableSparseMatrixBatchVectorMultiplyAccumulate( + const float* __restrict__ matrix, const uint8_t* __restrict__ ledger, + int m_rows, int m_cols, const float* __restrict__ vector, int n_batch, + float* __restrict__ result); + +void PortableSparseMatrixBatchVectorMultiplyAccumulate1x16( + const int8_t* __restrict__ matrix, const int32_t* __restrict__ segments, + const int32_t* __restrict__ indices, int m_rows, int m_cols, + const int8_t* __restrict__ vector, const int32_t* __restrict__ bias_vector, + int n_batch, const int32_t input_offset, const int32_t output_multiplier, + const int32_t output_shift, const int32_t output_offset, + const int32_t output_activation_min, const int32_t output_activation_max, + int8_t* __restrict__ result); + +void PortableSparseMatrixBatchVectorMultiplyAccumulate( + const int8_t* 
__restrict__ matrix, const uint8_t* ledger, const int m_rows, + const int m_cols, const int8_t* __restrict__ vectors, + const float* scaling_factors, int n_batch, float* __restrict__ result); + +// Dot product of two vectors. +float PortableVectorVectorDotProduct(const float* vector1, const float* vector2, + int v_size); + +void PortableBatchVectorBatchVectorDotProduct(const int16_t* vector1, + const int16_t* vector2, + int v_size, int n_batch, + int32_t* result); + +void PortableVectorBatchVectorCwiseProductAccumulate( + const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch, + int32_t multiplier, int shift, int16_t* result); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int16_t* output, CpuBackendContext* context); + +void PortableMatrixBatchVectorMultiplyAccumulate( + const int8_t* input, const int32_t* bias, + const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift, + int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp, + int32_t* scratch, int8_t* output, CpuBackendContext* context); + +void PortableMatrixBatchVectorMultiply(const int8_t* input, + int32_t input_zeropoint, + const int8_t* input_to_gate_weights, + int32_t input_to_gate_effective_scale_a, + int32_t input_to_gate_effective_scale_b, + int32_t n_batch, int32_t n_input, + int32_t n_cell, int8_t* gate_output, + int8_t gate_output_zp); + +void PortableMatrixBatchVectorMultiply( + const int16_t* hidden, const int8_t* hidden_to_output_weights, + int32_t proj_effective_scale_a, int32_t proj_effective_scale_b, + const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden, + int32_t n_output, int32_t output_zp, int8_t* proj_output); + +void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix, + int32_t scalar, int32_t n_row, + int32_t n_col, int32_t* output); + +void PortableApplyLayerNorm(const int16_t* input, + const int16_t* layer_norm_weights, + const int32_t* bias, int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, int32_t variance_limit, + int n_batch, int n_input, int16_t* output); + +void PortableApplyLayerNormFloat(const int16_t* input, + const int16_t* layer_norm_weights, + int32_t layer_norm_scale_a, + int32_t layer_norm_scale_b, + const int32_t* bias, int n_batch, int n_input, + int16_t* output); + +void PortableApplySigmoid(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output); + +void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch, + int32_t n_input, int16_t* output); + +void PortableApplyTanh(int32_t integer_bits, const int16_t* input, + int32_t n_batch, int32_t n_input, int16_t* output); + +void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch, + int32_t n_input, int32_t integer_bits, + int16_t* output); + +void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2, + int n_batch, int n_input, int shift, int16_t* output); + +void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2, + int32_t multiplier, int32_t shift, int32_t n_batch, + int32_t n_input, int32_t output_zp, int8_t* output); + +void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2, + int n_batch, int n_input, int16_t* output); + +template +void PortableCwiseClipping(T* vector, const int v_size, + const T& clipping_value) { + for (int i = 0; i < v_size; i++) { + vector[i] = 
std::max(std::min(clipping_value, vector[i]), + static_cast(-clipping_value)); + } +} + +// Batch vector initialization with another vector. +void PortableVectorBatchVectorAssign(const float* vector, int v_size, + int n_batch, float* batch_vector); + +// Compute "1.0f - elements of vector" (used in CIFG). +void PortableSub1Vector(const float* vector, int v_size, float* result); + +void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result); + +// Multiply all elements of vector with a scalar. +void PortableVectorScalarMultiply(const int8_t* vector, int v_size, float scale, + float* result); + +// Reduce-sum on a vector: +// input_vector: pointer to input vector. +// output_vector: pointer to vector. +// output_size: output vector size. +// reduction_size: number of consecutive elements from input vector which are +// added to get one element of output. +template +void PortableReductionSumVector(const INPUT* input_vector, + OUTPUT* output_vector, int output_size, + int reduction_size) { + for (int o = 0; o < output_size; o++) { + OUTPUT result = 0; + for (int r = 0; r < reduction_size; r++) { + result += input_vector[r]; + } + output_vector[o] = result; + input_vector += reduction_size; + } +} + +// Layer norm for each batch. +void PortableMeanStddevNormalization(const float* __restrict__ input_vector, + float* __restrict__ output_vector, + int v_size, int n_batch); + +// Saturate Add. +void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp, + const int8_t* recurrent, int8_t recurrent_zp, + int32_t input_effective_scale_a, + int32_t input_effective_scale_b, + int32_t recurrent_effective_scale_a, + int32_t recurrent_effective_scale_b, + int32_t n_batch, int32_t n_cell, + int16_t* output); + +} // namespace tensor_utils +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PORTABLE_TENSOR_UTILS_IMPL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/runtime_shape.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/runtime_shape.h new file mode 100644 index 0000000..c2678b5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/runtime_shape.h @@ -0,0 +1,158 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_ +#define TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_ + +namespace tflite { + +template +struct Dims { + int sizes[N]; + int strides[N]; +}; + +class RuntimeShape { + public: + RuntimeShape& operator=(RuntimeShape const&) = delete; + + // RuntimeShape in TFLM supports up to 5 dimensions. + // The name kMaxSmallSize comes from the same file of the upstream + // tensorflow lite repo and need to be kept the same for max reuse. 
+ static constexpr int kMaxSmallSize = 5; + + RuntimeShape() : size_(0) {} + + explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) {} + + RuntimeShape(int shape_size, int32_t value) : size_(shape_size) { + for (int i = 0; i < shape_size; ++i) { + SetDim(i, value); + } + } + + RuntimeShape(int dimensions_count, const int32_t* dims_data) + : size_(dimensions_count) { + ReplaceWith(dimensions_count, dims_data); + } + + bool operator==(const RuntimeShape& comp) const { + return this->size_ == comp.size_ && + std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) == + 0; + } + + ~RuntimeShape() {} + + int32_t DimensionsCount() const { return size_; } + int32_t Dims(int i) const { + TFLITE_DCHECK_GE(i, 0); + TFLITE_DCHECK_LT(i, size_); + return dims_[i]; + } + void SetDim(int i, int32_t val) { + TFLITE_DCHECK_GE(i, 0); + TFLITE_DCHECK_LT(i, size_); + dims_[i] = val; + } + + static RuntimeShape ExtendedShape(int new_shape_size, + const RuntimeShape& shape) { + return RuntimeShape(new_shape_size, shape, 1); + } + int32_t* DimsData() { return dims_; } + const int32_t* DimsData() const { return dims_; } + const int32_t* DimsDataUpTo5D() const { return dims_; } + + void ReplaceWith(int dimensions_count, const int32_t* dims_data) { + size_ = dimensions_count; + int32_t* dst_dims = DimsData(); + std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t)); + } + + // Returns the total count of elements, that is the size when flattened into a + // vector. + int FlatSize() const { + int buffer_size = 1; + const int* dims_data = reinterpret_cast(DimsData()); + for (int i = 0; i < size_; i++) { + buffer_size *= dims_data[i]; + } + return buffer_size; + } + + private: + // For use only by ExtendedShape(), written to guarantee (return-value) copy + // elision in C++17. + // This creates a shape padded to the desired size with the specified value. + RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value) + : size_(new_shape_size) { + // If the following check fails, it is likely because a 4D-only kernel is + // being used with an array of larger dimension count. + TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount()); + const int size_increase = new_shape_size - shape.DimensionsCount(); + for (int i = 0; i < size_increase; ++i) { + SetDim(i, pad_value); + } + std::memcpy(DimsData() + size_increase, shape.DimsData(), + sizeof(int32_t) * shape.DimensionsCount()); + } + + int32_t size_; + union { + int32_t dims_[kMaxSmallSize]; + }; +}; + +// Since tensors with '0' in their shape are valid in TF, these offset functions +// allow that as long as the corresponding index is also 0. It is upto the +// calling ops to ensure that they perform verification checks on tensor shapes +// if they don't support a particular behavior. 
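For orientation, the 4-D Offset() overload that follows is plain row-major indexing over the stored dims. A minimal sketch of that arithmetic, assuming only standard C++ (RowMajorOffset4D is an illustrative helper, not part of this patch):

#include <cstdint>

// Mirrors the return expression of the 4-D Offset() below:
//   ((i0 * d1 + i1) * d2 + i2) * d3 + i3
static int RowMajorOffset4D(const int32_t dims[4], int i0, int i1, int i2, int i3) {
  return ((i0 * dims[1] + i1) * dims[2] + i2) * dims[3] + i3;
}

// For dims = {2, 3, 4, 5}, element (1, 2, 3, 4) maps to
// ((1*3 + 2)*4 + 3)*5 + 4 = 119, the last slot of FlatSize() = 120 elements.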
+ +inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) { + TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4); + const int* dims_data = reinterpret_cast(shape.DimsData()); + TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) || + (i0 >= 0 && i0 < dims_data[0])); + TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) || + (i1 >= 0 && i1 < dims_data[1])); + TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) || + (i2 >= 0 && i2 < dims_data[2])); + TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) || + (i3 >= 0 && i3 < dims_data[3])); + return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3; +} + +inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3, + int i4) { + TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5); + const int* dims_data = reinterpret_cast(shape.DimsData()); + TFLITE_DCHECK((dims_data[0] == 0 && i0 == 0) || + (i0 >= 0 && i0 < dims_data[0])); + TFLITE_DCHECK((dims_data[1] == 0 && i1 == 0) || + (i1 >= 0 && i1 < dims_data[1])); + TFLITE_DCHECK((dims_data[2] == 0 && i2 == 0) || + (i2 >= 0 && i2 < dims_data[2])); + TFLITE_DCHECK((dims_data[3] == 0 && i3 == 0) || + (i3 >= 0 && i3 < dims_data[3])); + TFLITE_DCHECK((dims_data[4] == 0 && i4 == 0) || + (i4 >= 0 && i4 < dims_data[4])); + return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) * + dims_data[4] + + i4; +} + +} // namespace tflite + +#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_RUNTIME_SHAPE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/strided_slice_logic.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/strided_slice_logic.h index 002f907..18a7940 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/strided_slice_logic.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/strided_slice_logic.h @@ -69,6 +69,69 @@ inline void StridedSlicePadIndices(tflite::StridedSliceParams* p, p->strides_count = dim_count; } +// Return the index for the first element along that axis. This index will be a +// positive integer between [0, axis_size] (or [-1, axis_size -1] if stride < 0) +// that can be used to index directly into the data. 
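StridedSliceStartForAxis(), defined next, normalizes a possibly negative or out-of-range start index into the documented range before applying the begin_mask override. A stand-alone sketch of that normalization for a single axis (NormalizeSliceStart and its scalar arguments are illustrative, not part of the TFLite API):

#include <algorithm>
#include <cstdint>

static int32_t NormalizeSliceStart(int32_t start, int32_t stride,
                                   int32_t axis_size, bool begin_mask_set) {
  if (start < 0) start += axis_size;  // negative starts count from the end
  if (stride > 0) {
    start = std::min<int32_t>(std::max<int32_t>(start, 0), axis_size);
  } else {
    start = std::min<int32_t>(std::max<int32_t>(start, -1), axis_size - 1);
  }
  if (begin_mask_set) start = (stride > 0) ? 0 : axis_size - 1;
  return start;
}
// NormalizeSliceStart(-3, 1, 10, false) == 7
// NormalizeSliceStart(42, 1, 10, false) == 10 (an empty slice)
// NormalizeSliceStart(0, -1, 10, true)  == 9  (begin_mask wins for negative stride)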
+inline int StridedSliceStartForAxis(const tflite::StridedSliceParams& params, + const RuntimeShape& input_shape, + int32_t axis) { + const int32_t axis_size = input_shape.Dims(axis); + int32_t start = params.start_indices[axis]; + const int32_t stride = params.strides[axis]; + const int32_t begin_mask = (params.begin_mask & 1 << axis); + if (start < 0) { + start += axis_size; + } + if (stride > 0) { + start = Clamp(start, 0, axis_size); + } else { + start = Clamp(start, -1, axis_size - 1); + } + if (begin_mask) { + if (stride > 0) { + start = 0; + } else { + start = axis_size - 1; + } + } + return start; +} + +inline int StridedSliceEndForAxis(const tflite::StridedSliceParams& params, + const RuntimeShape& input_shape, int axis, + int start) { + const auto shrink_axis_mask = params.shrink_axis_mask; + const bool shrink_axis = shrink_axis_mask & (1 << axis); + const int axis_size = input_shape.Dims(axis); + if (shrink_axis) { + if (start >= axis_size) { + return start; + } else { + return start + 1; + } + } + const auto* indices = params.stop_indices; + int end = indices[axis]; + const int32_t stride = params.strides[axis]; + const int32_t end_mask = (params.end_mask & 1 << axis); + if (end < 0) { + end += axis_size; + } + if (stride > 0) { + end = Clamp(end, 0, axis_size); + } else { + end = Clamp(end, -1, axis_size - 1); + } + if (end_mask) { + if (stride > 0) { + end = axis_size; + } else { + end = -1; + } + } + return end; +} + // Return the index for the first element along that axis. This index will be a // positive integer between [0, axis_size] (or [-1, axis_size -1] if stride < 0) // that can be used to index directly into the data. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h index 831843c..de2d802 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h @@ -15,7 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ #define TENSORFLOW_LITE_KERNELS_INTERNAL_TENSOR_CTYPES_H_ -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" namespace tflite { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_utils.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_utils.cc new file mode 100644 index 0000000..7527994 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_utils.cc @@ -0,0 +1,25 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ + +// internal/reference_portable_tensor_utils.h has the implementation of the +// functions declared in internal/portable_tensor_utils.h. This somewhat +// confusing setup is derived from how the code is organized in TfLite where it +// is used to select between NEON, SSE and portable implementaitons. See +// https://github.com/tensorflow/tensorflow/blob/d76c23975c4a3a0d7987cfe3f45c76566df06180/tensorflow/lite/kernels/internal/tensor_utils.cc +// for how the code is written in TfLite. + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference_portable_tensor_utils.h" diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h index 42c8111..9e73812 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h @@ -21,6 +21,7 @@ limitations under the License. #include #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/runtime_shape.h" namespace tflite { @@ -139,211 +140,22 @@ inline bool operator==(const QuantizationParams& qp1, return qp1.zero_point == qp2.zero_point && qp1.scale == qp2.scale; } -template -struct Dims { - int sizes[N]; - int strides[N]; -}; - -class RuntimeShape { - public: - // Shapes with dimensions up to 5 are stored directly in the structure, while - // larger shapes are separately allocated. - static constexpr int kMaxSmallSize = 5; - - RuntimeShape& operator=(RuntimeShape const&) = delete; - - RuntimeShape() : size_(0) {} - - explicit RuntimeShape(int dimensions_count) : size_(dimensions_count) { - if (dimensions_count > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - dims_pointer_ = new int32_t[dimensions_count]; -#endif // TF_LITE_STATIC_MEMORY - } - } - - RuntimeShape(int shape_size, int32_t value) : size_(0) { - Resize(shape_size); - for (int i = 0; i < shape_size; ++i) { - SetDim(i, value); - } - } - - RuntimeShape(int dimensions_count, const int32_t* dims_data) : size_(0) { - ReplaceWith(dimensions_count, dims_data); - } - - RuntimeShape(const std::initializer_list init_list) : size_(0) { - BuildFrom(init_list); - } - - // Avoid using this constructor. We should be able to delete it when C++17 - // rolls out. 
- RuntimeShape(RuntimeShape const& other) : size_(other.DimensionsCount()) { - if (size_ > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else - dims_pointer_ = new int32_t[size_]; -#endif - } - std::memcpy(DimsData(), other.DimsData(), sizeof(int32_t) * size_); - } - - bool operator==(const RuntimeShape& comp) const { - return this->size_ == comp.size_ && - std::memcmp(DimsData(), comp.DimsData(), size_ * sizeof(int32_t)) == - 0; - } - - ~RuntimeShape() { - if (size_ > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - delete[] dims_pointer_; -#endif // TF_LITE_STATIC_MEMORY - } - } - - inline int32_t DimensionsCount() const { return size_; } - inline int32_t Dims(int i) const { - TFLITE_DCHECK_GE(i, 0); - TFLITE_DCHECK_LT(i, size_); - return size_ > kMaxSmallSize ? dims_pointer_[i] : dims_[i]; - } - inline void SetDim(int i, int32_t val) { - TFLITE_DCHECK_GE(i, 0); - TFLITE_DCHECK_LT(i, size_); - if (size_ > kMaxSmallSize) { - dims_pointer_[i] = val; - } else { - dims_[i] = val; - } - } - - inline int32_t* DimsData() { - return size_ > kMaxSmallSize ? dims_pointer_ : dims_; - } - inline const int32_t* DimsData() const { - return size_ > kMaxSmallSize ? dims_pointer_ : dims_; - } - // The caller must ensure that the shape is no bigger than 5-D. - inline const int32_t* DimsDataUpTo5D() const { return dims_; } - - inline void Resize(int dimensions_count) { - if (size_ > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - delete[] dims_pointer_; -#endif // TF_LITE_STATIC_MEMORY - } - size_ = dimensions_count; - if (dimensions_count > kMaxSmallSize) { -#ifdef TF_LITE_STATIC_MEMORY - TFLITE_CHECK(false && "No shape resizing supported on this platform"); -#else // TF_LITE_STATIC_MEMORY - dims_pointer_ = new int32_t[dimensions_count]; -#endif // TF_LITE_STATIC_MEMORY - } - } - - inline void ReplaceWith(int dimensions_count, const int32_t* dims_data) { - Resize(dimensions_count); - int32_t* dst_dims = DimsData(); - std::memcpy(dst_dims, dims_data, dimensions_count * sizeof(int32_t)); - } - - template - inline void BuildFrom(const T& src_iterable) { - const int dimensions_count = - std::distance(src_iterable.begin(), src_iterable.end()); - Resize(dimensions_count); - int32_t* data = DimsData(); - for (auto it : src_iterable) { - *data = it; - ++data; - } - } - - // This will probably be factored out. Old code made substantial use of 4-D - // shapes, and so this function is used to extend smaller shapes. Note that - // (a) as Dims<4>-dependent code is eliminated, the reliance on this should be - // reduced, and (b) some kernels are stricly 4-D, but then the shapes of their - // inputs should already be 4-D, so this function should not be needed. - inline static RuntimeShape ExtendedShape(int new_shape_size, - const RuntimeShape& shape) { - return RuntimeShape(new_shape_size, shape, 1); - } - - inline void BuildFrom(const std::initializer_list init_list) { - BuildFrom>(init_list); - } - - // Returns the total count of elements, that is the size when flattened into a - // vector. 
- inline int FlatSize() const { - int buffer_size = 1; - const int* dims_data = reinterpret_cast(DimsData()); - for (int i = 0; i < size_; i++) { - buffer_size *= dims_data[i]; - } - return buffer_size; - } - - bool operator!=(const RuntimeShape& comp) const { return !((*this) == comp); } - - private: - // For use only by ExtendedShape(), written to guarantee (return-value) copy - // elision in C++17. - // This creates a shape padded to the desired size with the specified value. - RuntimeShape(int new_shape_size, const RuntimeShape& shape, int pad_value) - : size_(0) { - // If the following check fails, it is likely because a 4D-only kernel is - // being used with an array of larger dimension count. - TFLITE_CHECK_GE(new_shape_size, shape.DimensionsCount()); - Resize(new_shape_size); - const int size_increase = new_shape_size - shape.DimensionsCount(); - for (int i = 0; i < size_increase; ++i) { - SetDim(i, pad_value); - } - std::memcpy(DimsData() + size_increase, shape.DimsData(), - sizeof(int32_t) * shape.DimensionsCount()); - } - - int32_t size_; - union { - int32_t dims_[kMaxSmallSize]; - int32_t* dims_pointer_; - }; +// Quantization parameters for each channel, determining the mapping of +// quantized values to real values. See QuantizationParams for a single set of +// parameters per tensor. This has one parameters set per each channel. +// +// The correspondence is as follows: +// +// real_value = scale[channel] * (quantized_value - zero_point[channel]); +// +struct PerChannelQuantizationParams { + // The following members typically point to the corresponding members of a + // TfLiteAffineQuantization struct. + const float* scale; + const int32_t* zero_point; + int32_t quantized_dimension; }; -// Converts inference-style shape to legacy tflite::Dims<4>. -inline tflite::Dims<4> ToRuntimeDims(const tflite::RuntimeShape& array_shape) { - tflite::Dims<4> result; - const int dimensions_count = array_shape.DimensionsCount(); - TFLITE_CHECK_LE(dimensions_count, 4); - int cum_prod = 1; - for (int i = 0; i < 4; i++) { - const int new_dim = - (i < dimensions_count) ? array_shape.Dims(dimensions_count - 1 - i) : 1; - result.sizes[i] = new_dim; - result.strides[i] = cum_prod; - cum_prod *= new_dim; - } - return result; -} - -// TODO(b/80418076): Move to legacy ops file, update invocations. -inline RuntimeShape DimsToShape(const tflite::Dims<4>& dims) { - return RuntimeShape( - {dims.sizes[3], dims.sizes[2], dims.sizes[1], dims.sizes[0]}); -} - // Gets next index to iterate through a multidimensional array. 
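The PerChannelQuantizationParams struct added above carries one scale and zero point per slice along quantized_dimension, with the documented mapping real_value = scale[channel] * (quantized_value - zero_point[channel]). A small numeric sketch of that formula (DequantizePerChannel and the sample values are illustrative only):

#include <cstdint>

static float DequantizePerChannel(int8_t q, int channel,
                                  const float* scale, const int32_t* zero_point) {
  return scale[channel] * static_cast<float>(q - zero_point[channel]);
}
// With scale[c] = 0.5f and zero_point[c] = -2, a stored value of 6
// dequantizes to 0.5f * (6 - (-2)) = 4.0f.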
inline bool NextIndex(const int num_dims, const int* dims, int* current) { if (num_dims == 0) { @@ -400,35 +212,20 @@ inline size_t ReducedOutputOffset(const int num_dims, const int* dims, return offset; } -inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), 4); - const int* dims_data = reinterpret_cast(shape.DimsDataUpTo5D()); - TFLITE_DCHECK(i0 >= 0 && i0 < dims_data[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < dims_data[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < dims_data[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < dims_data[3]); - return ((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3; -} - -inline int Offset(const RuntimeShape& shape, int i0, int i1, int i2, int i3, - int i4) { - TFLITE_DCHECK_EQ(shape.DimensionsCount(), 5); - const int* dims_data = reinterpret_cast(shape.DimsDataUpTo5D()); - TFLITE_DCHECK(i0 >= 0 && i0 < dims_data[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < dims_data[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < dims_data[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < dims_data[3]); - TFLITE_DCHECK(i4 >= 0 && i4 < dims_data[4]); - return (((i0 * dims_data[1] + i1) * dims_data[2] + i2) * dims_data[3] + i3) * - dims_data[4] + - i4; -} +// Since tensors with '0' in their shape are valid in TF, these offset functions +// allow that as long as the corresponding index is also 0. It is upto the +// calling ops to ensure that they perform verification checks on tensor shapes +// if they don't support a particular behavior. inline int Offset(const Dims<4>& dims, int i0, int i1, int i2, int i3) { - TFLITE_DCHECK(i0 >= 0 && i0 < dims.sizes[0]); - TFLITE_DCHECK(i1 >= 0 && i1 < dims.sizes[1]); - TFLITE_DCHECK(i2 >= 0 && i2 < dims.sizes[2]); - TFLITE_DCHECK(i3 >= 0 && i3 < dims.sizes[3]); + TFLITE_DCHECK((i0 == 0 && dims.sizes[0] == 0) || + (i0 >= 0 && i0 < dims.sizes[0])); + TFLITE_DCHECK((i1 == 0 && dims.sizes[1] == 0) || + (i1 >= 0 && i1 < dims.sizes[1])); + TFLITE_DCHECK((i2 == 0 && dims.sizes[2] == 0) || + (i2 >= 0 && i2 < dims.sizes[2])); + TFLITE_DCHECK((i3 == 0 && dims.sizes[3] == 0) || + (i3 >= 0 && i3 < dims.sizes[3])); return i0 * dims.strides[0] + i1 * dims.strides[1] + i2 * dims.strides[2] + i3 * dims.strides[3]; } @@ -437,10 +234,6 @@ inline int Offset(const Dims<4>& dims, int* index) { return Offset(dims, index[0], index[1], index[2], index[3]); } -inline int Offset(const RuntimeShape& shape, int* index) { - return Offset(shape, index[0], index[1], index[2], index[3]); -} - // Get array size, DCHECKing that the dim index is in range. // // Note that this will be phased out with Dims<4>, since RuntimeShape::Dims() @@ -602,6 +395,58 @@ inline int MatchingFlatSize(const Dims& dims, const Dims& check_dims_0, return MatchingFlatSize(dims, check_dims_1, check_dims_2, check_dims_3); } +// Flat size calculation, checking if their extended shapes match. 
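The MatchingExtendedShapeFlatSize() overloads defined next accept shapes of different rank as long as the trailing dimensions agree and every extra leading dimension is 1, then return the flat element count of the first shape. A usage sketch, assuming the RuntimeShape class from runtime_shape.h (ExampleMatchingFlatSize is illustrative only):

#include <cstdint>
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h"

static int ExampleMatchingFlatSize() {
  const int32_t a_dims[] = {2, 3, 4};
  const int32_t b_dims[] = {1, 2, 3, 4};
  tflite::RuntimeShape a(3, a_dims);
  tflite::RuntimeShape b(4, b_dims);
  // Trailing dims 4, 3, 2 match and b's extra leading dim is 1,
  // so this returns a.FlatSize() == 2 * 3 * 4 == 24.
  return tflite::MatchingExtendedShapeFlatSize(a, b);
}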
+inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape, + const RuntimeShape& check_shape_0) { + const int shape_dims = shape.DimensionsCount(); + const int check_shape_0_dims = check_shape_0.DimensionsCount(); + const int min_dims = std::min(shape_dims, check_shape_0_dims); + + for (int i = 0; i < min_dims; ++i) { + TFLITE_DCHECK_EQ(shape.Dims(shape_dims - 1 - i), + check_shape_0.Dims(check_shape_0_dims - 1 - i)); + } + for (int i = min_dims; i < shape_dims; ++i) { + TFLITE_DCHECK_EQ(shape.Dims(shape_dims - 1 - i), 1); + } + for (int i = min_dims; i < check_shape_0_dims; ++i) { + TFLITE_DCHECK_EQ(check_shape_0.Dims(check_shape_0_dims - 1 - i), 1); + } + return shape.FlatSize(); +} + +inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape, + const RuntimeShape& check_shape_0, + const RuntimeShape& check_shape_1) { + const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0); + TFLITE_DCHECK_EQ(MatchingExtendedShapeFlatSize(shape, check_shape_1), + flat_size); + return flat_size; +} + +inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape, + const RuntimeShape& check_shape_0, + const RuntimeShape& check_shape_1, + const RuntimeShape& check_shape_2) { + const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0); + TFLITE_DCHECK_EQ( + MatchingExtendedShapeFlatSize(shape, check_shape_1, check_shape_2), + flat_size); + return flat_size; +} + +inline int MatchingExtendedShapeFlatSize(const RuntimeShape& shape, + const RuntimeShape& check_shape_0, + const RuntimeShape& check_shape_1, + const RuntimeShape& check_shape_2, + const RuntimeShape& check_shape_3) { + const int flat_size = MatchingExtendedShapeFlatSize(shape, check_shape_0); + TFLITE_DCHECK_EQ(MatchingExtendedShapeFlatSize(shape, check_shape_1, + check_shape_2, check_shape_3), + flat_size); + return flat_size; +} + // Data is required to be contiguous, and so many operators can use either the // full array flat size or the flat size with one dimension skipped (commonly // the depth). @@ -885,6 +730,8 @@ struct Conv3DParams { float float_activation_max; }; +typedef Conv3DParams Conv3DTransposeParams; + struct DepthToSpaceParams { int32_t block_size; }; @@ -1019,9 +866,9 @@ struct PackParams { struct PadParams { int8_t left_padding_count; - int32_t left_padding[4]; + int32_t left_padding[5]; int8_t right_padding_count; - int32_t right_padding[4]; + int32_t right_padding[5]; ResizingCategory resizing_category; }; @@ -1127,11 +974,11 @@ struct StridedSliceParams { int8_t strides_count; int32_t strides[5]; - int16_t begin_mask; - int16_t ellipsis_mask; - int16_t end_mask; - int16_t new_axis_mask; - int16_t shrink_axis_mask; + uint16_t begin_mask; + uint16_t ellipsis_mask; + uint16_t end_mask; + uint16_t new_axis_mask; + uint16_t shrink_axis_mask; }; struct TanhParams { @@ -1141,9 +988,11 @@ struct TanhParams { int input_left_shift; }; +constexpr int kTransposeMaxDimensions = 6; + struct TransposeParams { int8_t perm_count; - int32_t perm[5]; + int32_t perm[kTransposeMaxDimensions]; }; struct UnpackParams { @@ -1196,6 +1045,23 @@ inline void GetActivationParams(const P& params, int64_t* min, int64_t* max) { *min = params.int64_activation_min; *max = params.int64_activation_max; } + +// Type trait to check of given type has size smaller than 4 bytes. +template +struct is_small_integer + : public std::integral_constant::value || + std::is_same::value || + std::is_same::value || + std::is_same::value> {}; + +// Type trait to check of given type is int32 or int64. 
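is_small_integer above, together with the is_int32_or_int64 companion defined next, are plain std::integral_constant wrappers used to constrain kernel templates. A usage sketch with hypothetical static_asserts, grounded only in the traits' documented intent:

#include <cstdint>
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h"

static_assert(tflite::is_small_integer<int8_t>::value,
              "int8_t occupies fewer than 4 bytes");
static_assert(!tflite::is_small_integer<int32_t>::value,
              "int32_t occupies 4 bytes and does not qualify");
static_assert(tflite::is_int32_or_int64<int64_t>::value,
              "int64_t is one of the two wide integer types");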
+template +struct is_int32_or_int64 + : public std::integral_constant::value || + std::is_same::value> { +}; + } // namespace tflite #endif // TENSORFLOW_LITE_KERNELS_INTERNAL_TYPES_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h index f2b6fbe..5f33173 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h @@ -18,9 +18,12 @@ limitations under the License. #include #include +#ifndef TF_LITE_STATIC_MEMORY +#include +#endif // TF_LITE_STATIC_MEMORY -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" namespace tflite { @@ -149,8 +152,17 @@ inline int SizeOfDimension(const TfLiteTensor* t, int dim) { return t->dims->data[dim]; } -inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; } -inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; } +inline int NumDimensions(const TfLiteEvalTensor* t) { return t->dims->size; } +inline int SizeOfDimension(const TfLiteEvalTensor* t, int dim) { + return t->dims->data[dim]; +} + +inline int NumInputs(const TfLiteNode* node) { + return node->inputs == nullptr ? 0 : node->inputs->size; +} +inline int NumOutputs(const TfLiteNode* node) { + return node->outputs == nullptr ? 0 : node->outputs->size; +} #ifndef TF_LITE_STATIC_MEMORY inline int NumIntermediates(const TfLiteNode* node) { @@ -170,6 +182,14 @@ inline int64_t NumElements(const TfLiteTensor* t) { return NumElements(t->dims); } +inline int64_t NumElements(const int* dims, int num_dims) { + int64_t count = 1; + for (int i = 0; i < num_dims; ++i) { + count *= dims[i]; + } + return count; +} + // Determines whether tensor is constant. // TODO(b/138199592): Introduce new query which checks for constant OR // persistent-read-only, which would be useful for most tensor kernels that @@ -179,6 +199,11 @@ inline bool IsConstantTensor(const TfLiteTensor* tensor) { return tensor->allocation_type == kTfLiteMmapRo; } +inline bool IsConstantOrPersistentTensor(const TfLiteTensor* tensor) { + return IsConstantTensor(tensor) || + (tensor->allocation_type == kTfLitePersistentRo); +} + // Determines whether tensor is dynamic. Note that a tensor can be non-const and // not dynamic. This function specifically checks for a dynamic tensor. 
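IsConstantOrPersistentTensor() above widens the constant check to also accept kTfLitePersistentRo allocations, while IsDynamicTensor(), defined next, flags tensors whose contents are only available at evaluation time. A minimal sketch of how a kernel's prepare step might branch on these predicates (ExamplePrepareBranch is illustrative, assuming this header is included):

#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"

static void ExamplePrepareBranch(const TfLiteTensor* tensor) {
  if (tflite::IsConstantOrPersistentTensor(tensor)) {
    // Weight-like data: contents are already known here and could be
    // inspected or pre-processed once, outside the Eval path.
  } else if (tflite::IsDynamicTensor(tensor)) {
    // Contents (and possibly shape) are only known at Eval time, so any
    // size-dependent work has to be deferred.
  }
}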
inline bool IsDynamicTensor(const TfLiteTensor* tensor) { @@ -214,14 +239,15 @@ TfLiteStatus PopulateConvolutionQuantizationParams( const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift); + int32_t* per_channel_multiplier, int32_t* per_channel_shift); TfLiteStatus PopulateConvolutionQuantizationParams( TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels); + int32_t* per_channel_multiplier, int32_t* per_channel_shift, + int num_channels); // Calculates the multiplication factor for a quantized convolution (or // quantized depthwise convolution) involving the given tensors. Returns an @@ -270,6 +296,16 @@ void CalculateActivationRange(TfLiteFusedActivation activation, // Return true if the given tensors have the same shape. bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2); +#if !defined(TF_LITE_STATIC_MEMORY) +// Gets the output shape from the input tensor. +TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context, + const TfLiteTensor* input, + TfLiteIntArray** output_shape); + +const std::string GetShapeDebugString(const TfLiteIntArray* shape); + +#endif // !defined(TF_LITE_STATIC_MEMORY) + // Calculates the output_shape that is necessary for element-wise operations // with broadcasting involving the two input tensors. TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, @@ -285,12 +321,15 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input3, TfLiteIntArray** output_shape); -// Return the size of given type in bytes. Return 0 in in case of string. +// Return the size of given type in bytes. Return 0 in case of string. int TfLiteTypeGetSize(TfLiteType type); // Whether the current platform is mobile (Android or iOS). bool IsMobilePlatform(); +// Returns whether there is unspecified dimension in the tensor's dim signature. +bool HasUnspecifiedDimension(const TfLiteTensor* tensor); + } // namespace tflite #endif // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util_lite.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util_lite.cc similarity index 74% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util_lite.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util_lite.cc index f2b7d05..a786b68 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util_lite.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/kernel_util_lite.cc @@ -14,14 +14,6 @@ limitations under the License. 
==============================================================================*/ #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -// Patched by Edge Impulse, remove these declarations for Eta Compute -#ifdef ECM3532 -#undef _GLIBCXX_HAVE_ENOTSUP -#undef _GLIBCXX_HAVE_ECANCELED -#undef _GLIBCXX_HAVE_EOWNERDEAD -#undef _GLIBCXX_HAVE_ENOTRECOVERABLE -#endif - #include #include @@ -33,8 +25,9 @@ limitations under the License. #include #endif // TF_LITE_STATIC_MEMORY -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/context_util.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" @@ -49,11 +42,7 @@ namespace { // Assumes tensor_index is a valid index (in bounds) inline TfLiteTensor* GetTensorAtIndex(const TfLiteContext* context, int tensor_index) { - if (context->tensors != nullptr) { - return &context->tensors[tensor_index]; - } else { - return context->GetTensor(context, tensor_index); - } + return context->GetTensor(context, tensor_index); } // Validate in a single place to reduce binary size @@ -127,6 +116,7 @@ TfLiteStatus GetInputSafe(const TfLiteContext* context, const TfLiteNode* node, TfLiteTensor* GetVariableInput(TfLiteContext* context, const TfLiteNode* node, int index) { TfLiteTensor* tensor = GetMutableInput(context, node, index); + if (tensor == nullptr) return nullptr; return tensor->is_variable ? tensor : nullptr; } @@ -205,7 +195,7 @@ TfLiteStatus PopulateConvolutionQuantizationParams( const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift) { + int32_t* per_channel_multiplier, int32_t* per_channel_shift) { const auto* affine_quantization = reinterpret_cast(filter->quantization.params); return PopulateConvolutionQuantizationParams( @@ -220,7 +210,8 @@ TfLiteStatus PopulateConvolutionQuantizationParams( const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output, const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift, int32_t* output_activation_min, int32_t* output_activation_max, - int32_t* per_channel_multiplier, int* per_channel_shift, int num_channels) { + int32_t* per_channel_multiplier, int32_t* per_channel_shift, + int num_channels) { TF_LITE_ENSURE_EQ(context, input->quantization.type, kTfLiteAffineQuantization); TF_LITE_ENSURE_EQ(context, filter->quantization.type, @@ -241,7 +232,8 @@ TfLiteStatus PopulateConvolutionQuantizationParams( // Currently only Int8/Int16 is supported for per channel quantization. 
TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteInt16); - TF_LITE_ENSURE_EQ(context, filter->type, kTfLiteInt8); + TF_LITE_ENSURE(context, + filter->type == kTfLiteInt8 || filter->type == kTfLiteInt4); TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, num_channels); TF_LITE_ENSURE_EQ( context, num_channels, @@ -341,30 +333,49 @@ TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context, } namespace { -void CalculateActivationRangeQuantizedImpl(TfLiteFusedActivation activation, - int32_t qmin, int32_t qmax, - TfLiteTensor* output, - int32_t* act_min, int32_t* act_max) { + +inline TfLiteStatus Quantize(TfLiteContext* context, float scale, + int32_t zero_point, float f, int32_t& q) { + const float tmp = TfLiteRound(f / scale); + const bool no_integer_overflow_from_quantization = + (tmp >= static_cast(std::numeric_limits::min()) && + tmp <= static_cast(std::numeric_limits::max())); + TF_LITE_ENSURE(context, no_integer_overflow_from_quantization); + q = zero_point + static_cast(tmp); + return kTfLiteOk; +} + +TfLiteStatus CalculateActivationRangeQuantizedImpl( + TfLiteContext* context, TfLiteFusedActivation activation, int32_t qmin, + int32_t qmax, TfLiteTensor* output, int32_t* act_min, int32_t* act_max) { const auto scale = output->params.scale; const auto zero_point = output->params.zero_point; - auto quantize = [scale, zero_point](float f) { - return zero_point + static_cast(TfLiteRound(f / scale)); - }; - + int32_t tmp_q; if (activation == kTfLiteActRelu) { - *act_min = std::max(qmin, quantize(0.0)); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, 0.0, tmp_q)); + *act_min = std::max(qmin, tmp_q); *act_max = qmax; } else if (activation == kTfLiteActRelu6) { - *act_min = std::max(qmin, quantize(0.0)); - *act_max = std::min(qmax, quantize(6.0)); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, 0.0, tmp_q)); + *act_min = std::max(qmin, tmp_q); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, 6.0, tmp_q)); + *act_max = std::min(qmax, tmp_q); } else if (activation == kTfLiteActReluN1To1) { - *act_min = std::max(qmin, quantize(-1.0)); - *act_max = std::min(qmax, quantize(1.0)); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, -1.0, tmp_q)); + *act_min = std::max(qmin, tmp_q); + TF_LITE_ENSURE_OK(context, + Quantize(context, scale, zero_point, 1.0, tmp_q)); + *act_max = std::min(qmax, tmp_q); } else { *act_min = qmin; *act_max = qmax; } + return kTfLiteOk; } } // namespace @@ -388,9 +399,8 @@ TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context, TF_LITE_ENSURE(context, false); } - CalculateActivationRangeQuantizedImpl(activation, qmin, qmax, output, act_min, - act_max); - return kTfLiteOk; + return CalculateActivationRangeQuantizedImpl(context, activation, qmin, qmax, + output, act_min, act_max); } bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) { @@ -398,6 +408,24 @@ bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) { } #ifndef TF_LITE_STATIC_MEMORY +TfLiteStatus GetOutputShapeFromInput(TfLiteContext* context, + const TfLiteTensor* input, + TfLiteIntArray** output_shape) { + if (NumDimensions(input) != 1) { + TF_LITE_KERNEL_LOG(const_cast(context), + "Invalid %dD input tensor (must be a 1D tensor).", + NumDimensions(input)); + return kTfLiteError; + } + const int output_dims = SizeOfDimension(input, 0); + std::unique_ptr shape( + TfLiteIntArrayCreate(output_dims), TfLiteIntArrayFree); + for (int i = 0; 
i < output_dims; i++) { + shape->data[i] = input->data.i32[i]; + } + *output_shape = shape.release(); + return kTfLiteOk; +} // TODO(b/172067338): Having this function be part of TF_LITE_STATIC_MEMORY // build results in a 6KB size increase, even though the function is unsused for @@ -405,15 +433,24 @@ bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2) { // unsused function, the string library that gets pulled in is not dropped, // resulting in the increased binary size. // Patched by Edge Impulse, issues with building for TinkerGen -std::string GetShapeDebugString(const TfLiteIntArray* shape) { - std::string str = "GetShapeDebugString"; - // for (int d = 0; d < shape->size; ++d) { - // if (str.empty()) - // str = "[" + std::to_string(shape->data[d]); - // else - // str += ", " + std::to_string(shape->data[d]); - // } - // str += "]"; +// TODO inspect if we still need this +const std::string GetShapeDebugString(const TfLiteIntArray* shape) { + std::string str = "GetShapeDebugString";; + /* + for (int d = 0; d < shape->size; ++d) { + if (str.empty()) + str = "[" + std::to_string(shape->data[d]); + else + // Don't add space after "," to make the output consistent with + // tensorflow::shape_inference::InferenceContext::DebugString() + str += "," + std::to_string(shape->data[d]); + } + if (str.empty()) { + str = "[]"; + } else { + str += "]"; + } + */ return str; } @@ -421,26 +458,28 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input1, const TfLiteTensor* input2, TfLiteIntArray** output_shape) { - int dims1 = NumDimensions(input1); - int dims2 = NumDimensions(input2); - int out_dims = std::max(dims1, dims2); - if (NumElements(input1) == 0) { - *output_shape = TfLiteIntArrayCopy(input1->dims); - return kTfLiteOk; - } + const int dims1 = NumDimensions(input1); + const int dims2 = NumDimensions(input2); + const int out_dims = std::max(dims1, dims2); + std::unique_ptr shape( TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); for (int i = 0; i < out_dims; ++i) { - int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); - int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); + const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); + const int d2 = i >= dims2 ? 
1 : SizeOfDimension(input2, dims2 - i - 1); if (!(d1 == d2 || d1 == 1 || d2 == 1)) { - context->ReportError(context, - "Given shapes, %s and %s, are not broadcastable.", - GetShapeDebugString(input1->dims).c_str(), - GetShapeDebugString(input2->dims).c_str()); + TF_LITE_KERNEL_LOG(context, + "Given shapes, %s and %s, are not broadcastable.", + GetShapeDebugString(input1->dims).c_str(), + GetShapeDebugString(input2->dims).c_str()); return kTfLiteError; } - shape->data[out_dims - i - 1] = std::max(d1, d2); + + if (d1 == 0 || d2 == 0) { + shape->data[out_dims - i - 1] = 0; + } else { + shape->data[out_dims - i - 1] = std::max(d1, d2); + } } *output_shape = shape.release(); return kTfLiteOk; @@ -451,24 +490,27 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, const TfLiteTensor* input2, const TfLiteTensor* input3, TfLiteIntArray** output_shape) { - int dims1 = NumDimensions(input1); - int dims2 = NumDimensions(input2); - int dims3 = NumDimensions(input3); - int out_dims = std::max(std::max(dims1, dims2), dims3); + const int dims1 = NumDimensions(input1); + const int dims2 = NumDimensions(input2); + const int dims3 = NumDimensions(input3); + const int out_dims = std::max(std::max(dims1, dims2), dims3); std::unique_ptr shape( TfLiteIntArrayCreate(out_dims), TfLiteIntArrayFree); for (int i = 0; i < out_dims; ++i) { - int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); - int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); - int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); + const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); + const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); + const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); + const int min_value = std::min(std::min(d1, d2), d3); int max_value = std::max(std::max(d1, d2), d3); + // If one dimention is 0, others must be 0 or 1. 
+ if (min_value == 0) max_value = 0; if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) || !(d3 == 1 || d3 == max_value)) { - context->ReportError( - context, "Given shapes, %s, %s and %s, are not broadcastable.", - GetShapeDebugString(input1->dims).c_str(), - GetShapeDebugString(input2->dims).c_str(), - GetShapeDebugString(input3->dims).c_str()); + TF_LITE_KERNEL_LOG(context, + "Given shapes, %s, %s and %s, are not broadcastable.", + GetShapeDebugString(input1->dims).c_str(), + GetShapeDebugString(input2->dims).c_str(), + GetShapeDebugString(input3->dims).c_str()); return kTfLiteError; } shape->data[out_dims - i - 1] = max_value; @@ -482,42 +524,45 @@ TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context, int TfLiteTypeGetSize(TfLiteType type) { switch (type) { case kTfLiteUInt8: - TF_LITE_ASSERT_EQ(sizeof(uint8_t), 1); + static_assert(sizeof(uint8_t) == 1, ""); return 1; case kTfLiteInt8: - TF_LITE_ASSERT_EQ(sizeof(int8_t), 1); + static_assert(sizeof(int8_t) == 1, ""); return 1; case kTfLiteBool: return sizeof(bool); + case kTfLiteUInt16: + static_assert(sizeof(uint16_t) == 2, ""); + return 2; case kTfLiteInt16: - TF_LITE_ASSERT_EQ(sizeof(int16_t), 2); + static_assert(sizeof(int16_t) == 2, ""); return 2; case kTfLiteFloat16: - TF_LITE_ASSERT_EQ(sizeof(int16_t), 2); + static_assert(sizeof(int16_t) == 2, ""); return 2; case kTfLiteFloat32: - TF_LITE_ASSERT_EQ(sizeof(float), 4); + static_assert(sizeof(float) == 4, ""); return 4; case kTfLiteInt32: - TF_LITE_ASSERT_EQ(sizeof(int32_t), 4); + static_assert(sizeof(int32_t) == 4, ""); return 4; case kTfLiteUInt32: - TF_LITE_ASSERT_EQ(sizeof(uint32_t), 4); + static_assert(sizeof(uint32_t) == 4, ""); return 4; case kTfLiteInt64: - TF_LITE_ASSERT_EQ(sizeof(int64_t), 8); + static_assert(sizeof(int64_t) == 8, ""); return 8; case kTfLiteUInt64: - TF_LITE_ASSERT_EQ(sizeof(uint64_t), 8); + static_assert(sizeof(uint64_t) == 8, ""); return 8; case kTfLiteFloat64: - TF_LITE_ASSERT_EQ(sizeof(double), 8); + static_assert(sizeof(double) == 8, ""); return 8; case kTfLiteComplex64: - TF_LITE_ASSERT_EQ(sizeof(std::complex), 8); + static_assert(sizeof(std::complex) == 8, ""); return 8; case kTfLiteComplex128: - TF_LITE_ASSERT_EQ(sizeof(std::complex), 16); + static_assert(sizeof(std::complex) == 16, ""); return 16; default: return 0; @@ -535,4 +580,15 @@ bool IsMobilePlatform() { return false; } +bool HasUnspecifiedDimension(const TfLiteTensor* tensor) { +#ifndef TF_LITE_STATIC_MEMORY + if (tensor->dims_signature) { + for (int i : TfLiteIntArrayView(tensor->dims_signature)) { + if (i == -1) return true; + } + } +#endif // TF_LITE_STATIC_MEMORY + return false; +} + } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h index 0f91f5e..d3c50bb 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h @@ -15,69 +15,24 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ #define TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ -// If we're on a platform without standard IO functions, fall back to a -// non-portable function. 
-#ifdef TF_LITE_MCU_DEBUG_LOG - #include "edge-impulse-sdk/tensorflow/lite/micro/debug_log.h" -#define DEBUG_LOG(x) \ - do { \ - DebugLog(x); \ - } while (0) - -inline void InfiniteLoop() { - DEBUG_LOG("HALTED\n"); +#if !defined(TF_LITE_MCU_DEBUG_LOG) +#include +#define TFLITE_ABORT abort() +#else +inline void AbortImpl() { + DebugLog("HALTED\n"); while (1) { } } +#define TFLITE_ABORT AbortImpl(); +#endif -#define TFLITE_ABORT InfiniteLoop(); - -#else // TF_LITE_MCU_DEBUG_LOG - -#include -#include - -#define DEBUG_LOG(x) \ - do { \ - fprintf(stderr, "%s", (x)); \ - } while (0) - -// Report Error for unsupported type by op 'op_name' and returns kTfLiteError. -#define TF_LITE_UNSUPPORTED_TYPE(context, type, op_name) \ - do { \ - TF_LITE_KERNEL_LOG((context), "%s:%d Type %s is unsupported by op %s.", \ - __FILE__, __LINE__, TfLiteTypeGetName(type), \ - (op_name)); \ - return kTfLiteError; \ - } while (0) - -#define TFLITE_ABORT abort() - -#endif // TF_LITE_MCU_DEBUG_LOG - -#if defined(NDEBUG) || defined(ARDUINO) +#if defined(NDEBUG) #define TFLITE_ASSERT_FALSE (static_cast(0)) #else #define TFLITE_ASSERT_FALSE TFLITE_ABORT #endif -#define TF_LITE_FATAL(msg) \ - do { \ - DEBUG_LOG(msg); \ - DEBUG_LOG("\nFATAL\n"); \ - TFLITE_ABORT; \ - } while (0) - -#define TF_LITE_ASSERT(x) \ - do { \ - if (!(x)) TF_LITE_FATAL(#x); \ - } while (0) - -#define TF_LITE_ASSERT_EQ(x, y) \ - do { \ - if ((x) != (y)) TF_LITE_FATAL(#x " didn't equal " #y); \ - } while (0) - #endif // TENSORFLOW_LITE_KERNELS_OP_MACROS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/padding.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/padding.h index 62eb7a4..836ca92 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/padding.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/kernels/padding.h @@ -15,12 +15,11 @@ limitations under the License. #ifndef TENSORFLOW_LITE_KERNELS_PADDING_H_ #define TENSORFLOW_LITE_KERNELS_PADDING_H_ -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/builtin_op_data.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" namespace tflite { -// TODO(renjieliu): Migrate others to use ComputePaddingWithLeftover. inline int ComputePadding(int stride, int dilation_rate, int in_size, int filter_size, int out_size) { int effective_filter_size = (filter_size - 1) * dilation_rate + 1; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cc similarity index 68% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cc index 6436277..e9d2d6f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -26,24 +26,45 @@ AllOpsResolver::AllOpsResolver() { AddAddN(); AddArgMax(); AddArgMin(); + AddAssignVariable(); AddAveragePool2D(); + AddBatchMatMul(); AddBatchToSpaceNd(); + AddBroadcastArgs(); + AddBroadcastTo(); + AddCallOnce(); + AddCast(); AddCeil(); + AddComplexAbs(); + AddCircularBuffer(); AddConcatenation(); AddConv2D(); AddCos(); + AddCumSum(); + AddDepthToSpace(); AddDepthwiseConv2D(); AddDequantize(); - // AddDetectionPostprocess(); + AddDetectionPostprocess(); AddDiv(); AddElu(); AddEqual(); AddEthosU(); + AddExp(); + AddExpandDims(); + AddFill(); AddFloor(); + AddFloorDiv(); + AddFloorMod(); AddFullyConnected(); +#ifndef TF_LITE_STATIC_MEMORY + AddGather(); +#endif // TF_LITE_STATIC_MEMORY + AddGatherNd(); AddGreater(); AddGreaterEqual(); AddHardSwish(); + AddImag(); + AddIf(); AddL2Normalization(); AddL2Pool2D(); AddLeakyRelu(); @@ -54,10 +75,12 @@ AllOpsResolver::AllOpsResolver() { AddLogicalNot(); AddLogicalOr(); AddLogistic(); + AddLogSoftmax(); AddMaxPool2D(); AddMaximum(); AddMean(); AddMinimum(); + AddMirrorPad(); AddMul(); AddNeg(); AddNotEqual(); @@ -66,28 +89,47 @@ AllOpsResolver::AllOpsResolver() { AddPadV2(); AddPrelu(); AddQuantize(); + AddReal(); + AddReadVariable(); AddReduceMax(); + AddReduceMin(); AddRelu(); AddRelu6(); AddReshape(); + AddResizeBilinear(); AddResizeNearestNeighbor(); + AddRfft2D(); AddRound(); AddRsqrt(); +#ifndef TF_LITE_STATIC_MEMORY + AddSelect(); + AddSelectV2(); +#endif // TF_LITE_STATIC_MEMORY AddShape(); AddSin(); + AddSlice(); AddSoftmax(); AddSpaceToBatchNd(); + AddSpaceToDepth(); AddSplit(); AddSplitV(); AddSqrt(); AddSquare(); + AddSquaredDifference(); AddSqueeze(); AddStridedSlice(); AddSub(); + AddSum(); AddSvdf(); AddTanh(); + AddTranspose(); AddTransposeConv(); + AddTreeEnsembleClassifier(); + AddUnidirectionalSequenceLstm(); AddUnpack(); + AddVarHandle(); + AddWhile(); + AddZerosLike(); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/compatibility.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/compatibility.h index 0b08923..db117ca 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/compatibility.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/compatibility.h @@ -22,7 +22,10 @@ limitations under the License. // nothing to avoid linking in ::delete(). // This macro needs to be included in all subclasses of a virtual base class in // the private section. -// Patched by Edge Impulse, actually declaring `void operator delete(void* p) {}` yields compiler errors on some compilers + +// Patched by Edge Impulse, +// actually declaring `void operator delete(void* p) {}` +// yields compiler errors on some compilers #define TF_LITE_REMOVE_VIRTUAL_DELETE #endif // TENSORFLOW_LITE_MICRO_COMPATIBILITY_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.cc new file mode 100644 index 0000000..5ca66ab --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.cc @@ -0,0 +1,110 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.h" + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_arena_constants.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +// Patched by Edge Impulse +constexpr int FakeMicroContext::kNumScratchBuffers_; + +namespace { +// Dummy static variables to allow creation of dummy MicroAllocator. +// All tests are guarateed to run serially. +static constexpr int KDummyTensorArenaSize = 256; +static uint8_t dummy_tensor_arena[KDummyTensorArenaSize]; +} // namespace + +FakeMicroContext::FakeMicroContext(TfLiteTensor* tensors, + SingleArenaBufferAllocator* allocator, + MicroGraph* micro_graph) + : MicroContext( + MicroAllocator::Create(dummy_tensor_arena, KDummyTensorArenaSize), + nullptr, micro_graph), + tensors_(tensors), + allocator_(allocator) {} + +TfLiteTensor* FakeMicroContext::AllocateTempTfLiteTensor(int tensor_index) { + allocated_tensor_count_++; + return &tensors_[tensor_index]; +} + +void FakeMicroContext::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) { + allocated_tensor_count_--; +} + +bool FakeMicroContext::IsAllTempTfLiteTensorDeallocated() { + return !allocated_tensor_count_; +} + +TfLiteEvalTensor* FakeMicroContext::GetEvalTensor(int tensor_index) { + TfLiteEvalTensor* eval_tensor = + reinterpret_cast(allocator_->AllocateTemp( + sizeof(TfLiteEvalTensor), alignof(TfLiteEvalTensor))); + TFLITE_DCHECK(eval_tensor != nullptr); + + // In unit tests, the TfLiteTensor pointer contains the source of truth for + // buffers and values: + eval_tensor->data = tensors_[tensor_index].data; + eval_tensor->dims = tensors_[tensor_index].dims; + eval_tensor->type = tensors_[tensor_index].type; + return eval_tensor; +} + +void* FakeMicroContext::AllocatePersistentBuffer(size_t bytes) { + // FakeMicroContext use SingleArenaBufferAllocator, which does not + // automatically apply the buffer alignment like MicroAllocator. The buffer + // alignment is potentially wasteful but allows the fake_micro_context to work + // correctly with optimized kernels. + return allocator_->AllocatePersistentBuffer(bytes, + MicroArenaBufferAlignment()); +} + +TfLiteStatus FakeMicroContext::RequestScratchBufferInArena(size_t bytes, + int* buffer_index) { + TFLITE_DCHECK(buffer_index != nullptr); + + if (scratch_buffer_count_ == kNumScratchBuffers_) { + MicroPrintf("Exceeded the maximum number of scratch tensors allowed (%d).", + kNumScratchBuffers_); + return kTfLiteError; + } + + // For tests, we allocate scratch buffers from the tail and keep them around + // for the lifetime of model. This means that the arena size in the tests will + // be more than what we would have if the scratch buffers could share memory. 
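+ // Typical call pattern (sketch, not taken from this file): a kernel requests
+ // a buffer once in Prepare() via RequestScratchBufferInArena(bytes, &idx) and
+ // then fetches the same memory in every Eval() call with GetScratchBuffer(idx).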
+ scratch_buffers_[scratch_buffer_count_] = + allocator_->AllocatePersistentBuffer(bytes, MicroArenaBufferAlignment()); + TFLITE_DCHECK(scratch_buffers_[scratch_buffer_count_] != nullptr); + + *buffer_index = scratch_buffer_count_++; + return kTfLiteOk; +} + +void* FakeMicroContext::GetScratchBuffer(int buffer_index) { + TFLITE_DCHECK(scratch_buffer_count_ <= kNumScratchBuffers_); + if (buffer_index >= scratch_buffer_count_) { + return nullptr; + } + return scratch_buffers_[buffer_index]; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.h new file mode 100644 index 0000000..a7af023 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.h @@ -0,0 +1,56 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_FAKE_MICRO_CONTEXT_H_ +#define TENSORFLOW_LITE_MICRO_FAKE_MICRO_CONTEXT_H_ + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" + +namespace tflite { +// A fake of MicroContext for kernel util tests. +class FakeMicroContext : public MicroContext { + public: + FakeMicroContext(TfLiteTensor* tensors, SingleArenaBufferAllocator* allocator, + MicroGraph* micro_graph); + + void* AllocatePersistentBuffer(size_t bytes) override; + TfLiteStatus RequestScratchBufferInArena(size_t bytes, + int* buffer_index) override; + void* GetScratchBuffer(int buffer_index) override; + + TfLiteTensor* AllocateTempTfLiteTensor(int tensor_index) override; + void DeallocateTempTfLiteTensor(TfLiteTensor* tensor) override; + bool IsAllTempTfLiteTensorDeallocated(); + + TfLiteEvalTensor* GetEvalTensor(int tensor_index) override; + + private: + static constexpr int kNumScratchBuffers_ = 12; + + int scratch_buffer_count_ = 0; + uint8_t* scratch_buffers_[kNumScratchBuffers_]; + + TfLiteTensor* tensors_; + int allocated_tensor_count_ = 0; + + SingleArenaBufferAllocator* allocator_; + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_FAKE_MICRO_CONTEXT_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.cc new file mode 100644 index 0000000..2fe1663 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.cc @@ -0,0 +1,34 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" +#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { +TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type) { + return ConvertTensorType(tensor_type, type, tflite::GetMicroErrorReporter()); +} + +TfLiteStatus CallBuiltinParseFunction(TfLiteBridgeBuiltinParseFunction parser, + const Operator* op, + BuiltinDataAllocator* allocator, + void** builtin_data) { + return parser(op, tflite::GetMicroErrorReporter(), allocator, builtin_data); +} +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h new file mode 100644 index 0000000..a2a1ad4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h @@ -0,0 +1,45 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_FLATBUFFER_CONVERSIONS_BRIDGE_H_ +#define TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_FLATBUFFER_CONVERSIONS_BRIDGE_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" +#include "edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +// Forward declaration of the ErrorReporter class to hide it from the TFLM code. +class ErrorReporter; + +using TfLiteBridgeBuiltinDataAllocator = BuiltinDataAllocator; + +using TfLiteBridgeBuiltinParseFunction = + TfLiteStatus (*)(const Operator* op, ErrorReporter* error_reporter, + BuiltinDataAllocator* allocator, void** builtin_data); + +// Converts the tensor data type used in the flatbuffer to the representation +// used by the runtime. 
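+// For example (illustrative, not part of the original comment): passing
+// TensorType_FLOAT32 should set *type to kTfLiteFloat32 and return kTfLiteOk,
+// while an unsupported enum value is reported as an error.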
+TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type); + +// CallBuiltinParseFunction is a wrapper function to wrap the parser function +// calls to Call parser(op, allocator, builtin_data) +TfLiteStatus CallBuiltinParseFunction(TfLiteBridgeBuiltinParseFunction parser, + const Operator* op, + BuiltinDataAllocator* allocator, + void** builtin_data); +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_FLATBUFFER_CONVERSIONS_BRIDGE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.cc new file mode 100644 index 0000000..e5d779b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.cc @@ -0,0 +1,85 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h" + +namespace tflite { + +FlexbufferWrapper::FlexbufferWrapper(const uint8_t* buffer, size_t size) + : flexbuffers::Vector(flexbuffers::GetRoot(buffer, size).AsVector()) {} + +int64_t FlexbufferWrapper::ElementAsInt64(size_t i) const { + const uint8_t* elem = data_ + i * byte_width_; + return ::flexbuffers::ReadInt64(elem, byte_width_); +} + +uint64_t FlexbufferWrapper::ElementAsUInt64(size_t i) const { + const uint8_t* elem = data_ + i * byte_width_; + return ::flexbuffers::ReadUInt64(elem, byte_width_); +} + +int32_t FlexbufferWrapper::ElementAsInt32(size_t i) const { + return static_cast(ElementAsInt64(i)); +} + +bool FlexbufferWrapper::ElementAsBool(size_t i) const { + return static_cast(ElementAsUInt64(i)); +} + +double FlexbufferWrapper::ElementAsDouble(size_t i) const { + const uint8_t* elem = data_ + i * byte_width_; + return ::flexbuffers::ReadDouble(elem, byte_width_); +} + +float FlexbufferWrapper::ElementAsFloat(size_t i) const { + return static_cast(FlexbufferWrapper::ElementAsDouble(i)); +} + +// TODO(b/192589496): Ops must always be there. Remove this function when fixed +uint32_t NumSubgraphOperators(const SubGraph* subgraph) { + if (subgraph->operators() != nullptr) { + return subgraph->operators()->size(); + } else { + return 0; + } +} +// TODO(b/192589496): Ops must always be there. Remove this function when fixed +uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); + return NumSubgraphOperators(subgraph); +} + +TfLiteIntArray* FlatBufferVectorToTfLiteTypeArray( + const flatbuffers::Vector* flatbuffer_array) { + // On little-endian machines, TfLiteIntArray happens to have the same memory + // layout as flatbuffers:Vector, so we can reinterpret_cast the + // flatbuffer vector and avoid a copy and malloc. 
+ // TODO(b/188459715): audit this usage of const_cast. + return const_cast( + reinterpret_cast(flatbuffer_array)); +} + +TfLiteFloatArray* FlatBufferVectorToTfLiteTypeArray( + const flatbuffers::Vector* flatbuffer_array) { + // On little-endian machines, TfLiteFloatArray happens to have the same memory + // layout as flatbuffers:Vector, so we can reinterpret_cast the + // flatbuffer vector and avoid a copy and malloc. + // TODO(b/188459715): audit this usage of const_cast. + return const_cast( + reinterpret_cast(flatbuffer_array)); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h new file mode 100644 index 0000000..a5a7f9e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h @@ -0,0 +1,65 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_ +#define THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_ + +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flexbuffers.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { +// Kernels use flexbuffers::Map to pack their init parameters in a tflite file, +// with the parameter names as map keys and the parameter values as the +// corresponding map values. +// Accessing the map values using the flexbuffers:Map class is inline heavy, +// which can cause the code size to bloat beyond what's reasonable for a micro +// application. Use this class instead, when possible. +// FlexbufferWrapper takes advantage of the following properties of +// flexbuffers::Map: +// 1. It can be viewed as a flexbuffers::Vector of the values. +// 2. The values in the vector are ordered alphabetically by their keys. +// 3. All integer and Boolean values are stored as 64-bit numbers. +// 4. All floating point values are stored as double precision numbers. +// The properties are mentioned in the flexbuffers docs, but we rely on +// a unit test to catch design changes. 
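+// Usage sketch (illustrative only; the buffer layout and the indices depend on
+// the particular custom op):
+//   FlexbufferWrapper wrapper(reinterpret_cast<const uint8_t*>(buffer), length);
+//   int32_t stride = wrapper.ElementAsInt32(0);  // values are ordered by key
+//   bool align_corners = wrapper.ElementAsBool(1);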
+class FlexbufferWrapper : public flexbuffers::Vector { + public: + // Construct with a serialized flexbuffer 'buffer' of 'size' bytes + explicit FlexbufferWrapper(const uint8_t* buffer, size_t size); + int64_t ElementAsInt64(size_t i) const; + uint64_t ElementAsUInt64(size_t i) const; + int32_t ElementAsInt32(size_t i) const; + bool ElementAsBool(size_t i) const; + double ElementAsDouble(size_t i) const; + float ElementAsFloat(size_t i) const; +}; + +// Return the number of operators in a subgraph tflite +uint32_t NumSubgraphOperators(const SubGraph* subgraph); +uint32_t NumSubgraphOperators(const Model* model, int subgraph_idx); + +// Converts a flatbuffer array to a TfLiteArray. +// TODO(b/188459715): These function convert a const input to a non-const via a +// const_cast. It is unclear exactly why this is required. +TfLiteIntArray* FlatBufferVectorToTfLiteTypeArray( + const flatbuffers::Vector* flatbuffer_array); +TfLiteFloatArray* FlatBufferVectorToTfLiteTypeArray( + const flatbuffers::Vector* flatbuffer_array); + +} // namespace tflite + +#endif // THIRD_PARTY_TFLITE_MICRO_TENSORFLOW_LITE_MICRO_FLATBUFFER_UTILS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/ibuffer_allocator.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/ibuffer_allocator.h new file mode 100644 index 0000000..287eea3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/ibuffer_allocator.h @@ -0,0 +1,100 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" + +namespace tflite { +// Interface classes that the TFLM framework relies on to get buffers it needs. +// There are two types of buffers that the TFLM framework requires: persistent +// and non-persistent. Persistent buffers, once allocated, are never freed by +// the TFLM framework. Non-persist buffers can be allocated and deallocated by +// the TFLM framework. This file defines two interfaces classes that TFLM +// framework will rely on to manage these buffers. + +// Interface class for managing persistent buffers. +class IPersistentBufferAllocator { + public: + IPersistentBufferAllocator() {} + virtual ~IPersistentBufferAllocator() {} + + // Allocates persistent memory. The persistent buffer is never freed. + virtual uint8_t* AllocatePersistentBuffer(size_t size, size_t alignment) = 0; + + // Returns the size of all persistent allocations in bytes. + virtual size_t GetPersistentUsedBytes() const = 0; +}; + +// Interface class for managing non-persistent buffers. +// The default non-persistent buffers are temp buffers that are not resizable. 
+// Support of at least one resizable buffer is required.
+class INonPersistentBufferAllocator {
+ public:
+ INonPersistentBufferAllocator() {}
+ virtual ~INonPersistentBufferAllocator() {}
+
+ // Allocates a temporary buffer. This buffer is not resizable.
+ virtual uint8_t* AllocateTemp(size_t size, size_t alignment) = 0;
+
+ // Signals that a temporary buffer is no longer needed.
+ virtual void DeallocateTemp(uint8_t* buf) = 0;
+
+ // Returns true if all temporary buffers are already deallocated.
+ virtual bool IsAllTempDeallocated() = 0;
+
+ // Signals that all temporary allocations can be reclaimed. TFLM calls this
+ // API when it knows that all temporary buffers that it requested have been
+ // deallocated. The goal of this API is to let implementations of
+ // INonPersistentBufferAllocator reuse the buffer with some reasonable
+ // complexity.
+ virtual TfLiteStatus ResetTempAllocations() = 0;
+
+ // Returns a buffer that is resizable via ResizeBuffer().
+ virtual uint8_t* AllocateResizableBuffer(size_t size, size_t alignment) = 0;
+
+ // Resizes a buffer that was previously returned by
+ // AllocateResizableBuffer.
+ virtual TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size,
+ size_t alignment) = 0;
+
+ // Frees up the memory occupied by the resizable buffer.
+ virtual TfLiteStatus DeallocateResizableBuffer(uint8_t* resizable_buf) = 0;
+
+ // Returns a pointer pointing to the start of the overlay memory, which is
+ // used for activation tensors and scratch buffers by kernels at the Invoke stage.
+ virtual uint8_t* GetOverlayMemoryAddress() const = 0;
+
+ // Reserves the size of the overlay memory. This overlay is reserved for the
+ // kernels at the Invoke stage. This is referred to as the overlay because before
+ // the Invoke stage, the same memory can be used for temp buffers. The layout of
+ // the memory is planned by the memory planner separately at the Invoke stage.
+ virtual TfLiteStatus ReserveNonPersistentOverlayMemory(size_t size,
+ size_t alignment) = 0;
+
+ // Returns the size of the non-persistent buffer in use.
+ virtual size_t GetNonPersistentUsedBytes() const = 0;
+
+ // Returns the number of bytes available with a given alignment. This number
+ // takes into account any temporary allocations.
+ virtual size_t GetAvailableMemory(size_t alignment) const = 0;
+};
+
+} // namespace tflite
+
+#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_IBUFFER_ALLOCATOR_H_
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cc new file mode 100644 index 0000000..4f4cf81 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cc @@ -0,0 +1,120 @@
+/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(ReluOpData)); +} + +TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const ReluOpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kActivationsInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kActivationsOutputTensor); + + switch (input->type) { + case kTfLiteFloat32: { + ReluFloat(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; + } + case kTfLiteInt8: { + tflite::ReluQuantized(data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + default: { + MicroPrintf("Only float32 is supported currently, got %s", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + } +} + +void* Relu6Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(Relu6OpData)); +} + +TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const Relu6OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kActivationsInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kActivationsOutputTensor); + + switch (input->type) { + case kTfLiteFloat32: { + Relu6Float(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; + } + case kTfLiteInt8: { + Relu6Quantized(data.zero_int8, data.six_int8, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + default: { + MicroPrintf("Only float32 is supported currently, got %s", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + } +} + +} // namespace + +TfLiteRegistration Register_RELU() { + return tflite::micro::RegisterOp(ReluInit, ReluPrepare, 
ReluEval); +} + +TfLiteRegistration Register_RELU6() { + return tflite::micro::RegisterOp(Relu6Init, Relu6Prepare, Relu6Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cpp deleted file mode 100644 index 7e2a032..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.cpp +++ /dev/null @@ -1,288 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace activations { -namespace { - -struct ReluOpData { - ReluParams params; -}; - -struct Relu6OpData { - int8_t six_int8; - int8_t zero_int8; - uint8_t six_uint8; - uint8_t zero_uint8; -}; - -} // namespace - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -template -inline void ReluQuantized(const ReluOpData& data, - const RuntimeShape& input_shape, - const RuntimeShape& output_shape, const T* input_data, - T* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const int32_t val = static_cast(input_data[i]); - int32_t clamped = - data.params.output_offset + - MultiplyByQuantizedMultiplier(val - data.params.input_offset, - data.params.output_multiplier, - data.params.output_shift); - clamped = std::max(data.params.quantized_activation_min, clamped); - clamped = std::min(data.params.quantized_activation_max, clamped); - output_data[i] = static_cast(clamped); - } -} - -template -inline void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output, - ReluOpData* data) { - float act_min = 0.0; - float act_max = std::numeric_limits::infinity(); - double real_multiplier = - static_cast(input->params.scale / output->params.scale); - - const RuntimeShape input_shape = GetTensorShape(input); - const RuntimeShape output_shape = GetTensorShape(output); - - QuantizeMultiplier(real_multiplier, &data->params.output_multiplier, - &data->params.output_shift); - - data->params.quantized_activation_min = std::max( - 
static_cast(std::numeric_limits::min()), - output->params.zero_point + - static_cast(roundf(act_min / output->params.scale))); - data->params.quantized_activation_max = - act_max == std::numeric_limits::infinity() - ? static_cast(std::numeric_limits::max()) - : std::min(static_cast(std::numeric_limits::max()), - output->params.zero_point + - static_cast( - roundf(act_max / output->params.scale))); - data->params.input_offset = input->params.zero_point; - data->params.output_offset = output->params.zero_point; -} - -inline void ReluFloat(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const float val = input_data[i]; - const float lower = 0.0f; - const float clamped = val < lower ? lower : val; - output_data[i] = clamped; - } -} - -inline void Relu6Float(const RuntimeShape& input_shape, const float* input_data, - const RuntimeShape& output_shape, float* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const float val = input_data[i]; - const float upper = 6.0f; - const float lower = 0.0f; - const float clamped = val > upper ? upper : val < lower ? lower : val; - output_data[i] = clamped; - } -} - -template -inline void Relu6Quantized(Q lower, Q upper, const RuntimeShape& input_shape, - const Q* input_data, - const RuntimeShape& output_shape, Q* output_data) { - const int flat_size = MatchingFlatSize(input_shape, output_shape); - for (int i = 0; i < flat_size; ++i) { - const Q val = input_data[i]; - const Q clamped = val > upper ? upper : val < lower ? lower : val; - output_data[i] = clamped; - } -} - -void* ReluInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(ReluOpData)); -} - -TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - ReluOpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - if (input->type == kTfLiteInt8) { - CalculateReluOpData(input, output, data); - } else if (input->type == kTfLiteUInt8) { - CalculateReluOpData(input, output, data); - } - - return kTfLiteOk; -} - -TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const ReluOpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: { - ReluFloat(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - - return kTfLiteOk; - } - case kTfLiteInt8: { - ReluQuantized(data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - case kTfLiteUInt8: { - ReluQuantized(data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorShape(output), - 
tflite::micro::GetTensorData(input), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - default: { - TF_LITE_KERNEL_LOG(context, "Only float32 is supported currently, got %s", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } -} - -void* Relu6Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(Relu6OpData)); -} - -TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - Relu6OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - - if (input->type == kTfLiteInt8) { - data->six_int8 = FloatToQuantizedType(6.0f, input->params.scale, - input->params.zero_point); - data->zero_int8 = input->params.zero_point; - } else if (input->type == kTfLiteUInt8) { - data->six_uint8 = FloatToQuantizedType(6.0f, input->params.scale, - input->params.zero_point); - data->zero_uint8 = input->params.zero_point; - } - - return kTfLiteOk; -} - -TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const Relu6OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: { - Relu6Float(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - - return kTfLiteOk; - } - case kTfLiteInt8: { - Relu6Quantized(data.zero_int8, data.six_int8, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - case kTfLiteUInt8: { - Relu6Quantized(data.zero_uint8, data.six_uint8, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - default: { - TF_LITE_KERNEL_LOG(context, "Only float32 is supported currently, got %s", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } -} - -} // namespace activations - -TfLiteRegistration Register_RELU() { - return {/*init=*/activations::ReluInit, - /*free=*/nullptr, - /*prepare=*/activations::ReluPrepare, - /*invoke=*/activations::ReluEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_RELU6() { - return {/*init=*/activations::Relu6Init, - /*free=*/nullptr, - /*prepare=*/activations::Relu6Prepare, - /*invoke=*/activations::Relu6Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.h new file mode 100644 index 0000000..c6dddcd --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.h @@ -0,0 +1,63 @@ +/* Copyright 2021 The TensorFlow 
Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +extern const int kActivationsInputTensor; +extern const int kActivationsOutputTensor; + +struct ReluOpData { + ReluParams params; +}; + +struct Relu6OpData { + int8_t six_int8; + int8_t zero_int8; +}; + +void ReluQuantized(const ReluOpData& data, const RuntimeShape& input_shape, + const RuntimeShape& output_shape, const int8_t* input_data, + int8_t* output_data); + +template +void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output, + ReluOpData* data); + +void ReluFloat(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data); + +void Relu6Float(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data); + +void Relu6Quantized(int8_t lower, int8_t upper, const RuntimeShape& input_shape, + const int8_t* input_data, const RuntimeShape& output_shape, + int8_t* output_data); + +TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node); + +TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_ACTIVATIONS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations_common.cc new file mode 100644 index 0000000..d270813 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/activations_common.cc @@ -0,0 +1,158 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/activations.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +const int kActivationsInputTensor = 0; +const int kActivationsOutputTensor = 0; + +void ReluQuantized(const ReluOpData& data, const RuntimeShape& input_shape, + const RuntimeShape& output_shape, const int8_t* input_data, + int8_t* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const int32_t val = static_cast(input_data[i]); + int32_t clamped = + data.params.output_offset + + MultiplyByQuantizedMultiplier(val - data.params.input_offset, + data.params.output_multiplier, + data.params.output_shift); + clamped = std::max(data.params.quantized_activation_min, clamped); + clamped = std::min(data.params.quantized_activation_max, clamped); + output_data[i] = static_cast(clamped); + } +} + +template +void CalculateReluOpData(const TfLiteTensor* input, TfLiteTensor* output, + ReluOpData* data) { + float act_min = 0.0; + float act_max = std::numeric_limits::infinity(); + double real_multiplier = static_cast(input->params.scale) / + static_cast(output->params.scale); + + const RuntimeShape input_shape = GetTensorShape(input); + const RuntimeShape output_shape = GetTensorShape(output); + + QuantizeMultiplier(real_multiplier, &data->params.output_multiplier, + &data->params.output_shift); + + data->params.quantized_activation_min = std::max( + static_cast(std::numeric_limits::min()), + output->params.zero_point + + static_cast(roundf(act_min / output->params.scale))); + data->params.quantized_activation_max = + act_max == std::numeric_limits::infinity() + ? static_cast(std::numeric_limits::max()) + : std::min(static_cast(std::numeric_limits::max()), + output->params.zero_point + + static_cast( + roundf(act_max / output->params.scale))); + data->params.input_offset = input->params.zero_point; + data->params.output_offset = output->params.zero_point; +} + +void ReluFloat(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const float val = input_data[i]; + const float lower = 0.0f; + const float clamped = val < lower ? lower : val; + output_data[i] = clamped; + } +} + +void Relu6Float(const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& output_shape, float* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const float val = input_data[i]; + const float upper = 6.0f; + const float lower = 0.0f; + const float clamped = val > upper ? upper : val < lower ? 
lower : val; + output_data[i] = clamped; + } +} + +void Relu6Quantized(int8_t lower, int8_t upper, const RuntimeShape& input_shape, + const int8_t* input_data, const RuntimeShape& output_shape, + int8_t* output_data) { + const int flat_size = MatchingFlatSize(input_shape, output_shape); + for (int i = 0; i < flat_size; ++i) { + const int8_t val = input_data[i]; + const int8_t clamped = val > upper ? upper : val < lower ? lower : val; + output_data[i] = clamped; + } +} + +TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + ReluOpData* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kActivationsInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kActivationsOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + if (input->type == kTfLiteInt8) { + CalculateReluOpData(input, output, data); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Relu6Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + Relu6OpData* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kActivationsInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + + if (input->type == kTfLiteInt8) { + data->six_int8 = FloatToQuantizedType(6.0f, input->params.scale, + input->params.zero_point); + data->zero_int8 = input->params.zero_point; + } + + micro_context->DeallocateTempTfLiteTensor(input); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cc new file mode 100644 index 0000000..2140d1f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cc @@ -0,0 +1,1383 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + bool requires_broadcast; + + // These fields are used in both the general 8-bit -> 8bit quantized path, + // and the special 16-bit -> 16bit quantized path + int input1_shift; + int input2_shift; + int32_t output_activation_min; + int32_t output_activation_max; + + // These fields are used only in the general 8-bit -> 8bit quantized path + int32_t input1_multiplier; + int32_t input2_multiplier; + int32_t output_multiplier; + + int output_shift; + int left_shift; + + int32_t input1_offset; + int32_t input2_offset; + int32_t output_offset; + + // Used only for float evals: + float output_activation_min_f32; + float output_activation_max_f32; +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, TfLiteTensor* output, + OpData* data) { + data->requires_broadcast = !HaveSameShapes(input1, input2); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + // 8bit -> 8bit general quantized path, with general rescalings + data->input1_offset = -input1->params.zero_point; + data->input2_offset = -input2->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = (output->type == kTfLiteInt16) ? 
15 : 20; + const double twice_max_input_scale = + 2 * static_cast( + std::max(input1->params.scale, input2->params.scale)); + const double real_input1_multiplier = + static_cast(input1->params.scale) / twice_max_input_scale; + const double real_input2_multiplier = + static_cast(input2->params.scale) / twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + } else if (output->type == kTfLiteFloat32) { + CalculateActivationRange(params->activation, + &data->output_activation_min_f32, + &data->output_activation_max_f32); + } + + return kTfLiteOk; +} + +void UpdateOpParams(tflite::ArithmeticParams* const op_params, + const OpData* data) { + op_params->left_shift = data->left_shift; + op_params->input1_offset = data->input1_offset; + op_params->input1_multiplier = data->input1_multiplier; + op_params->input1_shift = data->input1_shift; + op_params->input2_offset = data->input2_offset; + op_params->input2_multiplier = data->input2_multiplier; + op_params->input2_shift = data->input2_shift; + op_params->output_offset = data->output_offset; + op_params->output_multiplier = data->output_multiplier; + op_params->output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, data->output_activation_max, + op_params); +} + +TfLiteStatus EvalAddQuantizedInt8(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + UpdateOpParams(&op_params, data); + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + reference_integer_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + arm_elementwise_add_s8( + tflite::micro::GetTensorData(input1), + + tflite::micro::GetTensorData(input2), op_params.input1_offset, + op_params.input1_multiplier, op_params.input1_shift, + op_params.input2_offset, op_params.input2_multiplier, + op_params.input2_shift, op_params.left_shift, + tflite::micro::GetTensorData(output), op_params.output_offset, + op_params.output_multiplier, op_params.output_shift, + op_params.quantized_activation_min, op_params.quantized_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } + + return kTfLiteOk; +} + +TfLiteStatus EvalAddQuantizedInt16(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + 
tflite::ArithmeticParams op_params; + UpdateOpParams(&op_params, data); + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + arm_elementwise_add_s16( + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input2), op_params.input1_offset, + op_params.input1_multiplier, op_params.input1_shift, + op_params.input2_offset, op_params.input2_multiplier, + op_params.input2_shift, op_params.left_shift, + tflite::micro::GetTensorData(output), op_params.output_offset, + op_params.output_multiplier, op_params.output_shift, + op_params.quantized_activation_min, op_params.quantized_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } + + return kTfLiteOk; +} + +void EvalAddFloat(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + SetActivationParams(data->output_activation_min_f32, + data->output_activation_max_f32, &op_params); + if (data->requires_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} + +TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + switch (output->type) { + case kTfLiteInt8: { + EvalAddQuantizedInt8(context, node, params, data, input1, input2, output); + break; + } + case kTfLiteInt16: { + EvalAddQuantizedInt16(context, node, params, data, input1, input2, + output); + break; + } + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +void* InitAdd(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus PrepareAdd(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + 
TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + if (input1->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + } + + OpData* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_STATUS( + CalculateOpData(context, params, input1, input2, output, data)); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus EvalAdd(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData* data = static_cast(node->user_data); + + if (output->type == kTfLiteFloat32) { + EvalAddFloat(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, + input1, input2, output)); + } else { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(output->type), + output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus EvalAddInt8(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(output->type == kTfLiteInt8); + const OpData* data = static_cast(node->user_data); + + TF_LITE_ENSURE_OK(context, EvalAddQuantizedInt8(context, node, params, data, + input1, input2, output)); + + return kTfLiteOk; +} + +TfLiteStatus EvalAddInt16(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(output->type == kTfLiteInt16); + const OpData* data = static_cast(node->user_data); + + TF_LITE_ENSURE_OK(context, EvalAddQuantizedInt16(context, node, params, data, + input1, input2, output)); + + return kTfLiteOk; +} + +TfLiteRegistration Register_ADD() { + return tflite::micro::RegisterOp(InitAdd, PrepareAdd, EvalAdd); +} + +TfLiteRegistration Register_ADD_INT8() { + return tflite::micro::RegisterOp(InitAdd, PrepareAdd, EvalAddInt8); +} + +TfLiteRegistration Register_ADD_INT16() { + return tflite::micro::RegisterOp(InitAdd, PrepareAdd, EvalAddInt16); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" + +#include +#include + +#include "mli_api.h" // NOLINT +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + bool requires_broadcast; + + // These fields are used in both the general 8-bit -> 8bit quantized path, + // and the special 16-bit -> 16bit quantized path + int input1_shift; + int input2_shift; + int32_t output_activation_min; + int32_t output_activation_max; + + // These fields are used only in the general 8-bit -> 8bit quantized path + int32_t input1_multiplier; + int32_t input2_multiplier; + int32_t output_multiplier; + int output_shift; + int left_shift; + int32_t input1_offset; + int32_t input2_offset; + int32_t output_offset; + + // Used only for float evals: + float output_activation_min_f32; + float output_activation_max_f32; + + // The result of checking if MLI optimized version of tensors can be used. + bool is_mli_applicable; + + // Tensors in MLI format. 
+ mutable ops::micro::MliTensorInterface mli_input1; + mutable ops::micro::MliTensorInterface mli_input2; + mutable ops::micro::MliTensorInterface mli_out; +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, TfLiteTensor* output, + OpData* data) { + data->requires_broadcast = !HaveSameShapes(input1, input2); + + if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + + // MLI 2.0 optimized version only supports int8_t datatype and min/max + // within container range. Broadcasting isn't supported on the primitive + // level (but might be implemented as part of slicing in future) +#ifdef MLI_2_0 // + data->is_mli_applicable = + (input1->type == kTfLiteInt8) && (input2->type == kTfLiteInt8) && + (output->type == kTfLiteInt8) && !data->requires_broadcast && + data->output_activation_min == std::numeric_limits::min() && + data->output_activation_max == std::numeric_limits::max(); +#else + data->is_mli_applicable = false; +#endif + + if (data->is_mli_applicable) { + data->mli_input1 = + ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_input2 = + ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_out = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + + ops::micro::ConvertToMliTensor(input1, &data->mli_input1); + ops::micro::ConvertToMliTensor(input2, &data->mli_input2); + ops::micro::ConvertToMliTensor(output, &data->mli_out); + /* Flatten tensors to simplify the process (as we don't support + * broadcasting). */ + data->mli_input1.Shape()[0] = + mli_hlp_count_elem_num(data->mli_input1.MliTensor(), 0); + data->mli_input2.Shape()[0] = + mli_hlp_count_elem_num(data->mli_input2.MliTensor(), 0); + data->mli_out.Shape()[0] = + mli_hlp_count_elem_num(data->mli_out.MliTensor(), 0); + data->mli_input1.MemStride()[0] = data->mli_input2.MemStride()[0] = 1; + data->mli_out.MemStride()[0] = 1; + *data->mli_input1.Rank() = *data->mli_input2.Rank() = 1; + *data->mli_out.Rank() = 1; + } + } else { + data->is_mli_applicable = false; + } + +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + // 8bit -> 8bit general quantized path, with general rescalings + data->input1_offset = -input1->params.zero_point; + data->input2_offset = -input2->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = (output->type == kTfLiteInt16) ? 
15 : 20; + const double twice_max_input_scale = + 2 * static_cast( + std::max(input1->params.scale, input2->params.scale)); + const double real_input1_multiplier = + static_cast(input1->params.scale) / twice_max_input_scale; + const double real_input2_multiplier = + static_cast(input2->params.scale) / twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + } else if (output->type == kTfLiteFloat32) { + CalculateActivationRange(params->activation, + &data->output_activation_min_f32, + &data->output_activation_max_f32); +#endif // !defined(TF_LITE_STRIP_REFERENCE_IMPL) + } + + return kTfLiteOk; +} + +TfLiteStatus EvalAdd(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + tflite::ArithmeticParams op_params; + SetActivationParams(data->output_activation_min_f32, + data->output_activation_max_f32, &op_params); + if (data->requires_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + return kTfLiteOk; +#else + MicroPrintf("Node configuration is not supported by ARC MLI Library."); + return kTfLiteError; +#endif +} + +TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + tflite::ArithmeticParams op_params; + op_params.left_shift = data->left_shift; + op_params.input1_offset = data->input1_offset; + op_params.input1_multiplier = data->input1_multiplier; + op_params.input1_shift = data->input1_shift; + op_params.input2_offset = data->input2_offset; + op_params.input2_multiplier = data->input2_multiplier; + op_params.input2_shift = data->input2_shift; + op_params.output_offset = data->output_offset; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, data->output_activation_max, + &op_params); + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + switch (output->type) { + case kTfLiteInt8: { + if (need_broadcast) { + reference_integer_ops::BroadcastAdd4DSlow( + op_params, 
tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_integer_ops::Add( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + break; + } + case kTfLiteInt16: { + if (need_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + false); + } + break; + } + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +#else + MicroPrintf("Node configuration is not supported by ARC MLI Library."); + return kTfLiteError; +#endif +} + +TfLiteStatus EvalMLIAddInt8(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { +#ifdef MLI_2_0 + TF_LITE_ENSURE(context, data->is_mli_applicable == true); + TF_LITE_ENSURE(context, input1->type == kTfLiteInt8); + TF_LITE_ENSURE(context, input2->type == kTfLiteInt8); + TF_LITE_ENSURE(context, output->type == kTfLiteInt8); + + ops::micro::MliTensorAttachBuffer(input1, &data->mli_input1); + ops::micro::MliTensorAttachBuffer(input2, &data->mli_input2); + ops::micro::MliTensorAttachBuffer(output, &data->mli_out); + + // mli_mov config and tensors for data in fast (local) memory with interface + mli_mov_cfg_t copy_config; + mli_mov_cfg_for_copy(©_config); + mli_tensor input1_local_tsr = *data->mli_input1.MliTensor(); + mli_tensor input2_local_tsr = *data->mli_input2.MliTensor(); + mli_tensor out_local_tsr = *data->mli_out.MliTensor(); + ops::micro::MliTensorInterface input1_local(&input1_local_tsr); + ops::micro::MliTensorInterface input2_local(&input2_local_tsr); + ops::micro::MliTensorInterface out_local(&out_local_tsr); + + /* allocate the local buffers, and compute the slice size */ + TF_LITE_ENSURE_STATUS(ops::micro::get_arc_scratch_buffer_for_eltwise_tensors( + context, &input1_local, &input2_local, &out_local)); + TF_LITE_ENSURE(context, *input1_local.Rank() == 1 && + *input2_local.Rank() == 1 && + *out_local.Rank() == 1); + uint32_t min_capacity = *input1_local.DataCapacity(); + min_capacity = std::min(min_capacity, *input2_local.DataCapacity()); + min_capacity = std::min(min_capacity, *out_local.DataCapacity()); + const int slice_dim = 0; + const int slice_size = + min_capacity / mli_hlp_tensor_element_size(out_local.MliTensor()); + + /* is_local indicates that the tensor is already in local memory, + so in that case the original tensor can be used, + and there is no need to copy it to the local tensor*/ + const bool input1_is_local = + input1_local.Data() == data->mli_input1.Data(); + const bool 
input2_is_local = + input2_local.Data() == data->mli_input2.Data(); + const bool out_is_local = + out_local.Data() == data->mli_out.Data(); + + ops::micro::TensorSlicer input1_slice(data->mli_input1.MliTensor(), slice_dim, + slice_size); + ops::micro::TensorSlicer input2_slice(data->mli_input2.MliTensor(), slice_dim, + slice_size); + ops::micro::TensorSlicer out_slice(data->mli_out.MliTensor(), slice_dim, + slice_size); + + mli_tensor* input1_tsr = + input1_is_local ? input1_slice.Sub() : input1_local.MliTensor(); + mli_tensor* input2_tsr = + input2_is_local ? input2_slice.Sub() : input2_local.MliTensor(); + mli_tensor* out_tsr = out_is_local ? out_slice.Sub() : out_local.MliTensor(); + + while (!out_slice.Done()) { + mli_mov_tensor_sync(input1_slice.Sub(), ©_config, input1_tsr); + mli_mov_tensor_sync(input2_slice.Sub(), ©_config, input2_tsr); + + mli_krn_eltwise_add_sa8(input1_tsr, input2_tsr, out_tsr); + + mli_mov_tensor_sync(out_tsr, ©_config, out_slice.Sub()); + input1_slice.Next(); + input2_slice.Next(); + out_slice.Next(); + } + return kTfLiteOk; +#else + return kTfLiteError; +#endif +} + +void* AddInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) { + TfLiteStatus ret_val = kTfLiteOk; + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + if (data->is_mli_applicable) { + ret_val = + EvalMLIAddInt8(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteFloat32) { + ret_val = EvalAdd(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + ret_val = + EvalAddQuantized(context, node, params, data, input1, input2, output); + } else { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(output->type), + output->type); + ret_val = kTfLiteError; + } + + return ret_val; +} + +TfLiteRegistration Register_ADD() { + return tflite::micro::RegisterOp(AddInit, AddPrepare, AddEval); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP == 1 +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "sl_mvp_ml_add.h" + +namespace tflite { +namespace sl { +namespace add { + +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + bool requires_broadcast; + + int input1_shift; + int input2_shift; + int32_t input1_multiplier; + int32_t input2_multiplier; + int32_t output_multiplier; + int output_shift; + int left_shift; + + sli_mvp_ml_add_s8_params_t params; + + // Used only for float evals: + float output_activation_min_f32; + float output_activation_max_f32; +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, TfLiteTensor* output, + OpData* data) { + data->requires_broadcast = !HaveSameShapes(input1, input2); + + if (output->type == kTfLiteInt8) { + data->params.input1_offset = -input1->params.zero_point; + data->params.input2_offset = -input2->params.zero_point; + data->params.output_offset = output->params.zero_point; + data->params.input1_multiplier = input1->params.scale; + data->params.input2_multiplier = input2->params.scale; + data->params.output_multiplier = 1.0 / output->params.scale; + data->params.length = GetTensorShape(input1).FlatSize(); + + int32_t activation_min; + int32_t activation_max; + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &activation_min, + &activation_max)); + data->params.activation_min = static_cast(activation_min); + data->params.activation_max = static_cast(activation_max); + + // These multipliers and parameters are not used by the MVP codepath, + // however are needed in cases where broadcast is used. 
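+    // Illustrative sketch only (it mirrors the reference quantized-add math
+    // that these multipliers feed; it is not an additional code path): each
+    // int8 input is widened, shifted onto a shared fixed-point grid, rescaled,
+    // summed, then rescaled to the output quantization and clamped:
+    //   shifted1 = (in1 + input1_offset) << left_shift
+    //   shifted2 = (in2 + input2_offset) << left_shift
+    //   sum      = MulByQuantMult(shifted1, input1_multiplier, input1_shift)
+    //            + MulByQuantMult(shifted2, input2_multiplier, input2_shift)
+    //   out      = clamp(MulByQuantMult(sum, output_multiplier, output_shift)
+    //                      + output_offset,
+    //                    activation_min, activation_max)
+    // where MulByQuantMult stands in for
+    // MultiplyByQuantizedMultiplierSmallerThanOneExp.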
+    data->left_shift = 20;
+    const double twice_max_input_scale =
+        2 * static_cast<double>(
+                std::max(input1->params.scale, input2->params.scale));
+    const double real_input1_multiplier =
+        static_cast<double>(input1->params.scale) / twice_max_input_scale;
+    const double real_input2_multiplier =
+        static_cast<double>(input2->params.scale) / twice_max_input_scale;
+    const double real_output_multiplier =
+        twice_max_input_scale /
+        ((1 << data->left_shift) * static_cast<double>(output->params.scale));
+
+    QuantizeMultiplierSmallerThanOneExp(
+        real_input1_multiplier, &data->input1_multiplier, &data->input1_shift);
+
+    QuantizeMultiplierSmallerThanOneExp(
+        real_input2_multiplier, &data->input2_multiplier, &data->input2_shift);
+
+    QuantizeMultiplierSmallerThanOneExp(
+        real_output_multiplier, &data->output_multiplier, &data->output_shift);
+
+  } else if (output->type == kTfLiteFloat32) {
+    CalculateActivationRange(params->activation,
+                             &data->output_activation_min_f32,
+                             &data->output_activation_max_f32);
+  }
+
+  return kTfLiteOk;
+}
+
+void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params,
+             const OpData* data, const TfLiteEvalTensor* input1,
+             const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) {
+  tflite::ArithmeticParams op_params;
+  SetActivationParams(data->output_activation_min_f32,
+                      data->output_activation_max_f32, &op_params);
+  if (data->requires_broadcast) {
+    reference_ops::BroadcastAdd4DSlow(
+        op_params, tflite::micro::GetTensorShape(input1),
+        tflite::micro::GetTensorData<float>(input1),
+        tflite::micro::GetTensorShape(input2),
+        tflite::micro::GetTensorData<float>(input2),
+        tflite::micro::GetTensorShape(output),
+        tflite::micro::GetTensorData<float>(output));
+  } else {
+    reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1),
+                       tflite::micro::GetTensorData<float>(input1),
+                       tflite::micro::GetTensorShape(input2),
+                       tflite::micro::GetTensorData<float>(input2),
+                       tflite::micro::GetTensorShape(output),
+                       tflite::micro::GetTensorData<float>(output));
+  }
+}
+
+TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node,
+                              TfLiteAddParams* params, const OpData* data,
+                              const TfLiteEvalTensor* input1,
+                              const TfLiteEvalTensor* input2,
+                              TfLiteEvalTensor* output) {
+  TfLiteStatus status = kTfLiteOk;
+  tflite::ArithmeticParams op_params;
+  op_params.left_shift = data->left_shift;
+  op_params.input1_offset = data->params.input1_offset;
+  op_params.input1_multiplier = data->input1_multiplier;
+  op_params.input1_shift = data->input1_shift;
+  op_params.input2_offset = data->params.input2_offset;
+  op_params.input2_multiplier = data->input2_multiplier;
+  op_params.input2_shift = data->input2_shift;
+  op_params.output_offset = data->params.output_offset;
+  op_params.output_multiplier = data->output_multiplier;
+  op_params.output_shift = data->output_shift;
+  op_params.quantized_activation_min = data->params.activation_min;
+  op_params.quantized_activation_max = data->params.activation_max;
+
+  // TODO: Do we need to support the broadcast scenario?
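+  // Sketch of the dispatch that follows, assuming two int8 tensors of equal
+  // shape (no broadcasting), so ProcessBroadcastShapes() returns false and the
+  // whole add runs as a single accelerated MVP call:
+  //   sli_mvp_ml_add_s8_params_t p = data->params;  // offsets/scales from Prepare
+  //   p.input1 = tflite::micro::GetTensorData<int8_t>(input1);
+  //   p.input2 = tflite::micro::GetTensorData<int8_t>(input2);
+  //   p.output = tflite::micro::GetTensorData<int8_t>(output);
+  //   if (sli_mvp_ml_add_s8(&p) != SL_STATUS_OK) { /* report kTfLiteError */ }
+  // Shapes that require broadcasting instead take the reference
+  // BroadcastAdd4DSlow path below.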
+ bool need_broadcast = reference_ops::ProcessBroadcastShapes(tflite::micro::GetTensorShape(input1), tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + reference_integer_ops::BroadcastAdd4DSlow(op_params, + tflite::micro::GetTensorShape(input1), tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); + } else { + sli_mvp_ml_add_s8_params_t params = data->params; + params.input1 = tflite::micro::GetTensorData(input1); + params.input2 = tflite::micro::GetTensorData(input2); + params.output = tflite::micro::GetTensorData(output); + sl_status_t ret = sli_mvp_ml_add_s8(¶ms); + if (ret != SL_STATUS_OK) { + status = kTfLiteError; + } + } + + return status; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + OpData* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_STATUS( + CalculateOpData(context, params, input1, input2, output, data)); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteFloat32) { + EvalAdd(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt8) { + TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, + input1, input2, output)); + } else { + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace add +} // namespace sl + +TfLiteRegistration Register_ADD() { + return {/*init=*/sl::add::Init, + /*free=*/nullptr, + /*prepare=*/sl::add::Prepare, + /*invoke=*/sl::add::Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +#include + +#if ESP_NN +#include "edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h" +#endif + +long long add_total_time = 0; + +namespace tflite { + +void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params, + const OpDataAdd* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + SetActivationParams(data->output_activation_min_f32, + data->output_activation_max_f32, &op_params); + if (data->requires_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} + +TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpDataAdd* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + op_params.left_shift = data->left_shift; + op_params.input1_offset = data->input1_offset; + op_params.input1_multiplier = data->input1_multiplier; + op_params.input1_shift = data->input1_shift; + op_params.input2_offset = data->input2_offset; + op_params.input2_multiplier = data->input2_multiplier; + op_params.input2_shift = data->input2_shift; + op_params.output_offset = data->output_offset; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, data->output_activation_max, + &op_params); + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + 
tflite::micro::GetTensorShape(input2), &op_params); + + switch (output->type) { + case kTfLiteInt8: { + if (need_broadcast) { + reference_integer_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { +#if ESP_NN + const int8_t *input1_data = tflite::micro::GetTensorData(input1); + const int8_t *input2_data = tflite::micro::GetTensorData(input2); + int8_t *out_data = tflite::micro::GetTensorData(output); + + esp_nn_add_elementwise_s8(input1_data, + input2_data, + data->input1_offset, + data->input2_offset, + data->input1_multiplier, + data->input2_multiplier, + data->input1_shift, + data->input2_shift, + data->left_shift, + out_data, + data->output_offset, + data->output_multiplier, + data->output_shift, + data->output_activation_min, + data->output_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output)) + ); +#else + reference_integer_ops::Add( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#endif + } + break; + } + case kTfLiteInt16: { + if (need_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + false); + } + break; + } + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +void* AddInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataAdd)); +} + +TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataAdd* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kAddInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kAddInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kAddOutputTensor); + + long long start_time = esp_timer_get_time(); + + if (output->type == kTfLiteFloat32) { + EvalAdd(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, + input1, input2, output)); + } else { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(output->type), + output->type); + return kTfLiteError; + } + add_total_time += esp_timer_get_time() - start_time; + + return 
kTfLiteOk; +} + +TfLiteRegistration Register_ADD() { + return tflite::micro::RegisterOp(AddInit, AddPrepare, AddEval); +} + +} // namespace tflite + +#else +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params, + const OpDataAdd* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + SetActivationParams(data->output_activation_min_f32, + data->output_activation_max_f32, &op_params); + if (data->requires_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} + +TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteAddParams* params, const OpDataAdd* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + op_params.left_shift = data->left_shift; + op_params.input1_offset = data->input1_offset; + op_params.input1_multiplier = data->input1_multiplier; + op_params.input1_shift = data->input1_shift; + op_params.input2_offset = data->input2_offset; + op_params.input2_multiplier = data->input2_multiplier; + op_params.input2_shift = data->input2_shift; + op_params.output_offset = data->output_offset; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + 
SetActivationParams(data->output_activation_min, data->output_activation_max, + &op_params); + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + switch (output->type) { + case kTfLiteInt8: { + if (need_broadcast) { + reference_integer_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_integer_ops::Add( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + break; + } + case kTfLiteInt16: { + if (need_broadcast) { + reference_ops::BroadcastAdd4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + false); + } + break; + } + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +void* AddInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataAdd)); +} + +TfLiteStatus AddEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataAdd* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kAddInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kAddInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kAddOutputTensor); + + if (output->type == kTfLiteFloat32) { + EvalAdd(context, node, params, data, input1, input2, output); + } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, + input1, input2, output)); + } else { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(output->type), + output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteRegistration Register_ADD() { + return tflite::micro::RegisterOp(AddInit, AddPrepare, AddEval); +} + +} // namespace tflite + +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp deleted file mode 100644 index 151f661..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.cpp +++ /dev/null @@ -1,525 +0,0 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels -#include 
"../../../../classifier/ei_classifier_config.h" -#if 0 == 1 -/* noop */ -#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" - -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace add { - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - bool requires_broadcast; - - // These fields are used in both the general 8-bit -> 8bit quantized path, - // and the special 16-bit -> 16bit quantized path - int input1_shift; - int input2_shift; - int32_t output_activation_min; - int32_t output_activation_max; - - // These fields are used only in the general 8-bit -> 8bit quantized path - int32_t input1_multiplier; - int32_t input2_multiplier; - int32_t output_multiplier; - int output_shift; - int left_shift; - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; - - // Used only for float evals: - float output_activation_min_f32; - float output_activation_max_f32; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, - const TfLiteTensor* input1, - const TfLiteTensor* input2, TfLiteTensor* output, - OpData* data) { - data->requires_broadcast = !HaveSameShapes(input1, input2); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - // 8bit -> 8bit general quantized path, with general rescalings - data->input1_offset = -input1->params.zero_point; - data->input2_offset = -input2->params.zero_point; - data->output_offset = output->params.zero_point; - data->left_shift = 20; - const double twice_max_input_scale = - 2 * static_cast( - std::max(input1->params.scale, input2->params.scale)); - const double real_input1_multiplier = - static_cast(input1->params.scale) / twice_max_input_scale; - const double real_input2_multiplier = - static_cast(input2->params.scale) / twice_max_input_scale; - const double real_output_multiplier = - twice_max_input_scale / - ((1 << data->left_shift) * static_cast(output->params.scale)); - - QuantizeMultiplierSmallerThanOneExp( - 
real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - } else if (output->type == kTfLiteFloat32) { - CalculateActivationRange(params->activation, - &data->output_activation_min_f32, - &data->output_activation_max_f32); - } - - return kTfLiteOk; -} - -void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params, - const OpData* data, const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params; - SetActivationParams(data->output_activation_min_f32, - data->output_activation_max_f32, &op_params); -#define TF_LITE_ADD(opname) \ - reference_ops::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)) - if (data->requires_broadcast) { - TF_LITE_ADD(BroadcastAdd4DSlow); - } else { - TF_LITE_ADD(Add); - } -#undef TF_LITE_ADD -} - -TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteAddParams* params, const OpData* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - tflite::ArithmeticParams op_params; - op_params.left_shift = data->left_shift; - op_params.input1_offset = data->input1_offset; - op_params.input1_multiplier = data->input1_multiplier; - op_params.input1_shift = data->input1_shift; - op_params.input2_offset = data->input2_offset; - op_params.input2_multiplier = data->input2_multiplier; - op_params.input2_shift = data->input2_shift; - op_params.output_offset = data->output_offset; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - SetActivationParams(data->output_activation_min, - data->output_activation_max, &op_params); - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); -#define TF_LITE_ADD(type, opname, dtype) \ - type::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)); - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - TF_LITE_ADD(reference_integer_ops, BroadcastAdd4DSlow, int8_t); - } else { - arm_elementwise_add_s8( - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorData(input2), - op_params.input1_offset, op_params.input1_multiplier, - op_params.input1_shift, op_params.input2_offset, - op_params.input2_multiplier, op_params.input2_shift, - op_params.left_shift, tflite::micro::GetTensorData(output), - op_params.output_offset, op_params.output_multiplier, - op_params.output_shift, op_params.quantized_activation_min, - op_params.quantized_activation_max, - MatchingElementsSize(tflite::micro::GetTensorShape(input1), - 
tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorShape(output))); - } - } else { - if (need_broadcast) { - TF_LITE_ADD(reference_ops, BroadcastAdd4DSlow, uint8_t); - } else { - TF_LITE_ADD(reference_ops, Add, uint8_t); - } - } -#undef TF_LITE_ADD - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - TF_LITE_ENSURE_STATUS( - CalculateOpData(context, params, input1, input2, output, data)); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - if (output->type == kTfLiteFloat32) { - EvalAdd(context, node, params, data, input1, input2, output); - } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, - input1, input2, output)); - } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace add - -TfLiteRegistration Register_ADD() { - return {/*init=*/add::Init, - /*free=*/nullptr, - /*prepare=*/add::Prepare, - /*invoke=*/add::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#else -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace add { - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - bool requires_broadcast; - - // These fields are used in both the general 8-bit -> 8bit quantized path, - // and the special 16-bit -> 16bit quantized path - int input1_shift; - int input2_shift; - int32_t output_activation_min; - int32_t output_activation_max; - - // These fields are used only in the general 8-bit -> 8bit quantized path - int32_t input1_multiplier; - int32_t input2_multiplier; - int32_t output_multiplier; - int output_shift; - int left_shift; - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; - - // Used only for float evals: - float output_activation_min_f32; - float output_activation_max_f32; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteAddParams* params, - const TfLiteTensor* input1, - const TfLiteTensor* input2, TfLiteTensor* output, - OpData* data) { - data->requires_broadcast = !HaveSameShapes(input1, input2); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - // 8bit -> 8bit general quantized path, with general rescalings - data->input1_offset = -input1->params.zero_point; - data->input2_offset = -input2->params.zero_point; - data->output_offset = output->params.zero_point; - data->left_shift = 20; - const double twice_max_input_scale = - 2 * static_cast( - std::max(input1->params.scale, input2->params.scale)); - const double real_input1_multiplier = - static_cast(input1->params.scale) / twice_max_input_scale; - const double real_input2_multiplier = - static_cast(input2->params.scale) / twice_max_input_scale; - const double real_output_multiplier = - twice_max_input_scale / - ((1 << data->left_shift) * static_cast(output->params.scale)); - - QuantizeMultiplierSmallerThanOneExp( - real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - } else if (output->type == kTfLiteFloat32) { - CalculateActivationRange(params->activation, - &data->output_activation_min_f32, - &data->output_activation_max_f32); - } - - return kTfLiteOk; -} - -void EvalAdd(TfLiteContext* context, TfLiteNode* node, TfLiteAddParams* params, - 
const OpData* data, const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params; - SetActivationParams(data->output_activation_min_f32, - data->output_activation_max_f32, &op_params); - if (data->requires_broadcast) { - reference_ops::BroadcastAdd4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus EvalAddQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteAddParams* params, const OpData* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - tflite::ArithmeticParams op_params; - op_params.left_shift = data->left_shift; - op_params.input1_offset = data->input1_offset; - op_params.input1_multiplier = data->input1_multiplier; - op_params.input1_shift = data->input1_shift; - op_params.input2_offset = data->input2_offset; - op_params.input2_multiplier = data->input2_multiplier; - op_params.input2_shift = data->input2_shift; - op_params.output_offset = data->output_offset; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - SetActivationParams(data->output_activation_min, - data->output_activation_max, &op_params); - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - reference_integer_ops::BroadcastAdd4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::Add( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } else { - if (need_broadcast) { - reference_ops::BroadcastAdd4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Add(op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - 
TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - TF_LITE_ENSURE_STATUS( - CalculateOpData(context, params, input1, input2, output, data)); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - if (output->type == kTfLiteFloat32) { - EvalAdd(context, node, params, data, input1, input2, output); - } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_OK(context, EvalAddQuantized(context, node, params, data, - input1, input2, output)); - } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace add - -TfLiteRegistration Register_ADD() { - return {/*init=*/add::Init, - /*free=*/nullptr, - /*prepare=*/add::Prepare, - /*invoke=*/add::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h new file mode 100644 index 0000000..e91ffb3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h @@ -0,0 +1,77 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { + +extern const int kAddInputTensor1; +extern const int kAddInputTensor2; +extern const int kAddOutputTensor; + +struct OpDataAdd { + bool requires_broadcast; + + // These fields are used in both the general 8-bit -> 8bit quantized path, + // and the special 16-bit -> 16bit quantized path + int input1_shift; + int input2_shift; + int32_t output_activation_min; + int32_t output_activation_max; + + // These fields are used only in the general 8-bit -> 8bit quantized path + int32_t input1_multiplier; + int32_t input2_multiplier; + int32_t output_multiplier; + int output_shift; + int left_shift; + int32_t input1_offset; + int32_t input2_offset; + int32_t output_offset; + + // Used only for float evals: + float output_activation_min_f32; + float output_activation_max_f32; +}; + +TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + TfLiteTensor* output, OpDataAdd* data); + +TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node); + +// Generic must define registration function. +TfLiteRegistration Register_ADD(); + +#if defined(CMSIS_NN) +TfLiteRegistration Register_ADD_INT8(); + +TfLiteRegistration Register_ADD_INT16(); +#else +// Fallback registration +inline TfLiteRegistration Register_ADD_INT8() { return Register_ADD(); } + +inline TfLiteRegistration Register_ADD_INT16() { return Register_ADD(); } +#endif +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_ADD_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_common.cc new file mode 100644 index 0000000..d9622a2 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_common.cc @@ -0,0 +1,106 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" + +namespace tflite { + +const int kAddInputTensor1 = 0; +const int kAddInputTensor2 = 1; +const int kAddOutputTensor = 0; + +TfLiteStatus CalculateOpDataAdd(TfLiteContext* context, TfLiteAddParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + TfLiteTensor* output, OpDataAdd* data) { + data->requires_broadcast = !HaveSameShapes(input1, input2); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + // 8bit -> 8bit general quantized path, with general rescalings + data->input1_offset = -input1->params.zero_point; + data->input2_offset = -input2->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = (output->type == kTfLiteInt16) ? 15 : 20; + const double twice_max_input_scale = + 2 * static_cast( + std::max(input1->params.scale, input2->params.scale)); + const double real_input1_multiplier = + static_cast(input1->params.scale) / twice_max_input_scale; + const double real_input2_multiplier = + static_cast(input2->params.scale) / twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + } else if (output->type == kTfLiteFloat32) { + CalculateActivationRange(params->activation, + &data->output_activation_min_f32, + &data->output_activation_max_f32); + } + + return kTfLiteOk; +} + +TfLiteStatus AddPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kAddInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kAddInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kAddOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + OpDataAdd* data = static_cast(node->user_data); + auto* params = 
reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_STATUS( + CalculateOpDataAdd(context, params, input1, input2, output, data)); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_n.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_n.cc new file mode 100644 index 0000000..0ec3276 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_n.cc @@ -0,0 +1,215 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add_n.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor0 = 0; +constexpr int kOutputTensor = 0; + +constexpr int kAddNIntegerShift = 20; + +// only used with INT8 tensors +struct OpData { + int32_t output_activation_min; + int32_t output_activation_max; + int32_t input_offset; + int32_t output_offset; + int32_t input_multiplier; + int32_t output_multiplier; + int input_shift; + int output_shift; + int left_shift; + int scratch_index; +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + int num_inputs = NumInputs(node); + TF_LITE_ENSURE(context, num_inputs >= 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input_tensor_first = + micro_context->AllocateTempInputTensor(node, kInputTensor0); + TF_LITE_ENSURE(context, input_tensor_first != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + // Check that all tensors have the same shape and type. + TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_tensor_first->type); + for (int i = kInputTensor0 + 1; i < num_inputs; ++i) { + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, i); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, HaveSameShapes(input_tensor_first, input)); + TF_LITE_ENSURE_TYPES_EQ(context, input_tensor_first->type, input->type); + + // Check that all INT8 input tensors have the same zero-point and scale. 
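+ // Illustration (hypothetical scales): the INT8 path below derives one
+ // shared input_multiplier/input_shift from input_tensor_first->params.scale
+ // alone, e.g. a shared scale of 0.05 gives
+ // real_input_multiplier = 0.05 / (2 * 0.05) = 0.5. An input with a
+ // different scale or zero point would be summed with the wrong factor,
+ // hence the hard check here instead of a silent mis-scale.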
+ if (input_tensor_first->type == kTfLiteInt8) { + TF_LITE_ENSURE(context, input_tensor_first->params.zero_point == + input->params.zero_point); + TF_LITE_ENSURE(context, + input_tensor_first->params.scale == input->params.scale); + } + + micro_context->DeallocateTempTfLiteTensor(input); + } + + if (output->type == kTfLiteFloat32) { + // Allocate scratch buffer space for pointer to each tensor's data + // and store the scratch buffer index in the node's user_data + int scratch_index; + size_t scratch_size = sizeof(float*) * num_inputs; + TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( + context, scratch_size, &scratch_index)); + node->user_data = + reinterpret_castuser_data)>(scratch_index); + } else if (output->type == kTfLiteInt8) { + node->user_data = + context->AllocatePersistentBuffer(context, sizeof(OpData)); + OpData* data = static_cast(node->user_data); + + // Allocate scratch buffer space for pointer to each tensor's data + // and store the scratch buffer index in OpData + size_t scratch_size = sizeof(int8_t*) * num_inputs; + TF_LITE_ENSURE_OK( + context, context->RequestScratchBufferInArena(context, scratch_size, + &data->scratch_index)); + + // 8bit -> 8bit general quantized path, with general rescalings + data->input_offset = -input_tensor_first->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = kAddNIntegerShift; + const double twice_max_input_scale = + 2 * static_cast(input_tensor_first->params.scale); + const double real_input_multiplier = + static_cast(input_tensor_first->params.scale) / + twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input_multiplier, &data->input_multiplier, &data->input_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, kTfLiteActNone, output, &data->output_activation_min, + &data->output_activation_max)); + } else { + MicroPrintf("ADD_N only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + + micro_context->DeallocateTempTfLiteTensor(input_tensor_first); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +template +inline const T** CopyInputsToScratchBuffer(TfLiteContext* context, + TfLiteNode* node, + const int scratch_index) { + int num_inputs = NumInputs(node); + void* scratch_buffer = context->GetScratchBuffer(context, scratch_index); + const T** all_inputs = static_cast(scratch_buffer); + for (int i = 0; i < num_inputs; i++) { + const TfLiteEvalTensor* next_input = + tflite::micro::GetEvalInput(context, node, kInputTensor0 + i); + all_inputs[i] = tflite::micro::GetTensorData(next_input); + } + + return all_inputs; +} + +template +void EvalAddN(TfLiteContext* context, TfLiteNode* node, + TfLiteEvalTensor* output) { + int num_inputs = NumInputs(node); + + int scratch_index = + static_cast(reinterpret_cast(node->user_data)); + const T** all_inputs = + CopyInputsToScratchBuffer(context, node, scratch_index); + + reference_ops::AddN(tflite::micro::GetTensorShape(output), num_inputs, + all_inputs, tflite::micro::GetTensorData(output)); +} + +template +void EvalAddNQuantized(TfLiteContext* context, TfLiteNode* 
node, + TfLiteEvalTensor* output) { + int num_inputs = NumInputs(node); + + OpData* data = static_cast(node->user_data); + const T** all_inputs = + CopyInputsToScratchBuffer(context, node, data->scratch_index); + + ArithmeticParams params; + params.left_shift = data->left_shift; + params.input1_offset = data->input_offset; + params.input1_multiplier = data->input_multiplier; + params.input1_shift = data->input_shift; + params.output_offset = data->output_offset; + params.output_multiplier = data->output_multiplier; + params.output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, data->output_activation_max, + ¶ms); + + reference_ops::AddN(params, tflite::micro::GetTensorShape(output), num_inputs, + all_inputs, tflite::micro::GetTensorData(output)); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + if (output->type == kTfLiteFloat32) { + EvalAddN(context, node, output); + } else if (output->type == kTfLiteInt8) { + EvalAddNQuantized(context, node, output); + } else { + MicroPrintf("ADD_N only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_ADD_N() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_n.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_n.cpp deleted file mode 100644 index a36a986..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/add_n.cpp +++ /dev/null @@ -1,119 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add_n.h" - -#include - -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor0 = 0; -constexpr int kOutputTensor = 0; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { - int num_inputs = NumInputs(node); - TF_LITE_ENSURE(context, num_inputs >= 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input_tensor_first; - TF_LITE_ENSURE_OK( - context, GetInputSafe(context, node, kInputTensor0, &input_tensor_first)); - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kOutputTensor, &output)); - - // Check that all tensors have the same shape and type. 
- TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_tensor_first->type); - for (int i = kInputTensor0 + 1; i < num_inputs; ++i) { - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, i, &input)); - TF_LITE_ENSURE(context, HaveSameShapes(input_tensor_first, input)); - TF_LITE_ENSURE_TYPES_EQ(context, input_tensor_first->type, input->type); - } - - // Allocate scratch buffer space for pointer to each tensor's data - // and store the scratch buffer index in the node's user_data - if (output->type == kTfLiteFloat32) { - int scratch_index; - size_t scratch_size = sizeof(float*) * num_inputs; - TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( - context, scratch_size, &scratch_index)); - node->user_data = - reinterpret_castuser_data)>(scratch_index); - } else { - TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32, got %s.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - - return kTfLiteOk; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - return CalculateOpData(context, node); -} - -template -void EvalAddN(TfLiteContext* context, TfLiteNode* node, - TfLiteEvalTensor* output) { - int num_inputs = NumInputs(node); - - int scratch_index = - static_cast(reinterpret_cast(node->user_data)); - void* scratch_buffer = context->GetScratchBuffer(context, scratch_index); - const T** all_inputs = static_cast(scratch_buffer); - for (int i = 0; i < num_inputs; i++) { - const TfLiteEvalTensor* next_input = - tflite::micro::GetEvalInput(context, node, kInputTensor0 + i); - all_inputs[i] = tflite::micro::GetTensorData(next_input); - } - - reference_ops::AddN(tflite::micro::GetTensorShape(output), num_inputs, - all_inputs, tflite::micro::GetTensorData(output)); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - if (output->type == kTfLiteFloat32) { - EvalAddN(context, node, output); - } else { - TF_LITE_KERNEL_LOG(context, "ADD_N only supports FLOAT32, got %s.", - TfLiteTypeGetName(output->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_ADD_N() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cc similarity index 67% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cc index 5fa261a..f781ab5 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/arg_min_max.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,15 +17,15 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/comparisons.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace arg_min_max { + +namespace { constexpr int kInputTensor = 0; constexpr int kAxis = 1; @@ -36,12 +36,17 @@ inline void ArgMinMaxHelper(const RuntimeShape& input1_shape, const T1* input1_data, const T3* input2_data, const RuntimeShape& output_shape, T2* output_data, bool is_arg_max) { + // Use Greater/Less from comparisons.h (formerly from kernels/micro_utils.h + // which was deprecated). Same as gtl::Greater but used here to reduce + // dependencies and binary size for micro environment. if (is_arg_max) { reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, - output_shape, output_data, micro::Greater()); + output_shape, output_data, + reference_ops::GreaterFn); } else { reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, - output_shape, output_data, micro::Less()); + output_shape, output_data, + reference_ops::LessFn); } } @@ -66,28 +71,24 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); break; - case kTfLiteUInt8: - TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); - break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); break; default: - TF_LITE_KERNEL_LOG(context, - "Only float32, uint8_t and int8_t are " - "supported currently, got %s.", - TfLiteTypeGetName(input->type)); + MicroPrintf( + "Only float32, uint8_t and int8_t are " + "supported currently, got %s.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } } else { - TF_LITE_KERNEL_LOG(context, - "Only int32_t are supported currently, got %s.", - TfLiteTypeGetName(output->type)); + MicroPrintf("Only int32_t are supported currently, got %s.", + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { - TF_LITE_KERNEL_LOG(context, "Only int32_t are supported currently, got %s.", - TfLiteTypeGetName(axis->type)); + MicroPrintf("Only int32_t are supported currently, got %s.", + TfLiteTypeGetName(axis->type)); return kTfLiteError; } @@ -104,30 +105,14 @@ TfLiteStatus ArgMaxEval(TfLiteContext* context, TfLiteNode* node) { return Eval(context, node, true); } -} // namespace arg_min_max +} // namespace TfLiteRegistration Register_ARG_MAX() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/arg_min_max::ArgMaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, nullptr, ArgMaxEval); } TfLiteRegistration Register_ARG_MIN() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/arg_min_max::ArgMinEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, nullptr, ArgMinEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/assign_variable.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/assign_variable.cc new file mode 100644 index 0000000..e650294 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/assign_variable.cc @@ -0,0 +1,101 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +constexpr int kInputVariableId = 0; +constexpr int kInputValue = 1; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); + + // This must be a TfLiteEvalTensor despite this being in Prepare, because + // CreateTensor allocates a temp tensor from the flatbuffer, which does not + // contain the correct ID generated within the VAR_HANDLE op. EvalTensors are + // all allocated during StartModelAllocation which happens before + // init/prepare, and VAR_HANDLE Prepare() references its own op_data in the + // TfLiteEvalTensor, so reading the ID here is valid. 
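+ // Rough sketch of the flow this relies on (as implemented in this file,
+ // not a separately documented contract): VAR_HANDLE publishes a small
+ // int32 resource id through its output EvalTensor; this op reads that id
+ // from input_resource_id_tensor->data.i32[0], registers the variable's
+ // type and shape via resources->Allocate(id, context, input_value), and
+ // Eval() later copies the value tensor into the variable with
+ // resources->Assign(id, input_value).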
+ const TfLiteEvalTensor* input_resource_id_tensor = + tflite::micro::GetEvalInput(context, node, kInputVariableId); + TFLITE_DCHECK(input_resource_id_tensor != nullptr); + TF_LITE_ENSURE(context, (input_resource_id_tensor->type == kTfLiteResource || + input_resource_id_tensor->type == kTfLiteInt32)); + TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor->dims), 1); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + TfLiteTensor* input_value = + micro_context->AllocateTempInputTensor(node, kInputValue); + TFLITE_DCHECK(input_value != nullptr); + + MicroGraph& graph_info = micro_context->graph(); + + MicroResourceVariables* resources = graph_info.GetResourceVariables(); + TF_LITE_ENSURE_OK(context, + resources->Allocate(input_resource_id_tensor->data.i32[0], + context, input_value)); + + micro_context->DeallocateTempTfLiteTensor(input_value); + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input_id = + tflite::micro::GetEvalInput(context, node, kInputVariableId); + TFLITE_DCHECK(input_id != nullptr); + + const TfLiteEvalTensor* input_value = + tflite::micro::GetEvalInput(context, node, kInputValue); + TFLITE_DCHECK(input_value != nullptr); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + MicroResourceVariables* resources = graph_info.GetResourceVariables(); + if (resources == nullptr) { + MicroPrintf( + "ASSIGN_VARIABLE requires resource variables. Please create " + "ResourceVariables and pass it to the interpreter."); + return kTfLiteError; + } + TF_LITE_ENSURE_OK(context, + resources->Assign(input_id->data.i32[0], input_value)); + return kTfLiteOk; +} + +} // namespace. + +TfLiteRegistration Register_ASSIGN_VARIABLE() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_matmul.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_matmul.cc new file mode 100644 index 0000000..3858f73 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_matmul.cc @@ -0,0 +1,644 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/batch_matmul.h" + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +constexpr int kInputLHSTensor = 0; +constexpr int kInputRHSTensor = 1; +constexpr int kOutputTensor = 0; + +constexpr int kInvalidScratchBufferIndex = -1; + +struct QuantizationOpData { + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; // exponent + + // The range of the fused activation layer. For example for kNone and + // int8_t these would be -128 and 127. + int32_t output_activation_min; + int32_t output_activation_max; + + int32_t lhs_zero_point; + int32_t rhs_zero_point; + int32_t output_zero_point; +}; + +struct HybridOpData { + float filter_scale; // RHS tensor scale + + // scratch buffer indices + int input_quantized_index; + int scaling_factors_index; + int input_offsets_index; + + // row_sums_buffer may be re-used across eval calls + int32_t* row_sums_buffer; + + bool compute_row_sums; +}; + +struct OpData { + union { + QuantizationOpData* quantization; + HybridOpData* hybrid; + }; + + // Transpose tensors and state + TfLiteEvalTensor* lhs_transposed_tensor; + TfLiteEvalTensor* rhs_transposed_tensor; + bool rhs_is_transposed; + bool lhs_is_constant_tensor; + bool rhs_is_constant_tensor; +}; + +struct OpContext { + OpContext(TfLiteContext* context, TfLiteNode* node) { + params = reinterpret_cast(node->builtin_data); + opdata = static_cast(node->user_data); + } + + TfLiteBatchMatMulParams* params; + OpData* opdata; +}; + +struct PrepareOpContext : OpContext { + PrepareOpContext(TfLiteContext* context, TfLiteNode* node) + : OpContext(context, node) { + MicroContext* micro_context = GetMicroContext(context); + lhs = micro_context->AllocateTempInputTensor(node, kInputLHSTensor); + rhs = micro_context->AllocateTempInputTensor(node, kInputRHSTensor); + output = micro_context->AllocateTempOutputTensor(node, kOutputTensor); + } + TfLiteTensor* lhs; + TfLiteTensor* rhs; + TfLiteTensor* output; +}; + +struct EvalOpContext : OpContext { + EvalOpContext(TfLiteContext* context, TfLiteNode* node) + : OpContext(context, node) { + lhs = tflite::micro::GetEvalInput(context, node, kInputLHSTensor); + rhs = tflite::micro::GetEvalInput(context, node, kInputRHSTensor); + output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); + } + + const TfLiteEvalTensor* lhs; + const TfLiteEvalTensor* rhs; + TfLiteEvalTensor* output; +}; + +TfLiteStatus ResizeOutputTensor(TfLiteContext* context, TfLiteNode* node, + const RuntimeShape& extended_lhs_shape, + const RuntimeShape& extended_rhs_shape, + bool adj_x, bool adj_y, int output_rank, + TfLiteTensor* output) { + auto orig_size = NumElements(output); + + // make sure 
output tensor dims are not in the FlatBuffer + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + // Fill in any broadcast dimensions. + for (int i = 0; i < output_rank - 2; ++i) { + const int lhs_dim = extended_lhs_shape.Dims(i); + const int rhs_dim = extended_rhs_shape.Dims(i); + int broadcast_dim = lhs_dim; + if ((lhs_dim != rhs_dim) && (lhs_dim == 1)) { + broadcast_dim = rhs_dim; + } + output->dims->data[i] = broadcast_dim; + } + // Fill in the matmul dimensions. + int lhs_rows_index = adj_x ? output_rank - 1 : output_rank - 2; + int rhs_cols_index = adj_y ? output_rank - 2 : output_rank - 1; + + output->dims->data[output_rank - 2] = extended_lhs_shape.Dims(lhs_rows_index); + output->dims->data[output_rank - 1] = extended_rhs_shape.Dims(rhs_cols_index); + output->dims->size = output_rank; + + // Check that output tensor has not been resized + // since TFLM doesn't support tensor resizing. + TF_LITE_ENSURE_EQ(context, orig_size, NumElements(output)); + + return kTfLiteOk; +} + +TfLiteEvalTensor* AllocInitTransposeTensorFromTfLiteTensor( + TfLiteContext* context, const TfLiteTensor& tensor) { + TfLiteEvalTensor* eval_tensor = static_cast( + context->AllocatePersistentBuffer(context, sizeof(TfLiteEvalTensor))); + + eval_tensor->type = tensor.type; + + const int tensor_rank = NumDimensions(&tensor); + auto eval_dims_size = TfLiteIntArrayGetSizeInBytes(tensor_rank); + eval_tensor->dims = static_cast( + context->AllocatePersistentBuffer(context, eval_dims_size)); + eval_tensor->dims->size = tensor_rank; + for (int i = 0; i < tensor_rank - 2; ++i) { + eval_tensor->dims->data[i] = tensor.dims->data[i]; + } + // Swap last two dimensions. + eval_tensor->dims->data[tensor_rank - 2] = tensor.dims->data[tensor_rank - 1]; + eval_tensor->dims->data[tensor_rank - 1] = tensor.dims->data[tensor_rank - 2]; + + size_t eval_data_size = static_cast(NumElements(&tensor)); + if (tensor.type == kTfLiteFloat32) { + eval_data_size *= sizeof(float); + } + eval_tensor->data.data = + context->AllocatePersistentBuffer(context, eval_data_size); + + return eval_tensor; +} + +// Initializes tensors to store transposed operands. +// Allocate storage for hybrid quantization if needed. +// Allocate normal quantization data if needed. +TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node, + const PrepareOpContext& op_context) { + OpData* op_data = op_context.opdata; + const TfLiteTensor* lhs = op_context.lhs; + const TfLiteTensor* rhs = op_context.rhs; + + // For "hybrid" quantization, we impose the constraint that the LHS + // is float (typically an activation from a prior layer) and the RHS + // is quantized int8. 
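+ // Sketch of the hybrid path set up here and used in EvalHybrid: each
+ // float LHS batch is quantized on the fly to int8 with a per-batch
+ // scaling factor, the integer accumulation runs against the already
+ // quantized int8 RHS weights, and the int32 accumulators are scaled back
+ // to float by scaling_factor * filter_scale (the two factors are folded
+ // together before the reference_ops::BatchMatMul call in EvalHybrid).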
+ bool is_hybrid = (lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8); + if (is_hybrid) { + op_data->hybrid = static_casthybrid)>( + context->AllocatePersistentBuffer(context, sizeof(*op_data->hybrid))); + TF_LITE_ENSURE(context, op_data->hybrid != nullptr); + op_data->hybrid->input_quantized_index = kInvalidScratchBufferIndex; + op_data->hybrid->scaling_factors_index = kInvalidScratchBufferIndex; + op_data->hybrid->row_sums_buffer = nullptr; + op_data->hybrid->input_offsets_index = kInvalidScratchBufferIndex; + } else if (lhs->type == kTfLiteInt8) { + op_data->quantization = static_castquantization)>( + context->AllocatePersistentBuffer(context, + sizeof(*op_data->quantization))); + TF_LITE_ENSURE(context, op_data->quantization != nullptr); + } else { + op_data->quantization = nullptr; // also op_data->hybrid + } + + // tensor for Transposed LHS; + if (op_context.params->adj_x) { + op_data->lhs_transposed_tensor = + AllocInitTransposeTensorFromTfLiteTensor(context, *lhs); + } else { + op_data->lhs_transposed_tensor = nullptr; + } + + // We need a buffer for the RHS if we need to transpose the RHS. We + // transpose by default, so that the two inputs (LHS and RHS) are in a proper + // layout for our fast matrix multiplication routines. If the transpose flag + // is set by the caller, the data is already in the desired layout. + if (!op_context.params->adj_y) { + op_data->rhs_transposed_tensor = + AllocInitTransposeTensorFromTfLiteTensor(context, *rhs); + } else { + op_data->rhs_transposed_tensor = nullptr; + } + + // If we have to perform on-the-fly quantization (with quantized weights and + // float inputs) first we need to quantize the inputs. Allocate temporary + // buffer to store the intermediate quantized values, the batch scaling + // factors, the input offsets, and persistent storage for the sums of the + // rows for each weights matrix. + // RHS = weights, LHS = inputs + if (is_hybrid) { + const int lhs_rank = NumDimensions(lhs); + const int rhs_rank = NumDimensions(rhs); + const int batch_size = op_context.params->adj_x + ? lhs->dims->data[lhs_rank - 1] + : lhs->dims->data[lhs_rank - 2]; + const int num_units = rhs->dims->data[rhs_rank - 1]; + + // Calculate the total number of LHS batches. 
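+ // For example, a hypothetical LHS of shape [2, 3, 5, 7] (lhs_rank = 4)
+ // leaves the trailing [5, 7] matrix dims alone and multiplies the rest,
+ // so the loop below yields num_batches = 2 * 3 = 6.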
+ int num_batches = 1; + for (int i = 0; i < lhs_rank - 2; ++i) { + num_batches *= lhs->dims->data[i]; + } + int num_weights_matrices = 1; + for (int i = 0; i < rhs_rank - 2; ++i) { + num_weights_matrices *= rhs->dims->data[i]; + } + + const size_t input_quantized_size = static_cast( + NumElements(lhs->dims) * TfLiteTypeGetSize(rhs->type)); + TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( + context, input_quantized_size, + &op_data->hybrid->input_quantized_index)); + + const size_t scaling_factors_size = + static_cast(batch_size * num_batches * sizeof(float)); + TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( + context, scaling_factors_size, + &op_data->hybrid->scaling_factors_index)); + + const size_t input_offsets_size = + static_cast(batch_size * num_batches * sizeof(int32_t)); + TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( + context, input_offsets_size, + &op_data->hybrid->input_offsets_index)); + + const size_t row_sums_size = + static_cast(num_weights_matrices * num_units * sizeof(int32_t)); + op_data->hybrid->row_sums_buffer = static_cast( + context->AllocatePersistentBuffer(context, row_sums_size)); + TF_LITE_ENSURE(context, op_data->hybrid->row_sums_buffer != nullptr); + + op_data->hybrid->compute_row_sums = true; + op_data->hybrid->filter_scale = rhs->params.scale; + } + + return kTfLiteOk; +} + +template +void TransposeRowsColumnsImpl(const TfLiteEvalTensor& tensor_in, + const scalar* input, TfLiteEvalTensor* tensor_out, + scalar* output) { + RuntimeShape transposed_shape(tflite::micro::GetTensorShape(&tensor_in)); + RuntimeShape shape(transposed_shape); + TransposeParams params; + int rank = shape.DimensionsCount(); + params.perm_count = rank; + for (int i = 0; i < rank - 2; ++i) { + params.perm[i] = i; + } + // Transpose the last two dimensions. 
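+ // E.g. for a rank-3 [B, M, N] tensor the loop above plus these two
+ // assignments produce perm = {0, 2, 1}, so a hypothetical [2, 3, 4]
+ // input is written out as [2, 4, 3].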
+ params.perm[rank - 2] = rank - 1; + params.perm[rank - 1] = rank - 2; + transposed_shape.SetDim(rank - 1, shape.Dims(rank - 2)); + transposed_shape.SetDim(rank - 2, shape.Dims(rank - 1)); + reference_ops::Transpose(params, shape, input, transposed_shape, output); +} + +TfLiteStatus TransposeRowsColumns(TfLiteContext* context, + const TfLiteEvalTensor& tensor_in, + TfLiteEvalTensor* tensor_out) { + if (tensor_in.type == kTfLiteFloat32) { + TransposeRowsColumnsImpl( + tensor_in, tflite::micro::GetTensorData(&tensor_in), tensor_out, + tflite::micro::GetTensorData(tensor_out)); + return kTfLiteOk; + } else if (tensor_in.type == kTfLiteInt8) { + TransposeRowsColumnsImpl( + tensor_in, tflite::micro::GetTensorData(&tensor_in), tensor_out, + tflite::micro::GetTensorData(tensor_out)); + return kTfLiteOk; + } else { + TF_LITE_KERNEL_LOG(context, + "BATCH_MATMUL can only transpose tensors with float, " + "int8 type."); + return kTfLiteError; + } +} + +RuntimeShape SwapRowColumnDims(const RuntimeShape& shape) { + RuntimeShape swapped_shape(shape); + const int32_t dims = shape.DimensionsCount(); + swapped_shape.SetDim(dims - 2, shape.Dims(dims - 1)); + swapped_shape.SetDim(dims - 1, shape.Dims(dims - 2)); + return swapped_shape; +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + PrepareOpContext op_context(context, node); + const TfLiteTensor* lhs_data = op_context.lhs; + TF_LITE_ENSURE(context, lhs_data != nullptr); + const TfLiteTensor* rhs_data = op_context.rhs; + TF_LITE_ENSURE(context, rhs_data != nullptr); + TfLiteTensor* output = op_context.output; + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 || + lhs_data->type == kTfLiteInt8); + TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 || + rhs_data->type == kTfLiteInt8); + // Either we have a hybrid quantization with a float32 and an int8 input, + // otherwise both inputs should be of the same type. + TF_LITE_ENSURE(context, (lhs_data->type == kTfLiteFloat32 && + rhs_data->type == kTfLiteInt8) || + lhs_data->type == rhs_data->type); + + const int lhs_rank = NumDimensions(lhs_data); + const int rhs_rank = NumDimensions(rhs_data); + // Support dimensions between 2 and 4, inclusive. + TF_LITE_ENSURE(context, lhs_rank >= 2); + TF_LITE_ENSURE(context, lhs_rank <= 4); + TF_LITE_ENSURE(context, rhs_rank >= 2); + TF_LITE_ENSURE(context, rhs_rank <= 4); + + TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, op_context)); + + OpData* op_data = op_context.opdata; + // If the RHS is constant, we only transpose once. + op_data->rhs_is_transposed = false; + op_data->lhs_is_constant_tensor = IsConstantTensor(lhs_data); + op_data->rhs_is_constant_tensor = IsConstantTensor(rhs_data); + + bool adj_x = op_context.params->adj_x; + bool adj_y = op_context.params->adj_y; + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. 
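+ // The effective int8 rescale is lhs_scale * rhs_scale / output_scale
+ // (from GetQuantizedConvolutionMultipler), stored as a fixed-point
+ // multiplier plus shift. With hypothetical scales 0.05, 0.03 and 1.0 the
+ // real multiplier is 0.0015, encoded roughly as 0.768 * 2^31 with a
+ // shift of -9, since 0.768 * 2^-9 ~= 0.0015.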
+ if (lhs_data->type == kTfLiteInt8) { + TF_LITE_ENSURE(context, op_data->quantization != nullptr); + double real_multiplier = 0.0; + TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( + context, lhs_data, rhs_data, output, &real_multiplier)); + QuantizeMultiplier(real_multiplier, + &op_data->quantization->output_multiplier, + &op_data->quantization->output_shift); + // BatchMatMul has no fused activation functions. Therefore, set + // output activation min and max to min and max of int8_t type. + op_data->quantization->output_activation_min = + std::numeric_limits::min(); + op_data->quantization->output_activation_max = + std::numeric_limits::max(); + + // set zero_point for Int8 only + op_data->quantization->lhs_zero_point = lhs_data->params.zero_point; + op_data->quantization->rhs_zero_point = rhs_data->params.zero_point; + op_data->quantization->output_zero_point = output->params.zero_point; + } + + const int output_rank = std::max(lhs_rank, rhs_rank); + const RuntimeShape extended_lhs_shape = + RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data)); + const RuntimeShape extended_rhs_shape = + RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data)); + + // Ensure any batch dimensions obey broacasting rules. + for (int i = 0; i < output_rank - 2; ++i) { + const int lhs_dim = extended_lhs_shape.Dims(i); + const int rhs_dim = extended_rhs_shape.Dims(i); + if (lhs_dim != rhs_dim) { + if (lhs_dim != 1) { + TF_LITE_ENSURE_EQ(context, rhs_dim, 1); + } + } + } + // Ensure other dimensions work for matrix multiplication. + int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2) + : extended_lhs_shape.Dims(output_rank - 1); + int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1) + : extended_rhs_shape.Dims(output_rank - 2); + + TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs); + TfLiteStatus status = + ResizeOutputTensor(context, node, extended_lhs_shape, extended_rhs_shape, + adj_x, adj_y, output_rank, output); + + micro_context->DeallocateTempTfLiteTensor(op_context.lhs); + micro_context->DeallocateTempTfLiteTensor(op_context.rhs); + micro_context->DeallocateTempTfLiteTensor(op_context.output); + + return status; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + // This is a builtin op, so we don't use the contents in 'buffer', if any. + // Instead, we allocate a new object to carry information from Prepare() to + // Eval(). + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, + const OpData& data, const RuntimeShape& input_shape, + const TfLiteEvalTensor& input, + const RuntimeShape& filter_shape, + const TfLiteEvalTensor& filter, + TfLiteEvalTensor* output) { + const auto* params = + static_cast(node->builtin_data); + const int32_t num_input_dims = input_shape.DimensionsCount(); + + // Input row/cols have been swapped at this point, so dims are + // {input_size, num_batches} + const int input_size = input_shape.Dims(num_input_dims - 2); + const int batch_size = input_shape.Dims(num_input_dims - 1); + + int num_batches_to_quantize = batch_size; + for (int i = 0; i < input_shape.DimensionsCount() - 2; ++i) { + num_batches_to_quantize *= input_shape.Dims(i); + } + // Quantize input from float to uint8 + quantization params (scaling factor). 
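+ // quant_data below is int8_t (despite the "uint8" wording above). In the
+ // default symmetric mode each batch gets scaling_factor ~ max|x| / 127,
+ // e.g. max|x| = 6.35 gives ~0.05 so 1.27f maps to round(1.27 / 0.05) = 25;
+ // the asymmetric option additionally fills input_offset_ptr with per-batch
+ // offsets (otherwise they are zeroed just below).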
+ float* scaling_factors_ptr = static_cast( + context->GetScratchBuffer(context, data.hybrid->scaling_factors_index)); + int32_t* input_offset_ptr = static_cast( + context->GetScratchBuffer(context, data.hybrid->input_offsets_index)); + int32_t* row_sums_ptr = data.hybrid->row_sums_buffer; + if (!params->asymmetric_quantize_inputs) { + std::fill_n(input_offset_ptr, num_batches_to_quantize, 0); + } + + int8_t* quant_data = static_cast( + context->GetScratchBuffer(context, data.hybrid->input_quantized_index)); + const int8_t* filter_data = tflite::micro::GetTensorData(&filter); + const float* input_ptr = tflite::micro::GetTensorData(&input); + // Quantize each batch independently. + tensor_utils::BatchQuantizeFloats(input_ptr, num_batches_to_quantize, + input_size, quant_data, scaling_factors_ptr, + input_offset_ptr, + params->asymmetric_quantize_inputs); + for (int b = 0; b < num_batches_to_quantize; ++b) { + // Incorporate scaling of the filter. + scaling_factors_ptr[b] *= data.hybrid->filter_scale; + } + + RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + int output_size = NumElements(output->dims); + std::fill_n(tflite::micro::GetTensorData(output), output_size, 0.0f); + reference_ops::BatchMatMul( + filter_shape, filter_data, input_shape, quant_data, scaling_factors_ptr, + input_offset_ptr, row_sums_ptr, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + &(data.hybrid->compute_row_sums)); + + return kTfLiteOk; +} + +TfLiteStatus EvalInt8(TfLiteContext* context, const OpData& data, + const RuntimeShape& lhs_shape, + const TfLiteEvalTensor& lhs, + const RuntimeShape& rhs_shape, + const TfLiteEvalTensor& rhs, + const RuntimeShape& output_shape, + TfLiteEvalTensor* output) { + TF_LITE_ENSURE(context, data.quantization != nullptr); + + // Reuse params struct from FullyConnected Op. + FullyConnectedParams op_params; + op_params.input_offset = -data.quantization->lhs_zero_point; + op_params.weights_offset = + -data.quantization->rhs_zero_point; // filter offset + op_params.output_offset = data.quantization->output_zero_point; + op_params.output_multiplier = data.quantization->output_multiplier; + op_params.output_shift = data.quantization->output_shift; + op_params.quantized_activation_min = data.quantization->output_activation_min; + op_params.quantized_activation_max = data.quantization->output_activation_max; + op_params.lhs_cacheable = data.lhs_is_constant_tensor; + op_params.rhs_cacheable = data.rhs_is_constant_tensor; + + // Note we pass RHS args first, LHS args second. See note for Eval. 
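EvalHybrid above quantizes each float input batch on the fly (tensor_utils::BatchQuantizeFloats) and then folds the filter scale into each batch's scaling factor. A symmetric-only sketch of that per-batch quantization follows; it ignores the asymmetric-offset path and is illustrative rather than the SDK routine.

    // Symmetric-only sketch of per-batch input quantization; the real
    // tensor_utils::BatchQuantizeFloats also supports asymmetric offsets.
    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    void QuantizeBatchSymmetric(const float* input, int size, int8_t* quantized,
                                float* scaling_factor) {
      float max_abs = 0.f;
      for (int i = 0; i < size; ++i) max_abs = std::max(max_abs, std::fabs(input[i]));
      if (max_abs == 0.f) {
        *scaling_factor = 1.f;
        std::fill_n(quantized, size, static_cast<int8_t>(0));
        return;
      }
      *scaling_factor = max_abs / 127.f;  // float value of one int8 step
      const float inv_scale = 127.f / max_abs;
      for (int i = 0; i < size; ++i) {
        const int v = static_cast<int>(std::lround(input[i] * inv_scale));
        quantized[i] = static_cast<int8_t>(std::min(127, std::max(-128, v)));
      }
    }
    // The per-batch scaling factor is afterwards multiplied by the filter scale,
    // mirroring the scaling_factors_ptr[b] *= filter_scale step above.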
+ reference_ops::BatchMatMul( + op_params, rhs_shape, tflite::micro::GetTensorData(&rhs), + lhs_shape, tflite::micro::GetTensorData(&lhs), output_shape, + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; +} + +TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, + const OpData& data, const RuntimeShape& lhs_shape, + const TfLiteEvalTensor& lhs, + const RuntimeShape& rhs_shape, + const TfLiteEvalTensor& rhs, + TfLiteEvalTensor* output) { + if (lhs.type == kTfLiteFloat32 && rhs.type == kTfLiteInt8) { + TF_LITE_ENSURE(context, data.hybrid != nullptr); + TF_LITE_ENSURE(context, data.hybrid->row_sums_buffer != nullptr); + TF_LITE_ENSURE(context, data.hybrid->input_quantized_index != + kInvalidScratchBufferIndex); + TF_LITE_ENSURE(context, data.hybrid->scaling_factors_index != + kInvalidScratchBufferIndex); + TF_LITE_ENSURE(context, data.hybrid->input_offsets_index != + kInvalidScratchBufferIndex); + return EvalHybrid(context, node, data, lhs_shape, lhs, rhs_shape, rhs, + output); + } else if (lhs.type == kTfLiteInt8 && rhs.type == kTfLiteInt8) { + return EvalInt8(context, data, lhs_shape, lhs, rhs_shape, rhs, + tflite::micro::GetTensorShape(output), output); + } else { + TF_LITE_KERNEL_LOG( + context, "BATCH_MATMUL only supports hybrid, int8 quantization.\n"); + } + return kTfLiteError; +} + +// Perform a batch matrix multiply on +// LHS <..., A, B> X RHS<..., B, C> +// where the leading dimensions of LHS and RHS obey broadcasting rules +// (this Op will apply broadcasting rules). +// We assume that LHS and RHS are both row oriented (adjacent values in memory +// are in the same row) and will output in the same memory layout. However, +// our fast GEMM libraries assume RCC layout (LHS row oriented, +// RHS column oriented, output column oriented). Therefore, we perform +// RHS <..., C, B> X LHS <..., B, A> +// where output is a C X A column-oriented, which is equivalent to +// A X C row-oriented. +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + EvalOpContext op_context(context, node); + OpData* op_data = op_context.opdata; + const TfLiteEvalTensor* lhs = op_context.lhs; + const TfLiteEvalTensor* rhs = op_context.rhs; + TfLiteEvalTensor* output = op_context.output; + RuntimeShape orig_lhs_shape = tflite::micro::GetTensorShape(lhs); + RuntimeShape orig_rhs_shape = tflite::micro::GetTensorShape(rhs); + + bool adj_y = op_context.params->adj_y; + bool adj_x = op_context.params->adj_x; + + TfLiteEvalTensor* rhs_tensor = adj_y ? const_cast(rhs) + : op_data->rhs_transposed_tensor; + TfLiteEvalTensor* lhs_tensor = adj_x ? op_data->lhs_transposed_tensor + : const_cast(lhs); + TF_LITE_ENSURE(context, rhs_tensor != nullptr); + TF_LITE_ENSURE(context, lhs_tensor != nullptr); + if (!adj_y) { + // OLD-TODO(b/154760341) Constant tensors should already be transposed, but + // we transpose once if necessary for now. + if (!(op_data->rhs_is_constant_tensor && op_data->rhs_is_transposed)) { + TransposeRowsColumns(context, *rhs, rhs_tensor); + op_data->rhs_is_transposed = true; + } + } + if (adj_x) { + TransposeRowsColumns(context, *lhs, lhs_tensor); + } + RuntimeShape rhs_shape = + adj_y ? orig_rhs_shape : SwapRowColumnDims(orig_rhs_shape); + RuntimeShape lhs_shape = + adj_x ? orig_lhs_shape : SwapRowColumnDims(orig_lhs_shape); + + switch (rhs->type) { + case kTfLiteFloat32: + // Note we pass RHS args first, LHS args second. See note above. 
+ reference_ops::BatchMatMul( + rhs_shape, tflite::micro::GetTensorData(rhs_tensor), lhs_shape, + tflite::micro::GetTensorData(lhs_tensor), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + return EvalQuantized(context, node, *op_data, lhs_shape, *lhs_tensor, + rhs_shape, *rhs_tensor, output); + default: + TF_LITE_KERNEL_LOG(context, + "Currently BATCH_MATMUL doesn't support type: %s", + TfLiteTypeGetName(lhs->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_BATCH_MATMUL() { + return {/*init=*/Init, + /*free=*/nullptr, + /*prepare=*/Prepare, + /*invoke=*/Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_to_space_nd.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_to_space_nd.cc similarity index 87% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_to_space_nd.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_to_space_nd.cc index 7ba4df9..9959e47 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_to_space_nd.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/batch_to_space_nd.cc @@ -19,6 +19,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" namespace tflite { @@ -41,8 +42,12 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, input != nullptr && output != nullptr); TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum); @@ -51,6 +56,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } @@ -88,8 +96,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); return kTfLiteError; } return kTfLiteOk; @@ -98,14 +106,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace. 
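The layout note in the BATCH_MATMUL Eval above rests on the identity (A*B)^T = B^T * A^T: a row-major product of the swapped, transposed operands is the original product read in transposed (column-major) order. The standalone check below verifies only that identity; it does not reproduce the reference kernel's exact argument handling.

    // Tiny check of the operand-swap identity used by BATCH_MATMUL above.
    #include <cassert>
    #include <cstdio>

    // Row-major matmul: C[M x N] = A[M x K] * B[K x N].
    void MatMulRowMajor(const float* A, const float* B, float* C,
                        int M, int K, int N) {
      for (int i = 0; i < M; ++i)
        for (int j = 0; j < N; ++j) {
          float acc = 0.f;
          for (int k = 0; k < K; ++k) acc += A[i * K + k] * B[k * N + j];
          C[i * N + j] = acc;
        }
    }

    int main() {
      const int M = 2, K = 3, N = 2;
      const float A[M * K] = {1, 2, 3, 4, 5, 6};     // row-major 2x3
      const float B[K * N] = {7, 8, 9, 10, 11, 12};  // row-major 3x2
      float AT[K * M], BT[N * K];
      for (int i = 0; i < M; ++i)
        for (int k = 0; k < K; ++k) AT[k * M + i] = A[i * K + k];
      for (int k = 0; k < K; ++k)
        for (int j = 0; j < N; ++j) BT[j * K + k] = B[k * N + j];

      float C[M * N];   // A * B, row-major
      float CT[N * M];  // B^T * A^T, row-major == A * B, column-major
      MatMulRowMajor(A, B, C, M, K, N);
      MatMulRowMajor(BT, AT, CT, N, K, M);

      for (int i = 0; i < M; ++i)
        for (int j = 0; j < N; ++j)
          assert(C[i * N + j] == CT[j * M + i]);  // same values, transposed layout
      std::printf("identity holds\n");
    }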
TfLiteRegistration Register_BATCH_TO_SPACE_ND() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/broadcast_args.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/broadcast_args.cc new file mode 100644 index 0000000..002a192 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/broadcast_args.cc @@ -0,0 +1,91 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_args.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" + +namespace tflite { +namespace { +constexpr int kShape1Tensor = 0; +constexpr int kShape2Tensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus BroadcastArgsPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE(context, NumInputs(node) == 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* shape1 = + micro_context->AllocateTempInputTensor(node, kShape1Tensor); + TfLiteTensor* shape2 = + micro_context->AllocateTempInputTensor(node, kShape2Tensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE(context, + shape1->type == kTfLiteInt32 || shape1->type == kTfLiteInt64); + TF_LITE_ENSURE_EQ(context, shape1->type, shape2->type); + TF_LITE_ENSURE_EQ(context, shape1->type, output->type); + + // Ensures the shapes are 1D tensor. 
+ TF_LITE_ENSURE_EQ(context, NumDimensions(shape1), 1); + TF_LITE_ENSURE_EQ(context, NumDimensions(shape2), 1); + + // Ensure the shape of the output tensor is compatible + TF_LITE_ENSURE_EQ(context, NumDimensions(output), 1); + + micro_context->DeallocateTempTfLiteTensor(shape1); + micro_context->DeallocateTempTfLiteTensor(shape2); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus BroadcastArgsEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* shape1 = + micro::GetEvalInput(context, node, kShape1Tensor); + const TfLiteEvalTensor* shape2 = + micro::GetEvalInput(context, node, kShape2Tensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteInt32) { + reference_ops::BroadcastArgs( + micro::GetTensorShape(shape1), micro::GetTensorData(shape1), + micro::GetTensorShape(shape2), micro::GetTensorData(shape2), + micro::GetTensorShape(output), micro::GetTensorData(output)); + } else { + reference_ops::BroadcastArgs( + micro::GetTensorShape(shape1), micro::GetTensorData(shape1), + micro::GetTensorShape(shape2), micro::GetTensorData(shape2), + micro::GetTensorShape(output), micro::GetTensorData(output)); + } + + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_BROADCAST_ARGS() { + return tflite::micro::RegisterOp(nullptr, BroadcastArgsPrepare, + BroadcastArgsEval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/broadcast_to.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/broadcast_to.cc new file mode 100644 index 0000000..51b19e0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/broadcast_to.cc @@ -0,0 +1,123 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/broadcast_to.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" + +namespace tflite { + +namespace { +constexpr int kInputTensor = 0; +constexpr int kShapeTensor = 1; +constexpr int kOutputTensor = 0; +// Support a maximum of 5 dimensions in TFLM. +constexpr int kMaxDims = 5; + +TfLiteStatus ValidateOutputTensor(TfLiteContext* context, TfLiteTensor* input, + TfLiteTensor* shape, TfLiteTensor* output) { + // Ensures the shape is 1D tensor. + TF_LITE_ENSURE_EQ(context, NumDimensions(shape), 1); + + // Ensure output dims is not less than input dims. 
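BroadcastArgs above emits the broadcast of two 1-D shape tensors using the usual rule: align the shapes from the trailing dimension, require each pair of dims to match or be 1, and take the larger of the two. A small sketch of that rule follows; it is not the reference_ops implementation.

    // Sketch of the broadcast-shape rule, aligned from the trailing dimension.
    #include <algorithm>
    #include <cstdint>
    #include <stdexcept>
    #include <vector>

    std::vector<int32_t> BroadcastShapes(const std::vector<int32_t>& s1,
                                         const std::vector<int32_t>& s2) {
      const size_t out_rank = std::max(s1.size(), s2.size());
      std::vector<int32_t> out(out_rank, 1);
      for (size_t i = 0; i < out_rank; ++i) {
        // Index from the end; missing leading dims behave like 1.
        const int32_t d1 = i < s1.size() ? s1[s1.size() - 1 - i] : 1;
        const int32_t d2 = i < s2.size() ? s2[s2.size() - 1 - i] : 1;
        if (d1 != d2 && d1 != 1 && d2 != 1)
          throw std::runtime_error("shapes are not broadcastable");
        out[out_rank - 1 - i] = std::max(d1, d2);
      }
      return out;
    }
    // Example: BroadcastShapes({2, 1, 4}, {3, 1}) -> {2, 3, 4}.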
+ int input_num_dims = NumDimensions(input); + int output_num_dims = NumDimensions(output); + int shape_num_dims = SizeOfDimension(shape, 0); + TF_LITE_ENSURE_MSG(context, output_num_dims == shape_num_dims, + "Output must match with the expected shape dimension."); + TF_LITE_ENSURE_MSG(context, input_num_dims <= output_num_dims, + "Output shape must be broadcastable from input shape."); + TF_LITE_ENSURE_MSG(context, output_num_dims <= kMaxDims, + "BroadcastTo only supports 1-5D tensor."); + + // Check if output shape is broadcastable from input shape. + auto get_shape_data = [shape](int i) -> int32_t { + if (shape->type == kTfLiteInt32) { + return GetTensorData(shape)[i]; + } else { + return GetTensorData(shape)[i]; + } + }; + + int extending_dims = output_num_dims - input_num_dims; + for (int idx = 0; idx < input_num_dims; ++idx) { + TF_LITE_ENSURE_MSG( + context, + (SizeOfDimension(input, idx) == 1 || + SizeOfDimension(input, idx) == get_shape_data(extending_dims + idx)), + "Output shape must be broadcastable from input shape."); + } + + // Validating the shape of the output tensor. + tflite::RuntimeShape output_shape = tflite::GetTensorShape(output); + for (int idx = 0; idx < output_num_dims; ++idx) { + TF_LITE_ENSURE(context, output_shape.Dims(idx) == get_shape_data(idx)); + } + return kTfLiteOk; +} + +TfLiteStatus BroadcastToPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE(context, NumInputs(node) == 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* shape = + micro_context->AllocateTempInputTensor(node, kShapeTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE_MSG(context, (NumDimensions(input) <= kMaxDims), + "BroadcastTo only supports 1-5D tensor."); + + TF_LITE_ENSURE(context, + shape->type == kTfLiteInt32 || shape->type == kTfLiteInt64); + TF_LITE_ENSURE_EQ(context, input->type, output->type); + + // Does not support String type due to its variable size. This limitation is + // the same as TFLite. + TF_LITE_ENSURE(context, input->type != kTfLiteString); + + TF_LITE_ENSURE_STATUS(ValidateOutputTensor(context, input, shape, output)); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(shape); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus BroadcastToEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + // BroadcastTo op support upto 5 dims, different from 8 dims in TFLite. 
+ reference_ops::BroadcastTo( + micro::GetTensorShape(input), input->data.raw, + micro::GetTensorShape(output), output->data.raw, input->type); + return kTfLiteOk; +} +} // namespace + +TfLiteRegistration Register_BROADCAST_TO() { + return tflite::micro::RegisterOp(nullptr, BroadcastToPrepare, + BroadcastToEval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/call_once.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/call_once.cc new file mode 100644 index 0000000..21643c8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/call_once.cc @@ -0,0 +1,88 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +struct OpData { + int init_subgraph_index; + bool has_run; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + const auto* params = + reinterpret_cast(node->builtin_data); + op_data->init_subgraph_index = params->init_subgraph_index; + op_data->has_run = false; + + TF_LITE_ENSURE(context, NumInputs(node) == 0); + TF_LITE_ENSURE(context, NumOutputs(node) == 0); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + TF_LITE_ENSURE(context, + op_data->init_subgraph_index < graph_info.NumSubgraphs()); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + + // Call once only runs one time then is a no-op for every subsequent call. + if (op_data->has_run) { + return kTfLiteOk; + } + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + TF_LITE_ENSURE_OK(context, + graph_info.InvokeSubgraph(op_data->init_subgraph_index)); + + op_data->has_run = true; + + return kTfLiteOk; +} + +} // namespace. 
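BroadcastTo above replicates the input along every dimension of size 1 until it matches the requested output shape, with ValidateOutputTensor enforcing compatibility. Below is a minimal index-mapping sketch for row-major float data; unlike the kernel it assumes the ranks already match (prepend 1s to the input shape first) and does not enforce the 5-D limit.

    // Illustrative broadcast-to: every in_shape dim equals the out_shape dim or 1.
    #include <vector>

    std::vector<float> BroadcastToSketch(const std::vector<float>& input,
                                         const std::vector<int>& in_shape,
                                         const std::vector<int>& out_shape) {
      const int rank = static_cast<int>(out_shape.size());
      int out_count = 1;
      for (int d : out_shape) out_count *= d;

      std::vector<float> output(out_count);
      std::vector<int> idx(rank, 0);  // current output index, row-major order
      for (int flat = 0; flat < out_count; ++flat) {
        // Map the output index to an input index: broadcast dims collapse to 0.
        int in_flat = 0;
        for (int d = 0; d < rank; ++d) {
          const int in_i = (in_shape[d] == 1) ? 0 : idx[d];
          in_flat = in_flat * in_shape[d] + in_i;
        }
        output[flat] = input[in_flat];
        // Advance the multi-dimensional output index (last dim fastest).
        for (int d = rank - 1; d >= 0; --d) {
          if (++idx[d] < out_shape[d]) break;
          idx[d] = 0;
        }
      }
      return output;
    }
    // e.g. BroadcastToSketch({1, 2, 3}, {1, 3}, {2, 3}) == {1, 2, 3, 1, 2, 3}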
+ +TfLiteRegistration Register_CALL_ONCE() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cc similarity index 67% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cc index 7253245..19e545f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cast.cc @@ -17,6 +17,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace { @@ -27,11 +28,19 @@ constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } @@ -48,13 +57,19 @@ TfLiteStatus copyToTensor(TfLiteContext* context, const FromT* in, case kTfLiteInt8: copyCast(in, out->data.int8, num_elements); break; + case kTfLiteInt16: + copyCast(in, out->data.i16, num_elements); + break; + case kTfLiteInt32: + copyCast(in, out->data.i32, num_elements); + break; case kTfLiteFloat32: copyCast(in, tflite::micro::GetTensorData(out), num_elements); break; default: // Unsupported type. - TF_LITE_KERNEL_LOG(context, "Output type %s (%d) not supported.", - TfLiteTypeGetName(out->type), out->type); + MicroPrintf("Output type %s (%d) not supported.", + TfLiteTypeGetName(out->type), out->type); } return kTfLiteOk; } @@ -70,27 +85,30 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { switch (input->type) { case kTfLiteInt8: return copyToTensor(context, input->data.int8, output, num_elements); + case kTfLiteInt16: + return copyToTensor(context, tflite::micro::GetTensorData(input), + output, num_elements); + case kTfLiteInt32: + return copyToTensor(context, tflite::micro::GetTensorData(input), + output, num_elements); + case kTfLiteUInt32: + return copyToTensor(context, + tflite::micro::GetTensorData(input), output, + num_elements); case kTfLiteFloat32: return copyToTensor(context, tflite::micro::GetTensorData(input), output, num_elements); default: // Unsupported type. 
- TF_LITE_KERNEL_LOG(context, "Input type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); } return kTfLiteOk; } } // namespace TfLiteRegistration Register_CAST() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ceil.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ceil.cc similarity index 80% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ceil.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ceil.cc index 0b78d48..0f09137 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ceil.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ceil.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,17 +21,20 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" namespace tflite { -namespace ops { -namespace micro { -namespace ceil { + +namespace { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); @@ -42,6 +45,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { for (int i = 0; i < output->dims->size; ++i) { TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -58,19 +63,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } -} // namespace ceil + +} // namespace TfLiteRegistration Register_CEIL() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ceil::Prepare, - /*invoke=*/ceil::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.cc new file mode 100644 index 0000000..bf69599 --- /dev/null 
+++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.cc @@ -0,0 +1,117 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +/* + * The circular buffer custom operator is used to implement strided streaming + * convolutions on TFLite Micro. Each time this operator is invoked, it checks + * whether or not to run, based on a predetermined stride in time. If the op + * runs, it inserts the input into the end of the output buffer and shifts the + * output values towards the start of the buffer. It discards the oldest value + * in the output buffer. + * + * Input: [, , , ] + * + * After shifting: + * Output: [, , , ] + * + * We make some assumptions in this custom operator: + * - Input shape must be [1, 1, 1, depth] + * - Output shape must be [1, num_slots, 1, depth] + * - Input and output types must match. + * - Input and output quantization params must be identical. + */ +namespace tflite { + +void* CircularBufferInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + OpDataCircularBuffer* op_data = static_cast( + context->AllocatePersistentBuffer(context, sizeof(OpDataCircularBuffer))); + + if (buffer != nullptr && length > 0) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + tflite::FlexbufferWrapper wrapper(buffer_t, length); + op_data->cycles_max = wrapper.ElementAsInt32(kCircularBufferCyclesMaxIndex); + } else { + op_data->cycles_max = 0; + } + + return op_data; +} + +// Shifts buffer over by the output depth, and write new input to end of buffer. +// num_slots is the number of samples stored in the output buffer. +// depth is the size of each sample. 
+void EvalInt8(const int8_t* input, int num_slots, int depth, int8_t* output) { + memmove(output, &output[depth], (num_slots - 1) * depth); + memcpy(&output[(num_slots - 1) * depth], input, depth); +} + +TfLiteStatus CircularBufferEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kCircularBufferInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kCircularBufferOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataCircularBuffer* data = + reinterpret_cast(node->user_data); + + int num_slots = output->dims->data[1]; + int depth = output->dims->data[2] * output->dims->data[3]; + + if (input->type == kTfLiteInt8) { + EvalInt8(tflite::micro::GetTensorData(input), num_slots, depth, + tflite::micro::GetTensorData(output)); + } else { + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + + if (--data->cycles_until_run != 0) { + // Signal the interpreter to end current run if the delay before op invoke + // has not been reached. + // TODO(b/149795762): Add kTfLiteAbort to TfLiteStatus enum. + return static_cast(kTfLiteAbort); + } + + data->cycles_until_run = data->cycles_max; + + return kTfLiteOk; +} + +TfLiteRegistration* Register_CIRCULAR_BUFFER() { + static TfLiteRegistration r = tflite::micro::RegisterOp( + CircularBufferInit, CircularBufferPrepare, CircularBufferEval); + return &r; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.cpp deleted file mode 100644 index 007f103..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.cpp +++ /dev/null @@ -1,192 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#define FLATBUFFERS_LOCALE_INDEPENDENT 0 -#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flexbuffers.h" -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -/* - * The circular buffer custom operator is used to implement strided streaming - * convolutions on TFLite Micro. 
Each time this operator is invoked, it checks - * whether or not to run, based on a predetermined stride in time. If the op - * runs, it inserts the input into the end of the output buffer and shifts the - * output values towards the start of the buffer. It discards the oldest value - * in the output buffer. - * - * Input: [, , , ] - * - * After shifting: - * Output: [, , , ] - * - * We make some assumptions in this custom operator: - * - Input shape must be [1, 1, 1, depth] - * - Output shape must be [1, num_slots, 1, depth] - * - Input and output types must match. - * - Input and output quantization params must be identical. - */ -namespace tflite { -namespace ops { -namespace micro { -namespace circular_buffer { - -namespace { - -// The CircularBuffer op has one input and one output tensor. -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -// TODO(b/149795762): Add this to TfLiteStatus enum. -constexpr TfLiteStatus kTfLiteAbort = static_cast(-9); - -// These fields control the stride period of a strided streaming model. This op -// returns kTfLiteAbort until cycles_until_run-- is zero. At this time, -// cycles_until_run is reset to cycles_max. -struct OpData { - int cycles_until_run; - int cycles_max; -}; - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - OpData* op_data = static_cast( - context->AllocatePersistentBuffer(context, sizeof(OpData))); - - if (buffer != nullptr && length > 0) { - const uint8_t* buffer_t = reinterpret_cast(buffer); - const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); - op_data->cycles_max = m["cycles_max"].AsInt32(); - } else { - op_data->cycles_max = 0; - } - - return op_data; -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* op_data = static_cast(node->user_data); - - TF_LITE_ENSURE(context, input != nullptr); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]); - TF_LITE_ENSURE_EQ(context, 1, input->dims->data[1]); - TF_LITE_ENSURE_EQ(context, input->dims->data[2], output->dims->data[2]); - TF_LITE_ENSURE_EQ(context, output->dims->data[3], input->dims->data[3]); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - - // The circular buffer custom operator currently only supports int8. - TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); - - if (op_data->cycles_max <= 0) { - // The last circular buffer layer simply accumulates outputs, and does not - // run periodically. - // TODO(b/150001379): Move this special case logic to the tflite flatbuffer. - static int cb_prepare_count = 0; - cb_prepare_count++; - // These checks specifically work for the only two streaming models - // supported on TFLM. They use the shape of the output tensor along with the - // layer number to determine if the circular buffer period should be 1 or 2. 
- - // These models are outlined int the following documents: - // https://docs.google.com/document/d/1lc_G2ZFhjiKFo02UHjBaljye1xsL0EkfybkaVELEE3Q/edit?usp=sharing - // https://docs.google.com/document/d/1pGc42PuWyrk-Jy1-9qeqtggvsmHr1ifz8Lmqfpr2rKA/edit?usp=sharing - if (output->dims->data[1] == 5 || output->dims->data[1] == 13 || - (cb_prepare_count == 5 && output->dims->data[2] == 2 && - output->dims->data[3] == 96)) { - op_data->cycles_max = 1; - cb_prepare_count = 0; - } else { - op_data->cycles_max = 2; - } - } - op_data->cycles_until_run = op_data->cycles_max; - node->user_data = op_data; - - return kTfLiteOk; -} - -// Shifts buffer over by the output depth, and write new input to end of buffer. -// num_slots is the number of samples stored in the output buffer. -// depth is the size of each sample. -void EvalInt8(const int8_t* input, int num_slots, int depth, int8_t* output) { - memmove(output, &output[depth], (num_slots - 1) * depth); - memcpy(&output[(num_slots - 1) * depth], input, depth); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = reinterpret_cast(node->user_data); - - int num_slots = output->dims->data[1]; - int depth = output->dims->data[2] * output->dims->data[3]; - - if (input->type == kTfLiteInt8) { - EvalInt8(tflite::micro::GetTensorData(input), num_slots, depth, - tflite::micro::GetTensorData(output)); - } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - - if (--data->cycles_until_run != 0) { - // Signal the interpreter to end current run if the delay before op invoke - // has not been reached. - // TODO(b/149795762): Add kTfLiteAbort to TfLiteStatus enum. - return static_cast(kTfLiteAbort); - } - - data->cycles_until_run = data->cycles_max; - - return kTfLiteOk; -} - -} // namespace circular_buffer - -TfLiteRegistration* Register_CIRCULAR_BUFFER() { - static TfLiteRegistration r = {/*init=*/circular_buffer::Init, - /*free=*/nullptr, - /*prepare=*/circular_buffer::Prepare, - /*invoke=*/circular_buffer::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; - return &r; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.h new file mode 100644 index 0000000..c52a1ec --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.h @@ -0,0 +1,48 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { + +// The CircularBuffer op has one input and one output tensor. +extern const int kCircularBufferInputTensor; +extern const int kCircularBufferOutputTensor; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. +extern const int kCircularBufferCyclesMaxIndex; // 'cycles_max' + +// TODO(b/149795762): Add this to TfLiteStatus enum. +extern const TfLiteStatus kTfLiteAbort; + +// These fields control the stride period of a strided streaming model. This op +// returns kTfLiteAbort until cycles_until_run-- is zero. At this time, +// cycles_until_run is reset to cycles_max. +struct OpDataCircularBuffer { + int cycles_until_run; + int cycles_max; +}; + +TfLiteStatus CircularBufferPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_CIRCULAR_BUFFER_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer_common.cc new file mode 100644 index 0000000..b6d1f0d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer_common.cc @@ -0,0 +1,97 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/circular_buffer.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +// The CircularBuffer op has one input and one output tensor. +const int kCircularBufferInputTensor = 0; +const int kCircularBufferOutputTensor = 0; + +// Indices into the init flexbuffer's vector. +// The parameter's name is in the comment that follows. +// Elements in the vectors are ordered alphabetically by parameter name. 
+const int kCircularBufferCyclesMaxIndex = 0; // 'cycles_max' + +// TODO(b/149795762): Add this to TfLiteStatus enum. +const TfLiteStatus kTfLiteAbort = static_cast(-9); + +TfLiteStatus CircularBufferPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kCircularBufferInputTensor); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor( + node, kCircularBufferOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataCircularBuffer* op_data = + static_cast(node->user_data); + + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_EQ(context, input->dims->data[0], output->dims->data[0]); + TF_LITE_ENSURE_EQ(context, 1, input->dims->data[1]); + TF_LITE_ENSURE_EQ(context, input->dims->data[2], output->dims->data[2]); + TF_LITE_ENSURE_EQ(context, output->dims->data[3], input->dims->data[3]); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + // The circular buffer custom operator currently only supports int8. + TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); + + if (op_data->cycles_max <= 0) { + // The last circular buffer layer simply accumulates outputs, and does not + // run periodically. + // TODO(b/150001379): Move this special case logic to the tflite flatbuffer. + static int cb_prepare_count = 0; + cb_prepare_count++; + // These checks specifically work for the only two streaming models + // supported on TFLM. They use the shape of the output tensor along with the + // layer number to determine if the circular buffer period should be 1 or 2. + + // These models are outlined int the following documents: + // https://docs.google.com/document/d/1lc_G2ZFhjiKFo02UHjBaljye1xsL0EkfybkaVELEE3Q/edit?usp=sharing + // https://docs.google.com/document/d/1pGc42PuWyrk-Jy1-9qeqtggvsmHr1ifz8Lmqfpr2rKA/edit?usp=sharing + if (output->dims->data[1] == 5 || output->dims->data[1] == 13 || + output->dims->data[1] == 25 || + (cb_prepare_count == 5 && output->dims->data[2] == 2 && + output->dims->data[3] == 96)) { + op_data->cycles_max = 1; + cb_prepare_count = 0; + } else { + op_data->cycles_max = 2; + } + } + op_data->cycles_until_run = op_data->cycles_max; + node->user_data = op_data; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/comparisons.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/comparisons.cc similarity index 78% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/comparisons.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/comparisons.cc index 4990b77..1a8fbb0 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/comparisons.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/comparisons.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
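The circular buffer kernel above combines two behaviours: EvalInt8 shifts the window left by one sample and appends the new input, and the cycles_until_run countdown only lets a run complete every cycles_max invocations, returning kTfLiteAbort in between. The compact standalone sketch below mimics both; the type and field names are illustrative, not the kernel's.

    // Standalone sliding-window sketch mirroring the circular buffer behaviour.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    struct SlidingWindow {
      int num_slots;   // samples kept in the window
      int depth;       // values per sample
      int cycles_max;  // report a full window every N pushes
      int cycles_until_run;
      int8_t* buffer;  // num_slots * depth values, oldest sample first

      // Returns true when the consumer should run on the refreshed window.
      bool Push(const int8_t* sample) {
        // Drop the oldest sample and append the new one at the end.
        std::memmove(buffer, buffer + depth, (num_slots - 1) * depth);
        std::memcpy(buffer + (num_slots - 1) * depth, sample, depth);
        if (--cycles_until_run != 0) return false;  // corresponds to kTfLiteAbort
        cycles_until_run = cycles_max;
        return true;
      }
    };

    int main() {
      int8_t storage[4 * 2] = {};  // 4 slots of depth 2
      SlidingWindow w{4, 2, 2, 2, storage};
      const int8_t s0[2] = {1, 2}, s1[2] = {3, 4};
      const bool first = w.Push(s0);
      const bool second = w.Push(s1);
      std::printf("%d %d\n", first, second);  // prints "0 1"
    }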
@@ -19,11 +19,10 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace comparisons { + namespace { struct OpData { @@ -104,19 +103,6 @@ TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::EqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? reference_ops::Broadcast4DSlowEqualWithScaling( @@ -131,8 +117,8 @@ TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -209,19 +195,6 @@ TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowNotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::NotEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? reference_ops::Broadcast4DSlowNotEqualWithScaling( @@ -236,8 +209,8 @@ TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -300,19 +273,6 @@ TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? 
reference_ops::Broadcast4DSlowGreaterWithScaling( @@ -327,8 +287,8 @@ TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -391,19 +351,6 @@ TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowGreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::GreaterEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? reference_ops::Broadcast4DSlowGreaterEqualWithScaling( @@ -418,8 +365,8 @@ TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -482,19 +429,6 @@ TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? reference_ops::Broadcast4DSlowLessWithScaling( @@ -509,8 +443,8 @@ TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; @@ -573,19 +507,6 @@ TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input2), output_shape, output_data); break; - case kTfLiteUInt8: - requires_broadcast - ? reference_ops::Broadcast4DSlowLessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data) - : reference_ops::LessEqualWithScaling( - data->params, input1_shape, - tflite::micro::GetTensorData(input1), input2_shape, - tflite::micro::GetTensorData(input2), output_shape, - output_data); - break; case kTfLiteInt8: requires_broadcast ? 
reference_ops::Broadcast4DSlowLessEqualWithScaling( @@ -600,15 +521,13 @@ TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { output_data); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); return kTfLiteError; } return kTfLiteOk; } -} // namespace - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(OpData)); @@ -618,12 +537,16 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); OpData* data = static_cast(node->user_data); - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); TF_LITE_ENSURE(context, input2 != nullptr); - if (input1->type == kTfLiteUInt8 || input1->type == kTfLiteInt8) { + if (input1->type == kTfLiteInt8) { auto input1_offset = -input1->params.zero_point; auto input2_offset = -input2->params.zero_point; const int kLeftShift = 8; @@ -648,77 +571,36 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { data->params.input2_shift = input2_shift; } + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + return kTfLiteOk; } -} // namespace comparisons +} // namespace TfLiteRegistration Register_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::EqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, EqualEval); } TfLiteRegistration Register_NOT_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::NotEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, NotEqualEval); } TfLiteRegistration Register_GREATER() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::GreaterEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, GreaterEval); } TfLiteRegistration Register_GREATER_EQUAL() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::GreaterEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, GreaterEqualEval); } TfLiteRegistration Register_LESS() { - return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::LessEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, LessEval); } TfLiteRegistration Register_LESS_EQUAL() { 
- return {/*init=*/comparisons::Init, - /*free=*/nullptr, - /*prepare=*/comparisons::Prepare, - /*invoke=*/comparisons::LessEqualEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, LessEqualEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/complex_abs.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/complex_abs.cc new file mode 100644 index 0000000..94a6107 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/complex_abs.cc @@ -0,0 +1,103 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include +#include +#include +#include + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace ops { +namespace micro { +namespace complex_abs { + +using std::complex; + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + // Check type and shape of the input tensor + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + if (input->type != kTfLiteComplex64 || output->type != kTfLiteFloat32) { + TF_LITE_KERNEL_LOG(context, "Types input %s (%d), output %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type, + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + + size_t total_input_els = 1; + for (size_t dim_ix = 0; dim_ix < input->dims->size; dim_ix++) { + total_input_els *= input->dims->data[dim_ix]; + } + + for (size_t ix = 0; ix < total_input_els; ix++) { + output->data.f[ix] = 
sqrt(pow(input->data.c64[ix].re, 2) + pow(input->data.c64[ix].im, 2)); + } + + return kTfLiteOk; +} + +} // namespace complex_abs +} // namespace micro +} // namespace ops + +TfLiteRegistration Register_COMPLEX_ABS() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/ops::micro::complex_abs::Prepare, + /*invoke=*/ops::micro::complex_abs::Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/concatenation.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/concatenation.cc similarity index 77% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/concatenation.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/concatenation.cc index e912d54..13a5d63 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/concatenation.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/concatenation.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,11 +23,14 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace concatenation { + +// Patched by Edge Impulse +constexpr int RuntimeShape::kMaxSmallSize; + +namespace { constexpr int kMaxInputNum = 10; // Maximum number of input tensors constexpr int kOutputTensor = 0; @@ -104,51 +107,37 @@ void EvalUnquantized(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); } -void EvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node) { - // Collect the shapes and data pointer of input tensors - RuntimeShape inputs_shape[kMaxInputNum]; - const RuntimeShape* inputs_shape_ptr[kMaxInputNum]; - const uint8_t* inputs_data[kMaxInputNum]; - GetAllInputTensorShapes(context, node, inputs_shape); - GetShapesPointers(inputs_shape, node->inputs->size, inputs_shape_ptr); - GetAllInputTensorData(context, node, inputs_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - reference_ops::ConcatenationWithScaling( - data->params, inputs_shape_ptr, inputs_data, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(OpData)); } + TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // This function only checks the types. Additional shape validations are // performed in the reference implementation called during Eval(). 
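// Note on the pattern used in this Prepare() (an informal sketch, not authoritative):
// the newer TFLite Micro code paths obtain full TfLiteTensor views through
// MicroContext::AllocateTempInputTensor()/AllocateTempOutputTensor() instead of
// GetInput()/GetOutput(). The temporary views come out of a shared scratch region
// of the MicroContext, so every successful allocation is paired with a
// DeallocateTempTfLiteTensor() call before Prepare() returns, and only the
// lightweight TfLiteEvalTensor handles are used later in Eval().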
const TfLiteConcatenationParams* params = reinterpret_cast(node->builtin_data); - const TfLiteTensor* input_tensor = GetInput(context, node, 0); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input_tensor = micro_context->AllocateTempInputTensor(node, 0); TF_LITE_ENSURE(context, input_tensor != nullptr); TfLiteType input_type = input_tensor->type; - const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output_tensor = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output_tensor != nullptr); TfLiteType output_type = output_tensor->type; + micro_context->DeallocateTempTfLiteTensor(input_tensor); + micro_context->DeallocateTempTfLiteTensor(output_tensor); + // Check activation and input type TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); TF_LITE_ENSURE(context, - input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 || - input_type == kTfLiteInt8 || input_type == kTfLiteInt32 || - input_type == kTfLiteInt64); + input_type == kTfLiteFloat32 || input_type == kTfLiteInt8 || + input_type == kTfLiteInt16 || input_type == kTfLiteInt32 || + input_type == kTfLiteInt64 || input_type == kTfLiteBool); // Output type must match input type TF_LITE_ENSURE_EQ(context, output_type, input_type); @@ -159,36 +148,38 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // Shapes with dimensions >4 are not yet supported with static allocation. for (int i = 0; i < num_inputs; ++i) { - const TfLiteTensor* input = GetInput(context, node, i); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, i); TF_LITE_ENSURE(context, input != nullptr); int num_dimensions = NumDimensions(input); - if (num_dimensions > 4) { - TF_LITE_KERNEL_LOG( - context, - "Op Concatenation does not currently support num dimensions >4 " + if (num_dimensions > RuntimeShape::kMaxSmallSize) { + MicroPrintf( + "Op Concatenation does not currently support num dimensions > %d " "Tensor has %d dimensions.", - num_dimensions); + RuntimeShape::kMaxSmallSize, num_dimensions); return kTfLiteError; } + micro_context->DeallocateTempTfLiteTensor(input); } // Calculate OpData. TFLITE_DCHECK(node->user_data != nullptr); OpData* data = static_cast(node->user_data); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); switch (output_type) { // Already know in/outtypes are same. + case kTfLiteBool: case kTfLiteFloat32: + case kTfLiteInt16: case kTfLiteInt32: case kTfLiteInt64: { data->params.axis = CalculatePositiveAxis(params->axis, output); data->params.inputs_count = node->inputs->size; break; } - case kTfLiteUInt8: case kTfLiteInt8: { data->params.axis = CalculatePositiveAxis(params->axis, output); data->params.inputs_count = node->inputs->size; @@ -204,10 +195,11 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // Allocate persistent scale and zeropoint buffers. 
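// Why per-input quantization parameters are cached (informal note): the int8
// path may concatenate inputs that each carry a different (scale, zero_point)
// pair, so the reference ConcatenationWithScaling kernel requantizes every
// element into the output quantization before copying, roughly
//   q_out = clamp(round((in_scale / out_scale) * (q_in - in_zp)) + out_zp)
// to the int8 range, which is why the scales and zero points of all inputs are
// stored in OpParams here.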
// Store input scale and zero point values in OpParams: for (int i = 0; i < node->inputs->size; ++i) { - const TfLiteTensor* t = GetInput(context, node, i); + TfLiteTensor* t = micro_context->AllocateTempInputTensor(node, i); TF_LITE_ENSURE(context, t != nullptr); input_scales[i] = t->params.scale; input_zero_points[i] = t->params.zero_point; + micro_context->DeallocateTempTfLiteTensor(t); } data->params.input_scale = input_scales; @@ -217,17 +209,19 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { break; } default: - TF_LITE_KERNEL_LOG( - context, "Op Concatenation does not currently support Type '%s'.", - TfLiteTypeGetName(output_type)); + MicroPrintf("Op Concatenation does not currently support Type '%s'.", + TfLiteTypeGetName(output_type)); return kTfLiteError; } + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); + const TfLiteEvalTensor* output_tensor = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, output_tensor != nullptr); TfLiteType output_type = output_tensor->type; @@ -238,39 +232,32 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteInt32: EvalUnquantized(context, node); break; - case kTfLiteUInt8: - EvalQuantizedUInt8(context, node); - break; case kTfLiteInt8: EvalUnquantized(context, node); break; case kTfLiteInt64: EvalUnquantized(context, node); break; + case kTfLiteInt16: + EvalUnquantized(context, node); + break; + case kTfLiteBool: + EvalUnquantized(context, node); + break; default: - TF_LITE_KERNEL_LOG( - context, "Op Concatenation does not currently support Type '%s'.", - TfLiteTypeGetName(output_type)); + MicroPrintf("Op Concatenation does not currently support Type '%s'.", + TfLiteTypeGetName(output_type)); return kTfLiteError; } return kTfLiteOk; } -} // namespace concatenation +} // namespace TfLiteRegistration Register_CONCATENATION() { - return {/*init=*/concatenation::Init, - /*free=*/nullptr, - /*prepare=*/concatenation::Prepare, - /*invoke=*/concatenation::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cc new file mode 100644 index 0000000..32177b3 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cc @@ -0,0 +1,2213 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct OpData { + OpDataConv reference_op_data; + + // Index to buffer for optimizations if applicable. + int buffer_idx; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + int32_t buf_size = 0; + const auto& params = + *(static_cast(node->builtin_data)); + OpData* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + RuntimeShape input_shape = GetTensorShape(input); + RuntimeShape output_shape = GetTensorShape(output); + + // Initialize cmsis_nn input dimensions + cmsis_nn_dims input_dims; + input_dims.n = MatchingDim(input_shape, 0, output_shape, 0); + input_dims.h = input->dims->data[1]; + input_dims.w = input->dims->data[2]; + input_dims.c = input_shape.Dims(3); + + // Initialize cmsis_nn filter dimensions + cmsis_nn_dims filter_dims; + filter_dims.n = output_shape.Dims(3); + filter_dims.h = filter->dims->data[1]; + filter_dims.w = filter->dims->data[2]; + filter_dims.c = input_dims.c; + + // Initialize cmsis_nn output dimensions + cmsis_nn_dims output_dims; + output_dims.n = input_dims.n; + output_dims.h = output->dims->data[1]; + output_dims.w = output->dims->data[2]; + output_dims.c = output_shape.Dims(3); + + if (filter->type == kTfLiteInt4) { + int filter_size = + RuntimeShape(filter->dims->size, + reinterpret_cast(filter->dims->data)) + .FlatSize(); + context->RequestScratchBufferInArena( + context, filter_size, &data->reference_op_data.filter_buffer_index); + } + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + 
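// Per-channel requantization data (informal note): conv weights are quantized
// per output channel, so one (multiplier, shift) pair is needed for each of the
// num_channels output channels. The pairs are filled in by CalculateOpDataConv()
// below and must stay valid across every Invoke(), hence the persistent
// allocations rather than temporary scratch buffers.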
data->reference_op_data.per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->reference_op_data.per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataConv( + context, node, params, input_dims.w, input_dims.h, filter_dims.w, + filter_dims.h, output_dims.w, output_dims.h, input->type, + &data->reference_op_data)); + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + // Initialize cmsis_nn convolution parameters + cmsis_nn_conv_params conv_params; + conv_params.input_offset = -input->params.zero_point; + conv_params.output_offset = output->params.zero_point; + conv_params.stride.h = params.stride_height; + conv_params.stride.w = params.stride_width; + conv_params.dilation.h = params.dilation_height_factor; + conv_params.dilation.w = params.dilation_width_factor; + conv_params.padding.h = data->reference_op_data.padding.height; + conv_params.padding.w = data->reference_op_data.padding.width; + conv_params.activation.min = data->reference_op_data.output_activation_min; + conv_params.activation.max = data->reference_op_data.output_activation_max; + + if (input->type == kTfLiteInt8) { + buf_size = arm_convolve_wrapper_s8_get_buffer_size( + &conv_params, &input_dims, &filter_dims, &output_dims); + } else if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + buf_size = arm_convolve_wrapper_s16_get_buffer_size( + &conv_params, &input_dims, &filter_dims, &output_dims); + } + + if (buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buf_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams& params, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_conv_params conv_params; + conv_params.dilation.h = params.dilation_height_factor; + conv_params.dilation.w = params.dilation_width_factor; + + // Initialize cmsis_nn convolution parameters + conv_params.input_offset = -data.reference_op_data.input_zero_point; + conv_params.output_offset = data.reference_op_data.output_zero_point; + conv_params.stride.h = params.stride_height; + conv_params.stride.w = params.stride_width; + conv_params.padding.h = data.reference_op_data.padding.height; + conv_params.padding.w = data.reference_op_data.padding.width; + conv_params.activation.min = data.reference_op_data.output_activation_min; + conv_params.activation.max = data.reference_op_data.output_activation_max; + + // Initialize cmsis_nn per channel quantization parameters + cmsis_nn_per_channel_quant_params quant_params; + quant_params.multiplier = const_cast( + data.reference_op_data.per_channel_output_multiplier); + quant_params.shift = + const_cast(data.reference_op_data.per_channel_output_shift); + + RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); + RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + 
RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); + + // Consistency check. + TFLITE_DCHECK_LE(conv_params.activation.min, conv_params.activation.max); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + if (tflite::micro::GetOptionalTensorData(bias)) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + // Initialize cmsis_nn dimensions + // Input + cmsis_nn_dims input_dims; + input_dims.n = batch_size; + input_dims.h = input_shape.Dims(1); + input_dims.w = input_shape.Dims(2); + input_dims.c = input_depth; + + // Filter + cmsis_nn_dims filter_dims; + filter_dims.n = output_depth; + filter_dims.h = filter_shape.Dims(1); + filter_dims.w = filter_shape.Dims(2); + filter_dims.c = input_depth; + + // Bias + cmsis_nn_dims bias_dims; + bias_dims.n = 1; + bias_dims.h = 1; + bias_dims.w = 1; + bias_dims.c = output_depth; + + // Output + cmsis_nn_dims output_dims; + output_dims.n = batch_size; + output_dims.h = output_shape.Dims(1); + output_dims.w = output_shape.Dims(2); + output_dims.c = output_depth; + + // Initialize cmsis_nn context + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + if (data.buffer_idx > -1) { + ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); + // Note: ctx.size is currently not used in cmsis_nn. + // The buffer should be allocated in the Prepare function through + // arm_convolve_wrapper_s8_get_buffer_size + } + + // arm_convolve_wrapper_s8 dispatches the optimized kernel accordingly with + // the parameters passed + TFLITE_DCHECK_EQ( + arm_convolve_wrapper_s8( + &ctx, &conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizedPerChannel16x8( + TfLiteContext* context, TfLiteNode* node, const TfLiteConvParams& params, + const OpData& data, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_conv_params conv_params; + conv_params.dilation.h = params.dilation_height_factor; + conv_params.dilation.w = params.dilation_width_factor; + + // Initialize cmsis_nn convolution parameters + conv_params.input_offset = -data.reference_op_data.input_zero_point; + conv_params.output_offset = data.reference_op_data.output_zero_point; + conv_params.stride.h = params.stride_height; + conv_params.stride.w = params.stride_width; + conv_params.padding.h = data.reference_op_data.padding.height; + conv_params.padding.w = data.reference_op_data.padding.width; + conv_params.activation.min = data.reference_op_data.output_activation_min; + conv_params.activation.max = data.reference_op_data.output_activation_max; + + // Initialize cmsis_nn per channel quantization parameters + cmsis_nn_per_channel_quant_params quant_params; + quant_params.multiplier = const_cast( + data.reference_op_data.per_channel_output_multiplier); + quant_params.shift = + const_cast(data.reference_op_data.per_channel_output_shift); + + RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); + 
RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); + + // Consistency check. + TFLITE_DCHECK_LE(conv_params.activation.min, conv_params.activation.max); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + if (tflite::micro::GetOptionalTensorData(bias)) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + // Initialize cmsis_nn dimensions + // Input + cmsis_nn_dims input_dims; + input_dims.n = batch_size; + input_dims.h = input_shape.Dims(1); + input_dims.w = input_shape.Dims(2); + input_dims.c = input_depth; + + // Filter + cmsis_nn_dims filter_dims; + filter_dims.n = output_depth; + filter_dims.h = filter_shape.Dims(1); + filter_dims.w = filter_shape.Dims(2); + filter_dims.c = input_depth; + + // Bias + cmsis_nn_dims bias_dims; + bias_dims.n = 1; + bias_dims.h = 1; + bias_dims.w = 1; + bias_dims.c = output_depth; + + // Output + cmsis_nn_dims output_dims; + output_dims.n = batch_size; + output_dims.h = output_shape.Dims(1); + output_dims.w = output_shape.Dims(2); + output_dims.c = output_depth; + + // Initialize cmsis_nn context + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + if (data.buffer_idx > -1) { + ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); + // Note: ctx.size is currently not used in cmsis_nn. + // The buffer should be allocated in the Prepare function through + // arm_convolve_wrapper_s8_get_buffer_size + } + + TFLITE_DCHECK_EQ( + arm_convolve_wrapper_s16( + &ctx, &conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor( + context, data.reference_op_data.filter_buffer_index, filter); + + return EvalQuantizedPerChannel(context, node, params, data, input, + &filter_int8, bias, output); +} + +TfLiteStatus EvalInt16x8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? 
tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + return EvalQuantizedPerChannel16x8(context, node, params, data, input, filter, + bias, output); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG( + context, + input->type == filter->type || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && filter->type == kTfLiteInt4), + "Hybrid models are not supported on TFLite Micro."); + + TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor( + context, data.reference_op_data.filter_buffer_index, filter); + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_CONV_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + tflite::reference_ops::Conv( + ConvParamsFloat(params, data.reference_op_data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr); + break; + } + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_CONV_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + switch (filter_int8.type) { + case kTfLiteInt8: { + return EvalQuantizedPerChannel(context, node, params, data, input, + &filter_int8, bias, output); + } + + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), filter->type); + return kTfLiteError; + } + } + + break; + case kTfLiteInt16: + return EvalQuantizedPerChannel16x8(context, node, params, data, input, + filter, bias, output); + break; + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_CONV_2D() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +TfLiteRegistration Register_CONV_2D_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt8); +} + +TfLiteRegistration Register_CONV_2D_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt16x8); +} + +} // namespace tflite + +#elif 
EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h" + +#include "mli_api.h" // NOLINT +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_function_specializations.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kFilterTensor = 1; +constexpr int kBiasTensor = 2; +constexpr int kOutputTensor = 0; + +// Conv is quantized along dimension 0: +// https://www.tensorflow.org/lite/performance/quantization_spec +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) +constexpr int kConvQuantizedDimension = 3; +#else +constexpr int kConvQuantizedDimension = 0; +#endif + +// This file has 2 implementation of Conv. + +struct OpData { + TfLitePaddingValues padding; + + // Cached tensor zero point values for quantized operations. + int32_t input_zero_point; + int32_t filter_zero_point; + int32_t output_zero_point; + + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + + // Per channel output multiplier and shift. + int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; +#ifdef MLI_2_0 + int8_t* per_channel_scale_frac_bits; +#endif + + // The range of the fused activation layer. For example for kNone and + // uint8_t these would be 0 and 255. + int32_t output_activation_min; + int32_t output_activation_max; + + // The result of checking if MLI optimized version of tensors can be used. + bool is_mli_applicable; + + // Tensors in MLI format. 
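// (Informal note) This ARC backend mirrors the TfLite tensors into embARC MLI
// tensor descriptors once, during Prepare(), and also caches a pointer to the
// selected mli conv kernel, so Eval() only has to slice the data and call the
// cached function.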
+ mutable ops::micro::MliTensorInterface mli_in; + mutable ops::micro::MliTensorInterface mli_weights; + mutable ops::micro::MliTensorInterface mli_bias; + mutable ops::micro::MliTensorInterface mli_out; + mli_conv2d_cfg* cfg; + + // Pointer to the mli convolution function. + conv_func_ptr p_mli_krn_conv2d_sa8_sa8_sa32; +}; + +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) +inline PaddingType RuntimePaddingType(TfLitePadding padding) { + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} +#endif + +bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input, + const TfLiteTensor* filter, const TfLiteTensor* bias, + const TfLiteConvParams* params) { + const auto* affine_quantization = + reinterpret_cast(filter->quantization.params); + // MLI optimized version only supports int8_t datatype, dilation factor of 1 + // and per-axis quantization of weights (no broadcasting/per-tensor) + bool ret_val = (filter->type == kTfLiteInt8) && + (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) && + (params->dilation_width_factor == 1) && + (params->dilation_height_factor == 1) && + (affine_quantization->scale->size == + filter->dims->data[kConvQuantizedDimension]); + return ret_val; +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, + const TfLiteConvParams* params, int width, + int height, int filter_width, int filter_height, + int out_width, int out_height, + const TfLiteType data_type, OpData* data) { + bool has_bias = node->inputs->size == 3; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Matching GetWindowedOutputSize in TensorFlow. + auto padding = params->padding; + data->padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, + params->dilation_height_factor, params->dilation_width_factor, height, + width, filter_height, filter_width, padding, &out_height, &out_width); + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. 
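// How the quantization parameters are consumed below (informal sketch):
// PopulateConvolutionQuantizationParams() folds the three scales into one real
// multiplier per output channel,
//   real_multiplier[c] = (input_scale * filter_scale[c]) / output_scale,
// and decomposes each one into an int32 fixed-point multiplier M and a shift s
// with real_multiplier ~= M * 2^(s - 31), so Eval() can requantize the int32
// accumulator using integer-only arithmetic.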
+#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kFilterTensor); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + if (data_type != kTfLiteFloat32 && !data->is_mli_applicable) { + int output_channels = filter->dims->data[kConvQuantizedDimension]; + + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params->activation, + &data->output_multiplier, &data->output_shift, + &data->output_activation_min, &data->output_activation_max, + data->per_channel_output_multiplier, + reinterpret_cast(data->per_channel_output_shift), + output_channels)); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(bias); + micro_context->DeallocateTempTfLiteTensor(output); +#endif + return kTfLiteOk; +} +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = static_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kFilterTensor); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kBiasTensor); + + int input_width = input->dims->data[2]; + int input_height = input->dims->data[1]; +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) + int filter_width = filter->dims->data[1]; + int filter_height = filter->dims->data[0]; +#else + int filter_width = filter->dims->data[2]; + int filter_height = filter->dims->data[1]; +#endif + int output_width = output->dims->data[2]; + int output_height = output->dims->data[1]; + + // Dynamically allocate per-channel quantization parameters. + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + data->per_channel_output_multiplier = + reinterpret_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + reinterpret_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + data->is_mli_applicable = + IsMliApplicable(context, input, filter, bias, params); + + // All per-channel quantized tensors need valid zero point and scale arrays. 
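// (Informal note) For per-axis quantized conv weights the TFLite quantization
// spec ties the scale array to the quantized dimension: either a single
// broadcast scale or exactly one scale per output channel
// (filter->dims->data[kConvQuantizedDimension]), with a zero-point entry for
// each scale. The checks below enforce that contract before the values are
// handed to the MLI or reference kernels.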
+ if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->zero_point); + + TF_LITE_ENSURE(context, + affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpData( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, input->type, data)); + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + if (data->is_mli_applicable) { + data->mli_in = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_weights = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_bias = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_out = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->cfg = static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_conv2d_cfg))); + +#ifdef MLI_2_0 + data->per_channel_scale_frac_bits = + static_cast(context->AllocatePersistentBuffer( + context, 2 * num_channels * sizeof(int16_t))); +#endif + + // Reuse space allocated for OpData parameters. +#ifdef MLI_2_0 + *data->mli_weights.Scale() = + reinterpret_cast(data->per_channel_output_multiplier); + *data->mli_bias.Scale() = + reinterpret_cast(data->per_channel_output_multiplier) + + num_channels; +#else + *data->mli_weights.Scale() = + static_cast(data->per_channel_output_multiplier); + *data->mli_bias.Scale() = + static_cast(data->per_channel_output_shift); +#endif + +#ifdef MLI_2_0 + *data->mli_weights.ZeroPoint() = + reinterpret_cast(data->per_channel_output_shift); + *data->mli_bias.ZeroPoint() = + reinterpret_cast(data->per_channel_output_shift) + + num_channels; +#else + *data->mli_weights.ZeroPoint() = + reinterpret_cast(&data->filter_zero_point); + *data->mli_bias.ZeroPoint() = + reinterpret_cast(&data->filter_zero_point) + sizeof(int16_t); +#endif + +#ifdef MLI_2_0 + *data->mli_weights.ScaleFracBits() = + reinterpret_cast(data->per_channel_scale_frac_bits); + *data->mli_bias.ScaleFracBits() = + reinterpret_cast(data->per_channel_scale_frac_bits) + + num_channels; +#endif + + ops::micro::ConvertToMliTensor(input, &data->mli_in); + ops::micro::ConvertToMliTensorPerChannel(filter, &data->mli_weights, + /* is_bias_tensor = */ false); + ops::micro::ConvertToMliTensorPerChannel(bias, &data->mli_bias, + /* is_bias_tensor = */ true); +#ifdef MLI_2_0 + ops::micro::AdjustBiasTensor(&data->mli_bias, &data->mli_in, + &data->mli_weights); +#endif + ops::micro::ConvertToMliTensor(output, &data->mli_out); + +#ifdef MLI_2_0 + // Choose convolution mli specialized function. 
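// (Informal note) mli_krn_conv2d_hwcn() appears to return a function pointer to
// a shape/layout-specialized conv kernel, so the dispatch cost is paid once here
// in Prepare() and Eval() simply invokes the cached
// p_mli_krn_conv2d_sa8_sa8_sa32 pointer.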
+ data->p_mli_krn_conv2d_sa8_sa8_sa32 = + mli_krn_conv2d_hwcn(data->mli_weights.MliTensor()); +#else + data->p_mli_krn_conv2d_sa8_sa8_sa32 = + mli_krn_conv2d_hwcn(data->mli_weights.MliTensor(), data->cfg); +#endif + +#ifdef MLI_2_0 + data->cfg->dilation_width = 1; + data->cfg->dilation_height = 1; +#endif + + if (data->output_activation_min == -128 && + data->output_activation_max == 127) { + data->cfg->relu.type = MLI_RELU_NONE; + } else if (params->activation == kTfLiteActRelu) { + data->cfg->relu.type = MLI_RELU_GEN; + } else if (params->activation == kTfLiteActRelu6) { + data->cfg->relu.type = MLI_RELU_6; + } else if (params->activation == kTfLiteActReluN1To1) { + data->cfg->relu.type = MLI_RELU_1; + } else { + data->cfg->relu.type = MLI_RELU_NONE; + } + data->cfg->stride_width = params->stride_width; + data->cfg->stride_height = params->stride_height; + if (params->padding == kTfLitePaddingValid) { + data->cfg->padding_left = 0; + data->cfg->padding_right = 0; + data->cfg->padding_top = 0; + data->cfg->padding_bottom = 0; + } else { + data->cfg->padding_left = data->padding.width; + data->cfg->padding_right = + data->padding.width + data->padding.width_offset; + data->cfg->padding_top = data->padding.height; + data->cfg->padding_bottom = + data->padding.height + data->padding.height_offset; + } + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(bias); + return kTfLiteOk; +} + +TfLiteStatus EvalMliQuantizedPerChannel( + TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, + const OpData& data, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + // Run Conv MLI kernel + // MLI optimized version only supports int8_t dataype and dilation factor of 1 + if (data.is_mli_applicable) { + // Copy configuration data from external to local memory + mli_conv2d_cfg cfg_local = *data.cfg; + + ops::micro::MliTensorAttachBuffer(input, &data.mli_in); + ops::micro::MliTensorAttachBuffer(filter, &data.mli_weights); + ops::micro::MliTensorAttachBuffer(bias, &data.mli_bias); + ops::micro::MliTensorAttachBuffer(output, &data.mli_out); + + // for height slicing + const int height_dimension = 1; + int in_slice_height = 0; + int out_slice_height = 0; + const int kernel_height = + static_cast(data.mli_weights.Shape()[KRNL_H_DIM_HWC]); + const int overlap = kernel_height - cfg_local.stride_height; + +// for weight slicing (on output channels) +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) + // HWCN layout for weights, output channel dimension is the first dimension. + const int weight_out_ch_dimension = 3; +#else + // NHWC layout for weights, output channel dimension is the first dimension. + const int weight_out_ch_dimension = 0; +#endif + // bias has only 1 dimension + const int bias_out_ch_dimension = 0; + int slice_channels = + static_cast(data.mli_weights.Shape()[weight_out_ch_dimension]); + // Batch-Height-Width-Channel layout means last dimension is output + // channels. 
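// Slicing scheme used in the loops below (informal summary): weights and bias
// are sliced over the output-channel dimension so only slice_channels filters
// need to fit in fast local memory at a time; the input is then sliced over
// height with an overlap of (kernel_height - stride_height) rows so adjacent
// slices still cover the full receptive field, and each output slice is copied
// back to external memory after the MLI kernel has processed it.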
+ const int out_tensor_ch_dimension = 3; + + // Tensors for data in fast (local) memory and config to copy data from + // external to local memory + mli_tensor weights_local = *data.mli_weights.MliTensor(); + mli_tensor bias_local = *data.mli_bias.MliTensor(); + mli_tensor in_local = *data.mli_in.MliTensor(); + mli_tensor out_local = *data.mli_out.MliTensor(); + + ops::micro::MliTensorInterface weights_local_interface(&weights_local); + ops::micro::MliTensorInterface bias_local_interface(&bias_local); + ops::micro::MliTensorInterface in_local_interface(&in_local); + ops::micro::MliTensorInterface out_local_interface(&out_local); + + mli_mov_cfg_t copy_config; + mli_mov_cfg_for_copy(©_config); + + TF_LITE_ENSURE_STATUS(ops::micro::get_arc_scratch_buffer_for_conv_tensors( + context, &in_local_interface, &weights_local_interface, + &bias_local_interface, &out_local_interface)); + TF_LITE_ENSURE_STATUS(ops::micro::arc_scratch_buffer_calc_slice_size_io( + &in_local_interface, &out_local_interface, kernel_height, + cfg_local.stride_height, cfg_local.padding_top, + cfg_local.padding_bottom, &in_slice_height, &out_slice_height)); + TF_LITE_ENSURE_STATUS( + ops::micro::arc_scratch_buffer_calc_slice_size_weights( + &weights_local_interface, &bias_local_interface, + weight_out_ch_dimension, &slice_channels)); + + /* is_local indicates that the tensor is already in local memory, + so in that case the original tensor can be used, + and there is no need to copy it to the local tensor*/ + const bool in_is_local = + in_local_interface.Data() == data.mli_in.Data(); + const bool out_is_local = + out_local_interface.Data() == data.mli_out.Data(); + const bool b_is_local = + bias_local_interface.Data() == data.mli_bias.Data(); +#ifndef MLI_2_0_KRNL_TEST + const bool w_is_local = weights_local_interface.Data() == + data.mli_weights.Data(); +#endif + +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) + ops::micro::TensorSlicer w_slice(data.mli_weights.MliTensor(), + weight_out_ch_dimension, slice_channels, 0, + 0, 0, true); +#else + ops::micro::TensorSlicer w_slice(data.mli_weights.MliTensor(), + weight_out_ch_dimension, slice_channels); +#endif + ops::micro::TensorSlicer b_slice(data.mli_bias.MliTensor(), + bias_out_ch_dimension, slice_channels); + ops::micro::TensorSlicer out_ch_slice(data.mli_out.MliTensor(), + out_tensor_ch_dimension, + slice_channels, 0, 0, 0, true); + +#ifdef MLI_2_0_KRNL_TEST + mli_tensor* w_ptr = &weights_local; +#else + mli_tensor* w_ptr = w_is_local ? w_slice.Sub() : &weights_local; +#endif + mli_tensor* b_ptr = b_is_local ? b_slice.Sub() : &bias_local; + + void* input_buffer_ptr = NULL; + uint32_t input_buffer_size = 0; + + while (!w_slice.Done()) { +#ifndef MLI_2_0_KRNL_TEST + mli_mov_tensor_sync(w_slice.Sub(), ©_config, w_ptr); +#endif + mli_mov_tensor_sync(b_slice.Sub(), ©_config, b_ptr); + + /* mli_in tensor contains batches of HWC tensors. so it is a 4 dimensional + tensor. because the mli kernel will process one HWC tensor at a time, the + 4 dimensional tensor needs to be sliced into nBatch 3 dimensional tensors. + on top of that there could be a need to also slice in the Height + dimension. for that the sliceHeight has been calculated. 
The tensor slicer + is configured that it will completely slice the nBatch dimension (0) and + slice the height dimension (1) in chunks of 'sliceHeight' */ + ops::micro::TensorSlicer in_slice( + data.mli_in.MliTensor(), height_dimension, in_slice_height, + cfg_local.padding_top, cfg_local.padding_bottom, overlap); + + /* output tensor is already sliced in the output channel dimension. + out_ch_slice.Sub() is the tensor for the amount of output channels of this + iteration of the weight slice loop. This tensor needs to be further + sliced over the batch and height dimension. */ + ops::micro::TensorSlicer out_slice(out_ch_slice.Sub(), height_dimension, + out_slice_height); + + /* setup the pointers to the local or remote tensor to make the code + * inside the loop easier. */ + mli_tensor* in_ptr = in_is_local ? in_slice.Sub() : &in_local; + mli_tensor* out_ptr = out_is_local ? out_slice.Sub() : &out_local; + +#ifdef MLI_2_0_KRNL_TEST + /* Permute weights tensor to the HWCN layout */ + // Checking conditions here to prevent usage non-contiguous buffer memory. + if (data.mli_out.Shape()[out_tensor_ch_dimension] != + out_slice.Sub()->shape[FMAP_C_DIM_HWC] || + data.mli_out.Shape()[height_dimension] != + out_slice.Sub()->shape[FMAP_H_DIM_HWC]) { + MicroPrintf("Slicing is not supported with real-time permutation."); + return kTfLiteError; + } + mli_permute_cfg permute_cfg = {{1, 2, 3, 0}}; + ops::micro::permute_weights(data.mli_weights.MliTensor(), &permute_cfg, + w_ptr, &out_ptr->data); +#endif + + while (!out_slice.Done()) { + if (!out_is_local) { + ops::micro::PrepareLocalTensor(out_slice.Sub(), &out_local); + ops::micro::PrepareLocalTensor(in_slice.Sub(), &in_local); + } + + TF_LITE_ENSURE(context, !in_slice.Done()); + cfg_local.padding_top = in_slice.GetPaddingPre(); + cfg_local.padding_bottom = in_slice.GetPaddingPost(); + + // if same input copy as previous iteration, skip the copy of input +#ifdef MLI_2_0 + if ((in_slice.Sub()->data.mem.pi8 != input_buffer_ptr) || + (mli_hlp_count_elem_num(in_slice.Sub(), 0) != input_buffer_size)) { + mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); + input_buffer_ptr = in_slice.Sub()->data.mem.pi8; + input_buffer_size = mli_hlp_count_elem_num(in_slice.Sub(), 0); + } + + data.p_mli_krn_conv2d_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, &cfg_local, + out_ptr); +#else + if ((in_slice.Sub()->data != input_buffer_ptr) || + (mli_hlp_count_elem_num(in_slice.Sub(), 0) != input_buffer_size)) { + mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); + input_buffer_ptr = in_slice.Sub()->data; + input_buffer_size = mli_hlp_count_elem_num(in_slice.Sub(), 0); + } + data.p_mli_krn_conv2d_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, &cfg_local, + out_ptr); +#endif + mli_mov_tensor_sync(out_ptr, ©_config, out_slice.Sub()); + + in_slice.Next(); + out_slice.Next(); + } + w_slice.Next(); + b_slice.Next(); + out_ch_slice.Next(); + TF_LITE_ENSURE(context, in_slice.Done()); + } + } + return kTfLiteOk; +} + +void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, + TfLiteConvParams* params, const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output, + TfLiteEvalTensor* im2col) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + ConvParams op_params; + op_params.input_offset = -data.input_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.dilation_height_factor = 
params->dilation_height_factor; + op_params.dilation_width_factor = params->dilation_width_factor; + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; + + reference_integer_ops::ConvPerChannel( + op_params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#else + MicroPrintf("Node configuration is not supported by ARC MLI Library."); +#endif +} + +void EvalQuantizedPerChannelInt16(TfLiteContext* context, TfLiteNode* node, + TfLiteConvParams* params, const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + ConvParams op_params; + op_params.input_offset = -data.input_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.dilation_height_factor = params->dilation_height_factor; + op_params.dilation_width_factor = params->dilation_width_factor; + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; + + reference_integer_ops::ConvPerChannel( + op_params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#else + MicroPrintf("Node configuration is not supported by ARC MLI Library."); +#endif +} + +void EvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLiteConvParams* params, const OpData& data, + const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, TfLiteEvalTensor* im2col, + TfLiteEvalTensor* hwcn_weights, TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + float output_activation_min, output_activation_max; + CalculateActivationRange(params->activation, &output_activation_min, + &output_activation_max); + ConvParams op_params; + op_params.padding_type = RuntimePaddingType(params->padding); + op_params.padding_values.width = data.padding.width; + op_params.padding_values.height = data.padding.height; + op_params.stride_width = params->stride_width; + op_params.stride_height = params->stride_height; + op_params.dilation_width_factor = params->dilation_width_factor; + op_params.dilation_height_factor = params->dilation_height_factor; + op_params.float_activation_min = output_activation_min; + op_params.float_activation_max = output_activation_max; + + reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + 
tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(im2col), + tflite::micro::GetTensorData(im2col)); +#else + MicroPrintf("Type %s (%d) is not supported by ARC MLI Library.", + TfLiteTypeGetName(input->type), input->type); +#endif +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFilterTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kBiasTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG( + context, + input->type == filter->type || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8), + "Hybrid models are not supported on TFLite Micro."); + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_CONV_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + EvalFloat(context, node, params, data, input, filter, bias, nullptr, + nullptr, output); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_CONV_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + if (data.is_mli_applicable) { + EvalMliQuantizedPerChannel(context, node, params, data, input, filter, + bias, output); + } else { + EvalQuantizedPerChannel(context, node, params, data, input, filter, + bias, output, nullptr); + } + break; + case kTfLiteInt16: + EvalQuantizedPerChannelInt16(context, node, params, data, input, filter, + bias, output); + break; + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_CONV_2D() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP == 1 + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" + +#include "sl_mvp_ml_conv2d.h" + +namespace tflite { +namespace sl { +namespace conv2d { + +constexpr int kInputTensor = 0; +constexpr int kFilterTensor = 1; +constexpr int kBiasTensor = 2; +constexpr int kOutputTensor = 0; + +// Conv is quantized along dimension 0 of filter tensor. 
+// https://www.tensorflow.org/lite/performance/quantization_spec +constexpr int kConvQuantizedDimension = 0; + +enum op_support { kMvp, kCmsisNN, kTFLMrefF32, kTFLMrefI8 }; + +struct OpData { + op_support supported; + float activation_min_f32; + float activation_max_f32; + int scratch_buffer_index; + sli_mvp_ml_conv2d_s8_params_t op_params; + + // CMSIS-NN per channel output multiplier and shift. + int32_t *per_channel_output_multiplier; + int32_t *per_channel_output_shift; +}; + +inline float16_t normalize_fp16(float f) +{ + return (float16_t)std::min(std::max(f, SLI_MVP_FP16_MIN), SLI_MVP_FP16_MAX); +} + +inline PaddingType RuntimePaddingType(TfLitePadding padding) +{ + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + +TfLiteStatus PopulateConvolutionQuantizationParams( + TfLiteContext* context, + const TfLiteTensor* input, + const TfLiteTensor* filter, + TfLiteTensor* output, + const TfLiteFusedActivation& activation, + int32_t* output_activation_min, int32_t* output_activation_max, + float16_t* per_channel_scalers, int num_channels, float accumulator_multipler) +{ + auto affine_quantization = + reinterpret_cast(filter->quantization.params); + + // Populate multiplier and shift using affine quantization. + const float input_scale = input->params.scale; + const float output_scale = output->params.scale; + const float* filter_scales = affine_quantization->scale->data; + + for (int i = 0; i < num_channels; ++i) { + // If per-tensor quantization parameter is specified, broadcast it along the + // quantization dimension (channels_out). + const float filter_scale = filter_scales[i]; + const float effective_output_scale = (input_scale * filter_scale) / output_scale; + const float acc_output_scale = effective_output_scale * accumulator_multipler; + per_channel_scalers[i] = normalize_fp16(acc_output_scale); + } + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, activation, output, output_activation_min, + output_activation_max)); + + return kTfLiteOk; +} + +void *Init(TfLiteContext* context, const char* buffer, size_t length) +{ + (void)buffer; + (void)length; + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) +{ + int scratch_buffer_size = 0; + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = static_cast(node->builtin_data); + + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE(context, filter != nullptr); + + data->op_params.batches = input->dims->data[0]; + data->op_params.in_channels = input->dims->data[3]; + data->op_params.input_height = input->dims->data[1]; + data->op_params.input_width = input->dims->data[2]; + data->op_params.out_channels = filter->dims->data[kConvQuantizedDimension]; + data->op_params.output_height = output->dims->data[1]; + 
data->op_params.output_width = output->dims->data[2]; + data->op_params.filter_height = filter->dims->data[1]; + data->op_params.filter_width = filter->dims->data[2]; + data->op_params.input_offset = -input->params.zero_point; + data->op_params.output_offset = output->params.zero_point; + data->op_params.stride_height = params->stride_height; + data->op_params.stride_width = params->stride_width; + data->op_params.dilation_height = params->dilation_height_factor; + data->op_params.dilation_width = params->dilation_width_factor; + data->op_params.padding = params->padding == kTfLitePaddingSame; + + int dummy_height, dummy_width; + const auto padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, + params->dilation_height_factor, params->dilation_width_factor, + data->op_params.input_height, data->op_params.input_width, + data->op_params.filter_height, data->op_params.filter_width, + params->padding, + &dummy_height, &dummy_width); + + data->op_params.pad_height = padding.height; + data->op_params.pad_width = padding.width; + + const int num_channels = data->op_params.out_channels; + + if (input->type == kTfLiteInt8) { + if (sli_mvp_ml_conv2d_s8_is_supported(&data->op_params)) { + data->supported = kMvp; + + float16_t *bias_data = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(float16_t))); + if(bias != nullptr) { + data->op_params.bias = bias_data; + int32_t i32_bias; + for(int i = 0; i < num_channels; i++) { + i32_bias = bias->data.i32[i]; + bias_data[i] = float16_t(i32_bias * SLI_MVP_ACCUMULATOR_SCALER); + } + } else { + data->op_params.bias = nullptr; + } + + float16_t *scaler_data = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(float16_t))); + data->op_params.output_scaler = scaler_data; + TF_LITE_ENSURE_STATUS(PopulateConvolutionQuantizationParams( + context, input, filter, output, params->activation, + reinterpret_cast(&data->op_params.output_activation_min), + reinterpret_cast(&data->op_params.output_activation_max), + scaler_data, num_channels, SLI_MVP_ACCUMULATOR_MULTIPLIER)); + + } else { + data->per_channel_output_multiplier = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + int32_t dummy_output_multiplier; + int dummy_output_shift; + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params->activation, + &dummy_output_multiplier, &dummy_output_shift, + reinterpret_cast(&data->op_params.output_activation_min), + reinterpret_cast(&data->op_params.output_activation_max), + data->per_channel_output_multiplier, + reinterpret_cast(data->per_channel_output_shift), + num_channels)); + + if (data->op_params.dilation_height == 1 && data->op_params.dilation_width == 1) { + data->supported = kCmsisNN; + cmsis_nn_conv_params conv_params; + conv_params.input_offset = data->op_params.input_offset; + conv_params.output_offset = data->op_params.output_offset; + conv_params.stride.h = data->op_params.stride_height; + conv_params.stride.w = data->op_params.stride_width; + conv_params.dilation.h = 1; + conv_params.dilation.w = 1; + conv_params.padding.h = data->op_params.pad_height; + conv_params.padding.w = data->op_params.pad_width; + conv_params.activation.min = data->op_params.output_activation_min; + conv_params.activation.max = data->op_params.output_activation_max; + + 
cmsis_nn_dims input_dims; + input_dims.n = data->op_params.batches; + input_dims.h = data->op_params.input_height; + input_dims.w = data->op_params.input_width; + input_dims.c = data->op_params.in_channels; + + cmsis_nn_dims filter_dims; + filter_dims.h = data->op_params.filter_height; + filter_dims.w = data->op_params.filter_width; + + cmsis_nn_dims output_dims; + output_dims.h = data->op_params.output_height; + output_dims.w = data->op_params.output_width; + output_dims.c = data->op_params.out_channels; + + scratch_buffer_size = arm_convolve_wrapper_s8_get_buffer_size( + &conv_params, &input_dims, &filter_dims, &output_dims); + } else { + data->supported = kTFLMrefI8; + } + } + + } else if (input->type == kTfLiteFloat32) { + data->supported = kTFLMrefF32; + CalculateActivationRange(params->activation, + &data->activation_min_f32, + &data->activation_max_f32); + + } else { + TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + if(scratch_buffer_size > 0) { + TF_LITE_ENSURE_STATUS( + context->RequestScratchBufferInArena( + context, scratch_buffer_size, &data->scratch_buffer_index)); + } else { + data->scratch_buffer_index = -1; + } + + return kTfLiteOk; +} + +TfLiteStatus eval_mvp_int8(TfLiteContext* context, + OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + TfLiteEvalTensor* output) +{ + data->op_params.input = tflite::micro::GetTensorData(input); + data->op_params.output = tflite::micro::GetTensorData(output); + data->op_params.filter = tflite::micro::GetTensorData(filter); + + TF_LITE_ENSURE_EQ(context, SL_STATUS_OK, sli_mvp_ml_conv2d_s8(&data->op_params)); + + return kTfLiteOk; +} + +TfLiteStatus eval_cmsis_int8(TfLiteContext* context, + OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) +{ + cmsis_nn_dims input_dims; + input_dims.n = data->op_params.batches; + input_dims.h = data->op_params.input_height; + input_dims.w = data->op_params.input_width; + input_dims.c = data->op_params.in_channels; + + cmsis_nn_dims filter_dims; + filter_dims.n = data->op_params.out_channels; + filter_dims.h = data->op_params.filter_height; + filter_dims.w = data->op_params.filter_width; + filter_dims.c = data->op_params.in_channels; + + cmsis_nn_dims bias_dims; + bias_dims.n = 1; + bias_dims.h = 1; + bias_dims.w = 1; + bias_dims.c = data->op_params.out_channels; + + cmsis_nn_dims output_dims; + output_dims.n = data->op_params.batches; + output_dims.h = data->op_params.output_height; + output_dims.w = data->op_params.output_width; + output_dims.c = data->op_params.out_channels; + + cmsis_nn_per_channel_quant_params quant_params; + quant_params.multiplier = data->per_channel_output_multiplier; + quant_params.shift = data->per_channel_output_shift; + + cmsis_nn_conv_params conv_params; + conv_params.input_offset = data->op_params.input_offset; + conv_params.output_offset = data->op_params.output_offset; + conv_params.stride.h = data->op_params.stride_height; + conv_params.stride.w = data->op_params.stride_width; + conv_params.dilation.h = 1; + conv_params.dilation.w = 1; + conv_params.padding.h = data->op_params.pad_height; + conv_params.padding.w = data->op_params.pad_width; + conv_params.activation.min = data->op_params.output_activation_min; + conv_params.activation.max = data->op_params.output_activation_max; + + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + if (data->scratch_buffer_index 
> -1) { + ctx.buf = context->GetScratchBuffer(context, data->scratch_buffer_index); + } + TFLITE_DCHECK_EQ(ARM_MATH_SUCCESS, + arm_convolve_wrapper_s8( + &ctx, &conv_params, &quant_params, + &input_dims, tflite::micro::GetTensorData(input), + &filter_dims, tflite::micro::GetTensorData(filter), + &bias_dims, bias == nullptr ? NULL : tflite::micro::GetTensorData(bias), + &output_dims, tflite::micro::GetTensorData(output))); + + return kTfLiteOk; +} + +TfLiteStatus eval_tflm_int8(OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) +{ + ConvParams op_params; + + op_params.input_offset = data->op_params.input_offset; + op_params.output_offset = data->op_params.output_offset; + op_params.stride_height = data->op_params.stride_height; + op_params.stride_width = data->op_params.stride_width; + op_params.dilation_height_factor = data->op_params.dilation_height; + op_params.dilation_width_factor = data->op_params.dilation_width; + op_params.padding_values.height = data->op_params.pad_height; + op_params.padding_values.width = data->op_params.pad_width; + op_params.quantized_activation_min = data->op_params.output_activation_min; + op_params.quantized_activation_max = data->op_params.output_activation_max; + + reference_integer_ops::ConvPerChannel( + op_params, + data->per_channel_output_multiplier, + data->per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + bias == nullptr ? nullptr : tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; +} + +TfLiteStatus eval_float(TfLiteConvParams* params, + const OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) +{ + ConvParams op_params; + op_params.padding_type = RuntimePaddingType(params->padding); + op_params.padding_values.width = data->op_params.pad_width; + op_params.padding_values.height = data->op_params.pad_height; + op_params.stride_width = data->op_params.stride_width; + op_params.stride_height = data->op_params.stride_height; + op_params.dilation_width_factor = data->op_params.dilation_width; + op_params.dilation_height_factor = data->op_params.dilation_height; + op_params.float_activation_min = data->activation_min_f32; + op_params.float_activation_max = data->activation_max_f32; + + reference_ops::Conv(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + bias == nullptr ? nullptr : tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + RuntimeShape(), + nullptr); + return kTfLiteOk; +} + +TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node) +{ + TfLiteStatus status = kTfLiteError; + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto* params = reinterpret_cast(node->builtin_data); + OpData* data = static_cast(node->user_data); + + const auto input = tflite::micro::GetEvalInput(context, node, kInputTensor); + const auto filter = tflite::micro::GetEvalInput(context, node, kFilterTensor); + const auto bias = NumInputs(node) == 3 + ? 
tflite::micro::GetEvalInput(context, node, kBiasTensor) + : nullptr; + auto output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (data->supported == kMvp) { + status = eval_mvp_int8(context, data, input, filter, output); + + } else if (data->supported == kCmsisNN) { + status = eval_cmsis_int8(context, data, input, filter, bias, output); + + } else if (data->supported == kTFLMrefI8) { + status = eval_tflm_int8(data, input, filter, bias, output); + + } else if (data->supported == kTFLMrefF32) { + #if EI_TFLITE_DISABLE_CONV_2D_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + status = eval_float(params, data, input, filter, bias, output); + } + + return status; +} + +} // namespace conv2d +} // namespace sl + +TfLiteRegistration Register_CONV_2D() { + return {/*init=*/sl::conv2d::Init, + /*free=*/nullptr, + /*prepare=*/sl::conv2d::Prepare, + /*invoke=*/sl::conv2d::Invoke, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN == 1 +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/
+
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h"
+
+#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h"
+#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h"
+
+#include <esp_timer.h>
+
+#if ESP_NN
+#include "edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h"
+#endif
+
+
+long long conv_total_time = 0;
+
+namespace tflite {
+namespace {
+
+struct NodeData {
+  OpDataConv op_data;
+#if ESP_NN
+  int buffer_idx;
+#endif
+};
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  return context->AllocatePersistentBuffer(context, sizeof(NodeData));
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->user_data != nullptr);
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+
+  NodeData* data = static_cast<NodeData*>(node->user_data);
+  const auto& params =
+      *(static_cast<const TfLiteConvParams*>(node->builtin_data));
+
+  MicroContext* micro_context = GetMicroContext(context);
+
+  TfLiteTensor* input =
+      micro_context->AllocateTempInputTensor(node, kConvInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+  TfLiteTensor* filter =
+      micro_context->AllocateTempInputTensor(node, kConvWeightsTensor);
+  TF_LITE_ENSURE(context, filter != nullptr);
+  TfLiteTensor* output =
+      micro_context->AllocateTempOutputTensor(node, kConvOutputTensor);
+  TF_LITE_ENSURE(context, output != nullptr);
+
+  const int input_width = input->dims->data[2];
+  const int input_height = input->dims->data[1];
+  const int filter_width = filter->dims->data[2];
+  const int filter_height = filter->dims->data[1];
+  const int output_width = output->dims->data[2];
+  const int output_height = output->dims->data[1];
+
+  // Dynamically allocate per-channel quantization parameters.
+  const int num_channels = filter->dims->data[kConvQuantizedDimension];
+  data->op_data.per_channel_output_multiplier =
+      static_cast<int32_t*>(context->AllocatePersistentBuffer(
+          context, num_channels * sizeof(int32_t)));
+  data->op_data.per_channel_output_shift =
+      static_cast<int32_t*>(context->AllocatePersistentBuffer(
+          context, num_channels * sizeof(int32_t)));
+
+  // All per-channel quantized tensors need valid zero point and scale arrays.
+ if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TFLITE_DCHECK(affine_quantization != nullptr); + TFLITE_DCHECK(affine_quantization->scale != nullptr); + TFLITE_DCHECK(affine_quantization->zero_point != nullptr); + + TF_LITE_ENSURE(context, + affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataConv( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, input->type, &data->op_data)); + +#if ESP_NN + if (input->type == kTfLiteInt8) { + data_dims_t input_dims = { + .width = input_width, .height = input_height, + .channels = input->dims->data[3], 1 + }; + data_dims_t output_dims = { + .width = output_width, .height = output_height, + .channels = output->dims->data[3], 1 + }; + data_dims_t filter_dims = {.width = filter_width, .height = filter_height, 0, 0}; + conv_params_t conv_params = { + .in_offset = 0, .out_offset = 0, + .stride = {params.stride_width, params.stride_height}, + .padding = {data->op_data.padding.width, data->op_data.padding.height}, + .dilation = {0, 0}, .activation = {-128, 127} + }; + + int scratch_buf_size = esp_nn_get_conv_scratch_size( + &input_dims, &filter_dims, &output_dims, &conv_params); + if (scratch_buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, scratch_buf_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } +#endif + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + + return kTfLiteOk; +} + +#if ESP_NN +// Fixed-point per-channel-quantization convolution Int8 function wrapper. +inline void EvalQuantizedPerChannel( + TfLiteContext* context, TfLiteNode* node, const TfLiteConvParams& params, + const NodeData& data, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + const int dilation_width_factor = params.dilation_width_factor; + const int dilation_height_factor = params.dilation_height_factor; + + if (dilation_width_factor == 1 && dilation_height_factor == 1) { + // Get parameters. 
+ RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); + RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); + + const int8_t *input_data = tflite::micro::GetTensorData(input); + int8_t *output_data = tflite::micro::GetTensorData(output); + + const int32_t input_offset = -data.op_data.input_zero_point; + const int32_t output_offset = data.op_data.output_zero_point; + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = data.op_data.padding.width; + const int pad_height = data.op_data.padding.height; + + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + + // Set min and max value of the output. + const int32_t activation_min = data.op_data.output_activation_min; + const int32_t activation_max = data.op_data.output_activation_max; + + // Consistency check. + TFLITE_DCHECK_LE(activation_min, activation_max); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); + const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); + + if (tflite::micro::GetTensorData(bias)) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + void *scratch_buf = NULL; + if (data.buffer_idx > -1) { + scratch_buf = context->GetScratchBuffer(context, data.buffer_idx); + } + esp_nn_set_conv_scratch_buf(scratch_buf); + + const int input_size = input_width * input_height * input_depth; + const int output_size = output_width * output_height * output_depth; + + data_dims_t input_dims = { + .width = input_width, .height = input_height, + .channels = input_depth, 1 + }; + data_dims_t output_dims = { + .width = output_width, .height = output_height, + .channels = output_depth, 1 + }; + data_dims_t filter_dims = {.width = filter_width, .height = filter_height, 0, 0}; + conv_params_t conv_params = { + .in_offset = input_offset, .out_offset = output_offset, + .stride = {stride_width, stride_height}, + .padding = {pad_width, pad_height}, + .dilation = {0, 0}, + .activation = {activation_min, activation_max} + }; + quant_data_t quant_data = { + .shift = data.op_data.per_channel_output_shift, + .mult = data.op_data.per_channel_output_multiplier + }; + + for (int i_batch = 0; i_batch < batch_size; i_batch++) { + esp_nn_conv_s8(&input_dims, input_data + i_batch * input_size, + &filter_dims, tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorData(bias), + &output_dims, output_data + i_batch * output_size, + &conv_params, &quant_data); + } + } else { + reference_integer_ops::ConvPerChannel( + ConvParamsQuantized(params, data.op_data), + data.op_data.per_channel_output_multiplier, + data.op_data.per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + 
tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} +#endif + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const auto& data = *(static_cast(node->user_data)); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG(context, input->type == filter->type, + "Hybrid models are not supported on TFLite Micro."); + + long long start_time = esp_timer_get_time(); + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_CONV_2D_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + tflite::reference_ops::Conv( + ConvParamsFloat(params, data.op_data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr); + break; + } + case kTfLiteInt8: { +#if EI_TFLITE_DISABLE_CONV_2D_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif +#if ESP_NN + EvalQuantizedPerChannel(context, node, params, data, input, filter, + bias, output); +#else + reference_integer_ops::ConvPerChannel( + ConvParamsQuantized(params, data.op_data), + data.op_data.per_channel_output_multiplier, + data.op_data.per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#endif + break; + } + case kTfLiteUInt8: { +#if EI_TFLITE_DISABLE_CONV_2D_IN_U8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + //EvalQuantized + reference_ops::Conv(ConvParamsQuantized(params, data.op_data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr, + nullptr); + break; + } + default: + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + long long time_this_instance = esp_timer_get_time() - start_time; + conv_total_time += time_this_instance; + //printf("time this instance: %llu\n", 
time_this_instance / 1000); + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_CONV_2D() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite + +#else +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataConv)); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); + + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + TFLITE_DCHECK(node->user_data != nullptr); + const auto& data = *(static_cast(node->user_data)); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG( + context, + input->type == filter->type || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && filter->type == kTfLiteInt4), + "Hybrid models are not supported on TFLite Micro."); + + switch (input->type) { // Already know in/out types are same. 
+    case kTfLiteFloat32: {
+#if EI_TFLITE_DISABLE_CONV_2D_IN_F32
+      MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
+                  input->type);
+      return kTfLiteError;
+#endif
+      tflite::reference_ops::Conv(
+          ConvParamsFloat(params, data), tflite::micro::GetTensorShape(input),
+          tflite::micro::GetTensorData<float>(input),
+          tflite::micro::GetTensorShape(filter),
+          tflite::micro::GetTensorData<float>(filter),
+          tflite::micro::GetTensorShape(bias),
+          tflite::micro::GetOptionalTensorData<float>(bias),
+          tflite::micro::GetTensorShape(output),
+          tflite::micro::GetTensorData<float>(output),
+          tflite::micro::GetTensorShape(nullptr), nullptr);
+      break;
+    }
+    case kTfLiteInt16: {
+      switch (bias->type) {
+        case kTfLiteInt32: {
+          reference_integer_ops::ConvPerChannel(
+              ConvParamsQuantized(params, data),
+              data.per_channel_output_multiplier, data.per_channel_output_shift,
+              tflite::micro::GetTensorShape(input),
+              tflite::micro::GetTensorData<int16_t>(input),
+              tflite::micro::GetTensorShape(filter),
+              tflite::micro::GetTensorData<int8_t>(filter),
+              tflite::micro::GetTensorShape(bias),
+              tflite::micro::GetOptionalTensorData<std::int32_t>(bias),
+              tflite::micro::GetTensorShape(output),
+              tflite::micro::GetTensorData<int16_t>(output));
+          break;
+        }
+        case kTfLiteInt64: {
+          reference_integer_ops::ConvPerChannel(
+              ConvParamsQuantized(params, data),
+              data.per_channel_output_multiplier, data.per_channel_output_shift,
+              tflite::micro::GetTensorShape(input),
+              tflite::micro::GetTensorData<int16_t>(input),
+              tflite::micro::GetTensorShape(filter),
+              tflite::micro::GetTensorData<int8_t>(filter),
+              tflite::micro::GetTensorShape(bias),
+              tflite::micro::GetOptionalTensorData<std::int64_t>(bias),
+              tflite::micro::GetTensorShape(output),
+              tflite::micro::GetTensorData<int16_t>(output));
+          break;
+        }
+        default:
+          MicroPrintf("Bias type %s (%d) not supported.",
+                      TfLiteTypeGetName(bias->type), bias->type);
+          return kTfLiteError;
+      }
+      break;
+    }
+    case kTfLiteInt8: {
+#if EI_TFLITE_DISABLE_CONV_2D_IN_I8
+      MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
+                  input->type);
+      return kTfLiteError;
+#endif
+      switch (filter->type) {
+        case kTfLiteInt4: {
+          int8_t* unpacked_filter_data = static_cast<int8_t*>(
+              context->GetScratchBuffer(context, data.filter_buffer_index));
+          tflite::tensor_utils::UnpackDenseInt4IntoInt8(
+              tflite::micro::GetTensorData<int8_t>(filter),
+              tflite::micro::GetTensorShape(filter).FlatSize(),
+              unpacked_filter_data);
+          reference_integer_ops::ConvPerChannel(
+              ConvParamsQuantized(params, data),
+              data.per_channel_output_multiplier, data.per_channel_output_shift,
+              tflite::micro::GetTensorShape(input),
+              tflite::micro::GetTensorData<int8_t>(input),
+              tflite::micro::GetTensorShape(filter), unpacked_filter_data,
+              tflite::micro::GetTensorShape(bias),
+              tflite::micro::GetOptionalTensorData<int32_t>(bias),
+              tflite::micro::GetTensorShape(output),
+              tflite::micro::GetTensorData<int8_t>(output));
+          break;
+        }
+        case kTfLiteInt8: {
+          reference_integer_ops::ConvPerChannel(
+              ConvParamsQuantized(params, data),
+              data.per_channel_output_multiplier, data.per_channel_output_shift,
+              tflite::micro::GetTensorShape(input),
+              tflite::micro::GetTensorData<int8_t>(input),
+              tflite::micro::GetTensorShape(filter),
+              tflite::micro::GetTensorData<int8_t>(filter),
+              tflite::micro::GetTensorShape(bias),
+              tflite::micro::GetOptionalTensorData<int32_t>(bias),
+              tflite::micro::GetTensorShape(output),
+              tflite::micro::GetTensorData<int8_t>(output));
+          break;
+        }
+        default:
+          MicroPrintf("Weight type %s (%d) not supported.",
+                      TfLiteTypeGetName(filter->type), filter->type);
+          return kTfLiteError;
+      }
+      break;
+    }
+    default:
+      MicroPrintf("Type %s (%d) not supported.",
TfLiteTypeGetName(input->type),
+                  input->type);
+      return kTfLiteError;
+  }
+  return kTfLiteOk;
+}
+
+}  // namespace
+
+TfLiteRegistration Register_CONV_2D() {
+  return tflite::micro::RegisterOp(Init, ConvPrepare, Eval);
+}
+
+}  // namespace tflite
+
+#endif
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cpp
deleted file mode 100644
index d46d99d..0000000
--- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.cpp
+++ /dev/null
@@ -1,990 +0,0 @@
-// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels
-#include "../../../../classifier/ei_classifier_config.h"
-#if 0 == 1
-/* noop */
-#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1
-/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-==============================================================================*/
-
-#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h"
-
-#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h"
-#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h"
-#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h"
-#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
-#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h"
-#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"
-
-namespace tflite {
-namespace {
-
-struct OpData {
-  OpDataConv reference_op_data;
-
-  // Index to buffer for optimizations if applicable.
- int buffer_idx; -}; - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - int32_t buf_size = 0; - const auto& params = - *(static_cast(node->builtin_data)); - OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kConvInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kConvWeightsTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* output = GetOutput(context, node, kConvOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - RuntimeShape input_shape = GetTensorShape(input); - RuntimeShape output_shape = GetTensorShape(output); - - // Initialize cmsis_nn input dimensions - cmsis_nn_dims input_dims; - input_dims.n = MatchingDim(input_shape, 0, output_shape, 0); - input_dims.h = input->dims->data[1]; - input_dims.w = input->dims->data[2]; - input_dims.c = input_shape.Dims(3); - - // Initialize cmsis_nn filter dimensions - cmsis_nn_dims filter_dims; - filter_dims.n = output_shape.Dims(3); - filter_dims.h = filter->dims->data[1]; - filter_dims.w = filter->dims->data[2]; - filter_dims.c = input_dims.c; - - // Initialize cmsis_nn output dimensions - cmsis_nn_dims output_dims; - output_dims.n = input_dims.n; - output_dims.h = output->dims->data[1]; - output_dims.w = output->dims->data[2]; - output_dims.c = output_shape.Dims(3); - - // Dynamically allocate per-channel quantization parameters. - // TODO(#42883): This allocation is done even for non-int8 cases to get around - // a bug in kernel_util.cc which incorrectly uses per_channel_output_shift in - // non-int8 cases. Protect this section with a if (input->type == kTfLiteInt8) - // when the issue is fixed. 
- const int num_channels = filter->dims->data[kConvQuantizedDimension]; - data->reference_op_data.per_channel_output_multiplier = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->reference_op_data.per_channel_output_shift = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - TF_LITE_ENSURE_STATUS(CalculateOpDataConv( - context, node, params, input_dims.w, input_dims.h, filter_dims.w, - filter_dims.h, output_dims.w, output_dims.h, input->type, - &data->reference_op_data)); - - if (input->type == kTfLiteInt8) { - // Initialize cmsis_nn convolution parameters - cmsis_nn_conv_params conv_params; - conv_params.input_offset = -input->params.zero_point; - conv_params.output_offset = output->params.zero_point; - conv_params.stride.h = params.stride_height; - conv_params.stride.w = params.stride_width; - conv_params.dilation.h = params.dilation_height_factor; - conv_params.dilation.w = params.dilation_width_factor; - conv_params.padding.h = data->reference_op_data.padding.height; - conv_params.padding.w = data->reference_op_data.padding.width; - conv_params.activation.min = data->reference_op_data.output_activation_min; - conv_params.activation.max = data->reference_op_data.output_activation_max; - - buf_size = arm_convolve_wrapper_s8_get_buffer_size( - &conv_params, &input_dims, &filter_dims, &output_dims); - } - - if (buf_size > 0) { - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, buf_size, &data->buffer_idx)); - } else { - data->buffer_idx = -1; - } - return kTfLiteOk; -} - -TfLiteStatus EvalQuantizedPerChannel( - TfLiteContext* context, TfLiteNode* node, const TfLiteConvParams& params, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output, TfLiteEvalTensor* im2col) { - cmsis_nn_conv_params conv_params; - conv_params.dilation.h = params.dilation_height_factor; - conv_params.dilation.w = params.dilation_width_factor; - // TODO(#43557) Remove checks for dilation and call to reference - // implementation when dilation is supported in the optimized implementation - // by CMSIS-NN. - if (conv_params.dilation.h == 1 && conv_params.dilation.w == 1) { - // Initialize cmsis_nn convolution parameters - conv_params.input_offset = -data.reference_op_data.input_zero_point; - conv_params.output_offset = data.reference_op_data.output_zero_point; - conv_params.stride.h = params.stride_height; - conv_params.stride.w = params.stride_width; - conv_params.padding.h = data.reference_op_data.padding.height; - conv_params.padding.w = data.reference_op_data.padding.width; - conv_params.activation.min = data.reference_op_data.output_activation_min; - conv_params.activation.max = data.reference_op_data.output_activation_max; - - // Initialize cmsis_nn per channel quantization parameters - cmsis_nn_per_channel_quant_params quant_params; - quant_params.multiplier = const_cast( - data.reference_op_data.per_channel_output_multiplier); - quant_params.shift = - const_cast(data.reference_op_data.per_channel_output_shift); - - RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); - - // Consistency check. 
- TFLITE_DCHECK_LE(conv_params.activation.min, conv_params.activation.max); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); - const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3); - const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3); - if (tflite::micro::GetTensorData(bias)) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - - // Initialize cmsis_nn dimensions - // Input - cmsis_nn_dims input_dims; - input_dims.n = batch_size; - input_dims.h = input_shape.Dims(1); - input_dims.w = input_shape.Dims(2); - input_dims.c = input_depth; - - // Filter - cmsis_nn_dims filter_dims; - filter_dims.n = output_depth; - filter_dims.h = filter_shape.Dims(1); - filter_dims.w = filter_shape.Dims(2); - filter_dims.c = input_depth; - - // Bias - cmsis_nn_dims bias_dims; - bias_dims.n = 1; - bias_dims.h = 1; - bias_dims.w = 1; - bias_dims.c = output_depth; - - // Output - cmsis_nn_dims output_dims; - output_dims.n = batch_size; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = output_depth; - - // Initialize cmsis_nn context - cmsis_nn_context ctx; - ctx.buf = nullptr; - ctx.size = 0; - - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - // Note: ctx.size is currently not used in cmsis_nn. - // The buffer should be allocated in the Prepare function through - // arm_convolve_wrapper_s8_get_buffer_size - } - - // arm_convolve_wrapper_s8 dispatches the optimized kernel accordingly with - // the parameters passed - TFLITE_DCHECK_EQ( - arm_convolve_wrapper_s8( - &ctx, &conv_params, &quant_params, &input_dims, - tflite::micro::GetTensorData(input), &filter_dims, - tflite::micro::GetTensorData(filter), &bias_dims, - tflite::micro::GetTensorData(bias), &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - } else { - reference_integer_ops::ConvPerChannel( - ConvParamsQuantized(params, data.reference_op_data), - data.reference_op_data.per_channel_output_multiplier, - data.reference_op_data.per_channel_output_shift, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kConvInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? 
tflite::micro::GetEvalInput(context, node, kConvBiasTensor) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); - - TFLITE_DCHECK(node->builtin_data != nullptr); - const auto& params = - *(reinterpret_cast(node->builtin_data)); - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: { - tflite::reference_ops::Conv( - ConvParamsFloat(params, data.reference_op_data), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr); - break; - } - case kTfLiteInt8: - return EvalQuantizedPerChannel(context, node, params, data, input, filter, - bias, output, nullptr); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#elif EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h" - -#include "mli_api.h" // NOLINT -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Conv is quantized along dimension 0: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kConvQuantizedDimension = 0; - -// This file has 2 implementation of Conv. - -struct OpData { - TfLitePaddingValues padding; - - // Cached tensor zero point values for quantized operations. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - // Per channel output multiplier and shift. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; - - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; - - // The result of checking if MLI optimized version of tensors can be used. - bool is_mli_applicable; - - // Tensors in MLI format. 
- mli_tensor* mli_in; - mli_tensor* mli_weights; - mli_tensor* mli_bias; - mli_tensor* mli_out; - mli_conv2d_cfg* cfg; -}; - -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) -inline PaddingType RuntimePaddingType(TfLitePadding padding) { - switch (padding) { - case TfLitePadding::kTfLitePaddingSame: - return PaddingType::kSame; - case TfLitePadding::kTfLitePaddingValid: - return PaddingType::kValid; - case TfLitePadding::kTfLitePaddingUnknown: - default: - return PaddingType::kNone; - } -} -#endif - -bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input, - const TfLiteTensor* filter, const TfLiteTensor* bias, - const TfLiteConvParams* params) { - const auto* affine_quantization = - reinterpret_cast(filter->quantization.params); - // MLI optimized version only supports int8_t datatype, dilation factor of 1 - // and per-axis quantization of weights (no broadcasting/per-tensor) - bool ret_val = (filter->type == kTfLiteInt8) && - (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) && - (params->dilation_width_factor == 1) && - (params->dilation_height_factor == 1) && - (affine_quantization->scale->size == - filter->dims->data[kConvQuantizedDimension]); - return ret_val; -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - const TfLiteConvParams* params, int width, - int height, int filter_width, int filter_height, - int out_width, int out_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Matching GetWindowedOutputSize in TensorFlow. - auto padding = params->padding; - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - params->dilation_height_factor, params->dilation_width_factor, height, - width, filter_height, filter_width, padding, &out_height, &out_width); - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. 
-#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - if (data_type != kTfLiteFloat32 && !data->is_mli_applicable) { - int output_channels = filter->dims->data[kConvQuantizedDimension]; - - TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->output_multiplier, &data->output_shift, - &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), - output_channels)); - } -#endif - return kTfLiteOk; -} -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = static_cast(node->builtin_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - - int input_width = input->dims->data[2]; - int input_height = input->dims->data[1]; - int filter_width = filter->dims->data[2]; - int filter_height = filter->dims->data[1]; - int output_width = output->dims->data[2]; - int output_height = output->dims->data[1]; - - // Dynamically allocate per-channel quantization parameters. - const int num_channels = filter->dims->data[kConvQuantizedDimension]; - data->per_channel_output_multiplier = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - data->is_mli_applicable = - IsMliApplicable(context, input, filter, bias, params); - - // All per-channel quantized tensors need valid zero point and scale arrays. 
- if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - static_cast(filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - - TF_LITE_ENSURE(context, - affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData( - context, node, params, input_width, input_height, filter_width, - filter_height, output_width, output_height, input->type, data)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - if (data->is_mli_applicable) { - data->mli_in = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_weights = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_bias = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_out = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->cfg = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_conv2d_cfg))); - - // reuse space allocated for OpData parameters - data->mli_weights->el_params.asym.scale.pi32 = - static_cast(data->per_channel_output_multiplier); - data->mli_bias->el_params.asym.scale.pi32 = - static_cast(data->per_channel_output_shift); - - data->mli_weights->el_params.asym.zero_point.pi16 = - reinterpret_cast(&data->filter_zero_point); - data->mli_bias->el_params.asym.zero_point.pi16 = - reinterpret_cast(&data->filter_zero_point) + sizeof(int16_t); - - ops::micro::ConvertToMliTensor(input, data->mli_in); - ops::micro::ConvertToMliTensorPerChannel(filter, data->mli_weights); - ops::micro::ConvertToMliTensorPerChannel(bias, data->mli_bias); - ops::micro::ConvertToMliTensor(output, data->mli_out); - - if (params->activation == kTfLiteActRelu) { - data->cfg->relu.type = MLI_RELU_GEN; - } else if (params->activation == kTfLiteActRelu6) { - data->cfg->relu.type = MLI_RELU_6; - } else if (params->activation == kTfLiteActReluN1To1) { - data->cfg->relu.type = MLI_RELU_1; - } else { - data->cfg->relu.type = MLI_RELU_NONE; - } - data->cfg->stride_width = params->stride_width; - data->cfg->stride_height = params->stride_height; - if (params->padding == kTfLitePaddingValid) { - data->cfg->padding_left = 0; - data->cfg->padding_right = 0; - data->cfg->padding_top = 0; - data->cfg->padding_bottom = 0; - } else { - data->cfg->padding_left = data->padding.width; - data->cfg->padding_right = - data->padding.width + data->padding.width_offset; - data->cfg->padding_top = data->padding.height; - data->cfg->padding_bottom = - data->padding.height + data->padding.height_offset; - } - } - return kTfLiteOk; -} - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* im2col, TfLiteEvalTensor* hwcn_weights, - TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = 
-data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - ConvParams op_params; - op_params.padding_type = RuntimePaddingType(params->padding); - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(im2col), - tflite::micro::GetTensorData(im2col), nullptr); -#else - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); -#endif -} - -TfLiteStatus EvalMliQuantizedPerChannel( - TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - // Run Conv MLI kernel - // MLI optimized version only supports int8_t dataype and dilation factor of 1 - if (data.is_mli_applicable) { - // Copy configuration data from external to local memory - mli_conv2d_cfg cfg_local = *data.cfg; - - ops::micro::MliTensorAttachBuffer(input, data.mli_in); - ops::micro::MliTensorAttachBuffer(filter, data.mli_weights); - ops::micro::MliTensorAttachBuffer(bias, data.mli_bias); - ops::micro::MliTensorAttachBuffer(output, data.mli_out); - - // for height slicing - const int height_dimension = 1; - int in_slice_height = 0; - int out_slice_height = 0; - const int kernel_height = - static_cast(data.mli_weights->shape[KRNL_H_DIM_HWC]); - const int overlap = kernel_height - cfg_local.stride_height; - - // for weight slicing (on output channels) - // NHWC layout for weights, output channel dimension is the first dimension. - const int weight_out_ch_dimension = 0; - int slice_channels = - static_cast(data.mli_weights->shape[weight_out_ch_dimension]); - // Batch-Height-Width-Channel layout means last dimension is output - // channels. 
- const int out_tensor_ch_dimension = 3; - - // Tensors for data in fast (local) memory and config to copy data from - // external to local memory - mli_tensor weights_local = *data.mli_weights; - mli_tensor bias_local = *data.mli_bias; - mli_tensor in_local = *data.mli_in; - mli_tensor out_local = *data.mli_out; - mli_mov_cfg_t copy_config; - mli_mov_cfg_for_copy(©_config); - TF_LITE_ENSURE_STATUS(ops::micro::get_arc_scratch_buffer_for_conv_tensors( - context, &in_local, &weights_local, &bias_local, &out_local)); - TF_LITE_ENSURE_STATUS(ops::micro::arc_scratch_buffer_calc_slice_size_io( - &in_local, &out_local, kernel_height, cfg_local.stride_height, - cfg_local.padding_top, cfg_local.padding_bottom, &in_slice_height, - &out_slice_height)); - TF_LITE_ENSURE_STATUS( - ops::micro::arc_scratch_buffer_calc_slice_size_weights( - &weights_local, &bias_local, weight_out_ch_dimension, - &slice_channels)); - - /* is_local indicates that the tensor is already in local memory, - so in that case the original tensor can be used, - and there is no need to copy it to the local tensor*/ - const bool in_is_local = in_local.data == data.mli_in->data; - const bool out_is_local = out_local.data == data.mli_out->data; - const bool w_is_local = weights_local.data == data.mli_weights->data; - const bool b_is_local = bias_local.data == data.mli_bias->data; - - ops::micro::TensorSlicer w_slice(data.mli_weights, weight_out_ch_dimension, - slice_channels); - ops::micro::TensorSlicer b_slice(data.mli_bias, weight_out_ch_dimension, - slice_channels); - ops::micro::TensorSlicer out_ch_slice(data.mli_out, out_tensor_ch_dimension, - slice_channels, 0, 0, 0, true); - - mli_tensor* w_ptr = w_is_local ? w_slice.Sub() : &weights_local; - mli_tensor* b_ptr = b_is_local ? b_slice.Sub() : &bias_local; - - void* input_buffer_ptr = NULL; - uint32_t input_buffer_size = 0; - - while (!w_slice.Done()) { - mli_mov_tensor_sync(w_slice.Sub(), ©_config, w_ptr); - mli_mov_tensor_sync(b_slice.Sub(), ©_config, b_ptr); - - /* mli_in tensor contains batches of HWC tensors. so it is a 4 dimensional - tensor. because the mli kernel will process one HWC tensor at a time, the - 4 dimensional tensor needs to be sliced into nBatch 3 dimensional tensors. - on top of that there could be a need to also slice in the Height - dimension. for that the sliceHeight has been calculated. The tensor slicer - is configured that it will completely slice the nBatch dimension (0) and - slice the height dimension (1) in chunks of 'sliceHeight' */ - ops::micro::TensorSlicer in_slice(data.mli_in, height_dimension, - in_slice_height, cfg_local.padding_top, - cfg_local.padding_bottom, overlap); - - /* output tensor is already sliced in the output channel dimension. - out_ch_slice.Sub() is the tensor for the amount of output channels of this - iteration of the weight slice loop. This tensor needs to be further - sliced over the batch and height dimension. */ - ops::micro::TensorSlicer out_slice(out_ch_slice.Sub(), height_dimension, - out_slice_height); - - /* setup the pointers to the local or remote tensor to make the code - * inside the loop easier. */ - mli_tensor* in_ptr = in_is_local ? in_slice.Sub() : &in_local; - mli_tensor* out_ptr = out_is_local ? 
out_slice.Sub() : &out_local; - - while (!out_slice.Done()) { - TF_LITE_ENSURE(context, !in_slice.Done()); - cfg_local.padding_top = in_slice.GetPaddingPre(); - cfg_local.padding_bottom = in_slice.GetPaddingPost(); - - // if same input copy as previous iteration, skip the copy of input - if ((in_slice.Sub()->data != input_buffer_ptr) || - (mli_hlp_count_elem_num(in_slice.Sub(), 0) != input_buffer_size)) { - mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); - input_buffer_ptr = in_slice.Sub()->data; - input_buffer_size = mli_hlp_count_elem_num(in_slice.Sub(), 0); - } - mli_krn_conv2d_nhwc_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, &cfg_local, - out_ptr); - mli_mov_tensor_sync(out_ptr, ©_config, out_slice.Sub()); - - in_slice.Next(); - out_slice.Next(); - } - w_slice.Next(); - b_slice.Next(); - out_ch_slice.Next(); - TF_LITE_ENSURE(context, in_slice.Done()); - } - } - return kTfLiteOk; -} - -void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output, - TfLiteEvalTensor* im2col) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - ConvParams op_params; - op_params.input_offset = -data.input_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - - reference_integer_ops::ConvPerChannel( - op_params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -#else - TF_LITE_KERNEL_LOG(context, - "Node configuration is not supported by ARC MLI Library."); -#endif -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* im2col, - TfLiteEvalTensor* hwcn_weights, TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - ConvParams op_params; - op_params.padding_type = RuntimePaddingType(params->padding); - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - reference_ops::Conv(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - 
tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(im2col), - tflite::micro::GetTensorData(im2col)); -#else - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); -#endif -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kBiasTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: - EvalFloat(context, node, params, data, input, filter, bias, nullptr, - nullptr, output); - break; - case kTfLiteInt8: - if (data.is_mli_applicable) { - EvalMliQuantizedPerChannel(context, node, params, data, input, filter, - bias, output); - } else { - EvalQuantizedPerChannel(context, node, params, data, input, filter, - bias, output, nullptr); - } - break; - case kTfLiteUInt8: - EvalQuantized(context, node, params, data, input, filter, bias, nullptr, - nullptr, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#else -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpDataConv)); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kConvInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kConvWeightsTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kConvBiasTensor) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kConvOutputTensor); - - TFLITE_DCHECK(node->builtin_data != nullptr); - const auto& params = - *(reinterpret_cast(node->builtin_data)); - TFLITE_DCHECK(node->user_data != nullptr); - const auto& data = *(static_cast(node->user_data)); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - switch (input->type) { // Already know in/out types are same. 
- case kTfLiteFloat32: { - tflite::reference_ops::Conv( - ConvParamsFloat(params, data), tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr); - break; - } - case kTfLiteInt8: { - reference_integer_ops::ConvPerChannel( - ConvParamsQuantized(params, data), data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/ConvPrepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h index 3399526..2a4b63d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h @@ -1,4 +1,4 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -45,6 +45,10 @@ struct OpDataConv { // uint8_t these would be 0 and 255. int32_t output_activation_min; int32_t output_activation_max; + + // A buffer used to store unpacked filter values. This is used if the source + // tensor is of n-bit precision that cannot be easily processed by kernels. + int filter_buffer_index; }; extern const int kConvInputTensor; @@ -72,6 +76,41 @@ TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node, TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node); +// This is the most generic TfLiteRegistration. The actual supported types may +// still be target dependent. The only requirement is that every implementation +// (reference or optimized) must define this function. +TfLiteRegistration Register_CONV_2D(); + +#if defined(XTENSA) +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int8 activations and int8 weights and always calls the reference +// implementation. +TfLiteRegistration Register_CONV_2D_INT8REF(); +#else +inline TfLiteRegistration Register_CONV_2D_INT8REF() { + return Register_CONV_2D(); +} +#endif + +#if defined(CMSIS_NN) +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int8 activations and int8 weights and uses the latency optimized +// implementations. 
+TfLiteRegistration Register_CONV_2D_INT8(); + +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int16 activations and int8 weights and uses the latency optimized +// implementations. +TfLiteRegistration Register_CONV_2D_INT16(); + +#else +inline TfLiteRegistration Register_CONV_2D_INT8() { return Register_CONV_2D(); } + +inline TfLiteRegistration Register_CONV_2D_INT16() { + return Register_CONV_2D(); +} +#endif + } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_common.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_common.cc similarity index 76% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_common.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_common.cc index 8a21348..fe23085 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_common.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_common.cc @@ -14,12 +14,8 @@ limitations under the License. ==============================================================================*/ #include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/conv.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h" @@ -93,13 +89,18 @@ TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node, params.dilation_width_factor, height, width, filter_height, filter_width, padding, &out_height, &out_width); - const TfLiteTensor* input = GetInput(context, node, kConvInputTensor); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kConvWeightsTensor); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kConvBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kConvOutputTensor); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kConvBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); TF_LITE_ENSURE(context, output != nullptr); // Note that quantized inference requires that all tensors have their @@ -111,8 +112,7 @@ TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node, context, input, filter, bias, output, params.activation, &data->output_multiplier, &data->output_shift, &data->output_activation_min, 
&data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), + data->per_channel_output_multiplier, data->per_channel_output_shift, output_channels)); } @@ -120,6 +120,11 @@ TfLiteStatus CalculateOpDataConv(TfLiteContext* context, TfLiteNode* node, data->filter_zero_point = filter->params.zero_point; data->output_zero_point = output->params.zero_point; + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(bias); + return kTfLiteOk; } @@ -130,12 +135,16 @@ TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) { OpDataConv* data = static_cast(node->user_data); const auto& params = *(static_cast(node->builtin_data)); + MicroContext* micro_context = GetMicroContext(context); - TfLiteTensor* output = GetOutput(context, node, kConvOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); TF_LITE_ENSURE(context, output != nullptr); - const TfLiteTensor* input = GetInput(context, node, kConvInputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kConvWeightsTensor); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); TF_LITE_ENSURE(context, filter != nullptr); const int input_width = input->dims->data[2]; @@ -146,16 +155,18 @@ TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) { const int output_height = output->dims->data[1]; // Dynamically allocate per-channel quantization parameters. - const int num_channels = filter->dims->data[kConvQuantizedDimension]; - data->per_channel_output_multiplier = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); + if (input->type != kTfLiteFloat32) { + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + } // All per-channel quantized tensors need valid zero point and scale arrays. 
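The per-channel arrays allocated above hold one fixed-point multiplier and shift per output channel of the filter. A minimal float-domain sketch of the value each pair approximates, assuming the usual per-channel convention real_multiplier[c] = input_scale * filter_scale[c] / output_scale (the helper name and parameters below are illustrative, not taken from this file):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Float reference for one output channel; the kernels instead store the
    // equivalent Q31 multiplier plus shift in per_channel_output_multiplier /
    // per_channel_output_shift.
    inline int8_t RequantizeReference(int32_t acc, double input_scale,
                                      double filter_scale_c, double output_scale,
                                      int32_t output_zero_point) {
      const double real_multiplier = input_scale * filter_scale_c / output_scale;
      long v = std::lround(acc * real_multiplier) + output_zero_point;
      v = std::max(-128L, std::min(127L, v));  // clamp to int8 range
      return static_cast<int8_t>(v);
    }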
- if (input->type == kTfLiteInt8) { + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, filter->quantization.type, kTfLiteAffineQuantization); @@ -169,14 +180,25 @@ TfLiteStatus ConvPrepare(TfLiteContext* context, TfLiteNode* node) { affine_quantization->scale->size == 1 || affine_quantization->scale->size == filter->dims->data[kConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); } TF_LITE_ENSURE_STATUS(CalculateOpDataConv( context, node, params, input_width, input_height, filter_width, filter_height, output_width, output_height, input->type, data)); + if (filter->type == kTfLiteInt4) { + int filter_size = + RuntimeShape(filter->dims->size, + reinterpret_cast(filter->dims->data)) + .FlatSize(); + context->RequestScratchBufferInArena(context, filter_size, + &data->filter_buffer_index); + } + + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_test.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_test.h index f7a2459..cdaaefa 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_test.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/conv_test.h @@ -13,8 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_ #include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" @@ -59,36 +59,56 @@ TfLiteStatus ValidateConvGoldens(TfLiteTensor* tensors, int tensors_size, TfLiteRegistration registration, uint8_t* output_data, float tolerance = 1e-5); -TfLiteStatus TestConvFloat(const int* input_dims_data, const float* input_data, - const int* filter_dims_data, - const float* filter_data, const int* bias_dims_data, - const float* bias_data, const int* output_dims_data, +TfLiteStatus TestConvFloat(int* input_dims_data, const float* input_data, + int* filter_dims_data, const float* filter_data, + int* bias_dims_data, const float* bias_data, + int* output_dims_data, const float* expected_output_data, TfLiteConvParams* conv_params, TfLiteRegistration registration, float* output_data); TfLiteStatus TestConvQuantizedPerLayer( - const int* input_dims_data, const float* input_data, - uint8_t* input_quantized, float input_scale, const int* filter_dims_data, - const float* filter_data, uint8_t* filter_quantized, float filter_scale, - const int* bias_dims_data, const float* bias_data, int32_t* bias_quantized, - const int* output_dims_data, const float* expected_output_data, - uint8_t* expected_output_quantized, float output_scale, - TfLiteConvParams* conv_params, TfLiteRegistration registration, - uint8_t* output_data); + int* input_dims_data, const float* input_data, uint8_t* input_quantized, + float input_scale, int* filter_dims_data, const float* filter_data, + uint8_t* filter_quantized, float 
filter_scale, int* bias_dims_data, + const float* bias_data, int32_t* bias_quantized, int* output_dims_data, + const float* expected_output_data, uint8_t* expected_output_quantized, + float output_scale, TfLiteConvParams* conv_params, + TfLiteRegistration registration, uint8_t* output_data); TfLiteStatus TestConvQuantizedPerChannel( - const int* input_dims_data, const float* input_data, - int8_t* input_quantized, float input_scale, int input_zero_point, - const int* filter_dims_data, const float* filter_data, - int8_t* filter_data_quantized, const int* bias_dims_data, - const float* bias_data, int32_t* bias_data_quantized, float* bias_scales, - int* bias_zero_points, const int* output_dims_data, + int* input_dims_data, const float* input_data, int8_t* input_quantized, + float input_scale, int input_zero_point, int* filter_dims_data, + const float* filter_data, int8_t* filter_data_quantized, + int* bias_dims_data, const float* bias_data, int32_t* bias_data_quantized, + float* bias_scales, int* bias_zero_points, int* output_dims_data, const float* expected_output_data, int8_t* expected_output_data_quantized, float output_scale, int output_zero_point, TfLiteConvParams* conv_params, - TfLiteRegistration registration, int8_t* output_data); + TfLiteRegistration registration, int8_t* output_data, + TfLiteType tensor_weight_type = kTfLiteNoType); + +TfLiteStatus TestConvQuantizedPerChannel( + int* input_dims_data, const float* input_data, int16_t* input_quantized, + float input_scale, int input_zero_point, int* filter_dims_data, + const float* filter_data, int8_t* filter_data_quantized, + int* bias_dims_data, const float* bias_data, + std::int64_t* bias_data_quantized, float* bias_scales, + int* bias_zero_points, int* output_dims_data, + const float* expected_output_data, int16_t* expected_output_data_quantized, + float output_scale, int output_zero_point, TfLiteConvParams* conv_params, + TfLiteRegistration registration, int16_t* output_data); + +TfLiteStatus TestConvQuantizedPerChannel( + int* input_dims_data, const float* input_data, int16_t* input_quantized, + float input_scale, int input_zero_point, int* filter_dims_data, + const float* filter_data, int8_t* filter_data_quantized, + int* bias_dims_data, const float* bias_data, int32_t* bias_data_quantized, + float* bias_scales, int* bias_zero_points, int* output_dims_data, + const float* expected_output_data, int16_t* expected_output_data_quantized, + float output_scale, int output_zero_point, TfLiteConvParams* conv_params, + TfLiteRegistration registration, int16_t* output_data); } // namespace testing } // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_H_ +#endif // TENSORFLOW_LITE_MICRO_KERNELS_CONV_TEST_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cumsum.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cumsum.cc new file mode 100644 index 0000000..bdc888b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/cumsum.cc @@ -0,0 +1,175 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/cumsum.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kAxisTensor = 1; +constexpr int kOutputTensor = 0; + +constexpr int kCumSumIntegerShift = 20; + +// only used with INT8 tensors +struct OpData { + int32_t output_activation_min; + int32_t output_activation_max; + int32_t input_offset; + int32_t output_offset; + int32_t input_multiplier; + int32_t output_multiplier; + int input_shift; + int output_shift; + int left_shift; +}; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* axis = + micro_context->AllocateTempInputTensor(node, kAxisTensor); + + TF_LITE_ENSURE(context, + input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); + TF_LITE_ENSURE_EQ(context, axis->type, kTfLiteInt32); + + TF_LITE_ENSURE_EQ(context, NumElements(axis), 1); + + TF_LITE_ENSURE(context, NumDimensions(input) >= 1); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE(context, HaveSameShapes(input, output)); + + if (output->type == kTfLiteInt8) { + node->user_data = + context->AllocatePersistentBuffer(context, sizeof(OpData)); + OpData* data = static_cast(node->user_data); + + // 8bit -> 8bit general quantized path, with general rescalings + data->input_offset = -input->params.zero_point; + data->output_offset = output->params.zero_point; + data->left_shift = kCumSumIntegerShift; + const double twice_max_input_scale = + 2 * static_cast(input->params.scale); + const double real_input_multiplier = + static_cast(input->params.scale) / twice_max_input_scale; + const double real_output_multiplier = + twice_max_input_scale / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input_multiplier, &data->input_multiplier, &data->input_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, kTfLiteActNone, output, &data->output_activation_min, + &data->output_activation_max)); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(axis); + micro_context->DeallocateTempTfLiteTensor(output); 
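A worked example of the rescaling set up above, with assumed scales input_scale = output_scale = 0.05 and left_shift = kCumSumIntegerShift = 20. Both real multipliers come out below 1.0, which is what lets QuantizeMultiplierSmallerThanOneExp encode them as a Q31 multiplier plus a right shift:

    #include <cstdio>

    int main() {
      const double input_scale = 0.05, output_scale = 0.05;  // assumed scales
      const int left_shift = 20;                              // kCumSumIntegerShift
      const double twice_max_input_scale = 2 * input_scale;
      // Mirrors the expressions in CalculateOpData above.
      const double real_input_multiplier = input_scale / twice_max_input_scale;  // 0.5
      const double real_output_multiplier =
          twice_max_input_scale / ((1 << left_shift) * output_scale);            // ~1.9e-6
      std::printf("input multiplier %.3f, output multiplier %.3e\n",
                  real_input_multiplier, real_output_multiplier);
      return 0;
    }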
+ + return kTfLiteOk; +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* axis_tensor = + tflite::micro::GetEvalInput(context, node, kAxisTensor); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + auto* cs_params = static_cast(node->builtin_data); + auto input_shape = tflite::micro::GetTensorShape(input); + + int32_t axis = *tflite::micro::GetTensorData(axis_tensor); + if (axis < 0) axis += input_shape.DimensionsCount(); + + if (axis < 0 || axis >= input_shape.DimensionsCount()) { + MicroPrintf("CUMSUM Invalid axis: %d", axis); + return kTfLiteError; + } + + switch (input->type) { + case kTfLiteFloat32: { + reference_ops::CumSum(tflite::micro::GetTensorData(input), + input_shape, axis, cs_params->exclusive, + cs_params->reverse, + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } break; + + case kTfLiteInt8: { + auto* data = static_cast(node->user_data); + ArithmeticParams params; + params.left_shift = data->left_shift; + params.input1_offset = data->input_offset; + params.input1_multiplier = data->input_multiplier; + params.input1_shift = data->input_shift; + params.output_offset = data->output_offset; + params.output_multiplier = data->output_multiplier; + params.output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, + data->output_activation_max, ¶ms); + reference_ops::CumSum(params, tflite::micro::GetTensorData(input), + input_shape, axis, cs_params->exclusive, + cs_params->reverse, + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } break; + + default: { + MicroPrintf("CUMSUM only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } + + return kTfLiteError; +} + +} // namespace + +TfLiteRegistration Register_CUMSUM() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depth_to_space.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depth_to_space.cc new file mode 100644 index 0000000..72e1545 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depth_to_space.cc @@ -0,0 +1,142 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depth_to_space.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +// input/output tensor shape rank associations +constexpr int kBatchRank = 0; +constexpr int kHeightRank = 1; +constexpr int kWidthRank = 2; +constexpr int kDepthRank = 3; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); + + auto data_type = output->type; + TF_LITE_ENSURE(context, + data_type == kTfLiteFloat32 || data_type == kTfLiteInt8); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + const int block_size = params->block_size; + TF_LITE_ENSURE(context, block_size > 0); + const int input_height = input->dims->data[kHeightRank]; + const int input_width = input->dims->data[kWidthRank]; + const int input_channels = input->dims->data[kDepthRank]; + int output_height = input_height * block_size; + int output_width = input_width * block_size; + int output_channels = input_channels / block_size / block_size; + + TF_LITE_ENSURE_EQ(context, input_height, output_height / block_size); + TF_LITE_ENSURE_EQ(context, input_width, output_width / block_size); + TF_LITE_ENSURE_EQ(context, input_channels, + output_channels * block_size * block_size); + + // We must update the output tensor dimensions. + // The dims storage is expected to be the same area in memory + // for both TfLiteTensor and TfLiteEvalTensor. This is important + // because TfLiteTensor in the MicroInterpreter is a temporary + // allocation. For the KernelRunner interpreter, TfLiteEvalTensor + // is a temporary allocation. We must therefore relocate the dims + // from the FlatBuffer to the persistant storage arena. 
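A quick sanity check of the shape arithmetic enforced above, using an assumed 1x4x4x16 NHWC input and block_size = 2 (values chosen only for illustration):

    // DEPTH_TO_SPACE moves block_size*block_size depth slices into a
    // block_size x block_size spatial tile, so 1x4x4x16 becomes 1x8x8x4.
    constexpr int block_size = 2;
    constexpr int in_h = 4, in_w = 4, in_c = 16;
    constexpr int out_h = in_h * block_size;                  // 8
    constexpr int out_w = in_w * block_size;                  // 8
    constexpr int out_c = in_c / (block_size * block_size);   // 4
    static_assert(in_c == out_c * block_size * block_size,
                  "depth must divide evenly by block_size^2");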
+ TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + output->dims->data[kBatchRank] = input->dims->data[kBatchRank]; + output->dims->data[kHeightRank] = output_height; + output->dims->data[kWidthRank] = output_width; + output->dims->data[kDepthRank] = output_channels; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + tflite::DepthToSpaceParams op_params; + op_params.block_size = static_cast(params->block_size); + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: + reference_ops::DepthToSpace(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::DepthToSpace(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("DEPTH_TO_SPACE only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_DEPTH_TO_SPACE() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cc new file mode 100644 index 0000000..000bb0b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cc @@ -0,0 +1,2106 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct OpData { + OpDataConv reference_op_data; + + // Index to buffer for optimizations if applicable. + int buffer_idx; +}; + +// Always inline for optimal code size. +void PopulateDwConvParams( + cmsis_nn_dw_conv_params* const dw_conv_params, + cmsis_nn_per_channel_quant_params* const quant_params, + cmsis_nn_dims* const input_dims, cmsis_nn_dims* const filter_dims, + cmsis_nn_dims* const bias_dims, cmsis_nn_dims* const output_dims, + const TfLiteDepthwiseConvParams& params, const OpData& data, + const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) + __attribute__((always_inline)); + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto& params = + *(reinterpret_cast(node->builtin_data)); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kDepthwiseConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kDepthwiseConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kDepthwiseConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + const TfLiteType data_type = input->type; + int input_width = SizeOfDimension(input, 2); + int input_height = SizeOfDimension(input, 1); + int filter_width = SizeOfDimension(filter, 2); + int filter_height = SizeOfDimension(filter, 1); + int output_width = SizeOfDimension(output, 2); + int output_height = SizeOfDimension(output, 1); + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + } + + // All per-channel quantized tensors need valid zero point and scale arrays. 
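A small sketch of the per-channel layout being validated here, assuming a hypothetical 3x3 depthwise filter with 8 output channels: the scale array must hold either a single value or one entry per element of the filter's last dimension (kDepthwiseConvQuantizedDimension = 3):

    // Depthwise filters are 1 x H x W x (in_channels * depth_multiplier),
    // so the quantized dimension is the last one.
    constexpr int filter_dims_data[4] = {1, 3, 3, 8};  // assumed filter shape
    constexpr int kQuantizedDim = 3;
    constexpr int expected_scales = filter_dims_data[kQuantizedDim];
    static_assert(expected_scales == 8, "one scale per output channel");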
+ const auto* affine_quantization = + reinterpret_cast( + filter->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->zero_point); + TF_LITE_ENSURE( + context, affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kDepthwiseConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + + // Allocate memory for per-channel quantization parameters + const int num_channels = + filter->dims->data[kDepthwiseConvQuantizedDimension]; + + data->reference_op_data.per_channel_output_multiplier = + reinterpret_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->reference_op_data.per_channel_output_shift = + reinterpret_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + } + + if (filter->type == kTfLiteInt4) { + int filter_size = + RuntimeShape(filter->dims->size, + reinterpret_cast(filter->dims->data)) + .FlatSize(); + context->RequestScratchBufferInArena( + context, filter_size, &data->reference_op_data.filter_buffer_index); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, data_type, + &data->reference_op_data)); + + if (input->type == kTfLiteInt8) { + RuntimeShape input_shape = GetTensorShape(input); + RuntimeShape output_shape = GetTensorShape(output); + RuntimeShape filter_shape = GetTensorShape(filter); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int output_depth = MatchingDim(output_shape, 3, filter_shape, 3); + TFLITE_DCHECK_EQ(batch_size, 1); /* Only batch = 1 is supported */ + + cmsis_nn_dims input_dims; + input_dims.n = batch_size; + input_dims.h = input_height; + input_dims.w = input_width; + input_dims.c = input_shape.Dims(3); + + cmsis_nn_dims filter_dims; + filter_dims.n = 1; + filter_dims.h = filter_height; + filter_dims.w = filter_width; + filter_dims.c = output_depth; + + cmsis_nn_dims output_dims; + output_dims.n = batch_size; + output_dims.h = output_height; + output_dims.w = output_width; + output_dims.c = output_depth; + + cmsis_nn_dw_conv_params dw_conv_params; + dw_conv_params.padding.h = data->reference_op_data.padding.height; + dw_conv_params.padding.w = data->reference_op_data.padding.width; + dw_conv_params.dilation.h = params.dilation_height_factor; + dw_conv_params.dilation.w = params.dilation_width_factor; + + const int32_t buf_size = arm_depthwise_conv_wrapper_s8_get_buffer_size( + &dw_conv_params, &input_dims, &filter_dims, &output_dims); + + if (buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buf_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + + return kTfLiteOk; +} + +inline void PopulateDwConvParams( + cmsis_nn_dw_conv_params* const dw_conv_params, + cmsis_nn_per_channel_quant_params* const quant_params, + cmsis_nn_dims* const input_dims, cmsis_nn_dims* const filter_dims, + cmsis_nn_dims* const bias_dims, cmsis_nn_dims* 
const output_dims, + const TfLiteDepthwiseConvParams& params, const OpData& data, + const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { + dw_conv_params->dilation.h = params.dilation_height_factor; + dw_conv_params->dilation.w = params.dilation_width_factor; + + dw_conv_params->input_offset = -data.reference_op_data.input_zero_point; + dw_conv_params->output_offset = data.reference_op_data.output_zero_point; + dw_conv_params->stride.h = params.stride_height; + dw_conv_params->stride.w = params.stride_width; + dw_conv_params->padding.h = data.reference_op_data.padding.height; + dw_conv_params->padding.w = data.reference_op_data.padding.width; + + dw_conv_params->activation.min = data.reference_op_data.output_activation_min; + dw_conv_params->activation.max = data.reference_op_data.output_activation_max; + + dw_conv_params->ch_mult = params.depth_multiplier; + + quant_params->multiplier = + data.reference_op_data.per_channel_output_multiplier; + quant_params->shift = data.reference_op_data.per_channel_output_shift; + + RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); + RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); + + TFLITE_DCHECK_LE(dw_conv_params->activation.min, + dw_conv_params->activation.max); + + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); + + if (tflite::micro::GetOptionalTensorData(bias)) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + input_dims->n = batch_size; + input_dims->h = input_shape.Dims(1); + input_dims->w = input_shape.Dims(2); + input_dims->c = input_shape.Dims(3); + + filter_dims->n = filter_shape.Dims(0); + filter_dims->h = filter_shape.Dims(1); + filter_dims->w = filter_shape.Dims(2); + filter_dims->c = output_depth; + + bias_dims->n = 1; + bias_dims->h = 1; + bias_dims->w = 1; + bias_dims->c = output_depth; + + output_dims->n = batch_size; + output_dims->h = output_shape.Dims(1); + output_dims->w = output_shape.Dims(2); + output_dims->c = output_depth; +} + +void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, + const OpData& data, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_dw_conv_params dw_conv_params; + cmsis_nn_per_channel_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + + PopulateDwConvParams(&dw_conv_params, &quant_params, &input_dims, + &filter_dims, &bias_dims, &output_dims, params, data, + input, filter, bias, output); + + cmsis_nn_context ctx; + ctx.buf = nullptr; + /* 'size' is unused */ + ctx.size = 0; + + if (data.buffer_idx > -1) { + ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); + } + + TFLITE_DCHECK_EQ( + arm_depthwise_conv_wrapper_s8( + &ctx, &dw_conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); +} + +void EvalQuantizedPerChannel16x8(TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, 
+ const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_dw_conv_params dw_conv_params; + cmsis_nn_per_channel_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + + PopulateDwConvParams(&dw_conv_params, &quant_params, &input_dims, + &filter_dims, &bias_dims, &output_dims, params, data, + input, filter, bias, output); + + cmsis_nn_context ctx; + ctx.buf = nullptr; + /* 'size' is unused */ + ctx.size = 0; + + TFLITE_DCHECK_EQ( + arm_depthwise_conv_s16( + &ctx, &dw_conv_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetOptionalTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor( + context, data.reference_op_data.filter_buffer_index, filter); + + switch (input->type) { // Already know in/out types are same. 
+ case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + tflite::reference_ops::DepthwiseConv( + DepthwiseConvParamsFloat(params, data.reference_op_data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + switch (filter_int8.type) { + case kTfLiteInt8: { + EvalQuantizedPerChannel(context, node, params, data, input, + &filter_int8, bias, output); + break; + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), filter->type); + return kTfLiteError; + } + } + break; + case kTfLiteInt16: + EvalQuantizedPerChannel16x8(context, node, params, data, input, filter, + bias, output); + break; + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor( + context, data.reference_op_data.filter_buffer_index, filter); + + EvalQuantizedPerChannel(context, node, params, data, input, &filter_int8, + bias, output); + return kTfLiteOk; +} + +TfLiteStatus EvalInt16x8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + const auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? 
tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + EvalQuantizedPerChannel16x8(context, node, params, data, input, filter, bias, + output); + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_DEPTHWISE_CONV_2D() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +TfLiteRegistration Register_DEPTHWISE_CONV_2D_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt8); +} + +TfLiteRegistration Register_DEPTHWISE_CONV_2D_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt16x8); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" + +#include "mli_api.h" // NOLINT +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_function_specializations.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kFilterTensor = 1; +constexpr int kBiasTensor = 2; +constexpr int kOutputTensor = 0; + +// Depthwise conv is quantized along dimension 3: +// https://www.tensorflow.org/lite/performance/quantization_spec +constexpr int kDepthwiseConvQuantizedDimension = 3; + +struct OpData { + TfLitePaddingValues padding; + + // Cached tensor zero point values for quantized operations. + int32_t input_zero_point; + int32_t filter_zero_point; + int32_t output_zero_point; + + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + + // Per channel output multiplier and shift. 
+ int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; +#ifdef MLI_2_0 + int8_t* per_channel_scale_frac_bits; +#endif + + // The range of the fused activation layer. For example for kNone and + // uint8_t these would be 0 and 255. + int32_t output_activation_min; + int32_t output_activation_max; + + // The result of checking if MLI optimized version of tensors can be used. + bool is_mli_applicable; + + // Tensors in MLI format. + mutable ops::micro::MliTensorInterface mli_in; + mutable ops::micro::MliTensorInterface mli_weights; + mutable ops::micro::MliTensorInterface mli_bias; + mutable ops::micro::MliTensorInterface mli_out; + mli_conv2d_cfg* cfg; + + // Pointer to the required depthwise function. For “channel multiplier” + // functionality group convolution is used. + depthwise_func_ptr p_mli_krn_depthwise_conv2d_sa8_sa8_sa32; +}; + +bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input, + const TfLiteTensor* filter, const TfLiteTensor* bias, + const TfLiteDepthwiseConvParams* params) { + const auto* affine_quantization = + reinterpret_cast(filter->quantization.params); + +#ifndef MLI_2_0 + const int in_ch = SizeOfDimension(input, 3); + const int filters_num = SizeOfDimension(filter, 3); +#endif + + // MLI optimized version only supports int8_t datatype, dilation factor of 1 + // and per-axis quantization of weights (no broadcasting/per-tensor). For + // MLI 1.1 (in_ch == filters_num) || (in_ch == 1)) is used to prevent usage of + // channel multiplier logic for multichannel input. + + bool ret_val = (filter->type == kTfLiteInt8) && + (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) && + (params->dilation_width_factor == 1) && + (params->dilation_height_factor == 1) && + (affine_quantization->scale->size == +#ifdef MLI_2_0 + filter->dims->data[kDepthwiseConvQuantizedDimension]); +#else + filter->dims->data[kDepthwiseConvQuantizedDimension]) && + ((in_ch == filters_num) || (in_ch == 1)); +#endif + return ret_val; +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, + TfLiteDepthwiseConvParams* params, int width, + int height, int filter_width, int filter_height, + const TfLiteType data_type, OpData* data) { + bool has_bias = node->inputs->size == 3; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + int unused_output_height, unused_output_width; + data->padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, 1, 1, height, width, + filter_height, filter_width, params->padding, &unused_output_height, + &unused_output_width); + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. 
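For readers skimming this hunk, the padding that `ComputePaddingHeightWidth` fills in above follows the usual SAME/VALID arithmetic. The helper below is a self-contained sketch of that arithmetic for one spatial dimension, not the TFLM implementation; the names are illustrative.

#include <algorithm>

struct PaddingSketch { int before; int after; };

// SAME padding: output is ceil(in / stride); pad just enough so the dilated
// filter never reads outside the padded input. VALID padding is simply {0, 0}.
inline PaddingSketch SamePadding1D(int in_size, int filter, int stride, int dilation) {
  const int effective_filter = dilation * (filter - 1) + 1;
  const int out_size = (in_size + stride - 1) / stride;  // ceil division
  const int total =
      std::max(0, (out_size - 1) * stride + effective_filter - in_size);
  return {total / 2, total - total / 2};  // any odd pixel goes to the bottom/right
}

// Example: 28 rows, 3x3 filter, stride 2, dilation 1 -> 14 output rows,
// padding {0, 1}.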
+#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kFilterTensor); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + if (data_type != kTfLiteFloat32 && !data->is_mli_applicable) { + int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + + return tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params->activation, + &data->output_multiplier, &data->output_shift, + &data->output_activation_min, &data->output_activation_max, + data->per_channel_output_multiplier, + reinterpret_cast(data->per_channel_output_shift), num_channels); + } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(bias); + micro_context->DeallocateTempTfLiteTensor(output); + +#endif + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto* params = + reinterpret_cast(node->builtin_data); + OpData* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, kOutputTensor); + const TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, kInputTensor); + const TfLiteTensor* filter = micro_context->AllocateTempInputTensor(node, kFilterTensor); + const TfLiteTensor* bias = micro_context->AllocateTempInputTensor(node, kBiasTensor); + const TfLiteType data_type = input->type; + int width = SizeOfDimension(input, 2); + int height = SizeOfDimension(input, 1); + +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) + int filter_width = SizeOfDimension(filter, 1); + int filter_height = SizeOfDimension(filter, 0); +#else + int filter_width = SizeOfDimension(filter, 2); + int filter_height = SizeOfDimension(filter, 1); +#endif + + // Per channel quantization is only needed for int8 inference. For other + // quantized types, only a single scale and zero point is needed. + const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + // Dynamically allocate per-channel quantization parameters. + data->per_channel_output_multiplier = + reinterpret_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + reinterpret_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + data->is_mli_applicable = + IsMliApplicable(context, input, filter, bias, params); + + // All per-channel quantized tensors need valid zero point and scale arrays. 
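The two persistent buffers allocated above hold the per-channel fixed-point multipliers and shifts that `PopulateConvolutionQuantizationParams` later derives from the float scales. As a hedged sketch of that derivation (the real helper lives in TFLM's quantization utilities; `QuantizeSketch` is a made-up name):

#include <cmath>
#include <cstdint>

// Converts a positive real multiplier (input_scale * filter_scale / output_scale)
// into a Q31 fixed-point multiplier plus a power-of-two shift.
inline void QuantizeSketch(double real_multiplier, int32_t* quantized_multiplier,
                           int* shift) {
  if (real_multiplier == 0.0) { *quantized_multiplier = 0; *shift = 0; return; }
  // Split into a mantissa in [0.5, 1) and a power-of-two exponent.
  const double mantissa = std::frexp(real_multiplier, shift);
  int64_t q = static_cast<int64_t>(std::round(mantissa * (1LL << 31)));
  if (q == (1LL << 31)) { q /= 2; ++*shift; }  // handle rounding overflow
  *quantized_multiplier = static_cast<int32_t>(q);
}

// Per output channel c (sketch):
//   double real = input_scale * filter_scale[c] / output_scale;
//   QuantizeSketch(real, &per_channel_output_multiplier[c],
//                  &per_channel_output_shift[c]);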
+ if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + reinterpret_cast( + filter->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->zero_point); + TF_LITE_ENSURE( + context, affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kDepthwiseConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height, + filter_width, filter_height, data_type, + data)); + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + if (data->is_mli_applicable) { + data->mli_in = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_weights = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_bias = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_out = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->cfg = static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_conv2d_cfg))); + +#ifdef MLI_2_0 + const int num_buffers = 2; + data->per_channel_scale_frac_bits = + static_cast(context->AllocatePersistentBuffer( + context, num_buffers * num_channels * sizeof(int16_t))); +#endif + + // Reuse space allocated for OpData parameters. +#ifdef MLI_2_0 + *data->mli_weights.Scale() = + reinterpret_cast(data->per_channel_output_multiplier); + *data->mli_bias.Scale() = + reinterpret_cast(data->per_channel_output_multiplier) + + num_channels; +#else + *data->mli_weights.Scale() = + static_cast(data->per_channel_output_multiplier); + *data->mli_bias.Scale() = + static_cast(data->per_channel_output_shift); +#endif + +#ifdef MLI_2_0 + *data->mli_weights.ZeroPoint() = + reinterpret_cast(data->per_channel_output_shift); + *data->mli_bias.ZeroPoint() = + reinterpret_cast(data->per_channel_output_shift) + + num_channels; +#else + *data->mli_weights.ZeroPoint() = + reinterpret_cast(&data->filter_zero_point); + *data->mli_bias.ZeroPoint() = + reinterpret_cast(&data->filter_zero_point) + sizeof(int16_t); +#endif + +#ifdef MLI_2_0 + *data->mli_weights.ScaleFracBits() = + reinterpret_cast(data->per_channel_scale_frac_bits); + *data->mli_bias.ScaleFracBits() = + reinterpret_cast(data->per_channel_scale_frac_bits) + + num_channels; +#endif + + ops::micro::ConvertToMliTensor(input, &data->mli_in); + ops::micro::ConvertToMliTensorPerChannel(filter, &data->mli_weights, + /* is_bias_tensor = */ false); + ops::micro::ConvertToMliTensorPerChannel(bias, &data->mli_bias, + /* is_bias_tensor = */ true); +#ifdef MLI_2_0 + ops::micro::AdjustBiasTensor(&data->mli_bias, &data->mli_in, + &data->mli_weights); +#endif + ops::micro::ConvertToMliTensor(output, &data->mli_out); + +#ifdef MLI_2_0 + // Choose group convolution function for "channel multiplier" functionality. 
+ const int in_ch = SizeOfDimension(input, 3); + const int filters_num = SizeOfDimension(filter, 3); + const int channels_num = SizeOfDimension(filter, 2); + if (in_ch == filters_num && channels_num == 1) { + data->p_mli_krn_depthwise_conv2d_sa8_sa8_sa32 = + mli_krn_depthwise_conv2d(data->mli_weights.MliTensor()); + } else { + data->p_mli_krn_depthwise_conv2d_sa8_sa8_sa32 = + mli_krn_group_conv2d(data->mli_weights.MliTensor()); + } +#else + data->p_mli_krn_depthwise_conv2d_sa8_sa8_sa32 = + mli_krn_depthwise_conv2d(data->mli_weights.MliTensor(), data->cfg); +#endif + +#ifdef MLI_2_0 + data->cfg->dilation_width = 1; + data->cfg->dilation_height = 1; +#endif + + if (data->output_activation_min == -128 && + data->output_activation_max == 127) { + data->cfg->relu.type = MLI_RELU_NONE; + } else if (params->activation == kTfLiteActRelu) { + data->cfg->relu.type = MLI_RELU_GEN; + } else if (params->activation == kTfLiteActRelu6) { + data->cfg->relu.type = MLI_RELU_6; + } else if (params->activation == kTfLiteActReluN1To1) { + data->cfg->relu.type = MLI_RELU_1; + } else { + data->cfg->relu.type = MLI_RELU_NONE; + } + + data->cfg->stride_width = params->stride_width; + data->cfg->stride_height = params->stride_height; + if (params->padding == kTfLitePaddingValid) { + data->cfg->padding_left = 0; + data->cfg->padding_right = 0; + data->cfg->padding_top = 0; + data->cfg->padding_bottom = 0; + } else { + data->cfg->padding_left = data->padding.width; + data->cfg->padding_right = + data->padding.width + data->padding.width_offset; + data->cfg->padding_top = data->padding.height; + data->cfg->padding_bottom = + data->padding.height + data->padding.height_offset; + } + } + return kTfLiteOk; +} + +void EvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLiteDepthwiseConvParams* params, const OpData& data, + const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + float output_activation_min, output_activation_max; + CalculateActivationRange(params->activation, &output_activation_min, + &output_activation_max); + + tflite::DepthwiseParams op_params; + // Padding type is ignored, but still set. 
+ op_params.padding_type = PaddingType::kSame; + op_params.padding_values.width = data.padding.width; + op_params.padding_values.height = data.padding.height; + op_params.stride_width = params->stride_width; + op_params.stride_height = params->stride_height; + op_params.dilation_width_factor = params->dilation_width_factor; + op_params.dilation_height_factor = params->dilation_height_factor; + op_params.depth_multiplier = params->depth_multiplier; + op_params.float_activation_min = output_activation_min; + op_params.float_activation_max = output_activation_max; + + tflite::reference_ops::DepthwiseConv( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#else + MicroPrintf("Type %s (%d) is not supported by ARC MLI Library.", + TfLiteTypeGetName(input->type), input->type); +#endif +} +TfLiteStatus EvalMliQuantizedPerChannel( + TfLiteContext* context, TfLiteNode* node, TfLiteDepthwiseConvParams* params, + const OpData& data, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + // Run Depthwise Conv MLI kernel + // MLI optimized version only supports int8_t dataype and dilation factor of 1 + if (data.is_mli_applicable) { + // Copy configuration data from external to local memory + mli_conv2d_cfg cfg_local = *data.cfg; + + ops::micro::MliTensorAttachBuffer(input, &data.mli_in); + ops::micro::MliTensorAttachBuffer(filter, &data.mli_weights); + ops::micro::MliTensorAttachBuffer(bias, &data.mli_bias); + ops::micro::MliTensorAttachBuffer(output, &data.mli_out); + + // for height slicing + const int height_dimension = 1; + int in_slice_height = 0; + int out_slice_height = 0; + uint32_t* mli_weights_shape = data.mli_weights.Shape(); +#ifdef MLI_2_0 + const int kernel_height = + static_cast(mli_weights_shape[KRNL_DW_H_DIM_HW1N]); +#else + const int kernel_height = + static_cast(mli_weights_shape[KRNL_DW_H_DIM_HWC]); +#endif + const int overlap = kernel_height - cfg_local.stride_height; + + // for weight slicing (on output channels) + // HWCN layout for weights, output channel dimension is the first dimension. + const int weight_out_ch_dimension = 3; + // bias has only 1 dimension + const int bias_out_ch_dimension = 0; + // Batch-Height-Width-Channel layout means last dimension is output + // channels. + const int out_tensor_ch_dimension = 3; + const int32_t in_channels = data.mli_in.Shape()[out_tensor_ch_dimension]; + const int32_t out_channels = data.mli_out.Shape()[out_tensor_ch_dimension]; + int slice_channels = + static_cast(mli_weights_shape[weight_out_ch_dimension]); + + // Tensors for data in fast (local) memory + // and config to copy data from external to local memory + mli_tensor weights_local = *data.mli_weights.MliTensor(); + mli_tensor bias_local = *data.mli_bias.MliTensor(); + mli_tensor in_local = *data.mli_in.MliTensor(); + mli_tensor out_local = + *data.mli_out.MliTensor(); // this assumes that output shape + // is already filled in the tensor struct. 
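The slicing code that follows moves the input through local memory in horizontal stripes, where neighbouring stripes must overlap by `kernel_height - stride_height` rows so every output row still sees a full receptive field. A rough, assumption-laden sketch of how many stripes that implies:

// Illustrative only; the real slice sizes come from the scratch-buffer helpers.
inline int NumHeightSlicesSketch(int in_height, int slice_rows, int overlap) {
  const int step = slice_rows - overlap;           // fresh rows per stripe
  return (in_height - overlap + step - 1) / step;  // ceil division
}

// Example: 96 input rows, 3x3 kernel, stride 1 (overlap 2), 32-row stripes
// -> step 30, so ceil((96 - 2) / 30) = 4 stripes pass through local memory.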
+ + ops::micro::MliTensorInterface weights_local_interface(&weights_local); + ops::micro::MliTensorInterface bias_local_interface(&bias_local); + ops::micro::MliTensorInterface in_local_interface(&in_local); + ops::micro::MliTensorInterface out_local_interface(&out_local); + + mli_mov_cfg_t copy_config; + mli_mov_cfg_for_copy(©_config); + + TF_LITE_ENSURE_STATUS(ops::micro::get_arc_scratch_buffer_for_conv_tensors( + context, &in_local_interface, &weights_local_interface, + &bias_local_interface, &out_local_interface)); + + /* is_local indicates that the tensor is already in local memory, + so in that case the original tensor can be used, + and there is no need to copy it to the local tensor*/ + const bool in_is_local = + in_local_interface.Data() == data.mli_in.Data(); + const bool out_is_local = + out_local_interface.Data() == data.mli_out.Data(); + const bool w_is_local = weights_local_interface.Data() == + data.mli_weights.Data(); + const bool b_is_local = + bias_local_interface.Data() == data.mli_bias.Data(); + + TF_LITE_ENSURE_STATUS(ops::micro::arc_scratch_buffer_calc_slice_size_io( + &in_local_interface, &out_local_interface, kernel_height, + cfg_local.stride_height, cfg_local.padding_top, + cfg_local.padding_bottom, &in_slice_height, &out_slice_height)); + TF_LITE_ENSURE_STATUS( + ops::micro::arc_scratch_buffer_calc_slice_size_weights( + &weights_local_interface, &bias_local_interface, + weight_out_ch_dimension, &slice_channels)); + + /* if input channels is not equal to output channels, a channel multiplier + is used. in this case the slice channels needs to be rounded down to a + multiple of the input channels */ + if (in_channels != out_channels) { + slice_channels = (slice_channels / in_channels) * in_channels; + } + + ops::micro::TensorSlicer b_slice(data.mli_bias.MliTensor(), + bias_out_ch_dimension, slice_channels); + ops::micro::TensorSlicer w_slice(data.mli_weights.MliTensor(), + weight_out_ch_dimension, slice_channels, 0, + 0, 0, true); + ops::micro::TensorSlicer out_ch_slice(data.mli_out.MliTensor(), + out_tensor_ch_dimension, + slice_channels, 0, 0, 0, true); + ops::micro::TensorSlicer in_ch_slice(data.mli_in.MliTensor(), + out_tensor_ch_dimension, + slice_channels, 0, 0, 0, true); + + mli_tensor* w_ptr = w_is_local ? w_slice.Sub() : &weights_local; + mli_tensor* b_ptr = b_is_local ? b_slice.Sub() : &bias_local; + + void* input_buffer_ptr = NULL; + uint32_t input_buffer_size = 0; + int padding_top = cfg_local.padding_top; + int padding_bottom = cfg_local.padding_bottom; + + while (!w_slice.Done()) { + mli_mov_tensor_sync(w_slice.Sub(), ©_config, w_ptr); + mli_mov_tensor_sync(b_slice.Sub(), ©_config, b_ptr); + + /* input tensor is already sliced in the channel dimension. + out_ch_slice.Sub() is the tensor for the amount of channels of this + iteration of the weight slice loop. This tensor needs to be further + sliced over the batch and height dimension. in_ch_slice.Sub() tensor + contains batches of HWC tensors. so it is a 4 dimensional tensor. because + the mli kernel will process one HWC tensor at a time, the 4 dimensional + tensor needs to be sliced into nBatch 3 dimensional tensors. on top of + that there could be a need to also slice in the Height dimension. for that + the sliceHeight has been calculated. 
The tensor slicer is configured that + it will completely slice the nBatch dimension (0) and slice the height + dimension (1) in chunks of 'sliceHeight' */ + ops::micro::TensorSlicer in_slice(in_ch_slice.Sub(), height_dimension, + in_slice_height, padding_top, + padding_bottom, overlap); + + /* output tensor is already sliced in the output channel dimension. + out_ch_slice.Sub() is the tensor for the amount of output channels of this + iteration of the weight slice loop. This tensor needs to be further + sliced over the batch and height dimension. */ + ops::micro::TensorSlicer out_slice(out_ch_slice.Sub(), height_dimension, + out_slice_height); + + /* setup the pointers to the local or remote tensor to make the code + * inside the loop easier. */ + mli_tensor* in_ptr = in_is_local ? in_slice.Sub() : &in_local; + mli_tensor* out_ptr = out_is_local ? out_slice.Sub() : &out_local; + + while (!out_slice.Done()) { + if (!out_is_local) { + ops::micro::PrepareLocalTensor(out_slice.Sub(), &out_local); + ops::micro::PrepareLocalTensor(in_slice.Sub(), &in_local); + } + TF_LITE_ENSURE(context, !in_slice.Done()); + cfg_local.padding_top = in_slice.GetPaddingPre(); + cfg_local.padding_bottom = in_slice.GetPaddingPost(); + + // if same input copy as previous iteration, skip the copy of input +#ifdef MLI_2_0 + if ((in_slice.Sub()->data.mem.pi8 != input_buffer_ptr) || + (mli_hlp_count_elem_num(in_slice.Sub(), 0) != input_buffer_size)) { + mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); + input_buffer_ptr = in_slice.Sub()->data.mem.pi8; + input_buffer_size = mli_hlp_count_elem_num(in_slice.Sub(), 0); + } + +#ifdef MLI_2_0_KRNL_TEST + // Checking conditions here to prevent usage non-contiguous buffer + // memory. + if (mli_weights_shape[weight_out_ch_dimension] != + w_slice.Sub()->shape[3]) { + MicroPrintf("Slicing is not supported with real-time permutation."); + return kTfLiteError; + } + uint8_t dim_order[] = {1, 2, 0, 3}; + ops::micro::change_shape(w_ptr, dim_order); +#endif + + data.p_mli_krn_depthwise_conv2d_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, + &cfg_local, out_ptr); +#else + if ((in_slice.Sub()->data != input_buffer_ptr) || + (mli_hlp_count_elem_num(in_slice.Sub(), 0) != input_buffer_size)) { + mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); + input_buffer_ptr = in_slice.Sub()->data; + input_buffer_size = mli_hlp_count_elem_num(in_slice.Sub(), 0); + } + data.p_mli_krn_depthwise_conv2d_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, + &cfg_local, out_ptr); +#endif + + mli_mov_tensor_sync(out_ptr, ©_config, out_slice.Sub()); + + in_slice.Next(); + out_slice.Next(); + } + w_slice.Next(); + b_slice.Next(); + out_ch_slice.Next(); + in_ch_slice.Next(); + TF_LITE_ENSURE(context, in_slice.Done()); + } + } + return kTfLiteOk; +} + +void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, + TfLiteDepthwiseConvParams* params, + const OpData& data, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + DepthwiseParams op_params; + op_params.padding_type = PaddingType::kSame; + op_params.padding_values.width = data.padding.width; + op_params.padding_values.height = data.padding.height; + op_params.stride_width = params->stride_width; + op_params.stride_height = params->stride_height; + op_params.dilation_width_factor = params->dilation_width_factor; + op_params.dilation_height_factor = params->dilation_height_factor; + op_params.depth_multiplier = params->depth_multiplier; + 
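Note the sign convention set just below: the input offset is the negated input zero point, while the output offset is the output zero point itself. The following is a simplified sketch (not the kernel's exact fixed-point rounding) of how each int32 accumulator is turned back into an int8 output under that convention:

#include <algorithm>
#include <cmath>
#include <cstdint>

inline int8_t RequantizeSketch(int32_t acc,            // raw int32 accumulator
                               int32_t bias,           // per-channel bias
                               double effective_scale, // in*filter/out scale
                               int32_t output_offset,  // +output zero point
                               int32_t act_min, int32_t act_max) {
  acc += bias;
  int32_t scaled = static_cast<int32_t>(std::lround(acc * effective_scale));
  scaled += output_offset;
  scaled = std::min(std::max(scaled, act_min), act_max);  // fused activation clamp
  return static_cast<int8_t>(scaled);
}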
op_params.input_offset = -data.input_zero_point; + op_params.weights_offset = 0; + op_params.output_offset = data.output_zero_point; + op_params.quantized_activation_min = std::numeric_limits::min(); + op_params.quantized_activation_max = std::numeric_limits::max(); + + reference_integer_ops::DepthwiseConvPerChannel( + op_params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#else + MicroPrintf("Node configuration is not supported by ARC MLI Library."); +#endif +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto* params = + reinterpret_cast(node->builtin_data); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFilterTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kBiasTensor) + : nullptr; + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + EvalFloat(context, node, params, data, input, filter, bias, output); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + if (data.is_mli_applicable) { + EvalMliQuantizedPerChannel(context, node, params, data, input, filter, + bias, output); + } else { + EvalQuantizedPerChannel(context, node, params, data, input, filter, + bias, output); + } + break; + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_DEPTHWISE_CONV_2D() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP == 1 + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" + +#include "sl_mvp_ml_depthwise_conv2d.h" + +namespace tflite { +namespace sl { +namespace depthwise_conv2d { + +constexpr int kInputTensor = 0; +constexpr int kFilterTensor = 1; +constexpr int kBiasTensor = 2; +constexpr int kOutputTensor = 0; + +// Depthwise conv is quantized along dimension 3 of filter tensor. 
+// https://www.tensorflow.org/lite/performance/quantization_spec +constexpr int kDepthwiseConvQuantizedDimension = 3; + +enum op_support { kMvp, kCmsisNN, kTFLMrefF32, kTFLMrefI8 }; + +struct OpData { + op_support supported; + float activation_min_f32; + float activation_max_f32; + int scratch_buffer_index; + sli_mvp_ml_depthwise_conv2d_s8_params_t op_params; + + // CMSIS-NN per channel output multiplier and shift. + int32_t *per_channel_output_multiplier; + int32_t *per_channel_output_shift; +}; + +inline float16_t normalize_fp16(float f) +{ + return (float16_t)std::min(std::max(f, SLI_MVP_FP16_MIN), SLI_MVP_FP16_MAX); +} + +inline PaddingType RuntimePaddingType(TfLitePadding padding) +{ + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + +TfLiteStatus PopulateConvolutionQuantizationParams( + TfLiteContext* context, + const TfLiteTensor* input, + const TfLiteTensor* filter, + TfLiteTensor* output, + const TfLiteFusedActivation& activation, + int32_t* output_activation_min, int32_t* output_activation_max, + float16_t* per_channel_scalers, int num_channels, float accumulator_multipler) +{ + auto affine_quantization = + reinterpret_cast(filter->quantization.params); + + // Populate multiplier and shift using affine quantization. + const float input_scale = input->params.scale; + const float output_scale = output->params.scale; + const float* filter_scales = affine_quantization->scale->data; + + for (int i = 0; i < num_channels; ++i) { + // If per-tensor quantization parameter is specified, broadcast it along the + // quantization dimension (channels_out). 
+ const float filter_scale = filter_scales[i]; + const float effective_output_scale = (input_scale * filter_scale) / output_scale; + const float acc_output_scale = effective_output_scale * accumulator_multipler; + per_channel_scalers[i] = normalize_fp16(acc_output_scale); + } + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, activation, output, output_activation_min, + output_activation_max)); + + return kTfLiteOk; +} + +void *Init(TfLiteContext* context, const char* buffer, size_t length) +{ + (void)buffer; + (void)length; + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) +{ + int scratch_buffer_size = 0; + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = static_cast(node->builtin_data); + + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE(context, filter != nullptr); + + data->op_params.batches = input->dims->data[0]; + data->op_params.in_channels = input->dims->data[3]; + data->op_params.input_height = input->dims->data[1]; + data->op_params.input_width = input->dims->data[2]; + data->op_params.out_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + data->op_params.output_height = output->dims->data[1]; + data->op_params.output_width = output->dims->data[2]; + data->op_params.filter_height = filter->dims->data[1]; + data->op_params.filter_width = filter->dims->data[2]; + data->op_params.input_offset = -input->params.zero_point; + data->op_params.output_offset = output->params.zero_point; + data->op_params.stride_height = params->stride_height; + data->op_params.stride_width = params->stride_width; + data->op_params.dilation_height = params->dilation_height_factor; + data->op_params.dilation_width = params->dilation_width_factor; + data->op_params.padding = params->padding == kTfLitePaddingSame; + + int dummy_height, dummy_width; + const auto padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, + params->dilation_height_factor, params->dilation_width_factor, + data->op_params.input_height, data->op_params.input_width, + data->op_params.filter_height, data->op_params.filter_width, + params->padding, + &dummy_height, &dummy_width); + + data->op_params.pad_height = padding.height; + data->op_params.pad_width = padding.width; + + const int num_channels = data->op_params.out_channels; + + if (input->type == kTfLiteInt8) { + if (sli_mvp_ml_depthwise_conv2d_s8_is_supported(&data->op_params)) { + data->supported = kMvp; + + float16_t *bias_data = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(float16_t))); + if(bias != nullptr) { + data->op_params.bias = bias_data; + int32_t i32_bias; + for(int i = 0; i < num_channels; i++) { + i32_bias = bias->data.i32[i]; + bias_data[i] = float16_t(i32_bias * SLI_MVP_ACCUMULATOR_SCALER); + } + } else { + data->op_params.bias = nullptr; + } + + float16_t *scaler_data = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(float16_t))); + 
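With made-up scales, the per-channel scaler computed above works out as: input_scale 0.02 × filter_scale 0.005 / output_scale 0.1 = 0.001, which is then multiplied by the accumulator multiplier and clamped into the half-precision range before narrowing to float16. A minimal sketch of that clamp, with the bounds passed in rather than hard-coding the SLI_MVP_FP16_* constants:

#include <algorithm>

// Keeps the per-channel scaler representable in half precision before narrowing.
inline float ClampScaleSketch(float scale, float fp16_min, float fp16_max) {
  return std::min(std::max(scale, fp16_min), fp16_max);
}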
data->op_params.output_scaler = scaler_data; + TF_LITE_ENSURE_STATUS(PopulateConvolutionQuantizationParams( + context, input, filter, output, params->activation, + reinterpret_cast(&data->op_params.output_activation_min), + reinterpret_cast(&data->op_params.output_activation_max), + scaler_data, num_channels, SLI_MVP_ACCUMULATOR_MULTIPLIER)); + + } else { + data->per_channel_output_multiplier = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + int32_t dummy_output_multiplier; + int dummy_output_shift; + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, params->activation, + &dummy_output_multiplier, &dummy_output_shift, + reinterpret_cast(&data->op_params.output_activation_min), + reinterpret_cast(&data->op_params.output_activation_max), + data->per_channel_output_multiplier, + reinterpret_cast(data->per_channel_output_shift), + num_channels)); + + if (data->op_params.dilation_height == 1 && data->op_params.dilation_width == 1) { + data->supported = kCmsisNN; + cmsis_nn_dw_conv_params dw_conv_params; + dw_conv_params.input_offset = data->op_params.input_offset; + dw_conv_params.output_offset = data->op_params.output_offset; + dw_conv_params.stride.h = data->op_params.stride_height; + dw_conv_params.stride.w = data->op_params.stride_width; + dw_conv_params.dilation.h = 1; + dw_conv_params.dilation.w = 1; + dw_conv_params.padding.h = data->op_params.pad_height; + dw_conv_params.padding.w = data->op_params.pad_width; + dw_conv_params.activation.min = data->op_params.output_activation_min; + dw_conv_params.activation.max = data->op_params.output_activation_max; + dw_conv_params.ch_mult = data->op_params.out_channels / data->op_params.in_channels; + + cmsis_nn_dims input_dims; + input_dims.n = data->op_params.batches; + input_dims.h = data->op_params.input_height; + input_dims.w = data->op_params.input_width; + input_dims.c = data->op_params.in_channels; + + cmsis_nn_dims filter_dims; + filter_dims.h = data->op_params.filter_height; + filter_dims.w = data->op_params.filter_width; + + cmsis_nn_dims output_dims; + output_dims.h = data->op_params.output_height; + output_dims.w = data->op_params.output_width; + output_dims.c = data->op_params.out_channels; + + scratch_buffer_size = arm_depthwise_conv_wrapper_s8_get_buffer_size( + &dw_conv_params, &input_dims, &filter_dims, &output_dims); + } else { + data->supported = kTFLMrefI8; + } + } + + } else if (input->type == kTfLiteFloat32) { + data->supported = kTFLMrefF32; + CalculateActivationRange(params->activation, + &data->activation_min_f32, + &data->activation_max_f32); + + } else { + TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + if(scratch_buffer_size > 0) { + TF_LITE_ENSURE_STATUS( + context->RequestScratchBufferInArena( + context, scratch_buffer_size, &data->scratch_buffer_index)); + } else { + data->scratch_buffer_index = -1; + } + + return kTfLiteOk; +} + +TfLiteStatus eval_mvp_int8(TfLiteContext* context, + OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + TfLiteEvalTensor* output) +{ + data->op_params.input = tflite::micro::GetTensorData(input); + data->op_params.output = tflite::micro::GetTensorData(output); + data->op_params.filter = tflite::micro::GetTensorData(filter); + + 
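Both the MVP and CMSIS-NN branches rely on the depthwise "channel multiplier" relation: each input channel produces `ch_mult` output channels, so `out_channels` must divide evenly by `in_channels`. A tiny illustrative helper (shapes in the comment are assumed):

#include <cassert>

inline int DepthMultiplierSketch(int in_channels, int out_channels) {
  assert(out_channels % in_channels == 0);  // required for depthwise conv
  return out_channels / in_channels;        // e.g. 8 in, 16 out -> ch_mult = 2
}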
TF_LITE_ENSURE_EQ(context, SL_STATUS_OK, sli_mvp_ml_depthwise_conv2d_s8(&data->op_params)); + + return kTfLiteOk; +} + +TfLiteStatus eval_cmsis_int8(TfLiteContext* context, + OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) +{ + cmsis_nn_dims input_dims; + input_dims.n = data->op_params.batches; + input_dims.h = data->op_params.input_height; + input_dims.w = data->op_params.input_width; + input_dims.c = data->op_params.in_channels; + + cmsis_nn_dims filter_dims; + filter_dims.n = data->op_params.in_channels; + filter_dims.h = data->op_params.filter_height; + filter_dims.w = data->op_params.filter_width; + filter_dims.c = data->op_params.out_channels; + + cmsis_nn_dims bias_dims; + bias_dims.n = 1; + bias_dims.h = 1; + bias_dims.w = 1; + bias_dims.c = data->op_params.out_channels; + + cmsis_nn_dims output_dims; + output_dims.n = data->op_params.batches; + output_dims.h = data->op_params.output_height; + output_dims.w = data->op_params.output_width; + output_dims.c = data->op_params.out_channels; + + cmsis_nn_per_channel_quant_params quant_params; + quant_params.multiplier = data->per_channel_output_multiplier; + quant_params.shift = data->per_channel_output_shift; + + cmsis_nn_dw_conv_params dw_conv_params; + dw_conv_params.input_offset = data->op_params.input_offset; + dw_conv_params.output_offset = data->op_params.output_offset; + dw_conv_params.stride.h = data->op_params.stride_height; + dw_conv_params.stride.w = data->op_params.stride_width; + dw_conv_params.dilation.h = 1; + dw_conv_params.dilation.w = 1; + dw_conv_params.padding.h = data->op_params.pad_height; + dw_conv_params.padding.w = data->op_params.pad_width; + dw_conv_params.activation.min = data->op_params.output_activation_min; + dw_conv_params.activation.max = data->op_params.output_activation_max; + dw_conv_params.ch_mult = data->op_params.out_channels / data->op_params.in_channels; + + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + if (data->scratch_buffer_index > -1) { + ctx.buf = context->GetScratchBuffer(context, data->scratch_buffer_index); + } + TFLITE_DCHECK_EQ(ARM_MATH_SUCCESS, + arm_depthwise_conv_wrapper_s8( + &ctx, &dw_conv_params, &quant_params, + &input_dims, tflite::micro::GetTensorData(input), + &filter_dims, tflite::micro::GetTensorData(filter), + &bias_dims, bias == nullptr ? 
NULL : tflite::micro::GetTensorData(bias), + &output_dims, tflite::micro::GetTensorData(output))); + + return kTfLiteOk; +} + +TfLiteStatus eval_tflm_int8(OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) +{ + DepthwiseParams dw_op_params; + + dw_op_params.input_offset = data->op_params.input_offset; + dw_op_params.output_offset = data->op_params.output_offset; + dw_op_params.stride_height = data->op_params.stride_height; + dw_op_params.stride_width = data->op_params.stride_width; + dw_op_params.dilation_height_factor = data->op_params.dilation_height; + dw_op_params.dilation_width_factor = data->op_params.dilation_width; + dw_op_params.padding_values.height = data->op_params.pad_height; + dw_op_params.padding_values.width = data->op_params.pad_width; + dw_op_params.quantized_activation_min = data->op_params.output_activation_min; + dw_op_params.quantized_activation_max = data->op_params.output_activation_max; + dw_op_params.depth_multiplier = data->op_params.out_channels / data->op_params.in_channels; + + reference_integer_ops::DepthwiseConvPerChannel( + dw_op_params, + data->per_channel_output_multiplier, + data->per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + bias == nullptr ? nullptr : tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; +} + +TfLiteStatus eval_float(TfLiteDepthwiseConvParams* params, + const OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) +{ + DepthwiseParams dw_op_params; + + dw_op_params.padding_type = RuntimePaddingType(params->padding); + dw_op_params.padding_values.width = data->op_params.pad_width; + dw_op_params.padding_values.height = data->op_params.pad_height; + dw_op_params.stride_width = data->op_params.stride_width; + dw_op_params.stride_height = data->op_params.stride_height; + dw_op_params.dilation_width_factor = data->op_params.dilation_width; + dw_op_params.dilation_height_factor = data->op_params.dilation_height; + dw_op_params.float_activation_min = data->activation_min_f32; + dw_op_params.float_activation_max = data->activation_max_f32; + dw_op_params.depth_multiplier = data->op_params.out_channels / data->op_params.in_channels; + + reference_ops::DepthwiseConv(dw_op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + bias == nullptr ? nullptr : tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; +} + +TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node) +{ + TfLiteStatus status = kTfLiteError; + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto* params = reinterpret_cast(node->builtin_data); + OpData* data = static_cast(node->user_data); + + const auto input = tflite::micro::GetEvalInput(context, node, kInputTensor); + const auto filter = tflite::micro::GetEvalInput(context, node, kFilterTensor); + const auto bias = NumInputs(node) == 3 + ? 
tflite::micro::GetEvalInput(context, node, kBiasTensor) + : nullptr; + auto output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (data->supported == kMvp) { + status = eval_mvp_int8(context, data, input, filter, output); + + } else if (data->supported == kCmsisNN) { + status = eval_cmsis_int8(context, data, input, filter, bias, output); + + } else if (data->supported == kTFLMrefI8) { + status = eval_tflm_int8(data, input, filter, bias, output); + + } else if (data->supported == kTFLMrefF32) { + #if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + status = eval_float(params, data, input, filter, bias, output); + } + + return status; +} + +} // namespace depthwise_conv2d +} // namespace sl + +TfLiteRegistration Register_DEPTHWISE_CONV_2D() { + return {/*init=*/sl::depthwise_conv2d::Init, + /*free=*/nullptr, + /*prepare=*/sl::depthwise_conv2d::Prepare, + /*invoke=*/sl::depthwise_conv2d::Invoke, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN == 1 +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +#include + +#if ESP_NN +#include "edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h" +#endif + +long long dc_total_time = 0; + +namespace tflite { +namespace { + +struct NodeData { + OpDataConv op_data; +#if ESP_NN + int buffer_idx; +#endif +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(NodeData)); +} + +#if ESP_NN +inline void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, + const TfLiteDepthwiseConvParams& params, + const NodeData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + const int dilation_width_factor = params.dilation_width_factor; + const int dilation_height_factor = params.dilation_height_factor; + + if (dilation_width_factor == 1 && dilation_height_factor == 1) { + // Get parameters. + RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); + RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); + + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + const int8_t *input_data = tflite::micro::GetTensorData(input); + int8_t *output_data = tflite::micro::GetTensorData(output); + + const int depth_multiplier = params.depth_multiplier; + const int32_t input_offset = -data.op_data.input_zero_point; + const int32_t output_offset = data.op_data.output_zero_point; + const int stride_width = params.stride_width; + const int stride_height = params.stride_height; + const int pad_width = data.op_data.padding.width; + const int pad_height = data.op_data.padding.height; + + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int input_depth = input_shape.Dims(3); + const int filter_height = filter_shape.Dims(1); + const int filter_width = filter_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + + // Set min and max value of the output. 
+ const int32_t activation_min = data.op_data.output_activation_min; + const int32_t activation_max = data.op_data.output_activation_max; + + // Consistency check. + TFLITE_DCHECK_LE(activation_min, activation_max); + const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); + const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); + + TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier); + if (tflite::micro::GetTensorData(bias)) { + TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); + } + + const int input_size = input_width * input_height * input_depth; + const int output_size = output_width * output_height * output_depth; + void *scratch_buf = NULL; + if (data.buffer_idx > -1) { + scratch_buf = context->GetScratchBuffer(context, data.buffer_idx); + } + + esp_nn_set_depthwise_conv_scratch_buf(scratch_buf); + + data_dims_t input_dims = { + .width = input_width, .height = input_height, + .channels = input_depth, 1 + }; + data_dims_t output_dims = { + .width = output_width, .height = output_height, + .channels = output_depth, 1 + }; + data_dims_t filter_dims = {.width = filter_width, .height = filter_height, 0, 0}; + dw_conv_params_t conv_params = { + .in_offset = input_offset, .out_offset = output_offset, + .ch_mult = depth_multiplier, + .stride = {stride_width, stride_height}, + .padding = {pad_width, pad_height}, .dilation = {0, 0}, + .activation = {activation_min, activation_max} + }; + quant_data_t quant_data = { + .shift = data.op_data.per_channel_output_shift, + .mult = data.op_data.per_channel_output_multiplier + }; + + for (int i_batch = 0; i_batch < batch_size; i_batch++) { + esp_nn_depthwise_conv_s8(&input_dims, input_data + i_batch * input_size, + &filter_dims, tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorData(bias), + &output_dims, output_data + i_batch * output_size, + &conv_params, &quant_data); + } + } else { + reference_integer_ops::DepthwiseConvPerChannel( + DepthwiseConvParamsQuantized(params, data.op_data), + data.op_data.per_channel_output_multiplier, + data.op_data.per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} +#endif + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + NodeData* data = static_cast(node->user_data); + const TfLiteDepthwiseConvParams& params = + *(static_cast(node->builtin_data)); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kConvBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + const int input_width = input->dims->data[2]; + const int input_height = input->dims->data[1]; + const int filter_width = filter->dims->data[2]; + const int filter_height = filter->dims->data[1]; + const int output_width = output->dims->data[2]; + 
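The ESP-NN path uses TFLM's two-step scratch-buffer handshake: the size is requested during Prepare() (just below) and the pointer is resolved again at Eval() time (as in the GetScratchBuffer call above). A generic sketch of that pattern, assuming only the TfLiteContext hooks already used elsewhere in this file; the helper name is illustrative:

// Request a scratch buffer once, at Prepare() time.
TfLiteStatus RequestScratchSketch(TfLiteContext* context, int size_bytes,
                                  int* scratch_index) {
  *scratch_index = -1;  // -1 means "no scratch buffer needed"
  if (size_bytes > 0) {
    return context->RequestScratchBufferInArena(context, size_bytes,
                                                scratch_index);
  }
  return kTfLiteOk;
}
// Later, in Eval():
//   void* buf = (idx > -1) ? context->GetScratchBuffer(context, idx) : nullptr;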
const int output_height = output->dims->data[1]; + + // Dynamically allocate per-channel quantization parameters. + const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + data->op_data.per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->op_data.per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + // All per-channel quantized tensors need valid zero point and scale arrays. + if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TFLITE_DCHECK(affine_quantization != nullptr); + TFLITE_DCHECK(affine_quantization->scale != nullptr); + TFLITE_DCHECK(affine_quantization->zero_point != nullptr); + + TF_LITE_ENSURE( + context, affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kDepthwiseConvQuantizedDimension]); + + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv( + context, node, params, input_width, input_height, filter_width, + filter_height, output_width, output_height, input->type, &data->op_data)); + +#if ESP_NN + if (input->type == kTfLiteInt8) { + data_dims_t input_dims = { + .width = input_width, .height = input_height, + .channels = input->dims->data[3], 1 + }; + data_dims_t output_dims = { + .width = output_width, .height = output_height, + .channels = output->dims->data[3], 1 + }; + data_dims_t filter_dims = {.width = filter_width, .height = filter_height, 0, 0}; + dw_conv_params_t conv_params = { + .in_offset = 0, .out_offset = 0, + .ch_mult = params.depth_multiplier, + .stride = {params.stride_width, params.stride_height}, + .padding = {data->op_data.padding.width, data->op_data.padding.height}, + .dilation = {0, 0}, .activation = {-128, 127} + }; + + int scratch_buf_size = esp_nn_get_depthwise_conv_scratch_size( + &input_dims, &filter_dims, &output_dims, &conv_params); + if (scratch_buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, scratch_buf_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } +#endif + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(bias); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto& params = + *(reinterpret_cast(node->builtin_data)); + const NodeData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + long long start_time = esp_timer_get_time(); + switch (input->type) { // Already know in/out types are same. 
+ case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + tflite::reference_ops::DepthwiseConv( + DepthwiseConvParamsFloat(params, data.op_data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif +#if ESP_NN + EvalQuantizedPerChannel(context, node, params, data, input, filter, bias, + output); +#else + reference_integer_ops::DepthwiseConvPerChannel( + DepthwiseConvParamsQuantized(params, data.op_data), + data.op_data.per_channel_output_multiplier, + data.op_data.per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#endif + break; + case kTfLiteUInt8: +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_U8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + //EvalQuantized(context, node, params, &data, input, filter, bias, output); + reference_ops::DepthwiseConv( + DepthwiseConvParamsQuantized(params, data.op_data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + long long time_this_instance = esp_timer_get_time() - start_time; + dc_total_time += time_this_instance; + // printf("time this instance: %llu\n", time_this_instance / 1000); + + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_DEPTHWISE_CONV_2D() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite + +#else +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataConv)); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto& params = + *(reinterpret_cast(node->builtin_data)); + const OpDataConv& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 3) + ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) + : nullptr; + + switch (input->type) { // Already know in/out types are same. 
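The int8 path of this fallback handles int4-packed filters by unpacking them into a scratch buffer before calling the reference kernel (the kTfLiteInt4 branch below). A rough sketch of such an unpack step, assuming two signed 4-bit values per byte with the low nibble holding the even-indexed element; the SDK's UnpackDenseInt4IntoInt8 is the authoritative implementation:

#include <cstddef>
#include <cstdint>

// Illustrative only: sign-extend packed 4-bit weights into int8 values in [-8, 7].
void UnpackInt4ToInt8(const int8_t* packed, size_t num_elements, int8_t* unpacked) {
  for (size_t i = 0; i < num_elements; ++i) {
    const uint8_t byte = static_cast<uint8_t>(packed[i / 2]);
    const uint8_t nibble = (i % 2 == 0) ? (byte & 0x0F) : (byte >> 4);
    unpacked[i] = static_cast<int8_t>(nibble >= 8 ? nibble - 16 : nibble);
  }
}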
+ case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + tflite::reference_ops::DepthwiseConv( + DepthwiseConvParamsFloat(params, data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + case kTfLiteInt8: { +#if EI_TFLITE_DISABLE_DEPTHWISE_CONV_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + switch (filter->type) { + case kTfLiteInt4: { + int8_t* unpacked_filter_data = static_cast( + context->GetScratchBuffer(context, data.filter_buffer_index)); + tflite::tensor_utils::UnpackDenseInt4IntoInt8( + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(filter).FlatSize(), + unpacked_filter_data); + reference_integer_ops::DepthwiseConvPerChannel( + DepthwiseConvParamsQuantized(params, data), + data.per_channel_output_multiplier, data.per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), unpacked_filter_data, + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + case kTfLiteInt8: { + reference_integer_ops::DepthwiseConvPerChannel( + DepthwiseConvParamsQuantized(params, data), + data.per_channel_output_multiplier, data.per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + default: + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), filter->type); + return kTfLiteError; + } + break; + } + default: + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_DEPTHWISE_CONV_2D() { + return tflite::micro::RegisterOp(Init, DepthwiseConvPrepare, Eval); +} + +} // namespace tflite + +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cpp deleted file mode 100644 index f90d03c..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.cpp +++ /dev/null @@ -1,1015 +0,0 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels -#include "../../../../classifier/ei_classifier_config.h" -#if 0 == 1 -/* noop */ -#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h" - -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -struct OpData { - OpDataConv reference_op_data; - - // Index to buffer for optimizations if applicable. - int buffer_idx; -}; - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto& params = - *(reinterpret_cast(node->builtin_data)); - - const TfLiteTensor* input = - GetInput(context, node, kDepthwiseConvInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = - GetInput(context, node, kDepthwiseConvWeightsTensor); - TF_LITE_ENSURE(context, filter != nullptr); - TfLiteTensor* output = GetOutput(context, node, kDepthwiseConvOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - const TfLiteType data_type = input->type; - int input_width = SizeOfDimension(input, 2); - int input_height = SizeOfDimension(input, 1); - int filter_width = SizeOfDimension(filter, 2); - int filter_height = SizeOfDimension(filter, 1); - int output_width = SizeOfDimension(output, 2); - int output_height = SizeOfDimension(output, 1); - - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - // All per-channel quantized tensors need valid zero point and scale arrays. 
- const auto* affine_quantization = - reinterpret_cast( - filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - TF_LITE_ENSURE( - context, affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kDepthwiseConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - // Allocate memory for per-channel quantization parameters - const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - - data->reference_op_data.per_channel_output_multiplier = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->reference_op_data.per_channel_output_shift = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv( - context, node, params, input_width, input_height, filter_width, - filter_height, output_width, output_height, data_type, - &data->reference_op_data)); - - if (input->type == kTfLiteInt8) { - RuntimeShape input_shape = GetTensorShape(input); - RuntimeShape output_shape = GetTensorShape(output); - RuntimeShape filter_shape = GetTensorShape(filter); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(output_shape, 3, filter_shape, 3); - TFLITE_DCHECK_EQ(batch_size, 1); /* Only batch = 1 is supported */ - - cmsis_nn_dims input_dims; - input_dims.n = batch_size; - input_dims.h = input_height; - input_dims.w = input_width; - input_dims.c = input_shape.Dims(3); - - cmsis_nn_dims filter_dims; - filter_dims.n = 1; - filter_dims.h = filter_height; - filter_dims.w = filter_width; - filter_dims.c = output_depth; - - cmsis_nn_dims output_dims; - output_dims.n = batch_size; - output_dims.h = output_height; - output_dims.w = output_width; - output_dims.c = output_depth; - - cmsis_nn_dw_conv_params dw_conv_params; - dw_conv_params.padding.h = data->reference_op_data.padding.height; - dw_conv_params.padding.w = data->reference_op_data.padding.width; - - const int32_t buf_size = arm_depthwise_conv_wrapper_s8_get_buffer_size( - &dw_conv_params, &input_dims, &filter_dims, &output_dims); - - if (buf_size > 0) { - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, buf_size, &data->buffer_idx)); - } else { - data->buffer_idx = -1; - } - } - return kTfLiteOk; -} - -void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, - const TfLiteDepthwiseConvParams& params, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - cmsis_nn_dw_conv_params dw_conv_params; - dw_conv_params.dilation.h = params.dilation_height_factor; - dw_conv_params.dilation.w = params.dilation_width_factor; - // Call to reference implementation can be removed when dilation is supported - // in the optimized implementations. 
- if (1 == dw_conv_params.dilation.h && 1 == dw_conv_params.dilation.w) { - dw_conv_params.input_offset = -data.reference_op_data.input_zero_point; - dw_conv_params.output_offset = data.reference_op_data.output_zero_point; - dw_conv_params.stride.h = params.stride_height; - dw_conv_params.stride.w = params.stride_width; - dw_conv_params.padding.h = data.reference_op_data.padding.height; - dw_conv_params.padding.w = data.reference_op_data.padding.width; - // TODO(b/130439627): Use calculated value for clamping. - dw_conv_params.activation.min = std::numeric_limits::min(); - dw_conv_params.activation.max = std::numeric_limits::max(); - dw_conv_params.ch_mult = params.depth_multiplier; - - cmsis_nn_per_channel_quant_params quant_params; - quant_params.multiplier = - data.reference_op_data.per_channel_output_multiplier; - quant_params.shift = data.reference_op_data.per_channel_output_shift; - - RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - RuntimeShape bias_shape = tflite::micro::GetTensorShape(bias); - - TFLITE_DCHECK_LE(dw_conv_params.activation.min, - dw_conv_params.activation.max); - - const int batch_size = MatchingDim(input_shape, 0, output_shape, 0); - const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3); - - if (tflite::micro::GetTensorData(bias)) { - TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth); - } - - cmsis_nn_dims input_dims; - input_dims.n = batch_size; - input_dims.h = input_shape.Dims(1); - input_dims.w = input_shape.Dims(2); - input_dims.c = input_shape.Dims(3); - - cmsis_nn_dims filter_dims; - filter_dims.n = filter_shape.Dims(0); - filter_dims.h = filter_shape.Dims(1); - filter_dims.w = filter_shape.Dims(2); - filter_dims.c = output_depth; - - cmsis_nn_dims bias_dims; - bias_dims.n = 1; - bias_dims.h = 1; - bias_dims.w = 1; - bias_dims.c = output_depth; - - cmsis_nn_dims output_dims; - output_dims.n = batch_size; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = output_depth; - - cmsis_nn_context ctx; - ctx.buf = nullptr; - /* 'size' is unused */ - ctx.size = 0; - - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - } - - TFLITE_DCHECK_EQ( - arm_depthwise_conv_wrapper_s8( - &ctx, &dw_conv_params, &quant_params, &input_dims, - tflite::micro::GetTensorData(input), &filter_dims, - tflite::micro::GetTensorData(filter), &bias_dims, - tflite::micro::GetTensorData(bias), &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - } else { - reference_integer_ops::DepthwiseConvPerChannel( - DepthwiseConvParamsQuantized(params, data.reference_op_data), - data.reference_op_data.per_channel_output_multiplier, - data.reference_op_data.per_channel_output_shift, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - const auto& params = - *(reinterpret_cast(node->builtin_data)); - const OpData& data = *(static_cast(node->user_data)); - - TfLiteEvalTensor* output = - 
tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) - : nullptr; - - switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: { - tflite::reference_ops::DepthwiseConv( - DepthwiseConvParamsFloat(params, data.reference_op_data), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - case kTfLiteInt8: - EvalQuantizedPerChannel(context, node, params, data, input, filter, bias, - output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_DEPTHWISE_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#elif EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" - -#include "mli_api.h" // NOLINT -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -constexpr int kInputTensor = 0; -constexpr int kFilterTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -// Depthwise conv is quantized along dimension 3: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kDepthwiseConvQuantizedDimension = 3; - -struct OpData { - TfLitePaddingValues padding; - - // Cached tensor zero point values for quantized operations. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - - // Per channel output multiplier and shift. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; - - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; - - // The result of checking if MLI optimized version of tensors can be used. - bool is_mli_applicable; - - // Tensors in MLI format. - mli_tensor* mli_in; - mli_tensor* mli_weights; - mli_tensor* mli_bias; - mli_tensor* mli_out; - mli_conv2d_cfg* cfg; -}; - -bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input, - const TfLiteTensor* filter, const TfLiteTensor* bias, - const TfLiteDepthwiseConvParams* params) { - const auto* affine_quantization = - reinterpret_cast(filter->quantization.params); - const int in_ch = SizeOfDimension(input, 3); - const int filters_num = SizeOfDimension(filter, 3); - - // MLI optimized version only supports int8_t datatype, dilation factor of 1 - // and per-axis quantization of weights (no broadcasting/per-tensor) (in_ch == - // filters_num) || (in_ch == 1)) is a forbidding of channel multiplier logic - // for multichannel input. 
- bool ret_val = (filter->type == kTfLiteInt8) && - (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) && - (params->dilation_width_factor == 1) && - (params->dilation_height_factor == 1) && - (affine_quantization->scale->size == - filter->dims->data[kDepthwiseConvQuantizedDimension]) && - ((in_ch == filters_num) || (in_ch == 1)); - return ret_val; -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, int width, - int height, int filter_width, int filter_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 3; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - int unused_output_height, unused_output_width; - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, 1, 1, height, width, - filter_height, filter_width, params->padding, &unused_output_height, - &unused_output_width); - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - if (data_type != kTfLiteFloat32 && !data->is_mli_applicable) { - int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - - return tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->output_multiplier, &data->output_shift, - &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), num_channels); - } -#endif - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* params = - reinterpret_cast(node->builtin_data); - OpData* data = static_cast(node->user_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - - const TfLiteType data_type = input->type; - int width = SizeOfDimension(input, 2); - int height = SizeOfDimension(input, 1); - int filter_width = SizeOfDimension(filter, 2); - int filter_height = SizeOfDimension(filter, 1); - - // Per channel quantization is only needed for int8 inference. For other - // quantized types, only a single scale and zero point is needed. - const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - // Dynamically allocate per-channel quantization parameters. 
- data->per_channel_output_multiplier = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - reinterpret_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - data->is_mli_applicable = - IsMliApplicable(context, input, filter, bias, params); - - // All per-channel quantized tensors need valid zero point and scale arrays. - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - reinterpret_cast( - filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - TF_LITE_ENSURE( - context, affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kDepthwiseConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height, - filter_width, filter_height, data_type, - data)); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - if (data->is_mli_applicable) { - data->mli_in = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_weights = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_bias = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_out = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->cfg = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_conv2d_cfg))); - - // reuse space allocated for OpData parameters - data->mli_weights->el_params.asym.scale.pi32 = - static_cast(data->per_channel_output_multiplier); - data->mli_bias->el_params.asym.scale.pi32 = - static_cast(data->per_channel_output_shift); - - data->mli_weights->el_params.asym.zero_point.pi16 = - reinterpret_cast(&data->filter_zero_point); - data->mli_bias->el_params.asym.zero_point.pi16 = - reinterpret_cast(&data->filter_zero_point) + sizeof(int16_t); - - ops::micro::ConvertToMliTensor(input, data->mli_in); - ops::micro::ConvertToMliTensorPerChannel(filter, data->mli_weights); - ops::micro::ConvertToMliTensorPerChannel(bias, data->mli_bias); - ops::micro::ConvertToMliTensor(output, data->mli_out); - - if (params->activation == kTfLiteActRelu) { - data->cfg->relu.type = MLI_RELU_GEN; - } else if (params->activation == kTfLiteActRelu6) { - data->cfg->relu.type = MLI_RELU_6; - } else if (params->activation == kTfLiteActReluN1To1) { - data->cfg->relu.type = MLI_RELU_1; - } else { - data->cfg->relu.type = MLI_RELU_NONE; - } - - data->cfg->stride_width = params->stride_width; - data->cfg->stride_height = params->stride_height; - if (params->padding == kTfLitePaddingValid) { - data->cfg->padding_left = 0; - data->cfg->padding_right = 0; - data->cfg->padding_top = 0; - data->cfg->padding_bottom = 0; - } else { - data->cfg->padding_left = data->padding.width; - data->cfg->padding_right = - data->padding.width + data->padding.width_offset; - data->cfg->padding_top = data->padding.height; - data->cfg->padding_bottom = - data->padding.height + data->padding.height_offset; - } - } - return kTfLiteOk; -} - -void 
EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - - tflite::DepthwiseParams op_params; - // Padding type is ignored, but still set. - op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - - tflite::reference_ops::DepthwiseConv( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -#else - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); -#endif -} -TfLiteStatus EvalMliQuantizedPerChannel( - TfLiteContext* context, TfLiteNode* node, TfLiteDepthwiseConvParams* params, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - // Run Depthwise Conv MLI kernel - // MLI optimized version only supports int8_t dataype and dilation factor of 1 - if (data.is_mli_applicable) { - // Copy configuration data from external to local memory - mli_conv2d_cfg cfg_local = *data.cfg; - - ops::micro::MliTensorAttachBuffer(input, data.mli_in); - ops::micro::MliTensorAttachBuffer(filter, data.mli_weights); - ops::micro::MliTensorAttachBuffer(bias, data.mli_bias); - ops::micro::MliTensorAttachBuffer(output, data.mli_out); - - // for height slicing - const int heightDimension = 1; - int inSliceHeight = 0; - int outSliceHeight = 0; - const int kernelHeight = - static_cast(data.mli_weights->shape[KRNL_DW_H_DIM_HWC]); - const int overlap = kernelHeight - cfg_local.stride_height; - - // for weight slicing (on output channels) - // HWCN layout for weights, output channel dimension is the first dimension. - const int weight_out_ch_dimension = 3; - // bias has only 1 dimension - const int bias_out_ch_dimension = 0; - // Batch-Height-Width-Channel layout means last dimension is output - // channels. 
- const int out_tensor_ch_dimension = 3; - const int32_t in_channels = data.mli_in->shape[out_tensor_ch_dimension]; - const int32_t out_channels = data.mli_out->shape[out_tensor_ch_dimension]; - int slice_channels = - static_cast(data.mli_weights->shape[weight_out_ch_dimension]); - - // Tensors for data in fast (local) memory - // and config to copy data from external to local memory - mli_tensor weights_local = *data.mli_weights; - mli_tensor bias_local = *data.mli_bias; - mli_tensor in_local = *data.mli_in; - mli_tensor out_local = - *data.mli_out; // this assumes that output shape - // is already filled in the tensor struct. - mli_mov_cfg_t copy_config; - mli_mov_cfg_for_copy(©_config); - - TF_LITE_ENSURE_STATUS(ops::micro::get_arc_scratch_buffer_for_conv_tensors( - context, &in_local, &weights_local, &bias_local, &out_local)); - /* is_local indicates that the tensor is already in local memory, - so in that case the original tensor can be used, - and there is no need to copy it to the local tensor*/ - const bool in_is_local = in_local.data == data.mli_in->data; - const bool out_is_local = out_local.data == data.mli_out->data; - const bool w_is_local = weights_local.data == data.mli_weights->data; - const bool b_is_local = bias_local.data == data.mli_bias->data; - - TF_LITE_ENSURE_STATUS(ops::micro::arc_scratch_buffer_calc_slice_size_io( - &in_local, &out_local, kernelHeight, cfg_local.stride_height, - cfg_local.padding_top, cfg_local.padding_bottom, &inSliceHeight, - &outSliceHeight)); - TF_LITE_ENSURE_STATUS( - ops::micro::arc_scratch_buffer_calc_slice_size_weights( - &weights_local, &bias_local, weight_out_ch_dimension, - &slice_channels)); - - /* if input channels is not equal to output channels, a channel multiplier - is used. in this case the slice channels needs to be rounded down to a - multiple of the input channels */ - if (in_channels != out_channels) { - slice_channels = (slice_channels / in_channels) * in_channels; - } - - ops::micro::TensorSlicer b_slice(data.mli_bias, bias_out_ch_dimension, - slice_channels); - ops::micro::TensorSlicer w_slice(data.mli_weights, weight_out_ch_dimension, - slice_channels, 0, 0, 0, true); - ops::micro::TensorSlicer out_ch_slice(data.mli_out, out_tensor_ch_dimension, - slice_channels, 0, 0, 0, true); - ops::micro::TensorSlicer in_ch_slice(data.mli_in, out_tensor_ch_dimension, - slice_channels, 0, 0, 0, true); - - mli_tensor* w_ptr = w_is_local ? w_slice.Sub() : &weights_local; - mli_tensor* b_ptr = b_is_local ? b_slice.Sub() : &bias_local; - - void* input_buffer_ptr = NULL; - uint32_t input_buffer_size = 0; - int padding_top = cfg_local.padding_top; - int padding_bottom = cfg_local.padding_bottom; - - while (!w_slice.Done()) { - mli_mov_tensor_sync(w_slice.Sub(), ©_config, w_ptr); - mli_mov_tensor_sync(b_slice.Sub(), ©_config, b_ptr); - - /* input tensor is already sliced in the channel dimension. - out_ch_slice.Sub() is the tensor for the amount of channels of this - iteration of the weight slice loop. This tensor needs to be further - sliced over the batch and height dimension. in_ch_slice.Sub() tensor - contains batches of HWC tensors. so it is a 4 dimensional tensor. because - the mli kernel will process one HWC tensor at a time, the 4 dimensional - tensor needs to be sliced into nBatch 3 dimensional tensors. on top of - that there could be a need to also slice in the Height dimension. for that - the sliceHeight has been calculated. 
The tensor slicer is configured that - it will completely slice the nBatch dimension (0) and slice the height - dimension (1) in chunks of 'sliceHeight' */ - ops::micro::TensorSlicer in_slice(in_ch_slice.Sub(), heightDimension, - inSliceHeight, padding_top, - padding_bottom, overlap); - - /* output tensor is already sliced in the output channel dimension. - out_ch_slice.Sub() is the tensor for the amount of output channels of this - iteration of the weight slice loop. This tensor needs to be further - sliced over the batch and height dimension. */ - ops::micro::TensorSlicer out_slice(out_ch_slice.Sub(), heightDimension, - outSliceHeight); - - /* setup the pointers to the local or remote tensor to make the code - * inside the loop easier. */ - mli_tensor* in_ptr = in_is_local ? in_slice.Sub() : &in_local; - mli_tensor* out_ptr = out_is_local ? out_slice.Sub() : &out_local; - - while (!out_slice.Done()) { - TF_LITE_ENSURE(context, !in_slice.Done()); - cfg_local.padding_top = in_slice.GetPaddingPre(); - cfg_local.padding_bottom = in_slice.GetPaddingPost(); - - // if same input copy as previous iteration, skip the copy of input - if ((in_slice.Sub()->data != input_buffer_ptr) || - (mli_hlp_count_elem_num(in_slice.Sub(), 0) != input_buffer_size)) { - mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); - input_buffer_ptr = in_slice.Sub()->data; - input_buffer_size = mli_hlp_count_elem_num(in_slice.Sub(), 0); - } - mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, - &cfg_local, out_ptr); - mli_mov_tensor_sync(out_ptr, ©_config, out_slice.Sub()); - - in_slice.Next(); - out_slice.Next(); - } - w_slice.Next(); - b_slice.Next(); - out_ch_slice.Next(); - in_ch_slice.Next(); - TF_LITE_ENSURE(context, in_slice.Done()); - } - } - return kTfLiteOk; -} - -void EvalQuantizedPerChannel(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - DepthwiseParams op_params; - op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.input_offset = -data.input_zero_point; - op_params.weights_offset = 0; - op_params.output_offset = data.output_zero_point; - op_params.quantized_activation_min = std::numeric_limits::min(); - op_params.quantized_activation_max = std::numeric_limits::max(); - - reference_integer_ops::DepthwiseConvPerChannel( - op_params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -#else - TF_LITE_KERNEL_LOG(context, - "Node configuration is not supported by ARC MLI Library."); -#endif -} - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteDepthwiseConvParams* params, const OpData& data, - const TfLiteEvalTensor* input, - 
const TfLiteEvalTensor* filter, const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - tflite::DepthwiseParams op_params; - // Padding type is ignored, but still set. - op_params.padding_type = PaddingType::kSame; - op_params.padding_values.width = data.padding.width; - op_params.padding_values.height = data.padding.height; - op_params.stride_width = params->stride_width; - op_params.stride_height = params->stride_height; - op_params.dilation_width_factor = params->dilation_width_factor; - op_params.dilation_height_factor = params->dilation_height_factor; - op_params.depth_multiplier = params->depth_multiplier; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. - op_params.output_shift = -data.output_shift; - - tflite::reference_ops::DepthwiseConv( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -#else - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); -#endif -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* params = - reinterpret_cast(node->builtin_data); - const OpData& data = *(static_cast(node->user_data)); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - - switch (input->type) { // Already know in/out types are same. - case kTfLiteFloat32: - EvalFloat(context, node, params, data, input, filter, bias, output); - break; - case kTfLiteInt8: - if (data.is_mli_applicable) { - EvalMliQuantizedPerChannel(context, node, params, data, input, filter, - bias, output); - } else { - EvalQuantizedPerChannel(context, node, params, data, input, filter, - bias, output); - } - break; - case kTfLiteUInt8: - EvalQuantized(context, node, params, data, input, filter, bias, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_DEPTHWISE_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#else -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpDataConv)); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto& params = - *(reinterpret_cast(node->builtin_data)); - const OpDataConv& data = *(static_cast(node->user_data)); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kDepthwiseConvOutputTensor); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kDepthwiseConvInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kDepthwiseConvWeightsTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 3) - ? tflite::micro::GetEvalInput(context, node, kDepthwiseConvBiasTensor) - : nullptr; - - switch (input->type) { // Already know in/out types are same. 
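Both branches below ultimately compute the same sliding-window accumulation. A compact float sketch of depthwise convolution for NHWC tensors with batch 1 and no dilation; the layouts and zero-padding handling are assumptions of this illustration, not the reference kernel verbatim:

// Input:  [1, in_h, in_w, in_c], filter: [1, f_h, f_w, in_c * depth_mult],
// output: [1, out_h, out_w, in_c * depth_mult].
void DepthwiseConvFloatRef(const float* input, int in_h, int in_w, int in_c,
                           const float* filter, int f_h, int f_w, int depth_mult,
                           const float* bias, float* output, int out_h, int out_w,
                           int stride_h, int stride_w, int pad_h, int pad_w) {
  const int out_c = in_c * depth_mult;
  for (int oy = 0; oy < out_h; ++oy) {
    for (int ox = 0; ox < out_w; ++ox) {
      for (int ic = 0; ic < in_c; ++ic) {
        for (int m = 0; m < depth_mult; ++m) {
          const int oc = ic * depth_mult + m;
          float acc = (bias != nullptr) ? bias[oc] : 0.0f;
          for (int ky = 0; ky < f_h; ++ky) {
            for (int kx = 0; kx < f_w; ++kx) {
              const int iy = oy * stride_h - pad_h + ky;
              const int ix = ox * stride_w - pad_w + kx;
              if (iy < 0 || iy >= in_h || ix < 0 || ix >= in_w) continue;  // zero padding
              acc += input[(iy * in_w + ix) * in_c + ic] *
                     filter[(ky * f_w + kx) * out_c + oc];
            }
          }
          output[(oy * out_w + ox) * out_c + oc] = acc;
        }
      }
    }
  }
}

The quantized int8 path adds per-channel requantization (multiplier and shift) and activation clamping on top of the same loop structure.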
- case kTfLiteFloat32: { - tflite::reference_ops::DepthwiseConv( - DepthwiseConvParamsFloat(params, data), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - case kTfLiteInt8: { - reference_integer_ops::DepthwiseConvPerChannel( - DepthwiseConvParamsQuantized(params, data), - data.per_channel_output_multiplier, data.per_channel_output_shift, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_DEPTHWISE_CONV_2D() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/DepthwiseConvPrepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h index 049af09..000e792 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h @@ -1,4 +1,4 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -49,6 +49,32 @@ TfLiteStatus CalculateOpDataDepthwiseConv( TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node); +// This is the most generic TfLiteRegistration. The actual supported types may +// still be target dependent. The only requirement is that every implementation +// (reference or optimized) must define this function. +TfLiteRegistration Register_DEPTHWISE_CONV_2D(); + +#if defined(CMSIS_NN) +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int8 activations and int8 weights and uses the latency optimized +// implementations. +TfLiteRegistration Register_DEPTHWISE_CONV_2D_INT8(); + +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int16 activations and int8 weights and uses the latency optimized +// implementations. 
+TfLiteRegistration Register_DEPTHWISE_CONV_2D_INT16(); + +#else +inline TfLiteRegistration Register_DEPTHWISE_CONV_2D_INT8() { + return Register_DEPTHWISE_CONV_2D(); +} + +inline TfLiteRegistration Register_DEPTHWISE_CONV_2D_INT16() { + return Register_DEPTHWISE_CONV_2D(); +} +#endif + } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_KERNELS_DEPTHWISE_CONV_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv_common.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv_common.cc similarity index 78% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv_common.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv_common.cc index 4b444e8..5263961 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv_common.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv_common.cc @@ -18,7 +18,6 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" @@ -95,13 +94,18 @@ TfLiteStatus CalculateOpDataDepthwiseConv( params.dilation_width_factor, height, width, filter_height, filter_width, padding, &out_height, &out_width); - const TfLiteTensor* input = GetInput(context, node, kConvInputTensor); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kConvInputTensor); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kConvWeightsTensor); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kConvWeightsTensor); TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kConvBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kConvOutputTensor); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kConvBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kConvOutputTensor); TF_LITE_ENSURE(context, output != nullptr); // Note that quantized inference requires that all tensors have their @@ -113,8 +117,7 @@ TfLiteStatus CalculateOpDataDepthwiseConv( context, input, filter, bias, output, params.activation, &data->output_multiplier, &data->output_shift, &data->output_activation_min, &data->output_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), + data->per_channel_output_multiplier, data->per_channel_output_shift, output_channels)); } @@ -122,6 +125,11 @@ TfLiteStatus CalculateOpDataDepthwiseConv( data->filter_zero_point = filter->params.zero_point; data->output_zero_point = output->params.zero_point; + micro_context->DeallocateTempTfLiteTensor(input); + 
micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(bias); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } @@ -132,14 +140,16 @@ TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node) { OpDataConv* data = static_cast(node->user_data); const auto& params = *(static_cast(node->builtin_data)); + MicroContext* micro_context = GetMicroContext(context); - TfLiteTensor* output = GetOutput(context, node, kDepthwiseConvOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kDepthwiseConvOutputTensor); TF_LITE_ENSURE(context, output != nullptr); - const TfLiteTensor* input = - GetInput(context, node, kDepthwiseConvInputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kDepthwiseConvInputTensor); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = - GetInput(context, node, kDepthwiseConvWeightsTensor); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kDepthwiseConvWeightsTensor); TF_LITE_ENSURE(context, filter != nullptr); const int input_width = input->dims->data[2]; @@ -150,13 +160,15 @@ TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node) { const int output_height = output->dims->data[1]; // Dynamically allocate per-channel quantization parameters. - const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; - data->per_channel_output_multiplier = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); + if (input->type != kTfLiteFloat32) { + const int num_channels = filter->dims->data[kDepthwiseConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + } // All per-channel quantized tensors need valid zero point and scale arrays. 
if (input->type == kTfLiteInt8) { @@ -178,10 +190,23 @@ TfLiteStatus DepthwiseConvPrepare(TfLiteContext* context, TfLiteNode* node) { affine_quantization->zero_point->size); } + if (filter->type == kTfLiteInt4) { + int filter_size = + RuntimeShape(filter->dims->size, + reinterpret_cast(filter->dims->data)) + .FlatSize(); + context->RequestScratchBufferInArena(context, filter_size, + &data->filter_buffer_index); + } + TF_LITE_ENSURE_STATUS(CalculateOpDataDepthwiseConv( context, node, params, input_width, input_height, filter_width, filter_height, output_width, output_height, input->type, data)); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + return kTfLiteOk; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.cc similarity index 52% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.cc index b31e913..c41036e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -22,118 +22,67 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/requantize.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace dequantize { -struct OpData { - tflite::DequantizationParams quantization_params; - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - int32_t output_zero_point; -}; - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { +void* DequantizeInit(TfLiteContext* context, const char* buffer, + size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - // TODO(b/140515557): Add cached dequant to improve hybrid model performance. 
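For context on what this kernel computes: dequantization maps a quantized value back to float as real = scale * (q - zero_point), using the zero point and scale cached in DequantizeOpData. A minimal per-tensor sketch with a hypothetical helper, not the SDK's reference_ops::Dequantize:

#include <cstddef>
#include <cstdint>

// Illustrative only: per-tensor affine dequantization of int8 data.
void DequantizeInt8(const int8_t* input, size_t count, float scale,
                    int32_t zero_point, float* output) {
  for (size_t i = 0; i < count; ++i) {
    output[i] = scale * static_cast<float>(static_cast<int32_t>(input[i]) - zero_point);
  }
}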
- const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE(context, input->type == kTfLiteUInt8 || - input->type == kTfLiteInt8 || - input->type == kTfLiteInt16); - TF_LITE_ENSURE(context, output->type == kTfLiteFloat32); - - if (output->type == kTfLiteInt32) { - const double effective_output_scale = - static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(effective_output_scale, &data->output_multiplier, - &data->output_shift); - } - - data->quantization_params.zero_point = input->params.zero_point; - data->quantization_params.scale = static_cast(input->params.scale); - data->output_zero_point = output->params.zero_point; - return kTfLiteOk; + return context->AllocatePersistentBuffer(context, sizeof(DequantizeOpData)); } -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus DequantizeEval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); + DequantizeOpData* data = static_cast(node->user_data); const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); if (output->type == kTfLiteFloat32) { switch (input->type) { - case kTfLiteUInt8: + case kTfLiteInt8: reference_ops::Dequantize(data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); break; - case kTfLiteInt8: + case kTfLiteInt16: reference_ops::Dequantize(data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); break; - case kTfLiteInt16: + case kTfLiteUInt8: reference_ops::Dequantize(data->quantization_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); break; default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; } -} // namespace dequantize - TfLiteRegistration Register_DEQUANTIZE() { - return {/*init=*/dequantize::Init, - /*free=*/nullptr, - /*prepare=*/dequantize::Prepare, - /*invoke=*/dequantize::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(DequantizeInit, DequantizePrepare, + DequantizeEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.h 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.h new file mode 100644 index 0000000..ee45f36 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.h @@ -0,0 +1,38 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +struct DequantizeOpData { + tflite::DequantizationParams quantization_params; + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + int32_t output_zero_point; +}; + +TfLiteStatus DequantizePrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_DEQUANTIZE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize_common.cc new file mode 100644 index 0000000..e8ae297 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize_common.cc @@ -0,0 +1,67 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/dequantize.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/quantize.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/requantize.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/dequantize.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { + +TfLiteStatus DequantizePrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + DequantizeOpData* data = static_cast(node->user_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + // TODO(b/140515557): Add cached dequant to improve hybrid model performance. + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || + input->type == kTfLiteInt16 || + input->type == kTfLiteUInt8); + TF_LITE_ENSURE(context, output->type == kTfLiteFloat32); + + if (output->type == kTfLiteInt32) { + const double effective_output_scale = + static_cast(input->params.scale) / + static_cast(output->params.scale); + QuantizeMultiplier(effective_output_scale, &data->output_multiplier, + &data->output_shift); + } + + data->quantization_params.zero_point = input->params.zero_point; + data->quantization_params.scale = static_cast(input->params.scale); + data->output_zero_point = output->params.zero_point; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tflite_detection_postprocess.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/detection_postprocess.cc similarity index 77% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tflite_detection_postprocess.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/detection_postprocess.cc index 8418629..2209a58 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tflite_detection_postprocess.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/detection_postprocess.cc @@ -1,8 +1,11 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. @@ -10,31 +13,22 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include #include +#include -#define FLATBUFFERS_LOCALE_INDEPENDENT 0 #include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flexbuffers.h" #include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" namespace tflite { - -// We use global memory to keep track of these... Why, you might ask... I'm not sure -// if this is a bug with our version of TFLite, and incompatibility with this op, but -// the output tensors are defined as using 4 bytes for each tensor. When these are filled -// they are thus writing in uninitialized memory or overwriting previous tensors. -// Perhaps this is fixed in TF2.4 or something, but for now we'll just allocate some -// global memory. 
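// (Context for the deletion just below: the rewritten kernel writes its
// results directly into the op's real output tensors, fetched with
// tflite::micro::GetEvalOutput() as detection_boxes, detection_classes,
// detection_scores and num_detections, so the file-scope post_process_*
// arrays that used to stand in for those outputs are dropped.)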
-float post_process_boxes[100 * 4 * sizeof(float)]; -float post_process_classes[100]; -float post_process_scores[100]; - namespace { /** @@ -124,37 +118,29 @@ struct OpData { }; void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); OpData* op_data = nullptr; const uint8_t* buffer_t = reinterpret_cast(buffer); const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); - - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - op_data = (OpData*)context->AllocatePersistentBuffer(context, sizeof(OpData)); + op_data = reinterpret_cast( + context->AllocatePersistentBuffer(context, sizeof(OpData))); op_data->max_detections = m["max_detections"].AsInt32(); op_data->max_classes_per_detection = m["max_classes_per_detection"].AsInt32(); - - if (m["detections_per_class"].IsNull()) { + if (m["detections_per_class"].IsNull()) op_data->detections_per_class = kNumDetectionsPerClass; - } - else { + else op_data->detections_per_class = m["detections_per_class"].AsInt32(); - } - - if (m["use_regular_nms"].IsNull()) { + if (m["use_regular_nms"].IsNull()) op_data->use_regular_non_max_suppression = false; - } - else { + else op_data->use_regular_non_max_suppression = m["use_regular_nms"].AsBool(); - } op_data->non_max_suppression_score_threshold = m["nms_score_threshold"].AsFloat(); op_data->intersection_over_union_threshold = m["nms_iou_threshold"].AsFloat(); - op_data->num_classes = m["num_classes"].AsInt32(); - op_data->scale_values.y = m["y_scale"].AsFloat(); op_data->scale_values.x = m["x_scale"].AsFloat(); op_data->scale_values.h = m["h_scale"].AsFloat(); @@ -163,22 +149,20 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { return op_data; } -void Free(TfLiteContext* context, void* buffer) { - /* noop */ -} - TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* op_data = static_cast(node->user_data); + MicroContext* micro_context = GetMicroContext(context); + // Inputs: box_encodings, scores, anchors TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); - const TfLiteTensor* input_box_encodings = - GetInput(context, node, kInputTensorBoxEncodings); - const TfLiteTensor* input_class_predictions = - GetInput(context, node, kInputTensorClassPredictions); - const TfLiteTensor* input_anchors = - GetInput(context, node, kInputTensorAnchors); - + TfLiteTensor* input_box_encodings = + micro_context->AllocateTempInputTensor(node, kInputTensorBoxEncodings); + TfLiteTensor* input_class_predictions = + micro_context->AllocateTempInputTensor(node, + kInputTensorClassPredictions); + TfLiteTensor* input_anchors = + micro_context->AllocateTempInputTensor(node, kInputTensorAnchors); TF_LITE_ENSURE_EQ(context, NumDimensions(input_box_encodings), 3); TF_LITE_ENSURE_EQ(context, NumDimensions(input_class_predictions), 3); TF_LITE_ENSURE_EQ(context, NumDimensions(input_anchors), 2); @@ -236,6 +220,10 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // num_detections TF_LITE_ENSURE_EQ(context, NumOutputs(node), 4); + micro_context->DeallocateTempTfLiteTensor(input_box_encodings); + micro_context->DeallocateTempTfLiteTensor(input_class_predictions); + micro_context->DeallocateTempTfLiteTensor(input_anchors); + return kTfLiteOk; } @@ -252,47 +240,28 @@ class Dequantizer { float scale_; }; -void DequantizeBoxEncodings(const TfLiteTensor* input_box_encodings, - int idx, float quant_zero_point, float quant_scale, - int length_box_encoding, - CenterSizeEncoding* 
box_centersize) { - const uint8_t* boxes = - tflite::GetTensorData(input_box_encodings) + - length_box_encoding * idx; - Dequantizer dequantize(quant_zero_point, quant_scale); - // See definition of the KeyPointBoxCoder at - // https://github.com/tensorflow/models/blob/master/research/object_detection/box_coders/keypoint_box_coder.py - // The first four elements are the box coordinates, which is the same as the - // FastRnnBoxCoder at - // https://github.com/tensorflow/models/blob/master/research/object_detection/box_coders/faster_rcnn_box_coder.py - box_centersize->y = dequantize(boxes[0]); - box_centersize->x = dequantize(boxes[1]); - box_centersize->h = dequantize(boxes[2]); - box_centersize->w = dequantize(boxes[3]); -} - template -T ReInterpretTensor(const TfLiteTensor* tensor) { - const float* tensor_base = tflite::GetTensorData(tensor); +T ReInterpretTensor(const TfLiteEvalTensor* tensor) { + const float* tensor_base = tflite::micro::GetTensorData(tensor); return reinterpret_cast(tensor_base); } template -T ReInterpretTensor(TfLiteTensor* tensor) { - float* tensor_base = tflite::GetTensorData(tensor); +T ReInterpretTensor(TfLiteEvalTensor* tensor) { + float* tensor_base = tflite::micro::GetTensorData(tensor); return reinterpret_cast(tensor_base); } TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node, OpData* op_data) { // Parse input tensor boxencodings - const TfLiteTensor* input_box_encodings = - tflite::GetInput(context, node, kInputTensorBoxEncodings); + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); TF_LITE_ENSURE_EQ(context, input_box_encodings->dims->data[0], kBatchSize); const int num_boxes = input_box_encodings->dims->data[1]; TF_LITE_ENSURE(context, input_box_encodings->dims->data[2] >= kNumCoordBox); - const TfLiteTensor* input_anchors = - tflite::GetInput(context, node, kInputTensorAnchors); + const TfLiteEvalTensor* input_anchors = + tflite::micro::GetEvalInput(context, node, kInputTensorAnchors); // Decode the boxes to get (ymin, xmin, ymax, xmax) based on the anchors CenterSizeEncoding box_centersize; @@ -300,35 +269,20 @@ TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node, CenterSizeEncoding anchor; for (int idx = 0; idx < num_boxes; ++idx) { switch (input_box_encodings->type) { - // Quantized - case kTfLiteUInt8: { - DequantizeBoxEncodings( - input_box_encodings, idx, - static_cast(op_data->input_box_encodings.zero_point), - static_cast(op_data->input_box_encodings.scale), - input_box_encodings->dims->data[2], &box_centersize); - DequantizeBoxEncodings( - input_anchors, idx, - static_cast(op_data->input_anchors.zero_point), - static_cast(op_data->input_anchors.scale), kNumCoordBox, - &anchor); - break; - } // Float case kTfLiteFloat32: { // Please see DequantizeBoxEncodings function for the support detail. const int box_encoding_idx = idx * input_box_encodings->dims->data[2]; - const float* boxes = &(tflite::GetTensorData( + const float* boxes = &(tflite::micro::GetTensorData( input_box_encodings)[box_encoding_idx]); box_centersize = *reinterpret_cast(boxes); anchor = ReInterpretTensor(input_anchors)[idx]; break; } - default: { + default: // Unsupported type. 
return kTfLiteError; - } } float ycenter = static_cast(static_cast(box_centersize.y) / @@ -366,9 +320,61 @@ TfLiteStatus DecodeCenterSizeBoxes(TfLiteContext* context, TfLiteNode* node, void DecreasingPartialArgSort(const float* values, int num_values, int num_to_sort, int* indices) { std::iota(indices, indices + num_values, 0); - std::partial_sort( - indices, indices + num_to_sort, indices + num_values, - [&values](const int i, const int j) { return values[i] > values[j]; }); + std::partial_sort(indices, indices + num_to_sort, indices + num_values, + [&values](const int i, const int j) { + return std::tie(values[i], j) > std::tie(values[j], i); + }); +} + +template +void InsertionSort(int* start, int* end, Compare compare) { + for (int* i = start; i != end; ++i) { + std::rotate(std::upper_bound(start, i, *i, compare), i, i + 1); + } +} + +template +void TopDownMerge(int* values, int* scratch, const int half_num_values, + int num_values, Compare compare) { + int left = 0; + int right = half_num_values; + + for (int i = 0; i < num_values; i++) { + if (left >= half_num_values || + (right < num_values && compare(values[right], values[left]))) { + scratch[i] = values[right++]; + } else { + scratch[i] = values[left++]; + } + } + memcpy(values, scratch, num_values * sizeof(int)); +} + +template +void MergeSort(int* values, int* scratch, const int num_values, + Compare compare) { + constexpr int threshold = 20; + + if (num_values < threshold) { + InsertionSort(values, values + num_values, compare); + return; + } + + const int half_num_values = num_values / 2; + + MergeSort(values, scratch, half_num_values, compare); + MergeSort(values + half_num_values, scratch, num_values - half_num_values, + compare); + TopDownMerge(values, scratch, half_num_values, num_values, compare); +} + +void DecreasingArgSort(const float* values, int num_values, int* indices, + int* scratch) { + std::iota(indices, indices + num_values, 0); + + MergeSort(indices, scratch, num_values, [&values](const int i, const int j) { + return values[i] > values[j]; + }); } int SelectDetectionsAboveScoreThreshold(const float* values, int size, @@ -423,8 +429,8 @@ TfLiteStatus NonMaxSuppressionSingleClassHelper( TfLiteContext* context, TfLiteNode* node, OpData* op_data, const float* scores, int* selected, int* selected_size, int max_detections) { - const TfLiteTensor* input_box_encodings = - tflite::GetInput(context, node, kInputTensorBoxEncodings); + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); const int num_boxes = input_box_encodings->dims->data[1]; const float non_max_suppression_score_threshold = op_data->non_max_suppression_score_threshold; @@ -453,8 +459,16 @@ TfLiteStatus NonMaxSuppressionSingleClassHelper( int* sorted_indices = reinterpret_cast( context->GetScratchBuffer(context, op_data->sorted_indices_idx)); - DecreasingPartialArgSort(keep_scores, num_scores_kept, num_scores_kept, - sorted_indices); + // Reusing keep_indices for scratch buffer and write back its values + // after the sorting is done. 
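// (keep_indices can double as the merge-sort scratch here: MergeSort() only
// needs room for num_scores_kept ints, which keep_indices provides, and its
// previous contents are rebuilt by the loop that follows.)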
+ DecreasingArgSort(keep_scores, num_scores_kept, sorted_indices, keep_indices); + int counter = 0; + for (int i = 0; i < num_boxes; i++) { + if (scores[i] >= non_max_suppression_score_threshold) { + keep_indices[counter] = i; + counter++; + } + } const int num_boxes_kept = num_scores_kept; const int output_size = std::min(num_boxes_kept, max_detections); @@ -504,18 +518,18 @@ TfLiteStatus NonMaxSuppressionMultiClassRegularHelper(TfLiteContext* context, TfLiteNode* node, OpData* op_data, const float* scores) { - const TfLiteTensor* input_box_encodings = - tflite::GetInput(context, node, kInputTensorBoxEncodings); - const TfLiteTensor* input_class_predictions = - tflite::GetInput(context, node, kInputTensorClassPredictions); - TfLiteTensor* detection_boxes = - tflite::GetOutput(context, node, kOutputTensorDetectionBoxes); - TfLiteTensor* detection_classes = tflite::GetOutput( + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); + const TfLiteEvalTensor* input_class_predictions = + tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); + TfLiteEvalTensor* detection_boxes = + tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionBoxes); + TfLiteEvalTensor* detection_classes = tflite::micro::GetEvalOutput( context, node, kOutputTensorDetectionClasses); - TfLiteTensor* detection_scores = - tflite::GetOutput(context, node, kOutputTensorDetectionScores); - TfLiteTensor* num_detections = - tflite::GetOutput(context, node, kOutputTensorNumDetections); + TfLiteEvalTensor* detection_scores = + tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionScores); + TfLiteEvalTensor* num_detections = + tflite::micro::GetEvalOutput(context, node, kOutputTensorNumDetections); const int num_boxes = input_box_encodings->dims->data[1]; const int num_classes = op_data->num_classes; @@ -604,23 +618,23 @@ TfLiteStatus NonMaxSuppressionMultiClassRegularHelper(TfLiteContext* context, ReInterpretTensor(detection_boxes)[output_box_index] = reinterpret_cast(decoded_boxes)[anchor_index]; // detection_classes - tflite::GetTensorData(detection_classes)[output_box_index] = + tflite::micro::GetTensorData(detection_classes)[output_box_index] = class_index; // detection_scores - tflite::GetTensorData(detection_scores)[output_box_index] = + tflite::micro::GetTensorData(detection_scores)[output_box_index] = selected_score; } else { ReInterpretTensor( detection_boxes)[output_box_index] = {0.0f, 0.0f, 0.0f, 0.0f}; // detection_classes - tflite::GetTensorData(detection_classes)[output_box_index] = + tflite::micro::GetTensorData(detection_classes)[output_box_index] = 0.0f; // detection_scores - tflite::GetTensorData(detection_scores)[output_box_index] = + tflite::micro::GetTensorData(detection_scores)[output_box_index] = 0.0f; } } - tflite::GetTensorData(num_detections)[0] = + tflite::micro::GetTensorData(num_detections)[0] = size_of_sorted_indices; return kTfLiteOk; @@ -637,19 +651,19 @@ TfLiteStatus NonMaxSuppressionMultiClassFastHelper(TfLiteContext* context, TfLiteNode* node, OpData* op_data, const float* scores) { - const TfLiteTensor* input_box_encodings = - tflite::GetInput(context, node, kInputTensorBoxEncodings); - const TfLiteTensor* input_class_predictions = - tflite::GetInput(context, node, kInputTensorClassPredictions); - - // TfLiteTensor* detection_boxes = - // tflite::GetOutput(context, node, kOutputTensorDetectionBoxes); - // TfLiteTensor* detection_classes = tflite::GetOutput( - // context, node, 
kOutputTensorDetectionClasses); - // TfLiteTensor* detection_scores = - // tflite::GetOutput(context, node, kOutputTensorDetectionScores); - TfLiteTensor* num_detections = - tflite::GetOutput(context, node, kOutputTensorNumDetections); + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); + const TfLiteEvalTensor* input_class_predictions = + tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); + TfLiteEvalTensor* detection_boxes = + tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionBoxes); + + TfLiteEvalTensor* detection_classes = tflite::micro::GetEvalOutput( + context, node, kOutputTensorDetectionClasses); + TfLiteEvalTensor* detection_scores = + tflite::micro::GetEvalOutput(context, node, kOutputTensorDetectionScores); + TfLiteEvalTensor* num_detections = + tflite::micro::GetEvalOutput(context, node, kOutputTensorNumDetections); const int num_boxes = input_box_encodings->dims->data[1]; const int num_classes = op_data->num_classes; @@ -701,55 +715,32 @@ TfLiteStatus NonMaxSuppressionMultiClassFastHelper(TfLiteContext* context, // detection_boxes float* decoded_boxes = reinterpret_cast( context->GetScratchBuffer(context, op_data->decoded_boxes_idx)); - // ReInterpretTensor(detection_boxes)[box_offset] = - // reinterpret_cast(decoded_boxes)[selected_index]; - - ((BoxCornerEncoding*)post_process_boxes)[box_offset] = - reinterpret_cast(decoded_boxes)[selected_index]; + ReInterpretTensor(detection_boxes)[box_offset] = + reinterpret_cast(decoded_boxes)[selected_index]; // detection_classes - // tflite::GetTensorData(detection_classes)[box_offset] = - // class_indices[col]; - - post_process_classes[box_offset] = class_indices[col]; + tflite::micro::GetTensorData(detection_classes)[box_offset] = + class_indices[col]; // detection_scores - // tflite::GetTensorData(detection_scores)[box_offset] = - // box_scores[class_indices[col]]; - - post_process_scores[box_offset] = box_scores[class_indices[col]]; + tflite::micro::GetTensorData(detection_scores)[box_offset] = + box_scores[class_indices[col]]; output_box_index++; } } - tflite::GetTensorData(num_detections)[0] = output_box_index; + tflite::micro::GetTensorData(num_detections)[0] = output_box_index; return kTfLiteOk; } -void DequantizeClassPredictions(const TfLiteTensor* input_class_predictions, - const int num_boxes, - const int num_classes_with_background, - float* scores, OpData* op_data) { - float quant_zero_point = - static_cast(op_data->input_class_predictions.zero_point); - float quant_scale = - static_cast(op_data->input_class_predictions.scale); - Dequantizer dequantize(quant_zero_point, quant_scale); - const uint8_t* scores_quant = - tflite::GetTensorData(input_class_predictions); - for (int idx = 0; idx < num_boxes * num_classes_with_background; ++idx) { - scores[idx] = dequantize(scores_quant[idx]); - } -} - TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context, TfLiteNode* node, OpData* op_data) { // Get the input tensors - const TfLiteTensor* input_box_encodings = - tflite::GetInput(context, node, kInputTensorBoxEncodings); - const TfLiteTensor* input_class_predictions = - tflite::GetInput(context, node, kInputTensorClassPredictions); + const TfLiteEvalTensor* input_box_encodings = + tflite::micro::GetEvalInput(context, node, kInputTensorBoxEncodings); + const TfLiteEvalTensor* input_class_predictions = + tflite::micro::GetEvalInput(context, node, kInputTensorClassPredictions); const int num_boxes = 
input_box_encodings->dims->data[1]; const int num_classes = op_data->num_classes; @@ -764,16 +755,8 @@ TfLiteStatus NonMaxSuppressionMultiClass(TfLiteContext* context, const float* scores; switch (input_class_predictions->type) { - case kTfLiteUInt8: { - float* temporary_scores = reinterpret_cast( - context->GetScratchBuffer(context, op_data->scores_idx)); - DequantizeClassPredictions(input_class_predictions, num_boxes, - num_classes_with_background, temporary_scores, - op_data); - scores = temporary_scores; - } break; case kTfLiteFloat32: - scores = tflite::GetTensorData(input_class_predictions); + scores = tflite::micro::GetTensorData(input_class_predictions); break; default: // Unsupported type. @@ -816,30 +799,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } } // namespace -TfLiteRegistration Register_DETECTION_POSTPROCESS() { - return {/*init=*/Init, - /*free=*/Free, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -namespace ops { -namespace micro { -TfLiteRegistration Register_TFLite_Detection_PostProcess() { - return {/*init=*/Init, - /*free=*/Free, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} -} // namespace micro +TfLiteRegistration* Register_DETECTION_POSTPROCESS() { + static TfLiteRegistration r = tflite::micro::RegisterOp(Init, Prepare, Eval); + return &r; } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/div.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/div.cc similarity index 78% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/div.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/div.cc index 4defb74..e5fb262 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/div.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/div.cc @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,6 +21,7 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace { @@ -29,7 +30,7 @@ constexpr int kInputTensor1 = 0; constexpr int kInputTensor2 = 1; constexpr int kOutputTensor = 0; -struct OpData { +struct OpDataDiv { // Parameters used in the quantized paths where the output is 8bit int32_t input1_zero_point; int32_t input2_zero_point; @@ -42,21 +43,9 @@ struct OpData { int output_shift; }; -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteDivParams* params, OpData* data) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - const TfLiteTensor* input1; - TF_LITE_ENSURE_OK(context, - GetInputSafe(context, node, kInputTensor1, &input1)); - const TfLiteTensor* input2; - TF_LITE_ENSURE_OK(context, - GetInputSafe(context, node, kInputTensor2, &input2)); - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kOutputTensor, &output)); - +TfLiteStatus CalculateOpDataDiv(TfLiteContext* context, TfLiteTensor* input1, + TfLiteTensor* input2, TfLiteTensor* output, + TfLiteDivParams* params, OpDataDiv* data) { TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); @@ -78,17 +67,38 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); + return context->AllocatePersistentBuffer(context, sizeof(OpDataDiv)); } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - auto* params = static_cast(node->builtin_data); - auto* data = static_cast(node->user_data); - return CalculateOpData(context, node, params, data); + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + OpDataDiv* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_STATUS( + CalculateOpDataDiv(context, input1, input2, output, params, data)); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params, - const OpData* data, const TfLiteEvalTensor* input1, + const OpDataDiv* data, const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { tflite::ArithmeticParams op_params = {}; @@ -118,7 +128,7 @@ void EvalDiv(TfLiteContext* context, TfLiteNode* node, TfLiteDivParams* params, } TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteDivParams* params, const OpData* data, + TfLiteDivParams* params, const 
OpDataDiv* data, const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { @@ -153,8 +163,7 @@ TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, } #undef TF_LITE_DIV } else { - TF_LITE_KERNEL_LOG( - context, "Unsupported combination of input and output types in DIV."); + MicroPrintf("Unsupported combination of input and output types in DIV."); return kTfLiteError; } @@ -165,7 +174,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->builtin_data != nullptr); auto* params = static_cast(node->builtin_data); TFLITE_DCHECK(node->user_data != nullptr); - auto* data = static_cast(node->user_data); + auto* data = static_cast(node->user_data); const TfLiteEvalTensor* input1 = tflite::micro::GetEvalInput(context, node, kInputTensor1); @@ -180,10 +189,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, EvalQuantized(context, node, params, data, input1, input2, output)); } else { - TF_LITE_KERNEL_LOG(context, - "DIV only supports FLOAT32, quantized INT8 " - "now, got type %s (%d).", - TfLiteTypeGetName(output->type), output->type); + MicroPrintf( + "DIV only supports FLOAT32, quantized INT8 " + "now, got type %s (%d).", + TfLiteTypeGetName(output->type), output->type); return kTfLiteError; } @@ -193,14 +202,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace TfLiteRegistration Register_DIV() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elementwise.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elementwise.cc new file mode 100644 index 0000000..4ee7f2c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elementwise.cc @@ -0,0 +1,430 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace ops { +namespace micro { +namespace elementwise { +namespace { + +constexpr int kAbsNameId = 0; +constexpr int kRsrqtNameId = 1; + +const int kElementwiseInputTensor = 0; +const int kElementwiseOutputTensor = 0; + +struct OpDataAbsRsqrt { + int32_t multiplier; + int shift; + int input_offset; + int output_offset; + bool needs_rescale; + TfLiteQuantizationType input_quantization_type; + TfLiteType input_type; +}; + +bool IsNumericSupportedType(const TfLiteType type) { + return type == kTfLiteFloat32; +} + +bool IsLogicalSupportedType(const TfLiteType type) { + return type == kTfLiteBool; +} + +bool IsAbsSupportedType(const TfLiteType type) { + return type == kTfLiteFloat32 || type == kTfLiteInt8 || type == kTfLiteInt16; +} + +bool IsRsqrtSupportedType(const TfLiteType type) { + return type == kTfLiteFloat32 || type == kTfLiteInt8; +} + +inline void SetAbsOutputMultiplier(const float input_scale, + const float output_scale, + int32_t* multiplier, int* shift) { + QuantizeMultiplier(static_cast(input_scale / output_scale), + multiplier, shift); +} + +inline void SetRsqrtOutputMultiplier(const float input_scale, + const float output_scale, + int32_t* multiplier, int* shift) { + const double scale = + 1. 
/ static_cast((std::sqrt(input_scale) * output_scale)); + QuantizeMultiplier(scale, multiplier, shift); +} + +typedef bool (*IsSupportedType)(TfLiteType); +template +TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kElementwiseInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kElementwiseOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + if (!IsSupportedType(input->type)) { + MicroPrintf("Input data type %s (%d) is not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +typedef bool (*IsSupportedType)(TfLiteType); +template +TfLiteStatus PrepareAbsRsqrt(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + if (!IsSupportedType(input->type)) { + MicroPrintf("Input data type %s (%d) is not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + + auto* op_data = static_cast(node->user_data); + op_data->input_type = input->type; + + // For int16 type input, we support both quantized and non-quantized + // evaluation. 
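// (As used in AbsEval further down: an int16 input tagged
// kTfLiteNoQuantization takes the plain integer AbsHelper/std::abs path,
// while affine-quantized int16, whose zero points are required to be 0 in
// this kernel, goes through AbsEvalQuantized, using the offsets and, when
// the scales differ, the multiplier and shift computed later in this
// function.)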
+ if (op_nameid == kAbsNameId) { + op_data->input_quantization_type = input->quantization.type; + } + + if (input->type == kTfLiteInt8 || + (input->type == kTfLiteInt16 && + input->quantization.type != kTfLiteNoQuantization)) { + TF_LITE_ENSURE_EQ(context, input->quantization.type, + kTfLiteAffineQuantization); + TF_LITE_ENSURE_EQ(context, output->quantization.type, + kTfLiteAffineQuantization); + const auto* input_params = + reinterpret_cast(input->quantization.params); + const auto* output_params = reinterpret_cast( + output->quantization.params); + TF_LITE_ENSURE(context, input_params != nullptr); + TF_LITE_ENSURE(context, input_params->scale != nullptr); + TF_LITE_ENSURE(context, input_params->scale->size > 0); + TF_LITE_ENSURE(context, input_params->zero_point->size > 0); + TF_LITE_ENSURE(context, output_params != nullptr); + TF_LITE_ENSURE(context, output_params->scale != nullptr); + TF_LITE_ENSURE(context, output_params->scale->size > 0); + TF_LITE_ENSURE(context, output_params->zero_point->size > 0); + op_data->input_offset = input_params->zero_point->data[0]; + op_data->output_offset = output_params->zero_point->data[0]; + if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, op_data->input_offset, 0); + TF_LITE_ENSURE_EQ(context, op_data->output_offset, 0); + } + const float input_scale = input_params->scale->data[0]; + const float output_scale = output_params->scale->data[0]; + op_data->needs_rescale = input_scale != output_scale; + if (op_nameid == kAbsNameId && op_data->needs_rescale) { + SetAbsOutputMultiplier(input_scale, output_scale, &op_data->multiplier, + &op_data->shift); + } else if (op_nameid == kRsrqtNameId) { + SetRsqrtOutputMultiplier(input_scale, output_scale, &op_data->multiplier, + &op_data->shift); + } + } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template +inline TfLiteStatus EvalImplQuantized( + TfLiteContext* context, TfLiteNode* node, + T func(TfLiteContext*, TfLiteNode*, T), + TfLiteStatus validate_input_func(TfLiteContext*, TfLiteNode*, T), + TfLiteType expected_type) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); + const size_t num_elements = ElementCount(*input->dims); + const T* in_data = tflite::micro::GetTensorData(input); + T* out_data = tflite::micro::GetTensorData(output); + for (size_t i = 0; i < num_elements; ++i) { + if (validate_input_func) { + TF_LITE_ENSURE_OK(context, + validate_input_func(context, node, in_data[i])); + } + out_data[i] = func(context, node, in_data[i]); + } + return kTfLiteOk; +} + +template +inline T AbsHelper(T i) { + return std::abs(i); +} + +template +inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, + T func(T), TfLiteStatus validate_input_func(T), + TfLiteType expected_type) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); + const size_t num_elements = ElementCount(*input->dims); + const T* in_data = tflite::micro::GetTensorData(input); + T* out_data = tflite::micro::GetTensorData(output); + for (size_t i = 0; i < num_elements; ++i) { + if (validate_input_func) { + TF_LITE_ENSURE_OK(context, validate_input_func(in_data[i])); + } + out_data[i] = 
func(in_data[i]); + } + return kTfLiteOk; +} + +inline TfLiteStatus EvalNumeric(TfLiteContext* context, TfLiteNode* node, + float float_func(float)) { + return EvalImpl(context, node, float_func, + /*validate_input_func=*/nullptr, kTfLiteFloat32); +} + +inline TfLiteStatus EvalLogical(TfLiteContext* context, TfLiteNode* node, + + bool bool_func(bool)) { + return EvalImpl(context, node, bool_func, + /*validate_input_func=*/nullptr, kTfLiteBool); +} + +void* ElementWiseAbsRsqrtInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataAbsRsqrt)); +} + +template +inline T AbsEvalQuantized(TfLiteContext* context, TfLiteNode* node, T i) { + const auto* op_data = static_cast(node->user_data); + const int kMin = std::numeric_limits::min(); + const int kMax = std::numeric_limits::max(); + + const int32_t value = std::abs(i - op_data->input_offset); + if (!op_data->needs_rescale) { + return static_cast( + std::min(std::max(static_cast(value + op_data->output_offset), + static_cast(kMin)), + static_cast(kMax))); + } + + const int32_t output = tflite::MultiplyByQuantizedMultiplier( + value, op_data->multiplier, op_data->shift) + + op_data->output_offset; + return static_cast(std::min( + std::max(static_cast(output), static_cast(kMin)), + static_cast(kMax))); +} + +template +inline T RsqrtEvalQuantized(TfLiteContext* context, TfLiteNode* node, T i) { + const auto* op_data = static_cast(node->user_data); + const int kMin = std::numeric_limits::min(); + const int kMax = std::numeric_limits::max(); + + const int32_t value = (i - op_data->input_offset); + const int32_t kShift = 20; // Shift to keep value integer. + if (value == 0) { + // Assume that any value close to 0 represents the max output value. + return static_cast(kMax); + } + int32_t inv_sqrt_multiplier; + int inv_sqrt_shift; + GetInvSqrtQuantizedMultiplierExp(value, kReverseShift, &inv_sqrt_multiplier, + &inv_sqrt_shift); + const int32_t data = tflite::MultiplyByQuantizedMultiplier( + static_cast(1), inv_sqrt_multiplier, inv_sqrt_shift + kShift); + const int32_t output = + tflite::MultiplyByQuantizedMultiplier(data, op_data->multiplier, + op_data->shift - kShift) + + op_data->output_offset; + return static_cast(std::min( + std::max(static_cast(output), static_cast(kMin)), + static_cast(kMax))); +} + +template +TfLiteStatus validate_input_func(TfLiteContext* context, TfLiteNode* node, + T i) { + const auto* op_data = static_cast(node->user_data); + + TF_LITE_ENSURE_MSG(context, i >= op_data->input_offset, + "Rsqrt is only defined for positive values"); + return static_cast(kTfLiteOk); +} + +TfLiteStatus AbsEval(TfLiteContext* context, TfLiteNode* node) { + OpDataAbsRsqrt* op_data = reinterpret_cast(node->user_data); + TfLiteType type = op_data->input_type; + TfLiteQuantizationType input_quantization_type = + op_data->input_quantization_type; + TfLiteStatus eval_result; + + switch (type) { + case kTfLiteFloat32: + eval_result = EvalNumeric(context, node, std::abs); + break; + case kTfLiteInt8: + eval_result = + EvalImplQuantized(context, node, AbsEvalQuantized, + /*validate_input_func=*/nullptr, type); + break; + case kTfLiteInt16: + eval_result = + input_quantization_type == kTfLiteNoQuantization + ? 
EvalImpl(context, node, AbsHelper, + /*validate_input_func=*/nullptr, type) + : EvalImplQuantized(context, node, AbsEvalQuantized, + /*validate_input_func=*/nullptr, + type); + break; + default: + MicroPrintf("Current data type %s is not supported.", + TfLiteTypeGetName(type)); + return kTfLiteError; + break; + } + return eval_result; +} + +TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) { + return EvalNumeric(context, node, std::sin); +} + +TfLiteStatus CosEval(TfLiteContext* context, TfLiteNode* node) { + return EvalNumeric(context, node, std::cos); +} + +TfLiteStatus LogEval(TfLiteContext* context, TfLiteNode* node) { + return EvalNumeric(context, node, std::log); +} + +TfLiteStatus SqrtEval(TfLiteContext* context, TfLiteNode* node) { + return EvalNumeric(context, node, std::sqrt); +} + +TfLiteStatus RsqrtEval(TfLiteContext* context, TfLiteNode* node) { + const auto* op_data = static_cast(node->user_data); + TfLiteType type = op_data->input_type; + switch (type) { + case kTfLiteFloat32: + return EvalImpl( + context, node, [](float f) { return 1.f / std::sqrt(f); }, + /*validate_input_func=*/nullptr, type); + case kTfLiteInt8: + return EvalImplQuantized(context, node, + elementwise::RsqrtEvalQuantized, + elementwise::validate_input_func, type); + + default: + MicroPrintf("Current data type %s is not supported.", + TfLiteTypeGetName(type)); + return kTfLiteError; + } +} + +TfLiteStatus SquareEval(TfLiteContext* context, TfLiteNode* node) { + return EvalNumeric(context, node, [](float f) { return f * f; }); +} + +TfLiteStatus LogicalNotEval(TfLiteContext* context, TfLiteNode* node) { + return EvalLogical(context, node, [](bool v) { return !v; }); +} + +} // namespace +} // namespace elementwise + +TfLiteRegistration Register_ABS() { + return tflite::micro::RegisterOp( + elementwise::ElementWiseAbsRsqrtInit, + elementwise::PrepareAbsRsqrt, + elementwise::AbsEval); +} + +TfLiteRegistration Register_SIN() { + return tflite::micro::RegisterOp( + nullptr, elementwise::GenericPrepare, + elementwise::SinEval); +} + +TfLiteRegistration Register_COS() { + return tflite::micro::RegisterOp( + nullptr, elementwise::GenericPrepare, + elementwise::CosEval); +} + +TfLiteRegistration Register_LOG() { + return tflite::micro::RegisterOp( + nullptr, elementwise::GenericPrepare, + elementwise::LogEval); +} + +TfLiteRegistration Register_SQRT() { + return tflite::micro::RegisterOp( + nullptr, elementwise::GenericPrepare, + elementwise::SqrtEval); +} + +TfLiteRegistration Register_RSQRT() { + return tflite::micro::RegisterOp( + elementwise::ElementWiseAbsRsqrtInit, + elementwise::PrepareAbsRsqrt, + elementwise::RsqrtEval); +} + +TfLiteRegistration Register_SQUARE() { + return tflite::micro::RegisterOp( + nullptr, elementwise::GenericPrepare, + elementwise::SquareEval); +} + +TfLiteRegistration Register_LOGICAL_NOT() { + return tflite::micro::RegisterOp( + nullptr, elementwise::GenericPrepare, + elementwise::LogicalNotEval); +} + +} // namespace micro +} // namespace ops +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elementwise.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elementwise.cpp deleted file mode 100644 index d173cb5..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elementwise.cpp +++ /dev/null @@ -1,214 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include - -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace elementwise { -namespace { - -bool IsNumericSupportedType(const TfLiteType type) { - return type == kTfLiteFloat32; -} - -bool IsLogicalSupportedType(const TfLiteType type) { - return type == kTfLiteBool; -} - -typedef bool (*IsSupportedType)(TfLiteType); -template -TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, 0); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); - TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (!IsSupportedType(input->type)) { - TF_LITE_KERNEL_LOG(context, "Input data type %s (%d) is not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -template -inline TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node, - T func(T), TfLiteType expected_type) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, expected_type); - const size_t num_elements = ElementCount(*input->dims); - const T* in_data = tflite::micro::GetTensorData(input); - T* out_data = tflite::micro::GetTensorData(output); - for (size_t i = 0; i < num_elements; ++i) { - out_data[i] = func(in_data[i]); - } - return kTfLiteOk; -} - -inline TfLiteStatus EvalNumeric(TfLiteContext* context, TfLiteNode* node, - float float_func(float)) { - return EvalImpl(context, node, float_func, kTfLiteFloat32); -} - -inline TfLiteStatus EvalLogical(TfLiteContext* context, TfLiteNode* node, - bool bool_func(bool)) { - return EvalImpl(context, node, bool_func, kTfLiteBool); -} - -TfLiteStatus AbsEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::abs); -} - -TfLiteStatus SinEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::sin); -} - -TfLiteStatus CosEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::cos); -} - -TfLiteStatus LogEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::log); -} - -TfLiteStatus SqrtEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, std::sqrt); -} - -TfLiteStatus RsqrtEval(TfLiteContext* 
context, TfLiteNode* node) { - return EvalNumeric(context, node, [](float f) { return 1.f / std::sqrt(f); }); -} - -TfLiteStatus SquareEval(TfLiteContext* context, TfLiteNode* node) { - return EvalNumeric(context, node, [](float f) { return f * f; }); -} - -TfLiteStatus LogicalNotEval(TfLiteContext* context, TfLiteNode* node) { - return EvalLogical(context, node, [](bool v) { return !v; }); -} - -} // namespace -} // namespace elementwise - -TfLiteRegistration Register_ABS() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::AbsEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SIN() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SinEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_COS() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::CosEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LOG() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::LogEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SQRT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SqrtEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_RSQRT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::RsqrtEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_SQUARE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::SquareEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LOGICAL_NOT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/ - elementwise::GenericPrepare, - /*invoke=*/elementwise::LogicalNotEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elu.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elu.cc similarity index 86% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elu.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elu.cc index 0e5f83b..7581772 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elu.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/elu.cc @@ -25,6 +25,7 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace { @@ -45,7 +46,10 @@ using TransformFunc = float (*)(float); template void PopulateLookupTable(const TfLiteTensor* input, const TfLiteTensor* output, const TransformFunc transform, OpData* data) { - if (sizeof(T) != 1) TF_LITE_FATAL("Lookup table valid only for 8bit"); + if (sizeof(T) != 1) { + MicroPrintf("Lookup table valid only for 8bit"); + TFLITE_ABORT; + } const float inverse_scale = 1 / output->params.scale; int32_t maxval = std::numeric_limits::max(); @@ -76,13 +80,16 @@ void EvalUsingLookupTable(const OpData* data, const TfLiteEvalTensor* input, } TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kOutputTensor, &output)); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); // Use LUT to handle quantized elu path. @@ -93,7 +100,8 @@ TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { }; PopulateLookupTable(input, output, transform, data); } - + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -128,9 +136,8 @@ TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } default: - TF_LITE_KERNEL_LOG( - context, "ELU only supports float32 and int8 currently, got %s.", - TfLiteTypeGetName(input->type)); + MicroPrintf("ELU only supports float32 and int8 currently, got %s.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } } @@ -138,14 +145,7 @@ TfLiteStatus EluEval(TfLiteContext* context, TfLiteNode* node) { } // namespace TfLiteRegistration Register_ELU() { - return {/*init=*/EluInit, - /*free=*/nullptr, - /*prepare=*/EluPrepare, - /*invoke=*/EluEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(EluInit, EluPrepare, EluEval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ethosu.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ethosu.cc new file mode 100644 index 0000000..e2bccde --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ethosu.cc @@ -0,0 +1,214 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#define FLATBUFFERS_LOCALE_INDEPENDENT 0 +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flexbuffers.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +#if EI_CLASSIFIER_TFLITE_ETHOSU_POLYFILL || EI_ETHOS + +#if EI_CLASSIFIER_TFLITE_ETHOSU_POLYFILL +// Modified by Edge Impulse +// Add stub definitions so that EON Compiler can run + +int ethosu_invoke(struct ethosu_driver *drv, + const void *custom_data_ptr, + const int custom_data_size, + const uint64_t *base_addr, + const size_t *base_addr_size, + const int num_base_addr) +{ return 0; } + +// forward declare the struct +struct ethosu_driver; + +struct ethosu_driver *ethosu_reserve_driver(void) { return nullptr; } +void ethosu_release_driver(struct ethosu_driver *drv) {} +#else +#include +#endif +namespace tflite { +namespace { + +constexpr uint8_t CO_TYPE_ETHOSU = 1; + +struct OpData { + int cms_data_size; + int base_addr_idx; + int base_addr_size_idx; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +void Free(TfLiteContext* context, void* buffer) {} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(context != nullptr); + TF_LITE_ENSURE(context, node->inputs->size > 0); + TFLITE_DCHECK(node->user_data != nullptr); + TF_LITE_ENSURE(context, node->custom_initial_data_size > 0); + + OpData* data = static_cast(node->user_data); + int num_base_addr = node->inputs->size + node->outputs->size; + + // Request arrays for the base address pointers and sizes. + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, num_base_addr * sizeof(uint64_t), &data->base_addr_idx)); + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, num_base_addr * sizeof(size_t), &data->base_addr_size_idx)); + + // Get command stream data size. + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* tensor = micro_context->AllocateTempInputTensor(node, 0); + data->cms_data_size = tensor->bytes; + micro_context->DeallocateTempTfLiteTensor(tensor); + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(context->GetScratchBuffer != nullptr); + + // Get base addresses. 
+ TfLiteEvalTensor* tensor; + int i = 0; + int num_tensors = 0; + void* cms_data; + uint8_t co_type; + int result; + const OpData* data = static_cast(node->user_data); + uint64_t* base_addrs = static_cast( + context->GetScratchBuffer(context, data->base_addr_idx)); + size_t* base_addrs_size = static_cast( + context->GetScratchBuffer(context, data->base_addr_size_idx)); + + const uint8_t* custom_data = + static_cast(node->custom_initial_data); + auto root = flexbuffers::GetRoot(custom_data, node->custom_initial_data_size); + co_type = root.AsInt8(); + if (co_type != CO_TYPE_ETHOSU) { + MicroPrintf("CO_TYPE != ETHOSU"); + return kTfLiteError; + } + + // Get command stream data address. + tensor = context->GetEvalTensor(context, node->inputs->data[0]); + cms_data = reinterpret_cast(tensor->data.uint8); + + // Get addresses to weights/scratch/input data. + for (i = 1; i < node->inputs->size; ++i) { + tensor = context->GetEvalTensor(context, node->inputs->data[i]); + base_addrs[num_tensors] = + static_cast(reinterpret_cast(tensor->data.uint8)); + size_t byte_size = 1; + for (int k = 0; k < tensor->dims->size; k++) { + byte_size = byte_size * tensor->dims->data[k]; + } + base_addrs_size[num_tensors] = byte_size; + num_tensors++; + } + + // Get addresses to output data. + for (i = 0; i < node->outputs->size; ++i) { + tensor = context->GetEvalTensor(context, node->outputs->data[i]); + base_addrs[num_tensors] = + static_cast(reinterpret_cast(tensor->data.uint8)); + size_t byte_size = 1; + for (int k = 0; k < tensor->dims->size; k++) { + byte_size = byte_size * tensor->dims->data[k]; + } + base_addrs_size[num_tensors] = byte_size; + num_tensors++; + } + + // Ethos-U guarantees that the tensors that require a base pointer are among + // the 8 first tensors + // When Vela optimizes a tflite file it will assign the tensors like this: + // + // +-------+------------------------+ +--------+-------------+ + // | INPUT | Description | | OUTPUT | Description | + // +-------+------------------------+ +--------+-------------+ + // | 0 | Ethos-U command stream | | 0..m | Outputs | + // | 1 | TFLM model | +--------+-------------+ + // | 2 | TFLM arena | + // | 3 | Ethos-U fast scratch | + // | 4..n | Inputs | + // +-------+------------------------+ + // + // This code will assign the NPU base addresses like this: + // + // +--------------+----------------------+ + // | Base address | Description | + // +--------------+----------------------+ + // | 0 | TFLM model | + // | 1 | TFLM arena | + // | 2 | Ethos-U fast scratch | + // | 3..n | Input tensors | + // | n..m | Output tensors | + // +--------------+----------------------+ + // + // The number of base address will be limited to 8. + // + // NOTE! The command stream produced by Vela will access the IFM and OFM + // buffers using base address 1. This means that it is not possible to point + // the input and output tensors outside of the TFLM arena. 
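The loops and the comment block above flatten each remaining input and output tensor into a (base address, byte count) pair for the NPU driver, capped at eight entries. The short standalone sketch below mirrors just that packing step; the Tensor struct and the buffer sizes are stand-ins invented here, not SDK types, and byte counts are taken as the product of the dimensions exactly as the kernel does for its uint8 buffers.

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Tensor {              // stand-in for TfLiteEvalTensor
      const uint8_t* data;
      std::vector<int> dims;
    };

    int main() {
      uint8_t weights[64], arena[128], out[16];
      std::vector<Tensor> tensors = {
          {weights, {4, 16}}, {arena, {128}}, {out, {4, 4}}};

      std::vector<uint64_t> base_addrs;
      std::vector<size_t> base_sizes;
      for (const Tensor& t : tensors) {
        size_t bytes = 1;
        for (int d : t.dims) bytes *= static_cast<size_t>(d);  // product of dims
        base_addrs.push_back(reinterpret_cast<uintptr_t>(t.data));
        base_sizes.push_back(bytes);
      }

      // The driver call receives at most 8 base address entries.
      const size_t num = base_addrs.size() < 8 ? base_addrs.size() : 8;
      for (size_t i = 0; i < num; ++i)
        std::printf("base %zu: 0x%llx, %zu bytes\n", i,
                    (unsigned long long)base_addrs[i], base_sizes[i]);
      return 0;
    }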
+ num_tensors = std::min(num_tensors, 8); + + struct ethosu_driver* drv = ethosu_reserve_driver(); + result = ethosu_invoke(drv, cms_data, data->cms_data_size, base_addrs, + base_addrs_size, num_tensors); + ethosu_release_driver(drv); + + if (-1 == result) { + return kTfLiteError; + } else { + return kTfLiteOk; + } +} + +} // namespace + +TfLiteRegistration* Register_ETHOSU() { + static TfLiteRegistration r = tflite::micro::RegisterOp(Init, Prepare, Eval); + return &r; +} + +const char* GetString_ETHOSU() { return "ethos-u"; } + +} // namespace tflite + +#else + +// +// This is a stub file for non-Ethos platforms +// +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { + +TfLiteRegistration* Register_ETHOSU() { return nullptr; } + +const char* GetString_ETHOSU() { return ""; } + +} // namespace tflite + +#endif // Ethos flag \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/exp.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/exp.cc similarity index 81% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/exp.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/exp.cc index 2c74649..c727cb9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/exp.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/exp.cc @@ -19,6 +19,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace { @@ -27,11 +28,15 @@ constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input->type); @@ -40,6 +45,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { for (int i = 0; i < output->dims->size; ++i) { TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } @@ -56,8 +64,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { static_cast(flat_size), tflite::micro::GetTensorData(output)); } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) currently not supported by Exp.", - TfLiteTypeGetName(input->type), input->type); + MicroPrintf("Type %s (%d) currently not supported by Exp.", + TfLiteTypeGetName(input->type), input->type); return kTfLiteError; } return kTfLiteOk; @@ -65,14 +73,7 @@ 
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace TfLiteRegistration Register_EXP() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/expand_dims.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/expand_dims.cc similarity index 50% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/expand_dims.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/expand_dims.cc index ecdc619..f2b638b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/expand_dims.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/expand_dims.cc @@ -17,6 +17,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" namespace tflite { @@ -26,67 +27,85 @@ constexpr int kInputTensor = 0; constexpr int kAxisTensor = 1; constexpr int kOutputTensor = 0; -TfLiteStatus ExpandTensorDim(TfLiteContext* context, - const TfLiteEvalTensor* input, int32_t axis, - TfLiteEvalTensor* output) { - const TfLiteIntArray* input_dims = input->dims; - TfLiteIntArray* output_dims = output->dims; - if (axis < 0) { - axis = input_dims->size + 1 + axis; - } - TF_LITE_ENSURE(context, (axis <= input_dims->size)); - - output_dims->size = input_dims->size + 1; - for (int i = 0; i < output_dims->size; ++i) { - if (i < axis) { - output_dims->data[i] = input_dims->data[i]; - } else if (i == axis) { - output_dims->data[i] = 1; - } else { - output_dims->data[i] = input_dims->data[i - 1]; - } - } - return kTfLiteOk; -} - TfLiteStatus GetAxisValueFromTensor(TfLiteContext* context, - const TfLiteEvalTensor* axis, + const TfLiteTensor* axis, int32_t* axis_value) { - const int axis_dims = (tflite::micro::GetTensorShape(axis)).DimensionsCount(); + const int axis_dims = (tflite::GetTensorShape(axis)).DimensionsCount(); if (axis_dims > 1) { - TF_LITE_KERNEL_LOG(context, "Axis has only one element for Expand_Dims.", - axis_dims); + MicroPrintf("Axis has only one element for Expand_Dims.", axis_dims); return kTfLiteError; } if (kTfLiteInt32 == (axis->type)) { - const int32_t* axis_ptr = tflite::micro::GetTensorData(axis); + const int32_t* axis_ptr = tflite::GetTensorData(axis); *axis_value = axis_ptr[0]; return kTfLiteOk; } else { - TF_LITE_KERNEL_LOG(context, - "Axis type %s (%d) not supported by Expand_Dims.", - TfLiteTypeGetName(axis->type), axis->type); + MicroPrintf("Axis type %s (%d) not supported by Expand_Dims.", + TfLiteTypeGetName(axis->type), axis->type); return kTfLiteError; } } +// Verifies that the output tensor's dimension shape is equivalent to inserting +// a dimension of length 1 at the dimension index axis of input's shape as +// defined in https://www.tensorflow.org/api_docs/python/tf/expand_dims. 
+TfLiteStatus VerifyTensorDim(TfLiteContext* context, const TfLiteTensor* input, + const TfLiteTensor* axis_tensor, + const TfLiteTensor* output) { + int32_t axis_value = 0; + TF_LITE_ENSURE_OK(context, + GetAxisValueFromTensor(context, axis_tensor, &axis_value)); + + tflite::RuntimeShape input_shape = tflite::GetTensorShape(input); + if (axis_value < 0) { + axis_value = input_shape.DimensionsCount() + 1 + axis_value; + } + TF_LITE_ENSURE(context, axis_value <= input_shape.DimensionsCount()); + + // TFLM only supports fixed dimension tensor and assumes that the output shape + // is fully specified in the model. As such, TFLM directly use the pointer to + // the dimension array in the model buffer. + tflite::RuntimeShape output_shape = tflite::GetTensorShape(output); + + TF_LITE_ENSURE(context, output_shape.DimensionsCount() == + input_shape.DimensionsCount() + 1); + for (int i = 0; i < output_shape.DimensionsCount(); ++i) { + if (i < axis_value) { + TF_LITE_ENSURE(context, output_shape.Dims(i) == input_shape.Dims(i)); + } else if (i == axis_value) { + TF_LITE_ENSURE(context, output_shape.Dims(i) == 1); + } else { + TF_LITE_ENSURE(context, output_shape.Dims(i) == input_shape.Dims(i - 1)); + } + } + return kTfLiteOk; +} + TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - const TfLiteTensor* axis; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kAxisTensor, &axis)); - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kOutputTensor, &output)); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* axis = + micro_context->AllocateTempInputTensor(node, kAxisTensor); + TF_LITE_ENSURE(context, axis != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); output->type = input->type; if (IsDynamicTensor(axis)) { - TF_LITE_KERNEL_LOG(context, - "DynamicTensor is not yet supported by Expand_Dims."); + MicroPrintf("DynamicTensor is not yet supported by Expand_Dims."); return kTfLiteError; } + TF_LITE_ENSURE_OK(context, VerifyTensorDim(context, input, axis, output)); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(axis); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -100,23 +119,9 @@ void memCopyN(T* out, const T* in, const int num_elements) { TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* axis = - tflite::micro::GetEvalInput(context, node, kAxisTensor); TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); const int flat_size = ElementCount(*input->dims); - const int input_dims = input->dims->size; - - int32_t axis_value; - TF_LITE_ENSURE_OK(context, - GetAxisValueFromTensor(context, axis, &axis_value)); - if ((axis_value > static_cast(input_dims)) || - (axis_value < static_cast(-(input_dims + 1)))) { - TF_LITE_KERNEL_LOG(context, "Invalid Expand_Dims axis value (%d).", - axis_value); - return kTfLiteError; - } - ExpandTensorDim(context, input, axis_value, output); 
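VerifyTensorDim above moves the shape logic from Eval to Prepare: because TFLM tensors have fixed dimensions, the kernel only verifies that the output shape already equals the input shape with a length-1 dimension inserted at the (possibly negative) axis. A minimal standalone version of that check, using plain vectors instead of TfLiteTensor, is sketched here for reference.

    #include <cstdio>
    #include <vector>

    // Returns true when `output` equals `input` with a 1 inserted at `axis`.
    // A negative axis counts from the end, as in tf.expand_dims.
    bool VerifyExpandDims(std::vector<int> input, std::vector<int> output, int axis) {
      const int in_rank = static_cast<int>(input.size());
      if (axis < 0) axis = in_rank + 1 + axis;
      if (axis < 0 || axis > in_rank) return false;
      if (static_cast<int>(output.size()) != in_rank + 1) return false;
      for (int i = 0; i < static_cast<int>(output.size()); ++i) {
        const int expected = (i < axis) ? input[i] : (i == axis) ? 1 : input[i - 1];
        if (output[i] != expected) return false;
      }
      return true;
    }

    int main() {
      std::printf("%d\n", VerifyExpandDims({2, 3}, {2, 1, 3}, 1));   // 1
      std::printf("%d\n", VerifyExpandDims({2, 3}, {2, 3, 1}, -1));  // 1
      std::printf("%d\n", VerifyExpandDims({2, 3}, {1, 2, 3}, 2));   // 0
      return 0;
    }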
switch (input->type) { case kTfLiteFloat32: { @@ -128,8 +133,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input), flat_size); } break; default: - TF_LITE_KERNEL_LOG( - context, + MicroPrintf( "Expand_Dims only currently supports int8 and float32, got %d.", input->type); return kTfLiteError; @@ -139,14 +143,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace TfLiteRegistration Register_EXPAND_DIMS() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fill.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fill.cc similarity index 68% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fill.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fill.cc index 90a235c..202caef 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fill.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fill.cc @@ -21,6 +21,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { @@ -46,8 +47,6 @@ TfLiteStatus EnsureEq(TfLiteContext* context, const TfLiteIntArray* array, switch (tensor->type) { case kTfLiteInt8: return EnsureEqImpl(context, array, tensor); - case kTfLiteUInt8: - return EnsureEqImpl(context, array, tensor); case kTfLiteInt16: return EnsureEqImpl(context, array, tensor); case kTfLiteInt32: @@ -55,9 +54,8 @@ TfLiteStatus EnsureEq(TfLiteContext* context, const TfLiteIntArray* array, case kTfLiteInt64: return EnsureEqImpl(context, array, tensor); default: - TF_LITE_KERNEL_LOG(context, - "cannot compare int array to tensor of type %d.", - tensor->type); + MicroPrintf("cannot compare int array to tensor of type %d.", + tensor->type); return kTfLiteError; } } @@ -67,14 +65,18 @@ constexpr int kValueTensor = 1; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + // Ensure inputs and outputs exist. - const TfLiteTensor* dims; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kDimsTensor, &dims)); - const TfLiteTensor* value; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kValueTensor, &value)); - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kOutputTensor, &output)); + TfLiteTensor* dims = + micro_context->AllocateTempInputTensor(node, kDimsTensor); + TF_LITE_ENSURE(context, dims != nullptr); + TfLiteTensor* value = + micro_context->AllocateTempInputTensor(node, kValueTensor); + TF_LITE_ENSURE(context, value != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); // The value tensor must be a scalar. 
TF_LITE_ENSURE_EQ(context, NumDimensions(value), 0); @@ -82,10 +84,19 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // The value type and output type must match. TF_LITE_ENSURE_EQ(context, value->type, output->type); - // The dims tensor must match the output tensor shape. As a byproduct, - // ensures the dims tensor is of an integer type. - TF_LITE_ENSURE_OK(context, EnsureEq(context, output->dims, dims)); + // The dimension of the output tensor is known in model already. + TFLITE_DCHECK(output->dims != nullptr); + + if (dims->data.data != nullptr) { + // When the dims tensor is specified in model already (i.e. is not an + // activation tensor), the dims tensor must match the output tensor shape. + // As a byproduct, ensures the dims tensor is of an integer type. + TF_LITE_ENSURE_OK(context, EnsureEq(context, output->dims, dims)); + } + micro_context->DeallocateTempTfLiteTensor(dims); + micro_context->DeallocateTempTfLiteTensor(value); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -105,10 +116,15 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteFloat32: FillImpl(value, output); break; + case kTfLiteInt32: + FillImpl(value, output); + break; + case kTfLiteInt8: + FillImpl(value, output); + break; default: - TF_LITE_KERNEL_LOG( - context, "Fill only currently supports float32 for input 1, got %d.", - TfLiteTypeGetName(value->type)); + MicroPrintf("Fill only currently supports float32 for input 1, got %d.", + TfLiteTypeGetName(value->type)); return kTfLiteError; } @@ -118,14 +134,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace TfLiteRegistration Register_FILL() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor.cc similarity index 79% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor.cc index 9fa0b2b..76c1a19 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,9 +20,8 @@ limitations under the License. 
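The FILL changes above extend Eval to int32 and int8 and only enforce the dims-versus-output check when the dims tensor is constant in the model; every supported type still funnels into the same templated helper that broadcasts one scalar over the output. A small self-contained sketch of that helper, with SDK types replaced by std::vector, follows.

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Broadcast a single scalar over an output buffer, as the FILL kernel does
    // per supported type (float32, and now int32 and int8 after this patch).
    template <typename T>
    void FillFromScalar(T value, std::vector<T>& output) {
      for (T& v : output) v = value;
    }

    int main() {
      std::vector<int8_t> q(6);
      FillFromScalar<int8_t>(42, q);
      std::vector<float> f(4);
      FillFromScalar(1.5f, f);
      std::printf("%d %f\n", q[0], f[3]);
      return 0;
    }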
#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" namespace tflite { -namespace ops { -namespace micro { -namespace floor { + +namespace { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; @@ -39,19 +38,11 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); return kTfLiteOk; } -} // namespace floor + +} // namespace TfLiteRegistration Register_FLOOR() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/floor::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, nullptr, Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor_div.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor_div.cc new file mode 100644 index 0000000..9fc135c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor_div.cc @@ -0,0 +1,130 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_div.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/binary_function.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +// Input/output tensor index. 
+constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + return nullptr; +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +template +TfLiteStatus EvalFloorDiv(TfLiteContext* context, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + const T* denominator_data = tflite::micro::GetTensorData(input2); + + // Validate the denominator. + for (int i = 0; i < tflite::ElementCount(*input2->dims); ++i) { + if (std::equal_to()(denominator_data[i], 0)) { + MicroPrintf("Division by 0"); + return kTfLiteError; + } + } + + bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); + + if (requires_broadcast) { + reference_ops::BroadcastBinaryFunction4DSlow( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), denominator_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), reference_ops::FloorDiv); + } else { + reference_ops::BinaryFunction( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), denominator_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), reference_ops::FloorDiv); + } + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (input1->type) { + case kTfLiteFloat32: { + return EvalFloorDiv(context, input1, input2, output); + } + default: { + MicroPrintf("Type '%s' is not supported by FLOOR_DIV.", + TfLiteTypeGetName(input1->type)); + return kTfLiteError; + } + } +} + +} // namespace + +TfLiteRegistration Register_FLOOR_DIV() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor_mod.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor_mod.cc new file mode 100644 index 0000000..acf4bbc --- /dev/null +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/floor_mod.cc @@ -0,0 +1,128 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/floor_mod.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/binary_function.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +// OLD-TODO(b/117523611): We should factor out a binary_op and put binary ops +// there. +namespace tflite { +namespace { + +// Input/output tensor index. +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +// OLD-TODO(b/117912880): Support quantization. 
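Both new kernels here reduce to a simple element-wise reference function plus an optional 4-D broadcast: FLOOR_DIV (added above, after first rejecting any zero in the denominator tensor) and FLOOR_MOD (in the hunk that continues below). The sketch that follows shows only the scalar semantics those reference functions implement, written independently of the SDK: floor division rounds toward negative infinity, and the matching modulo takes the sign of the divisor.

    #include <cmath>
    #include <cstdio>

    // Floor division: round the quotient toward negative infinity.
    float FloorDivF(float a, float b) { return std::floor(a / b); }

    // Floor modulo: remainder with the sign of the divisor, so that
    // a == FloorDivF(a, b) * b + FloorModF(a, b).
    float FloorModF(float a, float b) {
      float r = std::fmod(a, b);
      if (r != 0.0f && ((r < 0.0f) != (b < 0.0f))) r += b;
      return r;
    }

    int main() {
      std::printf("floor_div(-7, 2) = %g\n", FloorDivF(-7.0f, 2.0f));  // -4
      std::printf("floor_mod(-7, 2) = %g\n", FloorModF(-7.0f, 2.0f));  //  1
      std::printf("floor_mod(7, -2) = %g\n", FloorModF(7.0f, -2.0f));  // -1
      return 0;
    }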
+ +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, output->type); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + return nullptr; +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +template +TfLiteStatus EvalFloorMod(TfLiteContext* context, bool requires_broadcast, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + const T* denominator_data = tflite::micro::GetTensorData(input2); + + if (requires_broadcast) { + reference_ops::BroadcastBinaryFunction4DSlow( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), denominator_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), reference_ops::FloorMod); + } else { + reference_ops::BinaryFunction( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), denominator_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), reference_ops::FloorMod); + } + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + bool requires_broadcast = !tflite::micro::HaveSameShapes(input1, input2); + + switch (input1->type) { + case kTfLiteFloat32: { + return EvalFloorMod(context, requires_broadcast, input1, input2, + output); + } + default: { + MicroPrintf("Type '%s' is not supported by FLOOR_MOD.", + TfLiteTypeGetName(input1->type)); + return kTfLiteError; + } + } +} + +} // namespace + +TfLiteRegistration Register_FLOOR_MOD() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.cc new file mode 100644 index 0000000..27ef622 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.cc @@ -0,0 +1,1809 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 
1 +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct OpData { + OpDataFullyConnected reference_op_data; + + // Conv 1x1 that may be invoked in some cases currently need per channel + // quantization. + int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; + + // Index to buffer for optimizations if applicable. 
+ int buffer_idx; + + int32_t batches; + int32_t accum_depth; + int32_t output_depth; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = + static_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kFullyConnectedInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = micro_context->AllocateTempInputTensor( + node, kFullyConnectedWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kFullyConnectedBiasTensor); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor( + node, kFullyConnectedOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + const RuntimeShape filter_shape = GetTensorShape(filter); + const RuntimeShape output_shape = GetTensorShape(output); + const int filter_dim_count = filter_shape.DimensionsCount(); + const int output_dim_count = output_shape.DimensionsCount(); + cmsis_nn_dims filter_dims; + filter_dims.n = filter_shape.Dims(filter_dim_count - 1); + filter_dims.h = 1; + filter_dims.w = 1; + filter_dims.c = output_shape.Dims(output_dim_count - 1); + + data->accum_depth = filter_shape.Dims(filter_dim_count - 1); + data->batches = FlatSizeSkipDim(output_shape, output_dim_count - 1); + data->output_depth = output_shape.Dims(output_dim_count - 1); + + // Set buffer index to a reset value + data->buffer_idx = -1; + TF_LITE_ENSURE_STATUS(CalculateOpDataFullyConnected( + context, params->activation, input->type, input, filter, bias, output, + &(data->reference_op_data))); + + int32_t buf_size = 0; + + if (input->type == kTfLiteInt16) { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I16 + MicroPrintf("Filter data type %s currently not supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; +#endif + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + buf_size = arm_fully_connected_s16_get_buffer_size(&filter_dims); + } else if (input->type == kTfLiteInt8) { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I8 + MicroPrintf("Filter data type %s currently not supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; +#endif + const RuntimeShape input_shape = GetTensorShape(input); + + TFLITE_DCHECK_GE(output_dim_count, 2); + TFLITE_DCHECK_LE(output_dim_count, 4); + +#if EI_TFLITE_DISABLE_CONV_2D_IN_I8 + buf_size = arm_fully_connected_s8_get_buffer_size(&filter_dims); +#else + if (output_dim_count > 2 && data->accum_depth % 4 == 0) { + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, data->output_depth * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, data->output_depth * sizeof(int32_t))); + + cmsis_nn_dims input_dims; + input_dims.n = data->batches; + input_dims.h = 1; + input_dims.w = 1; + input_dims.c = data->accum_depth; + + buf_size = arm_convolve_1x1_s8_fast_get_buffer_size(&input_dims); + } else { + 
buf_size = arm_fully_connected_s8_get_buffer_size(&filter_dims); + } +#endif + } + + if (filter->type == kTfLiteInt4) { + int filter_size = + RuntimeShape(filter->dims->size, + reinterpret_cast(filter->dims->data)) + .FlatSize(); + context->RequestScratchBufferInArena( + context, filter_size, &data->reference_op_data.filter_buffer_index); + } + + if (buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buf_size, &data->buffer_idx)); + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + if (bias != nullptr) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + + return kTfLiteOk; +} + +void PopulateCommonParams(TfLiteContext* context, + cmsis_nn_per_tensor_quant_params* const quant_params, + cmsis_nn_dims* const input_dims, + cmsis_nn_dims* const filter_dims, + cmsis_nn_dims* const bias_dims, + cmsis_nn_dims* const output_dims, + cmsis_nn_context* const ctx, const OpData& data) { + quant_params->multiplier = data.reference_op_data.output_multiplier; + quant_params->shift = data.reference_op_data.output_shift; + + input_dims->n = data.batches; + input_dims->h = 1; + input_dims->w = 1; + input_dims->c = data.accum_depth; + + filter_dims->n = data.accum_depth; + filter_dims->h = 1; + filter_dims->w = 1; + filter_dims->c = data.output_depth; + + bias_dims->n = 1; + bias_dims->h = 1; + bias_dims->w = 1; + bias_dims->c = data.output_depth; + + output_dims->n = data.batches; + output_dims->h = 1; + output_dims->w = 1; + output_dims->c = data.output_depth; + + ctx->buf = nullptr; + ctx->size = 0; + if (data.buffer_idx > -1) { + ctx->buf = context->GetScratchBuffer(context, data.buffer_idx); + } +} + +TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + const RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + const int output_dim_count = output_shape.DimensionsCount(); + TFLITE_DCHECK_GE(output_dim_count, 2); + TFLITE_DCHECK_LE(output_dim_count, 4); + + cmsis_nn_per_tensor_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &quant_params, &input_dims, &filter_dims, + &bias_dims, &output_dims, &ctx, data); + + const int32_t* bias_data = + tflite::micro::GetOptionalTensorData(bias); + +#if EI_TFLITE_DISABLE_CONV_2D_IN_I8 + cmsis_nn_fc_params fc_params; + fc_params.input_offset = -data.reference_op_data.input_zero_point; + fc_params.output_offset = data.reference_op_data.output_zero_point; + fc_params.filter_offset = 0; + fc_params.activation.min = data.reference_op_data.output_activation_min; + fc_params.activation.max = data.reference_op_data.output_activation_max; + + TF_LITE_ENSURE_EQ( + context, + arm_fully_connected_s8( + &ctx, &fc_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, bias_data, + &output_dims, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); +#else + + if (output_dim_count > 2 && data.accum_depth % 4 == 0) { + cmsis_nn_conv_params conv_params; + conv_params.dilation.h = 1; + conv_params.dilation.w = 1; + conv_params.input_offset = -data.reference_op_data.input_zero_point; + 
conv_params.output_offset = data.reference_op_data.output_zero_point; + conv_params.stride.h = 1; + conv_params.stride.w = 1; + conv_params.padding.h = 0; + conv_params.padding.w = 0; + conv_params.activation.min = data.reference_op_data.output_activation_min; + conv_params.activation.max = data.reference_op_data.output_activation_max; + + cmsis_nn_per_channel_quant_params per_channel_quant_params; + per_channel_quant_params.multiplier = + const_cast(data.per_channel_output_multiplier); + per_channel_quant_params.shift = + const_cast(data.per_channel_output_shift); + + for (int i = 0; i < data.output_depth; i++) { + per_channel_quant_params.multiplier[i] = quant_params.multiplier; + per_channel_quant_params.shift[i] = quant_params.shift; + } + + TF_LITE_ENSURE_EQ( + context, + arm_convolve_1x1_s8_fast( + &ctx, &conv_params, &per_channel_quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, bias_data, + &output_dims, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } else { + cmsis_nn_fc_params fc_params; + fc_params.input_offset = -data.reference_op_data.input_zero_point; + fc_params.output_offset = data.reference_op_data.output_zero_point; + fc_params.filter_offset = 0; + fc_params.activation.min = data.reference_op_data.output_activation_min; + fc_params.activation.max = data.reference_op_data.output_activation_max; + + TF_LITE_ENSURE_EQ( + context, + arm_fully_connected_s8( + &ctx, &fc_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, bias_data, + &output_dims, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } +#endif + + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizedInt16(TfLiteContext* context, TfLiteNode* node, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + cmsis_nn_per_tensor_quant_params quant_params; + cmsis_nn_dims input_dims; + cmsis_nn_dims filter_dims; + cmsis_nn_dims bias_dims; + cmsis_nn_dims output_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &quant_params, &input_dims, &filter_dims, + &bias_dims, &output_dims, &ctx, data); + + const int64_t* bias_data = + tflite::micro::GetOptionalTensorData(bias); + + cmsis_nn_fc_params fc_params; + fc_params.input_offset = -data.reference_op_data.input_zero_point; + fc_params.output_offset = data.reference_op_data.output_zero_point; + fc_params.filter_offset = 0; + fc_params.activation.min = data.reference_op_data.output_activation_min; + fc_params.activation.max = data.reference_op_data.output_activation_max; + + TF_LITE_ENSURE_EQ( + context, + arm_fully_connected_s16( + &ctx, &fc_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, bias_data, + &output_dims, tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto* params = + static_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, 
kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor( + context, data.reference_op_data.filter_buffer_index, filter); + + // Checks in Prepare ensure input, output and filter types are all the same. + switch (input->type) { + case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_F32 + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + const float* bias_data = + tflite::micro::GetOptionalTensorData(bias); + tflite::reference_ops::FullyConnected( + FullyConnectedParamsFloat(params->activation), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), bias_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + case kTfLiteInt8: { + switch (filter_int8.type) { + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I8 + MicroPrintf("Filter data type %s currently not supported.", + TfLiteTypeGetName(filter->type)); + return kTfLiteError; +#endif + return EvalQuantizedInt8(context, node, data, input, &filter_int8, + bias, output); + default: + MicroPrintf("Filter Type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), filter->type); + return kTfLiteError; + } + break; + } + case kTfLiteInt16: { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I16 + MicroPrintf("Filter data type %s currently not supported.", + TfLiteTypeGetName(filter->type)); + return kTfLiteError; +#endif + return EvalQuantizedInt16(context, node, data, input, filter, bias, + output); + } + default: { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + } + return kTfLiteOk; +} + +// Note that the current function names are not ideal at all (this EvalInt8 +// function internally calls EvalQuantizedInt8, and there is similar name +// aliasing in the Eval function too). We will be attempting to have a more +// descriptive naming convention but holding off on that for now, since the +// renaming might be coupled with reducing code duplication and some additional +// refactoring. +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. 
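The CMSIS-NN path above routes int8 fully-connected nodes with more than two output dimensions (and accum_depth divisible by 4) through arm_convolve_1x1_s8_fast, copying the per-tensor multiplier and shift into per-channel arrays because the convolution kernel expects per-channel quantization. The equivalence it exploits is that a fully connected layer is a 1x1 convolution over a [batches, 1, 1, accum_depth] input. Below is a small float-only illustration of that equivalence, with made-up values and no CMSIS or TFLM calls.

    #include <cstdio>
    #include <vector>

    int main() {
      const int batches = 2, accum_depth = 3, output_depth = 2;
      const std::vector<float> input   = {1, 2, 3,  4, 5, 6};           // [batches, accum_depth]
      const std::vector<float> weights = {0.5f, -1, 2,  1, 0, -0.5f};   // [output_depth, accum_depth]
      const std::vector<float> bias    = {0.1f, -0.2f};

      // Fully connected: out[b][o] = bias[o] + sum_d in[b][d] * w[o][d].
      // Viewing the input as a [batches, 1, 1, accum_depth] image, this is the
      // same arithmetic as a 1x1 convolution with output_depth filters, which
      // is what the fast CMSIS-NN path relies on.
      for (int b = 0; b < batches; ++b) {
        for (int o = 0; o < output_depth; ++o) {
          float acc = bias[o];
          for (int d = 0; d < accum_depth; ++d)
            acc += input[b * accum_depth + d] * weights[o * accum_depth + d];
          std::printf("out[%d][%d] = %g\n", b, o, acc);
        }
      }
      return 0;
    }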
+ if (input->type != kTfLiteInt8) { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + + TfLiteEvalTensor filter_int8 = tflite::micro::MakeUnpackedInt4Tensor( + context, data.reference_op_data.filter_buffer_index, filter); + + return EvalQuantizedInt8(context, node, data, input, &filter_int8, bias, + output); +} + +TfLiteStatus EvalInt16(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. + if (input->type != kTfLiteInt16) { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + + return EvalQuantizedInt16(context, node, data, input, filter, bias, output); +} + +} // namespace + +TfLiteRegistration Register_FULLY_CONNECTED() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +TfLiteRegistration Register_FULLY_CONNECTED_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt8); +} + +TfLiteRegistration Register_FULLY_CONNECTED_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, EvalInt16); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" + +#include "mli_api.h" // NOLINT +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct OpData { + // The scaling factor from input to output (aka the 'real multiplier') can + // be represented as a fixed point multiplier plus a left shift. + int32_t output_multiplier; + int output_shift; + // The range of the fused activation layer. For example for kNone and + // uint8_t these would be 0 and 255. + int32_t output_activation_min; + int32_t output_activation_max; + // The index of the temporary tensor where the quantized inputs are cached. + int input_quantized_index; + // Cached tensor zero point values for quantized operations. + int32_t input_zero_point; + int32_t filter_zero_point; + int32_t output_zero_point; + + // The result of checking if MLI optimized version of tensors can be used. + bool is_mli_applicable; + + // Tensors in MLI format. 
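The OpData comments above describe the standard TFLM representation of the input-to-output scale (the "real multiplier") as a 32-bit fixed-point multiplier plus a shift, which is what QuantizeMultiplier computes for the quantized paths in this file. The sketch below is a simplified standalone version of that decomposition and of applying it to an accumulator; rounding and saturation are cruder here than in the SDK's gemmlowp-style helpers.

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Decompose real_multiplier into q * 2^shift, with q a Q31 fixed-point
    // value corresponding to a fraction in [0.5, 1).
    void QuantizeMultiplierSketch(double real_multiplier, int32_t* q, int* shift) {
      int exponent;
      const double fraction = std::frexp(real_multiplier, &exponent);  // in [0.5, 1)
      *q = static_cast<int32_t>(std::llround(fraction * (1ll << 31)));
      *shift = exponent;  // value ~= q / 2^31 * 2^exponent
    }

    // Apply it to an int32 accumulator: returns roughly acc * real_multiplier.
    int32_t MultiplyByQuantizedMultiplierSketch(int32_t acc, int32_t q, int shift) {
      const int64_t prod = static_cast<int64_t>(acc) * q;  // Q31 product
      const int total_shift = 31 - shift;                  // undo 2^31, apply exponent
      return static_cast<int32_t>((prod + (1ll << (total_shift - 1))) >> total_shift);
    }

    int main() {
      int32_t q; int shift;
      QuantizeMultiplierSketch(0.0037, &q, &shift);
      const int32_t acc = 12345;
      std::printf("q=%d shift=%d  %d * 0.0037 ~= %d\n", q, shift, acc,
                  MultiplyByQuantizedMultiplierSketch(acc, q, shift));  // ~46
      return 0;
    }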
+ mutable ops::micro::MliTensorInterface mli_in; + mutable ops::micro::MliTensorInterface mli_weights; + mutable ops::micro::MliTensorInterface mli_bias; + mutable ops::micro::MliTensorInterface mli_out; + +#ifdef MLI_2_0 + mli_fully_connected_cfg* cfg; +#endif +}; + +constexpr int kInputTensor = 0; +constexpr int kWeightsTensor = 1; +constexpr int kBiasTensor = 2; +constexpr int kOutputTensor = 0; + +bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input, + const TfLiteTensor* filter, const TfLiteTensor* bias, + const TfLiteFullyConnectedParams* params, + int32_t output_activation_min, + int32_t output_activation_max) { + // MLI optimized version only supports int8_t datatype and no fused Relu and + // symmetric per-tensor quantization of weights (not per-axis) + bool ret_val = + (filter->type == kTfLiteInt8) && (input->type == kTfLiteInt8) && + (bias->type == kTfLiteInt32) && +#ifndef MLI_2_0 + (params->activation == kTfLiteActNone || + (output_activation_min == -128 && output_activation_max == 127)) && +#endif + (filter->params.zero_point == 0); + return ret_val; +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, + const TfLiteFullyConnectedParams* params, + TfLiteType data_type, const TfLiteTensor* input, + const TfLiteTensor* filter, + const TfLiteTensor* bias, TfLiteTensor* output, + OpData* data) { + TfLiteStatus status = kTfLiteOk; +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + if (data_type != kTfLiteFloat32 && !data->is_mli_applicable) { + double real_multiplier = 0.0; + TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( + context, input, filter, bias, output, &real_multiplier)); + int exponent; + QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent); + data->output_shift = -exponent; + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + } +#endif + return status; +} + +} // namespace + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = + static_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kWeightsTensor); + TfLiteTensor* bias = micro_context->AllocateTempInputTensor(node, kBiasTensor); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG(context, input->type == filter->type, + "Hybrid models are not supported on TFLite Micro."); + + data->input_zero_point = input->params.zero_point; + data->filter_zero_point = filter->params.zero_point; + data->output_zero_point = output->params.zero_point; + + TfLiteStatus status = CalculateOpData(context, params, input->type, input, + filter, bias, output, data); + + data->is_mli_applicable = + IsMliApplicable(context, input, filter, bias, params, + data->output_activation_min, data->output_activation_max); + + if (input->type == kTfLiteInt8 && data->is_mli_applicable) { +#if 
EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; +#endif + data->mli_in = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_weights = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_bias = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_out = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + + ops::micro::ConvertToMliTensor(input, &data->mli_in); + ops::micro::ConvertToMliTensor(filter, &data->mli_weights); + ops::micro::ConvertToMliTensor(bias, &data->mli_bias); +#ifdef MLI_2_0 + ops::micro::AdjustBiasTensor(&data->mli_bias, &data->mli_in, + &data->mli_weights); +#endif + ops::micro::ConvertToMliTensor(output, &data->mli_out); + +#ifdef MLI_2_0 + if (data->output_activation_min == -128 && + data->output_activation_max == 127) { + data->cfg->relu.type = MLI_RELU_NONE; + } else if (params->activation == kTfLiteActRelu) { + data->cfg->relu.type = MLI_RELU_GEN; + } else if (params->activation == kTfLiteActRelu6) { + data->cfg->relu.type = MLI_RELU_6; + } else if (params->activation == kTfLiteActReluN1To1) { + data->cfg->relu.type = MLI_RELU_1; + } else { + data->cfg->relu.type = MLI_RELU_NONE; + } +#endif + + /* The input tensor can have more than 2 dimensions. for the compute this + doesn't make any difference because all the inputs or a batch entry will + be used anyway. because the MLI kernel doesn't recognize the multiple + dimensions, the tensor shape is casted to a {batchnum, inputsize} shape. 
*/ + data->mli_in.Shape()[0] = data->mli_out.Shape()[0]; +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) + data->mli_in.Shape()[1] = data->mli_weights.Shape()[0]; +#else + data->mli_in.Shape()[1] = data->mli_weights.Shape()[1]; +#endif + data->mli_in.Shape()[2] = 0; + data->mli_in.Shape()[3] = 0; + data->mli_in.MemStride()[0] = data->mli_in.Shape()[1]; + data->mli_in.MemStride()[1] = 0; + *data->mli_in.Rank() = 2; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(bias); + micro_context->DeallocateTempTfLiteTensor(output); + return status; +} + +TfLiteStatus EvalMliQuantizedInt8(TfLiteContext* context, TfLiteNode* node, + const TfLiteFullyConnectedParams* params, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + ops::micro::MliTensorAttachBuffer(input, &data.mli_in); + ops::micro::MliTensorAttachBuffer(filter, &data.mli_weights); + ops::micro::MliTensorAttachBuffer(bias, &data.mli_bias); + ops::micro::MliTensorAttachBuffer(output, &data.mli_out); + + // Tensors for data in fast (local) memory and config to copy data from + // external to local memory + mli_tensor weights_local = *data.mli_weights.MliTensor(); + mli_tensor bias_local = *data.mli_bias.MliTensor(); + mli_tensor in_local = *data.mli_in.MliTensor(); + mli_tensor out_local = *data.mli_out.MliTensor(); + + ops::micro::MliTensorInterface weights_local_interface(&weights_local); + ops::micro::MliTensorInterface bias_local_interface(&bias_local); + ops::micro::MliTensorInterface in_local_interface(&in_local); + ops::micro::MliTensorInterface out_local_interface(&out_local); + + mli_mov_cfg_t copy_config; + mli_mov_cfg_for_copy(©_config); +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) + const int weight_out_dimension = 1; +#else + const int weight_out_dimension = 0; +#endif + // bias has only 1 dimension + const int bias_out_ch_dimension = 0; + const int out_tensor_dimension = 1; + const int input_size_dimension = 1; + int slice_size = data.mli_weights.Shape()[weight_out_dimension]; + + /* allocate the local buffers, and compute the slice size */ + TF_LITE_ENSURE_STATUS( + ops::micro::get_arc_scratch_buffer_for_fully_connect_tensors( + context, &in_local_interface, &weights_local_interface, + &bias_local_interface, &out_local_interface)); + TF_LITE_ENSURE_STATUS(ops::micro::arc_scratch_buffer_calc_slice_size_weights( + &weights_local_interface, &bias_local_interface, weight_out_dimension, + &slice_size)); + + int max_out_slice_size = *out_local_interface.DataCapacity() / + mli_hlp_tensor_element_size(&out_local); + + if (slice_size > max_out_slice_size) slice_size = max_out_slice_size; + + /* is_local indicates that the tensor is already in local memory, + so in that case the original tensor can be used, + and there is no need to copy it to the local tensor*/ + const bool in_is_local = + in_local_interface.Data() == data.mli_in.Data(); + const bool out_is_local = + out_local_interface.Data() == data.mli_out.Data(); + const bool b_is_local = + bias_local_interface.Data() == data.mli_bias.Data(); +#ifndef MLI_2_0_KRNL_TEST + const bool w_is_local = + weights_local_interface.Data() == data.mli_weights.Data(); +#endif + +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) + ops::micro::TensorSlicer w_slice(data.mli_weights.MliTensor(), + weight_out_dimension, slice_size, 0, 0, 0, + true); +#else + ops::micro::TensorSlicer 
w_slice(data.mli_weights.MliTensor(), + weight_out_dimension, slice_size); +#endif + ops::micro::TensorSlicer b_slice(data.mli_bias.MliTensor(), + bias_out_ch_dimension, slice_size); + ops::micro::TensorSlicer out_ch_slice(data.mli_out.MliTensor(), + out_tensor_dimension, slice_size, 0, 0, + 0, true); + +#ifdef MLI_2_0_KRNL_TEST + mli_tensor* w_ptr = &weights_local; +#else + mli_tensor* w_ptr = w_is_local ? w_slice.Sub() : &weights_local; +#endif + mli_tensor* b_ptr = b_is_local ? b_slice.Sub() : &bias_local; + + void* input_buffer_ptr = NULL; + + while (!w_slice.Done()) { +#if defined(MLI_2_0) && !defined(MLI_2_0_KRNL_TEST) + w_ptr->el_params.sa.scale.mem.pi16 = NULL; + b_ptr->el_params.sa.scale.mem.pi16 = NULL; +#endif + +#ifndef MLI_2_0_KRNL_TEST + mli_mov_tensor_sync(w_slice.Sub(), ©_config, w_ptr); +#endif + mli_mov_tensor_sync(b_slice.Sub(), ©_config, b_ptr); + + // Slice the input over the batches (one at a time with the size of a + // complete input) + ops::micro::TensorSlicer in_slice( + data.mli_in.MliTensor(), input_size_dimension, + data.mli_in.Shape()[input_size_dimension]); + + /* output tensor is already sliced in the output size dimension. + out_ch_slice.Sub() is the tensor for the amount of output size of this + iteration of the weight slice loop. This tensor needs to be further + sliced over the batch */ + ops::micro::TensorSlicer out_slice(out_ch_slice.Sub(), out_tensor_dimension, + slice_size); + + /* setup the pointers to the local or remote tensor to make the code + * inside the loop easier. */ + mli_tensor* in_ptr = in_is_local ? in_slice.Sub() : &in_local; + mli_tensor* out_ptr = out_is_local ? out_slice.Sub() : &out_local; + +#ifdef MLI_2_0_KRNL_TEST + /* Permute weights tensor to the HWCN layout */ + // Assertion here to prevent usage non-contiguous buffer memory. 
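// [Editor's sketch, illustrative only - hypothetical names, not the ARC MLI
// API.] The TensorSlicer setup above exists because the weights usually do
// not fit the fast local (CCM) memory, so they are streamed through a small
// scratch buffer `slice_size` output channels at a time; conceptually:

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Multiply an [out_ch x in_depth] int8 weight matrix by an input vector,
// staging a few rows at a time in a small local buffer (the role played by
// mli_mov_tensor_sync() plus the w_slice/out_slice loops above).
void SlicedMatVec(const int8_t* weights, const int8_t* input, int32_t* output,
                  int out_ch, int in_depth, int8_t* local_buf,
                  int local_capacity_bytes) {
  const int slice_rows = std::max(1, local_capacity_bytes / in_depth);
  for (int start = 0; start < out_ch; start += slice_rows) {
    const int rows = std::min(slice_rows, out_ch - start);
    // Stage this weight slice in local memory before computing with it.
    std::memcpy(local_buf, weights + start * in_depth,
                static_cast<std::size_t>(rows) * in_depth);
    for (int r = 0; r < rows; ++r) {
      int32_t acc = 0;
      for (int d = 0; d < in_depth; ++d) {
        acc += static_cast<int32_t>(local_buf[r * in_depth + d]) * input[d];
      }
      output[start + r] = acc;
    }
  }
}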
+ if (data.mli_out.Shape()[out_tensor_dimension] != + out_slice.Sub()->shape[0]) { + MicroPrintf("Slicing is not supported with real-time permutation."); + return kTfLiteError; + } + mli_permute_cfg permute_cfg = {{1, 0, 2, 3}}; + ops::micro::permute_weights(data.mli_weights.MliTensor(), &permute_cfg, + w_ptr, &out_ptr->data); +#endif + + while (!out_slice.Done()) { + if (!out_is_local) { + ops::micro::PrepareLocalTensor(out_slice.Sub(), &out_local); + ops::micro::PrepareLocalTensor(in_slice.Sub(), &in_local); + } + // if same input copy as previous iteration, skip the copy of input +#ifdef MLI_2_0 + if (in_slice.Sub()->data.mem.pi8 != input_buffer_ptr) { + mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); + input_buffer_ptr = in_slice.Sub()->data.mem.pi8; + } + mli_fully_connected_cfg cfg; + cfg.relu.type = MLI_RELU_NONE; + mli_krn_fully_connected_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, &cfg, out_ptr); +#else + if (in_slice.Sub()->data != input_buffer_ptr) { + mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); + input_buffer_ptr = in_slice.Sub()->data; + } + mli_krn_fully_connected_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, out_ptr); +#endif + + mli_mov_tensor_sync(out_ptr, ©_config, out_slice.Sub()); + + in_slice.Next(); + out_slice.Next(); + } + w_slice.Next(); + b_slice.Next(); + out_ch_slice.Next(); + } + return kTfLiteOk; +} + +TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, + const OpData& data, const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + tflite::FullyConnectedParams op_params; + op_params.input_offset = -data.input_zero_point; + op_params.weights_offset = -data.filter_zero_point; + op_params.output_offset = data.output_zero_point; + op_params.output_multiplier = data.output_multiplier; + op_params.output_shift = -data.output_shift; + op_params.quantized_activation_min = data.output_activation_min; + op_params.quantized_activation_max = data.output_activation_max; + +#define TF_LITE_FULLY_CONNECTED(output_data_type) \ + reference_ops::FullyConnected( \ + op_params, tflite::micro::GetTensorShape(input), \ + tflite::micro::GetTensorData(input), \ + tflite::micro::GetTensorShape(filter), \ + tflite::micro::GetTensorData(filter), \ + tflite::micro::GetTensorShape(bias), \ + tflite::micro::GetTensorData(bias), \ + tflite::micro::GetTensorShape(output), \ + tflite::micro::GetTensorData(output)) + + switch (output->type) { + case kTfLiteUInt8: + #if EI_TFLITE_DISABLE_FULLY_CONNECTED_OUT_U8 + MicroPrintf("Type %s currently not supported.", + TfLiteTypeGetName(filter->type)); + return kTfLiteError; + #endif + + TF_LITE_FULLY_CONNECTED(uint8_t); + break; + case kTfLiteInt16: + #if EI_TFLITE_DISABLE_FULLY_CONNECTED_OUT_I16 + MicroPrintf("Type %s currently not supported.", + TfLiteTypeGetName(filter->type)); + return kTfLiteError; + #endif + + TF_LITE_FULLY_CONNECTED(int16_t); + break; + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + + return kTfLiteOk; +#else + MicroPrintf("Node configuration is not supported by ARC MLI Library."); + return kTfLiteError; +#endif + } +} + +TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLiteFusedActivation activation, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + float output_activation_min, 
output_activation_max; + CalculateActivationRange(activation, &output_activation_min, + &output_activation_max); + tflite::FullyConnectedParams op_params; + op_params.float_activation_min = output_activation_min; + op_params.float_activation_max = output_activation_max; + tflite::reference_ops::FullyConnected( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; +#else + MicroPrintf("Type %s (%d) is not supported by ARC MLI Library.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto* params = + static_cast(node->builtin_data); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kBiasTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. + switch (input->type) { + case kTfLiteFloat32: + #if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_F32 + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + return EvalFloat(context, node, params->activation, input, filter, bias, + output); + case kTfLiteInt8: + #if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I8 + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + if (data.is_mli_applicable) { + return EvalMliQuantizedInt8(context, node, params, data, input, filter, + bias, output); + } else { + return EvalQuantized(context, node, data, input, filter, bias, output); + } + + case kTfLiteUInt8: + #if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_U8 + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + return EvalQuantized(context, node, data, input, filter, bias, output); + + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteRegistration Register_FULLY_CONNECTED() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP == 1 +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include 
"edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "sl_mvp_ml_fully_connected.h" + +namespace tflite { +namespace sl { +namespace fully_connected { + +struct OpData { + int32_t output_multiplier; + int output_shift; + sli_mvp_ml_fully_connected_s8_params_t op_params; + float16_t *bias_fp16; + bool use_mvp; +}; + +constexpr int kInputTensor = 0; +constexpr int kWeightsTensor = 1; +constexpr int kBiasTensor = 2; +constexpr int kOutputTensor = 0; + +// TODO(b/169801227): This global struct is needed for the linker to drop unused +// code (for example, by using Register_FULLY_CONNECTED_INT8 instead of +// Register_FULLY_CONNECTED). +TfLiteRegistration fully_connected_registration; + +sli_shape_t dims2shape(const TfLiteIntArray *dim) +{ + TFLITE_DCHECK(dim->size <= 4); + + sli_shape_t shape = {0}; + for (int i = 0; i < dim->size; i++) { + shape.dim[i] = dim->data[i]; + } + return shape; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + TfLiteFullyConnectedParams* params = + reinterpret_cast(node->builtin_data); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + const TfLiteTensor* weight = GetInput(context, node, kWeightsTensor); + const TfLiteTensor* bias = GetInput(context, node, kBiasTensor); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + int32_t output_min; + int32_t output_max; + float16_t *bias_data = nullptr; + int bias_len = 0; + + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + + if (!(input->type == kTfLiteFloat32 || input->type == kTfLiteInt8)) { + // Unsupported datatype used by model + return kTfLiteError; + } + + if (bias) { + RuntimeShape bias_shape = GetTensorShape(bias); + bias_len = bias_shape.FlatSize(); + } + + if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &output_min, &output_max)); + + double real_multiplier = 0.0; + TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( + context, input, weight, bias, output, &real_multiplier)); + + data->op_params.input = GetTensorData(input); + data->op_params.input_shape = dims2shape(input->dims); + data->op_params.input_offset = -input->params.zero_point; + data->op_params.weight = GetTensorData(weight); + data->op_params.weight_shape = dims2shape(weight->dims); + data->op_params.weight_offset = -weight->params.zero_point; + data->op_params.bias = nullptr; + data->op_params.bias_length = bias_len; + data->op_params.output = GetTensorData(output); + data->op_params.output_shape = dims2shape(output->dims); + data->op_params.output_offset = output->params.zero_point; + data->op_params.output_multiplier = sli_mvp_ml_fully_connected_output_multiplier(real_multiplier); + data->op_params.activation_min = static_cast(output_min); + data->op_params.activation_max = static_cast(output_max); + + data->use_mvp = sli_mvp_ml_fully_connected_s8_is_supported(&data->op_params); + + if (data->use_mvp && bias) { + // Convert int32_t to float16_t as the 
MVP does not support loading int32 values. + const int32_t *bias_src = GetTensorData(bias); + bias_data = static_cast(context->AllocatePersistentBuffer(context, bias_len * sizeof(float16_t))); + if (bias_data == nullptr) { + return kTfLiteError; + } + sl_status_t status = sli_mvp_ml_fully_connected_bias_convert(bias_src, bias_data, bias_len); + if (status != SL_STATUS_OK) { + return kTfLiteError; + } + data->op_params.bias = bias_data; + } + + if (!data->use_mvp) { + // In this case we have to convert the output scale factor to a + // value in the TensorFlow fixed point format (Q.31 + shift) + int exponent; + QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent); + data->output_shift = -exponent; + } + } + + return kTfLiteOk; +} + +TfLiteStatus EvalQuantizedInt8_MVP(TfLiteContext* context, TfLiteNode* node, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + sli_mvp_ml_fully_connected_s8_params_t *params = const_cast(&data.op_params); + params->input = tflite::micro::GetTensorData(input); + params->output = tflite::micro::GetTensorData(output); + + sl_status_t result = sli_mvp_ml_fully_connected_s8(params); + if (result == SL_STATUS_OK) { + return kTfLiteOk; + } else { + return kTfLiteError; + } +} + +TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, + const OpData& data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) { + if (data.use_mvp && input->type == kTfLiteInt8) { + return EvalQuantizedInt8_MVP(context, node, data, input, filter, bias, output); + } + + // The 'if' condition can be removed when null handling of bias is added to + // arm_fully_connected_s8 + if (nullptr != tflite::micro::GetTensorData(bias)) { + const RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); + const int batches = output_shape.Dims(0); + const int output_depth = output_shape.Dims(1); + const RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); + const int filter_dim_count = filter_shape.DimensionsCount(); + const int accum_depth = filter_shape.Dims(filter_dim_count - 1); + const RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + + cmsis_nn_fc_params fc_params; + fc_params.input_offset = data.op_params.input_offset; + fc_params.output_offset = data.op_params.output_offset; + fc_params.filter_offset = data.op_params.weight_offset; + fc_params.activation.min = data.op_params.activation_min; + fc_params.activation.max = data.op_params.activation_max; + + cmsis_nn_per_tensor_quant_params quant_params; + quant_params.multiplier = data.output_multiplier; + // TODO(b/138810107): Figure out whether output shift should be inverted + quant_params.shift = -data.output_shift; + + cmsis_nn_dims input_dims; + input_dims.n = batches; + input_dims.h = 1; + input_dims.w = 1; + input_dims.c = accum_depth; + + cmsis_nn_dims filter_dims; + filter_dims.n = accum_depth; + filter_dims.h = 1; + filter_dims.w = 1; + filter_dims.c = output_depth; + + cmsis_nn_dims bias_dims; + bias_dims.n = 1; + bias_dims.h = 1; + bias_dims.w = 1; + bias_dims.c = output_depth; + + cmsis_nn_dims output_dims; + output_dims.n = batches; + output_dims.h = 1; + output_dims.w = 1; + output_dims.c = output_depth; + + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + TF_LITE_ENSURE_EQ( + context, + arm_fully_connected_s8( + &ctx, 
&fc_params, &quant_params, &input_dims, + tflite::micro::GetTensorData(input), &filter_dims, + tflite::micro::GetTensorData(filter), &bias_dims, + tflite::micro::GetTensorData(bias), &output_dims, + tflite::micro::GetTensorData(output)), + ARM_MATH_SUCCESS); + } else { + tflite::FullyConnectedParams op_params; + op_params.input_offset = data.op_params.input_offset; + op_params.weights_offset = data.op_params.weight_offset; + op_params.output_offset = data.op_params.output_offset; + op_params.output_multiplier = data.output_multiplier; + // TODO(b/138810107): Figure out whether output shift should be inverted + op_params.output_shift = -data.output_shift; + op_params.quantized_activation_min = data.op_params.activation_min; + op_params.quantized_activation_max = data.op_params.activation_max; + + reference_integer_ops::FullyConnected( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + return kTfLiteOk; +} + +TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLiteFusedActivation activation, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { + float output_activation_min, output_activation_max; + CalculateActivationRange(activation, &output_activation_min, + &output_activation_max); + tflite::FullyConnectedParams op_params; + op_params.float_activation_min = output_activation_min; + op_params.float_activation_max = output_activation_max; + tflite::reference_ops::FullyConnected( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto* params = + static_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + switch (input->type) { + case kTfLiteFloat32: + #if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + return EvalFloat(context, node, params->activation, input, filter, bias, + output); + case kTfLiteInt8: + #if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + return EvalQuantizedInt8(context, node, data, input, filter, bias, + output); + + default: + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + 
return kTfLiteOk; +} + +// Note that the current function names are not ideal at all (this EvalInt8 +// function internally calls EvalQuantizedInt8, and there is similar name +// aliasing in the Eval function too). We will be attempting to have a more +// descriptive naming convention but holding off on that for now, since the +// renaming might be coupled with reducing code duplication and some additional +// refactoring. +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. + if (input->type != kTfLiteInt8) { + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + + return EvalQuantizedInt8(context, node, data, input, filter, bias, output); +} + +} // namespace fully_connected +} // namespace sl + +TfLiteRegistration Register_FULLY_CONNECTED() { + return {/*init*/sl::fully_connected::Init, + /*free*/nullptr, + /*prepare*/sl::fully_connected::Prepare, + /*invoke*/sl::fully_connected::Eval, + /*profiling_string*/nullptr, + /*builtin_code*/0, + /*custom_name*/nullptr, + /*version*/0}; +} + +TfLiteRegistration Register_FULLY_CONNECTED_INT8() { + return {/*init*/sl::fully_connected::Init, + /*free*/nullptr, + /*prepare*/sl::fully_connected::Prepare, + /*invoke*/sl::fully_connected::EvalInt8, + /*profiling_string*/nullptr, + /*builtin_code*/0, + /*custom_name*/nullptr, + /*version*/0}; +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN == 1 +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +#if ESP_NN +#include "edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h" +#endif + +#include + +long long fc_total_time = 0; + +namespace tflite { +namespace { + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, + sizeof(OpDataFullyConnected)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto* data = static_cast(node->user_data); + const auto params = + static_cast(node->builtin_data); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kFullyConnectedInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = micro_context->AllocateTempInputTensor( + node, kFullyConnectedWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kFullyConnectedBiasTensor); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor( + node, kFullyConnectedOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG(context, input->type == filter->type, + "Hybrid models are not supported on TFLite Micro."); + + TF_LITE_ENSURE_OK(context, CalculateOpDataFullyConnected( + context, params->activation, input->type, + input, filter, bias, output, data)); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + if (bias != nullptr) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto* params = + static_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const auto& data = + *(static_cast(node->user_data)); + + long long start_time = esp_timer_get_time(); + // Checks in Prepare ensure 
input, output and filter types are all the same. + switch (input->type) { + case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + tflite::reference_ops::FullyConnected( + FullyConnectedParamsFloat(params->activation), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + + case kTfLiteInt8: { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + const int32_t* bias_data = + nullptr != bias ? tflite::micro::GetTensorData(bias) + : nullptr; +#if ESP_NN + const RuntimeShape& filter_shape = tflite::micro::GetTensorShape(filter); + const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output); + const int filter_dim_count = filter_shape.DimensionsCount(); + const int batches = output_shape.Dims(0); + const int output_depth = output_shape.Dims(1); + TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2)); + const int accum_depth = filter_shape.Dims(filter_dim_count - 1); + + const int8_t *input_data = tflite::micro::GetTensorData(input); + int8_t *output_data = tflite::micro::GetTensorData(output); + const int8_t *filter_data = tflite::micro::GetTensorData(filter); + + for (int b = 0; b < batches; ++b) { + esp_nn_fully_connected_s8(input_data, -data.input_zero_point, + accum_depth, + filter_data, -data.filter_zero_point, + bias_data, output_data, output_depth, + data.output_zero_point, + data.output_shift, data.output_multiplier, + data.output_activation_min, + data.output_activation_max); + input_data += accum_depth; + output_data += output_depth; + } +#else + tflite::reference_integer_ops::FullyConnected( + FullyConnectedParamsQuantized(data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), bias_data, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#endif + break; + } + + case kTfLiteUInt8: { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_U8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + tflite::reference_ops::FullyConnected( + FullyConnectedParamsQuantized(data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + default: { + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + } + fc_total_time += esp_timer_get_time() - start_time; + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_FULLY_CONNECTED() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite + +#else +/* Copyright 2022 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, + sizeof(OpDataFullyConnected)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto* data = static_cast(node->user_data); + const auto params = + static_cast(node->builtin_data); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kFullyConnectedInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = micro_context->AllocateTempInputTensor( + node, kFullyConnectedWeightsTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kFullyConnectedBiasTensor); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor( + node, kFullyConnectedOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + if (filter->type == kTfLiteInt4) { + int filter_size = + RuntimeShape(filter->dims->size, + reinterpret_cast(filter->dims->data)) + .FlatSize(); + context->RequestScratchBufferInArena(context, filter_size, + &data->filter_buffer_index); + } + + TF_LITE_ENSURE_OK(context, CalculateOpDataFullyConnected( + context, params->activation, input->type, + input, filter, bias, output, data)); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + if (bias != nullptr) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + const auto* params = + static_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); + const TfLiteEvalTensor* bias = + 
tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + + const auto& data = + *(static_cast(node->user_data)); + + // Checks in Prepare ensure input, output and filter types are all the same. + switch (input->type) { + case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_F32 + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + tflite::reference_ops::FullyConnected( + FullyConnectedParamsFloat(params->activation), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + + case kTfLiteInt8: { +#if EI_TFLITE_DISABLE_FULLY_CONNECTED_IN_I8 + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + switch (filter->type) { + case kTfLiteInt4: { + int8_t* unpacked_filter_data = static_cast( + context->GetScratchBuffer(context, data.filter_buffer_index)); + tflite::tensor_utils::UnpackDenseInt4IntoInt8( + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(filter).FlatSize(), + unpacked_filter_data); + tflite::reference_integer_ops::FullyConnected( + FullyConnectedParamsQuantized(data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), unpacked_filter_data, + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + case kTfLiteInt8: { + tflite::reference_integer_ops::FullyConnected( + FullyConnectedParamsQuantized(data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), input->type); + return kTfLiteError; + } + } + break; + } + + case kTfLiteInt16: { + switch (filter->type) { + case kTfLiteInt8: { + tflite::reference_integer_ops::FullyConnected( + FullyConnectedParamsQuantized(data), + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(filter->type), input->type); + return kTfLiteError; + } + } + break; + } + + default: { + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_FULLY_CONNECTED() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite + +#endif diff --git 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.cpp deleted file mode 100644 index a7535c8..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.cpp +++ /dev/null @@ -1,870 +0,0 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels -#include "../../../../classifier/ei_classifier_config.h" -#if 0 == 1 -/* noop */ -#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h" - -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -struct OpData { - OpDataFullyConnected reference_op_data; - - // Index to buffer for optimizations if applicable. - int buffer_idx; -}; - -// TODO(b/169801227): This global struct is needed for the linker to drop unused -// code (for example, by using Register_FULLY_CONNECTED_INT8 instead of -// Register_FULLY_CONNECTED). 
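// [Editor's sketch, illustrative only - not part of the deleted file.] The
// TODO above is about selecting the int8-only registration when the op
// resolver is built, so the linker can drop the float/uint8 Eval paths.
// Roughly (include path assumed to match this vendored tree):

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h"

TfLiteStatus RegisterOps(tflite::MicroMutableOpResolver<1>& resolver) {
  // Register_FULLY_CONNECTED() would keep every Eval variant reachable; the
  // INT8-specific registration only wires up the int8 path.
  return resolver.AddFullyConnected(tflite::Register_FULLY_CONNECTED_INT8());
}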
-TfLiteRegistration fully_connected_registration; - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = - static_cast(node->builtin_data); - - const TfLiteTensor* input = - GetInput(context, node, kFullyConnectedInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = - GetInput(context, node, kFullyConnectedWeightsTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kFullyConnectedBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kFullyConnectedOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - // Set buffer index to a reset value - data->buffer_idx = -1; - TF_LITE_ENSURE_STATUS(CalculateOpDataFullyConnected( - context, params->activation, input->type, input, filter, bias, output, - &(data->reference_op_data))); - - if (input->type == kTfLiteInt8) { - RuntimeShape filter_shape = GetTensorShape(filter); - RuntimeShape output_shape = GetTensorShape(output); - - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); - const int filter_dim_count = filter_shape.DimensionsCount(); - cmsis_nn_dims filter_dims; - filter_dims.n = filter_shape.Dims(filter_dim_count - 1); - filter_dims.h = 1; - filter_dims.w = 1; - filter_dims.c = output_shape.Dims(1); - - const int32_t buf_size = - arm_fully_connected_s8_get_buffer_size(&filter_dims); - - if (buf_size > 0) { - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, buf_size, &data->buffer_idx)); - } else { - data->buffer_idx = -1; - } - } - return kTfLiteOk; -} - -TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, - const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - const RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2); - const int batches = output_shape.Dims(0); - const int output_depth = output_shape.Dims(1); - const RuntimeShape filter_shape = tflite::micro::GetTensorShape(filter); - const int filter_dim_count = filter_shape.DimensionsCount(); - const int accum_depth = filter_shape.Dims(filter_dim_count - 1); - const RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - - cmsis_nn_fc_params fc_params; - fc_params.input_offset = -data.reference_op_data.input_zero_point; - fc_params.output_offset = data.reference_op_data.output_zero_point; - fc_params.filter_offset = -data.reference_op_data.filter_zero_point; - fc_params.activation.min = data.reference_op_data.output_activation_min; - fc_params.activation.max = data.reference_op_data.output_activation_max; - - cmsis_nn_per_tensor_quant_params quant_params; - quant_params.multiplier = data.reference_op_data.output_multiplier; - quant_params.shift = data.reference_op_data.output_shift; - - cmsis_nn_dims input_dims; - input_dims.n = batches; - input_dims.h = 1; - input_dims.w = 1; - input_dims.c = 
accum_depth; - - cmsis_nn_dims filter_dims; - filter_dims.n = accum_depth; - filter_dims.h = 1; - filter_dims.w = 1; - filter_dims.c = output_depth; - - cmsis_nn_dims bias_dims; - bias_dims.n = 1; - bias_dims.h = 1; - bias_dims.w = 1; - bias_dims.c = output_depth; - - cmsis_nn_dims output_dims; - output_dims.n = batches; - output_dims.h = 1; - output_dims.w = 1; - output_dims.c = output_depth; - - cmsis_nn_context ctx; - ctx.buf = nullptr; - ctx.size = 0; - - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - } - - TF_LITE_ENSURE_EQ( - context, - arm_fully_connected_s8( - &ctx, &fc_params, &quant_params, &input_dims, - tflite::micro::GetTensorData(input), &filter_dims, - tflite::micro::GetTensorData(filter), &bias_dims, - tflite::micro::GetTensorData(bias), &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - const auto* params = - static_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - // Checks in Prepare ensure input, output and filter types are all the same. - switch (input->type) { - case kTfLiteFloat32: { - tflite::reference_ops::FullyConnected( - FullyConnectedParamsFloat(params->activation), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - case kTfLiteInt8: { - return EvalQuantizedInt8(context, node, data, input, filter, bias, - output); - } - case kTfLiteUInt8: { - tflite::reference_ops::FullyConnected( - FullyConnectedParamsQuantized(data.reference_op_data), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - default: { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - } - return kTfLiteOk; -} - -// Note that the current function names are not ideal at all (this EvalInt8 -// function internally calls EvalQuantizedInt8, and there is similar name -// aliasing in the Eval function too). We will be attempting to have a more -// descriptive naming convention but holding off on that for now, since the -// renaming might be coupled with reducing code duplication and some additional -// refactoring. 
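// [Editor's sketch, illustrative only - simplified reference math, not the
// CMSIS-NN implementation above.] Per output channel, arm_fully_connected_s8
// and the reference kernel compute essentially the following; note that
// fc_params.input_offset / filter_offset hold the negated zero points:

#include <algorithm>
#include <cmath>
#include <cstdint>

int8_t QuantizedFullyConnectedElement(
    const int8_t* input, const int8_t* filter_row, int32_t bias_value,
    int accum_depth, int32_t input_offset, int32_t filter_offset,
    double real_multiplier,  // stands in for the (multiplier, shift) pair
    int32_t output_offset, int32_t act_min, int32_t act_max) {
  int64_t acc = bias_value;
  for (int d = 0; d < accum_depth; ++d) {
    acc += static_cast<int64_t>(input[d] + input_offset) *
           static_cast<int64_t>(filter_row[d] + filter_offset);
  }
  // The real kernels rescale with the integer multiplier/shift pair; a float
  // multiply keeps this sketch short.
  int32_t scaled = static_cast<int32_t>(std::lround(acc * real_multiplier));
  scaled += output_offset;
  scaled = std::min(act_max, std::max(act_min, scaled));
  return static_cast<int8_t>(scaled);
}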
-TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - // Checks in Prepare ensure input, output and filter types are all the same. - if (input->type != kTfLiteInt8) { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - - return EvalQuantizedInt8(context, node, data, input, filter, bias, output); -} - -} // namespace - -TfLiteRegistration Register_FULLY_CONNECTED() { - fully_connected_registration.init = Init; - fully_connected_registration.free = nullptr; - fully_connected_registration.prepare = Prepare; - fully_connected_registration.invoke = Eval; - fully_connected_registration.profiling_string = nullptr; - fully_connected_registration.builtin_code = 0; - fully_connected_registration.custom_name = nullptr; - fully_connected_registration.version = 0; - return fully_connected_registration; -} - -TfLiteRegistration Register_FULLY_CONNECTED_INT8() { - fully_connected_registration.init = Init; - fully_connected_registration.free = nullptr; - fully_connected_registration.prepare = Prepare; - fully_connected_registration.invoke = EvalInt8; - fully_connected_registration.profiling_string = nullptr; - fully_connected_registration.builtin_code = 0; - fully_connected_registration.custom_name = nullptr; - fully_connected_registration.version = 0; - return fully_connected_registration; -} - -} // namespace tflite - -#elif EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" - -#include "mli_api.h" // NOLINT -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -struct OpData { - // The scaling factor from input to output (aka the 'real multiplier') can - // be represented as a fixed point multiplier plus a left shift. - int32_t output_multiplier; - int output_shift; - // The range of the fused activation layer. For example for kNone and - // uint8_t these would be 0 and 255. - int32_t output_activation_min; - int32_t output_activation_max; - // The index of the temporary tensor where the quantized inputs are cached. - int input_quantized_index; - // Cached tensor zero point values for quantized operations. - int32_t input_zero_point; - int32_t filter_zero_point; - int32_t output_zero_point; - - // The result of checking if MLI optimized version of tensors can be used. - bool is_mli_applicable; - - // Tensors in MLI format. 
- mli_tensor* mli_in; - mli_tensor* mli_weights; - mli_tensor* mli_bias; - mli_tensor* mli_out; -}; - -constexpr int kInputTensor = 0; -constexpr int kWeightsTensor = 1; -constexpr int kBiasTensor = 2; -constexpr int kOutputTensor = 0; - -bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input, - const TfLiteTensor* filter, const TfLiteTensor* bias, - const TfLiteFullyConnectedParams* params) { - // MLI optimized version only supports int8_t datatype and no fused Relu and - // symmetric per-tensor quantization of weights (not per-axis) - bool ret_val = (filter->type == kTfLiteInt8) && - (input->type == kTfLiteInt8) && (bias->type == kTfLiteInt32) && - (params->activation == kTfLiteActNone) && - (filter->params.zero_point == 0); - return ret_val; -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, - const TfLiteFullyConnectedParams* params, - TfLiteType data_type, const TfLiteTensor* input, - const TfLiteTensor* filter, - const TfLiteTensor* bias, TfLiteTensor* output, - OpData* data) { - TfLiteStatus status = kTfLiteOk; -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - if (data_type != kTfLiteFloat32 && !data->is_mli_applicable) { - double real_multiplier = 0.0; - TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( - context, input, filter, bias, output, &real_multiplier)); - int exponent; - QuantizeMultiplier(real_multiplier, &data->output_multiplier, &exponent); - data->output_shift = -exponent; - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - } -#endif - return status; -} - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = - static_cast(node->builtin_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* filter = GetInput(context, node, kWeightsTensor); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - data->input_zero_point = input->params.zero_point; - data->filter_zero_point = filter->params.zero_point; - data->output_zero_point = output->params.zero_point; - - data->is_mli_applicable = - IsMliApplicable(context, input, filter, bias, params); - - if (input->type == kTfLiteInt8 && data->is_mli_applicable) { - data->mli_in = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_weights = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_bias = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_out = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - - ops::micro::ConvertToMliTensor(input, data->mli_in); - ops::micro::ConvertToMliTensor(filter, data->mli_weights); - ops::micro::ConvertToMliTensor(bias, data->mli_bias); - ops::micro::ConvertToMliTensor(output, data->mli_out); - - /* 
The input tensor can have more than 2 dimensions. for the compute this - doesn't make any difference because all the inputs or a batch entry will - be used anyway. because the MLI kernel doesn't recognize the multiple - dimensions, the tensor shape is casted to a {batchnum, inputsize} shape. */ - data->mli_in->shape[0] = data->mli_out->shape[0]; - data->mli_in->shape[1] = data->mli_weights->shape[1]; - data->mli_in->shape[2] = 0; - data->mli_in->shape[3] = 0; - data->mli_in->rank = 2; - } - - return (CalculateOpData(context, params, input->type, input, filter, bias, - output, data)); -} - -TfLiteStatus EvalMliQuantizedInt8(TfLiteContext* context, TfLiteNode* node, - const TfLiteFullyConnectedParams* params, - const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { - ops::micro::MliTensorAttachBuffer(input, data.mli_in); - ops::micro::MliTensorAttachBuffer(filter, data.mli_weights); - ops::micro::MliTensorAttachBuffer(bias, data.mli_bias); - ops::micro::MliTensorAttachBuffer(output, data.mli_out); - - // Tensors for data in fast (local) memory and config to copy data from - // external to local memory - mli_tensor weights_local = *data.mli_weights; - mli_tensor bias_local = *data.mli_bias; - mli_tensor in_local = *data.mli_in; - mli_tensor out_local = *data.mli_out; - mli_mov_cfg_t copy_config; - mli_mov_cfg_for_copy(©_config); - const int weight_out_dimension = 0; - const int out_tensor_dimension = 1; - const int input_size_dimension = 1; - int slice_size = data.mli_weights->shape[weight_out_dimension]; - - /* allocate the local buffers, and compute the slice size */ - TF_LITE_ENSURE_STATUS( - ops::micro::get_arc_scratch_buffer_for_fully_connect_tensors( - context, &in_local, &weights_local, &bias_local, &out_local)); - TF_LITE_ENSURE_STATUS(ops::micro::arc_scratch_buffer_calc_slice_size_weights( - &weights_local, &bias_local, weight_out_dimension, &slice_size)); - int max_out_slice_size = - out_local.capacity / mli_hlp_tensor_element_size(&out_local); - if (slice_size > max_out_slice_size) slice_size = max_out_slice_size; - - /* is_local indicates that the tensor is already in local memory, - so in that case the original tensor can be used, - and there is no need to copy it to the local tensor*/ - const bool in_is_local = in_local.data == data.mli_in->data; - const bool out_is_local = out_local.data == data.mli_out->data; - const bool w_is_local = weights_local.data == data.mli_weights->data; - const bool b_is_local = bias_local.data == data.mli_bias->data; - - ops::micro::TensorSlicer w_slice(data.mli_weights, weight_out_dimension, - slice_size); - ops::micro::TensorSlicer b_slice(data.mli_bias, weight_out_dimension, - slice_size); - ops::micro::TensorSlicer out_ch_slice(data.mli_out, out_tensor_dimension, - slice_size, 0, 0, 0, true); - - mli_tensor* w_ptr = w_is_local ? w_slice.Sub() : &weights_local; - mli_tensor* b_ptr = b_is_local ? b_slice.Sub() : &bias_local; - - void* input_buffer_ptr = NULL; - - while (!w_slice.Done()) { - mli_mov_tensor_sync(w_slice.Sub(), ©_config, w_ptr); - mli_mov_tensor_sync(b_slice.Sub(), ©_config, b_ptr); - - // Slice the input over the batches (one at a time with the size of a - // complete input) - ops::micro::TensorSlicer in_slice(data.mli_in, input_size_dimension, - data.mli_in->shape[input_size_dimension]); - - /* output tensor is already sliced in the output size dimension. 
- out_ch_slice.Sub() is the tensor for the amount of output size of this - iteration of the weight slice loop. This tensor needs to be further - sliced over the batch */ - ops::micro::TensorSlicer out_slice(out_ch_slice.Sub(), out_tensor_dimension, - slice_size); - - /* setup the pointers to the local or remote tensor to make the code - * inside the loop easier. */ - mli_tensor* in_ptr = in_is_local ? in_slice.Sub() : &in_local; - mli_tensor* out_ptr = out_is_local ? out_slice.Sub() : &out_local; - - while (!out_slice.Done()) { - // if same input copy as previous iteration, skip the copy of input - if (in_slice.Sub()->data != input_buffer_ptr) { - mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); - input_buffer_ptr = in_slice.Sub()->data; - } - mli_krn_fully_connected_sa8_sa8_sa32(in_ptr, w_ptr, b_ptr, out_ptr); - mli_mov_tensor_sync(out_ptr, ©_config, out_slice.Sub()); - - in_slice.Next(); - out_slice.Next(); - } - w_slice.Next(); - b_slice.Next(); - out_ch_slice.Next(); - } - return kTfLiteOk; -} - -TfLiteStatus EvalQuantizedInt8(TfLiteContext* context, TfLiteNode* node, - const OpData& data, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - tflite::FullyConnectedParams op_params; - op_params.input_offset = -data.input_zero_point; - op_params.weights_offset = -data.filter_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.output_multiplier = data.output_multiplier; - op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - - reference_integer_ops::FullyConnected( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; -#else - TF_LITE_KERNEL_LOG(context, - "Node configuration is not supported by ARC MLI Library."); - return kTfLiteError; -#endif -} - -TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, - const OpData& data, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, - TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - const int32_t input_offset = -data.input_zero_point; - const int32_t filter_offset = -data.filter_zero_point; - const int32_t output_offset = data.output_zero_point; - - tflite::FullyConnectedParams op_params; - op_params.input_offset = input_offset; - op_params.weights_offset = filter_offset; - op_params.output_offset = output_offset; - op_params.output_multiplier = data.output_multiplier; - // Legacy ops used mixed left and right shifts. Now all are +ve-means-left. 
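  // [Editorial note] Worked example of the multiplier/shift convention, for
  // illustration only (the numbers are not taken from this model): for a
  // real_multiplier of 0.0008, QuantizeMultiplier() returns a Q31 multiplier
  // of about 1759218604 (~0.8192) and an exponent of -10, so CalculateOpData()
  // stored output_shift = 10. Negating it below hands the reference kernel the
  // exponent back in its "positive means left shift" form, i.e. roughly
  //   acc = RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(acc, m), 10);
  // before the output offset and the activation clamp are applied.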
- op_params.output_shift = -data.output_shift; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - -#define TF_LITE_FULLY_CONNECTED(output_data_type) \ - reference_ops::FullyConnected( \ - op_params, tflite::micro::GetTensorShape(input), \ - tflite::micro::GetTensorData(input), \ - tflite::micro::GetTensorShape(filter), \ - tflite::micro::GetTensorData(filter), \ - tflite::micro::GetTensorShape(bias), \ - tflite::micro::GetTensorData(bias), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)) - switch (output->type) { - case kTfLiteUInt8: - TF_LITE_FULLY_CONNECTED(uint8_t); - break; - case kTfLiteInt16: - TF_LITE_FULLY_CONNECTED(int16_t); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -#else - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; -#endif -} - -TfLiteStatus EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteFusedActivation activation, - const TfLiteEvalTensor* input, - const TfLiteEvalTensor* filter, - const TfLiteEvalTensor* bias, TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - float output_activation_min, output_activation_max; - CalculateActivationRange(activation, &output_activation_min, - &output_activation_max); - tflite::FullyConnectedParams op_params; - op_params.float_activation_min = output_activation_min; - op_params.float_activation_max = output_activation_max; - tflite::reference_ops::FullyConnected( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; -#else - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; -#endif -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - const auto* params = - static_cast(node->builtin_data); - - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kWeightsTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kBiasTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - // Checks in Prepare ensure input, output and filter types are all the same. 
- switch (input->type) { - case kTfLiteFloat32: - return EvalFloat(context, node, params->activation, input, filter, bias, - output); - case kTfLiteInt8: - if (data.is_mli_applicable) { - return EvalMliQuantizedInt8(context, node, params, data, input, filter, - bias, output); - } else { - return EvalQuantizedInt8(context, node, data, input, filter, bias, - output); - } - - case kTfLiteUInt8: - return EvalQuantized(context, node, data, input, filter, bias, output); - - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteRegistration Register_FULLY_CONNECTED() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#else -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, - sizeof(OpDataFullyConnected)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - auto* data = static_cast(node->user_data); - const auto params = - static_cast(node->builtin_data); - - const TfLiteTensor* input = - GetInput(context, node, kFullyConnectedInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = - GetInput(context, node, kFullyConnectedWeightsTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kFullyConnectedBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kFullyConnectedOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); 
- - return CalculateOpDataFullyConnected(context, params->activation, input->type, - input, filter, bias, output, data); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - const auto* params = - static_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kFullyConnectedInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFullyConnectedWeightsTensor); - const TfLiteEvalTensor* bias = - tflite::micro::GetEvalInput(context, node, kFullyConnectedBiasTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kFullyConnectedOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const auto& data = - *(static_cast(node->user_data)); - - // Checks in Prepare ensure input, output and filter types are all the same. - switch (input->type) { - case kTfLiteFloat32: { - tflite::reference_ops::FullyConnected( - FullyConnectedParamsFloat(params->activation), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - - case kTfLiteInt8: { - tflite::reference_integer_ops::FullyConnected( - FullyConnectedParamsQuantized(data), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - - case kTfLiteUInt8: { - tflite::reference_ops::FullyConnected( - FullyConnectedParamsQuantized(data), - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - break; - } - default: { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_FULLY_CONNECTED() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h index 19363ca..b245abe 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -38,6 +38,14 @@ struct OpDataFullyConnected { int32_t input_zero_point; int32_t filter_zero_point; int32_t output_zero_point; + +// TODO(b/258710417): enable by default once optimized fully-connected works for +// all targets. +#if !defined(HEXAGON) + // A buffer used to store unpacked filter values. This is used if the source + // tensor is of n-bit precision that cannot be easily processed by kernels. + int filter_buffer_index; +#endif }; extern const int kFullyConnectedInputTensor; @@ -65,14 +73,9 @@ TfLiteStatus CalculateOpDataFullyConnected( // (reference or optimized) must define this function. TfLiteRegistration Register_FULLY_CONNECTED(); -#if defined(CMSIS_NN) || defined(ARDUINO) -// The Arduino is a special case where we use the CMSIS kernels, but because of -// the current approach to building for Arduino, we do not support -DCMSIS_NN as -// part of the build. As a result, we use defined(ARDUINO) as proxy for the -// CMSIS kernels for this one special case. - -// Returns a TfLiteRegistration struct for cmsis_nn kernel variant that only -// supports int8. +#if defined(CMSIS_NN) || defined(HEXAGON) || defined(EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP) +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int8. TfLiteRegistration Register_FULLY_CONNECTED_INT8(); #else @@ -86,6 +89,24 @@ inline TfLiteRegistration Register_FULLY_CONNECTED_INT8() { } #endif + +#if defined(CMSIS_NN) +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int16. +TfLiteRegistration Register_FULLY_CONNECTED_INT16(); + +#else +// Note that while this block gets used for both reference and optimized kernels +// that do not have any specialized implementations, the only goal here is to +// define fallback implementation that allow reference kernels to still be used +// from applications that call a more specific kernel variant. + +inline TfLiteRegistration Register_FULLY_CONNECTED_INT16() { + return Register_FULLY_CONNECTED(); +} + +#endif + } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_KERNELS_FULLY_CONNECTED_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected_common.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected_common.cc similarity index 93% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected_common.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected_common.cc index 5e1fca1..d38ea3e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected_common.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected_common.cc @@ -65,6 +65,11 @@ TfLiteStatus CalculateOpDataFullyConnected( &data->output_shift); data->input_zero_point = input->params.zero_point; + // Filter weights will always be symmetric quantized since we only support + // int8 quantization. See + // https://github.com/tensorflow/tensorflow/issues/44912 for additional + // context. 
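  // [Editorial note] Per the TFLite int8 quantization spec, weights are
  // quantized symmetrically, so their zero point is fixed at 0 and a weight
  // dequantizes as real_w = filter_scale * q_w with no offset term (the
  // identifiers in this sentence are illustrative, not names from this file).
  // The DCHECK below only asserts that invariant.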
+ TFLITE_DCHECK(filter->params.zero_point == 0); data->filter_zero_point = filter->params.zero_point; data->output_zero_point = output->params.zero_point; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather.cc new file mode 100644 index 0000000..4fb05d8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather.cc @@ -0,0 +1,226 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TF_LITE_STATIC_MEMORY + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kInputPositions = 1; +constexpr int kOutputTensor = 0; + +template +TfLiteStatus Gather(const TfLiteGatherParams* params, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* coords, TfLiteEvalTensor* output) { + const InputT* input_data = tflite::micro::GetTensorData(input); + const CoordsT* coords_data = tflite::micro::GetTensorData(coords); + InputT* output_data = tflite::micro::GetTensorData(output); + const TfLiteIntArray* input_dims = input->dims; + const int input_dims_size = input_dims->size; + int axis = params->axis; + if (axis < 0) { + axis += input_dims_size; + } + TFLITE_DCHECK_GE(axis, 0); + TFLITE_DCHECK_LT(axis, input_dims_size); + + int batch_dims = params->batch_dims; + // batch_dims should be in range: [-rank(coords), rank(coords)]. + // Negative batch_dims is added with rank of coords. 
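  // [Editorial example] Hypothetical shapes, for illustration only: with
  // params->axis = -1 on a rank-3 input, the normalization above gives
  // axis = 2; with rank-2 coords and params->batch_dims = -1, the
  // normalization below gives batch_dims = 1. For input shape {2, 4, 6} and
  // coords shape {2, 3} this yields batch_size = 2, outer_size = 4,
  // coord_size = 3, inner_size = 1 and axis_size = 6, so the memcpy loop
  // further down copies one element per (batch, outer, coord) triple into an
  // output of shape {2, 4, 3}.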
+ const TfLiteIntArray* coords_dims = coords->dims; + const int coords_dims_size = coords_dims->size; + if (batch_dims < 0) { + batch_dims += coords_dims_size; + } + TFLITE_DCHECK_GE(batch_dims, 0); + TFLITE_DCHECK_LT(batch_dims, input_dims_size); + TFLITE_DCHECK_LE(batch_dims, coords_dims_size); + TFLITE_DCHECK_GE(axis, batch_dims); + for (int i = 0; i < batch_dims; ++i) { + TFLITE_DCHECK_EQ(input_dims->data[i], coords_dims->data[i]); + } + + const int axis_size = input_dims->data[axis]; + + int batch_size = 1; + for (int i = 0; i < batch_dims; ++i) { + batch_size *= input_dims->data[i]; + } + int outer_size = 1; + for (int i = batch_dims; i < axis; ++i) { + outer_size *= input_dims->data[i]; + } + int inner_size = 1; + for (int i = axis + 1; i < input_dims_size; ++i) { + inner_size *= input_dims->data[i]; + } + int coord_size = 1; + for (int i = batch_dims; i < coords_dims_size; ++i) { + coord_size *= coords_dims->data[i]; + } + + for (int batch = 0; batch < batch_size; ++batch) { + for (int outer = 0; outer < outer_size; ++outer) { + for (int coord = 0; coord < coord_size; ++coord) { + TFLITE_DCHECK_GE(coords_data[coord], 0); + TFLITE_DCHECK_LT(coords_data[coord], axis_size); + std::memcpy(output_data + + (((batch * outer_size) + outer) * coord_size + coord) * + inner_size, + input_data + (((batch * outer_size) + outer) * axis_size + + coords_data[batch * coord_size + coord]) * + inner_size, + sizeof(InputT) * inner_size); + } + } + } + return kTfLiteOk; +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + const auto* params = + reinterpret_cast(node->builtin_data); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* coords = + micro_context->AllocateTempInputTensor(node, kInputPositions); + TF_LITE_ENSURE(context, coords != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + switch (coords->type) { + case kTfLiteInt32: + break; + default: + MicroPrintf("Positions of type '%s' are not supported by gather.", + TfLiteTypeGetName(coords->type)); + return kTfLiteError; + break; + } + + // Assign to output the input type. + output->type = input->type; + + // Check conditions for different types. + switch (input->type) { + case kTfLiteFloat32: + case kTfLiteInt8: + break; + default: + MicroPrintf("Type '%s' is not supported by gather.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + break; + } + + int axis = params->axis; + if (axis < 0) { + axis += NumDimensions(input); + } + TF_LITE_ENSURE(context, 0 <= axis && axis < NumDimensions(input)); + + int batch_dims = params->batch_dims; + // batch_dims should be in range: [-rank(coords), rank(coords)]. + // Negative batch_dims is added with rank of coords. + if (batch_dims < 0) { + batch_dims += NumDimensions(coords); + } + TF_LITE_ENSURE(context, batch_dims <= axis); + TF_LITE_ENSURE(context, 0 <= batch_dims && batch_dims < NumDimensions(input)); + TF_LITE_ENSURE(context, batch_dims <= NumDimensions(coords)); + for (int i = 0; i < batch_dims; ++i) { + TF_LITE_ENSURE_EQ(context, input->dims->data[i], coords->dims->data[i]); + } + + // GATHER updates the output tensor dimensions, but TfLiteTensor in the + // MicroInterpreter is a temporary allocation. 
We must therefore relocate the + // dims from the FlatBuffer to the persistant storage arena. + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + TfLiteIntArray* output_shape = output->dims; + output_shape->size = + NumDimensions(input) + NumDimensions(coords) - 1 - batch_dims; + int output_index = 0; + for (int i = 0; i < axis; ++i) { + output_shape->data[output_index++] = input->dims->data[i]; + } + for (int i = batch_dims; i < coords->dims->size; ++i) { + output_shape->data[output_index++] = coords->dims->data[i]; + } + for (int i = axis + 1; i < input->dims->size; ++i) { + output_shape->data[output_index++] = input->dims->data[i]; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(coords); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const auto* params = + reinterpret_cast(node->builtin_data); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* coords = + tflite::micro::GetEvalInput(context, node, kInputPositions); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (coords->type == kTfLiteInt32) { + switch (input->type) { + case kTfLiteFloat32: + return Gather(params, input, coords, output); + break; + case kTfLiteInt8: + return Gather(params, input, coords, output); + break; + default: + MicroPrintf("Type '%s' is not supported by gather.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + break; + } + } + return kTfLiteOk; +} +} // namespace + +TfLiteRegistration Register_GATHER() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite +#endif // TF_LITE_STATIC_MEMORY \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather_nd.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather_nd.cc new file mode 100644 index 0000000..5e4b261 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/gather_nd.cc @@ -0,0 +1,212 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kParams = 0; +constexpr int kIndices = 1; +constexpr int kOutputTensor = 0; +constexpr int MAX_INDICES_ND = 5; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* params = micro_context->AllocateTempInputTensor(node, kParams); + TF_LITE_ENSURE(context, params != nullptr); + TfLiteTensor* indices = + micro_context->AllocateTempInputTensor(node, kIndices); + TF_LITE_ENSURE(context, indices != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + switch (params->type) { + case kTfLiteFloat32: + case kTfLiteInt8: + break; + default: + MicroPrintf("Params of type '%s' are not supported by gather_nd.", + TfLiteTypeGetName(params->type)); + return kTfLiteError; + break; + } + switch (indices->type) { + case kTfLiteInt32: + break; + default: + MicroPrintf("Indices of type '%s' are not supported by gather_nd.", + TfLiteTypeGetName(indices->type)); + return kTfLiteError; + } + + const int params_rank = NumDimensions(params); + const int indices_rank = NumDimensions(indices); + const int indices_nd = SizeOfDimension(indices, indices_rank - 1); + if (params_rank < 1) { + MicroPrintf("Params must be at least a vector."); + return kTfLiteError; + } + if (indices_rank < 1) { + MicroPrintf("Indices must be at least a vector."); + return kTfLiteError; + } + if (indices_nd > params_rank) { + MicroPrintf("Index innermost dimension length must be <= params rank."); + return kTfLiteError; + } + if (indices_nd > MAX_INDICES_ND) { + MicroPrintf("Index innermost dimension length must not exceed %d.", + MAX_INDICES_ND); + return kTfLiteError; + } + + // Assign to output the input type. + output->type = params->type; + + // The tensor output dims must be relocated + // from the FlatBuffer to the persistant storage arena. + TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + // TFLM gather_nd does not create the output tensor, but it needs to ensure + // that the output shape is correct. 
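  // [Editorial example] Hypothetical shapes, for illustration only: with
  // params.shape = {4, 3} and indices.shape = {2, 1} (indices_nd = 1, less
  // than params_rank = 2), each int32 index row selects a whole row of
  // params and the computation below yields output.shape = {2, 3}; with
  // indices.shape = {2, 2} (indices_nd = 2) each index pair addresses a
  // single element and output.shape = {2}.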
The result shape is + // indices.shape[:-1] + params.shape[indices.shape[-1]:] + TfLiteIntArray* output_shape = output->dims; + int output_index = 0; + for (int i = 0; i < indices_rank - 1; ++i) { + output_shape->data[output_index++] = indices->dims->data[i]; + } + for (int i = indices_nd; i < params_rank; ++i) { + output_shape->data[output_index++] = params->dims->data[i]; + } + output_shape->size = output_index; + + micro_context->DeallocateTempTfLiteTensor(params); + micro_context->DeallocateTempTfLiteTensor(indices); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +template +TfLiteStatus GatherNd(const TfLiteEvalTensor* params, + const TfLiteEvalTensor* indices, + TfLiteEvalTensor* output) { + const int indices_dims = indices->dims->size; + const int indices_nd = indices->dims->data[indices_dims - 1]; + const int params_dims = params->dims->size; + const IndicesT* index_data = tflite::micro::GetTensorData(indices); + const ParamsT* param_data = tflite::micro::GetTensorData(params); + ParamsT* output_data = tflite::micro::GetTensorData(output); + + int n_slices = 1; + for (int i = 0; i < indices_dims - 1; ++i) { + n_slices *= indices->dims->data[i]; + } + + // If indices[-1] == params.rank, fetch single elements. + // If indices[-1] < params.rank, fetch slices. + int slice_size = 1; + for (int i = indices_nd; i < params_dims; ++i) { + slice_size *= params->dims->data[i]; + } + + int params_flat_size = ElementCount(*params->dims); + int remain_flat_size = params_flat_size; + + // Number of elements per dimension + int dims_to_count[MAX_INDICES_ND]; + for (int i = 0; i < indices_nd; ++i) { + dims_to_count[i] = remain_flat_size / params->dims->data[i]; + remain_flat_size = dims_to_count[i]; + } + + for (int i = 0; i < n_slices; ++i) { + int from_pos = 0; + for (int j = 0; j < indices_nd; ++j) { + int offset = i * indices_nd + j; + IndicesT index = index_data[offset]; + from_pos += index * dims_to_count[j]; + } + if (from_pos < 0 || from_pos + slice_size > params_flat_size) { + return kTfLiteError; + } + std::memcpy(output_data + i * slice_size, param_data + from_pos, + sizeof(ParamsT) * slice_size); + } + return kTfLiteOk; +} + +template +TfLiteStatus EvalGatherNd(TfLiteContext* context, + const TfLiteEvalTensor* params, + const TfLiteEvalTensor* indices, + TfLiteEvalTensor* output) { + TfLiteStatus status = kTfLiteError; + switch (params->type) { + case kTfLiteFloat32: + status = GatherNd(params, indices, output); + break; + case kTfLiteInt8: + status = GatherNd(params, indices, output); + break; + default: + MicroPrintf("Params type '%s' are not supported by gather_nd.", + TfLiteTypeGetName(params->type)); + return kTfLiteError; + } + if (status != kTfLiteOk) { + MicroPrintf("gather_nd index out of bounds"); + } + return status; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* params = + tflite::micro::GetEvalInput(context, node, kParams); + const TfLiteEvalTensor* indices = + tflite::micro::GetEvalInput(context, node, kIndices); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (indices->type) { + case kTfLiteInt32: + return EvalGatherNd(context, params, indices, output); + break; + default: + MicroPrintf("Indices of type '%s' are not supported by gather_nd.", + TfLiteTypeGetName(indices->type)); + return kTfLiteError; + } +} +} // namespace + +TfLiteRegistration Register_GATHER_ND() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // 
namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.cc new file mode 100644 index 0000000..0f8a718 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.cc @@ -0,0 +1,75 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/hard_swish.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { +void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(HardSwishParams)); +} + +TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kHardSwishInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kHardSwishOutputTensor); + HardSwishParams* params = static_cast(node->user_data); + + switch (input->type) { + case kTfLiteFloat32: { + tflite::reference_ops::HardSwish( + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } break; + case kTfLiteInt8: { + tflite::reference_ops::HardSwish( + *params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } break; + default: { + MicroPrintf("Unsupported type %s", TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_HARD_SWISH() { + return tflite::micro::RegisterOp(HardSwishInit, tflite::HardSwishPrepare, + HardSwishEval); +} + +} // namespace tflite diff --git 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.h new file mode 100644 index 0000000..cb34f13 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.h @@ -0,0 +1,30 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { + +extern const int kHardSwishInputTensor; +extern const int kHardSwishOutputTensor; + +TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node); +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_HARD_SWISH_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish_common.cc similarity index 55% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish_common.cc index b0c179e..1b82154 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish_common.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,43 +13,39 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/hard_swish.h" - #include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/hard_swish.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/hard_swish.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" namespace tflite { -namespace ops { -namespace micro { -namespace hard_swish { -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -void* HardSwishInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(HardSwishParams)); -} +const int kHardSwishInputTensor = 0; +const int kHardSwishOutputTensor = 0; TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TFLITE_DCHECK(node->user_data != nullptr); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kHardSwishInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kHardSwishOutputTensor); TF_LITE_ENSURE(context, output != nullptr); - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { + if (input->type == kTfLiteInt8) { HardSwishParams* params = static_cast(node->user_data); params->input_zero_point = input->params.zero_point; @@ -81,62 +77,10 @@ TfLiteStatus HardSwishPrepare(TfLiteContext* context, TfLiteNode* node) { ¶ms->reluish_multiplier_fixedpoint_int16); } - return kTfLiteOk; -} + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); -TfLiteStatus HardSwishEval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - HardSwishParams* params = static_cast(node->user_data); - - switch (input->type) { - case kTfLiteFloat32: { - tflite::reference_ops::HardSwish( - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - case kTfLiteUInt8: { - tflite::reference_ops::HardSwish( - *params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - case kTfLiteInt8: { - tflite::reference_ops::HardSwish( - *params, tflite::micro::GetTensorShape(input), - 
tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - default: { - TF_LITE_KERNEL_LOG( - context, - "Only float32/int8_t/uint8_t are supported currently, got %s", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - } return kTfLiteOk; } -} // namespace hard_swish - -TfLiteRegistration Register_HARD_SWISH() { - return {/*init=*/hard_swish::HardSwishInit, - /*free=*/nullptr, - /*prepare=*/hard_swish::HardSwishPrepare, - /*invoke=*/hard_swish::HardSwishEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/if.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/if.cc new file mode 100644 index 0000000..afa9920 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/if.cc @@ -0,0 +1,121 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +struct OpData { + int then_subgraph_index; + int else_subgraph_index; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + const auto* params = + reinterpret_cast(node->builtin_data); + op_data->then_subgraph_index = params->then_subgraph_index; + op_data->else_subgraph_index = params->else_subgraph_index; + + TF_LITE_ENSURE(context, node->inputs->size > 0); + + // The first input is the condition. + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + TfLiteTensor* cond = micro_context->AllocateTempInputTensor(node, 0); + + TF_LITE_ENSURE(context, cond != nullptr); + TF_LITE_ENSURE_EQ(context, cond->type, kTfLiteBool); + TF_LITE_ENSURE_EQ(context, NumElements(cond), 1); + + micro_context->DeallocateTempTfLiteTensor(cond); + + // The first input of the node is the condition. 
The rest of inputs are + // passed to the branch subgraphs. Therefore, the number of subgraph inputs + // will be the number of node inputs - 1. + size_t num_inputs = node->inputs->size - 1; + size_t num_outputs = node->outputs->size; + + MicroGraph& graph_info = micro_context->graph(); + + TF_LITE_ENSURE(context, + op_data->then_subgraph_index < graph_info.NumSubgraphs()); + TF_LITE_ENSURE(context, + op_data->else_subgraph_index < graph_info.NumSubgraphs()); + + TF_LITE_ENSURE_EQ(context, num_inputs, + graph_info.NumSubgraphInputs(op_data->then_subgraph_index)); + TF_LITE_ENSURE_EQ( + context, num_outputs, + graph_info.NumSubgraphOutputs(op_data->then_subgraph_index)); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const OpData* op_data = reinterpret_cast(node->user_data); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + TfLiteTensor* cond = micro_context->AllocateTempInputTensor(node, 0); + + TF_LITE_ENSURE(context, cond != nullptr); + bool cond_value = cond->data.b[0]; + micro_context->DeallocateTempTfLiteTensor(cond); + + MicroGraph* graph_info = µ_context->graph(); + // Currently we copy the input / output between the subgraphs. + int active_branch_subgraph_index = + cond_value ? op_data->then_subgraph_index : op_data->else_subgraph_index; + + TF_LITE_ENSURE_OK(context, + tflite::micro::CopyOpInputsToSubgraphInputs( + context, node, graph_info, active_branch_subgraph_index, + /*first_tensor_idx=*/1)); + + TF_LITE_ENSURE_OK(context, + graph_info->InvokeSubgraph(active_branch_subgraph_index)); + + TF_LITE_ENSURE_OK( + context, tflite::micro::CopySubgraphOutputsToOpOutputs( + context, node, graph_info, active_branch_subgraph_index)); + + return kTfLiteOk; +} + +} // namespace. + +TfLiteRegistration Register_IF() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.cc new file mode 100644 index 0000000..e731f4e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.cc @@ -0,0 +1,121 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.h" + +#include "edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_arena_constants.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h" + +namespace tflite { +namespace micro { + +// TODO(b/161841696): Consider moving away from global arena buffers: +constexpr int KernelRunner::kKernelRunnerBufferSize_; +uint8_t KernelRunner::kKernelRunnerBuffer_[]; + +void ClearBufferApi(TfLiteContext* context_) { + context_->GetScratchBuffer = nullptr; + context_->GetExternalContext = nullptr; + context_->AllocatePersistentBuffer = nullptr; + context_->RequestScratchBufferInArena = nullptr; +} + +KernelRunner::KernelRunner(const TfLiteRegistration& registration, + TfLiteTensor* tensors, int tensors_size, + TfLiteIntArray* inputs, TfLiteIntArray* outputs, + void* builtin_data, TfLiteIntArray* intermediates) + : registration_(registration), + allocator_(SingleArenaBufferAllocator::Create(kKernelRunnerBuffer_, + kKernelRunnerBufferSize_)), + mock_micro_graph_(allocator_), + fake_micro_context_(tensors, allocator_, &mock_micro_graph_) { + // Prepare TfLiteContext: + context_.impl_ = static_cast(&fake_micro_context_); + context_.ReportError = MicroContextReportOpError; + context_.recommended_num_threads = 1; + context_.GetTensor = MicroContextGetTensor; + context_.GetEvalTensor = MicroContextGetEvalTensor; + tflite::micro::ClearBufferApi(&context_); + context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; + + context_.recommended_num_threads = 0; + + // Prepare TfLiteNode: + node_.inputs = inputs; + node_.outputs = outputs; + node_.builtin_data = builtin_data; + node_.intermediates = intermediates; +} + +bool KernelRunner::ValidateTempBufferDeallocated() { + return fake_micro_context_.IsAllTempTfLiteTensorDeallocated(); +} + +TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data, + size_t length) { + if (registration_.init) { + tflite::micro::ClearBufferApi(&context_); + context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; + node_.user_data = registration_.init(&context_, init_data, length); + } + + TF_LITE_ENSURE(&context_, ValidateTempBufferDeallocated()); + + if (registration_.prepare) { + tflite ::micro::ClearBufferApi(&context_); + context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; + context_.RequestScratchBufferInArena = + MicroContextRequestScratchBufferInArena; + context_.GetExternalContext = MicroContextGetExternalContext; + TF_LITE_ENSURE_STATUS(registration_.prepare(&context_, &node_)); + } + + TF_LITE_ENSURE(&context_, ValidateTempBufferDeallocated()); + + return kTfLiteOk; +} + +TfLiteStatus KernelRunner::Invoke() { + tflite::micro::ClearBufferApi(&context_); + context_.GetScratchBuffer = MicroContextGetScratchBuffer; + + if (registration_.invoke == nullptr) { + MicroPrintf("TfLiteRegistration missing invoke function pointer!"); + return kTfLiteError; + } + + TF_LITE_ENSURE_STATUS(registration_.invoke(&context_, &node_)); + + TF_LITE_ENSURE(&context_, ValidateTempBufferDeallocated()); + + return kTfLiteOk; +} + +TfLiteStatus KernelRunner::Free() { + tflite::micro::ClearBufferApi(&context_); + context_.GetScratchBuffer = MicroContextGetScratchBuffer; + + if (registration_.free == nullptr) { + 
MicroPrintf("TfLiteRegistration missing free function pointer!"); + return kTfLiteError; + } + + registration_.free(&context_, node_.user_data); + return kTfLiteOk; +} +} // namespace micro +} // namespace tflite \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.cpp deleted file mode 100644 index bf7dd92..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.cpp +++ /dev/null @@ -1,161 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.h" - -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h" - -namespace tflite { -namespace micro { - -namespace { -constexpr size_t kBufferAlignment = 16; -} // namespace - -// TODO(b/161841696): Consider moving away from global arena buffers: -constexpr int KernelRunner::kNumScratchBuffers_; -constexpr int KernelRunner::kKernelRunnerBufferSize_; -uint8_t KernelRunner::kKernelRunnerBuffer_[]; - -KernelRunner::KernelRunner(const TfLiteRegistration& registration, - TfLiteTensor* tensors, int tensors_size, - TfLiteIntArray* inputs, TfLiteIntArray* outputs, - void* builtin_data) - : allocator_(SimpleMemoryAllocator::Create(GetMicroErrorReporter(), - kKernelRunnerBuffer_, - kKernelRunnerBufferSize_)), - registration_(registration), - tensors_(tensors) { - // Prepare TfLiteContext: - context_.impl_ = static_cast(this); - context_.ReportError = ReportOpError; - context_.recommended_num_threads = 1; - context_.GetTensor = GetTensor; - context_.GetEvalTensor = GetEvalTensor; - context_.AllocatePersistentBuffer = AllocatePersistentBuffer; - context_.RequestScratchBufferInArena = RequestScratchBufferInArena; - context_.GetScratchBuffer = GetScratchBuffer; - - // Prepare TfLiteNode: - node_.inputs = inputs; - node_.outputs = outputs; - node_.builtin_data = builtin_data; -} - -TfLiteStatus KernelRunner::InitAndPrepare(const char* init_data, - size_t length) { - if (registration_.init) { - node_.user_data = registration_.init(&context_, init_data, length); - } - if (registration_.prepare) { - TF_LITE_ENSURE_STATUS(registration_.prepare(&context_, &node_)); - } - return kTfLiteOk; -} - -TfLiteStatus KernelRunner::Invoke() { - if (registration_.invoke == nullptr) { - MicroPrintf("TfLiteRegistration missing invoke function pointer!"); - return kTfLiteError; - } - return registration_.invoke(&context_, &node_); -} - -TfLiteTensor* KernelRunner::GetTensor(const struct TfLiteContext* context, - int tensor_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - return &runner->tensors_[tensor_index]; -} - 
-TfLiteEvalTensor* KernelRunner::GetEvalTensor( - const struct TfLiteContext* context, int tensor_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - TfLiteEvalTensor* eval_tensor = - reinterpret_cast(runner->allocator_->AllocateTemp( - sizeof(TfLiteEvalTensor), alignof(TfLiteEvalTensor))); - TFLITE_DCHECK(eval_tensor != nullptr); - - // In unit tests, the TfLiteTensor pointer contains the source of truth for - // buffers and values: - eval_tensor->data = runner->tensors_[tensor_index].data; - eval_tensor->dims = runner->tensors_[tensor_index].dims; - eval_tensor->type = runner->tensors_[tensor_index].type; - return eval_tensor; -} - -void* KernelRunner::AllocatePersistentBuffer(TfLiteContext* context, - size_t bytes) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - return runner->allocator_->AllocateFromTail(bytes, kBufferAlignment); -} - -TfLiteStatus KernelRunner::RequestScratchBufferInArena(TfLiteContext* context, - size_t bytes, - int* buffer_index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(buffer_index != nullptr); - - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - if (runner->scratch_buffer_count_ == kNumScratchBuffers_) { - MicroPrintf("Exceeded the maximum number of scratch tensors allowed (%d).", - kNumScratchBuffers_); - return kTfLiteError; - } - - // For tests, we allocate scratch buffers from the tail and keep them around - // for the lifetime of model. This means that the arena size in the tests will - // be more than what we would have if the scratch buffers could share memory. - runner->scratch_buffers_[runner->scratch_buffer_count_] = - runner->allocator_->AllocateFromTail(bytes, kBufferAlignment); - TFLITE_DCHECK(runner->scratch_buffers_[runner->scratch_buffer_count_] != - nullptr); - - *buffer_index = runner->scratch_buffer_count_++; - return kTfLiteOk; -} - -void* KernelRunner::GetScratchBuffer(TfLiteContext* context, int buffer_index) { - TFLITE_DCHECK(context != nullptr); - KernelRunner* runner = reinterpret_cast(context->impl_); - TFLITE_DCHECK(runner != nullptr); - - TFLITE_DCHECK(runner->scratch_buffer_count_ <= kNumScratchBuffers_); - if (buffer_index >= runner->scratch_buffer_count_) { - return nullptr; - } - return runner->scratch_buffers_[buffer_index]; -} - -void KernelRunner::ReportOpError(struct TfLiteContext* context, - const char* format, ...) { - va_list args; - va_start(args, format); - GetMicroErrorReporter()->Report(format, args); - va_end(args); -} - -} // namespace micro -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.h index 7a29c86..cf3c690 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_runner.h @@ -18,7 +18,9 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/fake_micro_context.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.h" namespace tflite { namespace micro { @@ -33,7 +35,8 @@ class KernelRunner { public: KernelRunner(const TfLiteRegistration& registration, TfLiteTensor* tensors, int tensors_size, TfLiteIntArray* inputs, - TfLiteIntArray* outputs, void* builtin_data); + TfLiteIntArray* outputs, void* builtin_data, + TfLiteIntArray* intermediates = nullptr); // Calls init and prepare on the kernel (i.e. TfLiteRegistration) struct. Any // exceptions will be DebugLog'd and returned as a status code. @@ -45,34 +48,31 @@ class KernelRunner { // passed into the constructor of this class. TfLiteStatus Invoke(); - protected: - static TfLiteTensor* GetTensor(const struct TfLiteContext* context, - int tensor_index); - static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context, - int tensor_index); - static void* AllocatePersistentBuffer(TfLiteContext* context, size_t bytes); - static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* context, - size_t bytes, - int* buffer_index); - static void* GetScratchBuffer(TfLiteContext* context, int buffer_index); - static void ReportOpError(struct TfLiteContext* context, const char* format, - ...); + // Calls Free on a given TfLiteRegistration pointer(if it's implemented). + // After successful Free, kTfLiteOk status will be returned. If Free is not + // implemented for a given kernel kTfLiteError will be returned. + TfLiteStatus Free(); - private: - static constexpr int kNumScratchBuffers_ = 12; + // Returns a pointer to the internal MockMicroGraph which KernelRunner uses + // to stub out MicroGraph methods and track invocations on each subgraph. + MockMicroGraph* GetMockGraph() { return &mock_micro_graph_; } + + // Returns true if all temp buffer in tests are deallocated. + // TODO(b/209453859): move this function to private after deallocation checks + // are enabled for all kernel tests. + bool ValidateTempBufferDeallocated(); + private: static constexpr int kKernelRunnerBufferSize_ = 10000; static uint8_t kKernelRunnerBuffer_[kKernelRunnerBufferSize_]; - SimpleMemoryAllocator* allocator_ = nullptr; - const TfLiteRegistration& registration_; - TfLiteTensor* tensors_ = nullptr; - TfLiteContext context_ = {}; TfLiteNode node_ = {}; + const TfLiteRegistration& registration_; - int scratch_buffer_count_ = 0; - uint8_t* scratch_buffers_[kNumScratchBuffers_]; + SingleArenaBufferAllocator* allocator_; + MockMicroGraph mock_micro_graph_; + FakeMicroContext fake_micro_context_; }; } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h index b701618..616e7ff 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,48 +21,71 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" namespace tflite { namespace micro { +TfLiteRegistration RegisterOp( + void* (*init)(TfLiteContext* context, const char* buffer, size_t length), + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node), + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node), + void (*free)(TfLiteContext* context, void* buffer) = nullptr); + +// Prints out n bytes in an int8_t buffer as hex +void PrintNBytes(const int8_t* tensor_data, int n_bytes, + const char* prefix = nullptr); + +// Prints out the n bytes in a TfLiteEvalTensor as hex +void PrintNBytes(const TfLiteEvalTensor* tensor, int n_bytes, + const char* prefix = nullptr); + +// Prints out the n bytes in a TfLiteTensor as hex +void PrintNBytes(const TfLiteTensor* tensor, int n_bytes, + const char* prefix = nullptr); + // Returns a mutable tensor for a given input index. is_variable must be checked // during prepare when the full TfLiteTensor is available. -inline TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, - const TfLiteNode* node, - int index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - return context->GetEvalTensor(context, node->inputs->data[index]); -} +TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, + const TfLiteNode* node, int index); // Returns the TfLiteEvalTensor struct for a given input index in a node. -inline const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - return GetMutableEvalInput(context, node, index); -} +const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, + const TfLiteNode* node, int index); // Returns the TfLiteEvalTensor struct for a given output index in a node. -inline TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, - const TfLiteNode* node, int index) { - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(node != nullptr); - return context->GetEvalTensor(context, node->outputs->data[index]); -} +TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, + const TfLiteNode* node, int index); -// Returns data for a TfLiteEvalTensor struct. +// Returns data for a TfLiteEvalTensor struct that are expected to exist. template <typename T> T* GetTensorData(TfLiteEvalTensor* tensor) { - return tensor != nullptr ? reinterpret_cast<T*>(tensor->data.raw) : nullptr; + TFLITE_DCHECK(tensor != nullptr); + return reinterpret_cast<T*>(tensor->data.raw); } -// Returns const data for a TfLiteEvalTensor struct. +// Returns const data for a TfLiteEvalTensor struct that are expected to exist. template <typename T> const T* GetTensorData(const TfLiteEvalTensor* tensor) { TFLITE_DCHECK(tensor != nullptr); return reinterpret_cast<const T*>(tensor->data.raw); } +// Returns data for a TfLiteEvalTensor struct that could be null. +template <typename T> +T* GetOptionalTensorData(TfLiteEvalTensor* tensor) { + return tensor == nullptr ?
nullptr : reinterpret_cast(tensor->data.raw); +} + +// Returns const data for a TfLiteEvalTensor struct that could be null. +template +const T* GetOptionalTensorData(const TfLiteEvalTensor* tensor) { + return tensor == nullptr ? nullptr + : reinterpret_cast(tensor->data.raw); +} + // Returns the shape of a TfLiteEvalTensor struct. const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor); @@ -72,6 +95,50 @@ bool HaveSameShapes(const TfLiteEvalTensor* input1, PaddingType RuntimePaddingType(TfLitePadding padding); +// Relocate tensor dims from FlatBuffer to the persistent storage arena. +// The old dims data is copied to the new storage area. +// The tensor and eval_tensor must be the same tensor. +// Only use during Prepare phase. +TfLiteStatus CreateWritableTensorDimsWithCopy(TfLiteContext* context, + TfLiteTensor* tensor, + TfLiteEvalTensor* eval_tensor); + +// Copy all op input tensors to op output tensors. Requires all op input tensor +// shapes and types to be identical to op output tensor shapes and types. +TfLiteStatus CopyOpInputsToOpOutputs(TfLiteContext* context, TfLiteNode* node); + +// Copy all op input tensors to subgraph input tensors. Requires all op input +// tensor shapes and types to be identical to subgraph input tensor shapes and +// types. +TfLiteStatus CopyOpInputsToSubgraphInputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx, + int first_tensor_idx); + +// Copy all op output tensors to subgraph input tensors. Requires all op output +// tensor shapes and types to be identical to subgraph input tensor shapes and +// types. +TfLiteStatus CopyOpOutputsToSubgraphInputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx); + +// Copy all subgraph output tensors to op outputs. Requires all subgraph output +// tensor shapes and types to be identical to op output tensor shapes and types. +TfLiteStatus CopySubgraphOutputsToOpOutputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx); + +// If tensor is INT4, make a new TfLiteEvalTensor with data unpacked into +// a scratch buffer. The returned tensor will have the kTfLiteInt8 type. +// Assume scratch buffer is previously requested in Prepare, and +// scratch_buffer_index can be used to retrieve that buffer. +// If the tensor is not INT4, a shallow copy is returned. +TfLiteEvalTensor MakeUnpackedInt4Tensor(TfLiteContext* context, + int scratch_buffer_index, + const TfLiteEvalTensor* tensor); } // namespace micro } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util_micro.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util_micro.cc new file mode 100644 index 0000000..73ab130 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util_micro.cc @@ -0,0 +1,280 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace micro { + +namespace { + +int ValidateTensorIndexing(const TfLiteContext* context, int index, + int max_size, const int* tensor_indices) { + if (index >= 0 && index < max_size) { + const int tensor_index = tensor_indices[index]; + if (tensor_index != kTfLiteOptionalTensor) { + return tensor_index; + } + } + return -1; +} + +} // namespace + +TfLiteRegistration RegisterOp( + void* (*init)(TfLiteContext* context, const char* buffer, size_t length), + TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node), + TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node), + void (*free)(TfLiteContext* context, void* buffer)) { + return {/*init=*/init, + /*free=*/free, + /*prepare=*/prepare, + /*invoke=*/invoke, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0, + /*registration_external=*/nullptr}; +} + +// Returns a mutable tensor for a given input index. is_variable must be checked +// during prepare when the full TfLiteTensor is available. +TfLiteEvalTensor* GetMutableEvalInput(const TfLiteContext* context, + const TfLiteNode* node, int index) { + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(node != nullptr); + const int tensor_index = ValidateTensorIndexing( + context, index, node->inputs->size, node->inputs->data); + + if (tensor_index < 0) { + return nullptr; + } + + return context->GetEvalTensor(context, node->inputs->data[index]); +} + +// Returns the TfLiteEvalTensor struct for a given input index in a node. +const TfLiteEvalTensor* GetEvalInput(const TfLiteContext* context, + const TfLiteNode* node, int index) { + return GetMutableEvalInput(context, node, index); +} + +// Returns the TfLiteEvalTensor struct for a given output index in a node. +TfLiteEvalTensor* GetEvalOutput(const TfLiteContext* context, + const TfLiteNode* node, int index) { + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(node != nullptr); + return context->GetEvalTensor(context, node->outputs->data[index]); +} + +bool HaveSameShapes(const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2) { + TFLITE_DCHECK(input1 != nullptr); + TFLITE_DCHECK(input2 != nullptr); + return TfLiteIntArrayEqual(input1->dims, input2->dims); +} + +const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) { + if (tensor == nullptr || tensor->dims == nullptr) { + return RuntimeShape(); + } + TfLiteIntArray* dims = tensor->dims; + const int dims_size = dims->size; + const int32_t* dims_data = reinterpret_cast(dims->data); + return RuntimeShape(dims_size, dims_data); +} + +PaddingType RuntimePaddingType(TfLitePadding padding) { + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + +// Relocate tensor dims from FlatBuffer to the persistent storage arena. 
+// The old dims data is copied to the new storage area. +// The tensor and eval_tensor must be the same tensor. +// Only use during Prepare phase. +TfLiteStatus CreateWritableTensorDimsWithCopy(TfLiteContext* context, + TfLiteTensor* tensor, + TfLiteEvalTensor* eval_tensor) { + TF_LITE_ENSURE(context, tensor != nullptr); + TF_LITE_ENSURE(context, eval_tensor != nullptr); + TF_LITE_ENSURE(context, context->AllocatePersistentBuffer != nullptr); + int ranks = tensor->dims->size; + size_t alloc_size = TfLiteIntArrayGetSizeInBytes(ranks); + TfLiteIntArray* new_dims = static_cast<TfLiteIntArray*>( + context->AllocatePersistentBuffer(context, alloc_size)); + TfLiteIntArray* old_dims = tensor->dims; + new_dims->size = ranks; + tensor->dims = new_dims; + eval_tensor->dims = new_dims; + for (int i = 0; i < ranks; i++) { + new_dims->data[i] = old_dims->data[i]; + } + + return kTfLiteOk; +} + +// Verify that both tensors have the same type and size, then return the size +// of both tensors in bytes if they are the same, or -1 if they are different. +size_t ValidateAndGetTensorSizes(const TfLiteEvalTensor* tensor1, + const TfLiteEvalTensor* tensor2) { + TFLITE_DCHECK(tensor1->type == tensor2->type); + size_t tensor1_size = 0; + size_t tensor2_size = 0; + TfLiteEvalTensorByteLength(tensor1, &tensor1_size); + TfLiteEvalTensorByteLength(tensor2, &tensor2_size); + return (tensor1_size == tensor2_size) ? tensor1_size : -1; +} + +TfLiteStatus CopyOpInputsToOpOutputs(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE(context, node->inputs->size == node->outputs->size); + for (int i = 0; i < node->inputs->size; i++) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, i); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i); + int bytes = ValidateAndGetTensorSizes(input, output); + TF_LITE_ENSURE(context, bytes >= 0); + memcpy(output->data.raw, input->data.raw, bytes); + } + return kTfLiteOk; +} + +// Args: +// 1. int8_t tensor_data - int8_t buffer of unknown size whose data you'd +// like +// to print +// 2. int n_bytes - a small int representing number of bytes you want to +// print +// to debug output. It should always be <= tensor_data's size. +// 3. prefix - optional message you'd like to print before printing bytes +// +// Purpose: +// Function takes in parameters above and prints n_bytes bytes from the +// tensor_data buffer. This can be used to debug the output of a model and its +// op.
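+//
+// Example (assumes `output` is a TfLiteEvalTensor* already fetched with
+// tflite::micro::GetEvalOutput(context, node, 0) inside a kernel under test):
+//
+//   tflite::micro::PrintNBytes(output, 8, "op output: ");
+//
+// prints the prefix followed by the first 8 bytes of the buffer as hex.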
+ +void PrintNBytes(const int8_t* tensor_data, int n_bytes, const char* prefix) { + if (prefix != nullptr) { + MicroPrintf("%s", prefix); + } + + for (int i = 0; i < n_bytes; ++i) { + MicroPrintf(" %x", tensor_data[i]); + } + MicroPrintf("\n"); +} + +// same as the PrintNBytes above but the buffer needs to be extracted out of the +// TfLiteEvalTensor* +void PrintNBytes(const TfLiteEvalTensor* tensor, int n_bytes, + const char* prefix) { + const int8_t* tensor_data = tflite::micro::GetTensorData(tensor); + PrintNBytes(tensor_data, n_bytes, prefix); +} + +// same as the PrintNBytes above but the buffer needs to be extracted out of the +// TfLiteEvalTensor* +void PrintNBytes(const TfLiteTensor* tensor, int n_bytes, const char* prefix) { + const int8_t* tensor_data = tflite::GetTensorData(tensor); + PrintNBytes(tensor_data, n_bytes, prefix); +} + +TfLiteStatus CopyOpInputsToSubgraphInputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx, + int first_tensor_idx) { + TF_LITE_ENSURE(context, + static_cast(node->inputs->size - first_tensor_idx) == + graph_info->NumSubgraphInputs(subgraph_idx)); + for (int i = 0; i < node->inputs->size - first_tensor_idx; i++) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, i + first_tensor_idx); + TfLiteEvalTensor* subgraph_input = + graph_info->GetSubgraphInput(subgraph_idx, i); + int bytes = ValidateAndGetTensorSizes(input, subgraph_input); + TF_LITE_ENSURE(context, bytes >= 0); + memcpy(subgraph_input->data.raw, input->data.raw, bytes); + } + return kTfLiteOk; +} + +TfLiteStatus CopyOpOutputsToSubgraphInputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx) { + TF_LITE_ENSURE(context, static_cast(node->outputs->size) == + graph_info->NumSubgraphInputs(subgraph_idx)); + for (int i = 0; i < node->outputs->size; i++) { + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i); + TfLiteEvalTensor* subgraph_input = + graph_info->GetSubgraphInput(subgraph_idx, i); + int bytes = ValidateAndGetTensorSizes(output, subgraph_input); + TF_LITE_ENSURE(context, bytes >= 0); + memcpy(subgraph_input->data.raw, output->data.raw, bytes); + } + return kTfLiteOk; +} + +TfLiteStatus CopySubgraphOutputsToOpOutputs(TfLiteContext* context, + TfLiteNode* node, + MicroGraph* graph_info, + int subgraph_idx) { + TF_LITE_ENSURE(context, static_cast(node->outputs->size) == + graph_info->NumSubgraphOutputs(subgraph_idx)); + for (int i = 0; i < node->outputs->size; i++) { + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, i); + TfLiteEvalTensor* subgraph_output = + graph_info->GetSubgraphOutput(subgraph_idx, i); + int bytes = ValidateAndGetTensorSizes(output, subgraph_output); + TF_LITE_ENSURE(context, bytes >= 0); + memcpy(output->data.raw, subgraph_output->data.raw, bytes); + } + return kTfLiteOk; +} + +TfLiteEvalTensor MakeUnpackedInt4Tensor(TfLiteContext* context, + int scratch_buffer_index, + const TfLiteEvalTensor* tensor) { + if (tensor->type != kTfLiteInt4) { + return *tensor; + } + + TfLiteEvalTensor new_tensor; + new_tensor.data.data = static_cast( + context->GetScratchBuffer(context, scratch_buffer_index)); + new_tensor.dims = tensor->dims; + new_tensor.type = kTfLiteInt8; + tflite::tensor_utils::UnpackDenseInt4IntoInt8( + tflite::micro::GetTensorData(tensor), + tflite::micro::GetTensorShape(tensor).FlatSize(), + tflite::micro::GetTensorData(&new_tensor)); + return new_tensor; +} + +} // namespace micro +} // namespace tflite diff 
--git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util_micro.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util_micro.cpp deleted file mode 100644 index 23eacea..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util_micro.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" - -namespace tflite { -namespace micro { - -bool HaveSameShapes(const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2) { - TFLITE_DCHECK(input1 != nullptr); - TFLITE_DCHECK(input2 != nullptr); - return TfLiteIntArrayEqual(input1->dims, input2->dims); -} - -const RuntimeShape GetTensorShape(const TfLiteEvalTensor* tensor) { - if (tensor == nullptr || tensor->dims == nullptr) { - return RuntimeShape(); - } - TfLiteIntArray* dims = tensor->dims; - const int dims_size = dims->size; - const int32_t* dims_data = reinterpret_cast(dims->data); - return RuntimeShape(dims_size, dims_data); -} - -PaddingType RuntimePaddingType(TfLitePadding padding) { - switch (padding) { - case TfLitePadding::kTfLitePaddingSame: - return PaddingType::kSame; - case TfLitePadding::kTfLitePaddingValid: - return PaddingType::kValid; - case TfLitePadding::kTfLitePaddingUnknown: - default: - return PaddingType::kNone; - } -} - -} // namespace micro -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2_pool_2d.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2_pool_2d.cc similarity index 80% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2_pool_2d.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2_pool_2d.cc index a693eba..8cd1e7e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2_pool_2d.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2_pool_2d.cc @@ -21,6 +21,7 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace { @@ -36,15 +37,18 @@ constexpr int kTensorShapeRank = 4; enum { kBatchRank = 0, kHeightRank, kWidthRank, kChannelRank }; TfLiteStatus L2Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + auto* params = static_cast(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kOutputTensor, &output)); - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); TF_LITE_ENSURE_EQ(context, NumDimensions(input), kTensorShapeRank); TF_LITE_ENSURE_EQ(context, NumDimensions(output), kTensorShapeRank); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); @@ -70,12 +74,21 @@ TfLiteStatus L2Prepare(TfLiteContext* context, TfLiteNode* node) { // The dims storage is expected to be the same area in memory // for both TfLiteTensor and TfLiteEvalTensor. This is important // because TfLiteTensor in the MicroInterpreter is a temporary - // allocation. + // allocation. For the KernelRunner interpreter, TfLiteEvalTensor + // is a temporary allocation. We must therefore relocate the dims + // from the FlatBuffer to the persistant storage arena. 
+ TfLiteEvalTensor* output_eval = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, tflite::micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); output->dims->data[kBatchRank] = batches; output->dims->data[kHeightRank] = out_height; output->dims->data[kWidthRank] = out_width; output->dims->data[kChannelRank] = channels_out; + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + return kTfLiteOk; } @@ -113,9 +126,8 @@ TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { L2EvalFloat(*params, *input, &op_params, output); break; default: - TF_LITE_KERNEL_LOG(context, - "L2_POOL_2D only supports float32 currently, got %s.", - TfLiteTypeGetName(input->type)); + MicroPrintf("L2_POOL_2D only supports float32 currently, got %s.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; @@ -124,14 +136,7 @@ TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace TfLiteRegistration Register_L2_POOL_2D() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/L2Prepare, - /*invoke=*/L2Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, L2Prepare, L2Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2norm.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2norm.cc similarity index 81% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2norm.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2norm.cc index 764929c..ede02db 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2norm.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/l2norm.cc @@ -19,6 +19,7 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/l2normalization.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace ops { @@ -49,28 +50,31 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); - TF_LITE_ENSURE(context, NumDimensions(input) <= 4); - TF_LITE_ENSURE(context, output->type == kTfLiteFloat32 || - output->type == kTfLiteUInt8 || - output->type == kTfLiteInt8); + TF_LITE_ENSURE(context, + output->type == kTfLiteFloat32 || output->type == kTfLiteInt8); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { + if (output->type == kTfLiteInt8) { data->input_zero_point = input->params.zero_point; } else if (output->type == kTfLiteFloat32) { data->input_zero_point = 0; } - // TODO(ahentz): For some reason our implementations don't support - // activations. + // Our implementations don't currently support activations. TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -110,12 +114,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output), epsilon); - } else if (output->type == kTfLiteUInt8) { - reference_ops::L2Normalization( - data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); } else if (output->type == kTfLiteInt8) { const auto input_shape = tflite::micro::GetTensorShape(input); const auto output_shape = tflite::micro::GetTensorShape(output); @@ -129,8 +127,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(input), tflite::micro::GetTensorData(output)); } else { - TF_LITE_KERNEL_LOG(context, "Output type is %s, requires float.", - TfLiteTypeGetName(output->type)); + MicroPrintf("Output type is %s, requires float.", + TfLiteTypeGetName(output->type)); return kTfLiteError; } @@ -140,14 +138,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace l2norm TfLiteRegistration Register_L2NORM_REF() { - return {/*init=*/l2norm::Init, - /*free=*/nullptr, - /*prepare=*/l2norm::Prepare, - /*invoke=*/l2norm::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(l2norm::Init, l2norm::Prepare, l2norm::Eval); } TfLiteRegistration Register_L2_NORMALIZATION() { return Register_L2NORM_REF(); } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.cpp 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.cc similarity index 58% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.cc index 1222f30..042528d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.cc @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21,23 +21,10 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace { - -// Input/output tensor index. -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct LeakyReluOpData { - // quantization parameters - int32_t output_multiplier_alpha; - int32_t output_shift_alpha; - int32_t output_multiplier_identity; - int32_t output_shift_identity; - int32_t input_zero_point; - int32_t output_zero_point; -}; template void QuantizeLeakyRelu(const LeakyReluOpData& data, @@ -58,51 +45,11 @@ void QuantizeLeakyRelu(const LeakyReluOpData& data, tflite::micro::GetTensorData(output)); } -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kOutputTensor, &output)); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - - if (output->type == kTfLiteInt8) { - LeakyReluOpData* data = static_cast(node->user_data); - const auto* params = - static_cast(node->builtin_data); - - data->input_zero_point = input->params.zero_point; - data->output_zero_point = output->params.zero_point; - - int output_shift_alpha; - double alpha_multiplier = static_cast( - input->params.scale * params->alpha / output->params.scale); - QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, - &output_shift_alpha); - data->output_shift_alpha = static_cast(output_shift_alpha); - - int output_shift_identity; - double identity_multiplier = - static_cast(input->params.scale / output->params.scale); - QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, - &output_shift_identity); - data->output_shift_identity = static_cast(output_shift_identity); - } - - return kTfLiteOk; -} - void* LeakyReluInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(LeakyReluOpData)); } -TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { - return CalculateOpData(context, node); -} - TfLiteStatus 
LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); @@ -127,27 +74,22 @@ TfLiteStatus LeakyReluEval(TfLiteContext* context, TfLiteNode* node) { QuantizeLeakyRelu(data, input, output); return kTfLiteOk; } break; + case kTfLiteInt16: { + QuantizeLeakyRelu(data, input, output); + return kTfLiteOk; + } break; default: - TF_LITE_KERNEL_LOG( - context, "Only float32, int8 are supported by LEAKY_RELU, got %s.", - TfLiteTypeGetName(input->type)); + MicroPrintf("Only float32, int8 are supported by LEAKY_RELU, got %s.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteError; } -} // namespace - TfLiteRegistration Register_LEAKY_RELU() { - return {/*init=*/LeakyReluInit, - /*free=*/nullptr, - /*prepare=*/LeakyReluPrepare, - /*invoke=*/LeakyReluEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(LeakyReluInit, LeakyReluPrepare, + LeakyReluEval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.h new file mode 100644 index 0000000..fe43060 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.h @@ -0,0 +1,43 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LEAKY_RELU_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LEAKY_RELU_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { + +// Input/output tensor index. +extern const int kInputTensor; +extern const int kOutputTensor; + +struct LeakyReluOpData { + // quantization parameters + int32_t output_multiplier_alpha; + int32_t output_shift_alpha; + int32_t output_multiplier_identity; + int32_t output_shift_identity; + int32_t input_zero_point; + int32_t output_zero_point; +}; + +TfLiteStatus CalculateOpDataLeakyRelu(TfLiteContext* context, TfLiteNode* node); + +TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LEAKY_RELU_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu_common.cc new file mode 100644 index 0000000..b71b743 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu_common.cc @@ -0,0 +1,78 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/leaky_relu.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/leaky_relu.h" + +namespace tflite { + +// Input/output tensor index. +const int kInputTensor = 0; +const int kOutputTensor = 0; + +TfLiteStatus CalculateOpDataLeakyRelu(TfLiteContext* context, + TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + LeakyReluOpData* data = static_cast(node->user_data); + const auto* params = + static_cast(node->builtin_data); + + data->input_zero_point = input->params.zero_point; + data->output_zero_point = output->params.zero_point; + + int output_shift_alpha; + double alpha_multiplier = static_cast( + input->params.scale * params->alpha / output->params.scale); + QuantizeMultiplier(alpha_multiplier, &data->output_multiplier_alpha, + &output_shift_alpha); + data->output_shift_alpha = static_cast(output_shift_alpha); + + int output_shift_identity; + double identity_multiplier = + static_cast(input->params.scale / output->params.scale); + QuantizeMultiplier(identity_multiplier, &data->output_multiplier_identity, + &output_shift_identity); + data->output_shift_identity = static_cast(output_shift_identity); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus LeakyReluPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpDataLeakyRelu(context, node); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/log_softmax.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/log_softmax.cc new file mode 100644 index 0000000..4cfccb2 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/log_softmax.cc @@ -0,0 +1,148 @@ +/* Copyright 2021 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/log_softmax.h" + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +// used only with quantized data +struct LogSoftmaxOpData { + int32_t input_multiplier; + int32_t input_left_shift; + int32_t reverse_scaling_divisor; + int32_t reverse_scaling_right_shift; + int diff_min; + size_t outer_size; // number of tensor elements skipping computation axis + size_t depth; // number of tensor elements on computation axis +}; + +// input/output tensor index +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + TF_LITE_ENSURE(context, HaveSameShapes(input, output)); + + if (input->type == kTfLiteInt8) { + node->user_data = + context->AllocatePersistentBuffer(context, sizeof(LogSoftmaxOpData)); + auto data = static_cast(node->user_data); + + // quantization datum + constexpr int32_t kOutputZeroPoint = 127; + constexpr float kOutputScale = 16.0 / 256; + constexpr double kBeta = 1.0; + constexpr int kScaledDiffIntegerBits = 5; + + TF_LITE_ENSURE(context, output->params.scale == kOutputScale); + TF_LITE_ENSURE(context, output->params.zero_point == kOutputZeroPoint); + + int input_left_shift; + int reverse_scaling_right_shift; + tflite::PreprocessLogSoftmaxScalingExp( + kBeta, static_cast(input->params.scale), kScaledDiffIntegerBits, + &data->input_multiplier, &input_left_shift, + &data->reverse_scaling_divisor, &reverse_scaling_right_shift); + data->input_left_shift = static_cast(input_left_shift); + data->reverse_scaling_right_shift = + static_cast(-reverse_scaling_right_shift); + // diff_min has a negative value, and is used to limit the maximum magnitude + // of the diffs, which are <= 0. 
+ data->diff_min = + -tflite::CalculateInputRadius(kScaledDiffIntegerBits, input_left_shift); + + RuntimeShape input_shape = GetTensorShape(input); + const int trailing_dim = input_shape.DimensionsCount() - 1; + data->outer_size = + static_cast(FlatSizeSkipDim(input_shape, trailing_dim)); + data->depth = static_cast(input_shape.Dims(trailing_dim)); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { + return CalculateOpData(context, node); +} + +TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { + const LogSoftmaxOpData* data = + static_cast(node->user_data); + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + switch (input->type) { + case kTfLiteFloat32: { + SoftmaxParams op_params = {}; + reference_ops::LogSoftmax(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + case kTfLiteInt8: { + SoftmaxParams op_params = {}; + op_params.input_multiplier = data->input_multiplier; + op_params.input_left_shift = data->input_left_shift; + op_params.reverse_scaling_divisor = data->reverse_scaling_divisor; + op_params.reverse_scaling_right_shift = data->reverse_scaling_right_shift; + op_params.diff_min = data->diff_min; + reference_ops::LogSoftmax(op_params, data->outer_size, data->depth, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + default: + MicroPrintf("LOG_SOFTMAX only supports float32, int8, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } +} + +} // namespace + +TfLiteRegistration Register_LOG_SOFTMAX() { + return tflite::micro::RegisterOp(nullptr, LogSoftmaxPrepare, LogSoftmaxEval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.cc new file mode 100644 index 0000000..2b38501 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.cc @@ -0,0 +1,44 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/binary_function.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +TfLiteStatus LogicalOrEval(TfLiteContext* context, TfLiteNode* node) { + return LogicalImpl(context, node, LogicalOr); +} + +TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) { + return LogicalImpl(context, node, LogicalAnd); +} + +} // namespace + +TfLiteRegistration Register_LOGICAL_OR() { + return tflite::micro::RegisterOp(nullptr, nullptr, LogicalOrEval); +} + +TfLiteRegistration Register_LOGICAL_AND() { + return tflite::micro::RegisterOp(nullptr, nullptr, LogicalAndEval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.h new file mode 100644 index 0000000..8dadde4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.h @@ -0,0 +1,35 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LOGICAL_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LOGICAL_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { +// Input/output tensor index. 
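+// (Defined in logical_common.cc as 0, 1 and 0 respectively; shared by the
+// LOGICAL_OR and LOGICAL_AND kernels registered in logical.cc.)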
+extern const int kLogicalInputTensor1; +extern const int kLogicalInputTensor2; +extern const int kLogicalOutputTensor; + +TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, + bool (*func)(bool, bool)); + +bool LogicalOr(bool x, bool y); +bool LogicalAnd(bool x, bool y); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LOGICAL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical_common.cc similarity index 58% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical_common.cc index 2f3a062..1586d2f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logical_common.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,26 +17,23 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/logical.h" namespace tflite { -namespace ops { -namespace micro { -namespace logical { -namespace { // Input/output tensor index. -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; +const int kLogicalInputTensor1 = 0; +const int kLogicalInputTensor2 = 1; +const int kLogicalOutputTensor = 0; TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, bool (*func)(bool, bool)) { const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); + tflite::micro::GetEvalInput(context, node, kLogicalInputTensor1); const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); + tflite::micro::GetEvalInput(context, node, kLogicalInputTensor2); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kLogicalOutputTensor); if (tflite::micro::HaveSameShapes(input1, input2)) { reference_ops::BinaryFunction( @@ -61,45 +58,6 @@ TfLiteStatus LogicalImpl(TfLiteContext* context, TfLiteNode* node, bool LogicalOr(bool x, bool y) { return x || y; } -TfLiteStatus LogicalOrEval(TfLiteContext* context, TfLiteNode* node) { - return LogicalImpl(context, node, LogicalOr); -} - bool LogicalAnd(bool x, bool y) { return x && y; } -TfLiteStatus LogicalAndEval(TfLiteContext* context, TfLiteNode* node) { - return LogicalImpl(context, node, LogicalAnd); -} - -} // namespace -} // namespace logical - -TfLiteRegistration Register_LOGICAL_OR() { - // Init, Free, Prepare, Eval are satisfying the Interface required by - // TfLiteRegistration. 
- return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/logical::LogicalOrEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_LOGICAL_AND() { - // Init, Free, Prepare, Eval are satisfying the Interface required by - // TfLiteRegistration. - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/logical::LogicalAndEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.cc similarity index 51% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.cc index 8959178..82579ea 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -24,71 +24,25 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace activations { namespace { -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - int32_t input_zero_point; - int32_t input_range_radius; - int32_t input_multiplier; - int input_left_shift; -}; - -TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node, - OpData* data) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, - std::numeric_limits::min()); - - static constexpr int kInputIntegerBits = 4; - const double input_real_multiplier = - static_cast(input->params.scale) * - static_cast(1 << (31 - kInputIntegerBits)); - - data->input_zero_point = input->params.zero_point; - - const double q = std::frexp(input_real_multiplier, &data->input_left_shift); - data->input_multiplier = static_cast(TfLiteRound(q * (1ll << 31))); - - data->input_range_radius = - CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31); - } - return kTfLiteOk; -} -} // namespace void* LogisticInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - 
-TfLiteStatus LogisticPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - return CalculateArithmeticOpData(context, node, data); + return context->AllocatePersistentBuffer(context, sizeof(OpDataLogistic)); } TfLiteStatus LogisticEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); + tflite::micro::GetEvalInput(context, node, kLogisticInputTensor); TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); + tflite::micro::GetEvalOutput(context, node, kLogisticOutputTensor); TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); + OpDataLogistic* data = static_cast(node->user_data); if (input->type == kTfLiteFloat32) { switch (output->type) { @@ -100,9 +54,25 @@ TfLiteStatus LogisticEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteInt16) { + switch (output->type) { + case kTfLiteInt16: { + reference_integer_ops::Logistic( + data->input_multiplier, data->input_left_shift, + NumElements(input->dims), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else if (input->type == kTfLiteInt8) { @@ -117,34 +87,25 @@ TfLiteStatus LogisticEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { // TODO(b/141211002): Also support other data types once we have supported // temporary tensors in TFLM. 
- TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; } -} // namespace activations +} // namespace TfLiteRegistration Register_LOGISTIC() { - return {/*init=*/activations::LogisticInit, - /*free=*/nullptr, - /*prepare=*/activations::LogisticPrepare, - /*invoke=*/activations::LogisticEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(LogisticInit, LogisticPrepare, LogisticEval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.h new file mode 100644 index 0000000..43325e1 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.h @@ -0,0 +1,42 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LOGISTIC_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LOGISTIC_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { +extern const int kLogisticInputTensor; +extern const int kLogisticOutputTensor; + +struct OpDataLogistic { + int32_t input_zero_point; + int32_t input_range_radius; + int32_t input_multiplier; + int input_left_shift; +}; + +TfLiteStatus CalculateArithmeticOpDataLogistic(TfLiteContext* context, + TfLiteNode* node, + OpDataLogistic* data); + +TfLiteStatus LogisticPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LOGISTIC_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic_common.cc new file mode 100644 index 0000000..9f27a91 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic_common.cc @@ -0,0 +1,119 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/logistic.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/logistic.h" + +namespace tflite { +const int kLogisticInputTensor = 0; +const int kLogisticOutputTensor = 0; + +TfLiteStatus CalculateArithmeticOpDataLogistic(TfLiteContext* context, + TfLiteNode* node, + OpDataLogistic* data) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kLogisticInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kLogisticOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + if (input->type == kTfLiteInt8) { + TF_LITE_ENSURE_EQ(context, output->params.zero_point, + std::numeric_limits::min()); + + static constexpr int kInputIntegerBits = 4; + const double input_real_multiplier = + static_cast(input->params.scale) * + static_cast(1 << (31 - kInputIntegerBits)); + + data->input_zero_point = input->params.zero_point; + + const double q = std::frexp(input_real_multiplier, &data->input_left_shift); + data->input_multiplier = static_cast(TfLiteRound(q * (1ll << 31))); + + data->input_range_radius = + CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31); + } + + if (input->type == kTfLiteInt16) { + static constexpr int kInputIntegerBits = 3; + static constexpr int kOutputFractionalBits = 15; + + // See comments in TanhPrepare about requiring zero_point==0 + // and a power-of-two ("POT") scale. + + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + + int input_scale_log2_rounded; + bool param_scale_pot = + CheckedLog2(input->params.scale, &input_scale_log2_rounded); + + data->input_left_shift = + (15 - kInputIntegerBits) + input_scale_log2_rounded; + param_scale_pot &= (data->input_left_shift == 0); + + if (param_scale_pot) { + data->input_multiplier = 0; + } else { + // Calculate multiplier to change input scale to 1/(3*4096) + // as required by the table lookup. 
+ // In this scaling +/-2^17 represents +/-10.7 + double multiplier = + static_cast(input->params.scale) * 4096.0 * 3.0; + + data->input_left_shift = 0; + + while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) { + data->input_left_shift++; + multiplier = multiplier * 2.0; + } + + data->input_multiplier = static_cast(multiplier); + } + + int output_scale_log2_rounded; + TF_LITE_ENSURE( + context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); + TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, + -kOutputFractionalBits); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus LogisticPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + OpDataLogistic* data = static_cast(node->user_data); + + return CalculateArithmeticOpDataLogistic(context, node, data); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.cc new file mode 100644 index 0000000..037caf7 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.cc @@ -0,0 +1,222 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/logistic.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/tanh.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { +namespace lstm_internal { + +const int32_t kInt16Max = std::numeric_limits::max(); +const int32_t kInt16Min = std::numeric_limits::min(); + +void AddElementWise(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int16_t* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + int32_t sum = input_1[index] + input_2[index]; + const int32_t sum_clamped = std::min(kInt16Max, std::max(kInt16Min, sum)); + output[index] = static_cast(sum_clamped); + } + } +} + +void AddElementWise(const float* input_1, const float* input_2, int n_batch, + int n_input, float* output) { + for (int batch = 0; batch < n_batch; ++batch) { + for (int i = 0; i < n_input; ++i) { + const int index = batch * n_input + i; + output[index] = input_1[index] + input_2[index]; + } + } +} + +void Sigmoid(const RuntimeShape& data_shape, int16_t* data) { + reference_integer_ops::Logistic( + 0 /*data->input_multiplier*/, 0 /*data->input_left_shift */, + data_shape.FlatSize() /*NumElements(input->dims)*/, + data /* tflite::micro::GetTensorData(input) */, + data /*tflite::micro::GetTensorData(output) */); +} + +void Sigmoid(const RuntimeShape& data_shape, float* data) { + reference_ops::Logistic(data_shape, data, data_shape, data); +} + +void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape, + int16_t* input_data, const RuntimeShape& output_data_shape, + int16_t* output_data) { + int32_t tanh_input_left_shift = (15 + cell_state_scale_power) - 3; + if (tanh_input_left_shift < 0) /* handling negative shift value */ + { + int32_t i; + tanh_input_left_shift = -tanh_input_left_shift; + for (i = 0; i < input_data_shape.FlatSize(); i++) { + input_data[i] = input_data[i] >> tanh_input_left_shift; + } + tanh_input_left_shift = 0; + } + reference_integer_ops::Tanh(0, tanh_input_left_shift, input_data_shape, + input_data, output_data_shape, output_data); +} + +void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape, + float* input_data, const RuntimeShape& output_data_shape, + float* output_data) { + reference_ops::Tanh(input_data_shape, input_data, output_data_shape, + output_data); +} + +// Input and output have the same shape in LSTM +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const int16_t* input1_data, const int16_t* input2_data, + int8_t* output_data) { + return reference_integer_ops::MulElementwise( + shape.FlatSize(), params, input1_data, input2_data, output_data); +} + +// Input and output have the same 
shape in LSTM +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const int16_t* input1_data, const int16_t* input2_data, + int16_t* output_data) { + return reference_integer_ops::MulElementwise( + shape.FlatSize(), params, input1_data, input2_data, output_data); +} + +// Input and output have the same shape in LSTM +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const float* input1_data, const float* input2_data, + float* output_data) { + return reference_ops::Mul(params, shape, input1_data, shape, input2_data, + shape, output_data); +} + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const int8_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const int32_t* bias_data, + const RuntimeShape& output_shape, int16_t* output_data) { + return tflite::reference_integer_ops::FullyConnected( + params, input_shape, input_data, filter_shape, filter_data, bias_shape, + bias_data, output_shape, output_data); +} + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const int16_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const int64_t* bias_data, + const RuntimeShape& output_shape, int16_t* output_data) { + return tflite::reference_integer_ops::FullyConnected( + params, input_shape, input_data, filter_shape, filter_data, bias_shape, + bias_data, output_shape, output_data); +} + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& filter_shape, const float* filter_data, + const RuntimeShape& bias_shape, const float* bias_data, + const RuntimeShape& output_shape, float* output_data) { + return tflite::reference_ops::FullyConnected( + params, input_shape, input_data, filter_shape, filter_data, bias_shape, + bias_data, output_shape, output_data); +} + +void Clipping(const int v_size, const CellStateInfo& cell_state_info, + int16_t* vector) { + for (int i = 0; i < v_size; i++) { + vector[i] = + std::max(std::min(cell_state_info.quantized_cell_clip, vector[i]), + static_cast(-cell_state_info.quantized_cell_clip)); + } +} + +void Clipping(const int v_size, const CellStateInfo& cell_state_info, + float* vector) { + for (int i = 0; i < v_size; i++) { + vector[i] = std::max(std::min(cell_state_info.cell_clip, vector[i]), + -cell_state_info.cell_clip); + } +} + +// Increment the data offset so the sigle time step invocation call can access +// the corresponding input/output tensor data at the time step +void LstmStepManager::UpdateTime() { + current_time_ += 1; + TFLITE_DCHECK_LE(current_time_, size_info_.time_steps); + // default as one batch per inference + int input_step = size_info_.input_dimension; + int output_step = size_info_.state_dimension; + // time major: batch inference + if (size_info_.time_major) { + input_step = input_step * size_info_.batch_size; + output_step = output_step * size_info_.batch_size; + } + + input_offset_ += input_step; + output_offset_ += output_step; +} + +// Increment the data offset so the sigle time step invocation call can access +// the corresponding hidden/cell state tensor data at the time step (for single +// batch inference only) +void LstmStepManager::UpdateBatch() { + current_batch_ += 1; + TFLITE_DCHECK_LE(current_batch_, size_info_.batch_size); + // batch inference for time major: no action needed + if (size_info_.time_major) { + 
return; + } + // otherwise: singe batch inference, go to the next batch + hidden_state_offset_ += size_info_.state_dimension; + cell_state_offset_ += size_info_.state_dimension; +} + +// Input shape for each single time LSTM invocation. +// Multi-batch for time_major input +RuntimeShape LstmStepManager::InputShape() const { + int batch_size = 1; + if (size_info_.time_major) { + batch_size = size_info_.batch_size; + } + const int dims[2] = {batch_size, size_info_.input_dimension}; + const int32_t* dims_data = reinterpret_cast(dims); + return RuntimeShape(2, dims_data); +} + +// State shape (both hidden and cell) for each single time LSTM invocation. +// Multi-batch for time_major input +RuntimeShape LstmStepManager::StateShape() const { + int batch_size = 1; + if (size_info_.time_major) { + batch_size = size_info_.batch_size; + } + const int dims[2] = {batch_size, size_info_.state_dimension}; + const int32_t* dims_data = reinterpret_cast(dims); + return RuntimeShape(2, dims_data); +} + +} // namespace lstm_internal +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.h new file mode 100644 index 0000000..fcdbfe8 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.h @@ -0,0 +1,417 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// Functions to perform integer evaulation for standard LSTM (e.g., defined in +// the keras lstm layer, no peephole etc.). 
Currently used by the 16 bits +// activation case only + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_GENERAL_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_GENERAL_H_ +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_shared.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +// Since LSTM includes multiple intermediate stages, introducing the internal +// namespace to expose them for testing +namespace lstm_internal { + +void Sigmoid(const RuntimeShape& data_shape, int16_t* data); + +void Sigmoid(const RuntimeShape& data_shape, float* data); + +void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape, + int16_t* input_data, const RuntimeShape& output_data_shape, + int16_t* output_data); + +void Tanh(int32_t cell_state_scale_power, const RuntimeShape& input_data_shape, + float* input_data, const RuntimeShape& output_data_shape, + float* output_data); + +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const int16_t* input1_data, const int16_t* input2_data, + int8_t* output_data); + +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const int16_t* input1_data, const int16_t* input2_data, + int16_t* output_data); + +void Mul(const RuntimeShape& shape, const ArithmeticParams& params, + const float* input1_data, const float* input2_data, + float* output_data); + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const int8_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const int32_t* bias_data, + const RuntimeShape& output_shape, int16_t* output_data); + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const int16_t* input_data, + const RuntimeShape& filter_shape, const int8_t* filter_data, + const RuntimeShape& bias_shape, const int64_t* bias_data, + const RuntimeShape& output_shape, int16_t* output_data); + +void FullyConnected(const FullyConnectedParams& params, + const RuntimeShape& input_shape, const float* input_data, + const RuntimeShape& filter_shape, const float* filter_data, + const RuntimeShape& bias_shape, const float* bias_data, + const RuntimeShape& output_shape, float* output_data); + +void AddElementWise(const int16_t* input_1, const int16_t* input_2, int n_batch, + int n_input, int16_t* output); + +void AddElementWise(const float* input_1, const float* input_2, int n_batch, + int n_input, float* output); + +void Clipping(const int v_size, const CellStateInfo& cell_state_info, + int16_t* vector); + +void Clipping(const int v_size, const CellStateInfo& cell_state_info, + float* vector); + +// Manages the slice position (offset), slice length (sliced tensor shape), +// and update rules for input/output/hidden state/cell state tensors at each +// time step. +class LstmStepManager { + public: + LstmStepManager() = delete; + // Does not take any ownership, and all pointers must refer to valid objects + // that outlive the one constructed. 
+ explicit LstmStepManager(const LstmSizeInfo* size_info) + : size_info_(*size_info) {} + + void UpdateTime(); + void UpdateBatch(); + + void ResetTime() { current_time_ = 0; } + RuntimeShape InputShape() const; + RuntimeShape StateShape() const; + + int InputOffset() const { return input_offset_; } + int OutputOffset() const { return output_offset_; } + int HiddenStateOffset() const { return hidden_state_offset_; } + int CellStateOffset() const { return cell_state_offset_; } + + private: + int current_time_ = 0; + int current_batch_ = 0; + int input_offset_ = 0; + int output_offset_ = 0; + int hidden_state_offset_ = 0; + int cell_state_offset_ = 0; + // Sizeinfo is from LstmOpData, which reside in the memory arena + // (guarante to outlast LSTMStepManager, which reside in stack) + const LstmSizeInfo& size_info_; +}; + +// Calculates a single LSTM gate. +// Implements the following formula: +// gate = activate(FC(input) + FC(recurrent)) +// Activation is sigmoid except for the "cell" gate (configurable, usually tanh) +template +void CalculateLstmGate( + const LstmStepManager& step_info, const GateParameters& gate_params, + // Input FC + const TfLiteEvalTensor* input, const TfLiteEvalTensor* input_weight, + const TfLiteEvalTensor* input_bias, + // Recurrent FC + const TfLiteEvalTensor* recurrent, const TfLiteEvalTensor* recurrent_weight, + const TfLiteEvalTensor* recurrent_bias, + // Output + CellType* gate_output, + // Scratch arrays + CellType* fc_output_buffer, const TfLiteFusedActivation activation) { + const auto gate_output_shape = step_info.StateShape(); + // Check offset validity to avoid memory overflow + TFLITE_DCHECK_LE(step_info.InputOffset() + step_info.InputShape().FlatSize(), + tflite::micro::GetTensorShape(input).FlatSize()); + TFLITE_DCHECK_LE( + step_info.HiddenStateOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(recurrent).FlatSize()); + + // Input FC + FullyConnected(gate_params.input_fc_params, step_info.InputShape(), + tflite::micro::GetTensorData(input) + + step_info.InputOffset(), + micro::GetTensorShape(input_weight), + tflite::micro::GetTensorData(input_weight), + tflite::micro::GetTensorShape(input_bias), + tflite::micro::GetOptionalTensorData(input_bias), + gate_output_shape, gate_output); + + // Recurrent FC + FullyConnected(gate_params.recurrent_fc_params, step_info.StateShape(), + tflite::micro::GetTensorData(recurrent) + + step_info.HiddenStateOffset(), + tflite::micro::GetTensorShape(recurrent_weight), + tflite::micro::GetTensorData(recurrent_weight), + tflite::micro::GetTensorShape(recurrent_bias), + tflite::micro::GetOptionalTensorData(recurrent_bias), + gate_output_shape, fc_output_buffer); + + AddElementWise(gate_output, fc_output_buffer, + /*n_batch=*/gate_output_shape.DimsData()[0], + /*n_state=*/gate_output_shape.DimsData()[1], gate_output); + // Apply activation + switch (activation) { + case kTfLiteActSigmoid: + Sigmoid(gate_output_shape, gate_output); + break; + case kTfLiteActTanh: { + // Set the scale power to -12 to avoid shift + Tanh(/*cell_state_scale_power=*/-12, gate_output_shape, gate_output, + gate_output_shape, gate_output); + } break; + default: + // Only Sigmoid or Tanh is used. 
+ TFLITE_ASSERT_FALSE; + } +} + +// Update the cell state using the output from the forget gate, input gate, and +// cell gate Formula: updated_cell_state = forget_gate_output*cell_state + +// input_gate_output * cell_gate_output, where * denotes element wise +// multiplication +template +void UpdateLstmCell(const LstmStepManager& step_info, + TfLiteEvalTensor* cell_state, + // Gate outputs + CellType* forget_gate_output, + const CellType* input_gate_output, + const CellType* cell_gate_output, + // Mul parameters + const ArithmeticParams& forget_cell_mul_params, + const ArithmeticParams& input_mul_params, + const CellStateInfo& cell_state_info, CellType* buffer) { + // Check offset validity to avoid memory overflow + TFLITE_DCHECK_LE( + step_info.CellStateOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(cell_state).FlatSize()); + + auto cell_state_shape = step_info.StateShape(); + // Forget Gate x Cell State + Mul(cell_state_shape, forget_cell_mul_params, forget_gate_output, + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset(), + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset()); + // Input Gate x Cell Gate + Mul(cell_state_shape, input_mul_params, input_gate_output, cell_gate_output, + buffer); + + // Update the cell state + AddElementWise(tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset(), + buffer, + /*n_batch=*/cell_state_shape.DimsData()[0], + /*n_state=*/cell_state_shape.DimsData()[1], + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset()); + + if (cell_state_info.cell_clip > 0) { + Clipping(cell_state_shape.FlatSize(), cell_state_info, + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset()); + } +} + +// Update the hidden state of the LSTM kernel using the following formula: +// updated_hidden_state = Tanh(updated_cell_state) * output_gate_output, * means +// element wise multiplication +template +void UpdateLstmHidden(const LstmStepManager& step_info, + TfLiteEvalTensor* cell_state, + TfLiteEvalTensor* hidden_state, + const CellType* output_gate_output, + const ArithmeticParams& mul_params, + int32_t cell_state_scale_power, CellType* buffer) { + // Check offset validity to avoid memory overflow + TFLITE_DCHECK_LE( + step_info.CellStateOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(cell_state).FlatSize()); + TFLITE_DCHECK_LE( + step_info.HiddenStateOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(hidden_state).FlatSize()); + + auto cell_state_shape = step_info.StateShape(); + CellType* cell_state_data = + tflite::micro::GetTensorData(cell_state) + + step_info.CellStateOffset(); + // Tanh(cell_state) + Tanh(cell_state_scale_power, cell_state_shape, cell_state_data, + cell_state_shape, buffer); + // Update the hidden state + Mul(cell_state_shape, mul_params, buffer, output_gate_output, + tflite::micro::GetTensorData(hidden_state) + + step_info.HiddenStateOffset()); +} + +template +void LstmStep(const LstmStepManager& step_info, const OpDataLSTM& op_data, + LSTMKernelContents& kernel_content, + LSTMBuffers& buffers) { + /*Step1: Calculate gate outputs to prepare cell state update*/ + CellType* gate_internal_buffer = buffers.buffer3; + CellType* forget_gate_output = buffers.buffer0; + CalculateLstmGate( + step_info, op_data.forget_gate_parameters, + // Input FC + kernel_content.GetInternalTensor(tflite::kLstmInputTensor), + 
kernel_content.GetInternalTensor(tflite::kLstmInputToForgetWeightsTensor), + kernel_content.GetInternalTensor(tflite::kLstmForgetGateBiasTensor), + // Recurrent FC + kernel_content.HiddenStateTensor(), + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToForgetWeightsTensor), + /*recurrent_bias*/ nullptr, + // Output + forget_gate_output, + // Scratch arrays + gate_internal_buffer, kTfLiteActSigmoid); + + // Input Gate calculation; + CellType* input_gate_output = buffers.buffer1; + CalculateLstmGate( + step_info, op_data.input_gate_parameters, + // Input FC + kernel_content.GetInternalTensor(tflite::kLstmInputTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputToInputWeightsTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputGateBiasTensor), + // Recurrent FC + kernel_content.HiddenStateTensor(), + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToInputWeightsTensor), + /*recurrent_bias*/ nullptr, + // Output + input_gate_output, + // Scratch arrays + gate_internal_buffer, kTfLiteActSigmoid); + + // Cell Gate calculation + CellType* cell_gate_output = buffers.buffer2; + CalculateLstmGate( + step_info, op_data.cell_gate_parameters, + // Input FC + kernel_content.GetInternalTensor(tflite::kLstmInputTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputToCellWeightsTensor), + kernel_content.GetInternalTensor(tflite::kLstmCellGateBiasTensor), + // Recurrent FC + kernel_content.HiddenStateTensor(), + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToCellWeightsTensor), + /*recurrent_bias*/ nullptr, + // Output + cell_gate_output, + // Scratch arrays + gate_internal_buffer, op_data.cell_gate_nonlinear_type); + + /*Step2: update the cell state */ + const InterGateParameters& inter_gate_params = op_data.inter_gate_parameters; + CellType* updated_input_buffer = buffers.buffer1; // reuse buffer + + UpdateLstmCell(step_info, kernel_content.CellStateTensor(), + forget_gate_output, input_gate_output, + cell_gate_output, + inter_gate_params.forget_cell_mul_params, + inter_gate_params.input_mul_params, + op_data.cell_state_info, updated_input_buffer); + + /*Step3: update the hidden state */ + CellType* output_gate_output = buffers.buffer1; // reuse buffer + CalculateLstmGate( + step_info, op_data.output_gate_parameters, + // Input FC + kernel_content.GetInternalTensor(tflite::kLstmInputTensor), + kernel_content.GetInternalTensor(tflite::kLstmInputToOutputWeightsTensor), + kernel_content.GetInternalTensor(tflite::kLstmOutputGateBiasTensor), + // Recurrent FC + kernel_content.HiddenStateTensor(), + kernel_content.GetInternalTensor( + tflite::kLstmRecurrentToOutputWeightsTensor), + /*recurrent_bias*/ nullptr, + // Output + output_gate_output, + // Scratch arrays + gate_internal_buffer, kTfLiteActSigmoid); + + CellType* tanh_activated_cell_buffer = buffers.buffer0; // reuse buffer + tflite::lstm_internal::UpdateLstmHidden( + step_info, kernel_content.CellStateTensor(), + kernel_content.HiddenStateTensor(), output_gate_output, + inter_gate_params.output_mul_params, + op_data.cell_state_info.cell_state_scale_power, + tanh_activated_cell_buffer); + + /*Step4: copy the update the hidden state to output*/ + // Check offset validity to avoid memory overflow + TFLITE_DCHECK_LE( + step_info.OutputOffset() + step_info.StateShape().FlatSize(), + tflite::micro::GetTensorShape(kernel_content.output_tensor).FlatSize()); + // record the output (from the updated hidden state) + ActivationType* output_ptr = tflite::micro::GetTensorData( + 
kernel_content.output_tensor); + const auto* hidden_state = kernel_content.HiddenStateTensor(); + std::memcpy(output_ptr + step_info.OutputOffset(), + tflite::micro::GetTensorData(hidden_state) + + step_info.HiddenStateOffset(), + step_info.StateShape().FlatSize() * sizeof(ActivationType)); +} + +} // namespace lstm_internal + +// Evaulate the LSTM kernel with (potential) multi-steps and multi-batch input +// Since +template +TfLiteStatus EvalLstm(const OpDataLSTM& op_data, + LSTMKernelContents& kernel_content, + LSTMBuffers& buffers) { + lstm_internal::LstmStepManager step_info(&op_data.size_info); + const auto& size_info = op_data.size_info; + // time is the first dimention, enable batch computation + if (size_info.time_major) { + for (int t = 0; t < size_info.time_steps; t++) { + lstm_internal::LstmStep( + step_info, op_data, kernel_content, buffers); + // prepare for the next time step + step_info.UpdateTime(); + } + } else { + // batch first, unable to size the input data. single batch inference + for (int b = 0; b < size_info.batch_size; b++) { + for (int t = 0; t < size_info.time_steps; t++) { + lstm_internal::LstmStep( + step_info, op_data, kernel_content, buffers); + // prepare for the next time step + step_info.UpdateTime(); + } + // prepare for the next batch + step_info.UpdateBatch(); + step_info.ResetTime(); + } + } + return kTfLiteOk; +} +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_16ACT_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval_test.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval_test.h new file mode 100644 index 0000000..cfaec49 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval_test.h @@ -0,0 +1,817 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_TEST_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_TEST_H_ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/testdata/lstm_test_data.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/testing/micro_test.h" + +namespace tflite { +namespace testing { + +/*Helper Functions (mainly about mimicking the kernel preparation)*/ + +// Create fully connected parameters using quantization settings of input and +// weight tensors. 
+// Since TfLiteContext is not available during the kernel test, here we mimic +// (put into stack memory) CalculateOpDataFullyConnected in +// tensorflow/lite/micro/kernels/fully_connected_common.cc +template +tflite::FullyConnectedParams CreateFCParams( + const TensorQuantizationParameters& input_quant_params, + const TensorQuantizationParameters& weight_quant_params, + const float nonlinear_activation_input_scale) { + OpDataFullyConnected data; + const double input_product_scale = + input_quant_params.scale * weight_quant_params.scale; + double effective_scale = + input_product_scale / + static_cast(nonlinear_activation_input_scale); + + QuantizeMultiplier(effective_scale, &data.output_multiplier, + &data.output_shift); + + data.input_zero_point = input_quant_params.zero_point; + + data.filter_zero_point = 0; // symmetrically quantized + data.output_zero_point = 0; // symmetrically quantized + + data.output_activation_min = std::numeric_limits::min(); + data.output_activation_max = std::numeric_limits::max(); + + return tflite::FullyConnectedParamsQuantized(data); +} + +inline tflite::FullyConnectedParams CreateFCParamsFloat() { + FullyConnectedParams op_params; + CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +// Wrapper function to create gate parameters for the four internal LSTM gates +template +tflite::GateParameters CreateGateParams( + const TensorQuantizationParameters& input_quant_params, + const TensorQuantizationParameters& hidden_state_quant_params, + const GateQuantizationParameters& gate_quantization_settings, + const float nonlinear_activation_input_scale) { + tflite::GateParameters gate_params = {}; + gate_params.input_fc_params = CreateFCParams( + input_quant_params, gate_quantization_settings.activation_weight, + nonlinear_activation_input_scale); + gate_params.recurrent_fc_params = CreateFCParams( + hidden_state_quant_params, gate_quantization_settings.recurrent_weight, + nonlinear_activation_input_scale); + return gate_params; +} + +inline tflite::GateParameters CreateGateParamsFloat() { + tflite::GateParameters gate_params = {}; + gate_params.input_fc_params = CreateFCParamsFloat(); + gate_params.recurrent_fc_params = CreateFCParamsFloat(); + return gate_params; +} +// Create parameters for element wise multiplication that happens in a) cell +// state update ; b) hidden state update +// Note that all the output of gates are symmetrically quantized so only scales +// are required for input. However, during the hidden state update phase, the +// output is the updated hidden state, which is asymmetrically quantized. 
Thus +// output may require zero point +template +tflite::ArithmeticParams CreateInterGateMulParams(const float input1_scale, + const float input2_scale, + const float output_scale, + const int output_zp = 0) { + tflite::ArithmeticParams op_params = {}; + op_params.quantized_activation_min = std::numeric_limits::min(); + op_params.quantized_activation_max = std::numeric_limits::max(); + op_params.input1_offset = 0; + op_params.input2_offset = 0; + op_params.output_offset = output_zp; + + const double input_product_scale = + static_cast(input1_scale) * static_cast(input2_scale); + double effective_scale = + input_product_scale / static_cast(output_scale); + + QuantizeMultiplier(effective_scale, &op_params.output_multiplier, + &op_params.output_shift); + return op_params; +} + +inline tflite::ArithmeticParams CreateInterGateMulParamsFloat() { + tflite::ArithmeticParams op_params = {}; + CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +// Create the additional information about the cell state, which include: +// cell_state_scale_power: used in integer nonlinear function (e.g., tanh) +// quantized_cell_clip: quantized cell clip range +CellStateInfo CreateLstmCellStateInfo(const float cell_state_scale, + const float cell_clip) { + CellStateInfo cell_state_info; + // cell_state_scale_power: 2^-cell_state_scale_power = cell state scale + int buffer; + tflite::CheckedLog2(cell_state_scale, &buffer); + cell_state_info.cell_state_scale_power = buffer; + // Cell state specifics + cell_state_info.cell_clip = cell_clip; + cell_state_info.quantized_cell_clip = static_cast( + std::min(std::max(static_cast(cell_clip) / + static_cast(cell_state_scale), + -32768.0), + 32767.0)); + return cell_state_info; +} + +// Create LSTMKernelContents from LstmNodeContent by copying TfLiteEvalTensor +// pointers +template +LSTMKernelContents CreateLSTMKernelContent( + LstmNodeContent& + node_contents) { + LSTMKernelContents kernel_content; + // Point to correct tensors + kernel_content.internal_tensors[kLstmInputTensor] = + node_contents.GetEvalTensor(kLstmInputTensor); + kernel_content.internal_tensors[kLstmInputToInputWeightsTensor] = + node_contents.GetEvalTensor(kLstmInputToInputWeightsTensor); + kernel_content.internal_tensors[kLstmInputToForgetWeightsTensor] = + node_contents.GetEvalTensor(kLstmInputToForgetWeightsTensor); + kernel_content.internal_tensors[kLstmInputToCellWeightsTensor] = + node_contents.GetEvalTensor(kLstmInputToCellWeightsTensor); + kernel_content.internal_tensors[kLstmInputToOutputWeightsTensor] = + node_contents.GetEvalTensor(kLstmInputToOutputWeightsTensor); + kernel_content.internal_tensors[kLstmRecurrentToInputWeightsTensor] = + node_contents.GetEvalTensor(kLstmRecurrentToInputWeightsTensor); + kernel_content.internal_tensors[kLstmRecurrentToForgetWeightsTensor] = + node_contents.GetEvalTensor(kLstmRecurrentToForgetWeightsTensor); + kernel_content.internal_tensors[kLstmRecurrentToCellWeightsTensor] = + node_contents.GetEvalTensor(kLstmRecurrentToCellWeightsTensor); + kernel_content.internal_tensors[kLstmRecurrentToOutputWeightsTensor] = + node_contents.GetEvalTensor(kLstmRecurrentToOutputWeightsTensor); + kernel_content.internal_tensors[kLstmInputGateBiasTensor] = + node_contents.GetEvalTensor(kLstmInputGateBiasTensor); + kernel_content.internal_tensors[kLstmForgetGateBiasTensor] = + node_contents.GetEvalTensor(kLstmForgetGateBiasTensor); + kernel_content.internal_tensors[kLstmCellGateBiasTensor] = + 
node_contents.GetEvalTensor(kLstmCellGateBiasTensor); + kernel_content.internal_tensors[kLstmOutputGateBiasTensor] = + node_contents.GetEvalTensor(kLstmOutputGateBiasTensor); + kernel_content.internal_tensors[kLstmOutputStateTensor] = + node_contents.GetEvalTensor(kLstmOutputStateTensor); + kernel_content.internal_tensors[kLstmOutputGateBiasTensor] = + node_contents.GetEvalTensor(kLstmOutputGateBiasTensor); + kernel_content.internal_tensors[kLstmCellStateTensor] = + node_contents.GetEvalTensor(kLstmCellStateTensor); + // Not used internal tensors + kernel_content.internal_tensors[kLstmCellToInputWeightsTensor] = nullptr; + kernel_content.internal_tensors[kLstmCellToForgetWeightsTensor] = nullptr; + kernel_content.internal_tensors[kLstmCellToOutputWeightsTensor] = nullptr; + kernel_content.internal_tensors[kLstmProjectionWeightsTensor] = nullptr; + kernel_content.internal_tensors[kLstmProjectionBiasTensor] = nullptr; + kernel_content.internal_tensors[kLstmInputLayerNormCoefficientsTensor] = + nullptr; + kernel_content.internal_tensors[kLstmForgetLayerNormCoefficientsTensor] = + nullptr; + kernel_content.internal_tensors[kLstmInputLayerNormCoefficientsTensor] = + nullptr; + kernel_content.internal_tensors[kLstmCellLayerNormCoefficientsTensor] = + nullptr; + kernel_content.internal_tensors[kLstmOutputLayerNormCoefficientsTensor] = + nullptr; + // Output tensor + kernel_content.output_tensor = node_contents.OutputEvalTensor(); + return kernel_content; +} + +// Deduce the size information (Batch (B), Time Steps (T), Input dimension (I), +// State dimension (S)) that defines the LSTM using the input and hidden state +// tensor +LstmSizeInfo CreateLstmSizeInfo( + const bool time_major, const TfLiteIntArray* input_tensor_shape, + const TfLiteIntArray* hidden_state_tensor_shape) { + LstmSizeInfo size_info; + size_info.time_major = time_major; + size_info.batch_size = + time_major ? input_tensor_shape->data[1] : input_tensor_shape->data[0]; + size_info.time_steps = + time_major ? input_tensor_shape->data[0] : input_tensor_shape->data[1]; + size_info.input_dimension = input_tensor_shape->data[2]; + size_info.state_dimension = hidden_state_tensor_shape->data[1]; + return size_info; +} + +// Create the LstmOpData using the LstmNodeContent and +// NodeQuantizationParameters (defined in test_data/lstm_test_data) During the +// actual inference phase, OpDataLSTM is created using information from the +// flatbuffer file. 
The test divide the complete LSTM node information into +// LstmNodeContent and NodeQuantizationParameters for easy construction +// purposes +template +OpDataLSTM CreateLstmOpData( + LstmNodeContent& + node_contents) { + const auto& builtin_data = node_contents.BuiltinData(); + const auto& quantization_settings = node_contents.QuantizationSettings(); + OpDataLSTM op_data; + + op_data.cell_gate_nonlinear_type = builtin_data.activation; + op_data.size_info = + CreateLstmSizeInfo(builtin_data.time_major, + node_contents.GetEvalTensor(kLstmInputTensor)->dims, + node_contents.HiddenStateEvalTensor()->dims); + + op_data.cell_state_info = CreateLstmCellStateInfo( + quantization_settings.cell_state.scale, builtin_data.cell_clip); + + // Gate Parameters + op_data.forget_gate_parameters = CreateGateParams( + quantization_settings.input, quantization_settings.hidden_state, + quantization_settings.forget_gate, + quantization_settings.nonlinear_activation_input_scale); + op_data.input_gate_parameters = CreateGateParams( + quantization_settings.input, quantization_settings.hidden_state, + quantization_settings.input_gate, + quantization_settings.nonlinear_activation_input_scale); + op_data.cell_gate_parameters = CreateGateParams( + quantization_settings.input, quantization_settings.hidden_state, + quantization_settings.cell_gate, + quantization_settings.nonlinear_activation_input_scale); + op_data.output_gate_parameters = CreateGateParams( + quantization_settings.input, quantization_settings.hidden_state, + quantization_settings.output_gate, + quantization_settings.nonlinear_activation_input_scale); + // Inter gate multiplication parameters + op_data.inter_gate_parameters.forget_cell_mul_params = + CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.cell_state.scale, + quantization_settings.cell_state.scale); + op_data.inter_gate_parameters.input_mul_params = + CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.cell_state.scale); + op_data.inter_gate_parameters.output_mul_params = + CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point); + return op_data; +} + +template +OpDataLSTM CreateLstmOpDataFloat( + LstmNodeContent& node_contents) { + const auto& builtin_data = node_contents.BuiltinData(); + OpDataLSTM op_data; + + op_data.cell_gate_nonlinear_type = builtin_data.activation; + op_data.size_info = + CreateLstmSizeInfo(builtin_data.time_major, + node_contents.GetEvalTensor(kLstmInputTensor)->dims, + node_contents.HiddenStateEvalTensor()->dims); + op_data.cell_state_info.cell_clip = builtin_data.cell_clip; + op_data.cell_state_info.quantized_cell_clip = 0; // No quantization + op_data.cell_state_info.cell_state_scale_power = 0; // No quantization + + // Gate Parameters + op_data.forget_gate_parameters = CreateGateParamsFloat(); + op_data.input_gate_parameters = CreateGateParamsFloat(); + op_data.cell_gate_parameters = CreateGateParamsFloat(); + op_data.output_gate_parameters = CreateGateParamsFloat(); + // Inter gate multiplication parameters + op_data.inter_gate_parameters.forget_cell_mul_params = + CreateInterGateMulParamsFloat(); + op_data.inter_gate_parameters.input_mul_params = + CreateInterGateMulParamsFloat(); + 
op_data.inter_gate_parameters.output_mul_params = + CreateInterGateMulParamsFloat(); + return op_data; +} + +/*Test Functions Below Here*/ +template +void ValidateResultGoldens(const T* golden, const T* output_data, + const int output_len, const float tolerance) { + for (int i = 0; i < output_len; ++i) { + TF_LITE_MICRO_EXPECT_NEAR(golden[i], output_data[i], tolerance); + } +} + +template +void TestCalculateLstmGateFloat(const TfLiteEvalTensor* input, + const TfLiteEvalTensor* input_weight, + const TfLiteEvalTensor* input_bias, + // Recurrent FC + const TfLiteEvalTensor* recurrent, + const TfLiteEvalTensor* recurrent_weight, + const TfLiteEvalTensor* recurrent_bias, + // Result comparison + TfLiteFusedActivation nonlinear_type, + const float* expected_vals, float tolerance) { + float gate_output[batch_size * state_dimension] = {}; + float fc_output_buffer[batch_size * state_dimension] = {}; + + tflite::GateParameters gate_params = CreateGateParamsFloat(); + + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, input->dims, recurrent->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + tflite::lstm_internal::CalculateLstmGate( + step_info, gate_params, + // Input FC + input, input_weight, input_bias, + // Recurrent FC + recurrent, recurrent_weight, recurrent_bias, + // Output + gate_output, + // Scratch arrays + fc_output_buffer, nonlinear_type); + + ValidateResultGoldens(expected_vals, gate_output, + batch_size * state_dimension, tolerance); +} + +template +void TestCalculateLstmGateInteger( + const TfLiteEvalTensor* input, const TfLiteEvalTensor* input_weight, + const TfLiteEvalTensor* input_bias, + // Recurrent FC + const TfLiteEvalTensor* recurrent, const TfLiteEvalTensor* recurrent_weight, + const TfLiteEvalTensor* recurrent_bias, + // Quantization settings + const NodeQuantizationParameters& node_quantization_settings, + const GateQuantizationParameters& gate_quantization_settings, + // Result comparison + TfLiteFusedActivation nonlinear_type, const float* expected_vals, + float tolerance) { + CellType gate_output[batch_size * state_dimension] = {}; + CellType fc_output_buffer[batch_size * state_dimension] = {}; + + tflite::GateParameters gate_params = CreateGateParams( + node_quantization_settings.input, node_quantization_settings.hidden_state, + gate_quantization_settings, + node_quantization_settings.nonlinear_activation_input_scale); + + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, input->dims, recurrent->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + // only int8 weight is supported now + tflite::lstm_internal::CalculateLstmGate( + step_info, gate_params, + // Input FC + input, input_weight, input_bias, + // Recurrent FC + recurrent, recurrent_weight, recurrent_bias, + // Output + gate_output, + // Scratch arrays + fc_output_buffer, nonlinear_type); + + float gate_output_float[batch_size * state_dimension] = {}; + Dequantize(gate_output, batch_size * state_dimension, + node_quantization_settings.nonlinear_activation_output_scale, 0, + gate_output_float); + + ValidateResultGoldens(expected_vals, gate_output_float, + batch_size * state_dimension, tolerance); +} + 
+template +void TestUpdateLstmCellFloat( + const GateOutputCheckData& gate_output_data, + LstmNodeContent& node_content, + const float tolerance) { + float buffer[batch_size * state_dimension] = {}; + + auto forget_cell_mul_params = CreateInterGateMulParamsFloat(); + auto input_mul_params = CreateInterGateMulParamsFloat(); + + auto cell_state = node_content.CellStateEvalTensor(); + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, + node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims, + node_content.HiddenStateEvalTensor()->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + // copy the data since it will be updated + float forget_gate[batch_size * state_dimension] = {}; + std::memcpy(forget_gate, gate_output_data.expected_forget_gate_output, + batch_size * state_dimension * sizeof(float)); + + CellStateInfo cell_state_info; + cell_state_info.cell_clip = node_content.BuiltinData().cell_clip; + // Call the function to be tested + tflite::lstm_internal::UpdateLstmCell( + step_info, cell_state, forget_gate, + gate_output_data.expected_input_gate_output, + gate_output_data.expected_cell_gate_output, forget_cell_mul_params, + input_mul_params, cell_state_info, buffer); + + ValidateResultGoldens(gate_output_data.expected_updated_cell, + tflite::micro::GetTensorData(cell_state), + batch_size * state_dimension, tolerance); +} + +template +void TestUpdateLstmCellInteger( + const GateOutputCheckData& gate_output_data, + LstmNodeContent& node_content, + const float tolerance) { + const auto& quantization_settings = node_content.QuantizationSettings(); + CellType quantized_forget_gate[batch_size * state_dimension] = {}; + tflite::Quantize(gate_output_data.expected_forget_gate_output, + quantized_forget_gate, batch_size * state_dimension, + quantization_settings.nonlinear_activation_output_scale, 0); + + CellType quantized_input_gate[batch_size * state_dimension] = {}; + tflite::Quantize(gate_output_data.expected_input_gate_output, + quantized_input_gate, batch_size * state_dimension, + quantization_settings.nonlinear_activation_output_scale, 0); + + CellType quantized_cell_gate[batch_size * state_dimension] = {}; + tflite::Quantize(gate_output_data.expected_cell_gate_output, + quantized_cell_gate, batch_size * state_dimension, + quantization_settings.nonlinear_activation_output_scale, 0); + + CellType buffer[batch_size * state_dimension] = {}; + + auto forget_cell_mul_params = CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.cell_state.scale, + quantization_settings.cell_state.scale); + auto input_mul_params = CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.cell_state.scale); + + auto cell_state_info = + CreateLstmCellStateInfo(quantization_settings.cell_state.scale, + node_content.BuiltinData().cell_clip); + + auto cell_state = node_content.CellStateEvalTensor(); + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, + node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims, + node_content.HiddenStateEvalTensor()->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + 
tflite::lstm_internal::LstmStepManager step_info(&size_info); + + // Call the function to be tested + tflite::lstm_internal::UpdateLstmCell( + step_info, cell_state, quantized_forget_gate, quantized_input_gate, + quantized_cell_gate, forget_cell_mul_params, input_mul_params, + cell_state_info, buffer); + + float cell_state_float[batch_size * state_dimension] = {}; + Dequantize(tflite::micro::GetTensorData(cell_state), + batch_size * state_dimension, + quantization_settings.cell_state.scale, + quantization_settings.cell_state.zero_point, cell_state_float); + + ValidateResultGoldens(gate_output_data.expected_updated_cell, + cell_state_float, batch_size * state_dimension, + tolerance); +} + +template +void TestUpdateLstmHiddenFloat( + const GateOutputCheckData& gate_output_data, + LstmNodeContent& node_content, + const float tolerance) { + float buffer[batch_size * state_dimension] = {}; + + auto mul_params = CreateInterGateMulParamsFloat(); + + int32_t cell_state_scale_power = 0; + + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, + node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims, + node_content.HiddenStateEvalTensor()->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + auto cell_state = node_content.CellStateEvalTensor(); + auto hidden_state = node_content.HiddenStateEvalTensor(); + + tflite::lstm_internal::UpdateLstmHidden( + step_info, cell_state, hidden_state, + gate_output_data.expected_output_gate_output, mul_params, + cell_state_scale_power, buffer); + + ValidateResultGoldens(gate_output_data.expected_updated_hidden, + tflite::micro::GetTensorData(hidden_state), + batch_size * state_dimension, tolerance); +} + +template +void TestUpdateLstmHiddenInteger( + const GateOutputCheckData& gate_output_data, + LstmNodeContent& node_content, + const float tolerance) { + const auto& quantization_settings = node_content.QuantizationSettings(); + CellType quantized_output_gate[batch_size * state_dimension] = {}; + tflite::Quantize(gate_output_data.expected_output_gate_output, + quantized_output_gate, batch_size * state_dimension, + quantization_settings.nonlinear_activation_output_scale, 0); + + CellType buffer[batch_size * state_dimension] = {}; + + auto mul_params = CreateInterGateMulParams( + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.nonlinear_activation_output_scale, + quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point); + + int cell_state_scale_power_buffer; + tflite::CheckedLog2(quantization_settings.cell_state.scale, + &cell_state_scale_power_buffer); + int32_t cell_state_scale_power = cell_state_scale_power_buffer; + + // Create step information: only one time step, no need to update + auto size_info = tflite::testing::CreateLstmSizeInfo( + /*time_major*/ false, + node_content.GetEvalTensor(tflite::kLstmInputTensor)->dims, + node_content.HiddenStateEvalTensor()->dims); + // revise time_major = true to enable batch inference + size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&size_info); + + auto cell_state = node_content.CellStateEvalTensor(); + auto hidden_state = node_content.HiddenStateEvalTensor(); + + tflite::lstm_internal::UpdateLstmHidden( + step_info, cell_state, hidden_state, quantized_output_gate, mul_params, + cell_state_scale_power, buffer); + + float 
hidden_state_float[batch_size * state_dimension] = {}; + Dequantize(tflite::micro::GetTensorData(hidden_state), + batch_size * state_dimension, + quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point, hidden_state_float); + + ValidateResultGoldens(gate_output_data.expected_updated_hidden, + hidden_state_float, batch_size * state_dimension, + tolerance); +} + +template +void TestLstmStepFloat( + const GateOutputCheckData& gate_output_data, + const float hidden_state_tolerance, const float cell_state_tolerance, + /*can not be const, state will be updated*/ + LstmNodeContent& node_contents) { + // Mimicking the kernel preparation phase, node_contents approximate the + LSTMKernelContents kernel_content = CreateLSTMKernelContent(node_contents); + LSTMBuffers buffers; + // Scratch buffers on the stack + float buffer0[batch_size * state_dimension] = {}; + buffers.buffer0 = buffer0; + float buffer1[batch_size * state_dimension] = {}; + buffers.buffer1 = buffer1; + float buffer2[batch_size * state_dimension] = {}; + buffers.buffer2 = buffer2; + float buffer3[batch_size * state_dimension] = {}; + buffers.buffer3 = buffer3; + + OpDataLSTM op_data = CreateLstmOpDataFloat(node_contents); + // set time_major to true to test batch inference + op_data.size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&op_data.size_info); + tflite::lstm_internal::LstmStep( + step_info, op_data, kernel_content, buffers); + + ValidateResultGoldens( + gate_output_data.expected_updated_hidden, + tflite::micro::GetTensorData(kernel_content.HiddenStateTensor()), + batch_size * state_dimension, hidden_state_tolerance); + ValidateResultGoldens( + gate_output_data.expected_updated_cell, + tflite::micro::GetTensorData(kernel_content.CellStateTensor()), + batch_size * state_dimension, cell_state_tolerance); +} + +template +void TestLstmStepInteger( + const GateOutputCheckData& gate_output_data, + const float hidden_state_tolerance, const float cell_state_tolerance, + /*can not be const, state will be updated*/ + LstmNodeContent& + node_contents) { + // Mimicking the kernel preparation phase, node_contents approximate the + LSTMKernelContents kernel_content = CreateLSTMKernelContent(node_contents); + LSTMBuffers buffers; + + // Scratch buffers on the stack + CellType buffer0[batch_size * state_dimension] = {}; + buffers.buffer0 = buffer0; + CellType buffer1[batch_size * state_dimension] = {}; + buffers.buffer1 = buffer1; + CellType buffer2[batch_size * state_dimension] = {}; + buffers.buffer2 = buffer2; + CellType buffer3[batch_size * state_dimension] = {}; + buffers.buffer3 = buffer3; + + OpDataLSTM op_data = CreateLstmOpData(node_contents); + // set time_major to true to test batch inference + op_data.size_info.time_major = true; + tflite::lstm_internal::LstmStepManager step_info(&op_data.size_info); + tflite::lstm_internal::LstmStep(step_info, op_data, kernel_content, + buffers); + + const auto& quantization_settings = node_contents.QuantizationSettings(); + float dequantized_hidden_state[batch_size * state_dimension] = {}; + Dequantize( + tflite::micro::GetTensorData( + kernel_content.HiddenStateTensor()), + batch_size * state_dimension, quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point, dequantized_hidden_state); + + float dequantized_cell_state[batch_size * state_dimension] = {}; + Dequantize( + tflite::micro::GetTensorData(kernel_content.CellStateTensor()), + batch_size * state_dimension, 
quantization_settings.cell_state.scale, + quantization_settings.cell_state.zero_point, dequantized_cell_state); + + ValidateResultGoldens(gate_output_data.expected_updated_hidden, + dequantized_hidden_state, batch_size * state_dimension, + hidden_state_tolerance); + ValidateResultGoldens(gate_output_data.expected_updated_cell, + dequantized_cell_state, batch_size * state_dimension, + cell_state_tolerance); +} + +template +void TestEvalLstmFloat( + const LstmEvalCheckData< + batch_size * time_steps * input_dimension, batch_size * state_dimension, + batch_size * state_dimension * time_steps>& eval_check_data, + const float hidden_state_tolerance, const float cell_state_tolerance, + LstmNodeContent& node_contents) { + // Mimicking the kernel preparation phase, node_contents approximate the node + LSTMKernelContents kernel_content = CreateLSTMKernelContent(node_contents); + // Scratch buffers on the stack + LSTMBuffers buffers; + float buffer0[batch_size * state_dimension] = {}; + buffers.buffer0 = buffer0; + float buffer1[batch_size * state_dimension] = {}; + buffers.buffer1 = buffer1; + float buffer2[batch_size * state_dimension] = {}; + buffers.buffer2 = buffer2; + float buffer3[batch_size * state_dimension] = {}; + buffers.buffer3 = buffer3; + + OpDataLSTM op_data = CreateLstmOpDataFloat(node_contents); + + tflite::EvalLstm(op_data, kernel_content, + buffers); + + ValidateResultGoldens(eval_check_data.expected_hidden_state, + node_contents.GetHiddenStateData(), + batch_size * state_dimension, hidden_state_tolerance); + + ValidateResultGoldens(eval_check_data.expected_cell_state, + node_contents.GetCellStateData(), + batch_size * state_dimension, cell_state_tolerance); + + ValidateResultGoldens(eval_check_data.expected_output, + node_contents.GetOutputData(), + batch_size * state_dimension, hidden_state_tolerance); +} + +template +void TestEvalLstmInteger( + const LstmEvalCheckData< + batch_size * time_steps * input_dimension, batch_size * state_dimension, + batch_size * state_dimension * time_steps>& eval_check_data, + const float hidden_state_tolerance, const float cell_state_tolerance, + LstmNodeContent& + node_contents) { + // Mimicking the kernel preparation phase, node_contents approximate the node + LSTMKernelContents kernel_content = CreateLSTMKernelContent(node_contents); + // Scratch buffers on the stack + LSTMBuffers buffers; + CellType buffer0[batch_size * state_dimension] = {}; + buffers.buffer0 = buffer0; + CellType buffer1[batch_size * state_dimension] = {}; + buffers.buffer1 = buffer1; + CellType buffer2[batch_size * state_dimension] = {}; + buffers.buffer2 = buffer2; + CellType buffer3[batch_size * state_dimension] = {}; + buffers.buffer3 = buffer3; + + OpDataLSTM op_data = CreateLstmOpData(node_contents); + + tflite::EvalLstm( + op_data, kernel_content, buffers); + + const auto& quantization_settings = node_contents.QuantizationSettings(); + float dequantized_hidden_state[batch_size * state_dimension] = {}; + Dequantize(node_contents.GetHiddenStateData(), batch_size * state_dimension, + quantization_settings.hidden_state.scale, + quantization_settings.hidden_state.zero_point, + dequantized_hidden_state); + + ValidateResultGoldens(eval_check_data.expected_hidden_state, + dequantized_hidden_state, batch_size * state_dimension, + hidden_state_tolerance); + + float dequantized_cell_state[batch_size * state_dimension] = {}; + Dequantize(node_contents.GetCellStateData(), batch_size * state_dimension, + quantization_settings.cell_state.scale, + 
quantization_settings.cell_state.zero_point, + dequantized_cell_state); + ValidateResultGoldens(eval_check_data.expected_cell_state, + dequantized_cell_state, batch_size * state_dimension, + cell_state_tolerance); + + float dequantized_output[batch_size * state_dimension * time_steps] = {}; + Dequantize(node_contents.GetOutputData(), + batch_size * state_dimension * time_steps, + quantization_settings.output.scale, + quantization_settings.output.zero_point, dequantized_output); + ValidateResultGoldens(eval_check_data.expected_output, dequantized_output, + batch_size * state_dimension, hidden_state_tolerance); +} + +} // namespace testing +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_EVAL_TEST_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_shared.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_shared.h new file mode 100644 index 0000000..54020f6 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_shared.h @@ -0,0 +1,150 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +// Input Tensors of size {n_batch, n_input} +constexpr int kLstmInputTensor = 0; + +// Input weight tensors of size: {n_cell, n_input} +constexpr int kLstmInputToInputWeightsTensor = 1; // Optional +constexpr int kLstmInputToForgetWeightsTensor = 2; +constexpr int kLstmInputToCellWeightsTensor = 3; +constexpr int kLstmInputToOutputWeightsTensor = 4; + +// Recurrent weight tensors of size {n_cell, n_output} +constexpr int kLstmRecurrentToInputWeightsTensor = 5; // Optional +constexpr int kLstmRecurrentToForgetWeightsTensor = 6; +constexpr int kLstmRecurrentToCellWeightsTensor = 7; +constexpr int kLstmRecurrentToOutputWeightsTensor = 8; + +// Peephole weights tensors of size {n_cell}, representing a diagonal matrix. 
+constexpr int kLstmCellToInputWeightsTensor = 9; // Optional +constexpr int kLstmCellToForgetWeightsTensor = 10; // Optional +constexpr int kLstmCellToOutputWeightsTensor = 11; // Optional + +// Gates bias tensors of size {n_cell} +constexpr int kLstmInputGateBiasTensor = 12; // Optional +constexpr int kLstmForgetGateBiasTensor = 13; +constexpr int kLstmCellGateBiasTensor = 14; +constexpr int kLstmOutputGateBiasTensor = 15; + +// Projection weight tensor of size {n_output, n_cell} +constexpr int kLstmProjectionWeightsTensor = 16; // Optional +// Projection bias tensor of size {n_output} +constexpr int kLstmProjectionBiasTensor = 17; // Optional + +// These state tensors are defined as variable tensors, and will be modified by +// this op. +constexpr int kLstmOutputStateTensor = 18; +constexpr int kLstmCellStateTensor = 19; + +// Layer norm coefficient tensors of size {n_cell}, representing a diagonal +// matrix. +constexpr int kLstmInputLayerNormCoefficientsTensor = 20; // Optional +constexpr int kLstmForgetLayerNormCoefficientsTensor = 21; // Optional +constexpr int kLstmCellLayerNormCoefficientsTensor = 22; // Optional +constexpr int kLstmOutputLayerNormCoefficientsTensor = 23; // Optional + +// Output tensors. +constexpr int kLstmOutputTensor = 0; + +// Parameters for the two fully connected computations inside each gate +struct GateParameters { + FullyConnectedParams input_fc_params; + FullyConnectedParams recurrent_fc_params; +}; + +// Parameters for the element-wise multiplications between gate outputs +struct InterGateParameters { + ArithmeticParams forget_cell_mul_params; + ArithmeticParams input_mul_params; + ArithmeticParams output_mul_params; +}; + +// Size information about the LSTM kernel, which is deduced from tensors stored +// in the flat buffer file. +struct LstmSizeInfo { + bool time_major; + int batch_size; + int time_steps; + int input_dimension; + int state_dimension; +}; + +// Contains information about the cell state tensor +struct CellStateInfo { + float cell_clip; + // clipping range for the cell state; only a 16-bit cell is supported (could be + // generalized through templatization) + int16_t quantized_cell_clip; + // 2^-cell_state_scale_power = cell state scale, required by integer tanh + // computation + int32_t cell_state_scale_power; +}; + +// Contains required computation information for LSTM kernel evaluation. +// Specifically, it includes shape and quantization settings for the LSTM +// internal operations. Formatted to support operations defined in the +// tensorflow/lite/kernels/internal/reference/integer_ops +// Should be constructed during the preparation phase +struct OpDataLSTM { + LstmSizeInfo size_info; + CellStateInfo cell_state_info; + TfLiteFusedActivation cell_gate_nonlinear_type; + GateParameters forget_gate_parameters; + GateParameters input_gate_parameters; + GateParameters cell_gate_parameters; + GateParameters output_gate_parameters; + InterGateParameters inter_gate_parameters; + int buffer_indices[4]; // TFLM only +}; + +// Provide an interface to access the internal tensors and buffers used for LSTM +// invocation. Constructed during the invocation phase +struct LSTMKernelContents { + public: + // Internal tensors, fixed (const).
see lstm_shared.h for tensor names + const TfLiteEvalTensor* GetInternalTensor(const int tensor_index) const { + return internal_tensors[tensor_index]; + } + // Variable tensors (will be changed, can not be const) + TfLiteEvalTensor* HiddenStateTensor() { + return internal_tensors[kLstmOutputStateTensor]; + } + TfLiteEvalTensor* CellStateTensor() { + return internal_tensors[kLstmCellStateTensor]; + } + // Node internal tensors with indexes defined at the beginning of the file + TfLiteEvalTensor* internal_tensors[24]; + TfLiteEvalTensor* output_tensor; +}; + +template +struct LSTMBuffers { + // TFLM buffers requires buffer index from LstmOpData. + CellType* buffer0; + CellType* buffer1; + CellType* buffer2; + CellType* buffer3; +}; + +} // namespace tflite +#endif // TENSORFLOW_LITE_MICRO_KERNELS_LSTM_SHARED_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/maximum_minimum.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/maximum_minimum.cc similarity index 72% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/maximum_minimum.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/maximum_minimum.cc index c253928..c003e68 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/maximum_minimum.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/maximum_minimum.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,11 +23,10 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace maximum_minimum { + namespace { // This file has a reference implementation of TFMaximum/TFMinimum. 
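Stepping back to the lstm_shared.h declarations that close just above: scratch memory is handed to the LSTM kernel through LSTMBuffers, while weights and the variable hidden/cell state are reached through the LSTMKernelContents accessors and the kLstm* tensor indices. The sketch below is an orientation aid only, not part of the patch; it assumes LSTMBuffers is parameterised on the cell type (as in upstream TFLM), a float build, and include paths matching the ones used elsewhere in this SDK.

    #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"
    #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_shared.h"

    namespace {

    // Hand four pre-allocated scratch areas to the kernel.
    template <typename CellType>
    tflite::LSTMBuffers<CellType> MakeBuffers(CellType* b0, CellType* b1,
                                              CellType* b2, CellType* b3) {
      tflite::LSTMBuffers<CellType> buffers;
      buffers.buffer0 = b0;
      buffers.buffer1 = b1;
      buffers.buffer2 = b2;
      buffers.buffer3 = b3;
      return buffers;
    }

    // Look up an internal tensor by the indices defined in lstm_shared.h and
    // read the hidden state back after evaluation (float build assumed).
    const float* InspectLstm(tflite::LSTMKernelContents& contents) {
      const TfLiteEvalTensor* forget_weights =
          contents.GetInternalTensor(tflite::kLstmInputToForgetWeightsTensor);
      (void)forget_weights;  // would feed the forget-gate fully connected op
      return tflite::micro::GetTensorData<float>(contents.HiddenStateTensor());
    }

    }  // namespace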
@@ -64,8 +63,6 @@ struct MinimumOp { } }; -} // namespace - template void TFLiteOperation(TfLiteContext* context, TfLiteNode* node, const OpContext& op_context) { @@ -88,9 +85,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteFloat32: TFLiteOperation(context, node, op_context); break; - case kTfLiteUInt8: - TFLiteOperation(context, node, op_context); - break; case kTfLiteInt8: TFLiteOperation(context, node, op_context); break; @@ -101,48 +95,28 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TFLiteOperation(context, node, op_context); break; default: - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by Maximum/Minimum.", - TfLiteTypeGetName(op_context.output->type), - op_context.output->type); + MicroPrintf("Type %s (%d) is not supported by Maximum/Minimum.", + TfLiteTypeGetName(op_context.output->type), + op_context.output->type); return kTfLiteError; } } else { - TF_LITE_KERNEL_LOG(context, - "Kernel type not supported by Maximum/Minimum."); + MicroPrintf("Kernel type not supported by Maximum/Minimum."); return kTfLiteError; } return kTfLiteOk; } -} // namespace maximum_minimum +} // namespace TfLiteRegistration Register_MAXIMUM() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/ - maximum_minimum::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, nullptr, + Eval); } TfLiteRegistration Register_MINIMUM() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/ - maximum_minimum::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, nullptr, + Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h index d030d91..fd28a32 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -31,57 +31,61 @@ namespace tflite { // (https://abseil.io/tips/130). Any new ops (or cleanup of existing ops should // have their Register function declarations in the tflite namespace. 
-TfLiteRegistration Register_ADD_N(); -TfLiteRegistration Register_BATCH_TO_SPACE_ND(); -TfLiteRegistration Register_CAST(); -TfLiteRegistration Register_CONV_2D(); -TfLiteRegistration Register_DEPTHWISE_CONV_2D(); -TfLiteRegistration Register_DIV(); -TfLiteRegistration Register_ELU(); -TfLiteRegistration Register_EXP(); -TfLiteRegistration Register_EXPAND_DIMS(); -TfLiteRegistration Register_FILL(); -TfLiteRegistration Register_L2_POOL_2D(); -TfLiteRegistration Register_LEAKY_RELU(); -TfLiteRegistration Register_QUANTIZE(); -TfLiteRegistration Register_SHAPE(); -TfLiteRegistration Register_SOFTMAX(); -TfLiteRegistration Register_SPACE_TO_BATCH_ND(); -TfLiteRegistration Register_SQUEEZE(); -TfLiteRegistration Register_SVDF(); -TfLiteRegistration Register_TRANSPOSE_CONV(); -TfLiteRegistration Register_ZEROS_LIKE(); - -namespace ops { -namespace micro { - -TfLiteRegistration Register_ABS(); TfLiteRegistration Register_ADD(); +TfLiteRegistration Register_ADD_N(); TfLiteRegistration Register_ARG_MAX(); TfLiteRegistration Register_ARG_MIN(); +TfLiteRegistration Register_ASSIGN_VARIABLE(); TfLiteRegistration Register_AVERAGE_POOL_2D(); +TfLiteRegistration Register_BATCH_MATMUL(); +TfLiteRegistration Register_BATCH_TO_SPACE_ND(); +TfLiteRegistration Register_BROADCAST_ARGS(); +TfLiteRegistration Register_BROADCAST_TO(); +TfLiteRegistration Register_CALL_ONCE(); +TfLiteRegistration Register_CAST(); TfLiteRegistration Register_CEIL(); +TfLiteRegistration Register_COMPLEX_ABS(); // TODO(b/160234179): Change custom OPs to also return by value. TfLiteRegistration* Register_CIRCULAR_BUFFER(); TfLiteRegistration Register_CONCATENATION(); -TfLiteRegistration Register_COS(); +TfLiteRegistration Register_CONV_2D(); +TfLiteRegistration Register_CUMSUM(); +TfLiteRegistration Register_DEPTH_TO_SPACE(); +TfLiteRegistration Register_DEPTHWISE_CONV_2D(); TfLiteRegistration Register_DEQUANTIZE(); +TfLiteRegistration Register_DIV(); +TfLiteRegistration Register_ELU(); TfLiteRegistration Register_EQUAL(); +TfLiteRegistration* Register_ETHOSU(); +TfLiteRegistration Register_EXP(); +TfLiteRegistration Register_EXPAND_DIMS(); +TfLiteRegistration Register_FILL(); TfLiteRegistration Register_FLOOR(); +TfLiteRegistration Register_FLOOR_DIV(); +TfLiteRegistration Register_FLOOR_MOD(); +TfLiteRegistration Register_FULLY_CONNECTED(); +#ifndef TF_LITE_STATIC_MEMORY +TfLiteRegistration Register_GATHER(); +#endif // TF_LITE_STATIC_MEMORY +TfLiteRegistration Register_GATHER_ND(); TfLiteRegistration Register_GREATER(); TfLiteRegistration Register_GREATER_EQUAL(); TfLiteRegistration Register_HARD_SWISH(); +TfLiteRegistration Register_IMAG(); +TfLiteRegistration Register_IF(); +TfLiteRegistration Register_L2_POOL_2D(); +TfLiteRegistration Register_LEAKY_RELU(); TfLiteRegistration Register_LESS(); TfLiteRegistration Register_LESS_EQUAL(); -TfLiteRegistration Register_LOG(); +TfLiteRegistration Register_LOG_SOFTMAX(); TfLiteRegistration Register_LOGICAL_AND(); -TfLiteRegistration Register_LOGICAL_NOT(); TfLiteRegistration Register_LOGICAL_OR(); TfLiteRegistration Register_LOGISTIC(); -TfLiteRegistration Register_MAXIMUM(); TfLiteRegistration Register_MAX_POOL_2D(); +TfLiteRegistration Register_MAXIMUM(); TfLiteRegistration Register_MEAN(); TfLiteRegistration Register_MINIMUM(); +TfLiteRegistration Register_MIRROR_PAD(); TfLiteRegistration Register_MUL(); TfLiteRegistration Register_NEG(); TfLiteRegistration Register_NOT_EQUAL(); @@ -89,20 +93,54 @@ TfLiteRegistration Register_PACK(); TfLiteRegistration Register_PAD(); 
TfLiteRegistration Register_PADV2(); TfLiteRegistration Register_PRELU(); +TfLiteRegistration Register_QUANTIZE(); +TfLiteRegistration Register_READ_VARIABLE(); +TfLiteRegistration Register_REAL(); TfLiteRegistration Register_REDUCE_MAX(); +TfLiteRegistration Register_REDUCE_MIN(); TfLiteRegistration Register_RELU(); TfLiteRegistration Register_RELU6(); +TfLiteRegistration Register_RESIZE_BILINEAR(); +TfLiteRegistration Register_RFFT2D(); +#ifndef TF_LITE_STATIC_MEMORY +TfLiteRegistration Register_SELECT(); +TfLiteRegistration Register_SELECT_V2(); +#endif // TF_LITE_STATIC_MEMORY +TfLiteRegistration Register_SHAPE(); +TfLiteRegistration Register_SLICE(); +TfLiteRegistration Register_SOFTMAX(); +TfLiteRegistration Register_SPACE_TO_BATCH_ND(); +TfLiteRegistration Register_SPACE_TO_DEPTH(); +TfLiteRegistration Register_SPLIT_V(); +TfLiteRegistration Register_SQUARED_DIFFERENCE(); +TfLiteRegistration Register_SQUEEZE(); +TfLiteRegistration Register_STRIDED_SLICE(); +TfLiteRegistration Register_SUB(); +TfLiteRegistration Register_SUM(); +TfLiteRegistration Register_SVDF(); +TfLiteRegistration Register_TRANSPOSE(); +TfLiteRegistration Register_TRANSPOSE_CONV(); +// TODO(b/230666079): resolve conflict with xtensa implementation +TfLiteRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM(); +TfLiteRegistration Register_VAR_HANDLE(); +TfLiteRegistration Register_WHILE(); +TfLiteRegistration Register_ZEROS_LIKE(); + +namespace ops { +namespace micro { + +TfLiteRegistration Register_ABS(); +TfLiteRegistration Register_COS(); +TfLiteRegistration Register_LOG(); +TfLiteRegistration Register_LOGICAL_NOT(); TfLiteRegistration Register_RESHAPE(); TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR(); TfLiteRegistration Register_ROUND(); TfLiteRegistration Register_RSQRT(); TfLiteRegistration Register_SIN(); TfLiteRegistration Register_SPLIT(); -TfLiteRegistration Register_SPLIT_V(); TfLiteRegistration Register_SQRT(); TfLiteRegistration Register_SQUARE(); -TfLiteRegistration Register_STRIDED_SLICE(); -TfLiteRegistration Register_SUB(); TfLiteRegistration Register_UNPACK(); TfLiteRegistration Register_L2_NORMALIZATION(); TfLiteRegistration Register_TANH(); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.cc new file mode 100644 index 0000000..14be12c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.cc @@ -0,0 +1,67 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.h" + +#include +#include +#include +#include +#include +#include + +#include "edge-impulse-sdk/third_party/gemmlowp/fixedpoint/fixedpoint.h" // from @gemmlowp +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/cppmath.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" + +namespace tflite { + +// Apply sigmoid to elements of a vector. +void PortableApplySigmoidToVector(const float* vector, int v_size, + float* result) { + for (int v = 0; v < v_size; v++) { + result[v] = 1.0f / (1.0f + std::exp(-vector[v])); + } +} + +void PortableApplyTanhToVector(const float* vector, int v_size, float* result) { + for (int v = 0; v < v_size; v++) { + result[v] = std::tanh(vector[v]); + } +} + +void PortableApplyActivationToVector(const float* vector, int v_size, + TfLiteFusedActivation activation, + float* result) { + switch (activation) { + case kTfLiteActNone: + return; + case kTfLiteActRelu: + return tflite::tensor_utils::ApplyReluToVector(vector, v_size, result); + case kTfLiteActReluN1To1: + return tflite::tensor_utils::ApplyRelu1ToVector(vector, v_size, result); + case kTfLiteActRelu6: + return tflite::tensor_utils::ApplyRelu6ToVector(vector, v_size, result); + case kTfLiteActTanh: + return PortableApplyTanhToVector(vector, v_size, result); + case kTfLiteActSignBit: + return tflite::tensor_utils::ApplySignbitToVector(vector, v_size, result); + case kTfLiteActSigmoid: + return PortableApplySigmoidToVector(vector, v_size, result); + } +} + +} // namespace tflite \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.h new file mode 100644 index 0000000..fb3d97f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.h @@ -0,0 +1,56 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +// This file and the associated .cc file is branched from +// tensorflow/lite/kernels/internal/reference_portable_tensor_utils* +// TFLM needs to create its own because the original files are coupled with +// the tensor_utils module, which we cannot reuse due to its use of the +// Eigen library. 
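As a quick orientation for the portable activation helpers defined in the new micro_tensor_utils.cc above (and declared in the header that follows), the snippet below shows the intended call pattern. It is illustrative only, with made-up values, and is not part of the patch.

    #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_tensor_utils.h"

    void ApplyCellGateNonlinearity() {
      // Four illustrative pre-activation values for a single gate.
      const float gate_input[4] = {-2.0f, -0.5f, 0.5f, 2.0f};
      float gate_output[4] = {};
      // kTfLiteActTanh is routed to PortableApplyTanhToVector internally;
      // kTfLiteActSigmoid would go to PortableApplySigmoidToVector instead.
      tflite::PortableApplyActivationToVector(gate_input, 4, kTfLiteActTanh,
                                              gate_output);
    }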
+ +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_ + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/portable_tensor_utils.h" + +#if defined(_MSC_VER) +#define __restrict__ __restrict +#endif + +namespace tflite { + +// Not all backends support CpuBackendContext usage, so forward declare to avoid +// pulling in its implementation. +// TODO(b/230666277): consider removing this since micro does not utilize it +class CpuBackendContext; + +// Apply sigmoid to elements of a vector. +void PortableApplySigmoidToVector(const float* vector, int v_size, + float* result); +// Apply tanh to elements of a vector +void PortableApplyTanhToVector(const float* vector, int v_size, float* result); +// Apply appropriate activation function to elements of a vector. +void PortableApplyActivationToVector(const float* vector, int v_size, + TfLiteFusedActivation activation, + float* result); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_TENSOR_UTILS_H_ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mirror_pad.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mirror_pad.cc new file mode 100644 index 0000000..c409fcc --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mirror_pad.cc @@ -0,0 +1,215 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" + +namespace tflite { +namespace { + +struct OpDataMirrorPad { + int input_dims; + int output_size; + int offset; + int output_dims_num_elements_buffer_index; + int input_dims_num_elements_buffer_index; +}; + +// Helper method that fills the left and right pads. +template +inline void GetPadding(const T* data, int offset, int64_t* left_pad, + int64_t* right_pad) { + *left_pad = static_cast(*(data + offset * 2)); + *right_pad = static_cast(*(data + offset * 2 + 1)); +} + +// Given dimension index and the left/right padding. +// Returns the corresponding dimension in the input array. 
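To make the index mapping of GetInputDimension() (defined just below) concrete, here is a small standalone sketch, not part of the patch, that applies the same arithmetic to a 1-D input of size 4 padded by two elements on each side. The offset is 1 for REFLECT mode and 0 for SYMMETRIC mode, matching how Prepare() derives data->offset further down.

    #include <algorithm>
    #include <cstdio>

    // Simplified standalone copy of the mapping used by GetInputDimension().
    int MapToInput(int padded, int left_pad, int input_size, int offset) {
      if (padded < left_pad) {
        const int original = left_pad + offset - 1;
        return original - std::min(padded, original - offset);
      }
      padded -= left_pad;
      if (padded >= input_size) {
        padded -= input_size;
        const int original = input_size - (1 + offset);
        return original - std::min(padded, original);
      }
      return padded;
    }

    int main() {
      // Input [a b c d] padded to 8 elements.
      for (int offset : {1 /*REFLECT*/, 0 /*SYMMETRIC*/}) {
        std::printf("offset %d:", offset);
        for (int i = 0; i < 8; ++i) {
          std::printf(" %d", MapToInput(i, /*left_pad=*/2, /*input_size=*/4, offset));
        }
        std::printf("\n");
      }
      // Prints: offset 1: 2 1 0 1 2 3 2 1   (reflect, edge value not repeated)
      //         offset 0: 1 0 0 1 2 3 3 2   (symmetric, edge value repeated)
      return 0;
    }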
+inline int GetInputDimension(int padded_dimension, int left_pad, int right_pad, + int input_dim_size, int offset) { + if (padded_dimension < left_pad) { + const int original_ind = left_pad + offset - 1; + return original_ind - (std::min(padded_dimension, original_ind - offset)); + } + padded_dimension -= left_pad; + if (padded_dimension >= input_dim_size) { + padded_dimension -= input_dim_size; + const int original_ind = input_dim_size - (1 + offset); + return original_ind - std::min(padded_dimension, original_ind); + } + return padded_dimension; +} + +// Given and index in output array, returns the index of the value +// in input array. +int GetFlatIndex(int index, int num_dims, + const TfLiteEvalTensor* padding_matrix, + const TfLiteIntArray* input_dims, + int* output_dims_num_elements, int* input_dims_num_elements, + const int offset) { + int flat_index = 0; + int64_t left_pad = 0, right_pad = 0, dimension_index, index_in_input; + + for (int i = 0; i < num_dims; ++i) { + switch (padding_matrix->type) { + case kTfLiteInt32: + GetPadding(padding_matrix->data.i32, i, &left_pad, &right_pad); + break; + case kTfLiteInt64: + GetPadding(padding_matrix->data.i64, i, &left_pad, &right_pad); + break; + default: + break; + } + dimension_index = index / output_dims_num_elements[i]; + + index_in_input = GetInputDimension(dimension_index, left_pad, right_pad, + input_dims->data[i], offset); + + flat_index += index_in_input * (input_dims_num_elements)[i]; + index %= output_dims_num_elements[i]; + } + + return flat_index; +} + +template +void MirrorPad(const TfLiteEvalTensor* padding_matrix, + const TfLiteIntArray* input_dims, int* output_dims_num_elements, + int* input_dims_num_elements, const T* input_data, + T* output_data, const int offset, const int num_dims, + const int output_size) { + for (int i = 0; i < output_size; ++i) { + output_data[i] = input_data[GetFlatIndex( + i, num_dims, padding_matrix, input_dims, output_dims_num_elements, + input_dims_num_elements, offset)]; + } +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TfLiteStatus status = kTfLiteOk; + const OpDataMirrorPad* data = + static_cast(node->user_data); + + const TfLiteEvalTensor* input_tensor = + tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* padding_matrix = + tflite::micro::GetEvalInput(context, node, 1); + + TfLiteEvalTensor* output_tensor = + tflite::micro::GetEvalOutput(context, node, 0); + const int input_dims = data->input_dims; + const int output_size = data->output_size; + + int* input_dims_num_elements = (int*)context->GetScratchBuffer( + context, data->input_dims_num_elements_buffer_index); + int* output_dims_num_elements = (int*)context->GetScratchBuffer( + context, data->output_dims_num_elements_buffer_index); + + for (int i = 0; i < input_dims; i++) { + output_dims_num_elements[i] = 1; + input_dims_num_elements[i] = 1; + } + + for (int i = input_dims - 2; i >= 0; i--) { + output_dims_num_elements[i] = + output_dims_num_elements[i + 1] * output_tensor->dims->data[i + 1]; + + input_dims_num_elements[i] = + input_dims_num_elements[i + 1] * input_tensor->dims->data[i + 1]; + } + + switch (output_tensor->type) { + case kTfLiteFloat32: { + MirrorPad(padding_matrix, input_tensor->dims, output_dims_num_elements, + input_dims_num_elements, + tflite::micro::GetTensorData(input_tensor), + tflite::micro::GetTensorData(output_tensor), + data->offset, input_dims, output_size); + break; + } + case kTfLiteInt8: { + 
MirrorPad(padding_matrix, input_tensor->dims, output_dims_num_elements, + input_dims_num_elements, + tflite::micro::GetTensorData(input_tensor), + tflite::micro::GetTensorData(output_tensor), + data->offset, input_dims, output_size); + break; + } + default: + status = kTfLiteError; + break; + } + +#undef TF_LITE_MIRROR_PAD + + return status; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataMirrorPad)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataMirrorPad* data = static_cast(node->user_data); + + TfLiteTensor* input_tensor = micro_context->AllocateTempInputTensor(node, 0); + TfLiteTensor* padding_matrix = + micro_context->AllocateTempInputTensor(node, 1); + TfLiteTensor* output_tensor = + micro_context->AllocateTempOutputTensor(node, 0); + + TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2); + TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0), + NumDimensions(input_tensor)); + auto* params = + reinterpret_cast(node->builtin_data); + if (params == nullptr) { + return kTfLiteError; + } + + data->offset = + params->mode != TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect ? 0 + : 1; + data->input_dims = NumDimensions(input_tensor); + data->output_size = NumElements(output_tensor); + + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, data->input_dims * sizeof(int), + &data->output_dims_num_elements_buffer_index)); + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, data->input_dims * sizeof(int), + &data->input_dims_num_elements_buffer_index)); + + micro_context->DeallocateTempTfLiteTensor(input_tensor); + micro_context->DeallocateTempTfLiteTensor(padding_matrix); + micro_context->DeallocateTempTfLiteTensor(output_tensor); + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_MIRROR_PAD() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_function_specializations.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_function_specializations.h new file mode 100644 index 0000000..1b7b038 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_function_specializations.h @@ -0,0 +1,145 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "mli_api.h" // NOLINT + +namespace tflite { + +// Convolution specialized function. +typedef mli_status (*conv_func_ptr)(const mli_tensor* /*in*/, + const mli_tensor* /*weights*/, + const mli_tensor* /*bias*/, + const mli_conv2d_cfg* /*cfg*/, + mli_tensor* /*out*/); + +#ifdef MLI_2_0 +conv_func_ptr __attribute__((weak)) +mli_krn_conv2d_hwcn(const mli_tensor* weights) { + int filter_w = weights->shape[KRNL_W_DIM_HWCN]; + int filter_h = weights->shape[KRNL_H_DIM_HWCN]; + + if (filter_w == 1 && filter_h == 1) { + return mli_krn_conv2d_hwcn_sa8_sa8_sa32_k1x1; + } else if (filter_w == 3 && filter_h == 3) { + return mli_krn_conv2d_hwcn_sa8_sa8_sa32_k3x3; + } else if (filter_w == 5 && filter_h == 5) { + return mli_krn_conv2d_hwcn_sa8_sa8_sa32_k5x5; + } else { + return mli_krn_conv2d_hwcn_sa8_sa8_sa32; + } +} +#else +conv_func_ptr __attribute__((weak)) +mli_krn_conv2d_hwcn(const mli_tensor* weights, const mli_conv2d_cfg* cfg) { + return mli_krn_conv2d_nhwc_sa8_sa8_sa32; +} +#endif + +// Depthwise convolution specialized function. +typedef mli_status (*depthwise_func_ptr)(const mli_tensor* /*in*/, + const mli_tensor* /*weights*/, + const mli_tensor* /*bias*/, + const mli_conv2d_cfg* /*cfg*/, + mli_tensor* /*out*/); + +#ifdef MLI_2_0 +depthwise_func_ptr __attribute__((weak)) +mli_krn_depthwise_conv2d(const mli_tensor* weights) { + int filter_w = weights->shape[KRNL_DW_W_DIM_HW1N]; + int filter_h = weights->shape[KRNL_DW_H_DIM_HW1N]; + + if (filter_w == 3 && filter_h == 3) { + return mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32_k3x3; + } else if (filter_w == 5 && filter_h == 5) { + return mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32_k5x5; + } else { + return mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32; + } +} +#else +depthwise_func_ptr __attribute__((weak)) +mli_krn_depthwise_conv2d(const mli_tensor* weights, const mli_conv2d_cfg* cfg) { + return mli_krn_depthwise_conv2d_hwcn_sa8_sa8_sa32; +} +#endif + +#ifdef MLI_2_0 +depthwise_func_ptr __attribute__((weak)) +mli_krn_group_conv2d(const mli_tensor* weights) { + int filter_w = weights->shape[KRNL_DW_W_DIM_HW1N]; + int filter_h = weights->shape[KRNL_DW_H_DIM_HW1N]; + + if (filter_w == 3 && filter_h == 3) { + return mli_krn_group_conv2d_hwcn_sa8_sa8_sa32_k3x3; + } else if (filter_w == 5 && filter_h == 5) { + return mli_krn_group_conv2d_hwcn_sa8_sa8_sa32_k5x5; + } else { + return mli_krn_group_conv2d_hwcn_sa8_sa8_sa32; + } +} +#endif + +// Pooling specialized functions. 
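Before the pooling specializations that follow, it may help to see how the convolution selectors above are meant to be consumed: they only resolve a filter-size-specific MLI kernel, and the caller then invokes the returned function pointer with the prepared MLI tensors. The sketch below is a rough illustration for the MLI_2_0 build only; the wrapper name and the assumption that in/weights/bias/cfg/out have already been set up by the calling kernel are illustrative, not part of the patch.

    #if defined(MLI_2_0)
    #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_function_specializations.h"

    // Resolve the filter-size specialization (k1x1 / k3x3 / k5x5 / generic)
    // and run it. All tensors and the conv config are assumed prepared.
    mli_status RunConv2d(const mli_tensor* in, const mli_tensor* weights,
                         const mli_tensor* bias, const mli_conv2d_cfg* cfg,
                         mli_tensor* out) {
      tflite::conv_func_ptr conv2d = tflite::mli_krn_conv2d_hwcn(weights);
      return conv2d(in, weights, bias, cfg, out);
    }
    #endif  // MLI_2_0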
+typedef mli_status (*pooling_func_ptr)(const mli_tensor* /*in*/, + const mli_pool_cfg* /*cfg*/, + mli_tensor* /*out*/); + +#ifdef MLI_2_0 +pooling_func_ptr __attribute__((weak)) +mli_krn_avepool(const mli_pool_cfg* cfg) { + int filter_w = cfg->kernel_width; + int filter_h = cfg->kernel_height; + + if (filter_w == 2 && filter_h == 2) { + return mli_krn_avepool_hwc_sa8_k2x2; + } else if (filter_w == 3 && filter_h == 3) { + return mli_krn_avepool_hwc_sa8_k3x3; + } else { + return mli_krn_avepool_hwc_sa8; + } +} +#else +pooling_func_ptr __attribute__((weak)) +mli_krn_avepool(const mli_pool_cfg* cfg) { + return mli_krn_avepool_hwc_sa8; +} +#endif + +#ifdef MLI_2_0 +pooling_func_ptr __attribute__((weak)) +mli_krn_maxpool(const mli_pool_cfg* cfg) { + int filter_w = cfg->kernel_width; + int filter_h = cfg->kernel_height; + + if (filter_w == 2 && filter_h == 2) { + return mli_krn_maxpool_hwc_sa8_k2x2; + } else if (filter_w == 3 && filter_h == 3) { + return mli_krn_maxpool_hwc_sa8_k3x3; + } else { + return mli_krn_maxpool_hwc_sa8; + } +} +#else +pooling_func_ptr __attribute__((weak)) +mli_krn_maxpool(const mli_pool_cfg* cfg) { + return mli_krn_maxpool_hwc_sa8; +} +#endif + +} // namespace tflite +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_interface.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_interface.cc new file mode 100644 index 0000000..bbd5e3a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_interface.cc @@ -0,0 +1,160 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "mli_interface.h" // NOLINT + +#include + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace ops { +namespace micro { + +#ifndef MLI_2_0 +template <> +int8_t* MliTensorInterface::Data(void) { + TFLITE_DCHECK(tensor_->el_type == MLI_EL_ASYM_I8); + return static_cast(tensor_->data); +} + +template <> +int32_t* MliTensorInterface::Data(void) { + TFLITE_DCHECK(tensor_->el_type == MLI_EL_ASYM_I32); + return static_cast(tensor_->data); +} + +template <> +int32_t* MliTensorInterface::Scale(void) { + return &tensor_->el_params.asym.scale.i32; +} + +template <> +int32_t** MliTensorInterface::Scale(void) { + return &tensor_->el_params.asym.scale.pi32; +} + +template <> +void MliTensorInterface::SetData(int8_t* data, uint32_t capacity) const { + TFLITE_DCHECK(tensor_->el_type == MLI_EL_ASYM_I8); + tensor_->data = data; + tensor_->capacity = capacity; +} + +template <> +void MliTensorInterface::SetData(int32_t* data, uint32_t capacity) const { + TFLITE_DCHECK(tensor_->el_type == MLI_EL_ASYM_I32); + tensor_->data = data; + tensor_->capacity = capacity; +} + +mli_tensor* MliTensorInterface::MliTensor(void) { return tensor_; } + +const mli_tensor* MliTensorInterface::MliTensor(void) const { + return static_cast( + const_cast(this)->MliTensor()); +} + +uint32_t* MliTensorInterface::Rank(void) { return &tensor_->rank; } + +const uint32_t* MliTensorInterface::DataCapacity(void) const { + return &tensor_->capacity; +} + +mli_element_type* MliTensorInterface::ElType(void) { return &tensor_->el_type; } + +template <> +int16_t* MliTensorInterface::ZeroPoint(void) { + return &tensor_->el_params.asym.zero_point.i16; +} + +template <> +int16_t** MliTensorInterface::ZeroPoint(void) { + return &tensor_->el_params.asym.zero_point.pi16; +} + +uint32_t* MliTensorInterface::ZeroPointCapacity(void) { return nullptr; } + +int32_t* MliTensorInterface::Dim(void) { return &tensor_->el_params.asym.dim; } + +uint32_t* MliTensorInterface::ScaleCapacity(void) { return nullptr; } + +template <> +int8_t* MliTensorInterface::ScaleFracBits(void) { + return &tensor_->el_params.asym.scale_frac_bits; +} + +uint32_t* MliTensorInterface::ScaleFracBitsCapacity(void) { return nullptr; } + +int32_t* MliTensorInterface::MemStride(void) { return tensor_->mem_stride; } + +uint32_t* MliTensorInterface::Shape(void) { return tensor_->shape; } + +const uint32_t* MliTensorInterface::Shape(void) const { + return static_cast( + const_cast(this)->Shape()); +} + +void MliTensorInterface::SetScale(float fscale) { + int exp; + frexpf(fscale, &exp); + int frac_bits = 31 - exp; + int32_t iscale = (int32_t)((1ll << frac_bits) * fscale + 0.5f); + *(this->ScaleFracBits()) = frac_bits; + *(this->Scale()) = (int32_t)iscale; +} + +void MliTensorInterface::SetScalePerChannel(float* fscale, + const int num_channels) { + int min_frac_bits; + for (int i = 0; i < num_channels; i++) { + int exp; + frexpf(fscale[i], &exp); + int cur_frac_bits = 31 - exp; + if (i == 0) { + min_frac_bits = cur_frac_bits; + } else { + min_frac_bits = + min_frac_bits < cur_frac_bits ? 
min_frac_bits : cur_frac_bits; + } + } + *this->ScaleFracBits() = min_frac_bits; + + for (int i = 0; i < num_channels; i++) { + int32_t iscale = (int32_t)((1ll << min_frac_bits) * fscale[i] + 0.5f); + (*this->Scale())[i] = iscale; + } +} + +void MliTensorInterface::SetElType(TfLiteType type) { + if (type == kTfLiteInt8) { + *this->ElType() = MLI_EL_ASYM_I8; + } else if (type == kTfLiteInt32) { + *this->ElType() = MLI_EL_ASYM_I32; + } else { + MicroPrintf("Wrong data type. Expected int8_t or int32_t."); + TFLITE_ABORT; + } +} +#endif + +} // namespace micro +} // namespace ops +} // namespace tflite + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_interface.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_interface.h new file mode 100644 index 0000000..e08f84a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_interface.h @@ -0,0 +1,80 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_INTERFACE_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_INTERFACE_H_ + +#include "mli_api.h" // NOLINT +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +namespace tflite { +namespace ops { +namespace micro { + +// Abstracts access to mli_tensor fields to use different versions of MLI +// Library (1.x and 2.x) +// Example: +// ops::micro::MliTensorInterface mli_in = +// ops::micro::MliTensorInterface(static_cast( +// context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + +class MliTensorInterface { + public: + // Make sure that lifetime of MliTensorInterface instance isn't bigger than + // related mli_tensor. 
+ MliTensorInterface(mli_tensor* tensor) : tensor_(tensor){}; + MliTensorInterface() = default; + ~MliTensorInterface() = default; + + template + T* Data(); + template + T Scale(); + template + T ZeroPoint(); + template + T ScaleFracBits(); + mli_tensor* MliTensor(); + const mli_tensor* MliTensor() const; + int32_t* Dim(); + uint32_t* Rank(); + uint32_t* Shape(); + const uint32_t* Shape() const; + const uint32_t* DataCapacity() const; + uint32_t* ScaleCapacity(); + mli_element_type* ElType(); + uint32_t* ScaleFracBitsCapacity(); + int32_t* MemStride(); + uint32_t* ZeroPointCapacity(); + + template + void SetData(T* data, uint32_t capacity) const; + void SetScale(float fscale); + void SetScalePerChannel(float* fscale, const int num_channels); + void SetElType(TfLiteType type); + + private: + mli_tensor* tensor_; +}; + +} // namespace micro +} // namespace ops +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_SLICERS_H_ + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.cc similarity index 98% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.cc index 877f9ef..8b65e38 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.cc @@ -1,4 +1,4 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels +// Patched by Edge Impulse to include reference and hardware-accelerated kernels #include "../../../../classifier/ei_classifier_config.h" #if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h index 0dc760d..fa22020 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h @@ -1,4 +1,4 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels +// Patched by Edge Impulse to include reference and hardware-accelerated kernels #include "../../../../classifier/ei_classifier_config.h" #if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 /* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h index c3d151d..4179c74 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h @@ -1,7 +1,7 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels +// Patched by Edge Impulse to include reference and hardware-accelerated kernels #include "../../../../classifier/ei_classifier_config.h" #if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,102 +20,292 @@ limitations under the License. #define TENSORFLOW_LITE_MICRO_KERNELS_ARC_MLI_TF_UTILS_H_ #include "mli_api.h" // NOLINT +#include "mli_interface.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" -constexpr int kFracBitsQ15 = 15; -constexpr int kFracBitsQ31 = 31; +#define KRNL_C_DIM_NHWC 0 // output channels namespace tflite { namespace ops { namespace micro { -inline void ConvertToMliTensorData(const TfLiteTensor* tfT, mli_tensor* mliT) { +inline void ConvertToMliTensorData(const TfLiteTensor* tfT, + MliTensorInterface* mliT, + bool is_bias_tensor) { // Data is NULL until MliTensorAttachBuffer is called. - mliT->data = NULL; + mliT->SetElType(tfT->type); if (tfT->type == kTfLiteInt8) { - mliT->el_type = MLI_EL_ASYM_I8; + mliT->SetData(nullptr, tfT->bytes); } else if (tfT->type == kTfLiteInt32) { - mliT->el_type = MLI_EL_ASYM_I32; + mliT->SetData(nullptr, tfT->bytes); } else { - TF_LITE_FATAL("Wrong data type. Expected int8_t or int32_t."); + MicroPrintf("Wrong data type. Expected int8_t or int32_t."); + TFLITE_ABORT; } + const int32_t dims_count = GetTensorShape(tfT).DimensionsCount(); + *mliT->Rank() = is_bias_tensor ? 
1 : dims_count; - mliT->capacity = tfT->bytes; - mliT->rank = GetTensorShape(tfT).DimensionsCount(); - for (int i = 0; i < GetTensorShape(tfT).DimensionsCount(); i++) { - mliT->shape[i] = GetTensorShape(tfT).Dims(i); + int mli_tensor_memstride = 1; + if (is_bias_tensor) { + mliT->Shape()[0] = GetTensorShape(tfT).Dims(dims_count - 1); + mliT->MemStride()[0] = mli_tensor_memstride; + } else { + for (int i = dims_count - 1; i >= 0; --i) { + mliT->Shape()[i] = GetTensorShape(tfT).Dims(i); + mliT->MemStride()[i] = mli_tensor_memstride; + mli_tensor_memstride *= GetTensorShape(tfT).Dims(i); + } } } -inline void ConvertToMliQuantParams(const TfLiteTensor* tfT, mli_tensor* mliT) { - mliT->el_params.asym.dim = -1; - mliT->el_params.asym.zero_point.i16 = tfT->params.zero_point; +inline void ConvertToMliQuantParams(const TfLiteTensor* tfT, + MliTensorInterface* mliT) { + *mliT->Dim() = -1; +#ifdef MLI_2_0 + *mliT->ZeroPointCapacity() = 0; +#endif + *mliT->ZeroPoint() = tfT->params.zero_point; float fscale = tfT->params.scale; - int exp; - frexpf(fscale, &exp); - int frac_bits = kFracBitsQ31 - exp; - int32_t iscale = (int32_t)((1ll << frac_bits) * fscale + 0.5f); - mliT->el_params.asym.scale_frac_bits = frac_bits; - mliT->el_params.asym.scale.i32 = (int32_t)iscale; + mliT->SetScale(fscale); } inline void ConvertToMliQuantParamsPerChannel(const TfLiteTensor* tfT, - mli_tensor* mliT) { + MliTensorInterface* mliT, + bool is_bias_tensor) { // mli tensor scale and zero_point arrays should be allocated at this point - TFLITE_DCHECK_NE(mliT->el_params.asym.scale.pi16, 0); - TFLITE_DCHECK_NE(mliT->el_params.asym.zero_point.pi16, 0); +#ifdef MLI_2_0 + TFLITE_DCHECK_NE(*mliT->Scale(), 0); + TFLITE_DCHECK_NE(*mliT->ZeroPoint(), 0); +#else + TFLITE_DCHECK_NE(*mliT->Scale(), 0); + TFLITE_DCHECK_NE(*mliT->ZeroPoint(), 0); +#endif // get per channel quantization parameters const auto* affine_quantization = reinterpret_cast(tfT->quantization.params); - mliT->el_params.asym.dim = affine_quantization->quantized_dimension; + int32_t quantized_dimension = + is_bias_tensor ? 0 : affine_quantization->quantized_dimension; + const int num_channels = mliT->Shape()[quantized_dimension]; - // find frac_bits - const int num_channels = - mliT->shape[affine_quantization->quantized_dimension]; - int min_frac_bits; + *mliT->Dim() = quantized_dimension; + + // set capacities +#ifdef MLI_2_0 + *mliT->ScaleFracBitsCapacity() = num_channels * sizeof(int8_t); + *mliT->ScaleCapacity() = num_channels * sizeof(int16_t); + *mliT->ZeroPointCapacity() = num_channels * sizeof(int16_t); +#endif float* fscale = affine_quantization->scale->data; - for (int i = 0; i < num_channels; i++) { - int exp; - frexpf(fscale[i], &exp); - int cur_frac_bits = kFracBitsQ31 - exp; - if (i == 0) { - min_frac_bits = cur_frac_bits; - } else { - min_frac_bits = - min_frac_bits < cur_frac_bits ? 
min_frac_bits : cur_frac_bits; - } - } - mliT->el_params.asym.scale_frac_bits = min_frac_bits; + mliT->SetScalePerChannel(fscale, num_channels); +#ifdef MLI_2_0 + int16_t* zero_point = *mliT->ZeroPoint(); for (int i = 0; i < num_channels; i++) { - int32_t iscale = (int32_t)((1ll << min_frac_bits) * fscale[i] + 0.5f); - mliT->el_params.asym.scale.pi32[i] = iscale; + zero_point[i] = tfT->params.zero_point; } +#endif } template -inline void MliTensorAttachBuffer(const TfLiteEvalTensor* tfT, - mli_tensor* mliT) { +inline void MliTensorAttachBuffer(const TfLiteEvalTensor*, + const MliTensorInterface*); + +template <> +inline void MliTensorAttachBuffer(const TfLiteEvalTensor* tfT, + const MliTensorInterface* mliT) { // "const_cast" here used to attach const data buffer to the initially // non-const mli_tensor. This is required by current implementation of MLI // backend and planned for redesign due to this and some other aspects. - mliT->data = const_cast( - static_cast(tflite::micro::GetTensorData(tfT))); + mliT->SetData( + const_cast(tflite::micro::GetTensorData(tfT)), + *mliT->DataCapacity()); } -inline void ConvertToMliTensor(const TfLiteTensor* tfT, mli_tensor* mliT) { - ConvertToMliTensorData(tfT, mliT); +template <> +inline void MliTensorAttachBuffer(const TfLiteEvalTensor* tfT, + const MliTensorInterface* mliT) { + // "const_cast" here used to attach const data buffer to the initially + // non-const mli_tensor. This is required by current implementation of MLI + // backend and planned for redesign due to this and some other aspects. + mliT->SetData( + const_cast(tflite::micro::GetTensorData(tfT)), + *mliT->DataCapacity()); +} + +inline void ConvertToMliTensor(const TfLiteTensor* tfT, + MliTensorInterface* mliT) { + ConvertToMliTensorData(tfT, mliT, false); ConvertToMliQuantParams(tfT, mliT); } inline void ConvertToMliTensorPerChannel(const TfLiteTensor* tfT, - mli_tensor* mliT) { - ConvertToMliTensorData(tfT, mliT); - ConvertToMliQuantParamsPerChannel(tfT, mliT); + MliTensorInterface* mliT, + bool is_bias_tensor) { + ConvertToMliTensorData(tfT, mliT, is_bias_tensor); + ConvertToMliQuantParamsPerChannel(tfT, mliT, is_bias_tensor); +} + +inline void PrepareLocalTensor(mli_tensor* tensor, mli_tensor* tensor_local) { +#ifdef MLI_2_0 + int8_t* local_data = tensor_local->data.mem.pi8; + *tensor_local = *tensor; + tensor_local->data.mem.pi8 = local_data; +#else + int8_t* local_data = static_cast(tensor_local->data); + *tensor_local = *tensor; + tensor_local->data = local_data; +#endif } + +inline void AdjustBiasTensor(MliTensorInterface* bias, MliTensorInterface* in, + MliTensorInterface* weights) { + int32_t quantized_dimension = *bias->Dim(); + const int num_channels = + quantized_dimension < 0 ? 1 : bias->Shape()[quantized_dimension]; + for (int i = 0; i < num_channels; i++) { + int32_t adjusted_bias_scale = + (*in->Scale()) * (*weights->Scale())[i]; + int in_shift = *in->ScaleFracBits(); + int w_shift = (*weights->ScaleFracBits())[i]; + int b_shift = (*bias->ScaleFracBits())[i]; + int bias_shift = in_shift + w_shift - b_shift; + (*bias->Scale())[i] = + (int16_t)(adjusted_bias_scale >> bias_shift); + } +} + +#ifdef MLI_2_0_KRNL_TEST +// Reorder an array according to given indexes. If backward is true, order of +// index array must be reversed. 
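// Illustrative example (added for clarity; not part of the upstream file):
// with index = {3, 0, 1, 2} and arr = {A, B, C, D}, the forward pass yields
// {D, A, B, C}, and the backward pass applied to that result restores
// {A, B, C, D}; that is, backward selects the inverse permutation.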
+inline static void reorder(uint32_t* arr, const uint8_t index[],
+                           bool backward) {
+  uint32_t temp[MLI_MAX_RANK];
+  for (int8_t i = 0; i < MLI_MAX_RANK; i++) {
+    if (backward)
+      temp[index[i]] = arr[i];
+    else
+      temp[i] = arr[index[i]];
+  }
+  for (int8_t i = 0; i < MLI_MAX_RANK; i++) {
+    arr[i] = temp[i];
+  }
+}
+
+// Change shape of mli tensor and recalculate mem strides.
+inline void change_shape(mli_tensor* mliT, const uint8_t dim_order[]) {
+  reorder(mliT->shape, dim_order, false);
+
+  // Calculate strides for new layout
+  int mli_tensor_memstride = 1;
+  for (int shape_idx = mliT->rank - 1; shape_idx >= 0; --shape_idx) {
+    mliT->mem_stride[shape_idx] = mli_tensor_memstride;
+    mli_tensor_memstride *= mliT->shape[shape_idx];
+  }
+}
+
+inline void permute_weights(const mli_tensor* weights_src,
+                            const mli_permute_cfg* permute_cfg,
+                            mli_tensor* weights_dst,
+                            mli_data_container* buffer_data) {
+  mli_tensor buffer = {};
+  buffer.el_params = weights_dst->el_params;
+  buffer.data = *buffer_data;
+  // Compare weights tensor size and available buffer capacity.
+  int buffer_size = buffer_data->capacity;
+  int weights_size = mli_hlp_count_elem_num(weights_src, 0) *
+                     mli_hlp_tensor_element_size(weights_src);
+
+  // Need to change shape of destination weights buffer according to permute
+  // dimensions order to calculate slice sizes.
+  change_shape(weights_dst, permute_cfg->perm_dim);
+
+  if (buffer_size >= weights_size) {
+    mli_mov_cfg_t copy_config;
+    mli_mov_cfg_for_copy(&copy_config);
+    mli_mov_tensor_sync(weights_src, &copy_config, &buffer);
+    mli_krn_permute_sa8(&buffer, permute_cfg, weights_dst);
+  } else {
+    // Weights shape is NHWC and output (buffer) shape is HWC where N_w = C_o.
+    // Buffer size (H_o * W_o) must be greater than or equal to the weights
+    // size (H_w * W_w * C_w). This is the reason why the buffer size (output
+    // tensor) is divided by the channel shape.
+    uint32_t slice_size = buffer_size / weights_src->shape[KRNL_C_DIM_NHWC];
+
+    mli_mov_cfg_t copy_config = {};
+    uint32_t src_offsets[] = {0, 0, 0, 0};
+    uint32_t src_sizes[] = {0, 0, 0, 0};
+    int dst_mem_stride[] = {0, 0, 0, 0};
+
+    mli_tensor weights_dst_sub_tensor;
+    mli_sub_tensor_cfg sub_tensor_cfg = {};
+    sub_tensor_cfg.sub_tensor_rank = weights_src->rank;
+
+    // Calculate dimensions for slice according to buffer capacity.
+    // Now, after calling the change_shape() function, the dst weights buffer
+    // has the MLI layout (HWCN). This means the innermost dimension (N) of
+    // the dst weights tensor is equal to the innermost dimension of the
+    // output tensor (N).
+    sub_tensor_cfg.size[weights_dst->rank - 1] =
+        src_sizes[weights_dst->rank - 1] = weights_src->shape[KRNL_C_DIM_NHWC];
+    // Now need to calculate other shapes for the weights slice. Total slice
+    // size is H*W*C*N, so to calculate sizes for each axis, the available
+    // slice size is divided by the shape of each axis.
+    uint32_t slice_size_left = slice_size;
+    for (uint32_t i = 0; i < weights_dst->rank - 1; i++) {
+      sub_tensor_cfg.size[i] = src_sizes[i] =
+          slice_size_left / weights_dst->shape[i] > 0 ? weights_dst->shape[i]
+                                                      : slice_size_left;
+      slice_size_left /= weights_dst->shape[i];
+      slice_size_left = slice_size_left > 0 ? slice_size_left : 1;
+    }
+    // Need to reorder src tensor sizes because they are still in TFLM format
+    // (NHWC) while the src_sizes array was calculated as (HWCN).
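    // Clarifying note (not from the upstream file): the reorder() call below
    // is the inverse (backward) use of the helper defined above; src_sizes was
    // filled in the permuted (HWCN) order and is mapped back onto the source
    // tensor's NHWC axis order before slicing.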
+    reorder(src_sizes, permute_cfg->perm_dim, true);
+
+    sub_tensor_cfg.offset[KRNL_C_DIM_HWCN] = src_offsets[KRNL_H_DIM_HWCN] = 0;
+    sub_tensor_cfg.offset[KRNL_H_DIM_HWCN] = src_offsets[KRNL_W_DIM_HWCN] = 0;
+    sub_tensor_cfg.offset[KRNL_W_DIM_HWCN] = src_offsets[KRNL_D_DIM_HWCN] = 0;
+    sub_tensor_cfg.offset[KRNL_D_DIM_HWCN] = src_offsets[KRNL_C_DIM_HWCN] = 0;
+    do {
+      do {
+        do {
+          do {
+            mli_mov_cfg_for_slice(&copy_config, (int*)src_offsets,
+                                  (int*)src_sizes, dst_mem_stride);
+            mli_mov_tensor_sync(weights_src, &copy_config, &buffer);
+
+            mli_hlp_create_subtensor(weights_dst, &sub_tensor_cfg,
+                                     &weights_dst_sub_tensor);
+            mli_krn_permute_sa8(&buffer, permute_cfg, &weights_dst_sub_tensor);
+
+            // For each axis, it is necessary to recalculate the offsets and
+            // slice sizes.
+            sub_tensor_cfg.offset[2] = src_offsets[3] += src_sizes[3];
+            src_sizes[3] =
+                std::min(src_sizes[3], weights_src->shape[3] - src_offsets[3]);
+          } while (src_offsets[3] < weights_src->shape[3]);
+
+          sub_tensor_cfg.offset[1] = src_offsets[2] += src_sizes[2];
+          src_sizes[2] =
+              std::min(src_sizes[2], weights_src->shape[2] - src_offsets[2]);
+        } while (src_offsets[2] < weights_src->shape[2]);
+
+        sub_tensor_cfg.offset[0] = src_offsets[1] += src_sizes[1];
+        src_sizes[1] =
+            std::min(src_sizes[1], weights_src->shape[1] - src_offsets[1]);
+      } while (src_offsets[1] < weights_src->shape[1]);
+
+      sub_tensor_cfg.offset[3] = src_offsets[0] += src_sizes[0];
+      src_sizes[0] =
+          std::min(src_sizes[0], weights_src->shape[0] - src_offsets[0]);
+    } while (src_offsets[0] < weights_src->shape[0]);
+  }
+}
+#endif
+
 } // namespace micro
 } // namespace ops
 } // namespace tflite
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.cc new file mode 100644 index 0000000..9f00d2e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.cc @@ -0,0 +1,387 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +void EvalQuantized(TfLiteContext* context, TfLiteNode* node, + const OpDataMul* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + + op_params.quantized_activation_min = data->output_activation_min; + op_params.quantized_activation_max = data->output_activation_max; + op_params.float_activation_max = data->output_activation_max_f32; + op_params.input1_offset = -data->input1_zero_point; + op_params.input2_offset = -data->input2_zero_point; + op_params.output_offset = data->output_zero_point; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + if (input1->type == kTfLiteInt8) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else if (input1->type == kTfLiteInt16) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + + } else { + if (input1->type == kTfLiteInt8) { + arm_elementwise_mul_s8( + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input2), op_params.input1_offset, + op_params.input2_offset, tflite::micro::GetTensorData(output), + op_params.output_offset, op_params.output_multiplier, + op_params.output_shift, op_params.quantized_activation_min, + op_params.quantized_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } else if (input1->type == kTfLiteInt16) { + arm_elementwise_mul_s16( + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input2), + op_params.input1_offset, op_params.input2_offset, + tflite::micro::GetTensorData(output), + op_params.output_offset, op_params.output_multiplier, + op_params.output_shift, op_params.quantized_activation_min, + op_params.quantized_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + 
tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } + } +} + +} // namespace + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataMul* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kMulInput1Tensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kMulInput2Tensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kMulOutputTensor); + + switch (input1->type) { + case kTfLiteInt8: + EvalQuantized(context, node, data, input1, input2, output); + break; + case kTfLiteInt16: + EvalQuantized(context, node, data, input1, input2, output); + break; + case kTfLiteInt32: + EvalMulQuantizedReference(context, node, data, input1, input2, output); + break; + case kTfLiteFloat32: + EvalMulFloatReference(context, node, params, data, input1, input2, + output); + break; + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus EvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + TFLITE_DCHECK(node->user_data != nullptr); + + const OpDataMul* data = static_cast(node->user_data); + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kMulInput1Tensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kMulInput2Tensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kMulOutputTensor); + TFLITE_DCHECK(input1->type == kTfLiteInt8); + + EvalQuantized(context, node, data, input1, input2, output); + + return kTfLiteOk; +} + +TfLiteStatus EvalInt16(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + TFLITE_DCHECK(node->user_data != nullptr); + + const OpDataMul* data = static_cast(node->user_data); + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kMulInput1Tensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kMulInput2Tensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kMulOutputTensor); + TFLITE_DCHECK(input1->type == kTfLiteInt16); + + EvalQuantized(context, node, data, input1, input2, output); + + return kTfLiteOk; +} + +TfLiteRegistration Register_MUL() { + return tflite::micro::RegisterOp(MulInit, MulPrepare, Eval); +} + +TfLiteRegistration Register_MUL_INT8() { + return tflite::micro::RegisterOp(MulInit, MulPrepare, EvalInt8); +} + +TfLiteRegistration Register_MUL_INT16() { + return tflite::micro::RegisterOp(MulInit, MulPrepare, EvalInt16); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +#if ESP_NN +#include "edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h" +#endif + +#include + +long long mul_total_time = 0; + +namespace tflite { +#if ESP_NN +void MulEvalQuantized(TfLiteContext* context, TfLiteNode* node, + const OpDataMul* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + op_params.quantized_activation_min = data->output_activation_min; + op_params.quantized_activation_max = data->output_activation_max; + op_params.float_activation_max = data->output_activation_max_f32; + op_params.input1_offset = -data->input1_zero_point; + op_params.input2_offset = -data->input2_zero_point; + op_params.output_offset = data->output_zero_point; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + const int8_t *input1_data = tflite::micro::GetTensorData(input1); + const int8_t *input2_data = tflite::micro::GetTensorData(input2); + int8_t *out_data = tflite::micro::GetTensorData(output); + + esp_nn_mul_elementwise_s8(input1_data, input2_data, op_params.input1_offset, + op_params.input2_offset, out_data, op_params.output_offset, + op_params.output_multiplier, op_params.output_shift, + op_params.quantized_activation_min, op_params.quantized_activation_max, + MatchingElementsSize(tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorShape(output))); + } +} +#endif + +TfLiteStatus MulEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataMul* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kMulInput1Tensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kMulInput2Tensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, 
kMulOutputTensor); + + long long start_time = esp_timer_get_time(); + switch (input1->type) { + case kTfLiteInt8: +#if ESP_NN + MulEvalQuantized(context, node, data, input1, input2, output); +#else + EvalMulQuantizedReference(context, node, data, input1, input2, output); +#endif + break; + case kTfLiteInt32: + EvalMulQuantizedReference(context, node, data, input1, input2, output); + break; + case kTfLiteFloat32: + EvalMulFloatReference(context, node, params, data, input1, input2, + output); + break; + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); + return kTfLiteError; + } + mul_total_time += esp_timer_get_time() - start_time; + return kTfLiteOk; +} + +TfLiteRegistration Register_MUL() { + return tflite::micro::RegisterOp(MulInit, MulPrepare, MulEval); +} + +} // namespace tflite + +#else +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +TfLiteStatus MulEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataMul* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kMulInput1Tensor); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kMulInput2Tensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kMulOutputTensor); + + switch (input1->type) { + case kTfLiteInt8: + case kTfLiteInt16: + case kTfLiteInt32: + EvalMulQuantizedReference(context, node, data, input1, input2, output); + break; + case kTfLiteFloat32: + EvalMulFloatReference(context, node, params, data, input1, input2, + output); + break; + default: + MicroPrintf("Type %s (%d) not supported.", + TfLiteTypeGetName(input1->type), input1->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteRegistration Register_MUL() { + return tflite::micro::RegisterOp(MulInit, MulPrepare, MulEval); +} + +} // namespace tflite + +#endif diff --git 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.cpp deleted file mode 100644 index cef7cb6..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.cpp +++ /dev/null @@ -1,470 +0,0 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels -#include "../../../../classifier/ei_classifier_config.h" -#if 0 == 1 -/* noop */ -#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h" - -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace mul { - -constexpr int kInput1Tensor = 0; -constexpr int kInput2Tensor = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - int32_t output_activation_min; - int32_t output_activation_max; - - int32_t output_multiplier; - int output_shift; - - // Cached tensor zero point values for quantized operations. 
- int32_t input1_zero_point; - int32_t input2_zero_point; - int32_t output_zero_point; - - float output_activation_min_f32; - float output_activation_max_f32; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, OpData* data) { - const TfLiteTensor* input1 = GetInput(context, node, kInput1Tensor); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInput2Tensor); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - - double real_multiplier = static_cast(input1->params.scale) * - static_cast(input2->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &data->output_multiplier, - &data->output_shift); - - data->input1_zero_point = input1->params.zero_point; - data->input2_zero_point = input2->params.zero_point; - data->output_zero_point = output->params.zero_point; - } else { - CalculateActivationRange(params->activation, - &data->output_activation_min_f32, - &data->output_activation_max_f32); - } - - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - return CalculateOpData(context, node, params, data); -} - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, const OpData& data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params; - op_params.quantized_activation_min = data.output_activation_min; - op_params.quantized_activation_max = data.output_activation_max; - op_params.input1_offset = -data.input1_zero_point; - op_params.input2_offset = -data.input2_zero_point; - op_params.output_offset = data.output_zero_point; - op_params.output_multiplier = data.output_multiplier; - op_params.output_shift = data.output_shift; - - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - -#define TF_LITE_MUL(type, opname, dtype) \ - type::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)); - - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - TF_LITE_MUL(reference_integer_ops, BroadcastMul4DSlow, int8_t); - } else { - arm_elementwise_mul_s8( - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorData(input2), op_params.input1_offset, - op_params.input2_offset, tflite::micro::GetTensorData(output), - 
op_params.output_offset, op_params.output_multiplier, - op_params.output_shift, op_params.quantized_activation_min, - op_params.quantized_activation_max, - MatchingElementsSize(tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorShape(output))); - } - } else if (output->type == kTfLiteUInt8) { - if (need_broadcast) { - TF_LITE_MUL(reference_integer_ops, BroadcastMul4DSlow, uint8_t); - } else { - TF_LITE_MUL(reference_integer_ops, Mul, uint8_t); - } - } -#undef TF_LITE_MUL -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, const OpData& data, - const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params; - op_params.float_activation_min = data.output_activation_min_f32; - op_params.float_activation_max = data.output_activation_max_f32; - - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); -#define TF_LITE_MUL(opname) \ - reference_ops::opname(op_params, tflite::micro::GetTensorShape(input1), \ - tflite::micro::GetTensorData(input1), \ - tflite::micro::GetTensorShape(input2), \ - tflite::micro::GetTensorData(input2), \ - tflite::micro::GetTensorShape(output), \ - tflite::micro::GetTensorData(output)); - - if (need_broadcast) { - TF_LITE_MUL(BroadcastMul4DSlow); - } else { - TF_LITE_MUL(Mul); - } -#undef TF_LITE_MUL -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInput1Tensor); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInput2Tensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - switch (input1->type) { - case kTfLiteUInt8: - case kTfLiteInt8: - EvalQuantized(context, node, data, input1, input2, output); - break; - case kTfLiteFloat32: - EvalFloat(context, node, params, data, input1, input2, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - - return kTfLiteOk; -} -} // namespace mul - -TfLiteRegistration Register_MUL() { - return {/* Init=*/mul::Init, - /* Free=*/nullptr, - /* Prepare=*/mul::Prepare, - /*invoke=*/mul::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#else -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace mul { -namespace { - -constexpr int kInput1Tensor = 0; -constexpr int kInput2Tensor = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - int32_t input1_zero_point; - int32_t input2_zero_point; - - int32_t output_activation_min; - int32_t output_activation_max; - int32_t output_zero_point; - int32_t output_multiplier; - int output_shift; - - float output_activation_min_f32; - float output_activation_max_f32; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, OpData* data) { - const TfLiteTensor* input1 = GetInput(context, node, kInput1Tensor); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInput2Tensor); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); - TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - - TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - - double real_multiplier = static_cast(input1->params.scale) * - static_cast(input2->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &data->output_multiplier, - &data->output_shift); - - data->input1_zero_point = input1->params.zero_point; - data->input2_zero_point = input2->params.zero_point; - data->output_zero_point = output->params.zero_point; - } else { - CalculateActivationRange(params->activation, - &data->output_activation_min_f32, - &data->output_activation_max_f32); - } - - return kTfLiteOk; -} - -} // namespace - -void EvalQuantized(TfLiteContext* context, TfLiteNode* node, const OpData* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params = {}; - op_params.quantized_activation_min = data->output_activation_min; - op_params.quantized_activation_max = data->output_activation_max; - op_params.float_activation_max = data->output_activation_max_f32; - op_params.input1_offset = -data->input1_zero_point; - op_params.input2_offset = -data->input2_zero_point; - op_params.output_offset = data->output_zero_point; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), 
&op_params); - - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - reference_integer_ops::BroadcastMul4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::Mul(op_params, - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } else if (output->type == kTfLiteUInt8) { - if (need_broadcast) { - reference_integer_ops::BroadcastMul4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::Mul(op_params, - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } -} - -void EvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLiteMulParams* params, const OpData* data, - const TfLiteEvalTensor* input1, const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - tflite::ArithmeticParams op_params = {}; - op_params.float_activation_min = data->output_activation_min_f32; - op_params.float_activation_max = data->output_activation_max_f32; - - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - - if (need_broadcast) { - reference_ops::BroadcastMul4DSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Mul(op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - return CalculateOpData(context, node, params, data); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInput1Tensor); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInput2Tensor); - TfLiteEvalTensor* output = - 
tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input1->type) { - case kTfLiteUInt8: - case kTfLiteInt8: - EvalQuantized(context, node, data, input1, input2, output); - break; - case kTfLiteFloat32: - EvalFloat(context, node, params, data, input1, input2, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input1->type), input1->type); - return kTfLiteError; - } - - return kTfLiteOk; -} -} // namespace mul - -TfLiteRegistration Register_MUL() { - return {/*init=*/mul::Init, - /*free=*/nullptr, - /*prepare=*/mul::Prepare, - /*invoke=*/mul::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.h new file mode 100644 index 0000000..61d4605 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.h @@ -0,0 +1,74 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { + +extern const int kMulInput1Tensor; +extern const int kMulInput2Tensor; +extern const int kMulOutputTensor; + +struct OpDataMul { + int32_t input1_zero_point; + int32_t input2_zero_point; + + int32_t output_activation_min; + int32_t output_activation_max; + int32_t output_zero_point; + int32_t output_multiplier; + int output_shift; + + float output_activation_min_f32; + float output_activation_max_f32; +}; + +void* MulInit(TfLiteContext* context, const char* buffer, size_t length); + +TfLiteStatus CalculateOpDataMul(TfLiteContext* context, TfLiteNode* node, + TfLiteMulParams* params, OpDataMul* data); + +TfLiteStatus MulPrepare(TfLiteContext* context, TfLiteNode* node); + +TfLiteStatus EvalMulQuantizedReference(TfLiteContext* context, TfLiteNode* node, + const OpDataMul* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output); + +void EvalMulFloatReference(TfLiteContext* context, TfLiteNode* node, + TfLiteMulParams* params, const OpDataMul* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output); + +// Generic must define registration function. 
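// Note (not from the upstream header): on builds without CMSIS-NN, the
// specialized Register_MUL_INT8() declared below is just an inline fallback
// that returns the generic Register_MUL() registration.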
+TfLiteRegistration Register_MUL(); + +#if defined(CMSIS_NN) +TfLiteRegistration Register_MUL_INT8(); +#else +// Fallback registration +inline TfLiteRegistration Register_MUL_INT8() { return Register_MUL(); } +#endif +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_MUL_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul_common.cc new file mode 100644 index 0000000..187fae2 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/mul_common.cc @@ -0,0 +1,213 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mul.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" + +namespace tflite { + +const int kMulInput1Tensor = 0; +const int kMulInput2Tensor = 1; +const int kMulOutputTensor = 0; + +void* MulInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataMul)); +} + +TfLiteStatus CalculateOpDataMul(TfLiteContext* context, TfLiteNode* node, + TfLiteMulParams* params, OpDataMul* data) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kMulInput1Tensor); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kMulInput2Tensor); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kMulOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + + double real_multiplier = static_cast(input1->params.scale) * + 
static_cast(input2->params.scale) / + static_cast(output->params.scale); + QuantizeMultiplier(real_multiplier, &data->output_multiplier, + &data->output_shift); + + data->input1_zero_point = input1->params.zero_point; + data->input2_zero_point = input2->params.zero_point; + data->output_zero_point = output->params.zero_point; + + if (input1->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, data->input1_zero_point, 0); + TF_LITE_ENSURE_EQ(context, data->input2_zero_point, 0); + TF_LITE_ENSURE_EQ(context, data->output_zero_point, 0); + } + } else if (output->type == kTfLiteInt32) { + CalculateActivationRange(params->activation, &data->output_activation_min, + &data->output_activation_max); + } else { + CalculateActivationRange(params->activation, + &data->output_activation_min_f32, + &data->output_activation_max_f32); + } + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus MulPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + OpDataMul* data = static_cast(node->user_data); + + return CalculateOpDataMul(context, node, params, data); +} + +TfLiteStatus EvalMulQuantizedReference(TfLiteContext* context, TfLiteNode* node, + const OpDataMul* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + op_params.quantized_activation_min = data->output_activation_min; + op_params.quantized_activation_max = data->output_activation_max; + op_params.float_activation_max = data->output_activation_max_f32; + op_params.input1_offset = -data->input1_zero_point; + op_params.input2_offset = -data->input2_zero_point; + op_params.output_offset = data->output_zero_point; + op_params.output_multiplier = data->output_multiplier; + op_params.output_shift = data->output_shift; + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (input1->type == kTfLiteInt8) { + if (need_broadcast) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_integer_ops::Mul(op_params, + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } else if (input1->type == kTfLiteInt32) { + if (need_broadcast) { + reference_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Mul(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } else 
if (input1->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, op_params.input1_offset, 0); + TF_LITE_ENSURE_EQ(context, op_params.input2_offset, 0); + TF_LITE_ENSURE_EQ(context, op_params.output_offset, 0); + + if (need_broadcast) { + reference_integer_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_integer_ops::Mul(op_params, + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } + return kTfLiteOk; +} + +void EvalMulFloatReference(TfLiteContext* context, TfLiteNode* node, + TfLiteMulParams* params, const OpDataMul* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params = {}; + op_params.float_activation_min = data->output_activation_min_f32; + op_params.float_activation_max = data->output_activation_max_f32; + + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + if (need_broadcast) { + reference_ops::BroadcastMul4DSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Mul(op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/neg.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/neg.cc similarity index 75% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/neg.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/neg.cc index cc19d42..249f7ad 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/neg.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/neg.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -18,11 +18,11 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace neg { + +namespace { constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; @@ -41,26 +41,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); return kTfLiteError; } return kTfLiteOk; } -} // namespace neg +} // namespace TfLiteRegistration Register_NEG() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/neg::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, nullptr, Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pack.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pack.cc similarity index 82% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pack.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pack.cc index 4130f7c..79615bd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pack.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pack.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,11 +17,10 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace pack { + namespace { constexpr int kOutputTensor = 0; @@ -82,10 +81,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return PackImpl(context, node, output, data->values_count, data->axis); } - case kTfLiteUInt8: { - return PackImpl(context, node, output, data->values_count, - data->axis); - } case kTfLiteInt8: { return PackImpl(context, node, output, data->values_count, data->axis); @@ -99,8 +94,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { data->axis); } default: { - TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by pack.", - TfLiteTypeGetName(output->type)); + MicroPrintf("Type '%s' is not supported by pack.", + TfLiteTypeGetName(output->type)); return kTfLiteError; } } @@ -109,19 +104,9 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } } // namespace -} // namespace pack TfLiteRegistration Register_PACK() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/pack::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, nullptr, Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.cc similarity index 73% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.cc index ec59d19..a7d7edd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.cc @@ -23,11 +23,9 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace pad { namespace { struct OpData { @@ -35,27 +33,115 @@ struct OpData { int32_t output_zero_point; }; -} // namespace - void* Init(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(OpData)); } -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const OpData* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, /*index=*/0); + const TfLiteEvalTensor* constant_values = + NumInputs(node) == 3 + ? 
tflite::micro::GetEvalInput(context, node, /*index=*/2) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, /*index=*/0); + + switch (input->type) { + case kTfLiteFloat32: { + float pad_value = + constant_values == nullptr + ? 0.f + : *tflite::micro::GetTensorData(constant_values); + if (data->params.resizing_category == ResizingCategory::kImageStyle) { + reference_ops::PadImageStyle( + data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), &pad_value, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + &pad_value, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } break; + case kTfLiteInt8: { + int8_t pad_value; + if (constant_values == nullptr) { + pad_value = static_cast(data->output_zero_point); + } else { + pad_value = *tflite::micro::GetTensorData(constant_values); + } + if (data->params.resizing_category == ResizingCategory::kImageStyle) { + reference_ops::PadImageStyle( + data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), &pad_value, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + &pad_value, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } break; + case kTfLiteInt16: { + int16_t pad_value = + constant_values == nullptr + ? 0 + : *tflite::micro::GetTensorData(constant_values); + reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + &pad_value, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } break; + case kTfLiteInt32: { + int32_t pad_value = + constant_values == nullptr + ? 0 + : *tflite::micro::GetTensorData(constant_values); + reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + &pad_value, tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } break; + default: + + MicroPrintf("Type %s not currently supported by Pad.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteStatus PadPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TFLITE_DCHECK(node->user_data != nullptr); OpData* data = static_cast(node->user_data); TF_LITE_ENSURE(context, NumInputs(node) == 2 || NumInputs(node) == 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, /*index=*/0); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, /*index=*/0); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* paddings = GetInput(context, node, /*index=*/1); + TfLiteTensor* paddings = + micro_context->AllocateTempInputTensor(node, /*index=*/1); TF_LITE_ENSURE(context, paddings != nullptr); - const TfLiteTensor* constant_values = - NumInputs(node) == 3 ? GetInput(context, node, /*index=*/2) : nullptr; - TfLiteTensor* output = GetOutput(context, node, /*index=*/0); + TfLiteTensor* constant_values = + NumInputs(node) == 3 + ? 
micro_context->AllocateTempInputTensor(node, /*index=*/2) + : nullptr; + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, /*index=*/0); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_EQ(context, input->type, output->type); @@ -103,21 +189,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { data->params.right_padding[idx] = paddings_data[idx * 2 + 1]; } - if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { + if (input->type == kTfLiteInt8) { if (constant_values == nullptr) { // Quantized Pad requires that 0 is represented in the quantized // range. - if (input->type == kTfLiteUInt8) { - TF_LITE_ENSURE(context, output->params.zero_point >= - std::numeric_limits::min()); - TF_LITE_ENSURE(context, output->params.zero_point <= - std::numeric_limits::max()); - } else { - TF_LITE_ENSURE(context, output->params.zero_point >= - std::numeric_limits::min()); - TF_LITE_ENSURE(context, output->params.zero_point <= - std::numeric_limits::max()); - } + TF_LITE_ENSURE(context, output->params.zero_point >= + std::numeric_limits::min()); + TF_LITE_ENSURE(context, output->params.zero_point <= + std::numeric_limits::max()); } else { // Quantized Pad requires that 'constant_values' is represented in the // same quantized range as the input and output tensors. @@ -129,126 +208,23 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { data->output_zero_point = output->params.zero_point; } - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, /*index=*/0); - const TfLiteEvalTensor* constant_values = - NumInputs(node) == 3 - ? tflite::micro::GetEvalInput(context, node, /*index=*/2) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, /*index=*/0); - - switch (input->type) { - case kTfLiteFloat32: { - float pad_value = - constant_values == nullptr - ? 
0.f - : *tflite::micro::GetTensorData(constant_values); - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteUInt8: { - uint8_t pad_value; - if (constant_values == nullptr) { - pad_value = static_cast(data->output_zero_point); - } else { - pad_value = *tflite::micro::GetTensorData(constant_values); - } - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteInt8: { - int8_t pad_value; - if (constant_values == nullptr) { - pad_value = static_cast(data->output_zero_point); - } else { - pad_value = *tflite::micro::GetTensorData(constant_values); - } - if (data->params.resizing_category == ResizingCategory::kImageStyle) { - reference_ops::PadImageStyle( - data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), &pad_value, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } break; - case kTfLiteInt32: { - int32_t pad_value = - constant_values == nullptr - ? 0 - : *tflite::micro::GetTensorData(constant_values); - reference_ops::Pad(data->params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - &pad_value, tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } break; - default: - - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported by Pad.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(paddings); + if (constant_values != nullptr) { + micro_context->DeallocateTempTfLiteTensor(constant_values); } -#undef TF_LITE_PAD + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } -} // namespace pad - TfLiteRegistration Register_PAD() { - return {/*init=*/pad::Init, - /*free=*/nullptr, - /*prepare=*/pad::Prepare, - /*invoke=*/pad::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, PadPrepare, Eval); } // Also register Pad as PadV2. 
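The Pad hunks above keep one rule for the quantized path: when no constant_values tensor is supplied, int8 padding uses the output zero point (the quantized representation of 0), while float/int16/int32 pad with literal zero, which is also why PadPrepare range-checks the zero point against int8 limits. A small standalone sketch of that selection rule (PadValueInt8 and PadValueFloat are illustrative names, not SDK functions):

// Illustrative sketch, not part of the patched SDK code: the pad-value rule
// used by the quantized Pad path, with plain values instead of tensors.
#include <cstdint>
#include <cstdio>
#include <optional>

// With no constant_values input, quantized Pad must pad with the value that
// represents real 0, i.e. the output zero point.
int8_t PadValueInt8(std::optional<int8_t> constant_value,
                    int32_t output_zero_point) {
  return constant_value ? *constant_value
                        : static_cast<int8_t>(output_zero_point);
}

// Float (and int16/int32) inputs simply pad with 0 when no constant is given.
float PadValueFloat(std::optional<float> constant_value) {
  return constant_value ? *constant_value : 0.f;
}

int main() {
  std::printf("int8 pad value without constant: %d\n",
              PadValueInt8(std::nullopt, /*output_zero_point=*/-128));
  std::printf("float pad value with constant:   %.1f\n",
              PadValueFloat(3.5f));
  return 0;
}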
TfLiteRegistration Register_PADV2() { - return {/*init=*/pad::Init, - /*free=*/nullptr, - /*prepare=*/pad::Prepare, - /*invoke=*/pad::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, PadPrepare, Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ethosu.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.h similarity index 72% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ethosu.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.h index 3ab4fce..81d1a9f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/ethosu.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pad.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,15 +13,15 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -// -// This is a stub file for non-Ethos platforms -// +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_ + #include "edge-impulse-sdk/tensorflow/lite/c/common.h" namespace tflite { -TfLiteRegistration* Register_ETHOSU() { return nullptr; } - -const char* GetString_ETHOSU() { return ""; } +TfLiteStatus PadPrepare(TfLiteContext* context, TfLiteNode* node); } // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_PAD_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.cc new file mode 100644 index 0000000..8b6f9e0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.cc @@ -0,0 +1,1567 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +struct OpData { + OpDataPooling reference_op_data; + + // Index to buffer for optimizations if applicable. + int buffer_idx; +}; + +void PopulateCommonParams( + TfLiteContext* const context, cmsis_nn_dims* const input_dims, + cmsis_nn_dims* const output_dims, cmsis_nn_pool_params* const pool_params, + cmsis_nn_context* const ctx, cmsis_nn_dims* const filter_dims, + const OpData& data, const RuntimeShape& input_shape, + const RuntimeShape& output_shape, const TfLitePoolParams* params) { + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + + input_dims->n = 1; + input_dims->h = input_shape.Dims(1); + input_dims->w = input_shape.Dims(2); + input_dims->c = depth; + + output_dims->n = 1; + output_dims->h = output_shape.Dims(1); + output_dims->w = output_shape.Dims(2); + output_dims->c = depth; + + pool_params->stride.h = params->stride_height; + pool_params->stride.w = params->stride_width; + pool_params->padding.h = data.reference_op_data.padding.height; + pool_params->padding.w = data.reference_op_data.padding.width; + pool_params->activation.min = data.reference_op_data.activation_min; + pool_params->activation.max = data.reference_op_data.activation_max; + + filter_dims->n = 1; + filter_dims->h = params->filter_height; + filter_dims->w = params->filter_width; + filter_dims->c = 1; + ctx->buf = nullptr; + ctx->size = 0; + if (data.buffer_idx > -1) { + ctx->buf = context->GetScratchBuffer(context, data.buffer_idx); + } +} + +void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, const OpData& data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + TFLITE_DCHECK((input->type == kTfLiteInt8) || (input->type == kTfLiteInt16)); + + RuntimeShape input_shape = micro::GetTensorShape(input); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + + RuntimeShape output_shape = micro::GetTensorShape(output); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + cmsis_nn_dims input_dims; + cmsis_nn_dims output_dims; + cmsis_nn_pool_params pool_params; + cmsis_nn_dims filter_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &input_dims, &output_dims, &pool_params, &ctx, + &filter_dims, data, input_shape, output_shape, params); + + if (input->type == kTfLiteInt8) { + TFLITE_DCHECK_EQ( + arm_avgpool_s8(&ctx, &pool_params, &input_dims, + micro::GetTensorData(input), &filter_dims, + &output_dims, micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } else { + TFLITE_DCHECK_EQ( + arm_avgpool_s16(&ctx, &pool_params, &input_dims, + micro::GetTensorData(input), &filter_dims, + &output_dims, micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } +} + +TfLiteStatus MaxEvalQuantized(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, + const OpData& 
data, const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + TFLITE_DCHECK((input->type == kTfLiteInt8) || (input->type == kTfLiteInt16)); + + RuntimeShape input_shape = micro::GetTensorShape(input); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + + RuntimeShape output_shape = micro::GetTensorShape(output); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + cmsis_nn_dims input_dims; + cmsis_nn_dims output_dims; + cmsis_nn_pool_params pool_params; + cmsis_nn_dims filter_dims; + cmsis_nn_context ctx; + + PopulateCommonParams(context, &input_dims, &output_dims, &pool_params, &ctx, + &filter_dims, data, input_shape, output_shape, params); + + if (input->type == kTfLiteInt8) { + TFLITE_DCHECK_EQ( + arm_max_pool_s8(&ctx, &pool_params, &input_dims, + micro::GetTensorData(input), &filter_dims, + &output_dims, micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } else { + TFLITE_DCHECK_EQ( + arm_max_pool_s16(&ctx, &pool_params, &input_dims, + micro::GetTensorData(input), &filter_dims, + &output_dims, micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + } + + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus MaxPrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_STATUS(PoolingPrepare(context, node)); + // Set buffer index to a reset value + static_cast(node->user_data)->buffer_idx = -1; + return kTfLiteOk; +} + +TfLiteStatus AveragePrepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_STATUS(PoolingPrepare(context, node)); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kPoolingInputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kPoolingOutputTensor); + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + RuntimeShape input_shape = GetTensorShape(input); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + + RuntimeShape output_shape = GetTensorShape(output); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + const int output_width = output_shape.Dims(2); + + const int32_t buffer_size = + input->type == kTfLiteInt16 + ? arm_avgpool_s16_get_buffer_size(output_width, depth) + : arm_avgpool_s8_get_buffer_size(output_width, depth); + + auto* data = static_cast(node->user_data); + if (buffer_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buffer_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + return kTfLiteOk; +} + +TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + // Inputs and outputs share the same type, guaranteed by the converter. 
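PopulateCommonParams above flattens the 4-D NHWC tensor shapes and the TfLitePoolParams into the dims/pool-params structs that the CMSIS-NN arm_avgpool_s8 / arm_max_pool_s8 calls expect, with n fixed to 1 because each invocation handles a single batch. A standalone sketch of that mapping using simplified stand-in structs (Dims, PoolParams and Shape below are not the real cmsis_nn types):

// Illustrative sketch, not part of the patched SDK code: how NHWC shapes map
// onto CMSIS-NN style dims/pool-params structs, using stand-in types.
#include <cstdio>

struct Dims { int n, h, w, c; };   // stand-in for cmsis_nn_dims
struct PoolParams {                // stand-in for cmsis_nn_pool_params
  struct { int h, w; } stride, padding;
  struct { int min, max; } activation;
};
struct Shape { int n, h, w, c; };  // NHWC tensor shape

void Populate(const Shape& in, const Shape& out,
              int stride_h, int stride_w, int pad_h, int pad_w,
              int act_min, int act_max, int filter_h, int filter_w,
              Dims* input_dims, Dims* output_dims, Dims* filter_dims,
              PoolParams* pool) {
  const int depth = in.c;                   // channels match between in/out
  *input_dims  = {1, in.h,  in.w,  depth};  // batches handled per-invocation
  *output_dims = {1, out.h, out.w, depth};
  *filter_dims = {1, filter_h, filter_w, 1};
  pool->stride     = {stride_h, stride_w};
  pool->padding    = {pad_h, pad_w};
  pool->activation = {act_min, act_max};
}

int main() {
  Dims in_d, out_d, filt_d; PoolParams p;
  Populate({1, 8, 8, 4}, {1, 4, 4, 4}, 2, 2, 0, 0, -128, 127, 2, 2,
           &in_d, &out_d, &filt_d, &p);
  std::printf("input %dx%dx%dx%d -> output %dx%dx%dx%d, filter %dx%d\n",
              in_d.n, in_d.h, in_d.w, in_d.c,
              out_d.n, out_d.h, out_d.w, out_d.c, filt_d.h, filt_d.w);
  return 0;
}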
+ if (input->type == kTfLiteFloat32) { +#if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + AveragePoolingEvalFloat(context, node, params, &data.reference_op_data, + input, output); + } else if (input->type == kTfLiteInt8) { +#if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + AverageEvalQuantized(context, node, params, data, input, output); + } else if (input->type == kTfLiteInt16) { + AverageEvalQuantized(context, node, params, data, input, output); + } else { + MicroPrintf("Input type %s is not currently supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus AverageEvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TFLITE_DCHECK(input->type == kTfLiteInt8); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + AverageEvalQuantized(context, node, params, data, input, output); + + return kTfLiteOk; +} + +TfLiteStatus AverageEvalInt16(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TFLITE_DCHECK(input->type == kTfLiteInt16); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + AverageEvalQuantized(context, node, params, data, input, output); + + return kTfLiteOk; +} +TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + if (input->type == kTfLiteFloat32) { +#if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + MaxPoolingEvalFloat(context, node, params, &data.reference_op_data, input, + output); + } else if (input->type == kTfLiteInt8) { +#if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + MaxEvalQuantized(context, node, params, data, input, output); + } else if (input->type == kTfLiteInt16) { + MaxEvalQuantized(context, node, params, data, input, output); + } else { + MicroPrintf("Input type %s is not currently supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus MaxEvalInt8(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + 
TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TFLITE_DCHECK(input->type == kTfLiteInt8); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + MaxEvalQuantized(context, node, params, data, input, output); + return kTfLiteOk; +} + +TfLiteStatus MaxEvalInt16(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TFLITE_DCHECK(input->type == kTfLiteInt16); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + MaxEvalQuantized(context, node, params, data, input, output); + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_AVERAGE_POOL_2D_INT8() { + return tflite::micro::RegisterOp(Init, AveragePrepare, AverageEvalInt8); +} + +TfLiteRegistration Register_AVERAGE_POOL_2D_INT16() { + return tflite::micro::RegisterOp(Init, AveragePrepare, AverageEvalInt16); +} + +TfLiteRegistration Register_AVERAGE_POOL_2D() { + return tflite::micro::RegisterOp(Init, AveragePrepare, AverageEval); +} + +TfLiteRegistration Register_MAX_POOL_2D_INT8() { + return tflite::micro::RegisterOp(Init, MaxPrepare, MaxEvalInt8); +} + +TfLiteRegistration Register_MAX_POOL_2D_INT16() { + return tflite::micro::RegisterOp(Init, MaxPrepare, MaxEvalInt16); +} + +TfLiteRegistration Register_MAX_POOL_2D() { + return tflite::micro::RegisterOp(Init, MaxPrepare, MaxEval); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
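This pooling.cc is generated as a chain of preprocessor branches (CMSIS-NN, ARC MLI, Silabs MVP, ESP-NN, plain reference) selected by the EI_CLASSIFIER_TFLITE_ENABLE_* flags from ei_classifier_config.h, and each Eval can additionally be compiled out per data type through the EI_TFLITE_DISABLE_*_IN_F32 / _IN_I8 guards that return kTfLiteError early. A compile-time sketch of the same selection pattern with made-up macro names (SKETCH_ENABLE_FAST_BACKEND and SKETCH_DISABLE_FLOAT_PATH are not the Edge Impulse flags):

// Illustrative sketch, not part of the patched SDK code: compile-time backend
// selection plus a per-type disable guard, with made-up macro names.
#include <cstdio>

// Normally these would come from a generated config header.
#ifndef SKETCH_ENABLE_FAST_BACKEND
#define SKETCH_ENABLE_FAST_BACKEND 1
#endif
#ifndef SKETCH_DISABLE_FLOAT_PATH
#define SKETCH_DISABLE_FLOAT_PATH 0
#endif

enum class Type { kFloat32, kInt8 };

#if SKETCH_ENABLE_FAST_BACKEND == 1
// "Accelerated" branch: only compiled in when the flag is set.
int Eval(Type t) {
#if SKETCH_DISABLE_FLOAT_PATH
  if (t == Type::kFloat32) {
    std::puts("float path disabled at build time");
    return 1;  // mirrors the early `return kTfLiteError;`
  }
#endif
  std::puts(t == Type::kInt8 ? "accelerated int8 kernel"
                             : "accelerated float kernel");
  return 0;
}
#else
// Reference branch used when no accelerator flag is defined.
int Eval(Type t) {
  std::puts("reference kernel");
  return 0;
}
#endif

int main() { return Eval(Type::kInt8); }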
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h" + +#include "mli_api.h" // NOLINT +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_function_specializations.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +struct OpData { + TfLitePaddingValues padding; + int32_t activation_min; + int32_t activation_max; + float activation_min_f32; + float activation_max_f32; + + // The result of checking if MLI optimized version of tensors can be used. + bool is_mli_applicable; + + // Tensors in MLI format. + mutable ops::micro::MliTensorInterface mli_in; + mutable ops::micro::MliTensorInterface mli_out; + mli_pool_cfg* cfg; + + // Pointer to the mli convolution function. + pooling_func_ptr p_mli_krn_avepool_hwc_sa8; + pooling_func_ptr p_mli_krn_maxpool_hwc_sa8; +}; + +enum MliPoolingType { AveragePooling = 0, MaxPooling = 1 }; + +bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input, + const TfLitePoolParams* params) { + // MLI optimized version only supports int8_t datatype and no fused Relu + return (input->type == kTfLiteInt8 && params->activation == kTfLiteActNone); +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, + const TfLitePoolParams* params, + const TfLiteTensor* input, + const TfLiteTensor* output, OpData* data) { + // input: batch, height, width, channel + int height = SizeOfDimension(input, 1); + int width = SizeOfDimension(input, 2); + + int out_height, out_width; + + data->padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, + /*dilation_rate_height=*/1, + /*dilation_rate_width=*/1, height, width, params->filter_height, + params->filter_width, params->padding, &out_height, &out_width); + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + OpData* data = static_cast(node->user_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + data->is_mli_applicable = IsMliApplicable(context, input, 
params); + + TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, data)); + + if (input->type == kTfLiteFloat32) { + CalculateActivationRange(params->activation, &data->activation_min_f32, + &data->activation_max_f32); + } else if (input->type == kTfLiteInt8) { + CalculateActivationRangeQuantized(context, params->activation, output, + &data->activation_min, + &data->activation_max); + } + + if (data->is_mli_applicable) { + data->mli_in = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->mli_out = ops::micro::MliTensorInterface(static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_tensor)))); + data->cfg = static_cast( + context->AllocatePersistentBuffer(context, sizeof(mli_pool_cfg))); + + ops::micro::ConvertToMliTensor(input, &data->mli_in); + ops::micro::ConvertToMliTensor(output, &data->mli_out); + + data->cfg->kernel_width = params->filter_width; + data->cfg->kernel_height = params->filter_height; + data->cfg->stride_width = params->stride_width; + data->cfg->stride_height = params->stride_height; + + if (params->padding == kTfLitePaddingValid) { + data->cfg->padding_left = 0; + data->cfg->padding_right = 0; + data->cfg->padding_top = 0; + data->cfg->padding_bottom = 0; + } else { + data->cfg->padding_left = data->padding.width; + data->cfg->padding_right = + data->padding.width + data->padding.width_offset; + data->cfg->padding_top = data->padding.height; + data->cfg->padding_bottom = + data->padding.height + data->padding.height_offset; + } + + // Choose pooling mli specialized functions. + data->p_mli_krn_avepool_hwc_sa8 = mli_krn_avepool(data->cfg); + data->p_mli_krn_maxpool_hwc_sa8 = mli_krn_maxpool(data->cfg); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +void AverageEvalFloat(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, const OpData& data, + const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + float activation_min, activation_max; + CalculateActivationRange(params->activation, &activation_min, + &activation_max); + + PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.float_activation_min = activation_min; + op_params.float_activation_max = activation_max; + reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#else + MicroPrintf("Type %s (%d) is not supported by ARC MLI Library.", + TfLiteTypeGetName(input->type), input->type); +#endif +} + +// Prepare MLI tensors and run Average or Max Pooling +TfLiteStatus EvalMli(TfLiteContext* context, const TfLitePoolParams* params, + const OpData& data, const TfLiteEvalTensor* input, + TfLiteEvalTensor* output, + const MliPoolingType pooling_type) { + mli_pool_cfg cfg_local = *data.cfg; + + ops::micro::MliTensorAttachBuffer(input, &data.mli_in); + ops::micro::MliTensorAttachBuffer(output, &data.mli_out); + + const int height_dimension = 1; + int in_slice_height = 0; + int out_slice_height = 0; + const int overlap = 
cfg_local.kernel_height - cfg_local.stride_height; + + // Tensors for data in fast (local) memory and config to copy data from + // external to local memory + mli_tensor in_local = *data.mli_in.MliTensor(); + mli_tensor out_local = *data.mli_out.MliTensor(); + + ops::micro::MliTensorInterface in_local_interface(&in_local); + ops::micro::MliTensorInterface out_local_interface(&out_local); + + mli_mov_cfg_t copy_config; + mli_mov_cfg_for_copy(©_config); + TF_LITE_ENSURE_STATUS(get_arc_scratch_buffer_for_pooling_tensors( + context, &in_local_interface, &out_local_interface)); + + bool in_is_local = + in_local_interface.Data() == data.mli_in.Data(); + bool out_is_local = + out_local_interface.Data() == data.mli_out.Data(); + + TF_LITE_ENSURE_STATUS(arc_scratch_buffer_calc_slice_size_io( + &in_local_interface, &out_local_interface, cfg_local.kernel_height, + cfg_local.stride_height, cfg_local.padding_top, cfg_local.padding_bottom, + &in_slice_height, &out_slice_height)); + + /* mli_in tensor contains batches of HWC tensors. so it is a 4 dimensional + tensor. because the mli kernel will process one HWC tensor at a time, the 4 + dimensional tensor needs to be sliced into nBatch 3 dimensional tensors. on + top of that there could be a need to also slice in the Height dimension. + for that the sliceHeight has been calculated. The tensor slicer is + configured that it will completely slice the nBatch dimension (0) and slice + the height dimension (1) in chunks of 'sliceHeight' */ + ops::micro::TensorSlicer in_slice(data.mli_in.MliTensor(), height_dimension, + in_slice_height, cfg_local.padding_top, + cfg_local.padding_bottom, overlap); + ops::micro::TensorSlicer out_slice(data.mli_out.MliTensor(), height_dimension, + out_slice_height); + + /* is_local indicates that the tensor is already in local memory, + so in that case the original tensor can be used, + and there is no need to copy it to the local tensor*/ + mli_tensor* in_ptr = in_is_local ? in_slice.Sub() : &in_local; + mli_tensor* out_ptr = out_is_local ? 
out_slice.Sub() : &out_local; + + while (!out_slice.Done()) { + if (!out_is_local) { + ops::micro::PrepareLocalTensor(out_slice.Sub(), &out_local); + ops::micro::PrepareLocalTensor(in_slice.Sub(), &in_local); + } + cfg_local.padding_top = in_slice.GetPaddingPre(); + cfg_local.padding_bottom = in_slice.GetPaddingPost(); + + mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); + if (pooling_type == AveragePooling) { + TFLITE_DCHECK(data.p_mli_krn_avepool_hwc_sa8 != nullptr); + data.p_mli_krn_avepool_hwc_sa8(in_ptr, &cfg_local, out_ptr); + } else if (pooling_type == MaxPooling) { + TFLITE_DCHECK(data.p_mli_krn_maxpool_hwc_sa8 != nullptr); + data.p_mli_krn_maxpool_hwc_sa8(in_ptr, &cfg_local, out_ptr); + } + mli_mov_tensor_sync(out_ptr, ©_config, out_slice.Sub()); + + in_slice.Next(); + out_slice.Next(); + } + return kTfLiteOk; +} + +void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, const OpData& data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + TFLITE_DCHECK(input->type == kTfLiteInt8); + + PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.quantized_activation_min = data.activation_min; + op_params.quantized_activation_max = data.activation_max; + + reference_integer_ops::AveragePool( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#else + MicroPrintf("Type %s (%d) is not supported by ARC MLI Library.", + TfLiteTypeGetName(input->type), input->type); +#endif +} + +void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, const OpData& data, + const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + tflite::PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = data.padding.width; + op_params.float_activation_min = data.activation_min_f32; + op_params.float_activation_max = data.activation_max_f32; + reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#else + MicroPrintf( + + "Node configuration or type %s (%d) is not supported by ARC MLI Library.", + TfLiteTypeGetName(input->type), input->type); +#endif +} + +void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, const OpData& data, + const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { +#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) + TFLITE_DCHECK(input->type == kTfLiteInt8); + tflite::PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data.padding.height; + op_params.padding_values.width = 
data.padding.width; + op_params.quantized_activation_min = data.activation_min; + op_params.quantized_activation_max = data.activation_max; + + reference_integer_ops::MaxPool(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +#else + MicroPrintf( + + "Node configuration or type %s (%d) is not supported by ARC MLI Library.", + TfLiteTypeGetName(input->type), input->type); +#endif +} + +TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + // Inputs and outputs share the same type, guaranteed by the converter. + switch (input->type) { + case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + AverageEvalFloat(context, node, params, data, input, output); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + if (data.is_mli_applicable) { + EvalMli(context, params, data, input, output, AveragePooling); + } else { + AverageEvalQuantized(context, node, params, data, input, output); + } + break; + default: + MicroPrintf("Input type %s is not currently supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + switch (input->type) { + case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + MaxEvalFloat(context, node, params, data, input, output); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + if (data.is_mli_applicable) { + EvalMli(context, params, data, input, output, MaxPooling); + } else { + MaxEvalQuantized(context, node, params, data, input, output); + } + break; + default: + MicroPrintf("Type %s not currently supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_AVERAGE_POOL_2D() { + return tflite::micro::RegisterOp(Init, Prepare, AverageEval); +} + +TfLiteRegistration Register_MAX_POOL_2D() { + return tflite::micro::RegisterOp(Init, Prepare, MaxEval); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP == 1 + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" 
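The ARC MLI pooling path above slices the batched HWC input along the height dimension so each slice fits in fast local memory, and consecutive input slices overlap by kernel_height - stride_height rows. A standalone sketch of that slicing arithmetic, much simplified (no padding handling and none of TensorSlicer's real interface):

// Illustrative sketch, not part of the patched SDK code: height slicing with
// overlap, as used conceptually by the MLI TensorSlicer loop above.
#include <cstdio>

int main() {
  const int height = 16;         // input rows
  const int kernel_h = 3;
  const int stride_h = 2;
  const int slice_out_rows = 3;  // output rows produced per slice
  const int overlap = kernel_h - stride_h;  // rows shared by adjacent slices

  // Each output slice of `slice_out_rows` rows consumes this many input rows.
  const int slice_in_rows = (slice_out_rows - 1) * stride_h + kernel_h;

  for (int out_row = 0, in_row = 0; in_row < height;
       out_row += slice_out_rows, in_row += slice_in_rows - overlap) {
    int rows = slice_in_rows;
    if (in_row + rows > height) rows = height - in_row;  // last, short slice
    std::printf("input rows [%d, %d) -> output rows starting at %d\n",
                in_row, in_row + rows, out_row);
  }
  return 0;
}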
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" + +#include "sl_mvp_ml_pooling.h" + +namespace tflite { +namespace sl { +namespace pooling { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +enum op_support { kMvp, kCmsisNN, kTFLMrefF32}; + +struct OpData { + float activation_min_f32; + float activation_max_f32; + sli_mvp_ml_pooling_s8_params_t op_params; + op_support supported; + int buffer_idx; +}; + +} // namespace + + +void* Init(TfLiteContext* context, const char* buffer, size_t length) +{ + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) +{ + OpData* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + + data->op_params.padding = params->padding == kTfLitePaddingSame; + data->op_params.stride_height = params->stride_height; + data->op_params.stride_width = params->stride_width; + data->op_params.filter_height = params->filter_height; + data->op_params.filter_width = params->filter_width; + data->op_params.batches = MatchingDim(GetTensorShape(input), 0, + GetTensorShape(output), 0); + data->op_params.channels = MatchingDim(GetTensorShape(input), 3, + GetTensorShape(output), 3); + data->op_params.input_height = SizeOfDimension(input, 1); + data->op_params.input_width = SizeOfDimension(input, 2); + data->op_params.output_height = SizeOfDimension(output, 1); + data->op_params.output_width = SizeOfDimension(output, 2); + + int out_height, out_width; + auto padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, + 1, 1, // dilation rate height/width. 
+ data->op_params.input_height, data->op_params.input_width, + params->filter_height, params->filter_width, + params->padding, + &out_height, &out_width); + TFLITE_DCHECK_EQ(out_height, data->op_params.output_height); + TFLITE_DCHECK_EQ(out_width, data->op_params.output_width); + data->op_params.pad_height = padding.height; + data->op_params.pad_width = padding.width; + + if (input->type == kTfLiteFloat32) { + data->supported = kTFLMrefF32; + CalculateActivationRange(params->activation, + &data->activation_min_f32, + &data->activation_max_f32); + } else { + CalculateActivationRangeQuantized(context, params->activation, output, + reinterpret_cast(&data->op_params.output_activation_min), + reinterpret_cast(&data->op_params.output_activation_max)); + if (input->type != kTfLiteInt8) { + TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + } + + return kTfLiteOk; +} + +TfLiteStatus AveragePrepare(TfLiteContext* context, TfLiteNode* node) +{ + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + + TfLiteStatus status = Prepare(context, node); + + if (status == kTfLiteOk) { + if (input->type == kTfLiteInt8) { + data->supported = sli_mvp_ml_average_pooling_s8_is_supported(&data->op_params) + ? kMvp : kCmsisNN; + if (data->supported == kCmsisNN) { + const int32_t buffer_size = arm_avgpool_s8_get_buffer_size( + data->op_params.output_width, + data->op_params.channels); + + if (buffer_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, buffer_size, &data->buffer_idx)); + } else { + data->buffer_idx = -1; + } + } + } + } + return status; +} + +TfLiteStatus MaxPrepare(TfLiteContext* context, TfLiteNode* node) +{ + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + + TfLiteStatus status = Prepare(context, node); + + if (status == kTfLiteOk) { + if (input->type == kTfLiteInt8) { + data->supported = sli_mvp_ml_max_pooling_s8_is_supported(&data->op_params) + ? kMvp : kCmsisNN; + } + } + + return status; +} + +TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) +{ + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + data->op_params.input = tflite::micro::GetTensorData(input); + data->op_params.output = tflite::micro::GetTensorData(output); + + if (data->supported == kMvp) { + // Use MVP accelerated kernel. + TF_LITE_ENSURE_EQ(context, + SL_STATUS_OK, + sli_mvp_ml_average_pooling_s8(&data->op_params)); + + } else if (data->supported == kCmsisNN) { + // Use CMSIS-NN optimized kernel. 
+ cmsis_nn_dims input_dims; + input_dims.n = 1; + input_dims.h = data->op_params.input_height; + input_dims.w = data->op_params.input_width; + input_dims.c = data->op_params.channels; + + cmsis_nn_dims output_dims; + output_dims.n = 1; + output_dims.h = data->op_params.output_height; + output_dims.w = data->op_params.output_width; + output_dims.c = data->op_params.channels; + + cmsis_nn_pool_params pool_params; + pool_params.stride.h = data->op_params.stride_height; + pool_params.stride.w = data->op_params.stride_width; + pool_params.padding.h = data->op_params.pad_height; + pool_params.padding.w = data->op_params.pad_width; + pool_params.activation.min = data->op_params.output_activation_min; + pool_params.activation.max = data->op_params.output_activation_max; + + cmsis_nn_dims filter_dims; + filter_dims.n = 1; + filter_dims.h = data->op_params.filter_height; + filter_dims.w = data->op_params.filter_width; + filter_dims.c = 1; + + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + if (data->buffer_idx > -1) { + ctx.buf = context->GetScratchBuffer(context, data->buffer_idx); + } + + TFLITE_DCHECK_EQ( + arm_avgpool_s8(&ctx, &pool_params, &input_dims, + data->op_params.input, &filter_dims, + &output_dims, + data->op_params.output), + ARM_MATH_SUCCESS); + } else if (data->supported == kTFLMrefF32) { + #if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + // Use TFLM reference kernel. + tflite::PoolParams op_params; + op_params.stride_height = data->op_params.stride_height; + op_params.stride_width = data->op_params.stride_width; + op_params.filter_height = data->op_params.filter_height; + op_params.filter_width = data->op_params.filter_width; + op_params.padding_values.height = data->op_params.pad_height; + op_params.padding_values.width = data->op_params.pad_width; + op_params.float_activation_min = data->activation_min_f32; + op_params.float_activation_max = data->activation_max_f32; + reference_ops::AveragePool(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + + } else { + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) +{ + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + data->op_params.input = tflite::micro::GetTensorData(input); + data->op_params.output = tflite::micro::GetTensorData(output); + + if (data->supported == kMvp) { + // Use MVP accelerated kernel. + TF_LITE_ENSURE_EQ(context, + SL_STATUS_OK, + sli_mvp_ml_max_pooling_s8(&data->op_params)); + + } else if (data->supported == kCmsisNN) { + // Use CMSIS-NN optimized kernel. 
+ cmsis_nn_dims input_dims; + input_dims.n = 1; + input_dims.h = data->op_params.input_height; + input_dims.w = data->op_params.input_width; + input_dims.c = data->op_params.channels; + + cmsis_nn_dims output_dims; + output_dims.n = 1; + output_dims.h = data->op_params.output_height; + output_dims.w = data->op_params.output_width; + output_dims.c = data->op_params.channels; + + cmsis_nn_pool_params pool_params; + pool_params.stride.h = data->op_params.stride_height; + pool_params.stride.w = data->op_params.stride_width; + pool_params.padding.h = data->op_params.pad_height; + pool_params.padding.w = data->op_params.pad_width; + pool_params.activation.min = data->op_params.output_activation_min; + pool_params.activation.max = data->op_params.output_activation_max; + + cmsis_nn_dims filter_dims; + filter_dims.n = 1; + filter_dims.h = data->op_params.filter_height; + filter_dims.w = data->op_params.filter_width; + filter_dims.c = 1; + + cmsis_nn_context ctx; + ctx.buf = nullptr; + ctx.size = 0; + + TFLITE_DCHECK_EQ( + arm_max_pool_s8(&ctx, &pool_params, &input_dims, + data->op_params.input, &filter_dims, + &output_dims, + data->op_params.output), + ARM_MATH_SUCCESS); + } else if (data->supported == kTFLMrefF32) { + #if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + #endif + + // Use TFLM reference kernel. + tflite::PoolParams op_params; + op_params.stride_height = data->op_params.stride_height; + op_params.stride_width = data->op_params.stride_width; + op_params.filter_height = data->op_params.filter_height; + op_params.filter_width = data->op_params.filter_width; + op_params.padding_values.height = data->op_params.pad_height; + op_params.padding_values.width = data->op_params.pad_width; + op_params.float_activation_min = data->activation_min_f32; + op_params.float_activation_max = data->activation_max_f32; + reference_ops::MaxPool(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + + } else { + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace pooling +} // namespace sl + +TfLiteRegistration Register_MAX_POOL_2D() { + static TfLiteRegistration max_pool_registration = { + /*init=*/sl::pooling::Init, + /*free=*/nullptr, + /*prepare=*/sl::pooling::MaxPrepare, + /*invoke=*/sl::pooling::MaxEval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0 + }; + + return max_pool_registration; +} + +// Just to keep all_ops_resolver() happy during development ... +TfLiteRegistration Register_AVERAGE_POOL_2D() { + static TfLiteRegistration avg_pool_registration = { + /*init=*/sl::pooling::Init, + /*free=*/nullptr, + /*prepare=*/sl::pooling::AveragePrepare, + /*invoke=*/sl::pooling::AverageEval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0 + }; + + return avg_pool_registration; +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
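In the Silabs MVP branch above, Prepare decides once per node whether the MVP accelerator, the CMSIS-NN kernel, or the TFLM float reference path will run, and Eval only switches on that cached choice. A standalone sketch of that decide-in-prepare, dispatch-in-eval pattern (the enum values and AcceleratorSupports below are stand-ins, not the SiLabs API):

// Illustrative sketch, not part of the patched SDK code: cache a backend
// decision at prepare time and branch on it at eval time.
#include <cstdio>

enum class Backend { kAccelerator, kOptimizedCpu, kReferenceFloat };

struct OpData { Backend supported; };

// Stand-in for a support query such as sli_mvp_ml_max_pooling_s8_is_supported:
// the real code inspects strides, filter sizes and channel counts.
bool AcceleratorSupports(int channels) { return channels % 2 == 0; }

void Prepare(OpData* data, bool is_int8, int channels) {
  if (!is_int8) {
    data->supported = Backend::kReferenceFloat;
  } else {
    data->supported = AcceleratorSupports(channels) ? Backend::kAccelerator
                                                    : Backend::kOptimizedCpu;
  }
}

int Eval(const OpData& data) {
  switch (data.supported) {
    case Backend::kAccelerator:    std::puts("run accelerator kernel");     return 0;
    case Backend::kOptimizedCpu:   std::puts("run optimized CPU kernel");   return 0;
    case Backend::kReferenceFloat: std::puts("run float reference kernel"); return 0;
  }
  return 1;
}

int main() {
  OpData data;
  Prepare(&data, /*is_int8=*/true, /*channels=*/8);
  return Eval(data);
}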
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +#if ESP_NN +#include "edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h" +#endif + +#include + +long long pooling_total_time = 0; + +namespace tflite { + +namespace { +#if ESP_NN +void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, + const TfLitePoolParams* params, const OpDataPooling* data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + + const int stride_height = params->stride_height; + const int stride_width = params->stride_width; + const int filter_height = params->filter_height; + const int filter_width = params->filter_width; + const int activation_min = data->activation_min; + const int activation_max = data->activation_max; + const int pad_height = data->padding.height; + const int pad_width = data->padding.width; + + const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input); + const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output); + TFLITE_DCHECK_LE(activation_min, activation_max); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + + const int8_t *input_data = tflite::micro::GetTensorData(input); + int8_t *output_data = tflite::micro::GetTensorData(output); + + const int input_size = input_width * input_height * depth; + const int output_size = output_width * output_height * depth; + + if (depth % 4 == 0) { // S3 version only supports channels multiple of 4 + for (int batch = 0; batch < batches; ++batch) { + esp_nn_avg_pool_s8(input_data, input_width, input_height, + output_data, output_width, output_height, + stride_width, stride_height, + filter_width, filter_height, + pad_width, pad_height, + activation_min, activation_max, depth); + input_data += input_size; + output_data += output_size; + } + } else { + for (int batch = 0; batch < batches; ++batch) { + esp_nn_avg_pool_s8_ansi(input_data, input_width, input_height, + output_data, output_width, output_height, + stride_width, stride_height, + filter_width, filter_height, + pad_width, pad_height, + activation_min, activation_max, depth); + input_data += input_size; + output_data += output_size; + } + } +} + +void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, const OpDataPooling* data, + const TfLiteEvalTensor* input, TfLiteEvalTensor* 
output) { + + const int stride_height = params->stride_height; + const int stride_width = params->stride_width; + const int filter_height = params->filter_height; + const int filter_width = params->filter_width; + const int activation_min = data->activation_min; + const int activation_max = data->activation_max; + const int pad_height = data->padding.height; + const int pad_width = data->padding.width; + + const RuntimeShape& input_shape = tflite::micro::GetTensorShape(input); + const RuntimeShape& output_shape = tflite::micro::GetTensorShape(output); + TFLITE_DCHECK_LE(activation_min, activation_max); + TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); + TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); + const int batches = MatchingDim(input_shape, 0, output_shape, 0); + const int depth = MatchingDim(input_shape, 3, output_shape, 3); + const int input_height = input_shape.Dims(1); + const int input_width = input_shape.Dims(2); + const int output_height = output_shape.Dims(1); + const int output_width = output_shape.Dims(2); + + const int8_t *input_data = tflite::micro::GetTensorData(input); + int8_t *output_data = tflite::micro::GetTensorData(output); + + const int input_size = input_width * input_height * depth; + const int output_size = output_width * output_height * depth; + if (depth % 4 == 0) { // S3 version only supports channels multiple of 4 + for (int batch = 0; batch < batches; ++batch) { + esp_nn_max_pool_s8(input_data, input_width, input_height, + output_data, output_width, output_height, + stride_width, stride_height, + filter_width, filter_height, + pad_width, pad_height, + activation_min, activation_max, depth); + input_data += input_size; + output_data += output_size; + } + } else { + for (int batch = 0; batch < batches; ++batch) { + esp_nn_max_pool_s8_ansi(input_data, input_width, input_height, + output_data, output_width, output_height, + stride_width, stride_height, + filter_width, filter_height, + pad_width, pad_height, + activation_min, activation_max, depth); + input_data += input_size; + output_data += output_size; + } + } +} +#endif + +TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataPooling* data = + static_cast(node->user_data); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + long long start_time = esp_timer_get_time(); + // Inputs and outputs share the same type, guaranteed by the converter. 
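+  // A note on the timing hook above: `pooling_total_time` (declared near the
+  // top of this translation unit) accumulates the microseconds spent in the
+  // pooling kernels across all invocations, via esp_timer_get_time(). A
+  // minimal sketch of how an application could inspect it after inference
+  // (the extern declaration and printf call are illustrative assumptions,
+  // not part of this file):
+  //
+  //   extern long long pooling_total_time;
+  //   printf("pooling kernels: %lld us\n", pooling_total_time);
+  //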
+ switch (input->type) { + case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + AveragePoolingEvalFloat(context, node, params, data, input, output); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif +#if ESP_NN + AverageEvalQuantized(context, node, params, data, input, output); +#else + AveragePoolingEvalQuantized(context, node, params, data, input, output); +#endif + break; + default: + TF_LITE_KERNEL_LOG(context, "Input type %s is not currently supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + pooling_total_time += esp_timer_get_time() - start_time; + return kTfLiteOk; +} + +TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataPooling* data = + static_cast(node->user_data); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + long long start_time = esp_timer_get_time(); + switch (input->type) { + case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + MaxPoolingEvalFloat(context, node, params, data, input, output); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif +#if ESP_NN + MaxEvalQuantized(context, node, params, data, input, output); +#else + MaxPoolingEvalQuantized(context, node, params, data, input, output); +#endif + break; + default: + TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + pooling_total_time += esp_timer_get_time() - start_time; + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataPooling)); +} + +} // namespace + +TfLiteRegistration Register_AVERAGE_POOL_2D() { + return tflite::micro::RegisterOp(Init, PoolingPrepare, AverageEval); +} + +TfLiteRegistration Register_MAX_POOL_2D() { + return tflite::micro::RegisterOp(Init, PoolingPrepare, MaxEval); +} + +} // namespace tflite + +#else +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataPooling* data = + static_cast(node->user_data); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + // Inputs and outputs share the same type, guaranteed by the converter. + switch (input->type) { + case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + AveragePoolingEvalFloat(context, node, params, data, input, output); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_AVERAGE_POOL_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + AveragePoolingEvalQuantized(context, node, params, data, input, + output); + break; + case kTfLiteInt16: + AveragePoolingEvalQuantized(context, node, params, data, input, + output); + break; + default: + MicroPrintf("Input type %s is not currently supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->builtin_data != nullptr); + auto* params = reinterpret_cast(node->builtin_data); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataPooling* data = + static_cast(node->user_data); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kPoolingInputTensor); + TfLiteEvalTensor* output = + micro::GetEvalOutput(context, node, kPoolingOutputTensor); + + switch (input->type) { + case kTfLiteFloat32: +#if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + MaxPoolingEvalFloat(context, node, params, data, input, output); + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_MAX_POOL_2D_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + MaxPoolingEvalQuantized(context, node, params, data, input, + output); + break; + case kTfLiteInt16: + MaxPoolingEvalQuantized(context, node, params, data, input, + output); + break; + default: + MicroPrintf("Type %s not currently supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataPooling)); +} + +} // namespace + +TfLiteRegistration Register_AVERAGE_POOL_2D() { + return tflite::micro::RegisterOp(Init, PoolingPrepare, AverageEval); +} + 
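+// Usage sketch: Register_AVERAGE_POOL_2D() above and Register_MAX_POOL_2D()
+// below are the hooks an op resolver binds to. Assuming the stock TFLM
+// MicroMutableOpResolver API (an assumption; the resolver itself is not part
+// of this patch), wiring the pooling ops up looks roughly like:
+//
+//   static tflite::MicroMutableOpResolver<2> resolver;
+//   resolver.AddAveragePool2D();
+//   resolver.AddMaxPool2D();
+//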
+TfLiteRegistration Register_MAX_POOL_2D() { + return tflite::micro::RegisterOp(Init, PoolingPrepare, MaxEval); +} + +} // namespace tflite + +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.cpp deleted file mode 100644 index 6005767..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.cpp +++ /dev/null @@ -1,1111 +0,0 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels -#include "../../../../classifier/ei_classifier_config.h" -#if 0 == 1 -/* noop */ -#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h" - -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" -#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/base.h" // from @flatbuffers -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pooling { - -namespace { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - TfLitePaddingValues padding; - // Index to buffer for optimizations if applicable. 
- int buffer_idx; - - int32_t activation_min; - int32_t activation_max; - float activation_min_f32; - float activation_max_f32; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, - const TfLitePoolParams* params, - const TfLiteTensor* input, TfLiteTensor* output, - OpData* data) { - // input: batch, height, width, channel - int height = SizeOfDimension(input, 1); - int width = SizeOfDimension(input, 2); - - int out_height, out_width; - - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - /*dilation_rate_height=*/1, - /*dilation_rate_width=*/1, height, width, params->filter_height, - params->filter_width, params->padding, &out_height, &out_width); - - if (input->type == kTfLiteFloat32) { - CalculateActivationRange(params->activation, &data->activation_min_f32, - &data->activation_max_f32); - } else { - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->activation_min, - &data->activation_max)); - TFLITE_DCHECK_LE(data->activation_min, data->activation_max); - } - - // Set buffer index to a reset value - data->buffer_idx = -1; - - return kTfLiteOk; -} - -void AverageEvalFloat(const TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - float activation_min, activation_max; - CalculateActivationRange(params->activation, &activation_min, - &activation_max); - - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.float_activation_min = activation_min; - op_params.float_activation_max = activation_max; - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { - TFLITE_DCHECK(input->type == kTfLiteUInt8 || input->type == kTfLiteInt8); - - if (input->type == kTfLiteUInt8) { - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.activation_min; - op_params.quantized_activation_max = data.activation_max; - - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - - cmsis_nn_dims input_dims; - input_dims.n = 1; - input_dims.h = input_shape.Dims(1); - input_dims.w = input_shape.Dims(2); - 
input_dims.c = depth; - - cmsis_nn_dims output_dims; - output_dims.n = 1; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = depth; - - cmsis_nn_pool_params pool_params; - pool_params.stride.h = params->stride_height; - pool_params.stride.w = params->stride_width; - pool_params.padding.h = data.padding.height; - pool_params.padding.w = data.padding.width; - pool_params.activation.min = data.activation_min; - pool_params.activation.max = data.activation_max; - - cmsis_nn_dims filter_dims; - filter_dims.n = 1; - filter_dims.h = params->filter_height; - filter_dims.w = params->filter_width; - filter_dims.c = 1; - - cmsis_nn_context ctx; - ctx.buf = nullptr; - ctx.size = 0; - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - } - - TFLITE_DCHECK_EQ( - arm_avgpool_s8(&ctx, &pool_params, &input_dims, - tflite::micro::GetTensorData(input), - &filter_dims, &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - } -} - -void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - float activation_min, activation_max; - CalculateActivationRange(params->activation, &activation_min, - &activation_max); - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.float_activation_min = data.activation_min_f32; - op_params.float_activation_max = data.activation_max_f32; - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void MaxEvalQuantizedUInt8(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.activation_min; - op_params.quantized_activation_max = data.activation_max; - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -TfLiteStatus MaxEvalInt8(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { - RuntimeShape input_shape = tflite::micro::GetTensorShape(input); - RuntimeShape output_shape = tflite::micro::GetTensorShape(output); - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - - cmsis_nn_dims input_dims; - input_dims.n = 1; - input_dims.h = input_shape.Dims(1); - input_dims.w = input_shape.Dims(2); - input_dims.c = depth; - - cmsis_nn_dims output_dims; - output_dims.n = 1; - output_dims.h = output_shape.Dims(1); - output_dims.w = output_shape.Dims(2); - output_dims.c = depth; - - 
cmsis_nn_pool_params pool_params; - pool_params.stride.h = params->stride_height; - pool_params.stride.w = params->stride_width; - pool_params.padding.h = data.padding.height; - pool_params.padding.w = data.padding.width; - pool_params.activation.min = data.activation_min; - pool_params.activation.max = data.activation_max; - - cmsis_nn_dims filter_dims; - filter_dims.n = 1; - filter_dims.h = params->filter_height; - filter_dims.w = params->filter_width; - filter_dims.c = 1; - - cmsis_nn_context ctx; - ctx.buf = nullptr; - ctx.size = 0; - if (data.buffer_idx > -1) { - ctx.buf = context->GetScratchBuffer(context, data.buffer_idx); - } - - TFLITE_DCHECK_EQ( - arm_max_pool_s8(&ctx, &pool_params, &input_dims, - tflite::micro::GetTensorData(input), &filter_dims, - &output_dims, - tflite::micro::GetTensorData(output)), - ARM_MATH_SUCCESS); - - return kTfLiteOk; -} - -} // namespace - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus MaxPrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, data)); - - return kTfLiteOk; -} - -TfLiteStatus AveragePrepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, data)); - - if (input->type == kTfLiteInt8) { - RuntimeShape input_shape = GetTensorShape(input); - TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4); - - RuntimeShape output_shape = GetTensorShape(output); - TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4); - - const int depth = MatchingDim(input_shape, 3, output_shape, 3); - const int output_width = output_shape.Dims(2); - - const int32_t buffer_size = - arm_avgpool_s8_get_buffer_size(output_width, depth); - - if (buffer_size > 0) { - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, buffer_size, &data->buffer_idx)); - } else { - data->buffer_idx = -1; - } - } - return kTfLiteOk; -} - -TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - // Inputs and outputs share the same type, guaranteed by the converter. 
- switch (input->type) { - case kTfLiteFloat32: - AverageEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - case kTfLiteInt8: - AverageEvalQuantized(context, node, params, data, input, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input type %s is not currently supported", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: - MaxEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - MaxEvalQuantizedUInt8(context, node, params, data, input, output); - break; - case kTfLiteInt8: - MaxEvalInt8(context, node, params, data, input, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace pooling - -TfLiteRegistration Register_AVERAGE_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::AveragePrepare, - /*invoke=*/pooling::AverageEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_MAX_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::MaxPrepare, - /*invoke=*/pooling::MaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#elif EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h" - -#include "mli_api.h" // NOLINT -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_slicers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/mli_tf_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pooling { - -namespace { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - TfLitePaddingValues padding; - int32_t activation_min; - int32_t activation_max; - float activation_min_f32; - float activation_max_f32; - - // The result of checking if MLI optimized version of tensors can be used. - bool is_mli_applicable; - - // Tensors in MLI format. - mli_tensor* mli_in; - mli_tensor* mli_out; - mli_pool_cfg* cfg; -}; - -enum MliPoolingType { AveragePooling = 0, MaxPooling = 1 }; - -bool IsMliApplicable(TfLiteContext* context, const TfLiteTensor* input, - const TfLitePoolParams* params) { - // MLI optimized version only supports int8_t datatype and no fused Relu - return (input->type == kTfLiteInt8 && params->activation == kTfLiteActNone); -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, - const TfLitePoolParams* params, - const TfLiteTensor* input, - const TfLiteTensor* output, OpData* data) { - // input: batch, height, width, channel - int height = SizeOfDimension(input, 1); - int width = SizeOfDimension(input, 2); - - int out_height, out_width; - - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - /*dilation_rate_height=*/1, - /*dilation_rate_width=*/1, height, width, params->filter_height, - params->filter_width, params->padding, &out_height, &out_width); - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - data->is_mli_applicable = IsMliApplicable(context, input, params); - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, data)); - - if (input->type == kTfLiteFloat32) { - CalculateActivationRange(params->activation, &data->activation_min_f32, - &data->activation_max_f32); - } else if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { - CalculateActivationRangeQuantized(context, params->activation, output, - 
&data->activation_min, - &data->activation_max); - } - - if (data->is_mli_applicable) { - data->mli_in = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->mli_out = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_tensor))); - data->cfg = static_cast( - context->AllocatePersistentBuffer(context, sizeof(mli_pool_cfg))); - - ops::micro::ConvertToMliTensor(input, data->mli_in); - ops::micro::ConvertToMliTensor(output, data->mli_out); - - data->cfg->kernel_width = params->filter_width; - data->cfg->kernel_height = params->filter_height; - data->cfg->stride_width = params->stride_width; - data->cfg->stride_height = params->stride_height; - - if (params->padding == kTfLitePaddingValid) { - data->cfg->padding_left = 0; - data->cfg->padding_right = 0; - data->cfg->padding_top = 0; - data->cfg->padding_bottom = 0; - } else { - data->cfg->padding_left = data->padding.width; - data->cfg->padding_right = - data->padding.width + data->padding.width_offset; - data->cfg->padding_top = data->padding.height; - data->cfg->padding_bottom = - data->padding.height + data->padding.height_offset; - } - } - return kTfLiteOk; -} - -void AverageEvalFloat(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - float activation_min, activation_max; - CalculateActivationRange(params->activation, &activation_min, - &activation_max); - - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.float_activation_min = activation_min; - op_params.float_activation_max = activation_max; - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -#else - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); -#endif -} - -// Prepare MLI tensors and run Average or Max Pooling -TfLiteStatus EvalMli(TfLiteContext* context, const TfLitePoolParams* params, - const OpData& data, const TfLiteEvalTensor* input, - TfLiteEvalTensor* output, - const MliPoolingType pooling_type) { - mli_pool_cfg cfg_local = *data.cfg; - - ops::micro::MliTensorAttachBuffer(input, data.mli_in); - ops::micro::MliTensorAttachBuffer(output, data.mli_out); - - const int height_dimension = 1; - int in_slice_height = 0; - int out_slice_height = 0; - const int overlap = cfg_local.kernel_height - cfg_local.stride_height; - - // Tensors for data in fast (local) memory and config to copy data from - // external to local memory - mli_tensor in_local = *data.mli_in; - mli_tensor out_local = *data.mli_out; - mli_mov_cfg_t copy_config; - mli_mov_cfg_for_copy(©_config); - TF_LITE_ENSURE_STATUS(get_arc_scratch_buffer_for_pooling_tensors( - context, &in_local, &out_local)); - bool in_is_local = in_local.data == data.mli_in->data; - bool out_is_local = out_local.data == data.mli_out->data; - TF_LITE_ENSURE_STATUS(arc_scratch_buffer_calc_slice_size_io( - &in_local, &out_local, cfg_local.kernel_height, cfg_local.stride_height, - cfg_local.padding_top, 
cfg_local.padding_bottom, &in_slice_height, - &out_slice_height)); - - /* mli_in tensor contains batches of HWC tensors. so it is a 4 dimensional - tensor. because the mli kernel will process one HWC tensor at a time, the 4 - dimensional tensor needs to be sliced into nBatch 3 dimensional tensors. on - top of that there could be a need to also slice in the Height dimension. - for that the sliceHeight has been calculated. The tensor slicer is - configured that it will completely slice the nBatch dimension (0) and slice - the height dimension (1) in chunks of 'sliceHeight' */ - TensorSlicer in_slice(data.mli_in, height_dimension, in_slice_height, - cfg_local.padding_top, cfg_local.padding_bottom, - overlap); - TensorSlicer out_slice(data.mli_out, height_dimension, out_slice_height); - - /* is_local indicates that the tensor is already in local memory, - so in that case the original tensor can be used, - and there is no need to copy it to the local tensor*/ - mli_tensor* in_ptr = in_is_local ? in_slice.Sub() : &in_local; - mli_tensor* out_ptr = out_is_local ? out_slice.Sub() : &out_local; - - while (!out_slice.Done()) { - cfg_local.padding_top = in_slice.GetPaddingPre(); - cfg_local.padding_bottom = in_slice.GetPaddingPost(); - - mli_mov_tensor_sync(in_slice.Sub(), ©_config, in_ptr); - if (pooling_type == AveragePooling) - mli_krn_avepool_hwc_sa8(in_ptr, &cfg_local, out_ptr); - else if (pooling_type == MaxPooling) - mli_krn_maxpool_hwc_sa8(in_ptr, &cfg_local, out_ptr); - mli_mov_tensor_sync(out_ptr, ©_config, out_slice.Sub()); - - in_slice.Next(); - out_slice.Next(); - } - return kTfLiteOk; -} - -void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - TFLITE_DCHECK(input->type == kTfLiteUInt8 || input->type == kTfLiteInt8); - - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.activation_min; - op_params.quantized_activation_max = data.activation_max; - - if (input->type == kTfLiteUInt8) { - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::AveragePool( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -#else - TF_LITE_KERNEL_LOG(context, - "Type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); -#endif -} - -void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = 
data.padding.width; - op_params.float_activation_min = data.activation_min_f32; - op_params.float_activation_max = data.activation_max_f32; - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -#else - TF_LITE_KERNEL_LOG( - context, - "Node configuration or type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); -#endif -} - -void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData& data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { -#if !defined(TF_LITE_STRIP_REFERENCE_IMPL) - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data.padding.height; - op_params.padding_values.width = data.padding.width; - op_params.quantized_activation_min = data.activation_min; - op_params.quantized_activation_max = data.activation_max; - - if (input->type == kTfLiteUInt8) { - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::MaxPool( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -#else - TF_LITE_KERNEL_LOG( - context, - "Node configuration or type %s (%d) is not supported by ARC MLI Library.", - TfLiteTypeGetName(input->type), input->type); -#endif -} -} // namespace - -TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - // Inputs and outputs share the same type, guaranteed by the converter. 
- switch (input->type) { - case kTfLiteFloat32: - AverageEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - case kTfLiteInt8: - if (data.is_mli_applicable) { - EvalMli(context, params, data, input, output, AveragePooling); - } else { - AverageEvalQuantized(context, node, params, data, input, output); - } - break; - default: - TF_LITE_KERNEL_LOG(context, "Input type %s is not currently supported", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - switch (input->type) { - case kTfLiteFloat32: - MaxEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - case kTfLiteInt8: - if (data.is_mli_applicable) { - EvalMli(context, params, data, input, output, MaxPooling); - } else { - MaxEvalQuantized(context, node, params, data, input, output); - } - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace pooling - -TfLiteRegistration Register_AVERAGE_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::Prepare, - /*invoke=*/pooling::AverageEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_MAX_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::Prepare, - /*invoke=*/pooling::MaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#else -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace pooling { - -namespace { - -constexpr int kInputTensor = 0; -constexpr int kOutputTensor = 0; - -struct OpData { - TfLitePaddingValues padding; - int32_t activation_min; - int32_t activation_max; - float activation_min_f32; - float activation_max_f32; -}; - -TfLiteStatus CalculateOpData(const TfLiteContext* context, - const TfLitePoolParams* params, - const TfLiteTensor* input, - const TfLiteTensor* output, OpData* data) { - // input: batch, height, width, channel - int height = SizeOfDimension(input, 1); - int width = SizeOfDimension(input, 2); - - int out_height, out_width; - - data->padding = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - /*dilation_rate_height=*/1, - /*dilation_rate_width=*/1, height, width, params->filter_height, - params->filter_width, params->padding, &out_height, &out_width); - - return kTfLiteOk; -} - -void AverageEvalFloat(const TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData* data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data->padding.height; - op_params.padding_values.width = data->padding.width; - op_params.float_activation_min = data->activation_min_f32; - op_params.float_activation_max = data->activation_max_f32; - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void AverageEvalQuantized(TfLiteContext* context, const TfLiteNode* node, - const TfLitePoolParams* params, const OpData* data, - const TfLiteEvalTensor* input, - TfLiteEvalTensor* output) { - TFLITE_DCHECK(input->type == kTfLiteUInt8 || input->type == kTfLiteInt8); - - PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data->padding.height; - op_params.padding_values.width = data->padding.width; - op_params.quantized_activation_min = data->activation_min; - op_params.quantized_activation_max = data->activation_max; - - if (input->type == kTfLiteUInt8) { - reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::AveragePool( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - 
tflite::micro::GetTensorData(output)); - } -} - -void MaxEvalFloat(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData* data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data->padding.height; - op_params.padding_values.width = data->padding.width; - op_params.float_activation_min = data->activation_min_f32; - op_params.float_activation_max = data->activation_max_f32; - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); -} - -void MaxEvalQuantized(TfLiteContext* context, TfLiteNode* node, - TfLitePoolParams* params, const OpData* data, - const TfLiteEvalTensor* input, TfLiteEvalTensor* output) { - tflite::PoolParams op_params; - op_params.stride_height = params->stride_height; - op_params.stride_width = params->stride_width; - op_params.filter_height = params->filter_height; - op_params.filter_width = params->filter_width; - op_params.padding_values.height = data->padding.height; - op_params.padding_values.width = data->padding.width; - op_params.quantized_activation_min = data->activation_min; - op_params.quantized_activation_max = data->activation_max; - - if (input->type == kTfLiteUInt8) { - reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - reference_integer_ops::MaxPool( - op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} -} // namespace - -TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - // Inputs and outputs share the same type, guaranteed by the converter. 
- switch (input->type) { - case kTfLiteFloat32: - AverageEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - case kTfLiteInt8: - AverageEvalQuantized(context, node, params, data, input, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Input type %s is not currently supported", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData* data = static_cast(node->user_data); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (input->type) { - case kTfLiteFloat32: - MaxEvalFloat(context, node, params, data, input, output); - break; - case kTfLiteUInt8: - case kTfLiteInt8: - MaxEvalQuantized(context, node, params, data, input, output); - break; - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - auto* params = reinterpret_cast(node->builtin_data); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_STATUS(CalculateOpData(context, params, input, output, data)); - - if (input->type == kTfLiteFloat32) { - CalculateActivationRange(params->activation, &data->activation_min_f32, - &data->activation_max_f32); - } else if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { - CalculateActivationRangeQuantized(context, params->activation, output, - &data->activation_min, - &data->activation_max); - } - - return kTfLiteOk; -} - -} // namespace pooling - -TfLiteRegistration Register_AVERAGE_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::Prepare, - /*invoke=*/pooling::AverageEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_MAX_POOL_2D() { - return {/*init=*/pooling::Init, - /*free=*/nullptr, - /*prepare=*/pooling::Prepare, - /*invoke=*/pooling::MaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h new file mode 100644 index 0000000..d33aa23 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h @@ -0,0 +1,142 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#ifndef TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_
+#define TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_
+
+#include <cstdint>
+
+#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h"
+#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h"
+
+namespace tflite {
+
+extern const int kPoolingInputTensor;
+extern const int kPoolingOutputTensor;
+
+struct OpDataPooling {
+  TfLitePaddingValues padding;
+  int32_t activation_min;
+  int32_t activation_max;
+  float activation_min_f32;
+  float activation_max_f32;
+};
+
+TfLiteStatus CalculateOpDataPooling(const TfLiteContext* context,
+                                    const TfLitePoolParams* params,
+                                    const TfLiteTensor* input,
+                                    const TfLiteTensor* output,
+                                    OpDataPooling* data);
+
+TfLiteStatus PoolingPrepare(TfLiteContext* context, TfLiteNode* node);
+
+void AveragePoolingEvalFloat(const TfLiteContext* context,
+                             const TfLiteNode* node,
+                             const TfLitePoolParams* params,
+                             const OpDataPooling* data,
+                             const TfLiteEvalTensor* input,
+                             TfLiteEvalTensor* output);
+
+template <typename T>
+void AveragePoolingEvalQuantized(TfLiteContext* context, const TfLiteNode* node,
+                                 const TfLitePoolParams* params,
+                                 const OpDataPooling* data,
+                                 const TfLiteEvalTensor* input,
+                                 TfLiteEvalTensor* output) {
+  TFLITE_DCHECK(input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
+
+  PoolParams op_params;
+  op_params.stride_height = params->stride_height;
+  op_params.stride_width = params->stride_width;
+  op_params.filter_height = params->filter_height;
+  op_params.filter_width = params->filter_width;
+  op_params.padding_values.height = data->padding.height;
+  op_params.padding_values.width = data->padding.width;
+  op_params.quantized_activation_min = data->activation_min;
+  op_params.quantized_activation_max = data->activation_max;
+
+  reference_integer_ops::AveragePool(op_params,
+                                     tflite::micro::GetTensorShape(input),
+                                     tflite::micro::GetTensorData<T>(input),
+                                     tflite::micro::GetTensorShape(output),
+                                     tflite::micro::GetTensorData<T>(output));
+}
+
+void MaxPoolingEvalFloat(TfLiteContext* context, TfLiteNode* node,
+                         TfLitePoolParams* params, const OpDataPooling* data,
+                         const TfLiteEvalTensor* input,
+                         TfLiteEvalTensor* output);
+
+template <typename T>
+void MaxPoolingEvalQuantized(TfLiteContext* context, TfLiteNode* node,
+                             TfLitePoolParams* params,
+                             const OpDataPooling* data,
+                             const TfLiteEvalTensor* input,
+                             TfLiteEvalTensor* output) {
+  TFLITE_DCHECK(input->type == kTfLiteInt8 || input->type == kTfLiteInt16);
+
+  tflite::PoolParams op_params;
+  op_params.stride_height = params->stride_height;
+  op_params.stride_width = params->stride_width;
+  op_params.filter_height = params->filter_height;
+  op_params.filter_width = params->filter_width;
+  op_params.padding_values.height = data->padding.height;
+  op_params.padding_values.width = data->padding.width;
+  op_params.quantized_activation_min = data->activation_min;
+  op_params.quantized_activation_max = data->activation_max;
+
+  reference_integer_ops::MaxPool(op_params,
+                                 tflite::micro::GetTensorShape(input),
+                                 tflite::micro::GetTensorData<T>(input),
+                                 tflite::micro::GetTensorShape(output),
+                                 tflite::micro::GetTensorData<T>(output));
+}
+
+#if defined(CMSIS_NN)
+TfLiteRegistration Register_AVERAGE_POOL_2D_INT8();
+
+TfLiteRegistration Register_MAX_POOL_2D_INT8();
+
+TfLiteRegistration Register_AVERAGE_POOL_2D_INT16();
+
+TfLiteRegistration Register_MAX_POOL_2D_INT16();
+#else
+inline TfLiteRegistration Register_AVERAGE_POOL_2D_INT8() {
+  return tflite::Register_AVERAGE_POOL_2D();
+}
+
+inline TfLiteRegistration Register_MAX_POOL_2D_INT8() {
+  return tflite::Register_MAX_POOL_2D();
+}
+
+inline TfLiteRegistration Register_AVERAGE_POOL_2D_INT16() {
+  return tflite::Register_AVERAGE_POOL_2D();
+}
+
+inline TfLiteRegistration Register_MAX_POOL_2D_INT16() {
+  return tflite::Register_MAX_POOL_2D();
+}
+#endif
+} // namespace tflite
+
+#endif  // TENSORFLOW_LITE_MICRO_KERNELS_POOLING_H_
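The header above keeps the quantized eval paths as templates, so each caller instantiates them for the element type it has already checked. A minimal sketch of the intended call pattern, mirroring how the reference kernel in pooling.cpp dispatches on tensor type (the surrounding variable names are assumed from that kernel, not defined here):

    // inside a MaxPool Eval, after params/data/input/output are resolved:
    if (input->type == kTfLiteInt8) {
      MaxPoolingEvalQuantized<int8_t>(context, node, params, data, input, output);
    } else if (input->type == kTfLiteInt16) {
      MaxPoolingEvalQuantized<int16_t>(context, node, params, data, input, output);
    }
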
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling_common.cc
new file mode 100644
index 0000000..8eb66e7
--- /dev/null
+++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling_common.cc
@@ -0,0 +1,128 @@
+/* Copyright 2022 The TensorFlow Authors. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/pooling.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h"
+
+namespace tflite {
+
+const int kPoolingInputTensor = 0;
+const int kPoolingOutputTensor = 0;
+
+TfLiteStatus CalculateOpDataPooling(const TfLiteContext* context,
+                                    const TfLitePoolParams* params,
+                                    const TfLiteTensor* input,
+                                    const TfLiteTensor* output,
+                                    OpDataPooling* data) {
+  // input: batch, height, width, channel
+  int height = SizeOfDimension(input, 1);
+  int width = SizeOfDimension(input, 2);
+
+  int out_height, out_width;
+
+  data->padding = ComputePaddingHeightWidth(
+      params->stride_height, params->stride_width,
+      /*dilation_rate_height=*/1,
+      /*dilation_rate_width=*/1, height, width, params->filter_height,
+      params->filter_width, params->padding, &out_height, &out_width);
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus PoolingPrepare(TfLiteContext* context, TfLiteNode* node) {
+  TFLITE_DCHECK(node->builtin_data != nullptr);
+  auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data);
+
+  TFLITE_DCHECK(node->user_data != nullptr);
+  OpDataPooling* data = static_cast<OpDataPooling*>(node->user_data);
+
+  MicroContext* micro_context = GetMicroContext(context);
+
+  TfLiteTensor* input =
+      micro_context->AllocateTempInputTensor(node, kPoolingInputTensor);
+  TF_LITE_ENSURE(context, input != nullptr);
+  TfLiteTensor* output =
+      micro_context->AllocateTempOutputTensor(node, kPoolingOutputTensor);
+  TF_LITE_ENSURE(context, output != nullptr);
+
+  TF_LITE_ENSURE_STATUS(
+      CalculateOpDataPooling(context, params, input, output, data));
+
+  if (input->type == kTfLiteFloat32) {
+    CalculateActivationRange(params->activation, &data->activation_min_f32,
+                             &data->activation_max_f32);
+  } else if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) {
+    CalculateActivationRangeQuantized(context, params->activation, output,
+                                      &data->activation_min,
+                                      &data->activation_max);
+  } else {
+    MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type),
+                input->type);
+    return kTfLiteError;
+  }
+
+  micro_context->DeallocateTempTfLiteTensor(input);
+  micro_context->DeallocateTempTfLiteTensor(output);
+
+  return kTfLiteOk;
+}
+
+void AveragePoolingEvalFloat(const TfLiteContext* context,
+                             const TfLiteNode* node,
+                             const TfLitePoolParams* params,
+                             const OpDataPooling* data,
+                             const TfLiteEvalTensor* input,
+                             TfLiteEvalTensor* output) {
+  PoolParams op_params;
+  op_params.stride_height = params->stride_height;
+  op_params.stride_width = params->stride_width;
+  op_params.filter_height = params->filter_height;
+  op_params.filter_width = params->filter_width;
+  op_params.padding_values.height = data->padding.height;
+  op_params.padding_values.width = data->padding.width;
+  op_params.float_activation_min = data->activation_min_f32;
+  op_params.float_activation_max = data->activation_max_f32;
+  reference_ops::AveragePool(op_params, tflite::micro::GetTensorShape(input),
+                             tflite::micro::GetTensorData<float>(input),
+                             tflite::micro::GetTensorShape(output),
+                             tflite::micro::GetTensorData<float>(output));
+}
+
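+// The float clamp values used above and in MaxPoolingEvalFloat below are
+// filled in by CalculateActivationRange() inside PoolingPrepare(). As a
+// reminder (standard TFLite behaviour, stated here for illustration rather
+// than defined by this file):
+//
+//   kTfLiteActNone  -> [std::numeric_limits<float>::lowest(), max()]
+//   kTfLiteActRelu  -> [0.0f, max()]
+//   kTfLiteActRelu6 -> [0.0f, 6.0f]
+//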
tflite::micro::GetTensorData(output)); +} + +void MaxPoolingEvalFloat(TfLiteContext* context, TfLiteNode* node, + TfLitePoolParams* params, const OpDataPooling* data, + const TfLiteEvalTensor* input, + TfLiteEvalTensor* output) { + tflite::PoolParams op_params; + op_params.stride_height = params->stride_height; + op_params.stride_width = params->stride_width; + op_params.filter_height = params->filter_height; + op_params.filter_width = params->filter_width; + op_params.padding_values.height = data->padding.height; + op_params.padding_values.width = data->padding.width; + op_params.float_activation_min = data->activation_min_f32; + op_params.float_activation_max = data->activation_max_f32; + reference_ops::MaxPool(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.cc new file mode 100644 index 0000000..bceb7ff --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.cc @@ -0,0 +1,75 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
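Note on the float pooling paths above: PoolingPrepare asks ComputePaddingHeightWidth for the output extent and padding, and the Eval functions then clamp every pooled value to [activation_min_f32, activation_max_f32] derived from the fused activation (for example RELU6 clamps to [0, 6]). The sizing follows the usual SAME/VALID convention; the helpers below are a minimal standalone sketch of that arithmetic with illustrative names, not functions from this SDK.

#include <algorithm>

// Illustrative only: output extent for one spatial axis, dilation == 1.
constexpr int OutSizeSame(int in, int stride) {   // SAME: ceil(in / stride)
  return (in + stride - 1) / stride;
}
constexpr int OutSizeValid(int in, int filter, int stride) {  // VALID
  return (in - filter + stride) / stride;
}
// Total padding needed so `out` windows of `filter` taps, spaced `stride`
// apart, cover the input; never negative.
inline int TotalPadding(int in, int out, int filter, int stride) {
  return std::max(0, (out - 1) * stride + filter - in);
}
// Example: in = 5, filter = 3, stride = 2 -> SAME: out = 3, padding = 2;
// VALID: out = 2, padding = 0.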
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/prelu.h" + +#include <cstdint> + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(PreluParams)); +} + +TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const PreluParams& params = + *(static_cast<PreluParams*>(node->user_data)); + + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* alpha = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + switch (input->type) { + case kTfLiteFloat32: { + BroadcastPrelu4DSlowFloat(tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData<float>(input), + tflite::micro::GetTensorShape(alpha), + tflite::micro::GetTensorData<float>(alpha), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData<float>(output)); + return kTfLiteOk; + } break; + case kTfLiteInt8: { + reference_ops::BroadcastPrelu4DSlow( + params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData<int8_t>(input), + tflite::micro::GetTensorShape(alpha), + tflite::micro::GetTensorData<int8_t>(alpha), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData<int8_t>(output)); + return kTfLiteOk; + } break; + default: + MicroPrintf("Only float32 and uint8_t are supported currently, got %d.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } +} + +TfLiteRegistration Register_PRELU() { + return tflite::micro::RegisterOp(PreluInit, PreluPrepare, PreluEval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.h new file mode 100644 index 0000000..d5b780a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.h @@ -0,0 +1,39 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License.
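Note on the PReLU kernel above: it computes f(x) = x for x >= 0 and f(x) = alpha * x otherwise, with alpha broadcast against the input. The sketch below shows one way the registration function could be pulled into an interpreter via TensorFlow Lite Micro's MicroMutableOpResolver; the include path, op count, and function name are illustrative assumptions, not code from this patch.

// Illustrative resolver wiring; exact include path in this SDK may differ.
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Reserve one slot per operator the model actually uses (3 here as an example).
static tflite::MicroMutableOpResolver<3> resolver;

void RegisterExampleOps() {
  resolver.AddPrelu();     // ultimately uses Register_PRELU() shown above
  resolver.AddQuantize();  // Register_QUANTIZE()
  resolver.AddMean();      // Register_MEAN()
}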
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +TfLiteStatus CalculatePreluParams(const TfLiteTensor* input, + const TfLiteTensor* alpha, + TfLiteTensor* output, PreluParams* params); + +void BroadcastPrelu4DSlowFloat(const RuntimeShape& unextended_input1_shape, + const float* input1_data, + const RuntimeShape& unextended_input2_shape, + const float* input2_data, + const RuntimeShape& unextended_output_shape, + float* output_data); + +TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_PRELU_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu_common.cc similarity index 50% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu_common.cc index b53956f..8c1f2ef 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu_common.cc @@ -1,4 +1,4 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,27 +13,22 @@ See the License for the specific language governing permissions and limitations under the License. 
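Note on CalculatePreluParams declared in prelu.h above: the PreluParams it fills in hold two fixed-point rescale factors, one for the identity branch (x >= 0) and one for the alpha branch, because the quantized op must map the input scale (and the input*alpha scale) onto the output scale. A minimal sketch of that arithmetic, with per-tensor scales s_in, s_alpha, s_out; names are illustrative, the real code works from TfLiteTensor::params.scale as shown in prelu_common.cc below.

// Sketch of the two effective PReLU scales before fixed-point quantization.
struct PreluScales {
  double identity;      // applied when x >= 0
  double alpha_branch;  // applied when x < 0, after multiplying by alpha
};

PreluScales EffectivePreluScales(double s_in, double s_alpha, double s_out) {
  return {s_in / s_out, s_in * s_alpha / s_out};
}
// prelu_common.cc then passes each value through QuantizeMultiplier() to get
// the 32-bit multiplier + shift pairs stored in PreluParams.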
==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/prelu.h" - #include #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/prelu.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/prelu.h" namespace tflite { -namespace ops { -namespace micro { -namespace activations { -namespace { TfLiteStatus CalculatePreluParams(const TfLiteTensor* input, const TfLiteTensor* alpha, TfLiteTensor* output, PreluParams* params) { - if (output->type == kTfLiteInt8 || output->type == kTfLiteUInt8 || - output->type == kTfLiteInt16) { + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { double real_multiplier_1 = static_cast(input->params.scale) / static_cast(output->params.scale); double real_multiplier_2 = static_cast(input->params.scale) * @@ -52,12 +47,12 @@ TfLiteStatus CalculatePreluParams(const TfLiteTensor* input, return kTfLiteOk; } -} // namespace - -inline void BroadcastPrelu4DSlowFloat( - const RuntimeShape& unextended_input1_shape, const float* input1_data, - const RuntimeShape& unextended_input2_shape, const float* input2_data, - const RuntimeShape& unextended_output_shape, float* output_data) { +void BroadcastPrelu4DSlowFloat(const RuntimeShape& unextended_input1_shape, + const float* input1_data, + const RuntimeShape& unextended_input2_shape, + const float* input2_data, + const RuntimeShape& unextended_output_shape, + float* output_data) { TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4); TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4); @@ -85,85 +80,26 @@ inline void BroadcastPrelu4DSlowFloat( } } -void* PreluInit(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(PreluParams)); -} - TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) { TFLITE_DCHECK(node->user_data != nullptr); PreluParams* params = static_cast(node->user_data); - const TfLiteTensor* input = GetInput(context, node, 0); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* alpha = GetInput(context, node, 1); + TfLiteTensor* alpha = micro_context->AllocateTempInputTensor(node, 1); TF_LITE_ENSURE(context, alpha != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); TF_LITE_ENSURE(context, output != nullptr); - return CalculatePreluParams(input, alpha, output, params); -} - -TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - const PreluParams& params = - *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* alpha = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 
0); - - switch (input->type) { - case kTfLiteFloat32: { - BroadcastPrelu4DSlowFloat(tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteUInt8: { - reference_ops::BroadcastPrelu4DSlow( - params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteInt8: { - reference_ops::BroadcastPrelu4DSlow( - params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(alpha), - tflite::micro::GetTensorData(alpha), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - default: - TF_LITE_KERNEL_LOG( - context, "Only float32 and uint8_t are supported currently, got %d.", - TfLiteTypeGetName(input->type)); - return kTfLiteError; - } -} + TF_LITE_ENSURE_OK(context, + CalculatePreluParams(input, alpha, output, params)); -} // namespace activations - -TfLiteRegistration Register_PRELU() { - return {/*init=*/activations::PreluInit, - /*free=*/nullptr, - /*prepare=*/activations::PreluPrepare, - /*invoke=*/activations::PreluEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(alpha); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize.cc similarity index 84% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize.cc index ad02ec3..487f502 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize.cc @@ -34,14 +34,8 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { } // namespace TfLiteRegistration Register_QUANTIZE() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/PrepareQuantizeReference, - /*invoke=*/EvalQuantizeReference, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, PrepareQuantizeReference, + EvalQuantizeReference); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize_common.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize_common.cc similarity index 61% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize_common.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize_common.cc index 
29bc063..5ba29f4 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize_common.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize_common.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ +#include + #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/quantize.h" @@ -21,6 +23,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/quantize.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" namespace tflite { @@ -33,9 +36,11 @@ TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, 0); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, 0); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); TF_LITE_ENSURE(context, output != nullptr); // TODO(b/128934713): Add support for fixed-point per-channel quantization. @@ -48,18 +53,25 @@ TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); - TF_LITE_ENSURE(context, input->type == kTfLiteFloat32 || - input->type == kTfLiteInt16 || - input->type == kTfLiteInt8); + TF_LITE_ENSURE( + context, input->type == kTfLiteFloat32 || input->type == kTfLiteInt32 || + input->type == kTfLiteInt16 || input->type == kTfLiteInt8 || + input->type == kTfLiteUInt8); TF_LITE_ENSURE(context, output->type == kTfLiteInt8 || output->type == kTfLiteInt16 || - output->type == kTfLiteInt32); + output->type == kTfLiteInt32 || + output->type == kTfLiteUInt8); if ((input->type == kTfLiteInt16 && output->type == kTfLiteInt8) || (input->type == kTfLiteInt8 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && output->type == kTfLiteUInt8) || + (input->type == kTfLiteUInt8 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt8 && output->type == kTfLiteInt16) || (input->type == kTfLiteInt8 && output->type == kTfLiteInt32) || (input->type == kTfLiteInt16 && output->type == kTfLiteInt16) || - (input->type == kTfLiteInt16 && output->type == kTfLiteInt32)) { + (input->type == kTfLiteInt16 && output->type == kTfLiteInt32) || + (input->type == kTfLiteInt32 && output->type == kTfLiteInt8) || + (input->type == kTfLiteInt32 && output->type == kTfLiteInt16)) { double effective_scale = static_cast(input->params.scale) / static_cast(output->params.scale); @@ -71,6 +83,9 @@ TfLiteStatus PrepareQuantizeReference(TfLiteContext* context, data->quantization_params.scale = static_cast(output->params.scale); data->input_zero_point = input->params.zero_point; + + micro_context->DeallocateTempTfLiteTensor(input); + 
micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -98,9 +113,32 @@ TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); return kTfLiteOk; default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteInt32) { + size_t size = ElementCount(*input->dims); + switch (output->type) { + case kTfLiteInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else if (input->type == kTfLiteInt16) { @@ -128,9 +166,9 @@ TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); return kTfLiteOk; default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else if (input->type == kTfLiteInt8) { @@ -145,6 +183,20 @@ TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node) { data->input_zero_point, data->quantization_params.zero_point, tflite::micro::GetTensorData(output)); break; + case kTfLiteUInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + tflite::micro::GetTensorData(output)); + break; case kTfLiteInt32: reference_ops::Requantize( tflite::micro::GetTensorData(input), size, @@ -153,15 +205,31 @@ TfLiteStatus EvalQuantizeReference(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); break; default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + } else if (input->type == kTfLiteUInt8) { + size_t size = ElementCount(*input->dims); + switch (output->type) { + case kTfLiteInt8: + reference_ops::Requantize( + tflite::micro::GetTensorData(input), size, + data->requantize_output_multiplier, data->requantize_output_shift, + data->input_zero_point, data->quantization_params.zero_point, + 
tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type)); return kTfLiteError; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/read_variable.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/read_variable.cc new file mode 100644 index 0000000..ba1fe4a --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/read_variable.cc @@ -0,0 +1,87 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +constexpr int kInputVariableId = 0; +constexpr int kOutputValue = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(NumInputs(node) == 1); + TFLITE_DCHECK(NumOutputs(node) == 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input_resource_id_tensor = + micro_context->AllocateTempInputTensor(node, kInputVariableId); + + TFLITE_DCHECK(input_resource_id_tensor != nullptr); + TFLITE_DCHECK(input_resource_id_tensor->type == kTfLiteResource); + TFLITE_DCHECK(NumElements(input_resource_id_tensor) == 1); + + micro_context->DeallocateTempTfLiteTensor(input_resource_id_tensor); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input_resource_id_tensor = + tflite::micro::GetEvalInput(context, node, kInputVariableId); + TFLITE_DCHECK(input_resource_id_tensor != nullptr); + + TfLiteEvalTensor* output_value = + tflite::micro::GetEvalOutput(context, node, kOutputValue); + TFLITE_DCHECK(output_value != nullptr); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + MicroResourceVariables* 
resources = graph_info.GetResourceVariables(); + if (resources == nullptr) { + MicroPrintf( + "READ_VARIABLE requires resource variables. Please create " + "ResourceVariables and pass it to the interpreter."); + return kTfLiteError; + } + TF_LITE_ENSURE_OK( + context, + resources->Read(input_resource_id_tensor->data.i32[0], output_value)); + return kTfLiteOk; +} + +} // namespace. + +TfLiteRegistration Register_READ_VARIABLE() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/real.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/real.cc new file mode 100644 index 0000000..6ec5aad --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/real.cc @@ -0,0 +1,134 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include +#include +#include +#include + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace ops { +namespace micro { +namespace real { + +using std::complex; + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + if (input->type != kTfLiteComplex64 || output->type != kTfLiteFloat32) { + TF_LITE_KERNEL_LOG(context, "Types input %s (%d), output %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type, + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + } + + size_t total_input_els = 1; + for (size_t dim_ix = 0; dim_ix < input->dims->size; dim_ix++) { + total_input_els *= input->dims->data[dim_ix]; + } + + size_t total_output_els = 1; + for (size_t dim_ix = 0; dim_ix < output->dims->size; dim_ix++) { + total_output_els *= output->dims->data[dim_ix]; + } + + TFLITE_DCHECK(total_input_els == total_output_els); + + return kTfLiteOk; +} + +TfLiteStatus RealEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + 
TF_LITE_ENSURE(context, output != nullptr); + + size_t total_input_els = 1; + for (size_t dim_ix = 0; dim_ix < input->dims->size; dim_ix++) { + total_input_els *= input->dims->data[dim_ix]; + } + + for (size_t ix = 0; ix < total_input_els; ix++) { + output->data.f[ix] = input->data.c64[ix].re; + } + + return kTfLiteOk; +} + +TfLiteStatus ImagEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + size_t total_input_els = 1; + for (size_t dim_ix = 0; dim_ix < input->dims->size; dim_ix++) { + total_input_els *= input->dims->data[dim_ix]; + } + + for (size_t ix = 0; ix < total_input_els; ix++) { + output->data.f[ix] = input->data.c64[ix].im; + } + + return kTfLiteOk; +} + +} // namespace real +} // namespace micro +} // namespace ops + +TfLiteRegistration Register_REAL() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/ops::micro::real::Prepare, + /*invoke=*/ops::micro::real::RealEval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +TfLiteRegistration Register_IMAG() { + return {/*init=*/nullptr, + /*free=*/nullptr, + /*prepare=*/ops::micro::real::Prepare, + /*invoke=*/ops::micro::real::ImagEval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cc new file mode 100644 index 0000000..b346282 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cc @@ -0,0 +1,86 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/reduce.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +void* InitReduce(TfLiteContext* context, const char* buffer, size_t length) { + return context->AllocatePersistentBuffer(context, sizeof(OpDataReduce)); +} + +TfLiteStatus PrepareMax(TfLiteContext* context, TfLiteNode* node) { + return PrepareMinMaxHelper(context, node, + static_cast(node->user_data)); +} + +TfLiteStatus PrepareMin(TfLiteContext* context, TfLiteNode* node) { + return PrepareMinMaxHelper(context, node, + static_cast(node->user_data)); +} + +TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { + return PrepareMeanOrSumHelper(context, node, + static_cast(node->user_data)); +} + +TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) { + return EvalMeanHelper(context, node, + static_cast(node->user_data)); +} + +TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) { + OpDataReduce* op_data = static_cast(node->user_data); + return EvalMaxHelper(context, node, op_data); +} + +TfLiteStatus EvalMin(TfLiteContext* context, TfLiteNode* node) { + OpDataReduce* op_data = static_cast(node->user_data); + return EvalMinHelper(context, node, op_data); +} + +TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) { + return EvalSumHelper(context, node, + static_cast(node->user_data)); +} + +TfLiteRegistration Register_MEAN() { + return tflite::micro::RegisterOp(InitReduce, PrepareMeanOrSum, EvalMean); +} + +TfLiteRegistration Register_REDUCE_MAX() { + return tflite::micro::RegisterOp(InitReduce, PrepareMax, EvalMax); +} + +TfLiteRegistration Register_REDUCE_MIN() { + return tflite::micro::RegisterOp(InitReduce, PrepareMin, EvalMin); +} + +TfLiteRegistration Register_SUM() { + return tflite::micro::RegisterOp(InitReduce, PrepareMeanOrSum, EvalSum); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cpp deleted file mode 100644 index 4b94b45..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.cpp +++ /dev/null @@ -1,342 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/reduce.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace reduce { - -constexpr int kMaxNumberOfAxis = 4; -constexpr int kMaxNumberOfReducedAxis = 2; - -struct OpData { - int32_t multiplier; - int shift; - int temp_buffer_idx; - int resolved_axis_idx; - int input_zp; - float input_scale; - int output_zp; - float output_scale; - int num_output_elements; -}; - -void* InitReduce(TfLiteContext* context, const char* buffer, size_t length) { - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) { - // Inputs Tensor (dtype depends on quantization): - // [0] = Input - // [1] = Axis - const TfLiteTensor* input = GetInput(context, node, 0); - - // Outputs Tensor (dtype depends on quantization): - // [0] = Output - - // Validate number of inputs and outputs - TF_LITE_ENSURE_EQ(context, node->inputs->size, 2); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Validate axis type - const TfLiteTensor* axis = GetInput(context, node, 1); - TF_LITE_ENSURE(context, axis != nullptr); - TF_LITE_ENSURE_TYPES_EQ(context, axis->type, kTfLiteInt32); - - if (input->type == kTfLiteInt8) { - OpData* data = static_cast(node->user_data); - const TfLiteTensor* output = GetOutput(context, node, 0); - const double real_multiplier = static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &data->multiplier, &data->shift); - } - - return kTfLiteOk; -} - -TfLiteStatus PrepareMax(TfLiteContext* context, TfLiteNode* node) { - TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); - - OpData* op_data = static_cast(node->user_data); - const TfLiteTensor* input = GetInput(context, node, 0); - const TfLiteTensor* output = GetOutput(context, node, 0); - const TfLiteTensor* axis = GetInput(context, node, 1); - - op_data->input_scale = input->params.scale; - op_data->output_scale = output->params.scale; - op_data->num_output_elements = NumElements(output); - - context->RequestScratchBufferInArena(context, sizeof(int) * input->dims->size, - &op_data->temp_buffer_idx); - context->RequestScratchBufferInArena( - context, sizeof(int) * static_cast(ElementCount(*axis->dims)), - &op_data->resolved_axis_idx); - - return kTfLiteOk; -} - -TfLiteStatus PrepareMeanOrSum(TfLiteContext* 
context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, 0); - OpData* op_data = reinterpret_cast(node->user_data); - const TfLiteTensor* output = GetOutput(context, node, 0); - if (input->type == kTfLiteInt8) { - const double real_multiplier = static_cast(input->params.scale) / - static_cast(output->params.scale); - QuantizeMultiplier(real_multiplier, &op_data->multiplier, &op_data->shift); - } - - int output_size = NumElements(output); - if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { - context->RequestScratchBufferInArena(context, output_size * sizeof(int32_t), - &op_data->temp_buffer_idx); - op_data->input_zp = input->params.zero_point; - op_data->input_scale = input->params.scale; - op_data->output_zp = output->params.zero_point; - op_data->output_scale = output->params.scale; - } - - TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); - // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018) - return kTfLiteOk; -} - -void ResolveAxis(const int* axis_data, int axis_count, - tflite::MeanParams* op_params) { - int i = 0; - for (; i < axis_count; ++i) { - op_params->axis[i] = static_cast(axis_data[i]); - } - for (; i < 4; ++i) { - op_params->axis[i] = 1; - } - op_params->axis_count = axis_count; -} - -TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TfLiteReducerParams* params = - reinterpret_cast(node->builtin_data); - OpData* op_data = reinterpret_cast(node->user_data); - - int num_axis = static_cast(ElementCount(*axis->dims)); - int temp_index[kMaxNumberOfAxis]; - int resolved_axis[kMaxNumberOfReducedAxis]; - - tflite::MeanParams op_params; - ResolveAxis(tflite::micro::GetTensorData(axis), num_axis, &op_params); - - // Special case mean implementation exists for 4D mean across axes 1 and 2. - bool special_case_4d_axes_1_and_2 = - input->dims->size == 4 && op_params.axis_count == 2 && - ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || - (op_params.axis[0] == 2 && op_params.axis[1] == 1)); - - switch (input->type) { - case kTfLiteFloat32: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. - if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - TF_LITE_ENSURE( - context, - reference_ops::Mean( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, - tflite::micro::GetTensorData(output))); - } - } break; - case kTfLiteInt8: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. 
- if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_integer_ops::Mean( - op_params, op_data->multiplier, op_data->shift, - tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), op_data->input_zp, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), op_data->output_zp); - } else if (op_data->input_zp == op_data->output_zp && - op_data->input_scale == op_data->output_scale) { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::Mean( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_index, resolved_axis, temp_buffer)); - } else { - int32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale, output->dims->data, - output->dims->size, tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, resolved_axis, - temp_buffer, false)); - } - } break; - case kTfLiteUInt8: { - // Defer to specialized implementation for 4D Mean across axes 1 & 2. - if (params->keep_dims && special_case_4d_axes_1_and_2) { - reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - op_data->input_zp, op_data->input_scale, - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale); - } else if (op_data->input_zp == op_data->output_zp && - op_data->input_scale == op_data->output_scale) { - uint32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::Mean(tflite::micro::GetTensorData(input), - input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, - resolved_axis, temp_buffer)); - } else { - uint32_t* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - TF_LITE_ENSURE( - context, - reference_ops::QuantizedMeanOrSum( - tflite::micro::GetTensorData(input), op_data->input_zp, - op_data->input_scale, input->dims->data, input->dims->size, - tflite::micro::GetTensorData(output), - op_data->output_zp, op_data->output_scale, output->dims->data, - output->dims->size, tflite::micro::GetTensorData(axis), - num_axis, params->keep_dims, temp_index, resolved_axis, - temp_buffer, false)); - } - } break; - default: - TF_LITE_ENSURE_MSG(context, false, - "Currently, only float32, int8 or uint8 input type " - "is supported."); - } - return kTfLiteOk; -} - -TfLiteStatus EvalMax(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - TfLiteReducerParams* params = - static_cast(node->builtin_data); - 
OpData* op_data = static_cast(node->user_data); - - // Interpret an axis tensor with null dimensions as a scalar - int num_axis = static_cast(ElementCount(*axis->dims)); - int* temp_buffer = static_cast( - context->GetScratchBuffer(context, op_data->temp_buffer_idx)); - int* resolved_axis = static_cast( - context->GetScratchBuffer(context, op_data->resolved_axis_idx)); - switch (input->type) { - case kTfLiteFloat32: - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_buffer, resolved_axis, - std::numeric_limits::lowest(), - [](const float current, const float in) -> float { - return (in > current) ? in : current; - })); - break; - case kTfLiteInt8: - TF_LITE_ENSURE_EQ(context, static_cast(op_data->input_scale), - static_cast(op_data->output_scale)); - TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp); - TF_LITE_ENSURE( - context, - reference_ops::ReduceGeneric( - tflite::micro::GetTensorData(input), input->dims->data, - input->dims->size, tflite::micro::GetTensorData(output), - output->dims->data, output->dims->size, - tflite::micro::GetTensorData(axis), num_axis, - params->keep_dims, temp_buffer, resolved_axis, - std::numeric_limits::lowest(), - [](const int8_t current, const int8_t in) -> int8_t { - return (in > current) ? in : current; - })); - break; - default: - TF_LITE_KERNEL_LOG(context, - "Only float32 and int8 types are supported.\n"); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace reduce - -TfLiteRegistration Register_MEAN() { - return {/*init=*/reduce::InitReduce, - /*free=*/nullptr, - /*prepare=*/reduce::PrepareMeanOrSum, - /*invoke=*/reduce::EvalMean, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -TfLiteRegistration Register_REDUCE_MAX() { - return {/*init=*/reduce::InitReduce, - /*free=*/nullptr, - /*prepare=*/reduce::PrepareMax, - /*invoke=*/reduce::EvalMax, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h new file mode 100644 index 0000000..6780df4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h @@ -0,0 +1,71 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_REDUCE_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_REDUCE_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" + +namespace tflite { + +extern const int kMaxNumberOfAxis; +extern const int kMaxNumberOfReducedAxis; + +struct OpDataReduce { + int32_t multiplier; + int shift; + int temp_buffer_idx; + int resolved_axis_idx; + int input_zp; + float input_scale; + int output_zp; + float output_scale; + int num_output_elements; + int num_axis; +}; + +TfLiteStatus PrepareMinMaxHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +TfLiteStatus EvalMinHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +TfLiteStatus EvalSumHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data); + +void ReduceResolveAxis(const int* axis_data, int axis_count, + MeanParams* op_params); + +TfLiteRegistration Register_MEAN(); +TfLiteRegistration Register_REDUCE_MAX(); +TfLiteRegistration Register_REDUCE_MIN(); +TfLiteRegistration Register_SUM(); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_REDUCE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce_common.cc new file mode 100644 index 0000000..a2c5c38 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce_common.cc @@ -0,0 +1,417 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
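Note on OpDataReduce declared in reduce.h above: its multiplier/shift pair encodes the ratio input_scale / output_scale as fixed point, so the integer MEAN/SUM paths can rescale accumulated values without floating point at Eval time. The helper below is a rough sketch of how such a pair is applied, written in double precision for clarity; it is the generic requantization recipe under the convention produced by QuantizeMultiplier (multiplier in [2^30, 2^31) plus a power-of-two shift), not this SDK's exact kernel code.

#include <algorithm>
#include <cmath>
#include <cstdint>

// centered_value: the input with its zero point already subtracted.
int8_t ApplyQuantizedMultiplier(int32_t centered_value, int32_t multiplier,
                                int shift, int32_t output_zero_point) {
  // Real scale represented by (multiplier, shift): multiplier * 2^shift / 2^31.
  const double real_scale =
      static_cast<double>(multiplier) / (1ll << 31) * std::pow(2.0, shift);
  const int32_t scaled =
      static_cast<int32_t>(std::lround(centered_value * real_scale)) +
      output_zero_point;
  // Clamp to the int8 output range, as the reference kernels do.
  return static_cast<int8_t>(
      std::min<int32_t>(127, std::max<int32_t>(-128, scaled)));
}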
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/reduce.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +const int kMaxNumberOfAxis = 5; +const int kMaxNumberOfReducedAxis = 2; + +TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node, + int32_t* multiplier, int* shift) { + MicroContext* micro_context = GetMicroContext(context); + + // Inputs Tensor (dtype depends on quantization): + // [0] = Input + // [1] = Axis + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + + // Outputs Tensor (dtype depends on quantization): + // [0] = Output + + // Validate number of inputs and outputs + TF_LITE_ENSURE_EQ(context, node->inputs->size, 2); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Validate axis type + TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1); + TF_LITE_ENSURE(context, axis != nullptr); + TF_LITE_ENSURE_TYPES_EQ(context, axis->type, kTfLiteInt32); + + if (input->type == kTfLiteInt8) { + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + const double real_multiplier = static_cast(input->params.scale) / + static_cast(output->params.scale); + QuantizeMultiplier(real_multiplier, multiplier, shift); + micro_context->DeallocateTempTfLiteTensor(output); + } + micro_context->DeallocateTempTfLiteTensor(axis); + micro_context->DeallocateTempTfLiteTensor(input); + return kTfLiteOk; +} + +TfLiteStatus PrepareMinMaxHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + TF_LITE_ENSURE_OK(context, PrepareSimple(context, node, &op_data->multiplier, + &op_data->shift)); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1); + + op_data->input_scale = input->params.scale; + op_data->output_scale = output->params.scale; + op_data->num_output_elements = NumElements(output); + + context->RequestScratchBufferInArena(context, sizeof(int) * input->dims->size, + &op_data->temp_buffer_idx); + context->RequestScratchBufferInArena( + context, sizeof(int) * static_cast(ElementCount(*axis->dims)), + &op_data->resolved_axis_idx); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(axis); + return kTfLiteOk; +} + +TfLiteStatus PrepareMeanOrSumHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = 
micro_context->AllocateTempInputTensor(node, 0); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 1); + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + const double real_multiplier = static_cast(input->params.scale) / + static_cast(output->params.scale); + QuantizeMultiplier(real_multiplier, &op_data->multiplier, &op_data->shift); + } + + int output_size = NumElements(output); + op_data->num_axis = NumElements(axis); + + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + context->RequestScratchBufferInArena(context, output_size * sizeof(int32_t), + &op_data->temp_buffer_idx); + op_data->input_zp = input->params.zero_point; + op_data->input_scale = input->params.scale; + op_data->output_zp = output->params.zero_point; + op_data->output_scale = output->params.scale; + } + + TF_LITE_ENSURE_OK( + context, + PrepareSimple(context, node, &(op_data->multiplier), &(op_data->shift))); + // TODO(b/144955155): Support uint8_t(b/144955155) and int8_t(b/144955018) + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(axis); + return kTfLiteOk; +} + +void ResolveAxis(const int* axis_data, int axis_count, + tflite::MeanParams* op_params) { + int i = 0; + for (; i < axis_count; ++i) { + op_params->axis[i] = static_cast(axis_data[i]); + } + for (; i < 4; ++i) { + op_params->axis[i] = 1; + } + op_params->axis_count = axis_count; +} + +template +TfLiteStatus QuantizedMeanOrSum(TfLiteContext* context, TfLiteNode* node, + int* temp_index, int* resolved_axis, + int32_t* temp_sum, OpDataReduce* op_data, + bool compute_sum) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TfLiteReducerParams* params = + static_cast(node->builtin_data); + + bool result = reference_ops::QuantizedMeanOrSumExtraArgs( + tflite::micro::GetTensorData(input), op_data->input_zp, + op_data->input_scale, &input->dims->data[0], input->dims->size, + tflite::micro::GetTensorData(output), op_data->output_scale, + op_data->multiplier, op_data->shift, op_data->output_zp, + &output->dims->data[0], output->dims->size, + tflite::micro::GetTensorData(axis), op_data->num_axis, + params->keep_dims, temp_index, resolved_axis, temp_sum, compute_sum); + TF_LITE_ENSURE(context, result); + + return kTfLiteOk; +} + +template +TfLiteStatus Mean(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data, int* temp_index, int* resolved_axis, + U* temp_sum) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TfLiteReducerParams* params = + static_cast(node->builtin_data); + + reference_ops::Mean( + tflite::micro::GetTensorData(input), &input->dims->data[0], + input->dims->size, tflite::micro::GetTensorData(output), + &output->dims->data[0], output->dims->size, + tflite::micro::GetTensorData(axis), op_data->num_axis, + params->keep_dims, temp_index, resolved_axis, temp_sum); + + return kTfLiteOk; +} + +template +TfLiteStatus EvalIntegerMean(TfLiteContext* context, TfLiteNode* node, + int num_axis, OpDataReduce* op_data, + int* temp_index, int* resolved_axis) { 
+ int32_t* temp_sum = static_cast( + context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + + if (op_data->input_zp == op_data->output_zp && + op_data->input_scale == op_data->output_scale) { + Mean(context, node, op_data, temp_index, + resolved_axis, temp_sum); + } else { + QuantizedMeanOrSum(context, node, temp_index, resolved_axis, + temp_sum, op_data, /*compute_sum=*/false); + } + return kTfLiteOk; +} + +TfLiteStatus EvalMeanHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TfLiteReducerParams* params = + reinterpret_cast(node->builtin_data); + + int num_axis = static_cast(ElementCount(*axis->dims)); + int temp_index[kMaxNumberOfAxis]; + int resolved_axis[kMaxNumberOfReducedAxis]; + + switch (input->type) { + case kTfLiteFloat32: { + tflite::MeanParams op_params; + ResolveAxis(tflite::micro::GetTensorData(axis), num_axis, + &op_params); + + // Special case mean implementation exists for 4D mean across axes 1 + // and 2. + bool special_case_4d_axes_1_and_2 = + input->dims->size == 4 && op_params.axis_count == 2 && + ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || + (op_params.axis[0] == 2 && op_params.axis[1] == 1)); + + // Defer to specialized implementation for 4D Mean across axes 1 & 2. + if (params->keep_dims && special_case_4d_axes_1_and_2) { + reference_ops::Mean(op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + TF_LITE_ENSURE( + context, + reference_ops::Mean( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_index, resolved_axis, + tflite::micro::GetTensorData(output))); + } + } break; + case kTfLiteInt8: { + TF_LITE_ENSURE_OK( + context, EvalIntegerMean(context, node, num_axis, op_data, + temp_index, resolved_axis)); + } break; + case kTfLiteInt16: { + TF_LITE_ENSURE_OK( + context, EvalIntegerMean(context, node, num_axis, op_data, + temp_index, resolved_axis)); + } break; + default: + TF_LITE_ENSURE_MSG(context, false, + "Currently, only float32, int8 or int16 input type " + "is supported."); + } + return kTfLiteOk; +} + +TfLiteStatus EvalMaxHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TfLiteReducerParams* params = + static_cast(node->builtin_data); + + // Interpret an axis tensor with null dimensions as a scalar + int num_axis = static_cast(ElementCount(*axis->dims)); + int* temp_buffer = static_cast( + context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + int* resolved_axis = static_cast( + context->GetScratchBuffer(context, op_data->resolved_axis_idx)); + switch (input->type) { + case kTfLiteFloat32: + TF_LITE_ENSURE( + context, + reference_ops::ReduceGeneric( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, 
tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_buffer, resolved_axis, + std::numeric_limits::lowest(), + [](const float current, const float in) -> float { + return (in > current) ? in : current; + })); + break; + case kTfLiteInt8: + TF_LITE_ENSURE_EQ(context, static_cast(op_data->input_scale), + static_cast(op_data->output_scale)); + TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp); + TF_LITE_ENSURE( + context, + reference_ops::ReduceGeneric( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_buffer, resolved_axis, + std::numeric_limits::lowest(), + [](const int8_t current, const int8_t in) -> int8_t { + return (in > current) ? in : current; + })); + break; + default: + MicroPrintf("Only float32 and int8 types are supported."); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus EvalMinHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TfLiteReducerParams* params = + static_cast(node->builtin_data); + + // Interpret an axis tensor with null dimensions as a scalar + int num_axis = static_cast(ElementCount(*axis->dims)); + int* temp_buffer = static_cast( + context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + int* resolved_axis = static_cast( + context->GetScratchBuffer(context, op_data->resolved_axis_idx)); + switch (input->type) { + case kTfLiteFloat32: + TF_LITE_ENSURE( + context, + reference_ops::ReduceGeneric( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_buffer, resolved_axis, + std::numeric_limits::max(), + [](const float current, const float in) -> float { + return (in < current) ? in : current; + })); + break; + case kTfLiteInt8: + TF_LITE_ENSURE_EQ(context, static_cast(op_data->input_scale), + static_cast(op_data->output_scale)); + TF_LITE_ENSURE_EQ(context, op_data->input_zp, op_data->output_zp); + TF_LITE_ENSURE( + context, + reference_ops::ReduceGeneric( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_buffer, resolved_axis, + std::numeric_limits::max(), + [](const int8_t current, const int8_t in) -> int8_t { + return (in < current) ? 
in : current; + })); + break; + default: + MicroPrintf("Only float32 and int8 types are supported."); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus EvalSumHelper(TfLiteContext* context, TfLiteNode* node, + OpDataReduce* op_data) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + const TfLiteEvalTensor* axis = tflite::micro::GetEvalInput(context, node, 1); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + TfLiteReducerParams* params = + static_cast(node->builtin_data); + + // Interpret an axis tensor with null dimensions as a scalar. + int num_axis = static_cast(ElementCount(*axis->dims)); + int temp_index[kMaxNumberOfAxis]; + int resolved_axis[kMaxNumberOfReducedAxis]; + + switch (input->type) { + case kTfLiteFloat32: { + TF_LITE_ENSURE( + context, + reference_ops::ReduceGeneric( + tflite::micro::GetTensorData(input), input->dims->data, + input->dims->size, tflite::micro::GetTensorData(output), + output->dims->data, output->dims->size, + tflite::micro::GetTensorData(axis), num_axis, + params->keep_dims, temp_index, resolved_axis, /*init_value=*/0.f, + [](const float current, const float in) -> float { + return in + current; + })); + } break; + case kTfLiteInt8: { + int32_t* temp_sum = static_cast( + context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + QuantizedMeanOrSum(context, node, temp_index, resolved_axis, + temp_sum, op_data, /*compute_sum=*/true); + } break; + case kTfLiteInt16: { + int32_t* temp_sum = static_cast( + context->GetScratchBuffer(context, op_data->temp_buffer_idx)); + QuantizedMeanOrSum(context, node, temp_index, resolved_axis, + temp_sum, op_data, /*compute_sum=*/true); + } break; + default: + MicroPrintf("Only float32, int8, and int16 types are supported."); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reshape.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reshape.cc similarity index 88% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reshape.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reshape.cc index 877ba7e..f71298c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reshape.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/reshape.cc @@ -13,6 +13,8 @@ See the License for the specific language governing permissions and limitations under the License. 
==============================================================================*/ +#include + #include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" @@ -31,9 +33,13 @@ constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus ReshapeOutput(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); // Tensorflow's Reshape allows one of the shape components to have the // special -1 value, meaning it will be calculated automatically based on the @@ -68,6 +74,9 @@ TfLiteStatus ReshapeOutput(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -93,9 +102,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { // Do nothing for in-place reshape. if (input->data.raw != output->data.raw) { // Otherwise perform reshape with copy. - for (size_t i = 0; i < input_bytes; ++i) { - output->data.raw[i] = input->data.raw[i]; - } + memcpy(output->data.raw, input->data.raw, input_bytes); } return kTfLiteOk; } @@ -103,14 +110,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace reshape TfLiteRegistration Register_RESHAPE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/reshape::Prepare, - /*invoke=*/reshape::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, reshape::Prepare, reshape::Eval); } } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_bilinear.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_bilinear.cc new file mode 100644 index 0000000..01399ee --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_bilinear.cc @@ -0,0 +1,116 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/resize_bilinear.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kSizeTensor = 1; +constexpr int kOutputTensor = 0; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* size = + micro_context->AllocateTempInputTensor(node, kSizeTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); + TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); + + TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); + output->type = input->type; + + TF_LITE_ENSURE_MSG(context, IsConstantTensor(size), + "Non constant size tensor not supported"); + + // Ensure params are valid. + auto* params = + reinterpret_cast(node->builtin_data); + if (params->half_pixel_centers && params->align_corners) { + MicroPrintf("If half_pixel_centers is True, align_corners must be False."); + return kTfLiteError; + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(size); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* size = + tflite::micro::GetEvalInput(context, node, kSizeTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteFloat32) { + tflite::ResizeBilinearParams op_params; + op_params.align_corners = params->align_corners; + op_params.half_pixel_centers = params->half_pixel_centers; + reference_ops::ResizeBilinear(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(size), + tflite::micro::GetTensorData(size), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else if (output->type == kTfLiteInt8) { + tflite::ResizeBilinearParams op_params; + op_params.align_corners = params->align_corners; + op_params.half_pixel_centers = params->half_pixel_centers; + reference_ops::ResizeBilinearInteger( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(size), + tflite::micro::GetTensorData(size), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + MicroPrintf("Output type is %d, requires float or int8.", output->type); + return kTfLiteError; + } + + 
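+  // Both branches forward align_corners and half_pixel_centers from the
+  // builtin data unchanged; they differ only in element type (float32
+  // reference kernel vs. the int8 integer variant).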
return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_RESIZE_BILINEAR() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc similarity index 78% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc index b716e80..d6f3df3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/resize_nearest_neighbor.cc @@ -21,6 +21,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace ops { @@ -32,12 +33,17 @@ constexpr int kSizeTensor = 1; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - const TfLiteTensor* size = GetInput(context, node, kSizeTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* size = + micro_context->AllocateTempInputTensor(node, kSizeTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); // Our current implementations rely on the input being 4D, // and the size being 1D tensor with exactly 2 elements. 
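  // As noted above, the kernel relies on a 4-D input and a two-element size
  // vector; the block below additionally rejects a non-constant size tensor,
  // since dynamic shapes are not supported in TFLM.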
@@ -49,9 +55,14 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { output->type = input->type; if (!IsConstantTensor(size)) { - TF_LITE_KERNEL_LOG(context, "Dynamic tensors are unsupported in tfmicro."); + MicroPrintf("Dynamic tensors are unsupported in tfmicro."); return kTfLiteError; } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(size); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; } @@ -78,26 +89,26 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(size), tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteUInt8) { + } else if (output->type == kTfLiteInt8) { reference_ops::ResizeNearestNeighbor( op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(size), tflite::micro::GetTensorData(size), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (output->type == kTfLiteInt8) { + tflite::micro::GetTensorData(output)); + } else if (output->type == kTfLiteInt16) { reference_ops::ResizeNearestNeighbor( op_params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), + tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(size), tflite::micro::GetTensorData(size), tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); + tflite::micro::GetTensorData(output)); } else { - TF_LITE_KERNEL_LOG(context, - "Output type is %d, requires float, uint8_t or int8_t.", - output->type); + MicroPrintf("Output tensor type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; } @@ -106,14 +117,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace resize_nearest_neighbor TfLiteRegistration Register_RESIZE_NEAREST_NEIGHBOR() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/resize_nearest_neighbor::Prepare, - /*invoke=*/resize_nearest_neighbor::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, resize_nearest_neighbor::Prepare, + resize_nearest_neighbor::Eval); } } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/rfft2d.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/rfft2d.cc new file mode 100644 index 0000000..fe4a16c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/rfft2d.cc @@ -0,0 +1,207 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include +#include +#include +#include + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" +#include "edge-impulse-sdk/dsp/kissfft/kiss_fftr.h" + +namespace tflite { +namespace ops { +namespace micro { +namespace rfft2d { + +using std::complex; + +constexpr int kInputTensor = 0; +constexpr int kFftLengthTensor = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + int kiss_fft_output_buffer_index; +}; + +bool IsPowerOfTwo(uint32_t v) { return v && !(v & (v - 1)); } + +static int software_rfft(float *fft_input, TfLiteComplex64 *output, size_t n_fft, size_t n_fft_out_features, kiss_fft_cpx *fft_output) { + size_t kiss_fftr_mem_length; + + // create fftr context (this should move to a scratch buffer...) + kiss_fftr_cfg cfg = kiss_fftr_alloc(n_fft, 0, NULL, NULL, &kiss_fftr_mem_length); + if (!cfg) { + ei_free(fft_output); + return -1; + } + + // execute the rfft operation + kiss_fftr(cfg, fft_input, fft_output); + + // and write back to the output + for (size_t ix = 0; ix < n_fft_out_features; ix++) { + output[ix].re = fft_output[ix].r; + output[ix].im = fft_output[ix].i; + } + + ei_free(cfg); + + return 0; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + (void)buffer; + (void)length; + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + // Check type and shape of the input tensor + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + + TfLiteTensor* fft_length = + micro_context->AllocateTempInputTensor(node, kFftLengthTensor); + const int32_t* fft_length_data = GetTensorData(fft_length); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + TF_LITE_ENSURE(context, NumDimensions(input) >= 2); + if (input->type != kTfLiteFloat32) { + context->ReportError(context, + "Type '%s' for input is not supported by rfft2d.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + // Check type and shape of the fft_length tensor + const RuntimeShape fft_length_shape = GetTensorShape(fft_length); + TF_LITE_ENSURE_EQ(context, NumDimensions(fft_length), 1); + TF_LITE_ENSURE_EQ(context, fft_length_shape.Dims(0), 2); + if (fft_length->type != kTfLiteInt32) { + context->ReportError(context, + "Type '%s' for fft_length is not supported by rfft2d.", + TfLiteTypeGetName(fft_length->type)); + return kTfLiteError; + } + + OpData* data = static_cast(node->user_data); + + size_t output_els = output->bytes / sizeof(TfLiteComplex64); + + TF_LITE_ENSURE_STATUS( + context->RequestScratchBufferInArena( + context, output_els * sizeof(kiss_fft_cpx), &data->kiss_fft_output_buffer_index)); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(fft_length); + 
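+  // Prepare() reserves an arena scratch buffer holding one kiss_fft_cpx per
+  // complex output element; Eval() later uses it as the raw kissfft output
+  // before copying the real/imaginary parts into the TfLiteComplex64 output.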
micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteTensor* input; + TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); + const TfLiteTensor* fft_length; + TF_LITE_ENSURE_OK(context, + GetInputSafe(context, node, kFftLengthTensor, &fft_length)); + const int32_t* fft_length_data = GetTensorData(fft_length); + TfLiteTensor* output; + TF_LITE_ENSURE_OK(context, + GetOutputSafe(context, node, kOutputTensor, &output)); + + if (output->type != kTfLiteComplex64) { + context->ReportError(context, + "Type '%s' for output is not supported by rfft2d.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + + TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[0])); + TF_LITE_ENSURE(context, IsPowerOfTwo(fft_length_data[1])); + + int fft_height, fft_width; + fft_height = fft_length_data[0]; + fft_width = fft_length_data[1]; + + OpData* data = static_cast(node->user_data); + + if (fft_height != 1) { + context->ReportError(context, + "Only supports fft_height 1", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + + kiss_fft_cpx* shift_buffer = (kiss_fft_cpx*)context->GetScratchBuffer(context, data->kiss_fft_output_buffer_index); + + size_t in_row_els = 1; + for (size_t ix = 1; ix < input->dims->size; ix++) { + in_row_els *= input->dims->data[ix]; + } + size_t out_row_els = 1; + for (size_t ix = 1; ix < output->dims->size; ix++) { + out_row_els *= output->dims->data[ix]; + } + + for (size_t row = 0; row < input->dims->data[0]; row++) { + float *in_ptr = &input->data.f[row * in_row_els]; + auto out_ptr = &output->data.c64[row * out_row_els]; + + int x = software_rfft(in_ptr, out_ptr, fft_width, in_row_els, shift_buffer); + if (x != 0) { + context->ReportError(context, + "software_rfft failed (%d)", + x); + return kTfLiteError; + } + } + + return kTfLiteOk; +} + +} // namespace rfft2d +} // namespace micro +} // namespace ops + +TfLiteRegistration Register_RFFT2D() { + return {/*init=*/ops::micro::rfft2d::Init, + /*free=*/nullptr, + /*prepare=*/ops::micro::rfft2d::Prepare, + /*invoke=*/ops::micro::rfft2d::Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/round.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/round.cc similarity index 85% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/round.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/round.cc index 6c4f23f..56e30d3 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/round.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/round.cc @@ -29,9 +29,13 @@ constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + 
micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); @@ -42,6 +46,9 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { for (int i = 0; i < output->dims->size; ++i) { TF_LITE_ENSURE_EQ(context, output->dims->data[i], input->dims->data[i]); } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -61,14 +68,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace round TfLiteRegistration Register_ROUND() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/round::Prepare, - /*invoke=*/round::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, round::Prepare, round::Eval); } } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.cc new file mode 100644 index 0000000..b119d67 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.cc @@ -0,0 +1,397 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" + +#include + +#include + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" + +namespace tflite { +namespace ops { +namespace micro { + +#if (defined(__Xxy)) || (defined(__Xvdsp)) +static void get_arc_two_buffer_sizes(int request_size_1, int request_size_2, + int* grant_size_1, int* grant_size_2) { + int maxrequest = 0; + int secondrequest = 0; + int maxavailable = 0; + int secondavail = 0; + + // determine the largest requested buffer. + if (request_size_1 > request_size_2) { + maxrequest = request_size_1; + secondrequest = request_size_2; + } else { + maxrequest = request_size_2; + secondrequest = request_size_1; + } + + // find the two largest available buffers. + get_arc_scratch_buffer_two_max_sizes(&maxavailable, &secondavail); + + // in case two buffers are available, the largest buffer can go to the largest + // request. + if (secondavail > 0) { // this condition can be enhanced to prevent cases + // where the second buffer is so small that it is + // better to use one buffer and split it. 
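+    // e.g. with requests of 10 KB and 2 KB and free buffers of 12 KB and
+    // 4 KB, the 12 KB buffer goes to the 10 KB request and the 4 KB buffer
+    // to the 2 KB request; with only one 12 KB buffer available, each
+    // request would instead receive a 6 KB half.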
+ if (request_size_1 > request_size_2) { + *grant_size_1 = maxavailable; + *grant_size_2 = secondavail; + } else { + *grant_size_1 = secondavail; + *grant_size_2 = maxavailable; + } + } else { + // In case only one buffer is available, + // use only the max buffer, and split it. + *grant_size_1 = maxavailable / 2; + *grant_size_2 = maxavailable / 2; + } +} + +static TfLiteStatus get_arc_scratch_buffer_for_io_tensors( + TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* out) { + int request_size_in = 0; + int request_size_out = 0; + int grant_size_in = 0; + int grant_size_out = 0; + if (!inside_arc_ccm(in->Data())) { + // In case the input tensor contains multiple batches, it has rank 4 + // because the mli kernel cannot operate on batches, we need to have the + // size of a single HWC tensor. that is why the start_rank is 1 in case of + // input rank 4 + int start_rank = *in->Rank() - 3; + request_size_in = mli_hlp_count_elem_num(in->MliTensor(), start_rank) * + mli_hlp_tensor_element_size(in->MliTensor()); + } + if (!inside_arc_ccm(out->Data())) { + // In case the input tensor contains multiple batches, it has rank 4 + // because the mli kernel cannot operate on batches, we need to have the + // size of a single batch. that is why the start_rank is 1 in case of input + // rank 4 + int start_rank = *out->Rank() - 3; + request_size_out = mli_hlp_count_elem_num(out->MliTensor(), start_rank) * + mli_hlp_tensor_element_size(out->MliTensor()); + } + + get_arc_two_buffer_sizes(request_size_in, request_size_out, &grant_size_in, + &grant_size_out); + if (!inside_arc_ccm(in->Data())) { + in->SetData( + static_cast(get_arc_scratch_buffer(grant_size_in)), + grant_size_in); + if (in->Data() == NULL) return kTfLiteError; + } + + if (!inside_arc_ccm(out->Data())) { + out->SetData( + static_cast(get_arc_scratch_buffer(grant_size_out)), + grant_size_out); + if (out->Data() == NULL) return kTfLiteError; + } + + return kTfLiteOk; +} +#endif + +TfLiteStatus get_arc_scratch_buffer_for_conv_tensors( + TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* weights, + MliTensorInterface* bias, MliTensorInterface* out) { + TfLiteStatus ret_val = kTfLiteOk; +#if (defined(__Xxy)) || (defined(__Xvdsp)) + init_arc_scratch_buffers(); + + if (!inside_arc_ccm(bias->Data())) { + uint32_t bias_mem_requirements = + mli_hlp_count_elem_num(bias->MliTensor(), 0) * + mli_hlp_tensor_element_size(bias->MliTensor()); + bias->SetData( + static_cast(get_arc_scratch_buffer(bias_mem_requirements)), + bias_mem_requirements); + } + + if (bias->Data() == NULL) { + int max_bias_size = 0; + get_arc_scratch_buffer_max_size(&max_bias_size); + bias->SetData( + static_cast(get_arc_scratch_buffer(max_bias_size)), + max_bias_size); + if (max_bias_size == 0) ret_val = kTfLiteError; + } + if (bias->Data() == NULL) ret_val = kTfLiteError; + + if (!inside_arc_ccm(weights->Data())) { + int weights_size = mli_hlp_count_elem_num(weights->MliTensor(), 0) * + mli_hlp_tensor_element_size(weights->MliTensor()); + int max_weights_size = 0; + weights->SetData( + static_cast(get_arc_scratch_buffer(weights_size)), + weights_size); + if (weights->Data() == NULL) { + get_arc_scratch_buffer_max_size(&max_weights_size); + weights->SetData( + static_cast(get_arc_scratch_buffer(max_weights_size)), + max_weights_size); + if (max_weights_size == 0) ret_val = kTfLiteError; + } + if (weights->Data() == NULL) ret_val = kTfLiteError; + } + + if (ret_val == kTfLiteOk) { + ret_val = get_arc_scratch_buffer_for_io_tensors(context, in, out); + } 
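+  // Allocation order for conv tensors: bias first (falling back to the
+  // largest remaining scratch buffer when the exact size is unavailable),
+  // then weights with the same fallback, and finally the input/output pair,
+  // which splits whatever is left via get_arc_scratch_buffer_for_io_tensors().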
+#endif + return ret_val; +} + +TfLiteStatus get_arc_scratch_buffer_for_fully_connect_tensors( + TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* weights, + MliTensorInterface* bias, MliTensorInterface* out) { + TfLiteStatus ret_val = kTfLiteOk; + +#if (defined(__Xxy)) || (defined(__Xvdsp)) + init_arc_scratch_buffers(); + + if (!inside_arc_ccm(bias->Data())) { + int bias_mem_requirements = mli_hlp_count_elem_num(bias->MliTensor(), 0) * + mli_hlp_tensor_element_size(bias->MliTensor()); + bias->SetData( + static_cast(get_arc_scratch_buffer(bias_mem_requirements)), + bias_mem_requirements); + } + + if (bias->Data() == NULL) { + int max_bias_size = 0; + get_arc_scratch_buffer_max_size(&max_bias_size); + bias->SetData( + static_cast(get_arc_scratch_buffer(max_bias_size)), + max_bias_size); + if (max_bias_size == 0) ret_val = kTfLiteError; + } + if (bias->Data() == NULL) ret_val = kTfLiteError; + + if (!inside_arc_ccm(weights->Data())) { + int weights_size = mli_hlp_count_elem_num(weights->MliTensor(), 0) * + mli_hlp_tensor_element_size(weights->MliTensor()); + int max_weights_size = 0; + weights->SetData( + static_cast(get_arc_scratch_buffer(weights_size)), + weights_size); + if (weights->Data() == NULL) { + get_arc_scratch_buffer_max_size(&max_weights_size); + weights->SetData( + static_cast(get_arc_scratch_buffer(max_weights_size)), + max_weights_size); + if (max_weights_size == 0) ret_val = kTfLiteError; + } + if (weights->Data() == NULL) ret_val = kTfLiteError; + } + + /* strategy for FC kernels: + first allocate input, because this cannot be sliced. (in case of batch + processing, only a single input needs to be allocated) then weights & + bias because if fully loaded, they can be reused over batches. then + output. The number of output channels (for weights slicing) depends on + size of output and size of weights&bias */ + + if (!inside_arc_ccm(in->Data())) { + /* In case the input tensor contains multiple batches, + only count the size if the inner most dimension */ + int size_in = mli_hlp_count_elem_num(in->MliTensor(), *in->Rank() - 1) * + mli_hlp_tensor_element_size(in->MliTensor()); + in->SetData(static_cast(get_arc_scratch_buffer(size_in)), + size_in); + if (in->Data() == NULL) { + in->SetData(nullptr, 0); + ret_val = kTfLiteError; + } + } + if (!inside_arc_ccm(out->Data())) { + /* In case the input tensor contains multiple batches, + only count the size if the inner most dimension */ + int out_size = mli_hlp_count_elem_num(out->MliTensor(), *out->Rank() - 1) * + mli_hlp_tensor_element_size(out->MliTensor()); + int max_out_size = 0; + out->SetData(static_cast(get_arc_scratch_buffer(out_size)), + out_size); + if (out->Data() == NULL) { + get_arc_scratch_buffer_max_size(&max_out_size); + out->SetData( + static_cast(get_arc_scratch_buffer(max_out_size)), + max_out_size); + if (max_out_size == 0) ret_val = kTfLiteError; + } + if (out->Data() == NULL) ret_val = kTfLiteError; + } +#endif + return ret_val; +} + +TfLiteStatus get_arc_scratch_buffer_for_eltwise_tensors( + TfLiteContext* context, MliTensorInterface* in1, MliTensorInterface* in2, + MliTensorInterface* out) { + TfLiteStatus ret_val = kTfLiteOk; +#if (defined(__Xxy)) || (defined(__Xvdsp)) + init_arc_scratch_buffers(); + constexpr int tsr_num = 3; + int in1_size = mli_hlp_count_elem_num(in1->MliTensor(), 0) * + mli_hlp_tensor_element_size(in1->MliTensor()); + int in2_size = mli_hlp_count_elem_num(in2->MliTensor(), 0) * + mli_hlp_tensor_element_size(in2->MliTensor()); + int out_size = 
mli_hlp_count_elem_num(out->MliTensor(), 0) * + mli_hlp_tensor_element_size(out->MliTensor()); + int sizes[tsr_num] = {in1_size, in2_size, out_size}; + MliTensorInterface* in_tensors[tsr_num] = {in1, in2, out}; + for (int i = 0; i < tsr_num; ++i) { + if (!inside_arc_ccm(in_tensors[i]->Data())) { + auto* data_ptr = get_arc_scratch_buffer(sizes[i]); + if (data_ptr == nullptr) { + get_arc_scratch_buffer_max_size(&sizes[i]); + data_ptr = get_arc_scratch_buffer(sizes[i]); + } + if (data_ptr == nullptr || sizes[i] == 0) { + in_tensors[i]->SetData(nullptr, 0); + ret_val = kTfLiteError; + } else { + in_tensors[i]->SetData(static_cast(data_ptr), + sizes[i]); + } + } + } +#endif + return ret_val; +} + +TfLiteStatus arc_scratch_buffer_calc_slice_size_io( + const MliTensorInterface* in, const MliTensorInterface* out, + const int kernel_height, const int stride_height, const int padding_top, + const int padding_bot, int* in_slice_height, int* out_slice_height) { + const int height_dimension = 1; + const int in_height = in->Shape()[height_dimension]; + const int out_height = out->Shape()[height_dimension]; + const int line_size_in = + mli_hlp_count_elem_num(in->MliTensor(), height_dimension + 1) * + mli_hlp_tensor_element_size(in->MliTensor()); + const int line_size_out = + mli_hlp_count_elem_num(out->MliTensor(), height_dimension + 1) * + mli_hlp_tensor_element_size(out->MliTensor()); + int max_lines_in = 0; + int max_lines_out = 0; + int max_out_lines_for_input = 0; + bool fit = + (static_cast(*in->DataCapacity()) >= in_height * line_size_in) && + (static_cast(*out->DataCapacity()) >= out_height * line_size_out); + if (fit) { + // in case both tensors completely fit in the capacity, there is no need + // for slicing. As padding can affect effective input region, we also + // derive it from output height, and rely on a clipping logic which intend + // to reduce last smaller slice. I.e the only slice is a kind of "smaller + // last slice that need to be corrected" + *in_slice_height = std::max(in_height, out_height * stride_height); + *out_slice_height = out_height; + } else { + // First compute how many lines fit into the input tensor, and compute how + // many output lines can be computed with that. + max_lines_in = std::min( + in_height, static_cast(*in->DataCapacity()) / line_size_in); + if (max_lines_in >= in_height) { + max_out_lines_for_input = out_height; + } else if (2 * max_lines_in >= in_height) { + // in this case only two slices are needed, so both could benefit from + // padding. take the MIN to get the worst case. + max_out_lines_for_input = + (max_lines_in + std::min(padding_top, padding_bot) - kernel_height + + 1) / + stride_height; + } else { + max_out_lines_for_input = + (max_lines_in - kernel_height + 1) / stride_height; + } + // Then compute how many output lines fit into the output tensor. + max_lines_out = std::min( + out_height, static_cast(*out->DataCapacity()) / line_size_out); + // the smallest of the two determines the slice height for the output, and + // the derived sliceheight for the input. 
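+    // Worked example: with in_height = 32, kernel_height = 3, stride = 1,
+    // padding_top = padding_bot = 1, an input buffer holding 20 lines and an
+    // output buffer holding 24 lines, max_lines_in = 20 (two-slice case), so
+    // max_out_lines_for_input = (20 + 1 - 3 + 1) / 1 = 19; the final sizes
+    // are then out_slice_height = min(19, 24) = 19 and in_slice_height = 19.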
+ *out_slice_height = std::min(max_out_lines_for_input, max_lines_out); + *in_slice_height = *out_slice_height * stride_height; + } + + if ((*in_slice_height > 0) && (*out_slice_height > 0)) { + return kTfLiteOk; + } else { + return kTfLiteError; + } +} + +TfLiteStatus arc_scratch_buffer_calc_slice_size_weights( + const MliTensorInterface* weights, const MliTensorInterface* bias, + const int weight_out_ch_dimension, int* slice_channels) { + const int channels = weights->Shape()[weight_out_ch_dimension]; + const int ch_size_w = + (mli_hlp_count_elem_num(weights->MliTensor(), 0) / channels) * + mli_hlp_tensor_element_size(weights->MliTensor()); + const int ch_size_b = + (mli_hlp_count_elem_num(bias->MliTensor(), 0) / channels) * + mli_hlp_tensor_element_size(bias->MliTensor()); + int max_ch_weigths = 0; + int max_ch_bias = 0; + + bool fit = + (static_cast(*weights->DataCapacity()) >= channels * ch_size_w) && + (static_cast(*bias->DataCapacity()) >= channels * ch_size_b); + if (fit) { + // in case both tensors completely fit in the capacity, there is no need + // for slicing + *slice_channels = channels; + } else { + // First compute how many channels fit into the weights tensor + max_ch_weigths = std::min( + channels, static_cast(*weights->DataCapacity()) / ch_size_w); + // Ten compute how many channels fit into the bias tensor. + max_ch_bias = + std::min(channels, static_cast(*bias->DataCapacity()) / ch_size_b); + // the smallest of the two determines the slice size + *slice_channels = std::min(max_ch_weigths, max_ch_bias); + } + + if (*slice_channels > 0) { + return kTfLiteOk; + } else { + return kTfLiteError; + } +} + +TfLiteStatus get_arc_scratch_buffer_for_pooling_tensors( + TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* out) { +#if (defined(__Xxy)) || (defined(__Xvdsp)) + init_arc_scratch_buffers(); + return get_arc_scratch_buffer_for_io_tensors(context, in, out); +#else + return kTfLiteOk; +#endif +} + +} // namespace micro +} // namespace ops +} // namespace tflite + +#endif // EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.cpp deleted file mode 100644 index d974448..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.cpp +++ /dev/null @@ -1,347 +0,0 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels -#include "../../../../classifier/ei_classifier_config.h" -#if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h" - -#include - -#include - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h" - -namespace tflite { -namespace ops { -namespace micro { - -#ifdef __Xxy -static void get_arc_two_buffer_sizes(int request_size_1, int request_size_2, - int* grant_size_1, int* grant_size_2) { - int maxrequest = 0; - int secondrequest = 0; - int maxavailable = 0; - int secondavail = 0; - - // determine the largest requested buffer. - if (request_size_1 > request_size_2) { - maxrequest = request_size_1; - secondrequest = request_size_2; - } else { - maxrequest = request_size_2; - secondrequest = request_size_1; - } - - // find the two largest available buffers. - get_arc_scratch_buffer_two_max_sizes(&maxavailable, &secondavail); - - // in case two buffers are available, the largest buffer can go to the largest - // request. - if (secondavail > 0) { // this condition can be enhanced to prevent cases - // where the second buffer is so small that it is - // better to use one buffer and split it. - if (request_size_1 > request_size_2) { - *grant_size_1 = maxavailable; - *grant_size_2 = secondavail; - } else { - *grant_size_1 = secondavail; - *grant_size_2 = maxavailable; - } - } else { - // In case only one buffer is available, - // use only the max buffer, and split it. - *grant_size_1 = maxavailable / 2; - *grant_size_2 = maxavailable / 2; - } -} - -static TfLiteStatus get_arc_scratch_buffer_for_io_tensors( - TfLiteContext* context, mli_tensor* in, mli_tensor* out) { - int request_size_in = 0; - int request_size_out = 0; - int grant_size_in = 0; - int grant_size_out = 0; - if (!inside_arc_ccm(in->data)) { - // In case the input tensor contains multiple batches, it has rank 4 - // because the mli kernel cannot operate on batches, we need to have the - // size of a single HWC tensor. that is why the start_rank is 1 in case of - // input rank 4 - int start_rank = in->rank - 3; - request_size_in = mli_hlp_count_elem_num(in, start_rank) * - mli_hlp_tensor_element_size(in); - } - if (!inside_arc_ccm(out->data)) { - // In case the input tensor contains multiple batches, it has rank 4 - // because the mli kernel cannot operate on batches, we need to have the - // size of a single batch. 
that is why the start_rank is 1 in case of input - // rank 4 - int start_rank = out->rank - 3; - request_size_out = mli_hlp_count_elem_num(out, start_rank) * - mli_hlp_tensor_element_size(out); - } - - get_arc_two_buffer_sizes(request_size_in, request_size_out, &grant_size_in, - &grant_size_out); - - if (!inside_arc_ccm(in->data)) { - in->data = get_arc_scratch_buffer(grant_size_in); - in->capacity = grant_size_in; - if (in->data == NULL) return kTfLiteError; - } - if (!inside_arc_ccm(out->data)) { - out->data = get_arc_scratch_buffer(grant_size_out); - out->capacity = grant_size_out; - if (out->data == NULL) return kTfLiteError; - } - - return kTfLiteOk; -} -#endif - -TfLiteStatus get_arc_scratch_buffer_for_conv_tensors(TfLiteContext* context, - mli_tensor* in, - mli_tensor* weights, - mli_tensor* bias, - mli_tensor* out) { - TfLiteStatus ret_val = kTfLiteOk; -#ifdef __Xxy - init_arc_scratch_buffers(); - if (!inside_arc_ccm(weights->data)) { - int weights_size = mli_hlp_count_elem_num(weights, 0) * - mli_hlp_tensor_element_size(weights); - int max_weights_size = 0; - weights->data = get_arc_scratch_buffer(weights_size); - weights->capacity = weights_size; - if (weights->data == NULL) { - get_arc_scratch_buffer_max_size(&max_weights_size); - weights->data = get_arc_scratch_buffer(max_weights_size); - weights->capacity = max_weights_size; - if (max_weights_size == 0) ret_val = kTfLiteError; - } - if (weights->data == NULL) ret_val = kTfLiteError; - } - - if (!inside_arc_ccm(bias->data)) { - uint32_t bias_mem_requirements = - mli_hlp_count_elem_num(bias, 0) * mli_hlp_tensor_element_size(bias); - bias->data = get_arc_scratch_buffer(bias_mem_requirements); - bias->capacity = bias_mem_requirements; - } - - if (ret_val == kTfLiteOk) { - ret_val = get_arc_scratch_buffer_for_io_tensors(context, in, out); - } - - if (bias->data == NULL) { - int max_bias_size = 0; - get_arc_scratch_buffer_max_size(&max_bias_size); - bias->data = get_arc_scratch_buffer(max_bias_size); - bias->capacity = max_bias_size; - if (max_bias_size == 0) ret_val = kTfLiteError; - } - if (bias->data == NULL) ret_val = kTfLiteError; - -#endif - return ret_val; -} - -TfLiteStatus get_arc_scratch_buffer_for_fully_connect_tensors( - TfLiteContext* context, mli_tensor* in, mli_tensor* weights, - mli_tensor* bias, mli_tensor* out) { - TfLiteStatus ret_val = kTfLiteOk; -#ifdef __Xxy - init_arc_scratch_buffers(); - /* strategy for FC kernels: - first allocate input, because this cannot be sliced. (in case of batch - processing, only a single input needs to be allocated) then weights & bias - because if fully loaded, they can be reused over batches. then output. 
- The number of output channels (for weights slicing) depends on size of - output and size of weights&bias */ - - if (!inside_arc_ccm(in->data)) { - /* In case the input tensor contains multiple batches, - only count the size if the inner most dimension */ - int size_in = mli_hlp_count_elem_num(in, in->rank - 1) * - mli_hlp_tensor_element_size(in); - in->data = get_arc_scratch_buffer(size_in); - in->capacity = size_in; - if (in->data == NULL) { - in->capacity = 0; - ret_val = kTfLiteError; - } - } - - if (!inside_arc_ccm(weights->data)) { - int weights_size = mli_hlp_count_elem_num(weights, 0) * - mli_hlp_tensor_element_size(weights); - int max_weights_size = 0; - weights->data = get_arc_scratch_buffer(weights_size); - weights->capacity = weights_size; - if (weights->data == NULL) { - get_arc_scratch_buffer_max_size(&max_weights_size); - weights->data = get_arc_scratch_buffer(max_weights_size); - weights->capacity = max_weights_size; - if (max_weights_size == 0) ret_val = kTfLiteError; - } - if (weights->data == NULL) ret_val = kTfLiteError; - } - - if (!inside_arc_ccm(bias->data)) { - int bias_mem_requirements = - mli_hlp_count_elem_num(bias, 0) * mli_hlp_tensor_element_size(bias); - bias->data = get_arc_scratch_buffer(bias_mem_requirements); - bias->capacity = bias_mem_requirements; - } - - if (!inside_arc_ccm(out->data)) { - /* In case the input tensor contains multiple batches, - only count the size if the inner most dimension */ - int out_size = mli_hlp_count_elem_num(out, out->rank - 1) * - mli_hlp_tensor_element_size(out); - int max_out_size = 0; - out->data = get_arc_scratch_buffer(out_size); - out->capacity = out_size; - if (out->data == NULL) { - get_arc_scratch_buffer_max_size(&max_out_size); - out->data = get_arc_scratch_buffer(max_out_size); - out->capacity = max_out_size; - if (max_out_size == 0) ret_val = kTfLiteError; - } - if (out->data == NULL) ret_val = kTfLiteError; - } - - if (bias->data == NULL) { - int max_bias_size = 0; - get_arc_scratch_buffer_max_size(&max_bias_size); - bias->data = get_arc_scratch_buffer(max_bias_size); - bias->capacity = max_bias_size; - if (max_bias_size == 0) ret_val = kTfLiteError; - } - if (bias->data == NULL) ret_val = kTfLiteError; - -#endif - return ret_val; -} - -TfLiteStatus arc_scratch_buffer_calc_slice_size_io( - const mli_tensor* in, const mli_tensor* out, const int kernel_height, - const int stride_height, const int padding_top, const int padding_bot, - int* in_slice_height, int* out_slice_height) { - const int height_dimension = 1; - const int in_height = in->shape[height_dimension]; - const int out_height = out->shape[height_dimension]; - const int line_size_in = mli_hlp_count_elem_num(in, height_dimension + 1) * - mli_hlp_tensor_element_size(in); - const int line_size_out = mli_hlp_count_elem_num(out, height_dimension + 1) * - mli_hlp_tensor_element_size(out); - int max_lines_in = 0; - int max_lines_out = 0; - int max_out_lines_for_input = 0; - bool fit = (static_cast(in->capacity) >= in_height * line_size_in) && - (static_cast(out->capacity) >= out_height * line_size_out); - if (fit) { - // in case both tensors completely fit in the capacity, there is no need for - // slicing. As padding can affect effective input region, we also derive it - // from output height, and rely on a clipping logic which intend to reduce - // last smaller slice. 
I.e the only slice is a kind of - // "smaller last slice that need to be corrected" - *in_slice_height = std::max(in_height, out_height * stride_height); - *out_slice_height = out_height; - } else { - // First compute how many lines fit into the input tensor, and compute how - // many output lines can be computed with that. - max_lines_in = - std::min(in_height, static_cast(in->capacity) / line_size_in); - if (max_lines_in >= in_height) { - max_out_lines_for_input = out_height; - } else if (2 * max_lines_in >= in_height) { - // in this case only two slices are needed, so both could benefit from - // padding. take the MIN to get the worst case. - max_out_lines_for_input = - (max_lines_in + std::min(padding_top, padding_bot) - kernel_height + - 1) / - stride_height; - } else { - max_out_lines_for_input = - (max_lines_in - kernel_height + 1) / stride_height; - } - // Then compute how many output lines fit into the output tensor. - max_lines_out = - std::min(out_height, static_cast(out->capacity) / line_size_out); - // the smallest of the two determines the slice height for the output, and - // the derived sliceheight for the input. - *out_slice_height = std::min(max_out_lines_for_input, max_lines_out); - *in_slice_height = *out_slice_height * stride_height; - } - - if ((*in_slice_height > 0) && (*out_slice_height > 0)) { - return kTfLiteOk; - } else { - return kTfLiteError; - } -} - -TfLiteStatus arc_scratch_buffer_calc_slice_size_weights( - const mli_tensor* weights, const mli_tensor* bias, - const int weight_out_ch_dimension, int* slice_channels) { - const int channels = weights->shape[weight_out_ch_dimension]; - const int ch_size_w = (mli_hlp_count_elem_num(weights, 0) / channels) * - mli_hlp_tensor_element_size(weights); - const int ch_size_b = (mli_hlp_count_elem_num(bias, 0) / channels) * - mli_hlp_tensor_element_size(bias); - int max_ch_weigths = 0; - int max_ch_bias = 0; - - bool fit = (static_cast(weights->capacity) >= channels * ch_size_w) && - (static_cast(bias->capacity) >= channels * ch_size_b); - if (fit) { - // in case both tensors completely fit in the capacity, there is no need for - // slicing - *slice_channels = channels; - } else { - // First compute how many channels fit into the weights tensor - max_ch_weigths = - std::min(channels, static_cast(weights->capacity) / ch_size_w); - // Ten compute how many channels fit into the bias tensor. 
- max_ch_bias = - std::min(channels, static_cast(bias->capacity) / ch_size_b); - // the smallest of the two determines the slice size - *slice_channels = std::min(max_ch_weigths, max_ch_bias); - } - - if (*slice_channels > 0) { - return kTfLiteOk; - } else { - return kTfLiteError; - } -} - -TfLiteStatus get_arc_scratch_buffer_for_pooling_tensors(TfLiteContext* context, - mli_tensor* in, - mli_tensor* out) { -#ifdef __Xxy - init_arc_scratch_buffers(); - return get_arc_scratch_buffer_for_io_tensors(context, in, out); -#else - return kTfLiteOk; -#endif -} - -} // namespace micro -} // namespace ops -} // namespace tflite - -#endif // EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h index 9eb6c51..2f60948 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buf_mgr.h @@ -1,7 +1,7 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels +// Patched by Edge Impulse to include reference and hardware-accelerated kernels #include "../../../../classifier/ei_classifier_config.h" #if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,6 +20,7 @@ limitations under the License. #define TENSORFLOW_LITE_MICRO_ARC_SCRATCH_BUF_MGR_H_ #include "mli_api.h" // NOLINT +#include "mli_interface.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" namespace tflite { @@ -32,19 +33,17 @@ namespace micro { * @detail This function will update the data pointers in the 4 tensors with * pointers to scratch buffers in fast local memory. * - * @param context [I] pointer to TfLite context (needed for error handling) - * @param in [IO] pointer to the input tensor - * @param weights [IO] pointer to the weights tensor - * @param bias [IO] pointer to the bias tensor - * @param output [IO] pointer to the output tensor + * @param context [I] pointer to TfLite context (needed for error handling) + * @param in [IO] pointer to the input tensor + * @param weights [IO] pointer to the weights tensor + * @param bias [IO] pointer to the bias tensor + * @param output [IO] pointer to the output tensor * * @return Tf Lite status code */ -TfLiteStatus get_arc_scratch_buffer_for_conv_tensors(TfLiteContext* context, - mli_tensor* in, - mli_tensor* weights, - mli_tensor* bias, - mli_tensor* out); +TfLiteStatus get_arc_scratch_buffer_for_conv_tensors( + TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* weights, + MliTensorInterface* bias, MliTensorInterface* out); /** * @brief Function to allocate scratch buffers for pooling kernels with only @@ -53,15 +52,14 @@ TfLiteStatus get_arc_scratch_buffer_for_conv_tensors(TfLiteContext* context, * @detail This function will update the data pointers in the 2 tensors with * pointers to scratch buffers in fast local memory. 
* - * @param context [I] pointer to TfLite context (needed for error handling) - * @param in [IO] pointer to the input tensor - * @param output [IO] pointer to the output tensor + * @param context [I] pointer to TfLite context (needed for error handling) + * @param in [IO] pointer to the input tensor + * @param output [IO] pointer to the output tensor * * @return Tf Lite status code */ -TfLiteStatus get_arc_scratch_buffer_for_pooling_tensors(TfLiteContext* context, - mli_tensor* in, - mli_tensor* out); +TfLiteStatus get_arc_scratch_buffer_for_pooling_tensors( + TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* out); /** * @brief Function to allocate scratch buffers for the fully connect tensors @@ -69,17 +67,34 @@ TfLiteStatus get_arc_scratch_buffer_for_pooling_tensors(TfLiteContext* context, * @detail This function will update the data pointers in the 4 tensors with * pointers to scratch buffers in fast local memory. * - * @param context [I] pointer to TfLite context (needed for error handling) - * @param in [IO] pointer to the input tensor - * @param weights [IO] pointer to the weights tensor - * @param bias [IO] pointer to the bias tensor - * @param output [IO] pointer to the output tensor + * @param context [I] pointer to TfLite context (needed for error handling) + * @param in [IO] pointer to the input tensor + * @param weights [IO] pointer to the weights tensor + * @param bias [IO] pointer to the bias tensor + * @param output [IO] pointer to the output tensor * * @return Tf Lite status code */ TfLiteStatus get_arc_scratch_buffer_for_fully_connect_tensors( - TfLiteContext* context, mli_tensor* in, mli_tensor* weights, - mli_tensor* bias, mli_tensor* out); + TfLiteContext* context, MliTensorInterface* in, MliTensorInterface* weights, + MliTensorInterface* bias, MliTensorInterface* out); + +/** + * @brief Function to allocate scratch buffers for the eltwise function tensors + * + * @detail This function will update the data pointers in the 3 tensors with + * pointers to scratch buffers in fast local memory. + * + * @param context [I] pointer to TfLite context (needed for error handling) + * @param in1 [IO] pointer to the first input tensor + * @param in2 [IO] pointer to the second input tensor + * @param output [IO] pointer to the output tensor + * + * @return Tf Lite status code + */ +TfLiteStatus get_arc_scratch_buffer_for_eltwise_tensors( + TfLiteContext* context, MliTensorInterface* in1, MliTensorInterface* in2, + MliTensorInterface* out); /** * @brief Function to calculate slice size for io tensors @@ -89,22 +104,23 @@ TfLiteStatus get_arc_scratch_buffer_for_fully_connect_tensors( * padding. the function will look at the capacity filed in the in and out * tensor to determine the available buffersize. 
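// --- Editorial sketch (illustration only, not part of the patch) ------------
// How a kernel's prepare/eval path might consume the helpers declared in this
// header, based only on the signatures shown above. The TFLM/MLI glue that
// builds MliTensorInterface objects from TfLiteTensor data is elided, so treat
// the call pattern as an assumption, not the kernel's actual code:
//
//   MliTensorInterface in(...), weights(...), bias(...), out(...);
//   TF_LITE_ENSURE_STATUS(get_arc_scratch_buffer_for_conv_tensors(
//       context, &in, &weights, &bias, &out));
//   int in_slice_h = 0, out_slice_h = 0;
//   TF_LITE_ENSURE_STATUS(arc_scratch_buffer_calc_slice_size_io(
//       &in, &out, kernel_height, stride_height, padding_top, padding_bot,
//       &in_slice_h, &out_slice_h));
//   // ...then run the MLI kernel slice by slice using in_slice_h/out_slice_h,
//   // falling back to a non-sliced path if either helper returns kTfLiteError.
// -----------------------------------------------------------------------------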
* - * @param in [I] pointer to the input tensor - * @param out [I] pointer to the output tensor - * @param kernelHeight [I] size of the kernel in height dimension - * @param strideHeight [I] input stride in height dimension - * @param padding_top [I] number of lines with zeros at the top - * @param padding_bot [I] number of lines with zeros at the bottom - * @param inSliceHeight [O] slice size in height dimension for the input tensor - * @param outSliceHeight [O] slice size in height dimension for the output + * @param in [I] pointer to the input tensor + * @param out [I] pointer to the output tensor + * @param kernelHeight [I] size of the kernel in height dimension + * @param strideHeight [I] input stride in height dimension + * @param padding_top [I] number of lines with zeros at the top + * @param padding_bot [I] number of lines with zeros at the bottom + * @param inSliceHeight [O] slice size in height dimension for the input + * tensor + * @param outSliceHeight [O] slice size in height dimension for the output * tensor * * @return Tf Lite status code */ TfLiteStatus arc_scratch_buffer_calc_slice_size_io( - const mli_tensor* in, const mli_tensor* out, const int kernelHeight, - const int strideHeight, const int padding_top, const int padding_bot, - int* in_slice_height, int* out_slice_height); + const MliTensorInterface* in, const MliTensorInterface* out, + const int kernelHeight, const int strideHeight, const int padding_top, + const int padding_bot, int* in_slice_height, int* out_slice_height); /** * @brief Function to calculate slice size for weight slicing @@ -113,16 +129,16 @@ TfLiteStatus arc_scratch_buffer_calc_slice_size_io( * dimension for weight and bias tensors. the function will look at the capacity * filed in the weights and bias tensor to determine the available buffersize. 
* - * @param weights [I] pointer to the input tensor - * @param bias [I] pointer to the output tensor - * @param weightOutChDimension [I] dimension of the output channels in the + * @param weights [I] pointer to the input tensor + * @param bias [I] pointer to the output tensor + * @param weightOutChDimension [I] dimension of the output channels in the * weights tensor - * @param sliceChannels [O] slice size in output channel dimension + * @param sliceChannels [O] slice size in output channel dimension * * @return Tf Lite status code */ TfLiteStatus arc_scratch_buffer_calc_slice_size_weights( - const mli_tensor* weights, const mli_tensor* bias, + const MliTensorInterface* weights, const MliTensorInterface* bias, const int weight_out_ch_dimension, int* slice_channels); } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.cc similarity index 67% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.cc index d353667..924cc41 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.cc @@ -1,7 +1,7 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels +// Patched by Edge Impulse to include reference and hardware-accelerated kernels #include "../../../../classifier/ei_classifier_config.h" #if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -28,48 +28,105 @@ namespace micro { * used for the data section and the stack. 
the values can be overruled by * adding a -D option to the makefile of the application */ + +#ifdef __Xxy + #ifndef SCRATCH_MEM_X_SIZE #ifdef core_config_xy_size #define SCRATCH_MEM_X_SIZE (core_config_xy_size) -#else -#define SCRATCH_MEM_X_SIZE (0) #endif #endif #ifndef SCRATCH_MEM_Y_SIZE #ifdef core_config_xy_size #define SCRATCH_MEM_Y_SIZE (core_config_xy_size) -#else -#define SCRATCH_MEM_Y_SIZE (0) #endif #endif #ifndef SCRATCH_MEM_Z_SIZE #ifdef core_config_dccm_size #define SCRATCH_MEM_Z_SIZE ((core_config_dccm_size) / 2) -#else -#define SCRATCH_MEM_Z_SIZE (0) #endif #endif +#elif defined(__Xvdsp) + +#ifndef SCRATCH_MEM_VEC_SIZE +#ifdef core_config_vec_mem_size +#define SCRATCH_MEM_VEC_SIZE ((core_config_vec_mem_size * 3) / 4) +#endif +#endif + +#else + +#define SCRATCH_MEM_SIZE (65536) + +#endif + +#ifdef __Xxy + +// Patched by Edge Impulse, ARC GCC fixes namespace { +#if defined (__GNUC__) +static int8_t scratch_mem_x[SCRATCH_MEM_X_SIZE] __attribute__((section(".Xdata"))); +#else #pragma Bss(".Xdata") static int8_t scratch_mem_x[SCRATCH_MEM_X_SIZE]; #pragma Bss() +#endif +#if defined (__GNUC__) +static int8_t scratch_mem_y[SCRATCH_MEM_Y_SIZE] __attribute__((section(".Ydata"))); +#else #pragma Bss(".Ydata") static int8_t scratch_mem_y[SCRATCH_MEM_Y_SIZE]; #pragma Bss() +#endif +#if defined (__GNUC__) +static int8_t scratch_mem_z[SCRATCH_MEM_Z_SIZE] __attribute__((section(".Zdata"))); +#else #pragma Bss(".Zdata") static int8_t scratch_mem_z[SCRATCH_MEM_Z_SIZE]; #pragma Bss() +#endif + +#elif defined(__Xvdsp) + +#pragma Bss(".vecmem_data") +static int8_t scratch_mem_vec_1[SCRATCH_MEM_VEC_SIZE / 4]; +static int8_t scratch_mem_vec_2[SCRATCH_MEM_VEC_SIZE / 4]; +static int8_t scratch_mem_vec_3[SCRATCH_MEM_VEC_SIZE / 2]; +#pragma Bss() + +#else + +static int8_t scratch_mem_stack[SCRATCH_MEM_SIZE]; + +#endif } // namespace +#ifdef __Xxy + static int8_t* scratch_mem[] = {scratch_mem_x, scratch_mem_y, scratch_mem_z}; static uint32_t scratch_sizes[] = {SCRATCH_MEM_X_SIZE, SCRATCH_MEM_Y_SIZE, SCRATCH_MEM_Z_SIZE}; +#elif defined(__Xvdsp) + +static int8_t* scratch_mem[] = {scratch_mem_vec_1, scratch_mem_vec_2, + scratch_mem_vec_3}; +static uint32_t scratch_sizes[] = {SCRATCH_MEM_VEC_SIZE / 4, + SCRATCH_MEM_VEC_SIZE / 4, + SCRATCH_MEM_VEC_SIZE / 2}; + +#else + +static int8_t* scratch_mem[] = {scratch_mem_stack}; +static uint32_t scratch_sizes[] = {SCRATCH_MEM_SIZE}; + +#endif + void* get_arc_scratch_buffer(int size) { // Function to asign fast memory from one of 3 scratch buffers. 
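// --- Editorial sketch (illustration only, not part of the patch) ------------
// The GCC fix above repeats the same "place this buffer in a named section"
// pattern for .Xdata, .Ydata and .Zdata. One way to express the toolchain
// split once (an assumption about style, not what the patch does) is a small
// placement macro; MetaWare builds keep the #pragma Bss() form because a
// pragma cannot be attached through a simple attribute macro:
#if defined(__GNUC__)
#define ARC_PLACE_IN_SECTION(section_name) \
  __attribute__((section(section_name)))
#else
#define ARC_PLACE_IN_SECTION(section_name)
#endif

// Usage on a GCC toolchain:
//   static int8_t scratch_mem_x[SCRATCH_MEM_X_SIZE]
//       ARC_PLACE_IN_SECTION(".Xdata");
// -----------------------------------------------------------------------------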
// Best Fit strategy - memory is allocated from that memory bank that leaves @@ -88,7 +145,7 @@ void* get_arc_scratch_buffer(int size) { } } if (best_mem_idx >= 0) { - buf = static_cast(scratch_mem[best_mem_idx]); + buf = scratch_mem[best_mem_idx]; scratch_mem[best_mem_idx] += size; scratch_sizes[best_mem_idx] -= size; } @@ -125,12 +182,24 @@ void get_arc_scratch_buffer_two_max_sizes(int* size1, int* size2) { } void init_arc_scratch_buffers(void) { +#ifdef __Xxy scratch_mem[0] = scratch_mem_x; scratch_mem[1] = scratch_mem_y; scratch_mem[2] = scratch_mem_z; scratch_sizes[0] = SCRATCH_MEM_X_SIZE; scratch_sizes[1] = SCRATCH_MEM_Y_SIZE; scratch_sizes[2] = SCRATCH_MEM_Z_SIZE; +#elif defined(__Xvdsp) + scratch_mem[0] = scratch_mem_vec_1; + scratch_mem[1] = scratch_mem_vec_2; + scratch_mem[2] = scratch_mem_vec_3; + scratch_sizes[0] = SCRATCH_MEM_VEC_SIZE / 4; + scratch_sizes[1] = SCRATCH_MEM_VEC_SIZE / 4; + scratch_sizes[2] = SCRATCH_MEM_VEC_SIZE / 2; +#else + scratch_mem[0] = scratch_mem_stack; + scratch_sizes[0] = SCRATCH_MEM_SIZE; +#endif } } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h index 19e1a8d..dc704aa 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/scratch_buffers.h @@ -1,7 +1,7 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels +// Patched by Edge Impulse to include reference and hardware-accelerated kernels #include "../../../../classifier/ei_classifier_config.h" #if EI_CLASSIFIER_TFLITE_ENABLE_ARC == 1 -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -52,7 +52,7 @@ static inline bool inside_arc_xccm(void* p) { } static inline bool inside_arc_yccm(void* p) { -#if core_config_xy +#if core_config_xy_size return ((unsigned)p >= core_config_xy_y_base) && ((unsigned)p < core_config_xy_y_base + core_config_xy_size); #else @@ -60,8 +60,18 @@ static inline bool inside_arc_yccm(void* p) { #endif } +static inline bool inside_arc_vccm(void* p) { +#if core_config_vec_mem_size + return ((unsigned)p >= core_config_vec_mem_base) && + ((unsigned)p < core_config_vec_mem_base + core_config_vec_mem_size); +#else + return false; +#endif +} + static inline bool inside_arc_ccm(void* p) { - return inside_arc_dccm(p) || inside_arc_xccm(p) || inside_arc_yccm(p); + return inside_arc_dccm(p) || inside_arc_xccm(p) || inside_arc_yccm(p) || + inside_arc_vccm(p); } } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/select.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/select.cc new file mode 100644 index 0000000..68cf319 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/select.cc @@ -0,0 +1,248 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
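// --- Editorial sketch (illustration only, not part of the patch) ------------
// get_arc_scratch_buffer() above is a best-fit bump allocator over a handful
// of memory banks: pick the bank that satisfies the request while leaving the
// least unused space, then advance that bank's pointer. A self-contained
// restatement with hypothetical bank sizes (not the library code):
#include <cstddef>
#include <cstdint>

namespace sketch {

constexpr int kBanks = 3;
static int8_t bank0[4096], bank1[4096], bank2[8192];
static int8_t* bank_mem[kBanks] = {bank0, bank1, bank2};
static uint32_t bank_free[kBanks] = {sizeof(bank0), sizeof(bank1),
                                     sizeof(bank2)};

void* BestFitAlloc(uint32_t size) {
  int best = -1;
  uint32_t best_leftover = UINT32_MAX;
  for (int i = 0; i < kBanks; ++i) {
    // Candidate bank must hold the request and leave the smallest remainder.
    if (bank_free[i] >= size && bank_free[i] - size < best_leftover) {
      best = i;
      best_leftover = bank_free[i] - size;
    }
  }
  if (best < 0) return nullptr;  // no bank can hold the request
  void* p = bank_mem[best];
  bank_mem[best] += size;        // bump the bank pointer
  bank_free[best] -= size;
  return p;
}

}  // namespace sketch
// -----------------------------------------------------------------------------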
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TF_LITE_STATIC_MEMORY +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/select.h" + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +constexpr int kInputTensorCondition = 0; +constexpr int kInputTensorX = 1; +constexpr int kInputTensorY = 2; +constexpr int kOutputTensor = 0; + +enum KernelType { + kVersionOne, + kVersionTwo, +}; + +struct OpData { + bool requires_broadcast; + // True if input condition is scalar or input condition has rank one and + // matches the first dimension of other inputs. + bool has_low_rank_input_condition; +}; + +void* SelectInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + auto* data = static_cast( + context->AllocatePersistentBuffer(context, sizeof(OpData))); + data->requires_broadcast = false; + data->has_low_rank_input_condition = false; + return data; +} + +TfLiteStatus CheckBroadcastShape(TfLiteContext* context, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + const TfLiteTensor* input3, + const TfLiteIntArray* output_shape) { + const int dims1 = NumDimensions(input1); + const int dims2 = NumDimensions(input2); + const int dims3 = NumDimensions(input3); + const int out_dims = std::max(std::max(dims1, dims2), dims3); + TF_LITE_ENSURE_EQ(context, out_dims, output_shape->size); + + for (int i = 0; i < out_dims; ++i) { + const int d1 = i >= dims1 ? 1 : SizeOfDimension(input1, dims1 - i - 1); + const int d2 = i >= dims2 ? 1 : SizeOfDimension(input2, dims2 - i - 1); + const int d3 = i >= dims3 ? 1 : SizeOfDimension(input3, dims3 - i - 1); + const int min_value = std::min(std::min(d1, d2), d3); + int max_value = std::max(std::max(d1, d2), d3); + // If one dimention is 0, others must be 0 or 1. 
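// --- Editorial sketch (illustration only, not part of the patch) ------------
// The rule CheckBroadcastShape() enforces, restated over plain vectors: align
// the three shapes at their trailing dimensions, and in every position each
// size must be either 1 or equal to the largest size in that position (a 0 in
// any input forces the broadcast size to 0). Assumption-level helper, not
// TFLite code:
#include <algorithm>
#include <vector>

static bool BroadcastCompatible3(const std::vector<int>& a,
                                 const std::vector<int>& b,
                                 const std::vector<int>& c,
                                 std::vector<int>* out_shape) {
  const int out_dims = std::max({(int)a.size(), (int)b.size(), (int)c.size()});
  out_shape->assign(out_dims, 1);
  for (int i = 0; i < out_dims; ++i) {
    // Read from the back; missing leading dimensions behave like size 1.
    auto dim = [i](const std::vector<int>& s) {
      return i < (int)s.size() ? s[s.size() - 1 - i] : 1;
    };
    const int d1 = dim(a), d2 = dim(b), d3 = dim(c);
    int max_v = std::max({d1, d2, d3});
    if (std::min({d1, d2, d3}) == 0) max_v = 0;  // zero-sized dimension
    if ((d1 != 1 && d1 != max_v) || (d2 != 1 && d2 != max_v) ||
        (d3 != 1 && d3 != max_v)) {
      return false;  // not broadcastable
    }
    (*out_shape)[out_dims - 1 - i] = max_v;
  }
  return true;
}
// e.g. {2,1,4}, {1,3,1} and {4} broadcast to {2,3,4}.
// -----------------------------------------------------------------------------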
+ if (min_value == 0) max_value = 0; + if (!(d1 == 1 || d1 == max_value) || !(d2 == 1 || d2 == max_value) || + !(d3 == 1 || d3 == max_value)) { + MicroPrintf("Given shapes are not broadcastable."); + return kTfLiteError; + } + TF_LITE_ENSURE_EQ(context, output_shape->data[out_dims - i - 1], max_value); + } + return kTfLiteOk; +} + +template +TfLiteStatus SelectPrepare(TfLiteContext* context, TfLiteNode* node) { + OpData* data = reinterpret_cast(node->user_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input_condition = + micro_context->AllocateTempInputTensor(node, kInputTensorCondition); + + TfLiteTensor* input_x = + micro_context->AllocateTempInputTensor(node, kInputTensorX); + + TfLiteTensor* input_y = + micro_context->AllocateTempInputTensor(node, kInputTensorY); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + + // Input must be bool. + TF_LITE_ENSURE_TYPES_EQ(context, input_condition->type, kTfLiteBool); + TF_LITE_ENSURE_TYPES_EQ(context, input_x->type, input_y->type); + output->type = input_x->type; + + // Respect the original output shape when there are mixed shapes to represent + // a scalar data. + if (GetTensorShape(input_condition).FlatSize() == 1 && + GetTensorShape(input_x).FlatSize() == 1 && + GetTensorShape(input_y).FlatSize() == 1 && + GetTensorShape(output).FlatSize() == 1) { + + micro_context->DeallocateTempTfLiteTensor(input_condition); + micro_context->DeallocateTempTfLiteTensor(input_x); + micro_context->DeallocateTempTfLiteTensor(input_y); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; + } + + bool same_shape = HaveSameShapes(input_condition, input_x) && + HaveSameShapes(input_x, input_y); + TfLiteIntArray* output_size; + if (!same_shape) { + switch (kernel_type) { + case kVersionOne: { + bool is_input_condition_scalar = NumDimensions(input_condition) == 0; + bool has_rank_one_input_condition = + NumDimensions(input_condition) == 1 && + SizeOfDimension(input_condition, 0) == SizeOfDimension(input_x, 0); + data->has_low_rank_input_condition = + is_input_condition_scalar || has_rank_one_input_condition; + TF_LITE_ENSURE(context, data->has_low_rank_input_condition); + + output_size = TfLiteIntArrayCopy(input_x->dims); + + // Input tensors must have the same type and size + TF_LITE_ENSURE(context, HaveSameShapes(input_x, input_y)); + break; + } + case kVersionTwo: { + TF_LITE_ENSURE_OK( + context, CheckBroadcastShape(context, input_condition, input_x, input_y, + output->dims)); + data->requires_broadcast = true; + break; + } + default: + micro_context->DeallocateTempTfLiteTensor(input_condition); + micro_context->DeallocateTempTfLiteTensor(input_x); + micro_context->DeallocateTempTfLiteTensor(input_y); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteError; + } + } else { + output_size = TfLiteIntArrayCopy(input_x->dims); + } + + micro_context->DeallocateTempTfLiteTensor(input_condition); + micro_context->DeallocateTempTfLiteTensor(input_x); + micro_context->DeallocateTempTfLiteTensor(input_y); + micro_context->DeallocateTempTfLiteTensor(output); + + TfLiteIntArrayFree(output_size); + + return kTfLiteOk; +} + +template +void CallSelect(const TfLiteEvalTensor* input_condition, + const TfLiteEvalTensor* input_x, + const TfLiteEvalTensor* input_y, TfLiteEvalTensor* output, + bool need_broadcast) { + using Func = decltype(reference_ops::Select)*; + 
Func select_func; + if (need_broadcast) { + select_func = reference_ops::BroadcastSelect5DSlow; + } else { + select_func = reference_ops::Select; + } + + select_func(tflite::micro::GetTensorShape(input_condition), + tflite::micro::GetTensorData(input_condition), + tflite::micro::GetTensorShape(input_x), + tflite::micro::GetTensorData(input_x), + tflite::micro::GetTensorShape(input_y), + tflite::micro::GetTensorData(input_y), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); +} + +TfLiteStatus SelectEval(TfLiteContext* context, TfLiteNode* node) { + OpData* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input_condition = + tflite::micro::GetEvalInput(context, node, kInputTensorX); + + const TfLiteEvalTensor* input_x = + tflite::micro::GetEvalInput(context, node, kInputTensorY); + + const TfLiteEvalTensor* input_y = + tflite::micro::GetEvalInput(context, node, kInputTensorCondition); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + switch (input_x->type) { + case kTfLiteFloat32: + CallSelect(input_condition, input_x, input_y, output, + data->requires_broadcast); + break; + case kTfLiteInt8: + CallSelect(input_condition, input_x, input_y, output, + data->requires_broadcast); + break; + case kTfLiteInt16: + CallSelect(input_condition, input_x, input_y, output, + data->requires_broadcast); + break; + default: + MicroPrintf("Does not support type other than %s, but got %s", + "int8|int16|float32", TfLiteTypeGetName(input_x->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteRegistration Register_SELECT() { + return tflite::micro::RegisterOp(tflite::SelectInit, tflite::SelectPrepare, + tflite::SelectEval); +} + +// SelectV2 op selects values of 'x' if the corresponding value of 'condition' +// is true or the value of 'y' if false. There are valid condition input sizes: +// +// 1. Either the same shape (in which case the select is elementwise), or +// 2. Broadcastable shapes between 'condition', 'x' and 'y'. +TfLiteRegistration Register_SELECT_V2() { + return tflite::micro::RegisterOp(tflite::SelectInit, tflite::SelectPrepare, + tflite::SelectEval); +} + +} // namespace tflite +#endif // TF_LITE_STATIC_MEMORY diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/shape.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/shape.cc similarity index 84% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/shape.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/shape.cc index d0cf78b..21af290 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/shape.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/shape.cc @@ -20,6 +20,7 @@ limitations under the License. 
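// --- Editorial sketch (illustration only, not part of the patch) ------------
// What Register_SELECT / Register_SELECT_V2 compute in the simplest
// same-shape, no-broadcast case: an elementwise pick of x or y driven by a
// bool condition tensor. Flat-array restatement, assumption only.
// Review note: in SelectEval above, input_condition / input_x / input_y are
// fetched with kInputTensorX / kInputTensorY / kInputTensorCondition
// respectively; that pairing is worth double-checking against the constants
// defined at the top of the file (condition = 0, x = 1, y = 2).
#include <cstddef>

template <typename T>
void ElementwiseSelect(const bool* condition, const T* x, const T* y,
                       T* output, std::size_t flat_size) {
  for (std::size_t i = 0; i < flat_size; ++i) {
    output[i] = condition[i] ? x[i] : y[i];
  }
}
// -----------------------------------------------------------------------------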
#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" namespace tflite { @@ -47,8 +48,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); if (output->type != kTfLiteInt32) { - TF_LITE_KERNEL_LOG(context, "Output type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); + MicroPrintf("Output type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); return kTfLiteError; } else { ExtractShape(input, tflite::micro::GetTensorData(output)); @@ -60,14 +61,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace TfLiteRegistration Register_SHAPE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/slice.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/slice.cc new file mode 100644 index 0000000..16ce966 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/slice.cc @@ -0,0 +1,157 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/slice.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kBeginTensor = 1; +constexpr int kSizeTensor = 2; +constexpr int kOutputTensor = 0; + +const int kMaxDim = 5; + +template +void GetBeginAndSizeVectors(int dimensions, const TfLiteEvalTensor* begin, + const TfLiteEvalTensor* size, int32_t* begins, + int32_t* sizes) { + int offset = kMaxDim - dimensions; + for (int idx = 0; idx < dimensions; ++idx) { + begins[offset + idx] = tflite::micro::GetTensorData(begin)[idx]; + sizes[offset + idx] = tflite::micro::GetTensorData(size)[idx]; + } +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TFLITE_DCHECK(input != nullptr); + TfLiteTensor* begin = + micro_context->AllocateTempInputTensor(node, kBeginTensor); + TFLITE_DCHECK(begin != nullptr); + TfLiteTensor* size = + micro_context->AllocateTempInputTensor(node, kSizeTensor); + TFLITE_DCHECK(size != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TFLITE_DCHECK(output != nullptr); + + // Ensure validity of input tensor and its dimension. 
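// --- Editorial sketch (illustration only, not part of the patch) ------------
// GetBeginAndSizeVectors() above right-aligns the (begin, size) vectors into
// fixed kMaxDim = 5 arrays: a rank-N slice occupies the last N slots and the
// leading slots keep their defaults (begin 0, size 1). Small illustration with
// hypothetical values:
#include <cstdint>
#include <cstdio>

int main() {
  constexpr int kMaxDim = 5;
  int32_t begins[kMaxDim] = {0, 0, 0, 0, 0};
  int32_t sizes[kMaxDim] = {1, 1, 1, 1, 1};
  // A rank-2 slice: begin = {1, 2}, size = {3, 4}.
  const int32_t begin_in[] = {1, 2};
  const int32_t size_in[] = {3, 4};
  const int dims = 2;
  const int offset = kMaxDim - dims;  // the first 3 slots stay at defaults
  for (int i = 0; i < dims; ++i) {
    begins[offset + i] = begin_in[i];
    sizes[offset + i] = size_in[i];
  }
  for (int i = 0; i < kMaxDim; ++i) {
    std::printf("dim %d: begin=%ld size=%ld\n", i, (long)begins[i],
                (long)sizes[i]);
  }
  return 0;  // prints 0/1, 0/1, 0/1, 1/3, 2/4
}
// -----------------------------------------------------------------------------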
+ TFLITE_DCHECK(input->type == output->type); + TFLITE_DCHECK(begin->type == size->type); + TFLITE_DCHECK(begin->type == kTfLiteInt32 || begin->type == kTfLiteInt64); + TFLITE_DCHECK(size->type == kTfLiteInt32 || size->type == kTfLiteInt64); + TFLITE_DCHECK(NumDimensions(begin) == 1); + TFLITE_DCHECK(NumDimensions(size) == 1); + TFLITE_DCHECK(NumElements(begin) == NumElements(size)); + TFLITE_DCHECK(NumDimensions(input) <= kMaxDim); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(begin); + micro_context->DeallocateTempTfLiteTensor(size); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* begin = + tflite::micro::GetEvalInput(context, node, kBeginTensor); + const TfLiteEvalTensor* size = + tflite::micro::GetEvalInput(context, node, kSizeTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + tflite::SliceParams op_params; + op_params.begin_count = kMaxDim; + op_params.size_count = kMaxDim; + for (int i = 0; i < kMaxDim; ++i) { + op_params.begin[i] = 0; + op_params.size[i] = 1; + } + + if (begin->type == kTfLiteInt32) { + GetBeginAndSizeVectors(input->dims->size, begin, size, + op_params.begin, op_params.size); + } else if (begin->type == kTfLiteInt64) { + GetBeginAndSizeVectors(input->dims->size, begin, size, + op_params.begin, op_params.size); + } else { + MicroPrintf("Begin tensor type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + + switch (input->type) { + case kTfLiteFloat32: + reference_ops::Slice(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt32: + reference_ops::Slice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::Slice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt16: + reference_ops::Slice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Input tensor type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_SLICE() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.cc new file mode 100644 index 0000000..d5d6355 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.cc @@ -0,0 +1,565 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif 
EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +struct CMSISNNSoftmaxParams { + SoftmaxParams softmax_params; + int32_t num_rows; + int32_t row_size; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, + sizeof(CMSISNNSoftmaxParams)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, NumDimensions(input) >= 1); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE(context, node->user_data != nullptr); + CMSISNNSoftmaxParams* op_data = + static_cast(node->user_data); + + auto* params = static_cast(node->builtin_data); + auto ret_val = CalculateSoftmaxParams(context, input, output, params, + &op_data->softmax_params); + + const auto input_shape = GetTensorShape(input); + const auto output_shape = GetTensorShape(output); + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int outer_size = + MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int depth = + MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + op_data->num_rows = outer_size; + op_data->row_size = depth; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return ret_val; +} + +TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + const CMSISNNSoftmaxParams op_data = + *static_cast(node->user_data); + 
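// --- Editorial sketch (illustration only, not part of the patch) ------------
// The num_rows / row_size pair that Prepare() derives above is the flattened
// view CMSIS-NN expects: softmax runs over the trailing dimension
// ("row_size" = depth) and is repeated once per remaining element
// ("num_rows" = product of the leading dimensions). Plain restatement,
// assumption only:
#include <cstddef>
#include <vector>

static void SoftmaxRowsAndDepth(const std::vector<int>& shape, int* num_rows,
                                int* row_size) {
  *row_size = shape.back();  // trailing dimension
  *num_rows = 1;
  for (std::size_t i = 0; i + 1 < shape.size(); ++i) *num_rows *= shape[i];
}
// e.g. a {1, 10} logits tensor -> num_rows = 1,  row_size = 10;
//      a {4, 6, 32} tensor     -> num_rows = 24, row_size = 32.
// -----------------------------------------------------------------------------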
+ switch (input->type) { + case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + tflite::reference_ops::Softmax( + op_data.softmax_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + case kTfLiteInt8: { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + if (output->type == kTfLiteInt8) { +#if EI_TFLITE_DISABLE_SOFTMAX_OUT_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + output->type); + return kTfLiteError; +#endif + arm_softmax_s8(tflite::micro::GetTensorData(input), + op_data.num_rows, op_data.row_size, + op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, + op_data.softmax_params.diff_min, + tflite::micro::GetTensorData(output)); + } else { +#if EI_TFLITE_DISABLE_SOFTMAX_OUT_I16 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + output->type); + return kTfLiteError; +#endif + arm_softmax_s8_s16(tflite::micro::GetTensorData(input), + op_data.num_rows, op_data.row_size, + op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, + op_data.softmax_params.diff_min, + tflite::micro::GetTensorData(output)); + } + return kTfLiteOk; + } + case kTfLiteInt16: { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I16 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + const cmsis_nn_softmax_lut_s16 softmax_params = { + .exp_lut = op_data.softmax_params.exp_lut, + .one_by_one_lut = op_data.softmax_params.one_over_one_plus_x_lut}; + + TFLITE_DCHECK_EQ( + arm_softmax_s16( + tflite::micro::GetTensorData(input), op_data.num_rows, + op_data.row_size, op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, &softmax_params, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + return kTfLiteOk; + } + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } +} + +TfLiteStatus SoftmaxEvalInt8(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + const CMSISNNSoftmaxParams op_data = + *static_cast(node->user_data); + + arm_softmax_s8(tflite::micro::GetTensorData(input), op_data.num_rows, + op_data.row_size, op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, + op_data.softmax_params.diff_min, + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; +} + +TfLiteStatus SoftmaxEvalInt8_Int16(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + const CMSISNNSoftmaxParams op_data = + *static_cast(node->user_data); + + arm_softmax_s8_s16( + tflite::micro::GetTensorData(input), op_data.num_rows, + op_data.row_size, op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, 
op_data.softmax_params.diff_min, + tflite::micro::GetTensorData(output)); + + return kTfLiteOk; +} + +TfLiteStatus SoftmaxEvalInt16(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + const CMSISNNSoftmaxParams op_data = + *static_cast(node->user_data); + + const cmsis_nn_softmax_lut_s16 softmax_params = { + .exp_lut = op_data.softmax_params.exp_lut, + .one_by_one_lut = op_data.softmax_params.one_over_one_plus_x_lut}; + + TFLITE_DCHECK_EQ( + arm_softmax_s16(tflite::micro::GetTensorData(input), + op_data.num_rows, op_data.row_size, + op_data.softmax_params.input_multiplier, + op_data.softmax_params.input_left_shift, &softmax_params, + tflite::micro::GetTensorData(output)), + ARM_CMSIS_NN_SUCCESS); + + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_SOFTMAX() { + return tflite::micro::RegisterOp(Init, Prepare, SoftmaxEval); +} + +TfLiteRegistration Register_SOFTMAX_INT8() { + return tflite::micro::RegisterOp(Init, Prepare, SoftmaxEvalInt8); +} + +TfLiteRegistration Register_SOFTMAX_INT8_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, SoftmaxEvalInt8_Int16); +} + +TfLiteRegistration Register_SOFTMAX_INT16() { + return tflite::micro::RegisterOp(Init, Prepare, SoftmaxEvalInt16); +} + +} // namespace tflite + +#elif EI_CLASSIFIER_TFLITE_ENABLE_ESP_NN == 1 +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +#include + +#if ESP_NN +#include "edge-impulse-sdk/porting/espressif/ESP-NN/include/esp_nn.h" +#endif + +long long softmax_total_time = 0; + +namespace tflite { +namespace { +// Softmax parameter data that persists in user_data +const int kInt16LUTArraySize = 513; + +struct NodeData { + SoftmaxParams op_data; +#if ESP_NN + int buffer_idx; +#endif +}; + +static void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(NodeData)); +} + +void SoftmaxQuantized(TfLiteContext* context, const TfLiteEvalTensor* input, + TfLiteEvalTensor* output, const NodeData* data) { + if (input->type == kTfLiteInt8) { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return; +#endif + if (output->type == kTfLiteInt16) { +#if EI_TFLITE_DISABLE_SOFTMAX_OUT_I16 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return; +#endif + tflite::reference_ops::Softmax( + data->op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { +#if EI_TFLITE_DISABLE_SOFTMAX_OUT_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return; +#endif +#if ESP_NN + const int32_t input_beta_multiplier = data->op_data.input_multiplier; + const int32_t input_beta_left_shift = data->op_data.input_left_shift; + const int diff_min = data->op_data.diff_min; + const RuntimeShape input_shape = tflite::micro::GetTensorShape(input); + const RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + const int trailing_dim = input_shape.DimensionsCount() - 1; + const int outer_size = + MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); + const int depth = + MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); + const int8_t *in_ptr = tflite::micro::GetTensorData(input); + int8_t *out_ptr = tflite::micro::GetTensorData(output); + void *scratch_buf = NULL; + if (data->buffer_idx > -1) { + scratch_buf = context->GetScratchBuffer(context, data->buffer_idx); + } + esp_nn_set_softmax_scratch_buf(scratch_buf); + esp_nn_softmax_s8(in_ptr, outer_size, depth, input_beta_multiplier, + input_beta_left_shift, diff_min, out_ptr); +#else + tflite::reference_ops::Softmax( + data->op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + 
tflite::micro::GetTensorData(output)); +#endif + } + } else { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I16 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return; +#endif + tflite::reference_ops::SoftmaxInt16( + data->op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} + +static TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + NodeData data = *static_cast(node->user_data); + + long long start_time = esp_timer_get_time(); + switch (input->type) { + case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + tflite::reference_ops::Softmax( + data.op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + break; + case kTfLiteInt8: +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + SoftmaxQuantized(context, input, output, &data); + break; + case kTfLiteInt16: { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I16 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + SoftmaxQuantized(context, input, output, &data); + } + break; + default: + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; + } + softmax_total_time += esp_timer_get_time() - start_time; + return kTfLiteOk; +} + +static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, NumDimensions(input) >= 1); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE(context, node->user_data != nullptr); + NodeData* data = static_cast(node->user_data); + SoftmaxParams* op_data = static_cast(&data->op_data); + + auto* params = static_cast(node->builtin_data); + auto ret_val = + CalculateSoftmaxParams(context, input, output, params, op_data); + +#if ESP_NN + if (output->type == kTfLiteInt8 && input->type == kTfLiteInt8) { + const int32_t input_width = input->dims->data[1]; + const int32_t input_height = input->dims->data[2]; + int scratch_buf_size = esp_nn_get_softmax_scratch_size(input_width, + input_height); + if (scratch_buf_size > 0) { + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, scratch_buf_size, &data->buffer_idx)); + } + } +#endif + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return ret_val; +} + +} // namespace + +TfLiteRegistration Register_SOFTMAX() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace 
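// --- Editorial sketch (illustration only, not part of the patch) ------------
// The ESP-NN path above uses the standard TFLM scratch-buffer lifecycle:
// request a buffer index during Prepare(), then resolve it to a pointer during
// Eval(). Minimal shape of that pattern, kernel-specific details elided:
//
//   // Prepare():
//   int buffer_idx = -1;
//   TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
//       context, scratch_bytes_needed, &buffer_idx));
//
//   // Eval():
//   void* scratch = (buffer_idx > -1)
//                       ? context->GetScratchBuffer(context, buffer_idx)
//                       : nullptr;
//
// The index, not the pointer, is what persists between the two phases, since
// the arena may be re-planned after Prepare() completes.
// -----------------------------------------------------------------------------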
tflite + +#else +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +void SoftmaxQuantized(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, + const SoftmaxParams& op_data) { + if (input->type == kTfLiteInt8) { + if (output->type == kTfLiteInt16) { + tflite::reference_ops::Softmax( + op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + tflite::reference_ops::Softmax( + op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + } else { + tflite::reference_ops::SoftmaxInt16( + op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} + +TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + + TFLITE_DCHECK(node->user_data != nullptr); + SoftmaxParams op_data = *static_cast(node->user_data); + + switch (input->type) { + case kTfLiteFloat32: { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_F32 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + tflite::reference_ops::Softmax( + op_data, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + return kTfLiteOk; + } + case kTfLiteInt8: { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I8 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; +#endif + SoftmaxQuantized(input, output, op_data); + return kTfLiteOk; + } + case kTfLiteInt16: { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I16 + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; 
+#endif + SoftmaxQuantized(input, output, op_data); + return kTfLiteOk; + } + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } +} +} // namespace + +TfLiteRegistration Register_SOFTMAX() { + return tflite::micro::RegisterOp(SoftmaxInit, SoftmaxPrepare, SoftmaxEval); +} + +} // namespace tflite + +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.cpp deleted file mode 100644 index 66184b0..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.cpp +++ /dev/null @@ -1,217 +0,0 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels -#include "../../../../classifier/ei_classifier_config.h" -#if 0 == 1 -/* noop */ -#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h" - -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -void SoftmaxQuantized(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, - const SoftmaxParams& op_data) { - if (input->type == kTfLiteUInt8) { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else if (input->type == kTfLiteInt8) { - if (output->type == kTfLiteInt16) { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - const auto input_shape = tflite::micro::GetTensorShape(input); - const auto output_shape = tflite::micro::GetTensorShape(output); - const int trailing_dim = input_shape.DimensionsCount() - 1; - const int outer_size = - MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); - const int depth = - MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); - - arm_softmax_s8(tflite::micro::GetTensorData(input), outer_size, - depth, 
op_data.input_multiplier, op_data.input_left_shift, - op_data.diff_min, - tflite::micro::GetTensorData(output)); - } - } else { - tflite::reference_ops::SoftmaxInt16( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - TFLITE_DCHECK(node->user_data != nullptr); - const SoftmaxParams data = - *static_cast(node->user_data); - - switch (input->type) { - case kTfLiteFloat32: { - tflite::reference_ops::Softmax( - data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - case kTfLiteInt8: - case kTfLiteUInt8: - case kTfLiteInt16: { - SoftmaxQuantized(input, output, data); - return kTfLiteOk; - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } -} - -} // namespace - -TfLiteRegistration Register_SOFTMAX() { - return {/*init=*/SoftmaxInit, - /*free=*/nullptr, - /*prepare=*/SoftmaxPrepare, - /*invoke=*/SoftmaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#else -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/softmax.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -void SoftmaxQuantized(const TfLiteEvalTensor* input, TfLiteEvalTensor* output, - const SoftmaxParams& op_data) { - if (input->type == kTfLiteInt8) { - if (output->type == kTfLiteInt16) { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } else { - tflite::reference_ops::SoftmaxInt16( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); - - TFLITE_DCHECK(node->user_data != nullptr); - SoftmaxParams op_data = *static_cast(node->user_data); - - switch (input->type) { - case kTfLiteFloat32: { - tflite::reference_ops::Softmax( - op_data, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } - case kTfLiteInt8: - case kTfLiteInt16: { - SoftmaxQuantized(input, output, op_data); - return kTfLiteOk; - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } -} -} // namespace - -TfLiteRegistration Register_SOFTMAX() { - return {/*init=*/SoftmaxInit, - /*free=*/nullptr, - /*prepare=*/SoftmaxPrepare, - /*invoke=*/SoftmaxEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h index 97c71a9..fb15d38 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h @@ -1,4 +1,4 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,6 +15,7 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ #define TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ +#include "edge-impulse-sdk/classifier/ei_classifier_config.h" #include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" @@ -23,8 +24,47 @@ namespace tflite { void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length); +// Common helper function to SoftmaxPrepare. +TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, + const TfLiteTensor* input, + TfLiteTensor* output, + const TfLiteSoftmaxParams* params, + SoftmaxParams* op_data); + TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node); +// This is the most generic TfLiteRegistration. The actual supported types may +// still be target dependent. The only requirement is that every implementation +// (reference or optimized) must define this function. +TfLiteRegistration Register_SOFTMAX(); + +#if defined(XTENSA) || defined(CMSIS_NN) +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int8 input and int16 output. +TfLiteRegistration Register_SOFTMAX_INT8_INT16(); +#else +inline TfLiteRegistration Register_SOFTMAX_INT8_INT16() { + return Register_SOFTMAX(); +} +#endif + +#if defined(CMSIS_NN) +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int8 input/output and uses the latency optimized implementations. +TfLiteRegistration Register_SOFTMAX_INT8(); + +// Returns a TfLiteRegistration struct for kernel variant that only supports +// int16 input/output and uses the latency optimized implementations. +TfLiteRegistration Register_SOFTMAX_INT16(); + +#else +inline TfLiteRegistration Register_SOFTMAX_INT8() { return Register_SOFTMAX(); } + +inline TfLiteRegistration Register_SOFTMAX_INT16() { + return Register_SOFTMAX(); +} +#endif + } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_KERNELS_SOFTMAX_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax_common.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax_common.cc similarity index 68% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax_common.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax_common.cc index 4328450..82ec071 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax_common.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax_common.cc @@ -1,4 +1,4 @@ -/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,24 +20,91 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" namespace tflite { namespace { // Softmax parameter data that persists in user_data -const int kInt16LUTArraySize = 513; +const int kInt16LUTArraySize = LUTSize(); + +TfLiteStatus InitializeLutForInt16(TfLiteContext* context, + const TfLiteTensor* input, + TfLiteTensor* output, + SoftmaxParams* op_data) { + // Only allocate LUTs for KTfLiteInt16 data type + if (input->type == kTfLiteInt16) { + void* raw_exp_lut = context->AllocatePersistentBuffer( + context, sizeof(int16_t) * kInt16LUTArraySize); + TF_LITE_ENSURE(context, raw_exp_lut != nullptr); + op_data->exp_lut = reinterpret_cast(raw_exp_lut); + void* one_over_one_plus_x_lut = context->AllocatePersistentBuffer( + context, sizeof(int16_t) * kInt16LUTArraySize); + TF_LITE_ENSURE(context, one_over_one_plus_x_lut != nullptr); + op_data->one_over_one_plus_x_lut = + reinterpret_cast(one_over_one_plus_x_lut); + } + + if (output->type == kTfLiteInt16) { + TF_LITE_ENSURE(context, + input->type == kTfLiteInt8 || input->type == kTfLiteInt16); + } else { + TF_LITE_ENSURE_EQ(context, input->type, output->type); + } + + // Populate LUT if required + if (input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + // exp LUT only used on negative values + // we consider exp(-10.0) is insignificant to accumulation + const int32_t range = std::numeric_limits::max() - + std::numeric_limits::min(); + LUTPopulate( + 10.0f / range, std::numeric_limits::max(), 2.0f / range, 0, + [](float value) { return std::exp(value); }, op_data->exp_lut); + + LUTPopulate( + 1.0f / range, std::numeric_limits::min(), 2.0f / range, 0, + [](float value) { return 1.0f / (1.0f + value); }, + op_data->one_over_one_plus_x_lut); + + op_data->zero_point = output->params.zero_point; + op_data->scale = output->params.scale; + } + + return kTfLiteOk; +} + +} // namespace TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, const TfLiteTensor* input, TfLiteTensor* output, const TfLiteSoftmaxParams* params, SoftmaxParams* op_data) { +#ifndef EI_TFLITE_DISABLE_SOFTMAX_IN_I16 + if (InitializeLutForInt16(context, input, output, op_data) != kTfLiteOk) { + return kTfLiteError; + } +#endif + if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { if (input->type == kTfLiteInt16) { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I16 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); TF_LITE_ENSURE_NEAR(context, output->params.scale, 1.f / 32768, (0.001f * 1.f / 32768)); } else { // input->type == kTfLiteInt8 +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteInt8); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, -32768); @@ -54,6 +121,11 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, // Calculate input_multiplier and input_left_shift if (input->type == kTfLiteInt16) { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I16 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", 
+ TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif int input_left_shift; double input_scale_beta_rescale = static_cast(input->params.scale) * @@ -64,6 +136,11 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, &input_left_shift); op_data->input_left_shift = input_left_shift; } else { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif int input_left_shift; tflite::PreprocessSoftmaxScaling( static_cast(params->beta), @@ -75,6 +152,11 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, op_data->input_left_shift); } } else { +#if EI_TFLITE_DISABLE_SOFTMAX_IN_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(input->type), input->type); + return kTfLiteError; +#endif TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); op_data->beta = static_cast(params->beta); @@ -82,59 +164,32 @@ TfLiteStatus CalculateSoftmaxParams(TfLiteContext* context, return kTfLiteOk; } -} // namespace - void* SoftmaxInit(TfLiteContext* context, const char* buffer, size_t length) { TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); return context->AllocatePersistentBuffer(context, sizeof(SoftmaxParams)); } TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, 0); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); TF_LITE_ENSURE(context, input != nullptr); TF_LITE_ENSURE(context, NumDimensions(input) >= 1); - TfLiteTensor* output = GetOutput(context, node, 0); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE(context, node->user_data != nullptr); SoftmaxParams* op_data = static_cast(node->user_data); - // Only allocate LUTs for KTfLiteInt16 data type - if (input->type == kTfLiteInt16) { - void* raw_exp_lut = context->AllocatePersistentBuffer( - context, sizeof(int16_t) * kInt16LUTArraySize); - TF_LITE_ENSURE(context, raw_exp_lut != nullptr); - op_data->exp_lut = reinterpret_cast(raw_exp_lut); - void* one_over_one_plus_x_lut = context->AllocatePersistentBuffer( - context, sizeof(int16_t) * kInt16LUTArraySize); - TF_LITE_ENSURE(context, one_over_one_plus_x_lut != nullptr); - op_data->one_over_one_plus_x_lut = - reinterpret_cast(one_over_one_plus_x_lut); - } - - if (output->type == kTfLiteInt16) { - TF_LITE_ENSURE(context, - input->type == kTfLiteInt8 || input->type == kTfLiteInt16); - } else { - TF_LITE_ENSURE_EQ(context, input->type, output->type); - } - - // Populate LUT if required - if (input->type == kTfLiteInt16) { - TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); - // exp LUT only used on negative values - // we consider exp(-10.0) is insignificant to accumulation - gen_lut([](float value) { return std::exp(value); }, -10.0f, 0.0f, - op_data->exp_lut, kInt16LUTArraySize); - gen_lut([](float value) { return 1.0f / (1.0f + value); }, 0.0f, 1.0f, - op_data->one_over_one_plus_x_lut, kInt16LUTArraySize); - op_data->zero_point = output->params.zero_point; - op_data->scale = output->params.scale; - } auto* params = static_cast(node->builtin_data); - return CalculateSoftmaxParams(context, input, 
output, params, op_data); + auto ret_val = + CalculateSoftmaxParams(context, input, output, params, op_data); + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + return ret_val; } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_batch_nd.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_batch_nd.cc similarity index 88% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_batch_nd.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_batch_nd.cc index 8b5659f..5a7f414 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_batch_nd.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_batch_nd.cc @@ -20,6 +20,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" namespace tflite { @@ -44,11 +45,15 @@ void* Init(TfLiteContext* context, const char* buffer, size_t length) { } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, input != nullptr && output != nullptr); TF_LITE_ENSURE(context, NumDimensions(input) >= kInputOutputMinDimensionNum); @@ -57,6 +62,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE(context, NumDimensions(output) <= kInputOutputMaxDimensionNum); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -98,8 +105,8 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); break; default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); return kTfLiteError; } return kTfLiteOk; @@ -108,14 +115,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace. 
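The Prepare() rewrites above (softmax_common and space_to_batch_nd, and most of the kernels that follow) share one pattern: full TfLiteTensor views are now obtained from the MicroContext as temporaries and released before returning, while Eval() sticks to TfLiteEvalTensor. A minimal skeleton of that pattern, built only from calls that appear in this patch:

TfLiteStatus ExamplePrepare(TfLiteContext* context, TfLiteNode* node) {
  MicroContext* micro_context = GetMicroContext(context);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // Temporary full tensors (dims, type, quantization params) for prepare-time
  // checks only.
  TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0);
  TF_LITE_ENSURE(context, output != nullptr);

  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);

  // Release the temporaries once the checks are done.
  micro_context->DeallocateTempTfLiteTensor(input);
  micro_context->DeallocateTempTfLiteTensor(output);
  return kTfLiteOk;
}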
TfLiteRegistration Register_SPACE_TO_BATCH_ND() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_depth.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_depth.cc new file mode 100644 index 0000000..2ab0faa --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/space_to_depth.cc @@ -0,0 +1,127 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/space_to_depth.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { + +constexpr int kInputTensor = 0; +constexpr int kOutputTensor = 0; +constexpr int kBatchRank = 0; +constexpr int kHeightRank = 1; +constexpr int kWidthRank = 2; +constexpr int kDepthRank = 3; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); + + auto data_type = output->type; + TF_LITE_ENSURE(context, + data_type == kTfLiteFloat32 || data_type == kTfLiteInt8); + TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); + + const int block_size = params->block_size; + const int input_height = input->dims->data[kHeightRank]; + const int input_width = input->dims->data[kWidthRank]; + int output_height = input_height / block_size; + int output_width = input_width / block_size; + + TF_LITE_ENSURE_EQ(context, input_height, output_height * block_size); + TF_LITE_ENSURE_EQ(context, input_width, output_width * block_size); + + // Relocate dims to the persistent storage arena before changing them, + // otherwise we'd be modifying temporary copies made by the interpreters each + // time they process the layer. 
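For intuition about the output dimensions computed in this Prepare(): SPACE_TO_DEPTH folds each block_size x block_size spatial block into the channel dimension, so height and width shrink by block_size while channels grow by block_size squared. A compile-time check of that arithmetic (standalone sketch, not part of the kernel):

struct Dims4 { int n, h, w, c; };

// Mirrors the shape arithmetic above: batch unchanged, spatial dims divided by
// block_size, channels multiplied by block_size * block_size.
constexpr Dims4 SpaceToDepthShape(Dims4 in, int block_size) {
  return {in.n, in.h / block_size, in.w / block_size,
          in.c * block_size * block_size};
}

// e.g. a 1x4x4x3 input with block_size 2 becomes 1x2x2x12.
static_assert(SpaceToDepthShape({1, 4, 4, 3}, 2).h == 2, "height shrinks");
static_assert(SpaceToDepthShape({1, 4, 4, 3}, 2).w == 2, "width shrinks");
static_assert(SpaceToDepthShape({1, 4, 4, 3}, 2).c == 12, "channels grow");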
+ TfLiteEvalTensor* output_eval = + micro::GetEvalOutput(context, node, kOutputTensor); + TF_LITE_ENSURE_OK(context, micro::CreateWritableTensorDimsWithCopy( + context, output, output_eval)); + + output->dims->data[kBatchRank] = input->dims->data[kBatchRank]; + output->dims->data[kHeightRank] = output_height; + output->dims->data[kWidthRank] = output_width; + output->dims->data[kDepthRank] = + input->dims->data[kDepthRank] * block_size * block_size; + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = + reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input = + micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = micro::GetEvalOutput(context, node, kOutputTensor); + + SpaceToDepthParams op_params; + op_params.block_size = params->block_size; + + switch (input->type) { // Already know in/out types are same. + case kTfLiteFloat32: + reference_ops::SpaceToDepth(op_params, micro::GetTensorShape(input), + micro::GetTensorData(input), + micro::GetTensorShape(output), + micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::SpaceToDepth(op_params, micro::GetTensorShape(input), + micro::GetTensorData(input), + micro::GetTensorShape(output), + micro::GetTensorData(output)); + break; + default: + MicroPrintf("SPACE_TO_DEPTH only supports FLOAT32 and INT8, got %s.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_SPACE_TO_DEPTH() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split.cc similarity index 87% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split.cc index 7071f5c..d4d5280 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split.cc @@ -18,6 +18,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace ops { @@ -68,7 +69,8 @@ TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, } TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - const TfLiteTensor* axis = GetInput(context, node, 0); + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 0); TF_LITE_ENSURE(context, axis != nullptr); // Dynamic output tensors are needed if axis tensor is not constant. @@ -76,6 +78,8 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { // constant axis tensor for now. 
TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), "Non constant axis tensor not supported"); + + micro_context->DeallocateTempTfLiteTensor(axis); return kTfLiteOk; } @@ -95,9 +99,6 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteFloat32: { return SplitImpl(context, node, input, axis_value); } - case kTfLiteUInt8: { - return SplitImpl(context, node, input, axis_value); - } case kTfLiteInt8: { return SplitImpl(context, node, input, axis_value); } @@ -108,11 +109,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return SplitImpl(context, node, input, axis_value); } default: - TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.", - TfLiteTypeGetName(input->type)); + MicroPrintf("Type %s currently not supported.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } -#undef TF_LITE_SPLIT return kTfLiteOk; } @@ -120,14 +120,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace split TfLiteRegistration Register_SPLIT() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/split::Prepare, - /*invoke=*/split::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, split::Prepare, split::Eval); } } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split_v.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split_v.cc similarity index 86% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split_v.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split_v.cc index a33aadf..ea4620a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split_v.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/split_v.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,11 +19,11 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace split_v { + +namespace { template TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, @@ -74,13 +74,14 @@ TfLiteStatus SplitImpl(TfLiteContext* context, TfLiteNode* node, TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); + MicroContext* micro_context = GetMicroContext(context); // Dynamic output tensors are needed if axis tensor is not constant. // But Micro doesn't support dynamic memory allocation, so we only support // constant axis tensor for now. 
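SplitImpl's body is not visible in these hunks; as a rough illustration of what an even SPLIT along one axis does to a row-major buffer, each output receives contiguous inner-size runs, interleaved across the outer dimensions. A standalone sketch under that assumption (not the kernel's code):

#include <vector>

// Even split of a row-major tensor along `axis` into `num_splits` outputs.
template <typename T>
std::vector<std::vector<T>> SplitEven(const std::vector<T>& input,
                                      const std::vector<int>& dims,
                                      int axis, int num_splits) {
  int outer = 1, inner = 1;
  for (int d = 0; d < axis; ++d) outer *= dims[d];
  for (int d = axis + 1; d < static_cast<int>(dims.size()); ++d) inner *= dims[d];
  const int out_axis = dims[axis] / num_splits;  // assumes an even split

  std::vector<std::vector<T>> outputs(num_splits);
  for (int o = 0; o < outer; ++o) {
    for (int i = 0; i < num_splits; ++i) {
      const T* src = input.data() + (o * dims[axis] + i * out_axis) * inner;
      outputs[i].insert(outputs[i].end(), src, src + out_axis * inner);
    }
  }
  return outputs;
}

// SplitEven<float>(data, /*dims=*/{2, 6}, /*axis=*/1, /*num_splits=*/3)
// yields three outputs of shape {2, 2}.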
- const TfLiteTensor* axis = GetInput(context, node, 2); + TfLiteTensor* axis = micro_context->AllocateTempInputTensor(node, 2); TF_LITE_ENSURE_MSG(context, IsConstantTensor(axis), "Non constant axis tensor not supported"); - + micro_context->DeallocateTempTfLiteTensor(axis); return kTfLiteOk; } @@ -110,26 +111,17 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { return SplitImpl(context, node, input, axis_value); } default: - TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.", - TfLiteTypeGetName(input->type)); + MicroPrintf("Type %s currently not supported.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; } -} // namespace split_v +} // namespace TfLiteRegistration Register_SPLIT_V() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/split_v::Prepare, - /*invoke=*/split_v::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squared_difference.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squared_difference.cc new file mode 100644 index 0000000..e45cbbe --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squared_difference.cc @@ -0,0 +1,247 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/binary_function.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { +constexpr int kInputTensor1 = 0; +constexpr int kInputTensor2 = 1; +constexpr int kOutputTensor = 0; + +struct OpData { + bool requires_broadcast; + ArithmeticParams arithmetic_params; +}; + +template +T SquaredDifference(T input1, T input2) { + const T difference = input1 - input2; + return difference * difference; +} + +void* SquaredDifferenceInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus SquaredDifferencePrepare(TfLiteContext* context, + TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + OpData* data = reinterpret_cast(node->user_data); + data->requires_broadcast = false; + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); + output->type = input2->type; + + // Ensure the quantization parameters are equivalent. + if (input1->type == kTfLiteInt8) { + const auto& input1_quantization_params = input1->params; + const auto& input2_quantization_params = input2->params; + const auto& output_quantization_params = output->params; + const int32_t integer_type_min = std::numeric_limits::min(); + const int32_t integer_type_max = std::numeric_limits::max(); + TF_LITE_ENSURE(context, + input1_quantization_params.zero_point >= integer_type_min); + TF_LITE_ENSURE(context, + input1_quantization_params.zero_point <= integer_type_max); + TF_LITE_ENSURE(context, + input2_quantization_params.zero_point >= integer_type_min); + TF_LITE_ENSURE(context, + input2_quantization_params.zero_point <= integer_type_max); + TF_LITE_ENSURE(context, + output_quantization_params.zero_point >= integer_type_min); + TF_LITE_ENSURE(context, + output_quantization_params.zero_point <= integer_type_max); + data->arithmetic_params.input1_offset = + -input1_quantization_params.zero_point; + data->arithmetic_params.input2_offset = + -input2_quantization_params.zero_point; + data->arithmetic_params.output_offset = + output_quantization_params.zero_point; + + // shift to make integer for scales. 
+ // 7 is selected so that maximum shifted result 255^2 * (1 << (7 * 2 )) + // does not overflow signed 32-bit integer + data->arithmetic_params.left_shift = 7; + const double twice_max_input_scale = + 2.0 * static_cast(std::max(input1_quantization_params.scale, + input2_quantization_params.scale)); + const double real_input1_multiplier = + static_cast(input1_quantization_params.scale) / + twice_max_input_scale; + double real_input2_multiplier = + static_cast(input2_quantization_params.scale) / + twice_max_input_scale; + const double real_output_multiplier = + (twice_max_input_scale * twice_max_input_scale) / + static_cast((1 << data->arithmetic_params.left_shift * 2) * + output_quantization_params.scale); + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->arithmetic_params.input1_multiplier, + &data->arithmetic_params.input1_shift); + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->arithmetic_params.input2_multiplier, + &data->arithmetic_params.input2_shift); + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->arithmetic_params.output_multiplier, + &data->arithmetic_params.output_shift); + data->arithmetic_params.quantized_activation_min = + std::numeric_limits::min(); + data->arithmetic_params.quantized_activation_max = + std::numeric_limits::max(); + } + + data->requires_broadcast = !HaveSameShapes(input1, input2); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +inline int8_t SquaredDifference(int8_t x, int8_t y, + const ArithmeticParams& params) { + const int32_t input1_val = params.input1_offset + x; + const int32_t input2_val = params.input2_offset + y; + const int32_t shifted_input1_val = input1_val * (1 << params.left_shift); + const int32_t shifted_input2_val = input2_val * (1 << params.left_shift); + const int32_t scaled_input1_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input1_val, params.input1_multiplier, params.input1_shift); + const int32_t scaled_input2_val = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + shifted_input2_val, params.input2_multiplier, params.input2_shift); + const int32_t raw_diff = scaled_input1_val - scaled_input2_val; + + // Max of this is 255^2 * (1 << 14), so won't overflow 32 bits. 
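The two overflow comments in this file (the one explaining left_shift = 7 and the one just above) can be checked directly: with each input multiplier at most one half, the rescaled difference is bounded in magnitude by 255 * 2^7, so its square is at most 255^2 * 2^14 = 1,065,369,600, below INT32_MAX. A compile-time sanity check (standalone, not part of the kernel):

#include <cstdint>

static_assert(INT64_C(255) * 255 * (INT64_C(1) << 14) == 1065369600,
              "255^2 * 2^14 = 1,065,369,600");
static_assert(INT64_C(255) * 255 * (INT64_C(1) << 14) <= INT32_MAX,
              "squared, shifted difference fits in a signed 32-bit int");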
+ const int32_t squared_raw_diff = raw_diff * raw_diff; + const int32_t raw_output = + MultiplyByQuantizedMultiplierSmallerThanOneExp( + squared_raw_diff, params.output_multiplier, params.output_shift) + + params.output_offset; + const int32_t clamped_output = + std::min(params.quantized_activation_max, + std::max(params.quantized_activation_min, raw_output)); + return static_cast(clamped_output); +} + +template +void EvalQuantizedSquaredDifference(TfLiteContext* context, TfLiteNode* node, + const OpData* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + const auto* op_data = static_cast(node->user_data); + if (data->requires_broadcast) { + reference_integer_ops::BroadcastBinaryFunction4DSlow( + op_data->arithmetic_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + reference_integer_ops::CheckArithmeticParams, SquaredDifference); + } else { + const int flat_size = tflite::micro::GetTensorShape(input1).FlatSize(); + reference_integer_ops::ElementWise( + flat_size, op_data->arithmetic_params, + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorData(output), + reference_integer_ops::CheckArithmeticParams, SquaredDifference); + } +} + +template +void EvalSquaredDifference(TfLiteContext* context, TfLiteNode* node, + const OpData* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + if (data->requires_broadcast) { + reference_ops::BroadcastBinaryFunction4DSlow( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), SquaredDifference); + } else { + reference_ops::BinaryFunction( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), SquaredDifference); + } +} + +TfLiteStatus SquaredDifferenceEval(TfLiteContext* context, TfLiteNode* node) { + OpData* data = reinterpret_cast(node->user_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (output->type == kTfLiteFloat32) { + EvalSquaredDifference(context, node, data, input1, input2, output); + } else if (output->type == kTfLiteInt32) { + EvalSquaredDifference(context, node, data, input1, input2, output); + } else if (output->type == kTfLiteInt8) { + EvalQuantizedSquaredDifference(context, node, data, input1, input2, + output); + } else { + MicroPrintf( + "SquaredDifference only supports FLOAT32, INT32 and INT8 now, got %d.", + output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} +} // namespace + +TfLiteRegistration Register_SQUARED_DIFFERENCE() { + return tflite::micro::RegisterOp( + SquaredDifferenceInit, SquaredDifferencePrepare, SquaredDifferenceEval); +} + +} // namespace tflite diff --git 
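Register_SQUARED_DIFFERENCE above, like the other registrations rewritten in this patch, replaces the long brace-initialized TfLiteRegistration with tflite::micro::RegisterOp(init, prepare, invoke). The helper itself is defined elsewhere in the SDK; conceptually it just fills in the three callbacks and leaves the remaining fields zero, roughly like this sketch (field names taken from the deleted brace initializers; the real helper may differ in detail):

// Rough stand-in for tflite::micro::RegisterOp(init, prepare, invoke).
TfLiteRegistration MakeRegistration(
    void* (*init)(TfLiteContext*, const char*, size_t),
    TfLiteStatus (*prepare)(TfLiteContext*, TfLiteNode*),
    TfLiteStatus (*invoke)(TfLiteContext*, TfLiteNode*)) {
  TfLiteRegistration r = {};  // free, profiling_string, builtin_code,
                              // custom_name and version stay zero/null
  r.init = init;              // may be nullptr, e.g. Register_SPACE_TO_DEPTH above
  r.prepare = prepare;
  r.invoke = invoke;
  return r;
}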
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squeeze.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squeeze.cc similarity index 70% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squeeze.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squeeze.cc index 2cfb39d..8a42410 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squeeze.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/squeeze.cc @@ -22,17 +22,25 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace { struct SqueezeContext { - SqueezeContext(TfLiteContext* context, TfLiteNode* node) - : params(reinterpret_cast(node->builtin_data)), - input(GetInput(context, node, 0)), - output(GetOutput(context, node, 0)) {} + SqueezeContext(TfLiteContext* context, TfLiteNode* node) { + params = reinterpret_cast(node->builtin_data); + micro_context = GetMicroContext(context); + input = micro_context->AllocateTempInputTensor(node, 0); + output = micro_context->AllocateTempOutputTensor(node, 0); + } + ~SqueezeContext() { + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + } + MicroContext* micro_context; TfLiteSqueezeParams* params; - const TfLiteTensor* const input; + TfLiteTensor* input; TfLiteTensor* output; }; @@ -80,32 +88,31 @@ TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { } TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - SqueezeContext op_context(context, node); + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); - if (op_context.input->type == kTfLiteString) { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(op_context.input->type), - op_context.input->type); + if (input->type == kTfLiteString) { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); return kTfLiteError; } - TF_LITE_ENSURE_EQ(context, op_context.input->bytes, op_context.output->bytes); - memcpy(op_context.output->data.raw, op_context.input->data.raw, - op_context.input->bytes); + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + size_t input_byte_size; + size_t output_byte_size; + TF_LITE_ENSURE_OK(context, + TfLiteEvalTensorByteLength(input, &input_byte_size)); + TF_LITE_ENSURE_OK(context, + TfLiteEvalTensorByteLength(output, &output_byte_size)); + + TF_LITE_ENSURE_EQ(context, input_byte_size, output_byte_size); + memcpy(output->data.raw, input->data.raw, input_byte_size); return kTfLiteOk; } } // namespace TfLiteRegistration Register_SQUEEZE() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/strided_slice.cpp 
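The rewritten squeeze Eval above treats SQUEEZE as a straight byte copy; only the shape changes, and that happens at prepare time. For intuition, the shape-only effect looks like this standalone sketch (not the kernel's code):

#include <algorithm>
#include <vector>

// Drop size-1 dimensions: all of them when squeeze_dims is empty, otherwise
// only the listed ones. The underlying data is untouched.
std::vector<int> SqueezeShape(const std::vector<int>& dims,
                              const std::vector<int>& squeeze_dims) {
  std::vector<int> out;
  for (int i = 0; i < static_cast<int>(dims.size()); ++i) {
    const bool listed =
        squeeze_dims.empty() ||
        std::find(squeeze_dims.begin(), squeeze_dims.end(), i) != squeeze_dims.end();
    if (dims[i] == 1 && listed) continue;
    out.push_back(dims[i]);
  }
  return out;
}

// SqueezeShape({1, 3, 1, 2}, {}) == {3, 2}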
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/strided_slice.cc similarity index 66% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/strided_slice.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/strided_slice.cc index e686f11..b8c5d71 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/strided_slice.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/strided_slice.cc @@ -1,4 +1,4 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -23,11 +23,11 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { -namespace ops { -namespace micro { -namespace strided_slice { + +namespace { constexpr int kInputTensor = 0; constexpr int kBeginTensor = 1; @@ -38,18 +38,27 @@ constexpr int kOutputTensor = 0; struct StridedSliceContext { StridedSliceContext(TfLiteContext* context, TfLiteNode* node) { params = reinterpret_cast(node->builtin_data); - input = GetInput(context, node, kInputTensor); - begin = GetInput(context, node, kBeginTensor); - end = GetInput(context, node, kEndTensor); - strides = GetInput(context, node, kStridesTensor); - output = GetOutput(context, node, kOutputTensor); + micro_context = GetMicroContext(context); + input = micro_context->AllocateTempInputTensor(node, kInputTensor); + begin = micro_context->AllocateTempInputTensor(node, kBeginTensor); + end = micro_context->AllocateTempInputTensor(node, kEndTensor); + strides = micro_context->AllocateTempInputTensor(node, kStridesTensor); + output = micro_context->AllocateTempOutputTensor(node, kOutputTensor); dims = NumDimensions(input); } + ~StridedSliceContext() { + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(begin); + micro_context->DeallocateTempTfLiteTensor(end); + micro_context->DeallocateTempTfLiteTensor(strides); + micro_context->DeallocateTempTfLiteTensor(output); + } const TfLiteStridedSliceParams* params; - const TfLiteTensor* input; - const TfLiteTensor* begin; - const TfLiteTensor* end; - const TfLiteTensor* strides; + MicroContext* micro_context; + TfLiteTensor* input; + TfLiteTensor* begin; + TfLiteTensor* end; + TfLiteTensor* strides; TfLiteTensor* output; int dims; }; @@ -147,6 +156,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetEvalOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteFloat32: + #if EI_TFLITE_DISABLE_STRIDED_SLICE_OUT_F32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + #endif + reference_ops::StridedSlice(op_params, tflite::micro::GetTensorShape(input), tflite::micro::GetTensorData(input), @@ -154,6 +169,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); break; case kTfLiteUInt8: + #if EI_TFLITE_DISABLE_STRIDED_SLICE_OUT_U8 + 
TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + #endif + reference_ops::StridedSlice( op_params, tflite::micro::GetTensorShape(input), tflite::micro::GetTensorData(input), @@ -161,32 +182,69 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { tflite::micro::GetTensorData(output)); break; case kTfLiteInt8: + #if EI_TFLITE_DISABLE_STRIDED_SLICE_OUT_I8 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + #endif + reference_ops::StridedSlice(op_params, tflite::micro::GetTensorShape(input), tflite::micro::GetTensorData(input), tflite::micro::GetTensorShape(output), tflite::micro::GetTensorData(output)); break; - default: + case kTfLiteInt16: + #if EI_TFLITE_DISABLE_STRIDED_SLICE_OUT_I16 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + #endif + + reference_ops::StridedSlice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt32: + #if EI_TFLITE_DISABLE_STRIDED_SLICE_OUT_I32 + TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + #endif + + reference_ops::StridedSlice( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteBool: + #if EI_TFLITE_DISABLE_STRIDED_SLICE_OUT_BOOL TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); + TfLiteTypeGetName(output->type), output->type); + return kTfLiteError; + #endif + + reference_ops::StridedSlice(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); return kTfLiteError; } return kTfLiteOk; } -} // namespace strided_slice + +} // namespace TfLiteRegistration Register_STRIDED_SLICE() { - return {/*init=*/strided_slice::Init, - /*free=*/nullptr, - /*prepare=*/strided_slice::Prepare, - /*invoke=*/strided_slice::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(Init, Prepare, Eval); } -} // namespace micro -} // namespace ops } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.cc new file mode 100644 index 0000000..266d6b5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.cc @@ -0,0 +1,168 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/sub.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +void* SubInit(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataSub)); +} + +void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, + const OpDataSub* data, const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { + float output_activation_min, output_activation_max; + CalculateActivationRange(params->activation, &output_activation_min, + &output_activation_max); + tflite::ArithmeticParams op_params; + SetActivationParams(output_activation_min, output_activation_max, &op_params); + if (data->requires_broadcast) { + tflite::reference_ops::BroadcastSubSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + tflite::reference_ops::SubWithActivation( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } +} + +TfLiteStatus EvalSubQuantized(TfLiteContext* context, TfLiteNode* node, + TfLiteSubParams* params, const OpDataSub* data, + const TfLiteEvalTensor* input1, + const TfLiteEvalTensor* input2, + TfLiteEvalTensor* output) { + tflite::ArithmeticParams op_params; + op_params.left_shift = data->left_shift; + op_params.input1_offset = data->input1_offset; + op_params.input1_multiplier = data->input1_multiplier; + op_params.input1_shift = data->input1_shift; + op_params.input2_offset = data->input2_offset; + op_params.input2_multiplier = data->input2_multiplier; + op_params.input2_shift = data->input2_shift; + op_params.output_offset = data->output_offset; + op_params.output_multiplier = 
data->output_multiplier; + op_params.output_shift = data->output_shift; + SetActivationParams(data->output_activation_min, data->output_activation_max, + &op_params); + bool need_broadcast = reference_ops::ProcessBroadcastShapes( + tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorShape(input2), &op_params); + + switch (output->type) { + case kTfLiteInt8: { + if (need_broadcast) { + tflite::reference_ops::BroadcastQuantSubSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + tflite::reference_ops::Sub( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + break; + } + case kTfLiteInt16: { + if (need_broadcast) { + tflite::reference_ops::BroadcastQuantSubSlow( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } else { + tflite::reference_ops::Sub( + op_params, tflite::micro::GetTensorShape(input1), + tflite::micro::GetTensorData(input1), + tflite::micro::GetTensorShape(input2), + tflite::micro::GetTensorData(input2), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + } + break; + } + default: + MicroPrintf("Quantized type %s not currently supported.", + TfLiteTypeGetName(output->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus SubEval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, kSubInputTensor1); + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, kSubInputTensor2); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kSubOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataSub& data = *(static_cast(node->user_data)); + + if (output->type == kTfLiteFloat32) { + EvalSub(context, node, params, &data, input1, input2, output); + } else if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + TF_LITE_ENSURE_OK(context, EvalSubQuantized(context, node, params, &data, + input1, input2, output)); + } else { + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(output->type), + output->type); + return kTfLiteError; + } + + return kTfLiteOk; +} + +TfLiteRegistration Register_SUB() { + return tflite::micro::RegisterOp(SubInit, SubPrepare, SubEval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.cpp deleted file mode 100644 index 0ca8dd1..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.cpp +++ /dev/null @@ -1,256 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. 
-You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/sub.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace ops { -namespace micro { -namespace sub { - -constexpr int kInputTensor1 = 0; -constexpr int kInputTensor2 = 1; -constexpr int kOutputTensor = 0; - -struct OpData { - bool requires_broadcast; - - // These fields are used in both the general 8-bit -> 8bit quantized path, - // and the special 16-bit -> 16bit quantized path - int input1_shift; - int input2_shift; - int32_t output_activation_min; - int32_t output_activation_max; - - // These fields are used only in the general 8-bit -> 8bit quantized path - int32_t input1_multiplier; - int32_t input2_multiplier; - int32_t output_multiplier; - int output_shift; - int left_shift; - int32_t input1_offset; - int32_t input2_offset; - int32_t output_offset; -}; - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteSubParams* params, - const TfLiteTensor* input1, - const TfLiteTensor* input2, TfLiteTensor* output, - OpData* data) { - data->requires_broadcast = !HaveSameShapes(input1, input2); - - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - // 8bit -> 8bit general quantized path, with general rescalings - data->input1_offset = -input1->params.zero_point; - data->input2_offset = -input2->params.zero_point; - data->output_offset = output->params.zero_point; - data->left_shift = 20; - const float twice_max_input_scale = - 2 * std::max(input1->params.scale, input2->params.scale); - const double real_input1_multiplier = - static_cast(input1->params.scale / twice_max_input_scale); - const double real_input2_multiplier = - static_cast(input2->params.scale / twice_max_input_scale); - const double real_output_multiplier = - static_cast(twice_max_input_scale / - ((1 << data->left_shift) * output->params.scale)); - - QuantizeMultiplierSmallerThanOneExp( - real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); - - QuantizeMultiplierSmallerThanOneExp( - real_output_multiplier, &data->output_multiplier, &data->output_shift); - - TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( - context, params->activation, output, &data->output_activation_min, - &data->output_activation_max)); - } - - return 
kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); - TF_LITE_ENSURE(context, input1 != nullptr); - const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); - TF_LITE_ENSURE(context, input2 != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - - TF_LITE_ENSURE_STATUS( - CalculateOpData(context, params, input1, input2, output, data)); - return kTfLiteOk; -} - -void EvalSub(TfLiteContext* context, TfLiteNode* node, TfLiteSubParams* params, - const OpData* data, const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, TfLiteEvalTensor* output) { - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - tflite::ArithmeticParams op_params; - SetActivationParams(output_activation_min, output_activation_max, &op_params); - if (data->requires_broadcast) { - tflite::reference_ops::BroadcastSubSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - tflite::reference_ops::SubWithActivation( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } -} - -TfLiteStatus EvalSubQuantized(TfLiteContext* context, TfLiteNode* node, - TfLiteSubParams* params, const OpData* data, - const TfLiteEvalTensor* input1, - const TfLiteEvalTensor* input2, - TfLiteEvalTensor* output) { - if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - tflite::ArithmeticParams op_params; - op_params.left_shift = data->left_shift; - op_params.input1_offset = data->input1_offset; - op_params.input1_multiplier = data->input1_multiplier; - op_params.input1_shift = data->input1_shift; - op_params.input2_offset = data->input2_offset; - op_params.input2_multiplier = data->input2_multiplier; - op_params.input2_shift = data->input2_shift; - op_params.output_offset = data->output_offset; - op_params.output_multiplier = data->output_multiplier; - op_params.output_shift = data->output_shift; - SetActivationParams(data->output_activation_min, - data->output_activation_max, &op_params); - bool need_broadcast = reference_ops::ProcessBroadcastShapes( - tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorShape(input2), &op_params); - - if (output->type == kTfLiteInt8) { - if (need_broadcast) { - tflite::reference_ops::BroadcastSubSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - 
tflite::reference_ops::Sub( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } else { - if (need_broadcast) { - tflite::reference_ops::BroadcastSubSlow( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } else { - tflite::reference_ops::Sub( - op_params, tflite::micro::GetTensorShape(input1), - tflite::micro::GetTensorData(input1), - tflite::micro::GetTensorShape(input2), - tflite::micro::GetTensorData(input2), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - } - } - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - - const TfLiteEvalTensor* input1 = - tflite::micro::GetEvalInput(context, node, kInputTensor1); - const TfLiteEvalTensor* input2 = - tflite::micro::GetEvalInput(context, node, kInputTensor2); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - if (output->type == kTfLiteFloat32) { - EvalSub(context, node, params, &data, input1, input2, output); - } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8) { - TF_LITE_ENSURE_OK(context, EvalSubQuantized(context, node, params, &data, - input1, input2, output)); - } else { - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(output->type), output->type); - return kTfLiteError; - } - - return kTfLiteOk; -} - -} // namespace sub - -TfLiteRegistration Register_SUB() { - return {/*init=*/sub::Init, - /*free=*/nullptr, - /*prepare=*/sub::Prepare, - /*invoke=*/sub::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace micro -} // namespace ops -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.h new file mode 100644 index 0000000..36608d5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.h @@ -0,0 +1,60 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_SUB_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_SUB_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { + +extern const int kSubInputTensor1; +extern const int kSubInputTensor2; +extern const int kSubOutputTensor; + +struct OpDataSub { + bool requires_broadcast; + + // These fields are used in both the general 8-bit -> 8bit quantized path, + // and the special 16-bit -> 16bit quantized path + int input1_shift; + int input2_shift; + int32_t output_activation_min; + int32_t output_activation_max; + + // These fields are used only in the general 8-bit -> 8bit quantized path + int32_t input1_multiplier; + int32_t input2_multiplier; + int32_t output_multiplier; + int output_shift; + int left_shift; + int32_t input1_offset; + int32_t input2_offset; + int32_t output_offset; +}; + +TfLiteStatus CalculateOpDataSub(TfLiteContext* context, TfLiteSubParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + TfLiteTensor* output, OpDataSub* data); + +TfLiteStatus SubPrepare(TfLiteContext* context, TfLiteNode* node); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_SUB_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub_common.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub_common.cc new file mode 100644 index 0000000..fcb8d4b --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/sub_common.cc @@ -0,0 +1,109 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/add.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/process_broadcast_shapes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/sub.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/sub.h" + +namespace tflite { + +const int kSubInputTensor1 = 0; +const int kSubInputTensor2 = 1; +const int kSubOutputTensor = 0; + +TfLiteStatus CalculateOpDataSub(TfLiteContext* context, TfLiteSubParams* params, + const TfLiteTensor* input1, + const TfLiteTensor* input2, + TfLiteTensor* output, OpDataSub* data) { + data->requires_broadcast = !HaveSameShapes(input1, input2); + + if (output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { + // 8bit -> 8bit general quantized path, with general rescalings + data->input1_offset = -input1->params.zero_point; + data->input2_offset = -input2->params.zero_point; + data->output_offset = output->params.zero_point; + + // The shift is set to 15 in case of 16-bit and 20 in case of 8-bit, + // accordingly. In case of 16-bit we have 65535 << 15 which is less than 1 + // << 31, therefore the addition will still fit in a 32 bit accumulator. + data->left_shift = output->type == kTfLiteInt16 ? 
15 : 20; + const float twice_max_input_scale = + 2 * std::max(input1->params.scale, input2->params.scale); + const double real_input1_multiplier = + static_cast(input1->params.scale) / + static_cast(twice_max_input_scale); + const double real_input2_multiplier = + static_cast(input2->params.scale) / + static_cast(twice_max_input_scale); + const double real_output_multiplier = + static_cast(twice_max_input_scale) / + ((1 << data->left_shift) * static_cast(output->params.scale)); + + QuantizeMultiplierSmallerThanOneExp( + real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); + + QuantizeMultiplierSmallerThanOneExp( + real_output_multiplier, &data->output_multiplier, &data->output_shift); + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, params->activation, output, &data->output_activation_min, + &data->output_activation_max)); + } + + return kTfLiteOk; +} + +TfLiteStatus SubPrepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpDataSub* data = static_cast(node->user_data); + auto* params = reinterpret_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input1 = + micro_context->AllocateTempInputTensor(node, kSubInputTensor1); + TF_LITE_ENSURE(context, input1 != nullptr); + TfLiteTensor* input2 = + micro_context->AllocateTempInputTensor(node, kSubInputTensor2); + TF_LITE_ENSURE(context, input2 != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kSubOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + + TF_LITE_ENSURE_STATUS( + CalculateOpDataSub(context, params, input1, input2, output, data)); + + micro_context->DeallocateTempTfLiteTensor(input1); + micro_context->DeallocateTempTfLiteTensor(input2); + micro_context->DeallocateTempTfLiteTensor(output); + return kTfLiteOk; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.cc new file mode 100644 index 0000000..e9b50e5 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.cc @@ -0,0 +1,339 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.h" + +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h" +#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/activation_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataSvdf)); +} + +TfLiteStatus EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data) { + cmsis_nn_dims input_dims; + input_dims.n = input_tensor->dims->data[0]; + input_dims.h = input_tensor->dims->data[1]; + + cmsis_nn_dims weights_feature_dims; + weights_feature_dims.n = weights_feature_tensor->dims->data[0]; + weights_feature_dims.h = weights_feature_tensor->dims->data[1]; + + cmsis_nn_dims weights_time_dims; + weights_time_dims.n = weights_time_tensor->dims->data[0]; + weights_time_dims.h = weights_time_tensor->dims->data[1]; + + cmsis_nn_dims bias_dims; + bias_dims.n = bias_tensor->dims->data[0]; + + cmsis_nn_dims state_dims; + state_dims.n = bias_tensor->dims->data[0]; + state_dims.h = bias_tensor->dims->data[1]; + + cmsis_nn_dims output_dims; + output_dims.n = output_tensor->dims->data[0]; + output_dims.h = output_tensor->dims->data[1]; + + cmsis_nn_svdf_params svdf_params; + svdf_params.rank = params->rank; + svdf_params.input_offset = data.input_zero_point; + svdf_params.output_offset = data.output_zero_point; + + svdf_params.input_activation.min = INT16_MIN; + svdf_params.input_activation.max = INT16_MAX; + + svdf_params.output_activation.min = INT8_MIN; + svdf_params.output_activation.max = INT8_MAX; + + cmsis_nn_per_tensor_quant_params in_quant_params; + in_quant_params.multiplier = data.effective_scale_1_a; + in_quant_params.shift = data.effective_scale_1_b; + + cmsis_nn_per_tensor_quant_params out_quant_params; + out_quant_params.multiplier = data.effective_scale_2_a; + out_quant_params.shift = data.effective_scale_2_b; + + TFLITE_DCHECK(context != nullptr); + TFLITE_DCHECK(context->GetScratchBuffer != nullptr); + + cmsis_nn_context scratch_ctx; + scratch_ctx.buf = static_cast( + context->GetScratchBuffer(context, data.scratch_tensor_index)); + + cmsis_nn_context scratch_output_ctx; + scratch_output_ctx.buf = static_cast( + context->GetScratchBuffer(context, data.scratch_output_tensor_index)); + + int8_t* 
output_data = tflite::micro::GetTensorData(output_tensor); + + switch (weights_time_tensor->type) { + case kTfLiteInt8: { + arm_svdf_s8( + &scratch_ctx, &scratch_output_ctx, &svdf_params, &in_quant_params, + &out_quant_params, &input_dims, + tflite::micro::GetTensorData(input_tensor), &state_dims, + tflite::micro::GetTensorData(activation_state_tensor), + &weights_feature_dims, + tflite::micro::GetTensorData(weights_feature_tensor), + &weights_time_dims, + tflite::micro::GetTensorData(weights_time_tensor), &bias_dims, + tflite::micro::GetTensorData(bias_tensor), &output_dims, + output_data); + return kTfLiteOk; + } + + case kTfLiteInt16: { + arm_svdf_state_s16_s8( + &scratch_ctx, &scratch_output_ctx, &svdf_params, &in_quant_params, + &out_quant_params, &input_dims, + tflite::micro::GetTensorData(input_tensor), &state_dims, + tflite::micro::GetTensorData(activation_state_tensor), + &weights_feature_dims, + tflite::micro::GetTensorData(weights_feature_tensor), + &weights_time_dims, + tflite::micro::GetTensorData(weights_time_tensor), + &bias_dims, tflite::micro::GetTensorData(bias_tensor), + &output_dims, output_data); + return kTfLiteOk; + } + + default: + MicroPrintf("Could not find matching function for type %s.", + TfLiteTypeGetName(weights_time_tensor->type)); + return kTfLiteError; + } +} + +TfLiteStatus EvalSvdf(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataSvdf& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kSvdfInputTensor); + const TfLiteEvalTensor* weights_feature = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsFeatureTensor); + const TfLiteEvalTensor* weights_time = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsTimeTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 5) + ? tflite::micro::GetEvalInput(context, node, kSvdfBiasTensor) + : nullptr; + TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( + context, node, kSvdfInputActivationStateTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kSvdfOutputTensor); + + switch (weights_time->type) { + case kTfLiteFloat32: { + EvalFloatSvdfReference( + context, node, input, weights_feature, weights_time, bias, params, + data.scratch_tensor_index, activation_state, output); + return kTfLiteOk; + } + + case kTfLiteInt8: + case kTfLiteInt16: { + return EvalIntegerSVDF(context, node, input, weights_feature, + weights_time, bias, params, activation_state, + output, data); + } + + default: + MicroPrintf("Type %s not currently supported.", + TfLiteTypeGetName(weights_feature->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus EvalSvdfInt8(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataSvdf& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kSvdfInputTensor); + const TfLiteEvalTensor* weights_feature = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsFeatureTensor); + const TfLiteEvalTensor* weights_time = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsTimeTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 5) + ? 
tflite::micro::GetEvalInput(context, node, kSvdfBiasTensor) + : nullptr; + TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( + context, node, kSvdfInputActivationStateTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kSvdfOutputTensor); + + TFLITE_DCHECK((weights_time->type == kTfLiteInt8) || + (weights_time->type == kTfLiteInt16)); + // Because of the TODO mentioned below, the int16 weight data type is not + // split into a seperate registration. + // TODO(#523): remove 16-bit code when no longer needed. + return EvalIntegerSVDF(context, node, input, weights_feature, weights_time, + bias, params, activation_state, output, data); +} + +} // namespace + +TfLiteRegistration Register_SVDF() { + return tflite::micro::RegisterOp(Init, PrepareSvdf, EvalSvdf); +} + +TfLiteRegistration Register_SVDF_INT8() { + return tflite::micro::RegisterOp(Init, PrepareSvdf, EvalSvdfInt8); +} + +} // namespace tflite + +#else +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/activation_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { +namespace { + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataSvdf)); +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + auto* params = reinterpret_cast(node->builtin_data); + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataSvdf& data = *(static_cast(node->user_data)); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kSvdfInputTensor); + const TfLiteEvalTensor* weights_feature = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsFeatureTensor); + const TfLiteEvalTensor* weights_time = + tflite::micro::GetEvalInput(context, node, kSvdfWeightsTimeTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 5) + ? 
tflite::micro::GetEvalInput(context, node, kSvdfBiasTensor) + : nullptr; + TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( + context, node, kSvdfInputActivationStateTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kSvdfOutputTensor); + + switch (weights_feature->type) { + case kTfLiteFloat32: { + EvalFloatSvdfReference( + context, node, input, weights_feature, weights_time, bias, params, + data.scratch_tensor_index, activation_state, output); + return kTfLiteOk; + break; + } + + case kTfLiteInt8: { + switch (weights_time->type) { + case kTfLiteInt16: { + EvalInt16SvdfReference(context, node, input, weights_feature, + weights_time, bias, params, activation_state, + output, data); + return kTfLiteOk; + break; + } + case kTfLiteInt8: { + EvalInt8SvdfReference(context, node, input, weights_feature, + weights_time, bias, params, activation_state, + output, data); + return kTfLiteOk; + break; + } + default: + MicroPrintf("Type %s not currently supported.", + TfLiteTypeGetName(weights_time->type)); + return kTfLiteError; + } + } + + default: + MicroPrintf("Type %s not currently supported.", + TfLiteTypeGetName(weights_feature->type)); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_SVDF() { + return tflite::micro::RegisterOp(Init, PrepareSvdf, Eval); +} + +} // namespace tflite + +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.cpp deleted file mode 100644 index 103d9b7..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.cpp +++ /dev/null @@ -1,586 +0,0 @@ -// Patched by Edge Impulse to include reference, CMSIS-NN and ARC kernels -#include "../../../../classifier/ei_classifier_config.h" -#if 0 == 1 -/* noop */ -#elif EI_CLASSIFIER_TFLITE_ENABLE_CMSIS_NN == 1 -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include -#include - -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nn_types.h" -#include "edge-impulse-sdk/CMSIS/NN/Include/arm_nnfunctions.h" -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/activation_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -struct OpData { - int32_t effective_scale_1_a; - int32_t effective_scale_2_a; - // b versions of each scale are kept at int since the numbers are just the - // shift value - typically between [-32, 32]. - int effective_scale_1_b; - int effective_scale_2_b; - int scratch_tensor_index; - int scratch_output_tensor_index; - - // Cached tensor zero point values for quantized operations. - int input_zero_point; - int output_zero_point; -}; - -// Input tensors. -constexpr int kInputTensor = 0; -constexpr int kWeightsFeatureTensor = 1; -constexpr int kWeightsTimeTensor = 2; -constexpr int kBiasTensor = 3; -// This is a variable tensor, and will be modified by this op. -constexpr int kInputActivationStateTensor = 4; - -// Output tensor. -constexpr int kOutputTensor = 0; - -/** - * This version of SVDF is specific to TFLite Micro. It contains the following - * differences between the TFLite version: - * - * 1.) Scratch tensor allocation - scratch tensors must be known ahead of time - * for the Micro interpreter. - * 2.) Output dimensions - the TFLite version determines output size and runtime - * and resizes the output tensor. Micro runtime does not support tensor - * resizing. - */ -static inline void ApplyTimeWeightsBiasAndActivation( - int batch_size, int memory_size, int num_filters, int num_units, int rank, - const float* const __restrict__ weights_time_ptr, - const float* const __restrict__ bias_ptr, TfLiteFusedActivation activation, - float* const __restrict__ state_ptr, float* const __restrict__ scratch_ptr, - float* const __restrict__ output_ptr) { - // Compute matmul(activation_state, weights_time). - for (int b = 0; b < batch_size; ++b) { - // Perform batched vector dot product: - float* scratch_ptr_batch = scratch_ptr + b * num_filters; - const float* vector1_ptr = weights_time_ptr; - const float* vector2_ptr = state_ptr + b * memory_size * num_filters; - for (int i = 0; i < num_filters; ++i) { - *scratch_ptr_batch = 0.f; - for (int j = 0; j < memory_size; ++j) { - *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; - } - scratch_ptr_batch++; - } - } - - // Initialize output with bias if provided. - if (bias_ptr) { - // VectorBatchVectorAssign - for (int i = 0; i < batch_size; ++i) { - float* output_data = output_ptr + i * num_units; - const float* bias_data = bias_ptr; - for (int j = 0; j < num_units; ++j) { - *output_data++ = *bias_data++; - } - } - } else { - float* output_data = output_ptr; - for (int i = 0; i < batch_size * num_units; ++i) { - *output_data++ = 0.0f; - } - } - - // Reduction sum. 
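// (Each of the num_units outputs accumulates `rank` consecutive filter results
// from the scratch buffer, i.e. output[b][i] sums scratch[b][i * rank + j] for
// j in [0, rank), which is what the loop below implements.)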
- for (int b = 0; b < batch_size; ++b) { - float* output_ptr_batch = output_ptr + b * num_units; - float* scratch_ptr_batch = scratch_ptr + b * num_filters; - - // Reduction sum vector - for (int i = 0; i < num_units; ++i) { - for (int j = 0; j < rank; j++) { - output_ptr_batch[i] += *scratch_ptr_batch++; - } - } - } - - // Apply activation. - for (int b = 0; b < batch_size; ++b) { - float* output_ptr_batch = output_ptr + b * num_units; - for (int i = 0; i < num_units; ++i) { - *output_ptr_batch = - tflite::ops::micro::ActivationValFloat(activation, *output_ptr_batch); - ++output_ptr_batch; - } - } -} - -inline void EvalFloatSVDF( - TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, - const TfLiteEvalTensor* weights_feature, - const TfLiteEvalTensor* weights_time, const TfLiteEvalTensor* bias, - const TfLiteSVDFParams* params, int scratch_tensor_index, - TfLiteEvalTensor* activation_state, TfLiteEvalTensor* output) { - const int rank = params->rank; - const int batch_size = input->dims->data[0]; - const int input_size = input->dims->data[1]; - const int num_filters = weights_feature->dims->data[0]; - const int num_units = num_filters / rank; - const int memory_size = weights_time->dims->data[1]; - - const float* weights_feature_ptr = - tflite::micro::GetTensorData(weights_feature); - const float* weights_time_ptr = - tflite::micro::GetTensorData(weights_time); - const float* bias_ptr = tflite::micro::GetTensorData(bias); - const float* input_ptr = tflite::micro::GetTensorData(input); - - float* state_ptr = tflite::micro::GetTensorData(activation_state); - - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(context->GetScratchBuffer != nullptr); - - float* scratch_ptr = static_cast( - context->GetScratchBuffer(context, scratch_tensor_index)); - - float* output_ptr = tflite::micro::GetTensorData(output); - - // Left shift the activation_state. - { - float* new_state_start = state_ptr; - const float* old_state_start = state_ptr + 1; - const float* old_state_end = - state_ptr + batch_size * num_filters * memory_size; - while (old_state_start != old_state_end) { - *new_state_start++ = *old_state_start++; - } - } - - // Note: no need to clear the latest activation, matmul is not accumulative. - - // Compute conv1d(inputs, weights_feature). - // The activation_state's rightmost column is used to save current cycle - // activation. This is achieved by starting at state_ptr[memory_size - 1] and - // having the stride equal to memory_size. 
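// Standalone sketch (hypothetical names, per-batch view only) of the state
// layout described above: state is [filter][memory], the newest activation of
// each filter goes into column (memory_size - 1), so the write pointer
// advances by memory_size per filter, exactly as in the loop below.

void WriteNewestActivationsSketch(const float* dot_products,  // one per filter
                                  float* state_for_batch,
                                  int num_filters, int memory_size) {
  float* slot = state_for_batch + (memory_size - 1);  // rightmost column, filter 0
  for (int f = 0; f < num_filters; ++f) {
    *slot = dot_products[f];   // newest sample for filter f
    slot += memory_size;       // jump to the next filter's memory row
  }
}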
- - // Perform batched matrix vector multiply operation: - { - const float* matrix = weights_feature_ptr; - const float* vector = input_ptr; - float* result = &state_ptr[memory_size - 1]; - float* result_in_batch = result; - for (int i = 0; i < batch_size; ++i) { - const float* matrix_ptr = matrix; - for (int j = 0; j < num_filters; ++j) { - float dot_prod = 0.0f; - const float* vector_in_batch = vector + i * input_size; - for (int k = 0; k < input_size; ++k) { - dot_prod += *matrix_ptr++ * *vector_in_batch++; - } - *result_in_batch = dot_prod; - result_in_batch += memory_size; - } - } - } - - ApplyTimeWeightsBiasAndActivation( - batch_size, memory_size, num_filters, num_units, rank, weights_time_ptr, - bias_ptr, params->activation, state_ptr, scratch_ptr, output_ptr); -} - -void EvalIntegerSVDF(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input_tensor, - const TfLiteEvalTensor* weights_feature_tensor, - const TfLiteEvalTensor* weights_time_tensor, - const TfLiteEvalTensor* bias_tensor, - const TfLiteSVDFParams* params, - TfLiteEvalTensor* activation_state_tensor, - TfLiteEvalTensor* output_tensor, const OpData& data) { - cmsis_nn_dims input_dims; - input_dims.n = input_tensor->dims->data[0]; - input_dims.h = input_tensor->dims->data[1]; - - cmsis_nn_dims weights_feature_dims; - weights_feature_dims.n = weights_feature_tensor->dims->data[0]; - weights_feature_dims.h = weights_feature_tensor->dims->data[1]; - - cmsis_nn_dims weights_time_dims; - weights_time_dims.n = weights_time_tensor->dims->data[0]; - weights_time_dims.h = weights_time_tensor->dims->data[1]; - - cmsis_nn_dims bias_dims; - bias_dims.n = bias_tensor->dims->data[0]; - - cmsis_nn_dims state_dims; - state_dims.n = bias_tensor->dims->data[0]; - state_dims.h = bias_tensor->dims->data[1]; - - cmsis_nn_dims output_dims; - output_dims.n = output_tensor->dims->data[0]; - output_dims.h = output_tensor->dims->data[1]; - - cmsis_nn_svdf_params svdf_params; - svdf_params.rank = params->rank; - svdf_params.input_offset = data.input_zero_point; - svdf_params.output_offset = data.output_zero_point; - - svdf_params.input_activation.min = INT16_MIN; - svdf_params.input_activation.max = INT16_MAX; - - svdf_params.output_activation.min = INT8_MIN; - svdf_params.output_activation.max = INT8_MAX; - - cmsis_nn_per_tensor_quant_params in_quant_params; - in_quant_params.multiplier = data.effective_scale_1_a; - in_quant_params.shift = data.effective_scale_1_b; - - cmsis_nn_per_tensor_quant_params out_quant_params; - out_quant_params.multiplier = data.effective_scale_2_a; - out_quant_params.shift = data.effective_scale_2_b; - - TFLITE_DCHECK(context != nullptr); - TFLITE_DCHECK(context->GetScratchBuffer != nullptr); - - cmsis_nn_context scratch_ctx; - scratch_ctx.buf = static_cast( - context->GetScratchBuffer(context, data.scratch_tensor_index)); - - cmsis_nn_context scratch_output_ctx; - scratch_output_ctx.buf = static_cast( - context->GetScratchBuffer(context, data.scratch_output_tensor_index)); - - int8_t* output_data = tflite::micro::GetTensorData(output_tensor); - arm_svdf_s8( - &scratch_ctx, &scratch_output_ctx, &svdf_params, &in_quant_params, - &out_quant_params, &input_dims, - (int8_t*)tflite::micro::GetTensorData(input_tensor), &state_dims, - (int16_t*)tflite::micro::GetTensorData(activation_state_tensor), - &weights_feature_dims, - (int8_t*)tflite::micro::GetTensorData(weights_feature_tensor), - &weights_time_dims, - (int16_t*)tflite::micro::GetTensorData(weights_time_tensor), - &bias_dims, 
(int32_t*)tflite::micro::GetTensorData(bias_tensor), - &output_dims, output_data); -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->builtin_data != nullptr); - - const auto* params = static_cast(node->builtin_data); - - // Validate Tensor Inputs (dtype depends on quantization): - // [0] = Input, {2, batch_size, input_size} - // [1] = Weights Feature, {2, num_filters, input_size} - // [2] = Weights Time, {2, num_filters, memory_size} - // [3] = Bias (optional), {1, num_units} - // [4] = Activation State (variable), - // {2, batch_size, memory_size * num_filters} - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* weights_feature = - GetInput(context, node, kWeightsFeatureTensor); - TF_LITE_ENSURE(context, weights_feature != nullptr); - const TfLiteTensor* weights_time = - GetInput(context, node, kWeightsTimeTensor); - TF_LITE_ENSURE(context, weights_time != nullptr); - const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); - const TfLiteTensor* activation_state = - GetInput(context, node, kInputActivationStateTensor); - TF_LITE_ENSURE(context, activation_state != nullptr); - - // Define input constants based on input tensor definition above: - const int rank = params->rank; - const int input_size = input->dims->data[1]; - const int batch_size = input->dims->data[0]; - const int num_filters = weights_feature->dims->data[0]; - TF_LITE_ENSURE_EQ(context, num_filters % rank, 0); - const int num_units = num_filters / rank; - const int memory_size = weights_time->dims->data[1]; - - // Validate Input Tensor: - TF_LITE_ENSURE(context, - input->type == kTfLiteFloat32 || input->type == kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); - - // Validate Tensor Output: - // [0] = float/int8, {2, batch_size, num_units} - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2); - TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size); - TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units); - - // Validate Weights Feature Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2); - TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size); - - // Validate Weights Time Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2); - TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters); - TF_LITE_ENSURE_EQ(context, weights_time->dims->data[1], memory_size); - - // Validate Optional Bias Input Tensor: - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units); - } - - // Validate Activation State Input Tensor: - TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2); - TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size); - TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1], - memory_size * num_filters); - // Since is_variable is not part of TFLiteEvalTensor, check is_variable here. 
- TF_LITE_ENSURE_EQ(context, activation_state->is_variable, true); - - TF_LITE_ENSURE_EQ(context, node->inputs->size, 5); - - TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); - - if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteInt16); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteInt16); - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32); - } - - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); - - const double effective_scale_1 = static_cast( - input->params.scale * weights_feature->params.scale / - activation_state->params.scale); - const double effective_scale_2 = - static_cast(activation_state->params.scale * - weights_time->params.scale / output->params.scale); - - // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready. - TF_LITE_ENSURE( - context, - std::abs(static_cast(bias->params.scale) - - static_cast(activation_state->params.scale * - weights_time->params.scale)) < 1e-5); - - QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a), - &(data->effective_scale_1_b)); - QuantizeMultiplier(effective_scale_2, &(data->effective_scale_2_a), - &(data->effective_scale_2_b)); - - data->input_zero_point = input->params.zero_point; - data->output_zero_point = output->params.zero_point; - - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - - const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( - context, batch_size * num_filters * sizeof(int32_t), - &(data->scratch_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_status); - - const TfLiteStatus scratch_output_status = - context->RequestScratchBufferInArena( - context, batch_size * num_units * sizeof(int32_t), - &(data->scratch_output_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_output_status); - } else { - TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32); - if (bias != nullptr) { - TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32); - } - TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); - - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - const TfLiteStatus scratch_status = context->RequestScratchBufferInArena( - context, batch_size * num_filters * sizeof(float), - &(data->scratch_tensor_index)); - TF_LITE_ENSURE_OK(context, scratch_status); - } - - return kTfLiteOk; -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* weights_feature = - tflite::micro::GetEvalInput(context, node, kWeightsFeatureTensor); - const TfLiteEvalTensor* weights_time = - tflite::micro::GetEvalInput(context, node, kWeightsTimeTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 5) - ? 
tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( - context, node, kInputActivationStateTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - switch (weights_feature->type) { - case kTfLiteFloat32: { - EvalFloatSVDF(context, node, input, weights_feature, weights_time, bias, - params, data.scratch_tensor_index, activation_state, - output); - return kTfLiteOk; - break; - } - - case kTfLiteInt8: { - EvalIntegerSVDF(context, node, input, weights_feature, weights_time, bias, - params, activation_state, output, data); - return kTfLiteOk; - break; - } - - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(weights_feature->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_SVDF() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#else -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.h" - -#include - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/activation_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" - -namespace tflite { -namespace { - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - auto* params = reinterpret_cast(node->builtin_data); - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kSvdfInputTensor); - const TfLiteEvalTensor* weights_feature = - tflite::micro::GetEvalInput(context, node, kSvdfWeightsFeatureTensor); - const TfLiteEvalTensor* weights_time = - tflite::micro::GetEvalInput(context, node, kSvdfWeightsTimeTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 5) - ? 
tflite::micro::GetEvalInput(context, node, kSvdfBiasTensor) - : nullptr; - TfLiteEvalTensor* activation_state = tflite::micro::GetMutableEvalInput( - context, node, kSvdfInputActivationStateTensor); - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kSvdfOutputTensor); - - switch (weights_feature->type) { - case kTfLiteFloat32: { - EvalFloatSvdfReference( - context, node, input, weights_feature, weights_time, bias, params, - data.scratch_tensor_index, activation_state, output); - return kTfLiteOk; - break; - } - - case kTfLiteInt8: { - EvalIntegerSvdfReference(context, node, input, weights_feature, - weights_time, bias, params, activation_state, - output, data); - return kTfLiteOk; - break; - } - - default: - TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", - TfLiteTypeGetName(weights_feature->type)); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_SVDF() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/PrepareSvdf, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite - -#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.h index 8a7eb0f..8bc068e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20,7 +20,7 @@ limitations under the License. namespace tflite { -struct OpData { +struct OpDataSvdf { int32_t effective_scale_1_a; int32_t effective_scale_2_a; // b versions of each scale are kept at int since the numbers are just the @@ -33,6 +33,7 @@ struct OpData { // Cached tensor zero point values for quantized operations. int input_zero_point; int output_zero_point; + int activation_state_zero_point; }; // Input tensors. @@ -46,16 +47,26 @@ extern const int kSvdfInputActivationStateTensor; // Output tensor. extern const int kSvdfOutputTensor; -// TensorflowLite Micro-specific reference implementation for Integer SVDF. -void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, - const TfLiteEvalTensor* input_tensor, - const TfLiteEvalTensor* weights_feature_tensor, - const TfLiteEvalTensor* weights_time_tensor, - const TfLiteEvalTensor* bias_tensor, - const TfLiteSVDFParams* params, - TfLiteEvalTensor* activation_state_tensor, - TfLiteEvalTensor* output_tensor, - const OpData& data); +void EvalInt8SvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data); + +// TODO(#523): remove 16-bit code when no longer needed. 
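// The int8 entry point above and the int16 one declared next both wrap the
// same templated reference kernel in svdf_common: the int16 variant keeps the
// original assumption of a symmetric (zero_point == 0) activation state, while
// the int8 variant also applies the state zero point cached in
// activation_state_zero_point above.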
+void EvalInt16SvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data); void EvalFloatSvdfReference( TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input, @@ -66,6 +77,23 @@ void EvalFloatSvdfReference( TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node); +// This is the most generic TfLiteRegistration. The actual supported types may +// still be target dependent. The only requirement is that every implementation +// (reference or optimized) must define this function. +TfLiteRegistration Register_SVDF(); + +#if defined(HEXAGON) || defined(CMSIS_NN) +TfLiteRegistration Register_SVDF_INT8(); + +#else +// Note that while this block gets used for both reference and optimized kernels +// that do not have any specialized implementations, the only goal here is to +// define fallback implementation that allow reference kernels to still be used +// from applications that call a more specific kernel variant. + +inline TfLiteRegistration Register_SVDF_INT8() { return Register_SVDF(); } + +#endif } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_KERNELS_SVDF_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf_common.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf_common.cc similarity index 76% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf_common.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf_common.cc index 038dac1..bdc36b8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf_common.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/svdf_common.cc @@ -48,6 +48,7 @@ const int kSvdfInputActivationStateTensor = 4; // This is a variable tensor, and will be modified by this op. const int kSvdfOutputTensor = 0; +template void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, const TfLiteEvalTensor* input_tensor, const TfLiteEvalTensor* weights_feature_tensor, @@ -56,7 +57,7 @@ void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, const TfLiteSVDFParams* params, TfLiteEvalTensor* activation_state_tensor, TfLiteEvalTensor* output_tensor, - const OpData& data) { + const OpDataSvdf& data) { const int n_rank = params->rank; const int n_batch = input_tensor->dims->data[0]; const int n_input = input_tensor->dims->data[1]; @@ -73,14 +74,13 @@ void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, context->GetScratchBuffer(context, data.scratch_output_tensor_index)); // Shift states. - int16_t* const state_ptr = - tflite::micro::GetTensorData(activation_state_tensor); + T* const state_ptr = tflite::micro::GetTensorData(activation_state_tensor); // Left shift the activation_state. 
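// Standalone sketch (hypothetical name) of the shift performed in the block
// below: every filter's memory window drops its oldest sample so that the
// feature matmul can write the newest one into the final slot.

template <typename T>
void ShiftStateLeftSketch(T* state, int total_elements /* batch * filters * memory */) {
  for (int i = 1; i < total_elements; ++i) {
    state[i - 1] = state[i];
  }
}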
{ - int16_t* new_state_start = state_ptr; - const int16_t* old_state_start = state_ptr + 1; - const int16_t* old_state_end = state_ptr + n_batch * n_filter * n_memory; + T* new_state_start = state_ptr; + const T* old_state_start = state_ptr + 1; + const T* old_state_end = state_ptr + n_batch * n_filter * n_memory; while (old_state_start != old_state_end) { *new_state_start++ = *old_state_start++; } @@ -90,14 +90,13 @@ void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, // Feature matmul. { - int16_t* state = - tflite::micro::GetTensorData(activation_state_tensor); + T* state = tflite::micro::GetTensorData(activation_state_tensor); const int8_t* input = tflite::micro::GetTensorData(input_tensor); const int8_t* weight_feature = tflite::micro::GetTensorData(weights_feature_tensor); - const int32_t output_max = std::numeric_limits::max(); - const int32_t output_min = std::numeric_limits::min(); - int16_t* result_in_batch = state + (n_memory - 1); + const int32_t output_max = std::numeric_limits::max(); + const int32_t output_min = std::numeric_limits::min(); + T* result_in_batch = state + (n_memory - 1); for (int b = 0; b < n_batch; b++) { const int8_t* matrix_ptr = weight_feature; for (int r = 0; r < n_filter; r++) { @@ -110,13 +109,10 @@ void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, dot_prod = MultiplyByQuantizedMultiplier( dot_prod, data.effective_scale_1_a, data.effective_scale_1_b); dot_prod = std::min(std::max(output_min, dot_prod), output_max); - // This assumes state is symmetrically quantized. Otherwise last bit of - // state should be initialized to its zero point and accumulate the - // dot_prod. - // Equivalent as the following: - // result_in_batch = zero point, which happens to be zero. - // result_in_batch += dot_prod_56. - *result_in_batch = dot_prod; + // The int16 version of the op assumes a zero_point of 0. This + // code accounts for the potentially non-zero zero_point for the int8 + // version of the op. + *result_in_batch = data.activation_state_zero_point + dot_prod; result_in_batch += n_memory; } } @@ -128,16 +124,18 @@ void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, int32_t* scratch_ptr_batch = scratch_tensor + b * n_filter; // Perform batched vector dot product: - const int16_t* vector1_ptr = - tflite::micro::GetTensorData(weights_time_tensor); - const int16_t* vector2_ptr = - tflite::micro::GetTensorData(activation_state_tensor) + + const T* vector1_ptr = + tflite::micro::GetTensorData(weights_time_tensor); + const T* vector2_ptr = + tflite::micro::GetTensorData(activation_state_tensor) + b * n_memory * n_filter; for (int i = 0; i < n_filter; i++) { *scratch_ptr_batch = 0; for (int j = 0; j < n_memory; j++) { - *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++; + *scratch_ptr_batch += + *vector1_ptr++ * + (*vector2_ptr++ - data.activation_state_zero_point); } scratch_ptr_batch++; } @@ -192,12 +190,46 @@ void EvalIntegerSvdfReference(TfLiteContext* context, TfLiteNode* node, } } } + +/** + * Generate two versions of the integer code. One with int16_t type for the + * time weights and the activation state, and another one with int8_t for the + * same. 
+ */ + +void EvalInt16SvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data) { + EvalIntegerSvdfReference( + context, node, input_tensor, weights_feature_tensor, weights_time_tensor, + bias_tensor, params, activation_state_tensor, output_tensor, data); +} + +void EvalInt8SvdfReference(TfLiteContext* context, TfLiteNode* node, + const TfLiteEvalTensor* input_tensor, + const TfLiteEvalTensor* weights_feature_tensor, + const TfLiteEvalTensor* weights_time_tensor, + const TfLiteEvalTensor* bias_tensor, + const TfLiteSVDFParams* params, + TfLiteEvalTensor* activation_state_tensor, + TfLiteEvalTensor* output_tensor, + const OpDataSvdf& data) { + EvalIntegerSvdfReference( + context, node, input_tensor, weights_feature_tensor, weights_time_tensor, + bias_tensor, params, activation_state_tensor, output_tensor, data); +} + static inline void ApplyTimeWeightsBiasAndActivation( int batch_size, int memory_size, int num_filters, int num_units, int rank, - const float* const __restrict__ weights_time_ptr, - const float* const __restrict__ bias_ptr, TfLiteFusedActivation activation, - float* const __restrict__ state_ptr, float* const __restrict__ scratch_ptr, - float* const __restrict__ output_ptr) { + const float* const weights_time_ptr, const float* const bias_ptr, + TfLiteFusedActivation activation, float* const state_ptr, + float* const scratch_ptr, float* const output_ptr) { // Compute matmul(activation_state, weights_time). for (int b = 0; b < batch_size; ++b) { // Perform batched vector dot product: @@ -332,6 +364,8 @@ TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) { const auto* params = static_cast(node->builtin_data); + MicroContext* micro_context = GetMicroContext(context); + // Validate Tensor Inputs (dtype depends on quantization): // [0] = Input, {2, batch_size, input_size} // [1] = Weights Feature, {2, num_filters, input_size} @@ -339,18 +373,19 @@ TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) { // [3] = Bias (optional), {1, num_units} // [4] = Activation State (variable), // {2, batch_size, memory_size * num_filters} - const TfLiteTensor* input = GetInput(context, node, kSvdfInputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kSvdfInputTensor); TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* weights_feature = - GetInput(context, node, kSvdfWeightsFeatureTensor); + TfLiteTensor* weights_feature = + micro_context->AllocateTempInputTensor(node, kSvdfWeightsFeatureTensor); TF_LITE_ENSURE(context, weights_feature != nullptr); - const TfLiteTensor* weights_time = - GetInput(context, node, kSvdfWeightsTimeTensor); + TfLiteTensor* weights_time = + micro_context->AllocateTempInputTensor(node, kSvdfWeightsTimeTensor); TF_LITE_ENSURE(context, weights_time != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kSvdfBiasTensor); - const TfLiteTensor* activation_state = - GetInput(context, node, kSvdfInputActivationStateTensor); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kSvdfBiasTensor); + TfLiteTensor* activation_state = micro_context->AllocateTempInputTensor( + node, kSvdfInputActivationStateTensor); TF_LITE_ENSURE(context, activation_state != 
nullptr); // Define input constants based on input tensor definition above: @@ -370,7 +405,8 @@ TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) { // Validate Tensor Output: // [0] = float/int8_t, {2, batch_size, num_units} TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - TfLiteTensor* output = GetOutput(context, node, kSvdfOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kSvdfOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2); TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size); @@ -401,31 +437,35 @@ TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, node->inputs->size, 5); TFLITE_DCHECK(node->user_data != nullptr); - OpData* data = static_cast(node->user_data); + OpDataSvdf* data = static_cast(node->user_data); if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteInt8); - TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteInt16); - TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteInt16); + TF_LITE_ENSURE(context, (weights_time->type == kTfLiteInt16) || + (weights_time->type == kTfLiteInt8)); + TF_LITE_ENSURE(context, (activation_state->type == kTfLiteInt16) || + (activation_state->type == kTfLiteInt8)); if (bias != nullptr) { TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteInt32); } TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteInt8); - const double effective_scale_1 = static_cast( - input->params.scale * weights_feature->params.scale / - activation_state->params.scale); + const double effective_scale_1 = + static_cast(input->params.scale) * + static_cast(weights_feature->params.scale) / + static_cast(activation_state->params.scale); const double effective_scale_2 = - static_cast(activation_state->params.scale * - weights_time->params.scale / output->params.scale); + static_cast(activation_state->params.scale) * + static_cast(weights_time->params.scale) / + static_cast(output->params.scale); // TODO(b/162018098): Use TF_LITE_ENSURE_NEAR when it is ready. 
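// The check just below enforces bias_scale ~= state_scale * time_scale, which
// presumably lets the int32 bias be accumulated without an extra rescale.
// Standalone sketch (hypothetical names) of the scale bookkeeping computed
// above; QuantizeMultiplier() then folds each double into a 32-bit fixed-point
// multiplier plus a power-of-two shift for the integer kernel.

struct EffectiveScalesSketch {
  double scale_1;  // rescales input * feature products into the state's domain
  double scale_2;  // rescales state * time products into the output's domain
};

EffectiveScalesSketch ComputeEffectiveScalesSketch(double input_scale,
                                                   double feature_scale,
                                                   double state_scale,
                                                   double time_scale,
                                                   double output_scale) {
  return {input_scale * feature_scale / state_scale,
          state_scale * time_scale / output_scale};
}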
TF_LITE_ENSURE( context, std::abs(static_cast(bias->params.scale) - - static_cast(activation_state->params.scale * - weights_time->params.scale)) < 1e-5); + (static_cast(activation_state->params.scale) * + static_cast(weights_time->params.scale))) < 1e-5); QuantizeMultiplier(effective_scale_1, &(data->effective_scale_1_a), &(data->effective_scale_1_b)); @@ -434,6 +474,7 @@ TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) { data->input_zero_point = input->params.zero_point; data->output_zero_point = output->params.zero_point; + data->activation_state_zero_point = activation_state->params.zero_point; TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); @@ -463,6 +504,12 @@ TfLiteStatus PrepareSvdf(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, scratch_status); } + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(weights_feature); + micro_context->DeallocateTempTfLiteTensor(weights_time); + micro_context->DeallocateTempTfLiteTensor(activation_state); + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(bias); return kTfLiteOk; } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tanh.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tanh.cc similarity index 56% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tanh.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tanh.cc index fa100ea..2ae32b6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tanh.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tanh.cc @@ -24,6 +24,7 @@ limitations under the License. 
#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" namespace tflite { @@ -48,16 +49,19 @@ void* TanhInit(TfLiteContext* context, const char* buffer, size_t length) { TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node, OpData* data) { + MicroContext* micro_context = GetMicroContext(context); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); TF_LITE_ENSURE(context, output != nullptr); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { + if (input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = static_cast(input->params.scale) * @@ -69,6 +73,62 @@ TfLiteStatus CalculateArithmeticOpData(TfLiteContext* context, TfLiteNode* node, data->input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 31); } + + if (input->type == kTfLiteInt16) { + static constexpr int kInputIntegerBits = 3; + static constexpr int kOutputFractionalBits = 15; + + // These operators are implemented in fixed-point arithmetic, + // which intrinsically wants symmetric ranges (zero_point==0) + // and power-of-two scales (power-of-two is abbreviated below as POT). + // While more general support would be possible by means of rescaling, + // that would add some overhead and some loss of accuracy and wouldn't + // be used at the moment as current quantized LSTM applications are + // happy with symmetric, power-of-two-scales quantization. So we just + // implement that narrow case only for now. + + TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); + TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); + + int input_scale_log2_rounded; + bool param_scale_pot = + CheckedLog2(input->params.scale, &input_scale_log2_rounded); + + data->input_left_shift = + (15 - kInputIntegerBits) + input_scale_log2_rounded; + param_scale_pot &= + (data->input_left_shift == 0 || data->input_left_shift == 1); + + if (param_scale_pot) { + data->input_multiplier = 0; + } else { + // Calculate multiplier to change input scale to 1/(3*4096) + // as required by the table lookup. + // The number 3.0 in the multiplier comes from here, + // because the interval is [-10.7, 10.7] instead of [-8, 8]. + // So, in this scaling +/-2^17 represents +/-10.7. 
+ + double multiplier = + static_cast(input->params.scale) * 4096.0 * 3.0; + data->input_left_shift = 0; + + while (multiplier <= 32767.0 / 2.0 && data->input_left_shift <= 30) { + data->input_left_shift++; + multiplier = multiplier * 2.0; + } + + data->input_multiplier = static_cast(multiplier); + } + + int output_scale_log2_rounded; + TF_LITE_ENSURE( + context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); + TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, + -kOutputFractionalBits); + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -77,10 +137,15 @@ TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = static_cast(node->user_data); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); TF_LITE_ENSURE(context, input != nullptr); data->input_zero_point = input->params.zero_point; - return CalculateArithmeticOpData(context, node, data); + TF_LITE_ENSURE_OK(context, CalculateArithmeticOpData(context, node, data)); + + micro_context->DeallocateTempTfLiteTensor(input); + return kTfLiteOk; } } // namespace @@ -103,25 +168,12 @@ TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } break; case kTfLiteInt16: { - TanhParams params; - params.input_left_shift = data.input_left_shift; - reference_ops::Tanh(params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - return kTfLiteOk; - } break; - case kTfLiteUInt8: { - TanhParams params; - params.input_zero_point = data.input_zero_point; - params.input_range_radius = data.input_range_radius; - params.input_multiplier = data.input_multiplier; - params.input_left_shift = data.input_left_shift; - reference_ops::Tanh(params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output)); - + reference_integer_ops::Tanh( + data.input_multiplier, data.input_left_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); return kTfLiteOk; } break; case kTfLiteInt8: { @@ -134,9 +186,9 @@ TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; } break; default: - TF_LITE_KERNEL_LOG(context, "Input %s, output %s not supported.", - TfLiteTypeGetName(input->type), - TfLiteTypeGetName(output->type)); + MicroPrintf("Input %s, output %s not supported.", + TfLiteTypeGetName(input->type), + TfLiteTypeGetName(output->type), context); return kTfLiteError; } } @@ -144,14 +196,8 @@ TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { } // namespace activations TfLiteRegistration Register_TANH() { - return {/*init=*/activations::TanhInit, - /*free=*/nullptr, - /*prepare=*/activations::TanhPrepare, - /*invoke=*/activations::TanhEval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp( + activations::TanhInit, activations::TanhPrepare, activations::TanhEval); } } // namespace micro } // namespace ops diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose.cc 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose.cc new file mode 100644 index 0000000..c0bd6e4 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose.cc @@ -0,0 +1,122 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +constexpr int kInputTensor = 0; +constexpr int kPermTensor = 1; +constexpr int kOutputTensor = 0; + +struct TransposeContext { + TransposeContext(TfLiteContext* context, TfLiteNode* node) { + micro_context = GetMicroContext(context); + input = micro_context->AllocateTempInputTensor(node, kInputTensor); + perm = micro_context->AllocateTempInputTensor(node, kPermTensor); + output = micro_context->AllocateTempOutputTensor(node, kOutputTensor); + } + ~TransposeContext() { + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(perm); + micro_context->DeallocateTempTfLiteTensor(output); + } + MicroContext* micro_context; + TfLiteTensor* input; + TfLiteTensor* perm; + TfLiteTensor* output; +}; + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + + TransposeContext op_context(context, node); + + // Ensure validity of input tensor. + TF_LITE_ENSURE_MSG(context, NumDimensions(op_context.input) <= 5, + "Transpose op only supports 1D-5D input arrays."); + TF_LITE_ENSURE_TYPES_EQ(context, op_context.input->type, + op_context.output->type); + + int dims = NumDimensions(op_context.input); + const int32_t* perm_data = GetTensorData(op_context.perm); + + // Ensure validity of the permutations tensor as a 1D tensor. 
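A minimal, self-contained sketch of what these checks admit, assuming a hypothetical 2x3 float input and perm = {1, 0} (a plain matrix transpose): perm must be 1-D, as long as the input rank, with every entry inside [0, rank), and the kernel only moves values around, it never recomputes them.

    #include <cstdio>

    int main() {
      const float input[2][3] = {{1, 2, 3}, {4, 5, 6}};
      const int perm[2] = {1, 0};      // 1-D, length == rank, entries in [0, rank)
      float output[3][2];              // output dim d gets the size of input dim perm[d]
      for (int r = 0; r < 2; ++r) {
        for (int c = 0; c < 3; ++c) {
          output[c][r] = input[r][c];  // pure data movement, no arithmetic
        }
      }
      (void)perm;                      // listed only to show the expected contents
      printf("%g\n", output[2][0]);    // prints 3
      return 0;
    }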
+ TF_LITE_ENSURE_EQ(context, NumDimensions(op_context.perm), 1); + TF_LITE_ENSURE_EQ(context, op_context.perm->dims->data[0], dims); + for (int idx = 0; idx < dims; ++idx) { + TF_LITE_ENSURE_MSG(context, (perm_data[idx] >= 0 && perm_data[idx] < dims), + "Transpose op permutations array is out of bounds."); + } + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* perm_tensor = + tflite::micro::GetEvalInput(context, node, kPermTensor); + const int32_t* perm_data = perm_tensor->data.i32; + const int size = perm_tensor->dims->data[0]; + TransposeParams params; + params.perm_count = size; + for (int i = 0; i < size; ++i) { + params.perm[i] = perm_data[i]; + } + + // Transpose kernel only does rearranging values not numeric evaluations + // on each cell. It's safe to implement per size of scalar type and this + // trick keeps the total code size in a reasonable range. + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + switch (input->type) { + case kTfLiteFloat32: + reference_ops::Transpose(params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + case kTfLiteInt8: + reference_ops::Transpose(params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output)); + break; + default: + MicroPrintf( + "Type %s is currently not supported by Transpose. " + "Only float32 and int8 is supported", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_TRANSPOSE() { + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); +} +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose_conv.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose_conv.cc new file mode 100644 index 0000000..411d4e0 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose_conv.cc @@ -0,0 +1,708 @@ +// Patched by Edge Impulse to include reference and hardware-accelerated kernels +#include "../../../../classifier/ei_classifier_config.h" +#if 0 == 1 +/* noop */ +#elif EI_CLASSIFIER_TFLITE_ENABLE_SILABS_MVP == 1 + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +using int8 = int8_t; +using int16 = int16_t; +using int32 = int32_t; +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose_conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" + +#include "sl_mvp_ml_transpose_conv2d.h" + +namespace tflite { +namespace sl { +namespace transpose_conv2d { + +constexpr int kFilterTensor = 1; +constexpr int kInputTensor = 2; +constexpr int kBiasTensor = 3; +constexpr int kOutputTensor = 0; + +// TransposeConv is quantized along dimension 0 of filter tensor. 
+// https://www.tensorflow.org/lite/performance/quantization_spec +constexpr int kConvQuantizedDimension = 0; + +enum op_support { kMvp, kTFLMrefF32, kTFLMrefI8 }; + +struct OpData { + op_support supported; + int scratch_buffer_index; + sli_mvp_ml_transpose_conv2d_s8_params_t op_params; + + // Per channel output multiplier and shift. + int32_t *per_channel_output_multiplier; + int32_t *per_channel_output_shift; +}; + +inline float16_t normalize_fp16(float f) +{ + return (float16_t)std::min(std::max(f, SLI_MVP_FP16_MIN), SLI_MVP_FP16_MAX); +} + +inline PaddingType RuntimePaddingType(TfLitePadding padding) +{ + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + +TfLiteStatus PopulateConvolutionQuantizationParams( + TfLiteContext* context, + const TfLiteTensor* input, + const TfLiteTensor* filter, + TfLiteTensor* output, + const TfLiteFusedActivation& activation, + int32_t* output_activation_min, int32_t* output_activation_max, + float16_t* per_channel_scalers, int num_channels, float accumulator_multipler) +{ + auto affine_quantization = + reinterpret_cast(filter->quantization.params); + + // Populate multiplier and shift using affine quantization. + const float input_scale = input->params.scale; + const float output_scale = output->params.scale; + const float* filter_scales = affine_quantization->scale->data; + + for (int i = 0; i < num_channels; ++i) { + // If per-tensor quantization parameter is specified, broadcast it along the + // quantization dimension. + const float filter_scale = filter_scales[i]; + const float effective_output_scale = (input_scale * filter_scale) / output_scale; + const float acc_output_scale = effective_output_scale * accumulator_multipler; + per_channel_scalers[i] = normalize_fp16(acc_output_scale); + } + + TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( + context, activation, output, output_activation_min, + output_activation_max)); + + return kTfLiteOk; +} + +void *Init(TfLiteContext* context, const char* buffer, size_t length) +{ + (void)buffer; + (void)length; + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) +{ + int scratch_buffer_size = 0; + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = static_cast(node->builtin_data); + + TfLiteTensor* output = GetOutput(context, node, kOutputTensor); + const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor); + const TfLiteTensor* input = GetInput(context, node, kInputTensor); + const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, output != nullptr); + TF_LITE_ENSURE(context, filter != nullptr); + + data->op_params.batches = input->dims->data[0]; + data->op_params.in_channels = input->dims->data[3]; + data->op_params.input_height = input->dims->data[1]; + data->op_params.input_width = input->dims->data[2]; + data->op_params.out_channels = filter->dims->data[kConvQuantizedDimension]; + data->op_params.output_height = output->dims->data[1]; + data->op_params.output_width = output->dims->data[2]; + data->op_params.filter_height = 
filter->dims->data[1]; + data->op_params.filter_width = filter->dims->data[2]; + data->op_params.input_offset = -input->params.zero_point; + data->op_params.output_offset = output->params.zero_point; + data->op_params.stride_height = params->stride_height; + data->op_params.stride_width = params->stride_width; + data->op_params.padding = params->padding == kTfLitePaddingSame; + + int dummy_height, dummy_width; + const auto padding = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, + 1, 1, //dilation_rate_height and dilation_rate_width + data->op_params.input_height, data->op_params.input_width, + data->op_params.filter_height, data->op_params.filter_width, + params->padding, + &dummy_height, &dummy_width); + + data->op_params.pad_height = padding.height; + data->op_params.pad_width = padding.width; + + const int num_channels = data->op_params.out_channels; + + if (input->type == kTfLiteInt8) { + if (sli_mvp_ml_transpose_conv2d_s8_is_supported(&data->op_params)) { + data->supported = kMvp; + scratch_buffer_size = GetTensorShape(output).FlatSize() * sizeof(float16_t); + + float16_t *bias_data = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(float16_t))); + if(bias != nullptr) { + data->op_params.bias = bias_data; + int32_t i32_bias; + for(int i = 0; i < num_channels; i++) { + i32_bias = bias->data.i32[i]; + bias_data[i] = float16_t(i32_bias * SLI_MVP_ACCUMULATOR_SCALER); + } + } else { + data->op_params.bias = nullptr; + } + + float16_t *scaler_data = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(float16_t))); + data->op_params.output_scaler = scaler_data; + TF_LITE_ENSURE_STATUS(PopulateConvolutionQuantizationParams( + context, input, filter, output, kTfLiteActNone, + reinterpret_cast(&data->op_params.output_activation_min), + reinterpret_cast(&data->op_params.output_activation_max), + scaler_data, num_channels, SLI_MVP_ACCUMULATOR_MULTIPLIER)); + + } else { + data->supported = kTFLMrefI8; + scratch_buffer_size = GetTensorShape(output).FlatSize() * sizeof(int32_t); + data->per_channel_output_multiplier = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + int32_t dummy_output_multiplier; + int dummy_output_shift; + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, kTfLiteActNone, + &dummy_output_multiplier, &dummy_output_shift, + reinterpret_cast(&data->op_params.output_activation_min), + reinterpret_cast(&data->op_params.output_activation_max), + data->per_channel_output_multiplier, + reinterpret_cast(data->per_channel_output_shift), + num_channels)); + } + + } else if (input->type == kTfLiteFloat32) { + data->supported = kTFLMrefF32; + } else { + TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", + TfLiteTypeGetName(input->type)); + return kTfLiteError; + } + + if(scratch_buffer_size > 0) { + TF_LITE_ENSURE_STATUS( + context->RequestScratchBufferInArena( + context, scratch_buffer_size, &data->scratch_buffer_index)); + } else { + data->scratch_buffer_index = -1; + } + + return kTfLiteOk; +} + +TfLiteStatus eval_mvp_int8(TfLiteContext* context, + OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + TfLiteEvalTensor* output) +{ + float16_t *scratch; + if (data->scratch_buffer_index > -1) { + scratch = 
reinterpret_cast(context->GetScratchBuffer(context, data->scratch_buffer_index)); + } else { + return kTfLiteError; + } + + data->op_params.scratch_buffer = scratch; + data->op_params.input = tflite::micro::GetTensorData(input); + data->op_params.output = tflite::micro::GetTensorData(output); + data->op_params.filter = tflite::micro::GetTensorData(filter); + + TF_LITE_ENSURE_EQ(context, SL_STATUS_OK, sli_mvp_ml_transpose_conv2d_s8(&data->op_params)); + + return kTfLiteOk; +} + +TfLiteStatus eval_tflm_int8(TfLiteContext* context, + OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) +{ + int32_t *scratch; + ConvParams op_params; + + if (data->scratch_buffer_index > -1) { + scratch = reinterpret_cast(context->GetScratchBuffer(context, data->scratch_buffer_index)); + } else { + return kTfLiteError; + } + + op_params.input_offset = data->op_params.input_offset; + op_params.output_offset = data->op_params.output_offset; + op_params.stride_height = data->op_params.stride_height; + op_params.stride_width = data->op_params.stride_width; + op_params.padding_values.height = data->op_params.pad_height; + op_params.padding_values.width = data->op_params.pad_width; + + reference_integer_ops::TransposeConv(op_params, + data->per_channel_output_multiplier, + data->per_channel_output_shift, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(const_cast(bias)), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + RuntimeShape(), + nullptr, + scratch); + return kTfLiteOk; +} + +TfLiteStatus eval_float(TfLiteConvParams* params, + const OpData* data, + const TfLiteEvalTensor* input, + const TfLiteEvalTensor* filter, + const TfLiteEvalTensor* bias, + TfLiteEvalTensor* output) +{ + ConvParams op_params; + + op_params.padding_type = RuntimePaddingType(params->padding); + op_params.padding_values.width = data->op_params.pad_width; + op_params.padding_values.height = data->op_params.pad_height; + op_params.stride_width = data->op_params.stride_width; + op_params.stride_height = data->op_params.stride_height; + + reference_ops::TransposeConv(op_params, + tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetTensorData(const_cast(bias)), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + RuntimeShape(), + nullptr); + return kTfLiteOk; +} + +TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node) +{ + TfLiteStatus status = kTfLiteError; + + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + auto* params = reinterpret_cast(node->builtin_data); + OpData* data = static_cast(node->user_data); + + const auto input = tflite::micro::GetEvalInput(context, node, kInputTensor); + const auto filter = tflite::micro::GetEvalInput(context, node, kFilterTensor); + const auto bias = NumInputs(node) == 4 + ? 
tflite::micro::GetEvalInput(context, node, kBiasTensor) + : nullptr; + auto output = tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + if (data->supported == kMvp) { + status = eval_mvp_int8(context, data, input, filter, output); + + } else if (data->supported == kTFLMrefI8) { + status = eval_tflm_int8(context, data, input, filter, bias, output); + + } else if (data->supported == kTFLMrefF32) { + status = eval_float(params, data, input, filter, bias, output); + } + + return status; +} + +} // namespace transpose_conv2d +} // namespace sl + +TfLiteRegistration Register_TRANSPOSE_CONV() { + return {/*init=*/sl::transpose_conv2d::Init, + /*free=*/nullptr, + /*prepare=*/sl::transpose_conv2d::Prepare, + /*invoke=*/sl::transpose_conv2d::Invoke, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; +} + +} // namespace tflite + +#else +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose_conv.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +namespace { + +// For the TfLite transpose_conv implementation, input tensor 0 corresponds to +// the OutputShapeTensor. However, since TFLM does not support dynamic tensors, +// the TFLM implementation ignores input tensor 0 and the only inputs we care +// about are kFilterTensor, kInputTensor and kBiasTensor. +constexpr int kFilterTensor = 1; +constexpr int kInputTensor = 2; +constexpr int kBiasTensor = 3; +constexpr int kOutputTensor = 0; + +// Conv is quantized along dimension 0: +// https://www.tensorflow.org/lite/performance/quantization_spec +constexpr int kConvQuantizedDimension = 0; + +struct OpData { + ConvParams params; + + // A scratch buffer is required for quantized implementations. + int scratch_buffer_index; + + // TODO(b/192090531): Remove this once all 8x16 transpose conv models use + // 64-bit biases. + int bias_converted_buffer_index; + + // Multiplier and shift arrays are required for the int8 implementation. 
+ int32_t* per_channel_output_multiplier; + int32_t* per_channel_output_shift; +}; + +inline PaddingType RuntimePaddingType(TfLitePadding padding) { + switch (padding) { + case TfLitePadding::kTfLitePaddingSame: + return PaddingType::kSame; + case TfLitePadding::kTfLitePaddingValid: + return PaddingType::kValid; + case TfLitePadding::kTfLitePaddingUnknown: + default: + return PaddingType::kNone; + } +} + +TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, + const TfLiteTransposeConvParams* params, int width, + int height, int filter_width, int filter_height, + const TfLiteType data_type, OpData* data) { + bool has_bias = node->inputs->size == 4; + // Check number of inputs/outputs + TF_LITE_ENSURE(context, has_bias || node->inputs->size == 3); + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + + // Matching GetWindowedOutputSize in TensorFlow. + auto padding = params->padding; + int unused_output_width; + int unused_output_height; + TfLitePaddingValues padding_values = ComputePaddingHeightWidth( + params->stride_height, params->stride_width, 1, + 1, // Dilation height and width are always 1 for transpose_conv. + height, width, filter_height, filter_width, padding, + &unused_output_height, &unused_output_width); + + data->params.padding_type = RuntimePaddingType(padding); + data->params.padding_values.width = padding_values.width; + data->params.padding_values.height = padding_values.height; + + // Note that quantized inference requires that all tensors have their + // parameters set. This is usually done during quantized training. + if (data_type != kTfLiteFloat32) { + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kFilterTensor); + TF_LITE_ENSURE(context, filter != nullptr); + TfLiteTensor* bias = + micro_context->AllocateTempInputTensor(node, kBiasTensor); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + int output_channels = filter->dims->data[kConvQuantizedDimension]; + + TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( + context, input, filter, bias, output, kTfLiteActNone, + &data->params.output_multiplier, &data->params.output_shift, + &data->params.quantized_activation_min, + &data->params.quantized_activation_max, + data->per_channel_output_multiplier, data->per_channel_output_shift, + output_channels)); + + // TODO(b/192090531): Remove this once all 8x16 transpose conv models use + // 64-bit biases. 
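Until then, the 16x8 path below sizes an int64 scratch buffer to the bias, and Eval later widens an int16 bias into it element by element; a minimal sketch of that widening, with a hypothetical length n:

    #include <cstdint>

    // Sign-extending copy; the bias values themselves are not rescaled.
    void WidenBias(const int16_t* bias, int64_t* scratch, int n) {
      for (int i = 0; i < n; ++i) {
        scratch[i] = bias[i];
      }
    }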
+ if (input->type == kTfLiteInt16) { + TFLITE_DCHECK(filter->type == kTfLiteInt8); + TFLITE_DCHECK(output->type == kTfLiteInt16); + if (bias->type == kTfLiteInt16) { + TFLITE_DCHECK( + context->RequestScratchBufferInArena( + context, GetTensorShape(bias).FlatSize() * sizeof(std::int64_t), + &(data->bias_converted_buffer_index)) == kTfLiteOk); + } + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + micro_context->DeallocateTempTfLiteTensor(output); + if (bias != nullptr) { + micro_context->DeallocateTempTfLiteTensor(bias); + } + } + return kTfLiteOk; +} + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + TFLITE_DCHECK(node->builtin_data != nullptr); + + OpData* data = static_cast(node->user_data); + const auto params = + static_cast(node->builtin_data); + + MicroContext* micro_context = GetMicroContext(context); + + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* filter = + micro_context->AllocateTempInputTensor(node, kFilterTensor); + TF_LITE_ENSURE(context, filter != nullptr); + + // Get height and width of the output. + const int width = SizeOfDimension(output, 2); + const int height = SizeOfDimension(output, 1); + const int filter_width = SizeOfDimension(filter, 2); + const int filter_height = SizeOfDimension(filter, 1); + + // Dynamically allocate per-channel quantization parameters. + const int num_channels = filter->dims->data[kConvQuantizedDimension]; + data->per_channel_output_multiplier = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + data->per_channel_output_shift = + static_cast(context->AllocatePersistentBuffer( + context, num_channels * sizeof(int32_t))); + + // Quantized kernels use an int32 scratch buffer. + if (input->type == kTfLiteInt8) { + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + TFLITE_DCHECK(context->RequestScratchBufferInArena( + context, + GetTensorShape(output).FlatSize() * sizeof(int32_t), + &(data->scratch_buffer_index)) == kTfLiteOk); + } + + // Quantized 16x8 kernels use an int64 scratch buffer. + if (input->type == kTfLiteInt16) { + TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); + TFLITE_DCHECK(context->RequestScratchBufferInArena( + context, + GetTensorShape(output).FlatSize() * sizeof(std::int64_t), + &(data->scratch_buffer_index)) == kTfLiteOk); + } + + // All per-channel quantized tensors need valid zero point and scale arrays. 
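The checks that follow assume the filter carries per-channel affine quantization (one scale per output channel with matching zero points); each channel's effective scale, input_scale * filter_scale[c] / output_scale, is what ends up folded into the per_channel_output_multiplier / per_channel_output_shift arrays. A minimal sketch of that decomposition with hypothetical scales (DecomposeScale is an illustrative stand-in, not the library's QuantizeMultiplier):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Express a real scale as multiplier * 2^(shift - 31), multiplier in Q31.
    void DecomposeScale(double scale, int32_t* multiplier, int* shift) {
      if (scale == 0.0) { *multiplier = 0; *shift = 0; return; }
      const double q = std::frexp(scale, shift);        // scale = q * 2^shift, q in [0.5, 1)
      int64_t q31 = static_cast<int64_t>(std::round(q * (1ll << 31)));
      if (q31 == (1ll << 31)) { q31 /= 2; ++*shift; }    // rounding pushed q up to 1.0
      *multiplier = static_cast<int32_t>(q31);
    }

    int main() {
      const double input_scale = 0.02, output_scale = 0.05;
      const double filter_scale_c = 0.003;               // hypothetical scale of channel c
      int32_t multiplier; int shift;
      DecomposeScale(input_scale * filter_scale_c / output_scale, &multiplier, &shift);
      printf("multiplier=%ld shift=%d\n", static_cast<long>(multiplier), shift);
      return 0;
    }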
+ if (input->type == kTfLiteInt8 || input->type == kTfLiteInt16) { + TF_LITE_ENSURE_EQ(context, filter->quantization.type, + kTfLiteAffineQuantization); + + const auto* affine_quantization = + static_cast(filter->quantization.params); + TF_LITE_ENSURE(context, affine_quantization); + TF_LITE_ENSURE(context, affine_quantization->scale); + TF_LITE_ENSURE(context, affine_quantization->zero_point); + + TF_LITE_ENSURE(context, + affine_quantization->scale->size == 1 || + affine_quantization->scale->size == + filter->dims->data[kConvQuantizedDimension]); + TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, + affine_quantization->zero_point->size); + } + + TF_LITE_ENSURE_STATUS(CalculateOpData(context, node, params, width, height, + filter_width, filter_height, + input->type, data)); + + // Offsets (zero points) + data->params.input_offset = -input->params.zero_point; + data->params.weights_offset = -filter->params.zero_point; + data->params.output_offset = output->params.zero_point; + + // Stride + data->params.stride_width = params->stride_width; + data->params.stride_height = params->stride_height; + + micro_context->DeallocateTempTfLiteTensor(output); + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(filter); + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + const TfLiteEvalTensor* filter = + tflite::micro::GetEvalInput(context, node, kFilterTensor); + const TfLiteEvalTensor* bias = + (NumInputs(node) == 4) + ? tflite::micro::GetEvalInput(context, node, kBiasTensor) + : nullptr; + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, kOutputTensor); + + TFLITE_DCHECK(node->user_data != nullptr); + const OpData& data = *(static_cast(node->user_data)); + + TF_LITE_ENSURE_EQ(context, input->type, output->type); + TF_LITE_ENSURE_MSG( + context, + input->type == filter->type || + (input->type == kTfLiteInt16 && filter->type == kTfLiteInt8), + "Hybrid models are not supported on TFLite Micro."); + + switch (input->type) { // Already know in/out types are same. 
+ case kTfLiteFloat32: { + const auto& params = + *(reinterpret_cast(node->builtin_data)); + ConvParams op_params = data.params; + CalculateActivationRange(params.activation, + &op_params.float_activation_min, + &op_params.float_activation_max); + + reference_ops::TransposeConv( + op_params, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr); + break; + } + case kTfLiteInt8: { + int32_t* scratch_buffer = static_cast( + context->GetScratchBuffer(context, data.scratch_buffer_index)); + reference_integer_ops::TransposeConv( + data.params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); + break; + } + case kTfLiteInt16: { + std::int64_t* scratch_buffer = static_cast( + context->GetScratchBuffer(context, data.scratch_buffer_index)); + // TODO(b/192090531): Remove this once all 8x16 transpose conv models use + // 64-bit biases. + if (bias != nullptr && bias->type == kTfLiteInt16) { + std::int64_t* bias_converted_buffer = + static_cast(context->GetScratchBuffer( + context, data.bias_converted_buffer_index)); + for (int i = 0; i < tflite::micro::GetTensorShape(bias).FlatSize(); + i++) { + bias_converted_buffer[i] = bias->data.i16[i]; + } + reference_integer_ops::TransposeConv( + data.params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), bias_converted_buffer, + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); + } else { + reference_integer_ops::TransposeConv( + data.params, data.per_channel_output_multiplier, + data.per_channel_output_shift, tflite::micro::GetTensorShape(input), + tflite::micro::GetTensorData(input), + tflite::micro::GetTensorShape(filter), + tflite::micro::GetTensorData(filter), + tflite::micro::GetTensorShape(bias), + tflite::micro::GetOptionalTensorData(bias), + tflite::micro::GetTensorShape(output), + tflite::micro::GetTensorData(output), + tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); + } + break; + } + default: + MicroPrintf("Type %s (%d) not supported.", TfLiteTypeGetName(input->type), + input->type); + return kTfLiteError; + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_TRANSPOSE_CONV() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite + +#endif diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose_conv.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose_conv.cpp deleted file mode 100644 index 452775a..0000000 --- 
a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/transpose_conv.cpp +++ /dev/null @@ -1,269 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/transpose_conv.h" - -#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/common.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/padding.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" - -namespace tflite { -namespace { - -// For the TfLite transpose_conv implementation, input tensor 0 corresponds to -// the OutputShapeTensor. However, since TFLM does not support dynamic tensors, -// the TFLM implementation ignores input tensor 0 and the only inputs we care -// about are kFilterTensor, kInputTensor and kBiasTensor. -constexpr int kFilterTensor = 1; -constexpr int kInputTensor = 2; -constexpr int kBiasTensor = 3; -constexpr int kOutputTensor = 0; - -// Conv is quantized along dimension 0: -// https://www.tensorflow.org/lite/performance/quantization_spec -constexpr int kConvQuantizedDimension = 0; - -struct OpData { - ConvParams params; - - // A scratch buffer is required for quantized implementations. - int scratch_buffer_index; - - // Multiplier and shift arrays are required for the int8 implementation. - int32_t* per_channel_output_multiplier; - int32_t* per_channel_output_shift; -}; - -inline PaddingType RuntimePaddingType(TfLitePadding padding) { - switch (padding) { - case TfLitePadding::kTfLitePaddingSame: - return PaddingType::kSame; - case TfLitePadding::kTfLitePaddingValid: - return PaddingType::kValid; - case TfLitePadding::kTfLitePaddingUnknown: - default: - return PaddingType::kNone; - } -} - -TfLiteStatus CalculateOpData(TfLiteContext* context, TfLiteNode* node, - const TfLiteConvParams* params, int width, - int height, int filter_width, int filter_height, - int out_width, int out_height, - const TfLiteType data_type, OpData* data) { - bool has_bias = node->inputs->size == 4; - // Check number of inputs/outputs - TF_LITE_ENSURE(context, has_bias || node->inputs->size == 3); - TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); - - // Matching GetWindowedOutputSize in TensorFlow. 
- auto padding = params->padding; - TfLitePaddingValues padding_values = ComputePaddingHeightWidth( - params->stride_height, params->stride_width, - params->dilation_height_factor, params->dilation_width_factor, height, - width, filter_height, filter_width, padding, &out_height, &out_width); - - data->params.padding_type = RuntimePaddingType(padding); - data->params.padding_values.width = padding_values.width; - data->params.padding_values.height = padding_values.height; - - // Note that quantized inference requires that all tensors have their - // parameters set. This is usually done during quantized training. - if (data_type != kTfLiteFloat32) { - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - const TfLiteTensor* bias = - GetOptionalInputTensor(context, node, kBiasTensor); - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - int output_channels = filter->dims->data[kConvQuantizedDimension]; - - TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( - context, input, filter, bias, output, params->activation, - &data->params.output_multiplier, &data->params.output_shift, - &data->params.quantized_activation_min, - &data->params.quantized_activation_max, - data->per_channel_output_multiplier, - reinterpret_cast(data->per_channel_output_shift), - output_channels)); - } - return kTfLiteOk; -} - -void* Init(TfLiteContext* context, const char* buffer, size_t length) { - TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); - return context->AllocatePersistentBuffer(context, sizeof(OpData)); -} - -TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { - TFLITE_DCHECK(node->user_data != nullptr); - TFLITE_DCHECK(node->builtin_data != nullptr); - - OpData* data = static_cast(node->user_data); - const auto params = static_cast(node->builtin_data); - - TfLiteTensor* output = GetOutput(context, node, kOutputTensor); - TF_LITE_ENSURE(context, output != nullptr); - const TfLiteTensor* input = GetInput(context, node, kInputTensor); - TF_LITE_ENSURE(context, input != nullptr); - const TfLiteTensor* filter = GetInput(context, node, kFilterTensor); - TF_LITE_ENSURE(context, filter != nullptr); - - int input_width = input->dims->data[2]; - int input_height = input->dims->data[1]; - int filter_width = filter->dims->data[2]; - int filter_height = filter->dims->data[1]; - int output_width = output->dims->data[2]; - int output_height = output->dims->data[1]; - - // Dynamically allocate per-channel quantization parameters. - const int num_channels = filter->dims->data[kConvQuantizedDimension]; - data->per_channel_output_multiplier = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - data->per_channel_output_shift = - static_cast(context->AllocatePersistentBuffer( - context, num_channels * sizeof(int32_t))); - - // Quantized kernels use an int32 scratch buffer. - if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { - TFLITE_DCHECK(context->RequestScratchBufferInArena != nullptr); - TFLITE_DCHECK(context->RequestScratchBufferInArena( - context, - GetTensorShape(output).FlatSize() * sizeof(int32_t), - &(data->scratch_buffer_index)) == kTfLiteOk); - } - - // All per-channel quantized tensors need valid zero point and scale arrays. 
- if (input->type == kTfLiteInt8) { - TF_LITE_ENSURE_EQ(context, filter->quantization.type, - kTfLiteAffineQuantization); - - const auto* affine_quantization = - static_cast(filter->quantization.params); - TF_LITE_ENSURE(context, affine_quantization); - TF_LITE_ENSURE(context, affine_quantization->scale); - TF_LITE_ENSURE(context, affine_quantization->zero_point); - - TF_LITE_ENSURE(context, - affine_quantization->scale->size == 1 || - affine_quantization->scale->size == - filter->dims->data[kConvQuantizedDimension]); - TF_LITE_ENSURE_EQ(context, affine_quantization->scale->size, - affine_quantization->zero_point->size); - } - - TF_LITE_ENSURE_STATUS(CalculateOpData( - context, node, params, input_width, input_height, filter_width, - filter_height, output_width, output_height, input->type, data)); - - // Offsets (zero points) - data->params.input_offset = -input->params.zero_point; - data->params.weights_offset = -filter->params.zero_point; - data->params.output_offset = output->params.zero_point; - - // Stride + dilation - data->params.stride_width = params->stride_width; - data->params.stride_height = params->stride_height; - data->params.dilation_width_factor = params->dilation_width_factor; - data->params.dilation_height_factor = params->dilation_height_factor; - - float output_activation_min, output_activation_max; - CalculateActivationRange(params->activation, &output_activation_min, - &output_activation_max); - data->params.float_activation_min = output_activation_min; - data->params.float_activation_max = output_activation_max; - return kTfLiteOk; -} // namespace conv - -TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { - const TfLiteEvalTensor* input = - tflite::micro::GetEvalInput(context, node, kInputTensor); - const TfLiteEvalTensor* filter = - tflite::micro::GetEvalInput(context, node, kFilterTensor); - const TfLiteEvalTensor* bias = - (NumInputs(node) == 4) - ? tflite::micro::GetEvalInput(context, node, kBiasTensor) - : nullptr; - TfLiteEvalTensor* output = - tflite::micro::GetEvalOutput(context, node, kOutputTensor); - - TFLITE_DCHECK(node->user_data != nullptr); - const OpData& data = *(static_cast(node->user_data)); - - TF_LITE_ENSURE_EQ(context, input->type, output->type); - TF_LITE_ENSURE_MSG(context, input->type == filter->type, - "Hybrid models are not supported on TFLite Micro."); - - switch (input->type) { // Already know in/out types are same. 
- case kTfLiteFloat32: { - reference_ops::TransposeConv( - data.params, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr); - break; - } - case kTfLiteInt8: { - int32_t* scratch_buffer = static_cast( - context->GetScratchBuffer(context, data.scratch_buffer_index)); - reference_integer_ops::TransposeConv( - data.params, data.per_channel_output_multiplier, - data.per_channel_output_shift, tflite::micro::GetTensorShape(input), - tflite::micro::GetTensorData(input), - tflite::micro::GetTensorShape(filter), - tflite::micro::GetTensorData(filter), - tflite::micro::GetTensorShape(bias), - tflite::micro::GetTensorData(bias), - tflite::micro::GetTensorShape(output), - tflite::micro::GetTensorData(output), - tflite::micro::GetTensorShape(nullptr), nullptr, scratch_buffer); - break; - } - default: - TF_LITE_KERNEL_LOG(context, "Type %s (%d) not supported.", - TfLiteTypeGetName(input->type), input->type); - return kTfLiteError; - } - return kTfLiteOk; -} - -} // namespace - -TfLiteRegistration Register_TRANSPOSE_CONV() { - return {/*init=*/Init, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; -} - -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.cc new file mode 100644 index 0000000..c0f4317 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.cc @@ -0,0 +1,194 @@ +/* Copyright 2023 Edge Impulse Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#define FLATBUFFERS_LOCALE_INDEPENDENT 0 +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flexbuffers.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include + +#define FEATURE_TYPE float + +namespace tflite { +namespace { + +struct OpDataTree { + uint32_t num_leaf_nodes; + uint32_t num_internal_nodes; + uint32_t num_trees; + const uint16_t* nodes_modes; + const uint16_t* nodes_featureids; + const float* nodes_values; + const uint16_t* nodes_truenodeids; + const uint16_t* nodes_falsenodeids; + const float* nodes_weights; + const uint8_t* nodes_classids; + const uint16_t* tree_root_ids; + const uint8_t* buffer_t; + size_t buffer_length; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + const uint8_t* buffer_t = reinterpret_cast(buffer); + const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap(); + + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + OpDataTree* data = static_cast(context->AllocatePersistentBuffer(context, sizeof(OpDataTree))); + + data->buffer_t = buffer_t; + data->buffer_length = length; + + data->num_leaf_nodes = m["num_leaf_nodes"].AsUInt32(); + data->num_internal_nodes = m["num_internal_nodes"].AsUInt32(); + data->num_trees = m["num_trees"].AsUInt32(); + + data->nodes_modes = (uint16_t*)(m["nodes_modes"].AsBlob().data()); + data->nodes_featureids = (uint16_t*)(m["nodes_featureids"].AsBlob().data()); + data->nodes_values = (float*)(m["nodes_values"].AsBlob().data()); + data->nodes_truenodeids = (uint16_t*)(m["nodes_truenodeids"].AsBlob().data()); + data->nodes_falsenodeids = (uint16_t*)(m["nodes_falsenodeids"].AsBlob().data()); + data->nodes_weights = (float*)(m["nodes_weights"].AsBlob().data()); + data->nodes_classids = (uint8_t*)(m["nodes_classids"].AsBlob().data()); + data->tree_root_ids = (uint16_t*)(m["tree_root_ids"].AsBlob().data()); + + return data; +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + + MicroContext* micro_context = GetMicroContext(context); + const OpDataTree* data = static_cast(node->user_data); + const flexbuffers::Map& m = flexbuffers::GetRoot(data->buffer_t, data->buffer_length).AsMap(); + + // The OOB checks below are very important to prevent vulnerabilities where an adversary sends + // us a malicious TFLite model, similar to: https://nvd.nist.gov/vuln/detail/CVE-2022-23560 + + int num_nodes = data->num_leaf_nodes + data->num_internal_nodes; + + // Check that the tree root ids are valid. 
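A minimal, self-contained sketch of the flattened layout these bounds checks protect, assuming a hypothetical single decision stump (one tree, one internal node, two leaves); node ids below num_internal_nodes are splits, ids at or above it address the leaf weight/class arrays, exactly as the traversal in Eval further down expects:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t num_internal_nodes   = 1;
      const uint16_t tree_root_ids[]      = {0};
      const uint16_t nodes_featureids[]   = {0};      // split on feature 0
      const float    nodes_values[]       = {0.5f};   // threshold
      const uint16_t nodes_truenodeids[]  = {1};      // feature <= 0.5 -> leaf id 1
      const uint16_t nodes_falsenodeids[] = {2};      // otherwise     -> leaf id 2
      const float    nodes_weights[]      = {0.9f, 0.1f};
      const uint8_t  nodes_classids[]     = {0, 1};

      const float in_data[] = {0.3f};
      float out_data[2]     = {0.0f, 0.0f};

      uint16_t ix = tree_root_ids[0];
      while (ix < num_internal_nodes) {
        ix = (in_data[nodes_featureids[ix]] <= nodes_values[ix])
                 ? nodes_truenodeids[ix]
                 : nodes_falsenodeids[ix];
      }
      ix -= num_internal_nodes;                       // index into the leaf arrays
      out_data[nodes_classids[ix]] += nodes_weights[ix];
      printf("%g %g\n", out_data[0], out_data[1]);    // prints 0.9 0
      return 0;
    }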
+ for (uint32_t i = 0; i < data->num_trees; i++) { + TF_LITE_ENSURE_EQ(context, data->tree_root_ids[i] < num_nodes, true); + TF_LITE_ENSURE_EQ(context, data->tree_root_ids[i] >= 0, true); + } + + // Check that all node indices are valid + for (uint32_t i = 0; i < data->num_internal_nodes; i++) { + TF_LITE_ENSURE_EQ(context, data->nodes_truenodeids[i] < num_nodes, true); + TF_LITE_ENSURE_EQ(context, data->nodes_truenodeids[i] >= 0, true); + TF_LITE_ENSURE_EQ(context, data->nodes_falsenodeids[i] < num_nodes, true); + TF_LITE_ENSURE_EQ(context, data->nodes_falsenodeids[i] >= 0, true); + } + + // Check all node arrays have the same length + TF_LITE_ENSURE_EQ(context, data->num_internal_nodes, m["nodes_featureids"].AsBlob().size()); + TF_LITE_ENSURE_EQ(context, data->num_internal_nodes, m["nodes_values"].AsBlob().size()); + TF_LITE_ENSURE_EQ(context, data->num_internal_nodes, m["nodes_truenodeids"].AsBlob().size()); + TF_LITE_ENSURE_EQ(context, data->num_internal_nodes, m["nodes_falsenodeids"].AsBlob().size()); + TF_LITE_ENSURE_EQ(context, data->num_leaf_nodes, m["nodes_weights"].AsBlob().size()); + TF_LITE_ENSURE_EQ(context, data->num_leaf_nodes, m["nodes_classids"].AsBlob().size()); + + // Check data types are supported. Currently we only support one combination. + TF_LITE_ENSURE_EQ(context, strncmp(m["tree_index_type"].AsString().c_str(), "uint16", 6), 0); + TF_LITE_ENSURE_EQ(context, strncmp(m["node_value_type"].AsString().c_str(), "float32", 7), 0); + TF_LITE_ENSURE_EQ(context, strncmp(m["class_index_type"].AsString().c_str(), "uint8", 5), 0); + TF_LITE_ENSURE_EQ(context, strncmp(m["class_weight_type"].AsString().c_str(), "float32", 7), 0); + TF_LITE_ENSURE_EQ(context, strncmp(m["equality_operator"].AsString().c_str(), "leq", 3), 0); + + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); + TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); + TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0); + TF_LITE_ENSURE(context, input != nullptr); + TF_LITE_ENSURE(context, NumDimensions(input) == 2); + TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0); + TF_LITE_ENSURE(context, output != nullptr); + + int input_width = SizeOfDimension(input, 1); + int output_width = SizeOfDimension(output, 1); + + // Check that all indices into the input/output tensor are valid + for (uint32_t i = 0; i < data->num_internal_nodes; i++) { + TF_LITE_ENSURE(context, data->nodes_featureids[i] < input_width); + TF_LITE_ENSURE(context, data->nodes_featureids[i] >= 0); + if (!m["nodes_modes"].AsBlob().IsTheEmptyBlob()) { + if (data->nodes_modes[i] == 0) { + TF_LITE_ENSURE(context, data->nodes_classids[i] < output_width); + TF_LITE_ENSURE(context, data->nodes_classids[i] >= 0); + } + } + } + + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + + const OpDataTree* data = static_cast(node->user_data); + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, 0); + const float *in_data = tflite::micro::GetTensorData(input); + + TfLiteEvalTensor* output = + tflite::micro::GetEvalOutput(context, node, 0); + float *out_data = tflite::micro::GetTensorData(output); + + const tflite::RuntimeShape output_shape = tflite::micro::GetTensorShape(output); + memset(out_data, 0, output_shape.FlatSize() * sizeof(float)); + + for (uint32_t i = 0; i < data->num_trees; i++) { + uint16_t ix = data->tree_root_ids[i]; + + while (ix < 
data->num_internal_nodes) { + float node_val = 0; + memcpy(&node_val, (data->nodes_values + ix), sizeof(float)); + + if (in_data[data->nodes_featureids[ix]] <= node_val) { + ix = data->nodes_truenodeids[ix]; + } else { + ix = data->nodes_falsenodeids[ix]; + } + } + ix -= data->num_internal_nodes; + + float weight = 0; + memcpy(&weight, (data->nodes_weights + ix), sizeof(float)); + out_data[data->nodes_classids[ix]] += weight; + } + + return kTfLiteOk; +} + + +} // namespace + +TfLiteRegistration* Register_TreeEnsembleClassifier() { + static TfLiteRegistration r = {Init, + nullptr, + Prepare, + Eval, + /*profiling_string=*/nullptr, + /*builtin_code=*/0, + /*custom_name=*/nullptr, + /*version=*/0}; + return &r; +} + +const char* GetString_TreeEnsembleClassifier() { return "TreeEnsembleClassifier"; } + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.h new file mode 100644 index 0000000..335c312 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.h @@ -0,0 +1,29 @@ +/* Copyright 2023 Edge Impulse Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_KERNELS_TREE_ENSEMBLE_CLASSIFIER_H_ +#define TENSORFLOW_LITE_MICRO_KERNELS_TREE_ENSEMBLE_CLASSIFIER_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" + +namespace tflite { + +TfLiteRegistration* Register_TreeEnsembleClassifier(); + +const char* GetString_TreeEnsembleClassifier(); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_KERNELS_TREE_ENSEMBLE_CLASSIFIER_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.cc new file mode 100644 index 0000000..7ff9a2f --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unidirectional_sequence_lstm.cc @@ -0,0 +1,589 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
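For reference, the two entry points declared in the header above are all a model needs in order to hook the custom op into an op resolver. A minimal sketch, assuming the generated model code uses a tflite::MicroMutableOpResolver; the helper name and resolver setup below are illustrative and not part of this patch:

    #include "edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h"
    #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.h"

    // Hypothetical helper: registers the custom tree op under the name the
    // compiled model refers to it by ("TreeEnsembleClassifier").
    template <unsigned int tOpCount>
    TfLiteStatus AddTreeEnsembleClassifier(
        tflite::MicroMutableOpResolver<tOpCount>& resolver) {
      return resolver.AddCustom(tflite::GetString_TreeEnsembleClassifier(),
                                tflite::Register_TreeEnsembleClassifier());
    }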
+==============================================================================*/ + +// Integer version of unidirectional sequence lstm. Only the standard LSTM +// (defined in the keras LSTM layer, e.g., no peephole etc.) is supported here. +// Currently used by the 16 bits activation case only + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/quantization_util.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_eval.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/lstm_shared.h" + +namespace tflite { + +namespace { +/*Helper Functions*/ + +// Interface to access all the TempTfLiteTensors of the LSTM kernel during the +// preparation phase. Can only be constructed through the constructor to avoid +// memory leakage. All TempTfLiteTensors will be deallocated through the +// destructor. +class LstmTensors { + public: + LstmTensors(const LstmTensors& other) = delete; + LstmTensors& operator=(const LstmTensors& other) = delete; + + LstmTensors(TfLiteContext* context, TfLiteNode* node) { + micro_context_ = GetMicroContext(context); + // 24 internal tensors. see lstm_shared.h for tensor names + for (size_t i = 0; i < 24; i++) { + internal_tensors_[i] = micro_context_->AllocateTempInputTensor(node, i); + } + output_tensor_ = + micro_context_->AllocateTempOutputTensor(node, kLstmOutputTensor); + } + + ~LstmTensors() { + for (size_t i = 0; i < 24; i++) { + if (internal_tensors_[i] != nullptr) { + micro_context_->DeallocateTempTfLiteTensor(internal_tensors_[i]); + } + } + micro_context_->DeallocateTempTfLiteTensor(output_tensor_); + } + + // Verify the LSTM internal tensor properties (e.g., type checks) + // Input/output/states/fc weights tensors are required for kernel evaulation. + // The state tensors should be variables. 
Variants of the standard LSTM + // are not supported here, therefore their corresponding tensors should be + // invalid + TfLiteStatus ValidateTensorStatus(TfLiteContext* context) const { + // Verify certain tensor properties + // input tensor + TF_LITE_ENSURE(context, internal_tensors_[kLstmInputTensor] != nullptr); + // hidden state + TF_LITE_ENSURE(context, + internal_tensors_[kLstmOutputStateTensor] != nullptr); + TF_LITE_ENSURE(context, + internal_tensors_[kLstmOutputStateTensor]->is_variable); + // hidden state becomes input so they must have the same type + TF_LITE_ENSURE_EQ(context, internal_tensors_[kLstmOutputStateTensor]->type, + internal_tensors_[kLstmInputTensor]->type); + // cell state + TF_LITE_ENSURE(context, internal_tensors_[kLstmCellStateTensor] != nullptr); + TF_LITE_ENSURE(context, + internal_tensors_[kLstmCellStateTensor]->is_variable); + // output + TF_LITE_ENSURE(context, output_tensor_ != nullptr); + // output type is the same as the input type (activations) + TF_LITE_ENSURE_EQ(context, output_tensor_->type, + internal_tensors_[kLstmInputTensor]->type); + + // weight tensors (1-9, see lstm_shared for index definition) + const auto weight_type = + internal_tensors_[kLstmInputToForgetWeightsTensor]->type; + for (size_t i = 1; i < 9; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] != nullptr); + TF_LITE_ENSURE_EQ(context, internal_tensors_[i]->type, weight_type); + } + + // bias tensors (12-15, see lstm_shared for index definition) + const auto bias_type = internal_tensors_[kLstmForgetGateBiasTensor]->type; + for (size_t i = 12; i < 16; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] != nullptr); + TF_LITE_ENSURE_EQ(context, internal_tensors_[i]->type, bias_type); + } + // Tensors from LSTM variants are invalid + // No peephole + for (size_t i = 9; i < 12; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr); + } + // No projection + for (size_t i = 16; i < 18; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr); + } + // No internal layer norm + for (size_t i = 20; i < 24; i++) { + TF_LITE_ENSURE(context, internal_tensors_[i] == nullptr); + } + return kTfLiteOk; + } + + // Internal tensors. see lstm_shared.h for tensor names + const TfLiteTensor* GetInternalTensor(const int tensor_index) const { + return internal_tensors_[tensor_index]; + } + + const TfLiteTensor* HiddenStateTensor() const { + return internal_tensors_[kLstmOutputStateTensor]; + } + const TfLiteTensor* CellStateTensor() const { + return internal_tensors_[kLstmCellStateTensor]; + } + const TfLiteTensor* OutputTensor() const { return output_tensor_; } + + private: + // see lstm_shared.h for tensor names + MicroContext* micro_context_; + TfLiteTensor* internal_tensors_[24]; + TfLiteTensor* output_tensor_; +}; + +// Deduce the size information (Batch (B), Time Steps (T), Input dimension (I), +// State dimension (S)) that defines the LSTM using the input and hidden state +// tensor +LstmSizeInfo CreateLstmSizeInfo( + const bool time_major, const TfLiteIntArray* input_tensor_shape, + const TfLiteIntArray* hidden_state_tensor_shape) { + LstmSizeInfo size_info; + size_info.time_major = time_major; + size_info.batch_size = + time_major ? input_tensor_shape->data[1] : input_tensor_shape->data[0]; + size_info.time_steps = + time_major ? 
input_tensor_shape->data[0] : input_tensor_shape->data[1]; + size_info.input_dimension = input_tensor_shape->data[2]; + size_info.state_dimension = hidden_state_tensor_shape->data[1]; + return size_info; +} + +TfLiteStatus ValidateWeightTensorSize(TfLiteContext* context, + const TfLiteTensor* tensor, int dim1_size, + int dim2_size) { + TF_LITE_ENSURE_EQ(context, tensor->dims->size, 2); + TF_LITE_ENSURE_EQ(context, tensor->dims->data[0], dim1_size); + TF_LITE_ENSURE_EQ(context, tensor->dims->data[1], dim2_size); + return kTfLiteOk; +} + +TfLiteStatus ValidateBiasTensorSize(TfLiteContext* context, + const TfLiteTensor* tensor, int size) { + TF_LITE_ENSURE_EQ(context, tensor->dims->size, 1); + TF_LITE_ENSURE_EQ(context, tensor->dims->data[0], size); + return kTfLiteOk; +} + +// Go through every tensors and make sure their shape match the kernel +// configuration +TfLiteStatus ValidateTensorSize(TfLiteContext* context, + const LstmTensors& tensors, + const LstmSizeInfo& size_info) { + // Input FC weights + for (size_t i = 1; i < 5; i++) { + TF_LITE_ENSURE_OK( + context, ValidateWeightTensorSize(context, tensors.GetInternalTensor(i), + size_info.state_dimension, + size_info.input_dimension)); + } + // Recurrent FC weights + for (size_t i = 5; i < 9; i++) { + TF_LITE_ENSURE_OK( + context, ValidateWeightTensorSize(context, tensors.GetInternalTensor(i), + size_info.state_dimension, + size_info.state_dimension)); + } + // Biases + for (size_t i = 12; i < 16; i++) { + TF_LITE_ENSURE_OK( + context, ValidateBiasTensorSize(context, tensors.GetInternalTensor(i), + size_info.state_dimension)); + } + + // Check the shape of input state tensors. + // These tensor may be 1D or 2D. It's fine as long as the total size is + // correct. + TF_LITE_ENSURE_EQ(context, NumElements(tensors.HiddenStateTensor()), + size_info.batch_size * size_info.state_dimension); + TF_LITE_ENSURE_EQ(context, NumElements(tensors.CellStateTensor()), + size_info.batch_size * size_info.state_dimension); + + // Check the shape of output tensor against that of input tensor + TF_LITE_ENSURE_EQ(context, tensors.OutputTensor()->dims->size, 3); + TF_LITE_ENSURE_EQ(context, + tensors.GetInternalTensor(kLstmInputTensor)->dims->data[0], + tensors.OutputTensor()->dims->data[0]); + TF_LITE_ENSURE_EQ(context, + tensors.GetInternalTensor(kLstmInputTensor)->dims->data[1], + tensors.OutputTensor()->dims->data[1]); + TF_LITE_ENSURE_EQ(context, tensors.OutputTensor()->dims->data[2], + size_info.state_dimension); + return kTfLiteOk; +} + +// Wrapper function to create gate parameters for the four internal LSTM gates +TfLiteStatus CreateGateParams( + TfLiteContext* context, + /*Input tensors*/ + const TfLiteTensor* input, const TfLiteTensor* input_weight, + const TfLiteTensor* input_bias, + /*Hidden state tensors*/ + const TfLiteTensor* hidden_state, const TfLiteTensor* hidden_state_weight, + const TfLiteTensor* hidden_state_bias, + /*Scale of the fc output (input to non-linear activation)*/ + const float nonlinear_activation_input_scale, const TfLiteType cell_type, + tflite::GateParameters& gate_params) { + // A temp tflite tensor to represent the output of fc operation. 
Only the data + // type and quantization parameters are set since it is only used for + // parameter calculations + TfLiteTensor fc_output_temp; + fc_output_temp.type = cell_type; + fc_output_temp.params.scale = nonlinear_activation_input_scale; + fc_output_temp.params.zero_point = 0; // symmetrical quantized + + // A temp fc opdata to reuse the helper function on creating fc parameters + tflite::OpDataFullyConnected fc_data_temp; + // TODO(b/265853320): due to the lack of precision for the float scale, + // scale_diff / output_scale <= 0.02 (potentially requires 1e-8 precision) can + // not be satisified for the bias. Here we rely on the correctiveness of the + // conversion process (set input_bias=nullptr to avoid checking) for + // tensor scales + TF_LITE_ENSURE_STATUS(CalculateOpDataFullyConnected( + context, kTfLiteActNone, input->type, input, input_weight, + /*input_bias=*/nullptr, &fc_output_temp, &fc_data_temp)); + gate_params.input_fc_params = FullyConnectedParamsQuantized(fc_data_temp); + double real_multiplier = 0.0; + GetQuantizedConvolutionMultipler(context, input, input_weight, nullptr, + &fc_output_temp, &real_multiplier); + + TF_LITE_ENSURE_STATUS(CalculateOpDataFullyConnected( + context, kTfLiteActNone, hidden_state->type, hidden_state, + hidden_state_weight, hidden_state_bias, &fc_output_temp, &fc_data_temp)); + gate_params.recurrent_fc_params = FullyConnectedParamsQuantized(fc_data_temp); + return kTfLiteOk; +} + +// Create parameters for element wise multiplication that happens in a) cell +// state update ; b) hidden state update +// Note that all the output of gates are symmetrically quantized so only scales +// are required for input. However, during the hidden state update phase, the +// output is the updated hidden state, which is asymmetrically quantized. 
Thus +// output may require zero point +tflite::ArithmeticParams CreateInterGateMulParams(const float input1_scale, + const float input2_scale, + const float output_scale, + const TfLiteType output_type, + const int output_zp = 0) { + tflite::ArithmeticParams op_params = {}; + if (output_type == kTfLiteInt16) { + op_params.quantized_activation_min = std::numeric_limits::min(); + op_params.quantized_activation_max = std::numeric_limits::max(); + } else if (output_type == kTfLiteInt8) { + op_params.quantized_activation_min = std::numeric_limits::min(); + op_params.quantized_activation_max = std::numeric_limits::max(); + } + + op_params.input1_offset = 0; // symmetric + op_params.input2_offset = 0; // symmetric + op_params.output_offset = output_zp; + + const double input_product_scale = + static_cast(input1_scale) * static_cast(input2_scale); + double effective_scale = + input_product_scale / static_cast(output_scale); + + QuantizeMultiplier(effective_scale, &op_params.output_multiplier, + &op_params.output_shift); + return op_params; +} + +// Create the additional information about the cell state, which include: +// cell_state_scale_power: used in integer nonlinear function (e.g., tanh) +// quantized_cell_clip: quantized cell clip range +CellStateInfo CreateLstmCellStateInfo(const float cell_state_scale, + const float cell_clip) { + CellStateInfo cell_state_info; + // cell_state_scale_power: 2^-cell_state_scale_power = cell state scale + int buffer; + tflite::CheckedLog2(cell_state_scale, &buffer); + cell_state_info.cell_state_scale_power = buffer; + // Cell state specifics + cell_state_info.cell_clip = cell_clip; + cell_state_info.quantized_cell_clip = static_cast( + std::min(std::max(static_cast(cell_clip) / + static_cast(cell_state_scale), + static_cast(-32768.0)), + static_cast(32767.0))); + return cell_state_info; +} + +CellStateInfo CreateLstmCellStateInfoFloat(const float cell_clip) { + CellStateInfo cell_state_info; + cell_state_info.cell_clip = cell_clip; + cell_state_info.cell_state_scale_power = 0; // no quantization + cell_state_info.quantized_cell_clip = 0; // no quantization + return cell_state_info; +} + +tflite::FullyConnectedParams CreateFCParamsFloat() { + FullyConnectedParams op_params; + CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +tflite::GateParameters CreateGateParamsFloat() { + tflite::GateParameters gate_params = {}; + gate_params.input_fc_params = CreateFCParamsFloat(); + gate_params.recurrent_fc_params = CreateFCParamsFloat(); + return gate_params; +} + +tflite::ArithmeticParams CreateInterGateMulParamsFloat() { + tflite::ArithmeticParams op_params = {}; + CalculateActivationRange(kTfLiteActNone, &op_params.float_activation_min, + &op_params.float_activation_max); + return op_params; +} + +TfLiteStatus PrepareGateParametersFloat(TfLiteContext* context, + const LstmTensors& lstm_tensors, + OpDataLSTM* op_data) { + // Gate Parameters + op_data->forget_gate_parameters = CreateGateParamsFloat(); + op_data->input_gate_parameters = CreateGateParamsFloat(); + op_data->cell_gate_parameters = CreateGateParamsFloat(); + op_data->output_gate_parameters = CreateGateParamsFloat(); + // Inter gate multiplication parameters + op_data->inter_gate_parameters.forget_cell_mul_params = + CreateInterGateMulParamsFloat(); + op_data->inter_gate_parameters.input_mul_params = + CreateInterGateMulParamsFloat(); + op_data->inter_gate_parameters.output_mul_params = + CreateInterGateMulParamsFloat(); + 
return kTfLiteOk; +} + +TfLiteStatus PrepareGateParametersInteger(TfLiteContext* context, + const LstmTensors& lstm_tensors, + OpDataLSTM* op_data) { + float nonlinear_input_scale = 0.00024414062; // 2^-12 Q3.12 -> Q0.15 + TF_LITE_ENSURE_OK( + context, + CreateGateParams( + context, lstm_tensors.GetInternalTensor(kLstmInputTensor), + lstm_tensors.GetInternalTensor(kLstmInputToForgetWeightsTensor), + lstm_tensors.GetInternalTensor(kLstmForgetGateBiasTensor), + lstm_tensors.GetInternalTensor(kLstmOutputStateTensor), + lstm_tensors.GetInternalTensor(kLstmRecurrentToForgetWeightsTensor), + /*hidden_state_bias=*/nullptr, nonlinear_input_scale, kTfLiteInt16, + op_data->forget_gate_parameters)); + TF_LITE_ENSURE_OK( + context, + CreateGateParams( + context, lstm_tensors.GetInternalTensor(kLstmInputTensor), + lstm_tensors.GetInternalTensor(kLstmInputToInputWeightsTensor), + lstm_tensors.GetInternalTensor(kLstmInputGateBiasTensor), + lstm_tensors.GetInternalTensor(kLstmOutputStateTensor), + lstm_tensors.GetInternalTensor(kLstmRecurrentToInputWeightsTensor), + /*hidden_state_bias=*/nullptr, nonlinear_input_scale, kTfLiteInt16, + op_data->input_gate_parameters)); + TF_LITE_ENSURE_OK( + context, + CreateGateParams( + context, lstm_tensors.GetInternalTensor(kLstmInputTensor), + lstm_tensors.GetInternalTensor(kLstmInputToCellWeightsTensor), + lstm_tensors.GetInternalTensor(kLstmCellGateBiasTensor), + lstm_tensors.GetInternalTensor(kLstmOutputStateTensor), + lstm_tensors.GetInternalTensor(kLstmRecurrentToCellWeightsTensor), + /*hidden_state_bias=*/nullptr, nonlinear_input_scale, kTfLiteInt16, + op_data->cell_gate_parameters)); + TF_LITE_ENSURE_OK( + context, + CreateGateParams( + context, lstm_tensors.GetInternalTensor(kLstmInputTensor), + lstm_tensors.GetInternalTensor(kLstmInputToOutputWeightsTensor), + lstm_tensors.GetInternalTensor(kLstmOutputGateBiasTensor), + lstm_tensors.GetInternalTensor(kLstmOutputStateTensor), + lstm_tensors.GetInternalTensor(kLstmRecurrentToOutputWeightsTensor), + /*hidden_state_bias=*/nullptr, nonlinear_input_scale, kTfLiteInt16, + op_data->output_gate_parameters)); + + // Inter gate multiplication parameters + float nonlinear_output_scale = 0.00003051757; // 2^-15 Q3.12 -> Q0.15 + float cell_state_scale = lstm_tensors.CellStateTensor()->params.scale; + // forget gate output (nonlinear output) x cell state -> cell state + op_data->inter_gate_parameters.forget_cell_mul_params = + CreateInterGateMulParams(nonlinear_output_scale, cell_state_scale, + cell_state_scale, kTfLiteInt16); + // input gate output x cell gate output -> cell state + op_data->inter_gate_parameters.input_mul_params = + CreateInterGateMulParams(nonlinear_output_scale, nonlinear_output_scale, + cell_state_scale, kTfLiteInt16); + // tanh output x output gate output -> hidden state (potentially asymmetric) + op_data->inter_gate_parameters.output_mul_params = CreateInterGateMulParams( + nonlinear_output_scale, nonlinear_output_scale, + lstm_tensors.HiddenStateTensor()->params.scale, + lstm_tensors.HiddenStateTensor()->type, + lstm_tensors.HiddenStateTensor()->params.zero_point); + return kTfLiteOk; +} + +LSTMKernelContents CreateLSTMKernelContent(TfLiteContext* context, + TfLiteNode* node) { + LSTMKernelContents kernel_content; + // Point to correct tensors + for (size_t i = 0; i < 24; i++) { + kernel_content.internal_tensors[i] = + tflite::micro::GetMutableEvalInput(context, node, i); + } + // Output tensor + kernel_content.output_tensor = tflite::micro::GetEvalOutput(context, node, 0); + return kernel_content; 
+} + +template +LSTMBuffers CreateLSTMBuffers(TfLiteContext* context, + const int* buffer_indices) { + LSTMBuffers buffers; + buffers.buffer0 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[0])); + buffers.buffer1 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[1])); + buffers.buffer2 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[2])); + buffers.buffer3 = reinterpret_cast( + context->GetScratchBuffer(context, buffer_indices[3])); + return buffers; +} + +/*Kernel functions*/ + +void* UnidirectionalSequenceLstmInit(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpDataLSTM)); +} + +TfLiteStatus UnidirectionalSequenceLstmPrepare(TfLiteContext* context, + TfLiteNode* node) { + TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); + TF_LITE_ENSURE_EQ(context, node->inputs->size, 24); + + TFLITE_DCHECK(node->builtin_data != nullptr); + TFLITE_DCHECK(node->user_data != nullptr); + + OpDataLSTM* op_data = reinterpret_cast(node->user_data); + const auto* builtin_data = + static_cast(node->builtin_data); + // All TempTfLiteTensors will be deallocated through the destructor. + LstmTensors lstm_tensors(context, node); + TF_LITE_ENSURE_OK(context, lstm_tensors.ValidateTensorStatus(context)); + + op_data->cell_gate_nonlinear_type = builtin_data->activation; + op_data->size_info = + CreateLstmSizeInfo(builtin_data->time_major, + lstm_tensors.GetInternalTensor(kLstmInputTensor)->dims, + lstm_tensors.HiddenStateTensor()->dims); + TF_LITE_ENSURE_OK( + context, ValidateTensorSize(context, lstm_tensors, op_data->size_info)); + + // Create cell state information and gate parameters (Fully Connected and Mul) + auto cell_state_type = + lstm_tensors.GetInternalTensor(kLstmCellStateTensor)->type; + if (cell_state_type == kTfLiteFloat32) { + op_data->cell_state_info = + CreateLstmCellStateInfoFloat(builtin_data->cell_clip); + TF_LITE_ENSURE_OK( + context, PrepareGateParametersFloat(context, lstm_tensors, op_data)); + } else if (cell_state_type == kTfLiteInt16) { + op_data->cell_state_info = CreateLstmCellStateInfo( + lstm_tensors.CellStateTensor()->params.scale, builtin_data->cell_clip); + TF_LITE_ENSURE_OK( + context, PrepareGateParametersInteger(context, lstm_tensors, op_data)); + } else { + MicroPrintf( + "Cell state type %s (%d) not supported. 
The quantized Unidirectional " + "Sequence LSTM Op only support int16 cell state", + TfLiteTypeGetName(cell_state_type), cell_state_type); + return kTfLiteError; + } + // request buffers (four buffers) + for (size_t i = 0; i < 4; i++) { + TF_LITE_ENSURE_OK(context, context->RequestScratchBufferInArena( + context, + op_data->size_info.batch_size * + op_data->size_info.state_dimension * + TfLiteTypeGetSize(cell_state_type), + &(op_data->buffer_indices[i]))); + } + return kTfLiteOk; +} + +TfLiteStatus UnidirectionalSequenceLstmEval(TfLiteContext* context, + TfLiteNode* node) { + TFLITE_DCHECK(node->user_data != nullptr); + const OpDataLSTM& op_data = *reinterpret_cast(node->user_data); + auto kernel_content = CreateLSTMKernelContent(context, node); + + const auto activation_type = + kernel_content.internal_tensors[kLstmInputTensor]->type; + const auto weight_type = + kernel_content.internal_tensors[kLstmInputToInputWeightsTensor]->type; + + switch (activation_type) { + case kTfLiteFloat32: { + LSTMBuffers buffers = + CreateLSTMBuffers(context, op_data.buffer_indices); + EvalLstm(op_data, kernel_content, buffers); + break; + } + case kTfLiteInt8: { + switch (weight_type) { + case kTfLiteInt8: { + // 8(activation)x8(weight)->16(cell) LSTM with 32 bits bias + LSTMBuffers buffers = + CreateLSTMBuffers(context, op_data.buffer_indices); + EvalLstm(op_data, kernel_content, + buffers); + break; + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(weight_type), activation_type); + return kTfLiteError; + } + } + break; + } + case kTfLiteInt16: { + switch (weight_type) { + case kTfLiteInt8: { + // 16(activation)x8(weight)->16(cell) LSTM with 64 bits bias + LSTMBuffers buffers = + CreateLSTMBuffers(context, op_data.buffer_indices); + EvalLstm(op_data, kernel_content, + buffers); + break; + } + default: { + MicroPrintf("Filter type %s (%d) not supported.", + TfLiteTypeGetName(weight_type), weight_type); + return kTfLiteError; + } + } + break; + } + default: { + MicroPrintf("Input type %s (%d) not supported.", + TfLiteTypeGetName(activation_type), activation_type); + return kTfLiteError; + } + } + return kTfLiteOk; +} + +} // namespace + +TfLiteRegistration Register_UNIDIRECTIONAL_SEQUENCE_LSTM() { + return tflite::micro::RegisterOp(UnidirectionalSequenceLstmInit, + UnidirectionalSequenceLstmPrepare, + UnidirectionalSequenceLstmEval); +} +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unpack.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unpack.cc similarity index 87% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unpack.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unpack.cc index 26da0ce..c0d3d8b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unpack.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/unpack.cc @@ -18,6 +18,7 @@ limitations under the License. 
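The scratch-buffer request at the end of UnidirectionalSequenceLstmPrepare() above sizes each of the four buffers as batch_size * state_dimension * sizeof(cell type). A small sketch of that arithmetic with made-up sizes; the helper below is illustrative and not part of the SDK:

    #include <cstddef>

    // Four scratch buffers, each holding one batch_size x state_dimension
    // block of the cell type (e.g. 2 bytes per element for int16 cells).
    constexpr size_t LstmScratchBytes(size_t batch_size, size_t state_dimension,
                                      size_t cell_type_bytes) {
      return 4 * batch_size * state_dimension * cell_type_bytes;
    }

    // Example: batch 1, 8 state units, int16 cell state -> 4 buffers of 16 bytes.
    static_assert(LstmScratchBytes(1, 8, 2) == 64, "illustrative sizing only");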
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace ops { @@ -87,15 +88,12 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { case kTfLiteInt32: { return UnpackImpl(context, node, input, data->num, data->axis); } - case kTfLiteUInt8: { - return UnpackImpl(context, node, input, data->num, data->axis); - } case kTfLiteInt8: { return UnpackImpl(context, node, input, data->num, data->axis); } default: { - TF_LITE_KERNEL_LOG(context, "Type '%s' is not supported by unpack.", - TfLiteTypeGetName(input->type)); + MicroPrintf("Type '%s' is not supported by unpack.", + TfLiteTypeGetName(input->type)); return kTfLiteError; } } @@ -106,14 +104,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace unpack TfLiteRegistration Register_UNPACK() { - return {/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/nullptr, - /*invoke=*/unpack::Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, nullptr, unpack::Eval); } } // namespace micro diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/var_handle.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/var_handle.cc new file mode 100644 index 0000000..2329f2c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/var_handle.cc @@ -0,0 +1,93 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +namespace { + +struct OpData { + int32_t resource_id; +}; + +void* Init(TfLiteContext* context, const char* buffer, size_t length) { + TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr); + return context->AllocatePersistentBuffer(context, sizeof(OpData)); +} + +TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + const auto* params = + reinterpret_cast(node->builtin_data); + + tflite::MicroContext* micro_context = tflite::GetMicroContext(context); + MicroGraph& graph_info = micro_context->graph(); + + MicroResourceVariables* resources = graph_info.GetResourceVariables(); + if (resources == nullptr) { + MicroPrintf( + "VAR_HANDLE requires resource variables. Please create " + "ResourceVariables and pass it to the interpreter."); + return kTfLiteError; + } + op_data->resource_id = + resources->CreateIdIfNoneFound(params->container, params->shared_name); + if (op_data->resource_id < 0) { + return kTfLiteError; + } + + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TFLITE_DCHECK(output != nullptr); + + // Assign saved resource_id so this output tensor will always return the + // correct resource id. + output->data.i32 = &op_data->resource_id; + + return kTfLiteOk; +} + +TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { + OpData* op_data = reinterpret_cast(node->user_data); + + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TFLITE_DCHECK(output != nullptr); + + // Assign saved resource_id so this output tensor will always return the + // correct resource id. + output->data.i32 = &op_data->resource_id; + return kTfLiteOk; +} + +} // namespace. + +TfLiteRegistration Register_VAR_HANDLE() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/while.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/while.cc new file mode 100644 index 0000000..ba18ba6 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/while.cc @@ -0,0 +1,133 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
+==============================================================================*/
+
+#include
+
+#include
+
+#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h"
+#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h"
+#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h"
+#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h"
+
+namespace tflite {
+
+namespace {
+
+struct OpData {
+  int cond_subgraph_index;
+  int body_subgraph_index;
+};
+
+void* Init(TfLiteContext* context, const char* buffer, size_t length) {
+  TFLITE_DCHECK(context->AllocatePersistentBuffer != nullptr);
+  return context->AllocatePersistentBuffer(context, sizeof(OpData));
+}
+
+TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
+  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
+  const auto* params =
+      reinterpret_cast<TfLiteWhileParams*>(node->builtin_data);
+
+  op_data->cond_subgraph_index = params->cond_subgraph_index;
+  op_data->body_subgraph_index = params->body_subgraph_index;
+
+  // The first input is the condition.
+  tflite::MicroContext* micro_context = tflite::GetMicroContext(context);
+
+  size_t num_inputs = node->inputs->size;
+  size_t num_outputs = node->outputs->size;
+
+  MicroGraph& graph_info = micro_context->graph();
+
+  TF_LITE_ENSURE(context,
+                 op_data->cond_subgraph_index < graph_info.NumSubgraphs());
+  TF_LITE_ENSURE(context,
+                 op_data->body_subgraph_index < graph_info.NumSubgraphs());
+
+  TF_LITE_ENSURE_EQ(context, num_inputs,
+                    graph_info.NumSubgraphInputs(op_data->cond_subgraph_index));
+  TF_LITE_ENSURE_EQ(context, num_inputs,
+                    graph_info.NumSubgraphInputs(op_data->body_subgraph_index));
+  TF_LITE_ENSURE_EQ(context, num_inputs, num_outputs);
+  TF_LITE_ENSURE_EQ(
+      context, num_outputs,
+      graph_info.NumSubgraphOutputs(op_data->body_subgraph_index));
+
+  return kTfLiteOk;
+}
+
+TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
+  const OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
+
+  tflite::MicroContext* micro_context = tflite::GetMicroContext(context);
+  MicroGraph* graph_info = &micro_context->graph();
+
+  TF_LITE_ENSURE_OK(context,
+                    tflite::micro::CopyOpInputsToSubgraphInputs(
+                        context, node, graph_info, op_data->cond_subgraph_index,
+                        /*first_tensor_idx=*/0));
+
+  TF_LITE_ENSURE_OK(context,
+                    graph_info->InvokeSubgraph(op_data->cond_subgraph_index));
+
+  TfLiteEvalTensor* cond_subgraph_output = graph_info->GetSubgraphOutput(
+      op_data->cond_subgraph_index, /*tensor_idx=*/0);
+  bool cond_value = cond_subgraph_output->data.b[0];
+
+  TF_LITE_ENSURE_OK(context,
+                    tflite::micro::CopyOpInputsToSubgraphInputs(
+                        context, node, graph_info, op_data->body_subgraph_index,
+                        /*first_tensor_idx=*/0));
+  TF_LITE_ENSURE_OK(context,
+                    tflite::micro::CopyOpInputsToOpOutputs(context, node));
+
+  while (cond_value == true) {
+    // Copy output of this iteration back to the body input.
+ TF_LITE_ENSURE_OK( + context, tflite::micro::CopyOpOutputsToSubgraphInputs( + context, node, graph_info, op_data->body_subgraph_index)); + TF_LITE_ENSURE_OK(context, + graph_info->InvokeSubgraph(op_data->body_subgraph_index)); + + TF_LITE_ENSURE_OK( + context, tflite::micro::CopySubgraphOutputsToOpOutputs( + context, node, graph_info, op_data->body_subgraph_index)); + TF_LITE_ENSURE_OK( + context, tflite::micro::CopyOpOutputsToSubgraphInputs( + context, node, graph_info, op_data->cond_subgraph_index)); + TF_LITE_ENSURE_OK(context, + graph_info->InvokeSubgraph(op_data->cond_subgraph_index)); + + cond_subgraph_output = graph_info->GetSubgraphOutput( + op_data->cond_subgraph_index, /*tensor_idx=*/0); + cond_value = cond_subgraph_output->data.b[0]; + } + + return kTfLiteOk; +} + +} // namespace. + +TfLiteRegistration Register_WHILE() { + return tflite::micro::RegisterOp(Init, Prepare, Eval); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/zeros_like.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/zeros_like.cc similarity index 77% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/zeros_like.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/zeros_like.cc index 73b9508..c868341 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/zeros_like.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/zeros_like.cc @@ -17,6 +17,7 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { namespace { @@ -25,15 +26,20 @@ constexpr int kInputTensor = 0; constexpr int kOutputTensor = 0; TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { + MicroContext* micro_context = GetMicroContext(context); + TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - TfLiteTensor* output; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kOutputTensor, &output)); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + TfLiteTensor* output = + micro_context->AllocateTempOutputTensor(node, kOutputTensor); + TF_LITE_ENSURE(context, output != nullptr); output->type = input->type; + micro_context->DeallocateTempTfLiteTensor(input); + micro_context->DeallocateTempTfLiteTensor(output); return kTfLiteOk; } @@ -65,10 +71,10 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { resetZeros(tflite::micro::GetTensorData(output), flat_size); break; default: - TF_LITE_KERNEL_LOG(context, - "ZerosLike only currently supports int64, int32, " - "and float32, got %d.", - input->type); + MicroPrintf( + "ZerosLike only currently supports int64, int32, " + "and float32, got %d.", + input->type); return kTfLiteError; } return kTfLiteOk; @@ -76,14 +82,7 @@ TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { } // namespace TfLiteRegistration Register_ZEROS_LIKE() { - return 
{/*init=*/nullptr, - /*free=*/nullptr, - /*prepare=*/Prepare, - /*invoke=*/Eval, - /*profiling_string=*/nullptr, - /*builtin_code=*/0, - /*custom_name=*/nullptr, - /*version=*/0}; + return tflite::micro::RegisterOp(nullptr, Prepare, Eval); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.cc similarity index 93% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.cc index d767e89..486b68e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.cc @@ -20,9 +20,8 @@ limitations under the License. #include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers #include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -81,12 +80,18 @@ TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) { case kTfLiteBool: *size = sizeof(bool); break; + case kTfLiteResource: + *size = sizeof(int32_t); + break; case kTfLiteComplex64: *size = sizeof(float) * 2; break; case kTfLiteComplex128: *size = sizeof(double) * 2; break; + case kTfLiteInt4: + *size = sizeof(int8_t); + break; default: return kTfLiteError; } @@ -94,8 +99,7 @@ TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size) { } TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, - size_t* bytes, size_t* type_size, - ErrorReporter* error_reporter) { + size_t* bytes, size_t* type_size) { int element_count = 1; // If flatbuffer_tensor.shape == nullptr, then flatbuffer_tensor is a scalar // so has 1 element. @@ -106,8 +110,8 @@ TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, } TfLiteType tf_lite_type; - TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &tf_lite_type, error_reporter)); + TF_LITE_ENSURE_STATUS( + ConvertTensorType(flatbuffer_tensor.type(), &tf_lite_type)); TF_LITE_ENSURE_STATUS(TfLiteTypeSizeOf(tf_lite_type, type_size)); *bytes = element_count * (*type_size); return kTfLiteOk; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h index cd3c697..2ceb2bc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h @@ -19,8 +19,8 @@ limitations under the License. 
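A pattern worth noting across the unpack, zeros_like, var_handle and while kernels above: registrations that used to brace-initialize every TfLiteRegistration field now go through tflite::micro::RegisterOp(), and error reporting moves from TF_LITE_KERNEL_LOG to MicroPrintf. A minimal sketch of the new shape of a registration, using hypothetical FooPrepare/FooEval callbacks; the RegisterOp declaration is assumed to come from the same kernel utility headers these files already include:

    #include "edge-impulse-sdk/tensorflow/lite/c/common.h"
    #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h"

    // Hypothetical kernel callbacks standing in for a real op's Prepare/Eval.
    static TfLiteStatus FooPrepare(TfLiteContext*, TfLiteNode*) { return kTfLiteOk; }
    static TfLiteStatus FooEval(TfLiteContext*, TfLiteNode*) { return kTfLiteOk; }

    // The old style spelled out all eight fields (init/free/prepare/invoke/...);
    // the helper now fills the unused ones.
    TfLiteRegistration Register_FOO() {
      return tflite::micro::RegisterOp(/*init=*/nullptr, FooPrepare, FooEval);
    }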
#include #include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h" namespace tflite { @@ -33,13 +33,19 @@ uint8_t* AlignPointerDown(uint8_t* data, size_t alignment); // Returns an increased size that's a multiple of alignment. size_t AlignSizeUp(size_t size, size_t alignment); +// Templated version of AlignSizeUp +// Returns an increased size that's a multiple of alignment. +template +size_t AlignSizeUp(size_t count = 1) { + return AlignSizeUp(sizeof(T) * count, alignof(T)); +} + // Returns size in bytes for a given TfLiteType. TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size); // How many bytes are needed to hold a tensor's contents. TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor, - size_t* bytes, size_t* type_size, - ErrorReporter* error_reporter); + size_t* bytes, size_t* type_size); // How many bytes are used in a TfLiteEvalTensor instance. The byte length is // returned in out_bytes. diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc similarity index 86% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc index e623ac5..ff98fc2 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.cc @@ -15,8 +15,28 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_string.h" + namespace tflite { +namespace { + +// Returns a character representing a numbered buffer +// for GreedyMemoryPlanner::PrintMemoryPlan() +char GetOrdinalCharacter(int i) { + if (i < 10) { + return '0' + i; + } else if (i < 36) { + return 'a' + (i - 10); + } else if (i < 62) { + return 'A' + (i - 36); + } + return '*'; +} + +} // namespace + // Simple stable in-place sort function. Not time-efficient for large arrays. // Would normally be in an anonymous namespace to keep it private, but we want // to be able to test it externally. @@ -38,9 +58,14 @@ void ReverseSortInPlace(int* values, int* ids, int size) { } while (any_swapped); } -GreedyMemoryPlanner::GreedyMemoryPlanner(unsigned char* scratch_buffer, - int scratch_buffer_size) - : buffer_count_(0), need_to_calculate_offsets_(true) { +GreedyMemoryPlanner::GreedyMemoryPlanner() {} + +TfLiteStatus GreedyMemoryPlanner::Init(unsigned char* scratch_buffer, + int scratch_buffer_size) { + // Reset internal states + buffer_count_ = 0; + need_to_calculate_offsets_ = true; + // Allocate the arrays we need within the scratch buffer arena. 
max_buffer_count_ = scratch_buffer_size / per_buffer_size(); @@ -58,18 +83,17 @@ GreedyMemoryPlanner::GreedyMemoryPlanner(unsigned char* scratch_buffer, next_free += sizeof(ListEntry) * max_buffer_count_; buffer_offsets_ = reinterpret_cast(next_free); + return kTfLiteOk; } GreedyMemoryPlanner::~GreedyMemoryPlanner() { // We don't own the scratch buffer, so don't deallocate anything. } -TfLiteStatus GreedyMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used) { +TfLiteStatus GreedyMemoryPlanner::AddBuffer(int size, int first_time_used, + int last_time_used) { if (buffer_count_ >= max_buffer_count_) { - TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", - max_buffer_count_); + MicroPrintf("Too many buffers (max is %d)", max_buffer_count_); return kTfLiteError; } BufferRequirements* current = &requirements_[buffer_count_]; @@ -82,12 +106,11 @@ TfLiteStatus GreedyMemoryPlanner::AddBuffer( return kTfLiteOk; } -TfLiteStatus GreedyMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used, int offline_offset) { +TfLiteStatus GreedyMemoryPlanner::AddBuffer(int size, int first_time_used, + int last_time_used, + int offline_offset) { BufferRequirements* current = &requirements_[buffer_count_]; - if (AddBuffer(error_reporter, size, first_time_used, last_time_used) != - kTfLiteOk) { + if (AddBuffer(size, first_time_used, last_time_used) != kTfLiteOk) { return kTfLiteError; } current->offline_offset = offline_offset; @@ -297,8 +320,6 @@ size_t GreedyMemoryPlanner::GetMaximumMemorySize() { while (entry) { BufferRequirements* requirements = &requirements_[entry->requirements_index]; - // TODO(b/148246793): Update all size and offset variables types from - // int to size_t const size_t current_size = entry->offset + requirements->size; if (current_size > max_size) { max_size = current_size; @@ -311,17 +332,14 @@ size_t GreedyMemoryPlanner::GetMaximumMemorySize() { return max_size; } -void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) { +void GreedyMemoryPlanner::PrintMemoryPlan() { CalculateOffsetsIfNeeded(); for (int i = 0; i < buffer_count_; ++i) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Planner buffer ID: %d, calculated offset: %d, size required: %d, " - "first_time_created: %d, " - "last_time_used: %d", - i, buffer_offsets_[i], requirements_[i].size, - requirements_[i].first_time_used, requirements_[i].last_time_used); + MicroPrintf("%c (id=%d): size=%d, offset=%d, first_used=%d last_used=%d", + GetOrdinalCharacter(i), i, requirements_[i].size, + buffer_offsets_[i], requirements_[i].first_time_used, + requirements_[i].last_time_used); } constexpr int kLineWidth = 80; @@ -345,6 +363,7 @@ void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) { for (int c = 0; c < kLineWidth; ++c) { line[c] = '.'; } + int memory_use = 0; for (int i = 0; i < buffer_count_; ++i) { BufferRequirements* requirements = &requirements_[i]; if ((t < requirements->first_time_used) || @@ -356,47 +375,39 @@ void GreedyMemoryPlanner::PrintMemoryPlan(ErrorReporter* error_reporter) { continue; } const int size = requirements->size; + memory_use += size; const int line_start = (offset * kLineWidth) / max_size; const int line_end = ((offset + size) * kLineWidth) / max_size; for (int n = line_start; n < line_end; ++n) { if (line[n] == '.') { - char display; - if (i < 10) { - display = '0' + i; - } else if (i < 36) { - display = 'a' + (i - 10); - } else 
if (i < 62) { - display = 'A' + (i - 36); - } else { - display = '*'; - } - line[n] = display; + line[n] = GetOrdinalCharacter(i); } else { line[n] = '!'; } } } line[kLineWidth] = 0; - TF_LITE_REPORT_ERROR(error_reporter, "%s", (const char*)line); + + MicroPrintf("%s%d: %s (%dk)", t < 10 ? " " : "", t, (const char*)line, + (memory_use + 1023) / 1024); } } int GreedyMemoryPlanner::GetBufferCount() { return buffer_count_; } -TfLiteStatus GreedyMemoryPlanner::GetOffsetForBuffer( - tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { +TfLiteStatus GreedyMemoryPlanner::GetOffsetForBuffer(int buffer_index, + int* offset) { CalculateOffsetsIfNeeded(); if ((buffer_index < 0) || (buffer_index >= buffer_count_)) { - TF_LITE_REPORT_ERROR(error_reporter, - "buffer index %d is outside range 0 to %d", - buffer_index, buffer_count_); + MicroPrintf("buffer index %d is outside range 0 to %d", buffer_index, + buffer_count_); return kTfLiteError; } *offset = buffer_offsets_[buffer_index]; return kTfLiteOk; } -bool GreedyMemoryPlanner::DoAnyBuffersOverlap(ErrorReporter* error_reporter) { +bool GreedyMemoryPlanner::DoAnyBuffersOverlap() { CalculateOffsetsIfNeeded(); bool were_overlaps_found = false; for (int i = 0; i < buffer_count_; ++i) { @@ -425,10 +436,10 @@ bool GreedyMemoryPlanner::DoAnyBuffersOverlap(ErrorReporter* error_reporter) { continue; } were_overlaps_found = true; - TF_LITE_REPORT_ERROR( - error_reporter, "Overlap: %d (%d=>%d, %d->%d) vs %d (%d=>%d, %d->%d)", - i, a_first_time_used, a_last_time_used, a_start_offset, a_end_offset, - j, b_first_time_used, b_last_time_used, b_start_offset, b_end_offset); + MicroPrintf("Overlap: %d (%d=>%d, %d->%d) vs %d (%d=>%d, %d->%d)", i, + a_first_time_used, a_last_time_used, a_start_offset, + a_end_offset, j, b_first_time_used, b_last_time_used, + b_start_offset, b_end_offset); } } return were_overlaps_found; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h index 42775ba..d77a595 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h @@ -17,7 +17,7 @@ limitations under the License. #define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_GREEDY_MEMORY_PLANNER_H_ #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_planner.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h" namespace tflite { @@ -43,29 +43,32 @@ constexpr int kOnlinePlannedBuffer = -1; // // This is not guaranteed to produce the best placement, since that's an // NP-Complete problem, but in practice it should produce one that's decent. -class GreedyMemoryPlanner : public MemoryPlanner { +class GreedyMemoryPlanner : public MicroMemoryPlanner { public: - // You need to pass in an area of memory to be used for planning. This memory - // needs to have a lifetime as long as the planner, but isn't owned by this - // object, so management should be handled by the client. This is so it can be - // stack or globally allocated if necessary on devices without dynamic memory - // allocation. 
How many buffers can be planned for will depend on the size of - // this scratch memory, so you should enlarge it if you see an error when - // calling AddBuffer(). The memory can be reused once you're done with the - // planner, as long as you copy the calculated offsets to another location. - // Each buffer requires about 36 bytes of scratch. - GreedyMemoryPlanner(unsigned char* scratch_buffer, int scratch_buffer_size); + GreedyMemoryPlanner(); ~GreedyMemoryPlanner() override; + // You need to pass in an area of memory to be used for planning. The client + // should ensure the validity of the memory when it needs to use this object. + // This memory isn't owned by this object, so management should be handled by + // the client. This is so it can be stack or globally allocated if necessary + // on devices without dynamic memory allocation. How many buffers can be + // planned for will depend on the size of this scratch memory, so you should + // enlarge it if you see an error when calling AddBuffer(). The memory can be + // reused once you're done with the planner, as long as you copy the + // calculated offsets to another location. Each buffer requires about 36 bytes + // of scratch. + TfLiteStatus Init(unsigned char* scratch_buffer, + int scratch_buffer_size) override; + // Record details of a buffer we want to place. - TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used) override; + TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used) override; // Record details of an offline planned buffer offset we want to place. // offline_offset is the buffer offset from the start of the arena. - TfLiteStatus AddBuffer(ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used, - int offline_offset); + TfLiteStatus AddBuffer(int size, int first_time_used, int last_time_used, + int offline_offset) override; // Returns the high-water mark of used memory. This is the minimum size of a // memory arena you'd need to allocate to hold these buffers. @@ -77,15 +80,14 @@ class GreedyMemoryPlanner : public MemoryPlanner { // Where a given buffer should be placed in the memory arena. // This information is stored in the memory arena itself, so once the arena // is used for inference, it will be overwritten. - TfLiteStatus GetOffsetForBuffer(ErrorReporter* error_reporter, - int buffer_index, int* offset) override; + TfLiteStatus GetOffsetForBuffer(int buffer_index, int* offset) override; // Prints an ascii-art diagram of the buffer layout plan. - void PrintMemoryPlan(ErrorReporter* error_reporter); + void PrintMemoryPlan() override; // Debug method to check whether any buffer allocations are overlapping. This // is an O(N^2) complexity operation, so only use for testing. - bool DoAnyBuffersOverlap(ErrorReporter* error_reporter); + bool DoAnyBuffersOverlap(); // Used to store a list of buffers ordered by their offset. 
struct ListEntry { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc similarity index 71% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc index 8a4e514..6e21eb6 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.cc @@ -15,18 +15,21 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + namespace tflite { +// Patched by Edge Impulse +constexpr int LinearMemoryPlanner::kMaxBufferCount; + LinearMemoryPlanner::LinearMemoryPlanner() : current_buffer_count_(0), next_free_offset_(0) {} LinearMemoryPlanner::~LinearMemoryPlanner() {} -TfLiteStatus LinearMemoryPlanner::AddBuffer( - tflite::ErrorReporter* error_reporter, int size, int first_time_used, - int last_time_used) { +TfLiteStatus LinearMemoryPlanner::AddBuffer(int size, int first_time_used, + int last_time_used) { if (current_buffer_count_ >= kMaxBufferCount) { - TF_LITE_REPORT_ERROR(error_reporter, "Too many buffers (max is %d)", - kMaxBufferCount); + MicroPrintf("Too many buffers (max is %d)", kMaxBufferCount); return kTfLiteError; } buffer_offsets_[current_buffer_count_] = next_free_offset_; @@ -39,12 +42,11 @@ size_t LinearMemoryPlanner::GetMaximumMemorySize() { return next_free_offset_; } int LinearMemoryPlanner::GetBufferCount() { return current_buffer_count_; } -TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer( - tflite::ErrorReporter* error_reporter, int buffer_index, int* offset) { +TfLiteStatus LinearMemoryPlanner::GetOffsetForBuffer(int buffer_index, + int* offset) { if ((buffer_index < 0) || (buffer_index >= current_buffer_count_)) { - TF_LITE_REPORT_ERROR(error_reporter, - "buffer index %d is outside range 0 to %d", - buffer_index, current_buffer_count_); + MicroPrintf("buffer index %d is outside range 0 to %d", buffer_index, + current_buffer_count_); return kTfLiteError; } *offset = buffer_offsets_[buffer_index]; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.h index d2712f9..f699f8b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.h @@ -17,24 +17,23 @@ limitations under the License. 
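The planner changes above replace the old scratch-buffer constructor with a default constructor plus Init(), and drop the ErrorReporter parameter from every method (errors now go through MicroPrintf internally). A short sketch of how a caller drives the new interface; buffer sizes and lifetimes below are made-up example values, not taken from this patch:

    #include "edge-impulse-sdk/tensorflow/lite/c/common.h"
    #include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h"

    TfLiteStatus PlanTwoBuffers(int* offset0, int* offset1) {
      // Scratch arena for the planner's own bookkeeping (~36 bytes per buffer).
      static unsigned char planner_scratch[256];
      tflite::GreedyMemoryPlanner planner;
      if (planner.Init(planner_scratch,
                       static_cast<int>(sizeof(planner_scratch))) != kTfLiteOk) {
        return kTfLiteError;
      }
      // Two buffers with overlapping lifetimes: 1 KB alive for steps 0..2,
      // 512 B alive for steps 1..3; the planner may place them side by side.
      if (planner.AddBuffer(1024, 0, 2) != kTfLiteOk) return kTfLiteError;
      if (planner.AddBuffer(512, 1, 3) != kTfLiteOk) return kTfLiteError;
      if (planner.GetOffsetForBuffer(0, offset0) != kTfLiteOk) return kTfLiteError;
      if (planner.GetOffsetForBuffer(1, offset1) != kTfLiteOk) return kTfLiteError;
      return kTfLiteOk;
    }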
#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_LINEAR_MEMORY_PLANNER_H_ #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_planner.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h" namespace tflite { // The simplest possible memory planner that just lays out all buffers at // increasing offsets without trying to reuse memory. -class LinearMemoryPlanner : public MemoryPlanner { +class LinearMemoryPlanner : public MicroMemoryPlanner { public: LinearMemoryPlanner(); ~LinearMemoryPlanner() override; - TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, int size, - int first_time_used, int last_time_used) override; + TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used) override; size_t GetMaximumMemorySize() override; int GetBufferCount() override; - TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, - int buffer_index, int* offset) override; + TfLiteStatus GetOffsetForBuffer(int buffer_index, int* offset) override; private: static constexpr int kMaxBufferCount = 1024; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_plan_struct.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_plan_struct.h new file mode 100644 index 0000000..5f3b7ef --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_plan_struct.h @@ -0,0 +1,73 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLAN_STRUCT_H_ +#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLAN_STRUCT_H_ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +// This is an experimental feature and subjected to change. +// More description is available at +// tensorflow/lite/micro/docs/offline_memory_plan.md. + +// Describes a buffer's layout inside an arena. This struct should be kept as +// small as possible for memory footprint sensitive applications and should use +// only primitive fields, making it easy to adjust offline. +struct BufferDescriptor { + // Starting offset inside an arena for this buffer. + // Offset is the minimum information needed for the buffer. The user knows + // the model and the size of each buffer in order to lay out a valid buffer + // plan. + int32_t offset; +}; + +// A structure describing the lay out of buffers inside an arena. +struct BufferPlan { + // Number of buffers described in this plan. + int32_t buffer_count; + + // Each element describes one buffer. + // Buffer index is implicit by the order of AddBuffer() call. 
+ // Specifically, indices of activation tensors are 0 … N-1 where N is the + // number of activation tensors. + // The rest are based on the order of OP requests. + // + // This is a flexible array member and should ideally be + // arena_entries[]; However, in order to support a variety + // of compilers (and without needing to add ifdef's), we + // are implementing the flexible array member with an array of + // length 1 as the last member of the struct. When the size of a BufferPlan + // is needed, use the provided SizeOfBufferPlan(buffer_count) that + // accounts for this implementation caveat. + BufferDescriptor buffer_plan_entries[1]; +}; + +// Returns size of a BufferPlan given a buffer count. This size is compile time +// known if buffer_count is a compile time constant. +constexpr size_t SizeOfBufferPlan(int32_t buffer_count) { + // Minus 1 because a BufferPlan struct has a BufferDescriptor already. + // Max to provide a lower bound for the corner case of buffer_count = 0. + return sizeof(BufferPlan) + + sizeof(BufferDescriptor) * Max(buffer_count - 1, 0); +} + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLAN_STRUCT_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_planner.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h similarity index 55% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_planner.h rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h index c79060f..0d0d74f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_planner.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h @@ -13,11 +13,10 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ -#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ +#ifndef TENSORFLOW_LITE_MICRO_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ #include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" namespace tflite { @@ -28,44 +27,65 @@ namespace tflite { // information about the calculated layout.
For example: // // SomeMemoryPlanner planner; -// planner.AddBuffer(reporter, 100, 0, 1); // Buffer 0 -// planner.AddBuffer(reporter, 50, 2, 3); // Buffer 1 -// planner.AddBuffer(reporter, 50, 2, 3); // Buffer 2 +// planner.AddBuffer(100, 0, 1); // Buffer 0 +// planner.AddBuffer(50, 2, 3); // Buffer 1 +// planner.AddBuffer(50, 2, 3); // Buffer 2 // // int offset0; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 0, &offset0)); +// TF_EXPECT_OK(planner.GetOffsetForBuffer(0, &offset0)); // int offset1; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 1, &offset1)); +// TF_EXPECT_OK(planner.GetOffsetForBuffer(1, &offset1)); // int offset2; -// TF_EXPECT_OK(planner.GetOffsetForBuffer(reporter, 2, &offset2)); +// TF_EXPECT_OK(planner.GetOffsetForBuffer(2, &offset2)); // const int arena_size_needed = planner.GetMaximumMemorySize(); // // The goal is for applications to be able to experiment with different layout // strategies without changing their client code, by swapping out classes that // implement this interface. -class MemoryPlanner { +class MicroMemoryPlanner { public: - MemoryPlanner() {} - virtual ~MemoryPlanner() {} + MicroMemoryPlanner() {} + virtual ~MicroMemoryPlanner() {} // Pass information about a buffer's size and lifetime to the layout // algorithm. The order this is called implicitly assigns an index to the // result, so the buffer information that's passed into the N-th call of // this method will be used as the buffer_index argument to // GetOffsetForBuffer(). - virtual TfLiteStatus AddBuffer(tflite::ErrorReporter* error_reporter, - int size, int first_time_used, + virtual TfLiteStatus AddBuffer(int size, int first_time_used, int last_time_used) = 0; + // Record details of an offline planned buffer offset we want to place. + // offline_offset is the buffer offset from the start of the arena. + // This is to support offline memory planning from the flatbuffer metadata. + // By default, it returns an error. + virtual TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used, int offline_offset) { + return kTfLiteError; + } + + // The largest contiguous block of memory that's needed to hold the layout. virtual size_t GetMaximumMemorySize() = 0; // How many buffers have been added to the planner. virtual int GetBufferCount() = 0; // Calculated layout offset for the N-th buffer added to the planner. - virtual TfLiteStatus GetOffsetForBuffer(tflite::ErrorReporter* error_reporter, - int buffer_index, int* offset) = 0; + virtual TfLiteStatus GetOffsetForBuffer(int buffer_index, int* offset) = 0; + + // Provides the scratch buffer in case that the memory planner needs it. + // The lifetime of scratch buffers lasts until the static memory plan + // is committed. + // The default implementation is for the memory planner that does not need + // scratch buffer and simply returns ok. + virtual TfLiteStatus Init(unsigned char* scratch_buffer, + int scratch_buffer_size) { + return kTfLiteOk; + } + + virtual void PrintMemoryPlan() { + // Default does nothing.
+ } }; } // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ +#endif // TENSORFLOW_LITE_MICRO_MICRO_MEMORY_PLANNER_MEMORY_PLANNER_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cc new file mode 100644 index 0000000..0c1fd6d --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.cc @@ -0,0 +1,66 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h" + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +NonPersistentMemoryPlannerShim::NonPersistentMemoryPlannerShim( + const BufferPlan* buffer_plan) + : buffer_plan_(buffer_plan), buffer_request_count_(0) {} + +NonPersistentMemoryPlannerShim::~NonPersistentMemoryPlannerShim() {} + +TfLiteStatus NonPersistentMemoryPlannerShim::AddBuffer(int size, + int first_time_used, + int last_time_used) { + buffer_request_count_++; + if (buffer_request_count_ > buffer_plan_->buffer_count) { + MicroPrintf( + "Attempting to add buffer %d, but only %d buffers in given buffer " + "plan.", + buffer_request_count_, buffer_plan_->buffer_count); + return kTfLiteError; + } + return kTfLiteOk; +} + +size_t NonPersistentMemoryPlannerShim::GetMaximumMemorySize() { + // Simply return 0 to let the framework accept this memory plan + // because the client ensure validity of the memory plan. + return 0; +} + +// How many buffers are in the given memory plan. 
+int NonPersistentMemoryPlannerShim::GetBufferCount() { + return buffer_plan_->buffer_count; +} + +TfLiteStatus NonPersistentMemoryPlannerShim::GetOffsetForBuffer( + int buffer_request_index, int* offset) { + if (buffer_request_index >= buffer_plan_->buffer_count) { + MicroPrintf( + "Attempting to get offset for buffer %d, but only %d buffers in given " + "buffer plan.", + buffer_request_index, buffer_plan_->buffer_count); + return kTfLiteError; + } + *offset = buffer_plan_->buffer_plan_entries[buffer_request_index].offset; + return kTfLiteOk; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h new file mode 100644 index 0000000..291c678 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h @@ -0,0 +1,129 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_NON_PERSISTENT_MEMORY_PLANNER_SHIM_H__ +#define TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_NON_PERSISTENT_MEMORY_PLANNER_SHIM_H__ + +#include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_plan_struct.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h" + +namespace tflite { + +/* This is an experimental feature and subjected to change. + * +The NonPersistentMemoryPlannerShim enables TFLM to work with an external tooling +that can plan the offset of each non persistent buffer for the Model within the +TFLM arena. + +If the NonPersistentMemoryPlannerShim is used, then the final binary does not +have any of the symbols associated with the GreedyMemoryPlanner which results in +a reduced memory footprint. + +Additionally, the offline planning of the non-persistent buffers can be used to +have a more efficient utilization compared to the GreedyMemoryPlanner. 
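As a rough wiring sketch before the header's own worked example below: a client sizes storage with SizeOfBufferPlan(), fills in one BufferDescriptor offset per non-persistent buffer, and hands the BufferPlan to the shim. This sketch is an illustration only, not taken from the SDK sources; the buffer count and offsets are invented and would normally come from the external planning tool.

#include <cstdint>

#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_plan_struct.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/non_persistent_buffer_planner_shim.h"

constexpr int32_t kOfflineBufferCount = 3;

// Storage sized and aligned for a BufferPlan carrying kOfflineBufferCount
// entries; the union mirrors the length-1 array emulation that
// SizeOfBufferPlan() accounts for.
static union {
  tflite::BufferPlan plan;
  unsigned char raw[tflite::SizeOfBufferPlan(kOfflineBufferCount)];
} g_offline_plan;

tflite::MicroMemoryPlanner* OfflinePlannerForModel() {
  tflite::BufferPlan* plan = &g_offline_plan.plan;
  plan->buffer_count = kOfflineBufferCount;
  plan->buffer_plan_entries[0].offset = 0;    // offsets produced offline,
  plan->buffer_plan_entries[1].offset = 416;  // hard-coded here only for
  plan->buffer_plan_entries[2].offset = 0;    // the sake of the sketch
  static tflite::NonPersistentMemoryPlannerShim shim(plan);
  return &shim;
}

Since GetMaximumMemorySize() returns 0, the framework accepts the plan as given; keeping the offsets valid for the model is entirely the client's responsibility.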
+ +For example, consider the following hypothetical model: + +A1(400) A2(401) +──┬─────────┐ ┌─────────── + │ │ │ + │ │ │ + │ ▼ ▼ + │ ┌────────┐ + │ │ OP1 │ + │ └───┬────┘ A4(201) + │ A3(10) │ │ + │ │ │ + │ │ │ + │ ┌───┴────┐ │ + │ │ OP2 │◄────────┤ + │ └───┬────┘ │ + │ A5(11) │ A6(202) │ + │ │ │ │ + │ ▼ │ │ + │ ┌────────┐ │ │ + │ │ OP3 │◄─┘ │ + │ └───┬────┘ │ + │ │ A8(200) │ + │ A7(12) │ │ │ + │ │ │ │ + │ ┌───┴────┐◄──┘ │ + └──────►│ OP4 │ │ + └───┬────┘◄────────┘ + │ + A9(13) │ + ▼ + +The GreedyMemoryPlanner will give the following memory layout that requires 1012 +bytes of scratch arena size: + +┌─────────────────────────────────────────┬──────────────────────────┬────────┬───────┐ +│ A2(401) │ A1(400) │ A4(201)│ +A3(10)│ +└─────────────────────────────────────────┴──────────────────────────┴────────┴───────┘ + +┌───────────┬──────┬──────┐ +│ A6(202) │A5(11)│A7(12)│ +└───────────┴──────┴──────┘ + +┌──────────┬───────┐ +│ A8(200) │A9(13) │ +└──────────┴───────┘ + +But a more efficient offline memory plan that requires only 826 bytes of scratch +arena size can be + +┌──────────────────────────────────────┬─────────────────────────────┬───────┬──────┐ +│ A1(400) │ A2(401) │ +A3(10)│A5(11)│ +└──────────────────────────────────────┴─────────────────────────────┴───────┴──────┘ + + ┌────────────────┬────────────┬────────┬───────┐ + │A4(201) │ A8(200) │A9(13) +│A7(12) │ └────────────────┴────────────┴────────┴───────┘ + + ┌─────────────┐ + │ A6(202) │ + └─────────────┘ + +*/ +class NonPersistentMemoryPlannerShim : public MicroMemoryPlanner { + public: + // Does not take ownership of buffer_plan, which must refer to a valid + // BufferPlan that outlives this object. + explicit NonPersistentMemoryPlannerShim(const BufferPlan* buffer_plan); + ~NonPersistentMemoryPlannerShim() override; + + TfLiteStatus GetOffsetForBuffer(int buffer_request_index, + int* offset) override; + + TfLiteStatus AddBuffer(int size, int first_time_used, + int last_time_used) override; + size_t GetMaximumMemorySize() override; + int GetBufferCount() override; + + private: + const BufferPlan* buffer_plan_; // not owned, can't be null + + // The number of buffers requested so far. Used for error checking. + int buffer_request_count_; + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MEMORY_PLANNER_NON_PERSISTENT_MEMORY_PLANNER_SHIM_H__ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.cc new file mode 100644 index 0000000..296a502 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.cc @@ -0,0 +1,375 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +namespace { +constexpr char kOfflineMemAllocMetadata[] = "OfflineMemoryAllocation"; +constexpr int kUninitializedLifetime = -1; +} // namespace + +// Mark the given Allocation info as first created at the specified allocation +// scope count. Only the first creation must be recorded since the allocation +// scope count monotonically increases throughout the lifetime marking process. +void AllocationInfoBuilder::UpdateFirstCreated(AllocationInfo* current, + int allocation_scope_count) { + TFLITE_DCHECK(current->first_created <= allocation_scope_count); + if (current->first_created == kUninitializedLifetime) { + current->first_created = allocation_scope_count; + } +} + +// Mark the given AllocationInfo as last used at the specified allocation scope +// count. Update the last used marker every time, since the allocation scope +// count monotonically increases through the lifetime marking process. +void AllocationInfoBuilder::UpdateLastUsed(AllocationInfo* current, + int allocation_scope_count) { + TFLITE_DCHECK(current->last_used <= allocation_scope_count); + current->last_used = allocation_scope_count; +} + +TfLiteStatus AllocationInfoBuilder::MarkSubgraphLifetimesIfNecessary( + const Operator* op, internal::ScratchBufferRequest* scratch_buffer_requests, + ScratchBufferHandle* scratch_buffer_handles, + SubgraphAllocations* allocations) { + int first_subgraph_index = -1; + int second_subgraph_index = -1; + const OperatorCode* opcode = + model_->operator_codes()->Get(op->opcode_index()); + switch (opcode->builtin_code()) { + case BuiltinOperator_IF: { + first_subgraph_index = + op->builtin_options_as_IfOptions()->then_subgraph_index(); + second_subgraph_index = + op->builtin_options_as_IfOptions()->else_subgraph_index(); + break; + } + case BuiltinOperator_CALL_ONCE: { + first_subgraph_index = + op->builtin_options_as_CallOnceOptions()->init_subgraph_index(); + break; + } + case BuiltinOperator_WHILE: { + first_subgraph_index = + op->builtin_options_as_WhileOptions()->cond_subgraph_index(); + second_subgraph_index = + op->builtin_options_as_WhileOptions()->body_subgraph_index(); + break; + } + default: { + break; + } + } + if (first_subgraph_index != -1) { + // Enter a new allocation scope for each subgraph. + allocation_scope_count_++; + TF_LITE_ENSURE_STATUS( + MarkAllocationLifetimes(first_subgraph_index, scratch_buffer_requests, + scratch_buffer_handles, allocations)); + } + if (second_subgraph_index != -1) { + // Enter a new allocation scope for each subgraph. 
+ allocation_scope_count_++; + TF_LITE_ENSURE_STATUS( + MarkAllocationLifetimes(second_subgraph_index, scratch_buffer_requests, + scratch_buffer_handles, allocations)); + } + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::CreateAllocationInfo( + int scratch_buffer_request_count) { + size_t subgraph_offsets_length = model_->subgraphs()->size() * sizeof(size_t); + info_.subgraph_offsets = + reinterpret_cast(non_persistent_allocator_->AllocateTemp( + subgraph_offsets_length, alignof(size_t))); + if (info_.subgraph_offsets == nullptr) { + MicroPrintf( + "Failed to allocate memory for memory planning, %d bytes required", + subgraph_offsets_length); + return kTfLiteError; + } + size_t tensor_count = 0; + for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); + subgraph_idx++) { + // Add all tensors in each subgraph to the AllocationInfo array. Even weight + // tensors are added but marked with needs_allocating = false. Including all + // tensors in the graph here simplifies logic. + info_.subgraph_offsets[subgraph_idx] = tensor_count; + tensor_count += model_->subgraphs()->Get(subgraph_idx)->tensors()->size(); + } + info_.tensor_count = tensor_count; + + // Scratch buffer allocations follow tensor allocations, so the scratch offset + // is equal to the number of tensor allocations. + info_.scratch_offset = tensor_count; + info_.allocation_info_count = tensor_count + scratch_buffer_request_count; + info_.scratch_buffer_count = scratch_buffer_request_count; + size_t bytes = sizeof(AllocationInfo) * info_.allocation_info_count; + + // Allocate an array of AllocationInfo structs from the temp section. This + // struct will be used by AllocationInfoBuilder to find buffer usage. + info_.allocation_info = reinterpret_cast( + non_persistent_allocator_->AllocateTemp(bytes, alignof(AllocationInfo))); + if (info_.allocation_info == nullptr) { + MicroPrintf( + "Failed to allocate memory for memory planning, %d bytes required", + bytes); + return kTfLiteError; + } + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::FreeAllocationInfo() { + non_persistent_allocator_->DeallocateTemp( + reinterpret_cast(info_.allocation_info)); + non_persistent_allocator_->DeallocateTemp( + reinterpret_cast(info_.subgraph_offsets)); + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::ValidateSubgraph( + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) { + uint32_t operators_size = NumSubgraphOperators(subgraph); + + for (uint32_t i = 0; i < operators_size; i++) { + const auto op = subgraph->operators()->Get(i); + for (size_t n = 0; + op->intermediates() != nullptr && n < op->intermediates()->size(); + n++) { + const int tensor_index = op->intermediates()->Get(n); + size_t tensor_size = -1; + TF_LITE_ENSURE_STATUS(TfLiteEvalTensorByteLength( + &eval_tensors[tensor_index], &tensor_size)); + if (tensor_size != 0) { + MicroPrintf( + "Does not support intermediate tensor with non-zero size: %d", + tensor_size); + return kTfLiteError; + } + } + } + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::InitializeAllocationInfo( + const int32_t* offline_offsets, SubgraphAllocations* allocations) { + AllocationInfo* allocation_info = info_.allocation_info; + // Initialize allocation info for every tensor in every subgraph. 
+ for (size_t subgraph_idx = 0; subgraph_idx < model_->subgraphs()->size(); + subgraph_idx++) { + const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx); + TfLiteEvalTensor* eval_tensors = allocations[subgraph_idx].tensors; + AllocationInfo* subgraph_allocation_info = + &allocation_info[info_.subgraph_offsets[subgraph_idx]]; + + // Ensure constraints are met. + TF_LITE_ENSURE_STATUS(ValidateSubgraph(subgraph, eval_tensors)); + + for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { + AllocationInfo* current = &subgraph_allocation_info[i]; + current->output_ptr = &(eval_tensors[i].data.data); + + TF_LITE_ENSURE_STATUS( + TfLiteEvalTensorByteLength(&eval_tensors[i], ¤t->bytes)); + + current->first_created = kUninitializedLifetime; + current->last_used = kUninitializedLifetime; + current->needs_allocating = + (eval_tensors[i].data.data == nullptr) && + (!subgraph->tensors()->Get(i)->is_variable()) && + (current->bytes != 0); + if (offline_offsets) { + current->offline_offset = offline_offsets[i]; + + // Mark offline planned variable tensors so they can get an offline + // offset and be handled offline. + if (subgraph->tensors()->Get(i)->is_variable() && + current->offline_offset != kOnlinePlannedBuffer) { + current->needs_allocating = true; + } + + } else { + current->offline_offset = kOnlinePlannedBuffer; + } + } + } + // Initialize allocation info for every scratch buffer. + AllocationInfo* scratch_allocation_info = + &allocation_info[info_.scratch_offset]; + for (size_t i = 0; i < info_.scratch_buffer_count; i++) { + AllocationInfo* current = &scratch_allocation_info[i]; + current->first_created = kUninitializedLifetime; + current->last_used = kUninitializedLifetime; + current->needs_allocating = true; + current->offline_offset = kOnlinePlannedBuffer; + } + return kTfLiteOk; +} + +TfLiteStatus AllocationInfoBuilder::MarkAllocationLifetimes( + int subgraph_idx, internal::ScratchBufferRequest* scratch_buffer_requests, + ScratchBufferHandle* scratch_buffer_handles, + SubgraphAllocations* allocations) { + const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx); + + AllocationInfo* allocation_info = info_.allocation_info; + // Each subgraph's tensor allocations are in a contiguous block starting at + // subgraph_offsets_[subgraph index] with one entry per tensor. + AllocationInfo* subgraph_allocation_info = + &allocation_info[info_.subgraph_offsets[subgraph_idx]]; + + uint32_t operators_size = NumSubgraphOperators(subgraph); + // Mark all inputs as created at the start of the subgraph invocation. + for (size_t i = 0; + subgraph->inputs() != nullptr && i < subgraph->inputs()->size(); ++i) { + const int tensor_index = subgraph->inputs()->Get(i); + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + UpdateFirstCreated(current, allocation_scope_count_); + // This will ensure that the tensors that are inputs to the subgraphs + // but not used in any ops also have a reasonable lifetime. + UpdateLastUsed(current, allocation_scope_count_); + } + + for (uint32_t i = 0; i < operators_size; i++) { + // Each operator has a new allocation scope. + allocation_scope_count_++; + const auto* op = subgraph->operators()->Get(i); + // Figure out when the first creation and use of each tensor is. 
+ for (size_t n = 0; op->outputs() != nullptr && n < op->outputs()->size(); + ++n) { + const int tensor_index = op->outputs()->Get(n); + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + UpdateFirstCreated(current, allocation_scope_count_); + } + + // Keep track of scope count before any subgraphs, so that scratch buffers' + // lifetime within a control flow op properly overlaps with all subgraphs. + int start_allocation_scope_count = allocation_scope_count_; + + // Control flow operators can invoke subgraphs. Plan these subgraphs + // before continuing on to the rest of the graph. + MarkSubgraphLifetimesIfNecessary(op, scratch_buffer_requests, + scratch_buffer_handles, allocations); + + // Figure out when the last use of each tensor is. + for (size_t n = 0; op->inputs() != nullptr && n < op->inputs()->size(); + ++n) { + const int tensor_index = op->inputs()->Get(n); + // Optional bias tensors can have an index of -1 when they are omitted. + if (tensor_index >= 0) { + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + // No need to update creation since it is either marked by the subgraph + // or producer op, or it is not part of the memory plan (weight, bias + // tensor). + UpdateLastUsed(current, allocation_scope_count_); + } + } + for (size_t n = 0; op->outputs() != nullptr && n < op->outputs()->size(); + ++n) { + const int tensor_index = op->outputs()->Get(n); + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + UpdateLastUsed(current, allocation_scope_count_); + } + + // Mark thse lifetime of scratch buffers belonging to the current node. This + // operation is O(N * M) where N is the total number of visited nodes and M + // is the total number of scratch buffers. + // TODO(b/217794030): Optimize this memory planning code. + AllocationInfo* scratch_allocation_info = + &allocation_info[info_.scratch_offset]; + for (size_t scratch_idx = 0; scratch_idx < info_.scratch_buffer_count; + scratch_idx++) { + internal::ScratchBufferRequest request = + scratch_buffer_requests[scratch_idx]; + AllocationInfo* current = &scratch_allocation_info[scratch_idx]; + if (request.node_idx == static_cast(i) && + request.subgraph_idx == static_cast(subgraph_idx)) { + ScratchBufferHandle* current_handle = + &(scratch_buffer_handles[scratch_idx]); + current->output_ptr = reinterpret_cast(¤t_handle->data); + current->bytes = request.bytes; + UpdateFirstCreated(current, start_allocation_scope_count); + UpdateLastUsed(current, allocation_scope_count_); + } + } + } + + // Mark all outputs as persistent to the end of the subgraph invocation. + for (size_t i = 0; + subgraph->outputs() != nullptr && i < subgraph->outputs()->size(); ++i) { + const int tensor_index = subgraph->outputs()->Get(i); + AllocationInfo* current = &subgraph_allocation_info[tensor_index]; + // Make sure to assign the First created value of the subgraph output + // This will handle the case where the subgraph is empty. This helps + // ensure all tensors have valid lifetimes before those are used by the + // memory planner. + UpdateFirstCreated(current, allocation_scope_count_); + UpdateLastUsed(current, allocation_scope_count_); + } + return kTfLiteOk; +} + +// Get offline tensors allocation plan. See +// micro/docs/memory_management.md for more info. 
+TfLiteStatus AllocationInfoBuilder::GetOfflinePlannedOffsets( + const int32_t** offline_planner_offsets) { + if (model_->metadata()) { + for (size_t i = 0; i < model_->metadata()->size(); ++i) { + auto metadata = model_->metadata()->Get(i); + + if (metadata->name()) { + const size_t metadata_name_size = metadata->name()->size(); + + if ((strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata, + std::min(metadata_name_size, + strlen(kOfflineMemAllocMetadata))) == 0) && + metadata_name_size == strlen(kOfflineMemAllocMetadata)) { + const flatbuffers::Vector>* buffers = + model_->buffers(); + auto* buffer = (*buffers)[metadata->buffer()]; + auto* array = buffer->data(); + const uint32_t* metadata_buffer = + reinterpret_cast(array->data()); + const size_t nbr_tensors = static_cast(metadata_buffer[2]); + *offline_planner_offsets = + reinterpret_cast(&metadata_buffer[3]); + + if (info_.tensor_count != nbr_tensors) { + MicroPrintf( + "Nbr of offline buffer offsets (%d) in metadata " + "not equal nbr tensors (%d)\n", + nbr_tensors, info_.tensor_count); + return kTfLiteError; + } + } + } + } + } + return kTfLiteOk; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.h new file mode 100644 index 0000000..a02503e --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.h @@ -0,0 +1,139 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h" + +namespace tflite { + +// Used to hold information used during allocation calculations. +struct AllocationInfo { + size_t bytes; + void** output_ptr; + int first_created; + int last_used; + int32_t offline_offset; + bool needs_allocating; +}; + +// Used to hold the allocation info list and related metadata for the entire +// graph (including subgraphs). Since all subgraphs are planned together, the +// allocation info list contains allocations for all subgraphs. Track the offset +// into this list for each subgraph then reserve space to track all allocations. +// +// The AllocationInfo list is a contiguous list of allocations across all +// subgraphs and scratch buffers. Each element here is marked as +// st. 
The following is a possible +// AllocationInfo list: +// [s0t0, s0t1, s1t0, s2t1, s1t2, s3t0, s3t1, scratch0, scratch1, scratch2] +// +// For this example, the subgraph offsets would be [0, 2, 5] and the scratch +// offset would be 7. +struct GraphAllocationInfo { + AllocationInfo* allocation_info; + size_t allocation_info_count; + size_t* subgraph_offsets; + size_t scratch_offset; + size_t tensor_count; + size_t scratch_buffer_count; +}; + +// A helper class to construct AllocationInfo array. This array contains the +// lifetime of tensors / scratch_buffer and will be used to calculate the memory +// plan. Methods need to be called in order from `Create`, Init`, `Add*`, to +// `Finish`. +class AllocationInfoBuilder { + public: + AllocationInfoBuilder(const Model* model, + INonPersistentBufferAllocator* non_persistent_allocator) + : model_(model), non_persistent_allocator_(non_persistent_allocator) {} + + // Check if model contains offline planned buffer offsets. + // - If there's no metadata available, offline_planner_offsets is not set + // - If there's metadata available, offline_planner_offsets will point to the + // first offset in the metadata buffer list. + TfLiteStatus GetOfflinePlannedOffsets( + const int32_t** offline_planner_offsets); + + // Allocate memory for the allocation info array as well as offsets into that + // array for each subgraph. + TfLiteStatus CreateAllocationInfo(int scratch_buffer_request_count); + + // Release memory used for the allocation info array. + TfLiteStatus FreeAllocationInfo(); + + // Initialize AllocationInfo for all tensors and scratch buffers in the graph. + TfLiteStatus InitializeAllocationInfo(const int32_t* offline_offsets, + SubgraphAllocations* allocations); + + // Mark the scope of each tensor and scratch buffer across the graph. Enter + // all possible subgraphs invoked by each control flow operator. This method + // marks the maximum lifetime of each buffer so that tensors are correctly + // planned for all valid invocation flows. + TfLiteStatus MarkAllocationLifetimes( + int subgraph_idx, internal::ScratchBufferRequest* scratch_buffer_request, + ScratchBufferHandle* scratch_buffer_handles, + SubgraphAllocations* allocations); + + // Identify control flow operators and recursively mark all subgraphs which + // that operator can invoke. The lifetime of all tensors within a subgraph + // can only be extended. The order of subgraph invocation does not matter + // since subgraphs within the same control flow operator are executed + // within their own allocation scope (planned buffers in a subgraph cannot + // persist beyond the end of that subgraph's invocation). + TfLiteStatus MarkSubgraphLifetimesIfNecessary( + const Operator* op, + internal::ScratchBufferRequest* scratch_buffer_requests, + ScratchBufferHandle* scratch_buffer_handles, + SubgraphAllocations* allocations); + + // Returns the number of allocations. + int AllocationCount() const { return info_.allocation_info_count; } + + // Returns a pointer to the built AllocationInfo array. + AllocationInfo* Finish() const { return info_.allocation_info; } + + private: + // Mark the given Allocation info as first created at the specified allocation + // scope count. Only the first creation must be recorded since the allocation + // scope count monotonically increases throughout the lifetime marking + // process. + void UpdateFirstCreated(AllocationInfo* current, int allocation_scope_count); + + // Mark the given AllocationInfo as last used at the specified allocation + // scope + // count. 
Update the last used marker every time, since the allocation scope + // count monotonically increases through the lifetime marking process. + void UpdateLastUsed(AllocationInfo* current, int allocation_scope_count); + + // Validate if a subgraph satisfies assumptions. + TfLiteStatus ValidateSubgraph(const SubGraph* subgraph, + TfLiteEvalTensor* eval_tensors); + + const tflite::Model* model_ = nullptr; + INonPersistentBufferAllocator* non_persistent_allocator_ = nullptr; + GraphAllocationInfo info_; + int allocation_scope_count_ = 0; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_ALLOCATION_INFO_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cc new file mode 100644 index 0000000..872cb06 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cc @@ -0,0 +1,941 @@ +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" + +#include +#include + +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocation_info.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_arena_constants.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_utils.h" + +namespace tflite { + +namespace { + +// Maximum number of scratch buffer requests per operator. Operator kernels that +// request more than this value will receive an exception. +constexpr size_t kMaxScratchBuffersPerOp = 12; + +// Sentinel value used as a placeholder to mark a ScratchBufferRequest request +// needs a node id assignment. 
+constexpr int kUnassignedScratchBufferRequestIndex = -1; + +const TfLiteIntArray kZeroLengthIntArray = {}; + +class MicroBuiltinDataAllocator : public TfLiteBridgeBuiltinDataAllocator { + public: + explicit MicroBuiltinDataAllocator( + IPersistentBufferAllocator* persistent_allocator) + : persistent_allocator_(persistent_allocator) {} + + void* Allocate(size_t size, size_t alignment_hint) override { + return persistent_allocator_->AllocatePersistentBuffer(size, + alignment_hint); + } + void Deallocate(void* data) override { + // Do not deallocate, builtin data needs to be available for the life time + // of the model. + } + + TF_LITE_REMOVE_VIRTUAL_DELETE + + private: + IPersistentBufferAllocator* persistent_allocator_; +}; + +TfLiteStatus CreatePlan(MicroMemoryPlanner* planner, + const AllocationInfo* allocation_info, + size_t allocation_info_size) { + // Add the tensors to our allocation plan. + for (size_t i = 0; i < allocation_info_size; ++i) { + const AllocationInfo* current = &allocation_info[i]; + if (current->needs_allocating) { + size_t aligned_bytes_required = + AlignSizeUp(current->bytes, MicroArenaBufferAlignment()); + if (current->offline_offset == kOnlinePlannedBuffer) { + TF_LITE_ENSURE_STATUS(planner->AddBuffer(aligned_bytes_required, + current->first_created, + current->last_used)); + } else { + TF_LITE_ENSURE_STATUS( + planner->AddBuffer(aligned_bytes_required, current->first_created, + current->last_used, current->offline_offset)); + } + } + } + return kTfLiteOk; +} + +TfLiteStatus CommitPlan(MicroMemoryPlanner* planner, uint8_t* starting_point, + const AllocationInfo* allocation_info, + size_t allocation_info_size) { + // Figure out the actual memory addresses for each buffer, based on the plan. + int planner_index = 0; + for (size_t i = 0; i < allocation_info_size; ++i) { + const AllocationInfo* current = &allocation_info[i]; + if (current->needs_allocating) { + int offset = -1; + TF_LITE_ENSURE_STATUS( + planner->GetOffsetForBuffer(planner_index, &offset)); + *current->output_ptr = reinterpret_cast(starting_point + offset); + ++planner_index; + } + } + return kTfLiteOk; +} + +IPersistentBufferAllocator* CreatePersistentArenaAllocator(uint8_t* buffer_head, + size_t buffer_size) { + // Align the actually used area by the tail because persistent buffer grows + // from the bottom to top. + uint8_t* aligned_buffer_tail = + AlignPointerDown(buffer_head + buffer_size, MicroArenaBufferAlignment()); + size_t aligned_buffer_size = aligned_buffer_tail - buffer_head; + PersistentArenaBufferAllocator tmp = + PersistentArenaBufferAllocator(buffer_head, aligned_buffer_size); + + // Allocate enough bytes from the buffer to create a + // SingleArenaBufferAllocator. The new instance will use the current adjusted + // tail buffer from the tmp allocator instance. + uint8_t* allocator_buffer = + tmp.AllocatePersistentBuffer(sizeof(PersistentArenaBufferAllocator), + alignof(PersistentArenaBufferAllocator)); + // Use the default copy constructor to populate internal states. + return new (allocator_buffer) PersistentArenaBufferAllocator(tmp); +} + +// NonPersistentBufferAllocator instance is created in the persistent buffer +// because it has to be persistent to keep track of the non-persistent buffer +// information. 
+INonPersistentBufferAllocator* CreateNonPersistentArenaAllocator( + uint8_t* buffer_head, size_t buffer_size, + IPersistentBufferAllocator* persistent_buffer_allocator) { + uint8_t* allocator_buffer = + persistent_buffer_allocator->AllocatePersistentBuffer( + sizeof(NonPersistentArenaBufferAllocator), + alignof(NonPersistentArenaBufferAllocator)); + // Align the actually used area by the head because persistent buffer grows + // from the head to bottom. + uint8_t* aligned_buffer_head = + AlignPointerUp(buffer_head, MicroArenaBufferAlignment()); + size_t aligned_buffer_size = buffer_head + buffer_size - aligned_buffer_head; + + INonPersistentBufferAllocator* non_persistent_buffer_allocator = + new (allocator_buffer) NonPersistentArenaBufferAllocator( + aligned_buffer_head, aligned_buffer_size); + return non_persistent_buffer_allocator; +} + +} // namespace + +namespace internal { + +// Returns a pointer to any buffer associated with the flatbuffer tensor. Can +// return nullptr if no buffer is found. +void* GetFlatbufferTensorBuffer( + const tflite::Tensor& flatbuffer_tensor, + const flatbuffers::Vector>* buffers) { + // We need to figure out where the actual contents of this tensor are stored + // in memory. We'll check to see if there's a serialized buffer (pretty much + // the same as a constant op in TensorFlow) associated with this tensor first, + // and if there is update the runtime structure to point to its location in + // memory. + // First see if there's any buffer information in the serialized tensor. + // TODO(b/170379532): Add better unit tests to validate flatbuffer values. + void* out_buffer = nullptr; + if (auto* buffer = (*buffers)[flatbuffer_tensor.buffer()]) { + // If we've found a buffer, does it have any data? + if (auto* array = buffer->data()) { + // If it has any data, is the data size larger than zero? + if (array->size()) { + // We've found a buffer with valid data, so update the runtime tensor + // data structure to point to it. + out_buffer = const_cast(static_cast(array->data())); + } + } + // TODO(petewarden): It's not clear in what circumstances we could have a + // buffer in the serialized tensor, but it doesn't have any data in it. Is + // that a validly-generated file, and if so what does it mean, or is it an + // error condition? It would be good to tighten up the specification to make + // it less ambiguous. + } + return out_buffer; +} + +TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( + IPersistentBufferAllocator* persistent_buffer_allocator, + INonPersistentBufferAllocator* non_persistent_buffer_allocator, + bool allocate_temp, const tflite::Tensor& flatbuffer_tensor, + const flatbuffers::Vector>* buffers, + TfLiteTensor* result) { + TFLITE_DCHECK(result != nullptr); + + *result = {}; + // Make sure the serialized type is one we know how to deal with, and convert + // it from a flatbuffer enum into a constant used by the kernel C API. + TF_LITE_ENSURE_STATUS( + tflite::ConvertTensorType(flatbuffer_tensor.type(), &result->type)); + // Make sure we remember if the serialized tensor is designated as a variable. + result->is_variable = flatbuffer_tensor.is_variable(); + + result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers); + // this is useful for debugging +#if defined(EI_LOG_LEVEL) && (EI_LOG_LEVEL >= 4) + result->name = flatbuffer_tensor.name()->c_str(); +#endif + // TODO(petewarden): Some of these paths aren't getting enough testing + // coverage, so we should figure out some tests that exercise them. 
+ if (result->data.data == nullptr) { + // The tensor contents haven't been set from a serialized buffer, so + // make a note that they will be allocated from memory. The actual + // allocation won't happen until later. + result->allocation_type = kTfLiteArenaRw; + } else { + // We set the data from a serialized buffer, so record tha. + result->allocation_type = kTfLiteMmapRo; + } + + // Figure out what the size in bytes of the buffer is and store it. + size_t type_size; + TF_LITE_ENSURE_STATUS( + BytesRequiredForTensor(flatbuffer_tensor, &result->bytes, &type_size)); + + if (flatbuffer_tensor.shape() == nullptr) { + // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar + // tensor. + // TODO(b/188459715): figure out why const_cast is required here. + result->dims = const_cast(&kZeroLengthIntArray); + } else { + // TFLM doesn't allow reshaping the tensor which requires dynamic memory + // allocation so it is safe to drop the const qualifier. In the future, if + // we really want to update the tensor shape, we can always pass in a new + // TfLiteIntArray - especially we have to do so if the dimension is + result->dims = FlatBufferVectorToTfLiteTypeArray(flatbuffer_tensor.shape()); + } + + // Copy the quantization information from the serialized data. + const auto* src_quantization = flatbuffer_tensor.quantization(); + if (src_quantization && src_quantization->scale() && + (src_quantization->scale()->size() > 0) && + src_quantization->zero_point() && + (src_quantization->zero_point()->size() > 0)) { + // Always populate the TfLiteTensor.params field, even if there are + // per-channel quantization parameters. + result->params.scale = src_quantization->scale()->Get(0); + // Note that the zero_point field in the FlatBuffers schema is a 64-bit + // integer, but the zero_point field in the TfLiteQuantizationParams struct + // is a 32-bit integer. + result->params.zero_point = + static_cast(src_quantization->zero_point()->Get(0)); + + // Populate per-channel quantization params. + int channels = src_quantization->scale()->size(); + TfLiteAffineQuantization* quantization = + allocate_temp + ? reinterpret_cast( + non_persistent_buffer_allocator->AllocateTemp( + sizeof(TfLiteAffineQuantization), + alignof(TfLiteAffineQuantization))) + : reinterpret_cast( + persistent_buffer_allocator->AllocatePersistentBuffer( + sizeof(TfLiteAffineQuantization), + alignof(TfLiteAffineQuantization))); + if (quantization == nullptr) { + MicroPrintf("Unable to allocate TfLiteAffineQuantization.\n"); + return kTfLiteError; + } + + // TODO(b/153688719): Reduce tail allocation by using a global zero-point + // buffer. This value can not be reused from the flatbuffer since the + // zero_point is stored as a int64_t. + quantization->zero_point = + allocate_temp + ? 
reinterpret_cast( + non_persistent_buffer_allocator->AllocateTemp( + TfLiteIntArrayGetSizeInBytes(channels), + alignof(TfLiteIntArray))) + : reinterpret_cast( + persistent_buffer_allocator->AllocatePersistentBuffer( + TfLiteIntArrayGetSizeInBytes(channels), + alignof(TfLiteIntArray))); + if (quantization->zero_point == nullptr) { + MicroPrintf("Unable to allocate quantization->zero_point.\n"); + return kTfLiteError; + } + + quantization->scale = + FlatBufferVectorToTfLiteTypeArray(src_quantization->scale()); + + quantization->zero_point->size = channels; + int* zero_point_data = quantization->zero_point->data; + for (int i = 0; i < channels; i++) { + // As a space-saving optimization, zero point arrays for weights can be + // reduced to a single value, since all zero points for weights are 0. + zero_point_data[i] = src_quantization->zero_point()->size() == + src_quantization->scale()->size() + ? src_quantization->zero_point()->Get(i) + : src_quantization->zero_point()->Get(0); + } + // TODO(rocky): Need to add a micro_allocator test case that fails when + // this is not copied: + quantization->quantized_dimension = src_quantization->quantized_dimension(); + + result->quantization = {kTfLiteAffineQuantization, quantization}; + } + return kTfLiteOk; +} + +TfLiteStatus InitializeTfLiteEvalTensorFromFlatbuffer( + const tflite::Tensor& flatbuffer_tensor, + const flatbuffers::Vector>* buffers, + TfLiteEvalTensor* result) { + *result = {}; + // Make sure the serialized type is one we know how to deal with, and convert + // it from a flatbuffer enum into a constant used by the kernel C API. + TF_LITE_ENSURE_STATUS( + tflite::ConvertTensorType(flatbuffer_tensor.type(), &result->type)); + + result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers); + + if (flatbuffer_tensor.shape() == nullptr) { + // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar + // tensor. 
+ result->dims = const_cast(&kZeroLengthIntArray); + } else { + result->dims = FlatBufferVectorToTfLiteTypeArray(flatbuffer_tensor.shape()); + } + return kTfLiteOk; +} + +} // namespace internal + +size_t MicroAllocator::GetDefaultTailUsage(bool is_memory_planner_given) { + size_t total_size = AlignSizeUp() + + AlignSizeUp() + + AlignSizeUp() + + AlignSizeUp(); + if (!is_memory_planner_given) { + total_size += AlignSizeUp(); + } + return total_size; +} + +MicroAllocator::MicroAllocator(SingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner) + : non_persistent_buffer_allocator_(memory_allocator), + persistent_buffer_allocator_(memory_allocator), + memory_planner_(memory_planner), + model_is_allocating_(false) {} + +MicroAllocator::MicroAllocator( + IPersistentBufferAllocator* persistent_buffer_allocator, + INonPersistentBufferAllocator* non_persistent_buffer_allocator, + MicroMemoryPlanner* memory_planner) + : non_persistent_buffer_allocator_(non_persistent_buffer_allocator), + persistent_buffer_allocator_(persistent_buffer_allocator), + memory_planner_(memory_planner), + model_is_allocating_(false) {} + +MicroAllocator::~MicroAllocator() {} + +MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, size_t arena_size, + MicroMemoryPlanner* memory_planner) { + uint8_t* aligned_arena = + AlignPointerUp(tensor_arena, MicroArenaBufferAlignment()); + size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena; + SingleArenaBufferAllocator* memory_allocator = + SingleArenaBufferAllocator::Create(aligned_arena, aligned_arena_size); + + return Create(memory_allocator, memory_planner); +} + +MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, + size_t arena_size) { + uint8_t* aligned_arena = + AlignPointerUp(tensor_arena, MicroArenaBufferAlignment()); + size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena; + SingleArenaBufferAllocator* memory_allocator = + SingleArenaBufferAllocator::Create(aligned_arena, aligned_arena_size); + + // By default create GreedyMemoryPlanner. + // If a different MemoryPlanner is needed, use the other api. 
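The comment above distinguishes the default path from "the other api". As an illustration only (not part of the patch; the arena size is arbitrary and LinearMemoryPlanner merely stands in for any MicroMemoryPlanner), a caller might choose between the two roughly like this; the default GreedyMemoryPlanner construction that the comment refers to continues in the code right below.

#include <cstddef>
#include <cstdint>

#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/linear_memory_planner.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h"

constexpr size_t kArenaSize = 16 * 1024;
// Create() aligns the arena start internally, so no alignas is required here.
static uint8_t g_tensor_arena[kArenaSize];

// Default path: a GreedyMemoryPlanner is constructed inside the arena.
tflite::MicroAllocator* CreateWithDefaultPlanner() {
  return tflite::MicroAllocator::Create(g_tensor_arena, kArenaSize);
}

// Explicit-planner path ("the other api"): the caller owns the planner.
tflite::MicroAllocator* CreateWithLinearPlanner() {
  static tflite::LinearMemoryPlanner linear_planner;
  return tflite::MicroAllocator::Create(g_tensor_arena, kArenaSize,
                                        &linear_planner);
}

Either way the allocator carves its own bookkeeping, and in the default case the GreedyMemoryPlanner itself, out of the supplied arena.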
+ uint8_t* memory_planner_buffer = memory_allocator->AllocatePersistentBuffer( + sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); + GreedyMemoryPlanner* memory_planner = + new (memory_planner_buffer) GreedyMemoryPlanner(); + + return Create(memory_allocator, memory_planner); +} + +MicroAllocator* MicroAllocator::Create( + SingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner) { + TFLITE_DCHECK(memory_allocator != nullptr); + TFLITE_DCHECK(memory_planner != nullptr); + + uint8_t* allocator_buffer = memory_allocator->AllocatePersistentBuffer( + sizeof(MicroAllocator), alignof(MicroAllocator)); + MicroAllocator* allocator = new (allocator_buffer) + MicroAllocator(memory_allocator, memory_allocator, memory_planner); + return allocator; +} + +MicroAllocator* MicroAllocator::Create(uint8_t* persistent_tensor_arena, + size_t persistent_arena_size, + uint8_t* non_persistent_tensor_arena, + size_t non_persistent_arena_size) { + TFLITE_DCHECK(persistent_tensor_arena != nullptr); + TFLITE_DCHECK(non_persistent_tensor_arena != nullptr); + TFLITE_DCHECK(persistent_tensor_arena != non_persistent_tensor_arena); + + IPersistentBufferAllocator* persistent_buffer_allocator = + CreatePersistentArenaAllocator(persistent_tensor_arena, + persistent_arena_size); + INonPersistentBufferAllocator* non_persistent_buffer_allocator = + CreateNonPersistentArenaAllocator(non_persistent_tensor_arena, + non_persistent_arena_size, + persistent_buffer_allocator); + + uint8_t* memory_planner_buffer = + persistent_buffer_allocator->AllocatePersistentBuffer( + sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); + GreedyMemoryPlanner* memory_planner = + new (memory_planner_buffer) GreedyMemoryPlanner(); + + uint8_t* micro_allocator_buffer = + persistent_buffer_allocator->AllocatePersistentBuffer( + sizeof(MicroAllocator), alignof(MicroAllocator)); + MicroAllocator* allocator = new (micro_allocator_buffer) + MicroAllocator(persistent_buffer_allocator, + non_persistent_buffer_allocator, memory_planner); + return allocator; +} + +SubgraphAllocations* MicroAllocator::StartModelAllocation(const Model* model) { + TFLITE_DCHECK(model != nullptr); + + if (model_is_allocating_) { + MicroPrintf( + "MicroAllocator: Model allocation started before " + "finishing previously allocated model"); + return nullptr; + } + + model_is_allocating_ = true; + + uint8_t* data_allocator_buffer = + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(MicroBuiltinDataAllocator), + alignof(MicroBuiltinDataAllocator)); + builtin_data_allocator_ = new (data_allocator_buffer) + MicroBuiltinDataAllocator(persistent_buffer_allocator_); + + if (InitScratchBufferData() != kTfLiteOk) { + return nullptr; + } + + // Allocate struct to store eval tensors, nodes and registrations. 
+  SubgraphAllocations* output = reinterpret_cast<SubgraphAllocations*>(
+      persistent_buffer_allocator_->AllocatePersistentBuffer(
+          sizeof(SubgraphAllocations) * model->subgraphs()->size(),
+          alignof(SubgraphAllocations)));
+  if (output == nullptr) {
+    MicroPrintf("Failed to allocate memory for model metadata.");
+    return nullptr;
+  }
+
+  if (AllocateTfLiteEvalTensors(model, output) != kTfLiteOk ||
+      AllocateNodeAndRegistrations(model, output) != kTfLiteOk) {
+    return nullptr;
+  }
+  return output;
+}
+
+TfLiteStatus MicroAllocator::FinishModelAllocation(
+    const Model* model, SubgraphAllocations* subgraph_allocations,
+    ScratchBufferHandle** scratch_buffer_handles) {
+  if (!model_is_allocating_) {
+    MicroPrintf(
+        "MicroAllocator: Model allocation finished before "
+        "starting allocating model");
+    return kTfLiteError;
+  }
+
+  // Allocate scratch buffer metadata.
+  TF_LITE_ENSURE_STATUS(AllocateScratchBufferHandles(
+      scratch_buffer_handles, scratch_buffer_request_count_));
+
+  // Plan all subgraphs and scratch buffers together.
+  TF_LITE_ENSURE_STATUS(CommitStaticMemoryPlan(model, subgraph_allocations,
+                                               *scratch_buffer_handles));
+  model_is_allocating_ = false;
+  return kTfLiteOk;
+}
+
+void* MicroAllocator::AllocatePersistentBuffer(size_t bytes) {
+  return persistent_buffer_allocator_->AllocatePersistentBuffer(
+      bytes, MicroArenaBufferAlignment());
+}
+
+TfLiteStatus MicroAllocator::RequestScratchBufferInArena(size_t bytes,
+                                                         int subgraph_idx,
+                                                         int* buffer_idx) {
+  // All scratch buffer requests are stored in the head section of the arena
+  // when a model is in the prepare phase. First align a scratch buffer request
+  // pointer to the start of the head:
+  internal::ScratchBufferRequest* requests = GetScratchBufferRequests();
+
+  // Count the number of requested scratch buffers for the current node:
+  size_t current_node_request_count = 0;
+  for (size_t i = 0; i < scratch_buffer_request_count_; ++i) {
+    if (requests[i].node_idx == kUnassignedScratchBufferRequestIndex) {
+      ++current_node_request_count;
+    }
+  }
+
+  // First, ensure that the per-kernel request has not exceeded the limit:
+  if (current_node_request_count >= kMaxScratchBuffersPerOp) {
+    MicroPrintf("Scratch buffer request exceeds limit per operator (%d)",
+                kMaxScratchBuffersPerOp);
+    return kTfLiteError;
+  }
+
+  // Initialize and assign values for the request at the current index:
+  internal::ScratchBufferRequest* current_request =
+      &requests[scratch_buffer_request_count_];
+  *current_request = {};
+  // Assign -1 as a sentinel value that will be updated when the node finishes
+  // allocating:
+  current_request->bytes = bytes;
+  current_request->node_idx = kUnassignedScratchBufferRequestIndex;
+  current_request->subgraph_idx = subgraph_idx;
+
+  // Assign the current request index to the out-param:
+  *buffer_idx = scratch_buffer_request_count_;
+
+  // Bump the request count to prepare for the next request:
+  ++scratch_buffer_request_count_;
+  return kTfLiteOk;
+}
+
+TfLiteStatus MicroAllocator::FinishPrepareNodeAllocations(int node_id) {
+  // When a node has finished preparing, all temp allocations performed by the
+  // kernel should be cleaned up:
+  TF_LITE_ENSURE_STATUS(ResetTempAllocations());
+
+  // Find and update any new scratch buffer requests for the current node:
+  internal::ScratchBufferRequest* requests = GetScratchBufferRequests();
+
+  for (size_t i = 0; i < scratch_buffer_request_count_; ++i) {
+    // A request with a node_idx of -1 is a sentinel value used to indicate this
+    // was a new request for the current node.
The allocator finally knows the + // node index at this point. Assign the value and update the list of new + // requests so the head section can be adjusted to allow for the next kernel + // to allocate at most kMaxScratchBuffersPerOp requests: + if (requests[i].node_idx == kUnassignedScratchBufferRequestIndex) { + requests[i].node_idx = node_id; + } + } + + // Ensure that the head is re-adjusted to allow for another at-most + // kMaxScratchBuffersPerOp scratch buffer requests in the next operator: + TF_LITE_ENSURE_STATUS(non_persistent_buffer_allocator_->ResizeBuffer( + scratch_buffer_head_, + sizeof(internal::ScratchBufferRequest) * + (scratch_buffer_request_count_ + kMaxScratchBuffersPerOp), + alignof(internal::ScratchBufferRequest))); + + return kTfLiteOk; +} + +size_t MicroAllocator::used_bytes() const { + return non_persistent_buffer_allocator_->GetNonPersistentUsedBytes() + + persistent_buffer_allocator_->GetPersistentUsedBytes(); +} + +TfLiteStatus MicroAllocator::AllocateNodeAndRegistrations( + const Model* model, SubgraphAllocations* subgraph_allocations) { + TFLITE_DCHECK(subgraph_allocations != nullptr); + + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); + TFLITE_DCHECK(subgraph != nullptr); + + uint32_t operators_size = NumSubgraphOperators(subgraph); + + // Initialize NodeAndRegistrations for the subgraph. + NodeAndRegistration* output = reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(NodeAndRegistration) * operators_size, + alignof(NodeAndRegistration))); + if (output == nullptr) { + MicroPrintf("Failed to allocate memory for node_and_registrations."); + return kTfLiteError; + } + subgraph_allocations[subgraph_idx].node_and_registrations = output; + } + return kTfLiteOk; +} + +TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensor( + const Model* model, const SubgraphAllocations* subgraph_allocations, + int tensor_index, int subgraph_index) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_index); + TFLITE_DCHECK(subgraph != nullptr); + + // This value is allocated from persistent arena space. It is guaranteed to be + // around for the lifetime of the application. + TfLiteTensor* tensor = AllocatePersistentTfLiteTensorInternal(); + + // Populate any fields from the flatbuffer, since this TfLiteTensor struct is + // allocated in the persistent section of the arena, ensure that additional + // allocations also take place in that section of the arena. + if (PopulateTfLiteTensorFromFlatbuffer( + model, tensor, tensor_index, subgraph_index, + /*allocate_temp=*/false) != kTfLiteOk) { + MicroPrintf( + "Failed to populate a persistent TfLiteTensor struct " + "from flatbuffer data!"); + return nullptr; + } + + if (subgraph_allocations != nullptr) { + // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) + // and not located in the flatbuffer are stored on the pre-allocated list of + // TfLiteEvalTensors structs. These structs are the source of truth, simply + // point the corresponding buffer to the new TfLiteTensor data value. + tensor->data.data = + subgraph_allocations[subgraph_index].tensors[tensor_index].data.data; + // TfLiteEvalTensor structs must also be the source of truth for the + // TfLiteTensor dims. 
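For context, the request bookkeeping above (RequestScratchBufferInArena / FinishPrepareNodeAllocations) backs the standard kernel-facing TfLiteContext hooks. A rough sketch of that kernel-side pattern follows; OpData, kScratchBytes, and the registration wiring are placeholders for illustration, not code from this patch.

    #include "edge-impulse-sdk/tensorflow/lite/c/common.h"

    struct OpData {
      int scratch_index;  // index handed out during Prepare()
    };

    constexpr size_t kScratchBytes = 1024;  // assumed working-buffer size

    TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
      OpData* data = static_cast<OpData*>(node->user_data);
      // Only an index is returned; the byte offset is fixed later when the
      // allocator commits its static memory plan.
      return context->RequestScratchBufferInArena(context, kScratchBytes,
                                                  &data->scratch_index);
    }

    TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
      OpData* data = static_cast<OpData*>(node->user_data);
      // Resolved against the ScratchBufferHandle table built during allocation.
      void* scratch = context->GetScratchBuffer(context, data->scratch_index);
      return scratch != nullptr ? kTfLiteOk : kTfLiteError;
    }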
+ tensor->dims = + subgraph_allocations[subgraph_index].tensors[tensor_index].dims; + } + return tensor; +} + +void MicroAllocator::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) { + TFLITE_DCHECK(tensor != nullptr); + + if (tensor->quantization.type == kTfLiteAffineQuantization) { + TFLITE_DCHECK(tensor->quantization.params != nullptr); + TfLiteAffineQuantization* quantization = + reinterpret_cast( + tensor->quantization.params); + + non_persistent_buffer_allocator_->DeallocateTemp( + reinterpret_cast(quantization->zero_point)); + non_persistent_buffer_allocator_->DeallocateTemp( + reinterpret_cast(quantization)); + } + + // Clear the data in case someone still access tensor arena by mistake + tensor->quantization.type = kTfLiteNoQuantization; + tensor->quantization.params = nullptr; + tensor->data.data = nullptr; + tensor->dims = nullptr; + non_persistent_buffer_allocator_->DeallocateTemp( + reinterpret_cast(tensor)); +} + +TfLiteTensor* MicroAllocator::AllocateTempTfLiteTensor( + const Model* model, const SubgraphAllocations* subgraph_allocations, + int tensor_index, int subgraph_index) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_index); + TFLITE_DCHECK(subgraph != nullptr); + + // This value is allocated from temporary arena space. It is guaranteed to be + // around for at least the scope of the calling function. Since this struct + // allocation takes place in temp space, no need to own or cleanup. + TfLiteTensor* tensor = reinterpret_cast( + non_persistent_buffer_allocator_->AllocateTemp(sizeof(TfLiteTensor), + alignof(TfLiteTensor))); + + // Populate any fields from the flatbuffer, since this TfLiteTensor struct is + // allocated in the temp section of the arena, ensure that additional + // allocations also take place in that section of the arena. + if (PopulateTfLiteTensorFromFlatbuffer(model, tensor, tensor_index, + subgraph_index, + /*allocate_temp=*/true) != kTfLiteOk) { + MicroPrintf( + "Failed to populate a temp TfLiteTensor struct from flatbuffer data!"); + return nullptr; + } + + if (subgraph_allocations != nullptr) { + // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) + // and not located in the flatbuffer are stored on the pre-allocated list of + // TfLiteEvalTensors structs. These structs are the source of truth, simply + // point the corresponding buffer to the new TfLiteTensor data value. + tensor->data.data = + subgraph_allocations[subgraph_index].tensors[tensor_index].data.data; + // TfLiteEvalTensor structs must also be the source of truth for the + // TfLiteTensor dims. 
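The temp-tensor helpers above are intended to be used as a strict allocate/deallocate pair so that ResetTempAllocations() and IsAllTempDeallocated() stay consistent. A hedged sketch of that pairing follows; the caller is assumed to already hold a fully allocated model and its SubgraphAllocations, and the helper name is invented.

    // Reads the byte size of one tensor, then immediately releases the temp
    // TfLiteTensor struct that was materialized for the lookup.
    TfLiteStatus InspectTensorBytes(tflite::MicroAllocator* allocator,
                                    const tflite::Model* model,
                                    const tflite::SubgraphAllocations* allocations,
                                    int tensor_index, size_t* out_bytes) {
      TfLiteTensor* tensor = allocator->AllocateTempTfLiteTensor(
          model, allocations, tensor_index, /*subgraph_index=*/0);
      if (tensor == nullptr) {
        return kTfLiteError;
      }
      *out_bytes = tensor->bytes;
      allocator->DeallocateTempTfLiteTensor(tensor);  // release the temp struct
      return kTfLiteOk;
    }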
+ tensor->dims = + subgraph_allocations[subgraph_index].tensors[tensor_index].dims; + } + return tensor; +} + +TfLiteStatus MicroAllocator::ResetTempAllocations() { + return non_persistent_buffer_allocator_->ResetTempAllocations(); +} + +bool MicroAllocator::IsAllTempDeallocated() { + return non_persistent_buffer_allocator_->IsAllTempDeallocated(); +} + +TfLiteStatus MicroAllocator::AllocateTfLiteEvalTensors( + const Model* model, SubgraphAllocations* subgraph_allocations) { + TFLITE_DCHECK(subgraph_allocations != nullptr); + + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); + TFLITE_DCHECK(subgraph != nullptr); + + size_t alloc_count = subgraph->tensors()->size(); + TfLiteEvalTensor* tensors = reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(TfLiteEvalTensor) * alloc_count, alignof(TfLiteEvalTensor))); + if (tensors == nullptr) { + MicroPrintf( + "Failed to allocate memory for context->eval_tensors, " + "%d bytes required", + sizeof(TfLiteEvalTensor) * alloc_count); + return kTfLiteError; + } + + for (size_t i = 0; i < alloc_count; ++i) { + TfLiteStatus status = internal::InitializeTfLiteEvalTensorFromFlatbuffer( + *subgraph->tensors()->Get(i), model->buffers(), &tensors[i]); + if (status != kTfLiteOk) { + MicroPrintf("Failed to initialize tensor %d", i); + return kTfLiteError; + } + } + subgraph_allocations[subgraph_idx].tensors = tensors; + } + return kTfLiteOk; +} + +TfLiteStatus MicroAllocator::AllocateVariables( + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors, + const int32_t* offline_planner_offsets) { + for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { + auto* tensor = subgraph->tensors()->Get(i); + if (tensor->is_variable()) { + if (offline_planner_offsets == nullptr || + offline_planner_offsets[i] == kOnlinePlannedBuffer) { + size_t buffer_size; + TF_LITE_ENSURE_STATUS( + TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size)); + + eval_tensors[i].data.data = + persistent_buffer_allocator_->AllocatePersistentBuffer( + buffer_size, MicroArenaBufferAlignment()); + + if (eval_tensors[i].data.data == nullptr) { + MicroPrintf("Failed to allocate variable tensor of size %d", + buffer_size); + return kTfLiteError; + } + } + } + } + return kTfLiteOk; +} + +TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensorInternal() { + return reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(TfLiteTensor), alignof(TfLiteTensor))); +} + +TfLiteStatus MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( + const Model* model, TfLiteTensor* tensor, int tensor_index, + int subgraph_idx, bool allocate_temp) { + // TODO(b/162311891): This method serves as a stub to ensure quantized + // allocations in the tail can be recorded. Once the interpreter has APIs for + // accessing buffers on TfLiteEvalTensor this method can be dropped. + return internal::InitializeTfLiteTensorFromFlatbuffer( + persistent_buffer_allocator_, non_persistent_buffer_allocator_, + allocate_temp, + *model->subgraphs()->Get(subgraph_idx)->tensors()->Get(tensor_index), + model->buffers(), tensor); +} + +TfLiteStatus MicroAllocator::CommitStaticMemoryPlan( + const Model* model, SubgraphAllocations* allocations, + ScratchBufferHandle* scratch_buffer_handles) { + size_t head_usage = 0; + // Create static memory plan + // 1. Calculate AllocationInfo to know the lifetime of each tensor/buffer. + // 2. 
Add them into the planner (such as the GreedyMemoryPlanner). + // 3. Static memory planning using the planner. + // 4. Set tensor/buffer pointers based on the offsets from the previous step. + // + // Note that AllocationInfo is only needed for creating the plan. It will be + // allocated from the temp section and cleaned up at the bottom of this + // function. + + // Use the AllocationInfoBuilder class to help determine where buffers are + // used in the subgraph. + AllocationInfoBuilder builder(model, non_persistent_buffer_allocator_); + TF_LITE_ENSURE_STATUS( + builder.CreateAllocationInfo(scratch_buffer_request_count_)); + + const int32_t* offline_planner_offsets = nullptr; + TF_LITE_ENSURE_STATUS( + builder.GetOfflinePlannedOffsets(&offline_planner_offsets)); + + // We allocate buffers for variable tensors here since the offline planner + // offsets are conviently available here. + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + const SubGraph* subgraph = model->subgraphs()->Get(subgraph_idx); + TFLITE_DCHECK(subgraph != nullptr); + TF_LITE_ENSURE_STATUS(AllocateVariables( + subgraph, allocations[subgraph_idx].tensors, offline_planner_offsets)); + } + + TF_LITE_ENSURE_STATUS( + builder.InitializeAllocationInfo(offline_planner_offsets, allocations)); + + internal::ScratchBufferRequest* scratch_buffer_requests = + GetScratchBufferRequests(); + TF_LITE_ENSURE_STATUS(builder.MarkAllocationLifetimes( + 0, scratch_buffer_requests, scratch_buffer_handles, allocations)); + int allocation_info_count = builder.AllocationCount(); + AllocationInfo* allocation_info = builder.Finish(); + + // Remaining arena size that memory planner can use for calculating offsets. + size_t remaining_arena_size = + non_persistent_buffer_allocator_->GetAvailableMemory( + MicroArenaBufferAlignment()); + uint8_t* planner_arena = non_persistent_buffer_allocator_->AllocateTemp( + remaining_arena_size, MicroArenaBufferAlignment()); + + if (planner_arena == nullptr) { + return kTfLiteError; + } + + memory_planner_->Init(planner_arena, remaining_arena_size); + TF_LITE_ENSURE_STATUS( + CreatePlan(memory_planner_, allocation_info, allocation_info_count)); + + // Commit the plan. + TF_LITE_ENSURE_STATUS( + CommitPlan(memory_planner_, + non_persistent_buffer_allocator_->GetOverlayMemoryAddress(), + allocation_info, allocation_info_count)); + + // Reset all temp allocations used above: + builder.FreeAllocationInfo(); + non_persistent_buffer_allocator_->DeallocateTemp(planner_arena); + TF_LITE_ENSURE_STATUS( + non_persistent_buffer_allocator_->ResetTempAllocations()); + TF_LITE_ENSURE_STATUS( + non_persistent_buffer_allocator_->DeallocateResizableBuffer( + scratch_buffer_head_)); + +#ifdef TF_LITE_SHOW_MEMORY_USE + memory_planner_->PrintMemoryPlan(); +#endif + head_usage = memory_planner_->GetMaximumMemorySize(); + + // The head is used to store memory plans for one model at a time during the + // model preparation stage, and is re-purposed to store scratch buffer handles + // during model invocation. The head must be as large as the greater of the + // largest model memory plan's size and the total space required for all + // scratch buffer handles. + if (max_head_buffer_usage_ < head_usage) { + max_head_buffer_usage_ = head_usage; + } + + // The head is used for storing scratch buffer allocations before finalizing a + // memory plan in this function. 
Ensure that the head is set to the largest + // memory plan sent through the allocator: + TF_LITE_ENSURE_STATUS( + non_persistent_buffer_allocator_->ReserveNonPersistentOverlayMemory( + max_head_buffer_usage_, MicroArenaBufferAlignment())); + return kTfLiteOk; +} + +TfLiteStatus MicroAllocator::AllocateScratchBufferHandles( + ScratchBufferHandle** scratch_buffer_handles, size_t handle_count) { + TFLITE_DCHECK(scratch_buffer_handles != nullptr); + + if (scratch_buffer_request_count_ == 0) { + // No scratch buffer requests were requested during model allocation. + return kTfLiteOk; + } + + // Allocate a consecutive block of memory store the scratch buffer handles. + // This alignment ensures quick lookup during inference time for the model: + *scratch_buffer_handles = reinterpret_cast( + persistent_buffer_allocator_->AllocatePersistentBuffer( + sizeof(ScratchBufferHandle) * handle_count, + alignof(ScratchBufferHandle))); + + return kTfLiteOk; +} + +TfLiteStatus MicroAllocator::InitScratchBufferData() { + // A model is preparing to allocate resources, ensure that scratch buffer + // request counter is cleared: + scratch_buffer_request_count_ = 0; + + // All requests will be stored in the head section. Each kernel is allowed at + // most kMaxScratchBuffersPerOp requests. Adjust the head to reserve at most + // that many requests to begin: + scratch_buffer_head_ = + non_persistent_buffer_allocator_->AllocateResizableBuffer( + sizeof(internal::ScratchBufferRequest) * kMaxScratchBuffersPerOp, + alignof(internal::ScratchBufferRequest)); + if (scratch_buffer_head_ == nullptr) { + return kTfLiteError; + } + + return kTfLiteOk; +} + +internal::ScratchBufferRequest* MicroAllocator::GetScratchBufferRequests() { + return reinterpret_cast(AlignPointerUp( + scratch_buffer_head_, alignof(internal::ScratchBufferRequest))); +} + +TfLiteBridgeBuiltinDataAllocator* MicroAllocator::GetBuiltinDataAllocator() { + return builtin_data_allocator_; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cpp deleted file mode 100644 index dc02eb2..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.cpp +++ /dev/null @@ -1,1158 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" - -#include -#include - -#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/memory_planner.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h" -#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" -#include "edge-impulse-sdk/tensorflow/lite/schema/schema_utils.h" - -namespace tflite { - -namespace { - -// Maximum number of scratch buffer requests per operator. Operator kernels that -// request more than this value will receive an exception. -constexpr size_t kMaxScratchBuffersPerOp = 12; - -// Sentinel value used as a placeholder to mark a ScratchBufferRequest request -// needs a node id assignment. -constexpr int kUnassignedScratchBufferRequestIndex = -1; - -// Used to hold information used during allocation calculations. -struct AllocationInfo { - size_t bytes; - void** output_ptr; - int first_created; - int last_used; - int32_t offline_offset; - bool needs_allocating; -}; - -// We align tensor buffers to 16-byte boundaries, since this is a common -// requirement for SIMD extensions. -constexpr int kBufferAlignment = 16; -constexpr char kOfflineMemAllocMetadata[] = "OfflineMemoryAllocation"; -const TfLiteIntArray kZeroLengthIntArray = {}; - -class MicroBuiltinDataAllocator : public BuiltinDataAllocator { - public: - explicit MicroBuiltinDataAllocator(SimpleMemoryAllocator* memory_allocator) - : memory_allocator_(memory_allocator) {} - - void* Allocate(size_t size, size_t alignment_hint) override { - return memory_allocator_->AllocateFromTail(size, alignment_hint); - } - void Deallocate(void* data) override { - // Do not deallocate, builtin data needs to be available for the life time - // of the model. - } - - private: - SimpleMemoryAllocator* memory_allocator_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -#if !defined(__clang__) -// Helper function to check flatbuffer metadata correctness. This function is -// not called by default. Hence it's not linked in to the final binary code. 
-TfLiteStatus CheckOfflinePlannedOffsets(const Model* model, - ErrorReporter* error_reporter) { - // Suppress compile warning for unused function - (void)CheckOfflinePlannedOffsets; - - if (model->metadata()) { - for (size_t i = 0; i < model->metadata()->size(); ++i) { - auto metadata = model->metadata()->Get(i); - if (strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata, - strlen(kOfflineMemAllocMetadata)) == 0) { - auto* subgraphs = model->subgraphs(); - const SubGraph* subgraph = (*subgraphs)[0]; - const flatbuffers::Vector>* tensors = - subgraph->tensors(); - const flatbuffers::Vector>* buffers = - model->buffers(); - int nbr_tflite_tensors = tensors->size(); - auto* buffer = (*buffers)[metadata->buffer()]; - auto* array = buffer->data(); - const uint32_t* metadata_buffer = (uint32_t*)array->data(); - int version = metadata_buffer[0]; - int subgraph_idx = metadata_buffer[1]; - const int nbr_offline_offsets = metadata_buffer[2]; -#ifndef TF_LITE_STRIP_ERROR_STRINGS - int* offline_planner_offsets = (int*)&metadata_buffer[3]; -#endif - - TF_LITE_REPORT_ERROR(error_reporter, "==== Model metadata info: ====="); - TF_LITE_REPORT_ERROR(error_reporter, - "Offline planner metadata found, version %d, " - "subgraph %d, nbr offline offsets %d", - version, subgraph_idx, nbr_offline_offsets); - for (int j = 0; j < nbr_offline_offsets; ++j) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Offline planner tensor index %d, offline offset: %d", j, - offline_planner_offsets[j]); - } - - if (version != 1) { - TF_LITE_REPORT_ERROR(error_reporter, "Version not supported! (%d)\n", - version); - return kTfLiteError; - } - if (subgraph_idx != 0) { - TF_LITE_REPORT_ERROR(error_reporter, - "Only 1 subgraph supported! Subgraph idx (%d)\n", - subgraph_idx); - return kTfLiteError; - } - if (nbr_tflite_tensors != nbr_offline_offsets) { - TF_LITE_REPORT_ERROR(error_reporter, - "Nbr of offline buffer offsets (%d) in metadata " - "not equal nbr tensors (%d)\n", - nbr_offline_offsets, nbr_tflite_tensors); - return kTfLiteError; - } - } - } - } - return kTfLiteOk; -} -#endif - -// A helper class to construct AllocationInfo array. This array contains the -// lifetime of tensors / scratch_buffer and will be used to calculate the memory -// plan. Methods need to be called in order from `Init`, `Add*`, to `Finish`. -class AllocationInfoBuilder { - public: - AllocationInfoBuilder(AllocationInfo* info, size_t tensor_count, - size_t scratch_buffer_count, ErrorReporter* reporter) - : info_(info), - tensor_count_(tensor_count), - buffer_count_(scratch_buffer_count), - reporter_(reporter) {} - - // Check if model contains offline planned buffer offsets. - // - If there's no metadata available, offline_planner_offsets is not set - // - If there's metadata available, offline_planner_offsets will point to the - // first offset in the metadata buffer list. - TfLiteStatus GetOfflinePlannedOffsets( - const Model* model, const int32_t** offline_planner_offsets); - - // Add allocaiton information for the tensors. - TfLiteStatus AddTensors(const SubGraph* subgraph, - const int32_t* offline_offsets, - TfLiteEvalTensor* eval_tensors); - - // Add allocation information for the scratch buffers. - TfLiteStatus AddScratchBuffers( - internal::ScratchBufferRequest* scratch_buffer_requests, - ScratchBufferHandle* scratch_buffer_handles); - - // Returns a pointer to the built AllocationInfo array. 
- const AllocationInfo* Finish() const { return info_; } - - private: - AllocationInfo* info_ = nullptr; - size_t tensor_count_ = 0; - size_t buffer_count_ = 0; - ErrorReporter* reporter_ = nullptr; -}; - -TfLiteStatus AllocationInfoBuilder::AddTensors(const SubGraph* subgraph, - const int32_t* offline_offsets, - TfLiteEvalTensor* eval_tensors) { - TFLITE_DCHECK(eval_tensors != nullptr); - - // Set up allocation info for all tensors. - for (size_t i = 0; i < tensor_count_; ++i) { - AllocationInfo* current = &info_[i]; - current->output_ptr = &(eval_tensors[i].data.data); - - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors[i], ¤t->bytes)); - - current->first_created = -1; - current->last_used = -1; - current->needs_allocating = (eval_tensors[i].data.data == nullptr) && - (!subgraph->tensors()->Get(i)->is_variable()); - if (offline_offsets) { - current->offline_offset = offline_offsets[i]; - } else { - current->offline_offset = kOnlinePlannedBuffer; - } - } - - for (size_t i = 0; i < subgraph->inputs()->size(); ++i) { - const int tensor_index = subgraph->inputs()->Get(i); - AllocationInfo* current = &info_[tensor_index]; - current->first_created = 0; - } - - // Mark all outputs as persistent to the end of the invocation. - for (size_t i = 0; i < subgraph->outputs()->size(); ++i) { - const int tensor_index = subgraph->outputs()->Get(i); - AllocationInfo* current = &info_[tensor_index]; - current->last_used = subgraph->operators()->size() - 1; - } - - // Figure out when the first and last use of each tensor is. - for (int i = (subgraph->operators()->size() - 1); i >= 0; --i) { - const auto* op = subgraph->operators()->Get(i); - for (size_t n = 0; n < op->inputs()->size(); ++n) { - const int tensor_index = op->inputs()->Get(n); - AllocationInfo* current = &info_[tensor_index]; - if (((current->last_used == -1) || (current->last_used < i))) { - current->last_used = i; - } - } - for (size_t n = 0; n < op->outputs()->size(); ++n) { - const int tensor_index = op->outputs()->Get(n); - AllocationInfo* current = &info_[tensor_index]; - if ((current->first_created == -1) || (current->first_created > i)) { - current->first_created = i; - } - } - } - - // Sanity check for valid tensor lifetime. - for (size_t i = 0; i < tensor_count_; ++i) { - AllocationInfo* current = &info_[i]; - // Even though tensor appears to be read only it may still need to be - // allocated. - const bool appears_read_only = - (current->first_created == -1) && (current->last_used != -1); - const bool has_partial_lifetime = - !appears_read_only && - ((current->first_created == -1) || (current->last_used == -1)); - if (has_partial_lifetime && current->needs_allocating) { - TF_LITE_REPORT_ERROR( - reporter_, - "Logic error in memory planner, tensor %d has an invalid lifetime: " - "first_created: %d, last_used: %d", - i, current->first_created, current->last_used); - return kTfLiteError; - } - } - return kTfLiteOk; -} - -// The tensor offsets will be encoded in the metadata:[Metadata] field of the -// Model. The following encoding applies: -// -// | Metadata component | Value | -// | name:string | “OfflineMemoryAllocation” | -// | buffer:unit | Index of buffer containing memory allocation data | -// -// The buffer contents for the memory allocation is a list of 32-bit integers. -// The number of tensors, n, must be equal to the number of tensors defined in -// the model. 
The following encoding applies: -// -// | Offset | Value | -// | 0 | Offline allocation format version – set to 0 | -// | 1 | Subgraph index to which this allocation applies | -// | 2 | Number offsets following: n | -// | 3 | Arena byte offset of tensor #0 or -1 to allocate at runtime | -// | 4 | Arena byte offset of tensor #1 or -1 to allocate at runtime | -// | 3+(n-1) | Arena byte offset of tensor #(n-1) or -1 to allocate at runtime | -TfLiteStatus AllocationInfoBuilder::GetOfflinePlannedOffsets( - const Model* model, const int32_t** offline_planner_offsets) { - if (model->metadata()) { - for (size_t i = 0; i < model->metadata()->size(); ++i) { - auto metadata = model->metadata()->Get(i); - if (strncmp(metadata->name()->c_str(), kOfflineMemAllocMetadata, - strlen(kOfflineMemAllocMetadata)) == 0) { - const flatbuffers::Vector>* buffers = - model->buffers(); - auto* buffer = (*buffers)[metadata->buffer()]; - auto* array = buffer->data(); - const uint32_t* metadata_buffer = - reinterpret_cast(array->data()); - const size_t nbr_tensors = static_cast(metadata_buffer[2]); - *offline_planner_offsets = - reinterpret_cast(&metadata_buffer[3]); - - if (tensor_count_ != nbr_tensors) { - TF_LITE_REPORT_ERROR(reporter_, - "Nbr of offline buffer offsets (%d) in metadata " - "not equal nbr tensors (%d)\n", - nbr_tensors, tensor_count_); - return kTfLiteError; - } - } - } - } - return kTfLiteOk; -} - -TfLiteStatus AllocationInfoBuilder::AddScratchBuffers( - internal::ScratchBufferRequest* scratch_buffer_requests, - ScratchBufferHandle* scratch_buffer_handles) { - // Set up allocation info for buffers. - for (size_t i = tensor_count_; i < tensor_count_ + buffer_count_; ++i) { - internal::ScratchBufferRequest* current_request = - &(scratch_buffer_requests[i - tensor_count_]); - ScratchBufferHandle* current_handle = - &(scratch_buffer_handles[i - tensor_count_]); - - AllocationInfo* current = &info_[i]; - current->output_ptr = reinterpret_cast(¤t_handle->data); - current->bytes = current_request->bytes; - current->first_created = current_request->node_idx; - current->last_used = current_request->node_idx; - current->offline_offset = kOnlinePlannedBuffer; - current->needs_allocating = true; - } - return kTfLiteOk; -} - -TfLiteStatus CreatePlan(ErrorReporter* error_reporter, - GreedyMemoryPlanner* planner, - const AllocationInfo* allocation_info, - size_t allocation_info_size) { - // Add the tensors to our allocation plan. - for (size_t i = 0; i < allocation_info_size; ++i) { - const AllocationInfo* current = &allocation_info[i]; - if (current->needs_allocating) { - size_t aligned_bytes_required = - AlignSizeUp(current->bytes, kBufferAlignment); - if (current->offline_offset == kOnlinePlannedBuffer) { - TF_LITE_ENSURE_STATUS( - planner->AddBuffer(error_reporter, aligned_bytes_required, - current->first_created, current->last_used)); - } else { - TF_LITE_ENSURE_STATUS(planner->AddBuffer( - error_reporter, aligned_bytes_required, current->first_created, - current->last_used, current->offline_offset)); - } - } - } - return kTfLiteOk; -} - -TfLiteStatus CommitPlan(ErrorReporter* error_reporter, MemoryPlanner* planner, - uint8_t* starting_point, - const AllocationInfo* allocation_info, - size_t allocation_info_size) { - // Figure out the actual memory addresses for each buffer, based on the plan. 
- int planner_index = 0; - for (size_t i = 0; i < allocation_info_size; ++i) { - const AllocationInfo* current = &allocation_info[i]; - if (current->needs_allocating) { - int offset = -1; - TF_LITE_ENSURE_STATUS( - planner->GetOffsetForBuffer(error_reporter, planner_index, &offset)); - *current->output_ptr = reinterpret_cast(starting_point + offset); - ++planner_index; - } - } - return kTfLiteOk; -} -} // namespace - -namespace internal { - -// Handles architecture safe mapping of flatbuffer vectors to a TfLite*Array -// struct. Matching types are required (e.g. float and TfLiteFloatArray). -// Big-endian systems will always allocate dimension array data in the tail -// (persistent) section. -template -TfLiteStatus FlatBufferVectorToTfLiteTypeArray( - SimpleMemoryAllocator* allocator, ErrorReporter* error_reporter, - const flatbuffers::Vector* flatbuffer_array, - kTfLiteArrayType** result) { - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(flatbuffer_array != nullptr); - // TODO(b/159668691): Consider adding type assertion or breaking this function - // into multiple functions for each type. std::is_same is c++11 and has a - // special updated constructor in c++17 that requires a string argument. - if (FLATBUFFERS_LITTLEENDIAN) { - // On little-endian machines, TfLite*Array happens to have the same memory - // layout as flatbuffers:Vector, so we can - // reinterpret_cast the flatbuffer vector and avoid a copy and malloc. - *result = const_cast( - reinterpret_cast(flatbuffer_array)); - } else { - // Big-endian architecture can not use the same memory layout as - // flatbuffers::Vector. Allocate from the tail and - // copy values from the flatbuffer into the newly allocated chunk. - kTfLiteArrayType* array = - reinterpret_cast(allocator->AllocateFromTail( - TfLiteIntArrayGetSizeInBytes(flatbuffer_array->Length()), - alignof(kTfLiteArrayType))); - if (array == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter, - "Failed to allocate %d bytes of memory to copy an array.", - TfLiteIntArrayGetSizeInBytes(flatbuffer_array->Length())); - return kTfLiteError; - } - array->size = flatbuffer_array->Length(); - for (int i = 0; i < array->size; ++i) { - array->data[i] = flatbuffer_array->Get(i); - } - *result = array; - } - return kTfLiteOk; -} - -// Returns a pointer to any buffer associated with the flatbuffer tensor. Can -// return nullptr if no buffer is found. -void* GetFlatbufferTensorBuffer( - const tflite::Tensor& flatbuffer_tensor, - const flatbuffers::Vector>* buffers) { - // We need to figure out where the actual contents of this tensor are stored - // in memory. We'll check to see if there's a serialized buffer (pretty much - // the same as a constant op in TensorFlow) associated with this tensor first, - // and if there is update the runtime structure to point to its location in - // memory. - // First see if there's any buffer information in the serialized tensor. - // TODO(b/170379532): Add better unit tests to validate flatbuffer values. - void* out_buffer = nullptr; - if (auto* buffer = (*buffers)[flatbuffer_tensor.buffer()]) { - // If we've found a buffer, does it have any data? - if (auto* array = buffer->data()) { - // If it has any data, is the data size larger than zero? - if (array->size()) { - // We've found a buffer with valid data, so update the runtime tensor - // data structure to point to it. 
- out_buffer = const_cast(static_cast(array->data())); - } - } - // TODO(petewarden): It's not clear in what circumstances we could have a - // buffer in the serialized tensor, but it doesn't have any data in it. Is - // that a validly-generated file, and if so what does it mean, or is it an - // error condition? It would be good to tighten up the specification to make - // it less ambiguous. - } - return out_buffer; -} - -TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, bool allocate_temp, - const tflite::Tensor& flatbuffer_tensor, - const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteTensor* result) { - TFLITE_DCHECK(result != nullptr); - - *result = {}; - // Make sure the serialized type is one we know how to deal with, and convert - // it from a flatbuffer enum into a constant used by the kernel C API. - TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &result->type, error_reporter)); - // Make sure we remember if the serialized tensor is designated as a variable. - result->is_variable = flatbuffer_tensor.is_variable(); - - result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers); - - // TODO(petewarden): Some of these paths aren't getting enough testing - // coverage, so we should figure out some tests that exercise them. - if (result->data.data == nullptr) { - // The tensor contents haven't been set from a serialized buffer, so - // make a note that they will be allocated from memory. The actual - // allocation won't happen until later. - result->allocation_type = kTfLiteArenaRw; - } else { - // We set the data from a serialized buffer, so record tha. - result->allocation_type = kTfLiteMmapRo; - } - - // Figure out what the size in bytes of the buffer is and store it. - size_t type_size; - TF_LITE_ENSURE_STATUS(BytesRequiredForTensor( - flatbuffer_tensor, &result->bytes, &type_size, error_reporter)); - - if (flatbuffer_tensor.shape() == nullptr) { - // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar - // tensor. - result->dims = const_cast(&kZeroLengthIntArray); - } else { - // TFLM doesn't allow reshaping the tensor which requires dynamic memory - // allocation so it is safe to drop the const qualifier. In the future, if - // we really want to update the tensor shape, we can always pass in a new - // TfLiteIntArray - especially we have to do so if the dimension is - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, flatbuffer_tensor.shape(), &(result->dims))); - } - - // Copy the quantization information from the serialized data. - const auto* src_quantization = flatbuffer_tensor.quantization(); - if (src_quantization && src_quantization->scale() && - (src_quantization->scale()->size() > 0) && - src_quantization->zero_point() && - (src_quantization->zero_point()->size() > 0)) { - // Always populate the TfLiteTensor.params field, even if there are - // per-channel quantization parameters. - result->params.scale = src_quantization->scale()->Get(0); - // Note that the zero_point field in the FlatBuffers schema is a 64-bit - // integer, but the zero_point field in the TfLiteQuantizationParams struct - // is a 32-bit integer. - result->params.zero_point = - static_cast(src_quantization->zero_point()->Get(0)); - - // Populate per-channel quantization params. - int channels = src_quantization->scale()->size(); - TfLiteAffineQuantization* quantization = - allocate_temp - ? 
reinterpret_cast( - allocator->AllocateTemp(sizeof(TfLiteAffineQuantization), - alignof(TfLiteAffineQuantization))) - : reinterpret_cast( - allocator->AllocateFromTail( - sizeof(TfLiteAffineQuantization), - alignof(TfLiteAffineQuantization))); - if (quantization == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter, - "Unable to allocate TfLiteAffineQuantization.\n"); - return kTfLiteError; - } - - // TODO(b/153688719): Reduce tail allocation by using a global zero-point - // buffer. This value can not be reused from the flatbuffer since the - // zero_point is stored as a int64_t. - quantization->zero_point = - allocate_temp - ? reinterpret_cast(allocator->AllocateTemp( - TfLiteIntArrayGetSizeInBytes(channels), - alignof(TfLiteIntArray))) - : reinterpret_cast(allocator->AllocateFromTail( - TfLiteIntArrayGetSizeInBytes(channels), - alignof(TfLiteIntArray))); - if (quantization->zero_point == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter, - "Unable to allocate quantization->zero_point.\n"); - return kTfLiteError; - } - - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, src_quantization->scale(), - &quantization->scale)); - - quantization->zero_point->size = channels; - int* zero_point_data = quantization->zero_point->data; - for (int i = 0; i < channels; i++) { - zero_point_data[i] = src_quantization->zero_point()->Get(i); - } - // TODO(rocky): Need to add a micro_allocator test case that fails when - // this is not copied: - quantization->quantized_dimension = src_quantization->quantized_dimension(); - - result->quantization = {kTfLiteAffineQuantization, quantization}; - } - return kTfLiteOk; -} - -TfLiteStatus InitializeTfLiteEvalTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, const tflite::Tensor& flatbuffer_tensor, - const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteEvalTensor* result) { - *result = {}; - // Make sure the serialized type is one we know how to deal with, and convert - // it from a flatbuffer enum into a constant used by the kernel C API. - TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(), - &result->type, error_reporter)); - - result->data.data = GetFlatbufferTensorBuffer(flatbuffer_tensor, buffers); - - if (flatbuffer_tensor.shape() == nullptr) { - // flatbuffer_tensor.shape() can return a nullptr in the case of a scalar - // tensor. 
- result->dims = const_cast(&kZeroLengthIntArray); - } else { - TF_LITE_ENSURE_STATUS(FlatBufferVectorToTfLiteTypeArray( - allocator, error_reporter, flatbuffer_tensor.shape(), &(result->dims))); - } - return kTfLiteOk; -} - -} // namespace internal - -MicroAllocator::MicroAllocator(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter) - : memory_allocator_(memory_allocator), - error_reporter_(error_reporter), - model_is_allocating_(false) {} - -MicroAllocator::~MicroAllocator() {} - -MicroAllocator* MicroAllocator::Create(uint8_t* tensor_arena, size_t arena_size, - ErrorReporter* error_reporter) { - uint8_t* aligned_arena = AlignPointerUp(tensor_arena, kBufferAlignment); - size_t aligned_arena_size = tensor_arena + arena_size - aligned_arena; - return Create(SimpleMemoryAllocator::Create(error_reporter, aligned_arena, - aligned_arena_size), - error_reporter); -} - -MicroAllocator* MicroAllocator::Create(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter) { - TFLITE_DCHECK(memory_allocator != nullptr); - TFLITE_DCHECK(error_reporter != nullptr); - - uint8_t* allocator_buffer = memory_allocator->AllocateFromTail( - sizeof(MicroAllocator), alignof(MicroAllocator)); - MicroAllocator* allocator = - new (allocator_buffer) MicroAllocator(memory_allocator, error_reporter); - return allocator; -} - -TfLiteStatus MicroAllocator::StartModelAllocation( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration** node_and_registrations, - TfLiteEvalTensor** eval_tensors) { - TFLITE_DCHECK(model != nullptr); - - if (model_is_allocating_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "MicroAllocator: Model allocation started before " - "finishing previously allocated model"); - return kTfLiteError; - } - - model_is_allocating_ = true; - - TF_LITE_ENSURE_STATUS(InitScratchBufferData()); - TF_LITE_ENSURE_STATUS(AllocateTfLiteEvalTensors(model, eval_tensors)); - TF_LITE_ENSURE_STATUS( - AllocateNodeAndRegistrations(model, node_and_registrations)); - TF_LITE_ENSURE_STATUS(PrepareNodeAndRegistrationDataFromFlatbuffer( - model, op_resolver, *node_and_registrations)); - - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::FinishModelAllocation( - const Model* model, TfLiteEvalTensor* eval_tensors, - ScratchBufferHandle** scratch_buffer_handles) { - if (!model_is_allocating_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "MicroAllocator: Model allocation finished before " - "starting allocating model"); - return kTfLiteError; - } - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - TF_LITE_ENSURE_STATUS(AllocateScratchBufferHandles( - scratch_buffer_handles, scratch_buffer_request_count_)); - TF_LITE_ENSURE_STATUS(CommitStaticMemoryPlan(model, subgraph, eval_tensors, - *scratch_buffer_handles)); - TF_LITE_ENSURE_STATUS(AllocateVariables(subgraph, eval_tensors)); - - model_is_allocating_ = false; - return kTfLiteOk; -} - -void* MicroAllocator::AllocatePersistentBuffer(size_t bytes) { - return memory_allocator_->AllocateFromTail(bytes, kBufferAlignment); -} - -TfLiteStatus MicroAllocator::RequestScratchBufferInArena(size_t bytes, - int* buffer_idx) { - // All scratch buffer requests are stored in the head section of the arena - // when a model is in the prepare phase. 
First align a scratch buffer request - // pointer to the start of the head: - internal::ScratchBufferRequest* requests = GetScratchBufferRequests(); - - // Count the number of requested scratch buffers for the current node: - size_t current_node_request_count = 0; - for (size_t i = 0; i < scratch_buffer_request_count_; ++i) { - if (requests[i].node_idx == kUnassignedScratchBufferRequestIndex) { - ++current_node_request_count; - } - } - - // First, ensure that the per-kernel request has not exceeded the limit: - if (current_node_request_count >= kMaxScratchBuffersPerOp) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Scratch buffer request exeeds limit per operator (%d)", - kMaxScratchBuffersPerOp); - return kTfLiteError; - } - - // Initialize and assign values for the request at the current index: - internal::ScratchBufferRequest* current_request = - &requests[scratch_buffer_request_count_]; - *current_request = {}; - // Assign -1 as a sentinel value that will be updated when the node finishes - // allocating: - current_request->bytes = bytes; - current_request->node_idx = kUnassignedScratchBufferRequestIndex; - - // Assign the current request index to the out-param: - *buffer_idx = scratch_buffer_request_count_; - - // Bump the request count to prepare for the next request: - ++scratch_buffer_request_count_; - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::FinishPrepareNodeAllocations(int node_id) { - // When a node has finished preparing, all temp allocations performed by the - // kernel should be cleaned up: - ResetTempAllocations(); - - // Find and update any new scratch buffer requests for the current node: - internal::ScratchBufferRequest* requests = GetScratchBufferRequests(); - - for (size_t i = 0; i < scratch_buffer_request_count_; ++i) { - // A request with a node_idx of -1 is a sentinel value used to indicate this - // was a new request for the current node. The allocator finally knows the - // node index at this point. 
Assign the value and update the list of new - // requests so the head section can be adjusted to allow for the next kernel - // to allocate at most kMaxScratchBuffersPerOp requests: - if (requests[i].node_idx == kUnassignedScratchBufferRequestIndex) { - requests[i].node_idx = node_id; - } - } - - // Ensure that the head is re-adjusted to allow for another at-most - // kMaxScratchBuffersPerOp scratch buffer requests in the next operator: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( - sizeof(internal::ScratchBufferRequest) * - (scratch_buffer_request_count_ + kMaxScratchBuffersPerOp), - alignof(internal::ScratchBufferRequest))); - - return kTfLiteOk; -} - -size_t MicroAllocator::used_bytes() const { - return memory_allocator_->GetUsedBytes(); -} - -TfLiteStatus MicroAllocator::AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations) { - TFLITE_DCHECK(node_and_registrations); - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - NodeAndRegistration* output = reinterpret_cast( - memory_allocator_->AllocateFromTail( - sizeof(NodeAndRegistration) * subgraph->operators()->size(), - alignof(NodeAndRegistration))); - if (output == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for node_and_registrations."); - return kTfLiteError; - } - *node_and_registrations = output; - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations) { - TFLITE_DCHECK(model != nullptr); - TFLITE_DCHECK(node_and_registrations != nullptr); - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - TfLiteStatus status = kTfLiteOk; - auto* opcodes = model->operator_codes(); - MicroBuiltinDataAllocator builtin_data_allocator(memory_allocator_); - for (size_t i = 0; i < subgraph->operators()->size(); ++i) { - const auto* op = subgraph->operators()->Get(i); - const size_t index = op->opcode_index(); - if (index >= opcodes->size()) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Missing registration for opcode_index %d\n", index); - return kTfLiteError; - } - auto* opcode = (*opcodes)[index]; - status = - GetRegistrationFromOpCode(opcode, op_resolver, error_reporter_, - &(node_and_registrations[i].registration)); - if (status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to get registration from op code %s\n ", - EnumNameBuiltinOperator(GetBuiltinCode(opcode))); - return status; - } - const auto* registration = node_and_registrations[i].registration; - if (registration == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, "Skipping op for opcode_index %d\n", - index); - return kTfLiteError; - } - BuiltinOperator op_type = - static_cast(registration->builtin_code); - - const char* custom_data = nullptr; - size_t custom_data_size = 0; - unsigned char* builtin_data = nullptr; - - if (op_type == BuiltinOperator_CUSTOM) { - // Custom Ops may or may not have a non-null custom_options field. 
- if (op->custom_options() != nullptr) { - custom_data = - reinterpret_cast(op->custom_options()->data()); - custom_data_size = op->custom_options()->size(); - } - } else { - if (op->custom_options() != nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Unsupported behavior: found builtin operator %s with custom " - "options.\n", - EnumNameBuiltinOperator(op_type)); - return kTfLiteError; - } - - MicroOpResolver::BuiltinParseFunction parser = - op_resolver.GetOpDataParser(op_type); - if (parser == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, "Did not find a parser for %s", - EnumNameBuiltinOperator(op_type)); - - return kTfLiteError; - } - TF_LITE_ENSURE_STATUS(parser(op, error_reporter_, &builtin_data_allocator, - (void**)(&builtin_data))); - } - - TfLiteIntArray* inputs_array; - TF_LITE_ENSURE_STATUS(internal::FlatBufferVectorToTfLiteTypeArray( - memory_allocator_, error_reporter_, op->inputs(), &inputs_array)); - - TfLiteIntArray* outputs_array; - TF_LITE_ENSURE_STATUS(internal::FlatBufferVectorToTfLiteTypeArray( - memory_allocator_, error_reporter_, op->outputs(), &outputs_array)); - - TfLiteNode* node = &(node_and_registrations[i].node); - *node = {}; - node->inputs = inputs_array; - node->outputs = outputs_array; - node->builtin_data = reinterpret_cast(builtin_data); - node->custom_initial_data = custom_data; - node->custom_initial_data_size = custom_data_size; - } - - return kTfLiteOk; -} - -TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - // This value is allocated from persistent arena space. It is guaranteed to be - // around for the lifetime of the application. - TfLiteTensor* tensor = - AllocatePersistentTfLiteTensorInternal(model, eval_tensors, tensor_index); - - // Populate any fields from the flatbuffer, since this TfLiteTensor struct is - // allocated in the persistent section of the arena, ensure that additional - // allocations also take place in that section of the arena. - if (PopulateTfLiteTensorFromFlatbuffer(model, subgraph, tensor, tensor_index, - /*allocate_temp=*/false) != - kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to populate a persistent TfLiteTensor struct " - "from flatbuffer data!"); - return nullptr; - } - - if (eval_tensors != nullptr) { - // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) - // and not located in the flatbuffer are stored on the pre-allocated list of - // TfLiteEvalTensors structs. These structs are the source of truth, simply - // point the corresponding buffer to the new TfLiteTensor data value. - tensor->data.data = eval_tensors[tensor_index].data.data; - } - return tensor; -} - -TfLiteTensor* MicroAllocator::AllocateTempTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - // This value is allocated from temporary arena space. It is guaranteed to be - // around for at least the scope of the calling function. Since this struct - // allocation takes place in temp space, no need to own or cleanup. 
- TfLiteTensor* tensor = - reinterpret_cast(memory_allocator_->AllocateTemp( - sizeof(TfLiteTensor), alignof(TfLiteTensor))); - - // Populate any fields from the flatbuffer, since this TfLiteTensor struct is - // allocated in the temp section of the arena, ensure that additional - // allocations also take place in that section of the arena. - if (PopulateTfLiteTensorFromFlatbuffer(model, subgraph, tensor, tensor_index, - /*allocate_temp=*/true) != kTfLiteOk) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to populate a temp TfLiteTensor struct from flatbuffer data!"); - return nullptr; - } - - if (eval_tensors != nullptr) { - // Tensor buffers that are allocated at runtime (e.g. non-weight buffers) - // and not located in the flatbuffer are stored on the pre-allocated list of - // TfLiteEvalTensors structs. These structs are the source of truth, simply - // point the corresponding buffer to the new TfLiteTensor data value. - tensor->data.data = eval_tensors[tensor_index].data.data; - } - return tensor; -} - -void MicroAllocator::ResetTempAllocations() { - memory_allocator_->ResetTempAllocations(); -} - -TfLiteStatus MicroAllocator::AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) { - TFLITE_DCHECK(eval_tensors != nullptr); - - const SubGraph* subgraph = GetSubGraphFromModel(model); - TFLITE_DCHECK(subgraph != nullptr); - - size_t alloc_count = subgraph->tensors()->size(); - TfLiteEvalTensor* tensors = - reinterpret_cast(memory_allocator_->AllocateFromTail( - sizeof(TfLiteEvalTensor) * alloc_count, alignof(TfLiteEvalTensor))); - if (tensors == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate memory for context->eval_tensors, " - "%d bytes required", - sizeof(TfLiteEvalTensor) * alloc_count); - return kTfLiteError; - } - - for (size_t i = 0; i < alloc_count; ++i) { - TfLiteStatus status = internal::InitializeTfLiteEvalTensorFromFlatbuffer( - memory_allocator_, *subgraph->tensors()->Get(i), model->buffers(), - error_reporter_, &tensors[i]); - if (status != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, "Failed to initialize tensor %d", - i); - return kTfLiteError; - } - } - *eval_tensors = tensors; - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors) { - for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { - auto* tensor = subgraph->tensors()->Get(i); - if (tensor->is_variable()) { - size_t buffer_size; - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors[i], &buffer_size)); - - eval_tensors[i].data.data = - memory_allocator_->AllocateFromTail(buffer_size, kBufferAlignment); - - if (eval_tensors[i].data.data == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate variable tensor of size %d", - buffer_size); - return kTfLiteError; - } - } - } - return kTfLiteOk; -} - -TfLiteTensor* MicroAllocator::AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { - return reinterpret_cast(memory_allocator_->AllocateFromTail( - sizeof(TfLiteTensor), alignof(TfLiteTensor))); -} - -TfLiteStatus MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp) { - // TODO(b/162311891): This method serves as a stub to ensure quantized - // allocations in the tail can be recorded. 
Once the interpreter has APIs for - // accessing buffers on TfLiteEvalTensor this method can be dropped. - return internal::InitializeTfLiteTensorFromFlatbuffer( - memory_allocator_, allocate_temp, *subgraph->tensors()->Get(tensor_index), - model->buffers(), error_reporter_, tensor); -} - -ErrorReporter* MicroAllocator::error_reporter() const { - return error_reporter_; -} - -const SubGraph* MicroAllocator::GetSubGraphFromModel(const Model* model) { - auto* subgraphs = model->subgraphs(); - if (subgraphs->size() != 1) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Only 1 subgraph is currently supported.\n"); - return nullptr; - } - return (*subgraphs)[0]; -} - -TfLiteStatus MicroAllocator::CommitStaticMemoryPlan( - const Model* model, const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors, - ScratchBufferHandle* scratch_buffer_handles) { - size_t head_usage = 0; - // Create static memory plan - // 1. Calculate AllocationInfo to know the lifetime of each tensor/buffer. - // 2. Add them into the planner (such as the GreedyMemoryPlanner). - // 3. Static memory planning using the planner. - // 4. Set tensor/buffer pointers based on the offsets from the previous step. - // - // Note that AllocationInfo is only needed for creating the plan. It will be - // allocated from the temp section and cleaned up at the bottom of this - // function. - - size_t allocation_info_count = - subgraph->tensors()->size() + scratch_buffer_request_count_; - size_t bytes = sizeof(AllocationInfo) * allocation_info_count; - - // Allocate an array of AllocationInfo structs from the temp section. This - // struct will be used by AllocationInfoBuilder to find buffer usage. - AllocationInfo* allocation_info = reinterpret_cast( - memory_allocator_->AllocateTemp(bytes, alignof(AllocationInfo))); - if (allocation_info == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for allocation_info, %d bytes required", - bytes); - return kTfLiteError; - } - - // Use the AllocationInfoBuilder class to help determine where buffers are - // used in the subgraph. - AllocationInfoBuilder builder(allocation_info, subgraph->tensors()->size(), - scratch_buffer_request_count_, error_reporter_); - - const int32_t* offline_planner_offsets = nullptr; - TF_LITE_ENSURE_STATUS( - builder.GetOfflinePlannedOffsets(model, &offline_planner_offsets)); - TF_LITE_ENSURE_STATUS( - builder.AddTensors(subgraph, offline_planner_offsets, eval_tensors)); - - internal::ScratchBufferRequest* scratch_buffer_requests = - GetScratchBufferRequests(); - - TF_LITE_ENSURE_STATUS(builder.AddScratchBuffers(scratch_buffer_requests, - scratch_buffer_handles)); - - // Remaining arena size that memory planner can use for calculating offsets. - size_t remaining_arena_size = - memory_allocator_->GetAvailableMemory(kBufferAlignment); - uint8_t* planner_arena = - memory_allocator_->AllocateTemp(remaining_arena_size, kBufferAlignment); - TF_LITE_ENSURE(error_reporter_, planner_arena != nullptr); - GreedyMemoryPlanner planner(planner_arena, remaining_arena_size); - TF_LITE_ENSURE_STATUS(CreatePlan(error_reporter_, &planner, allocation_info, - allocation_info_count)); - - // Reset all temp allocations used above: - memory_allocator_->ResetTempAllocations(); - - size_t actual_available_arena_size = - memory_allocator_->GetAvailableMemory(kBufferAlignment); - - // Make sure we have enough arena size. 
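The four-step plan in the deleted CommitStaticMemoryPlan above (collect AllocationInfo lifetimes, hand them to a planner such as the GreedyMemoryPlanner, compute offsets, then commit) is the core of static arena planning. As a rough illustration of the idea only, and not the TFLM GreedyMemoryPlanner itself, a first-fit planner over (size, first-use, last-use) records could look like the sketch below; all names and the exact strategy are assumptions made for illustration.

#include <cstddef>
#include <vector>

struct BufferLife {
  size_t bytes;   // requested size
  int first_use;  // index of the first op that reads or writes the buffer
  int last_use;   // index of the last op that reads or writes the buffer
  size_t offset;  // assigned arena offset (output)
};

// Assigns each buffer the lowest offset that does not overlap, in both memory
// and time, with any buffer placed before it. Returns the arena high-water
// mark, analogous to planner.GetMaximumMemorySize() above.
inline size_t PlanOffsets(std::vector<BufferLife>& buffers) {
  size_t high_water = 0;
  for (size_t i = 0; i < buffers.size(); ++i) {
    size_t candidate = 0;
    bool moved = true;
    while (moved) {
      moved = false;
      for (size_t j = 0; j < i; ++j) {
        const bool lifetimes_overlap =
            buffers[i].first_use <= buffers[j].last_use &&
            buffers[j].first_use <= buffers[i].last_use;
        const bool memory_overlaps =
            candidate < buffers[j].offset + buffers[j].bytes &&
            buffers[j].offset < candidate + buffers[i].bytes;
        if (lifetimes_overlap && memory_overlaps) {
          candidate = buffers[j].offset + buffers[j].bytes;  // slide past j
          moved = true;
        }
      }
    }
    buffers[i].offset = candidate;
    if (candidate + buffers[i].bytes > high_water) {
      high_water = candidate + buffers[i].bytes;
    }
  }
  return high_water;
}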
- if (planner.GetMaximumMemorySize() > actual_available_arena_size) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Arena size is too small for all buffers. Needed %u but only " - "%u was available.", - planner.GetMaximumMemorySize(), actual_available_arena_size); - return kTfLiteError; - } - // Commit the plan. - TF_LITE_ENSURE_STATUS(CommitPlan(error_reporter_, &planner, - memory_allocator_->GetHeadBuffer(), - allocation_info, allocation_info_count)); - head_usage = planner.GetMaximumMemorySize(); - - // The head is used to store memory plans for one model at a time during the - // model preparation stage, and is re-purposed to store scratch buffer handles - // during model invocation. The head must be as large as the greater of the - // largest model memory plan's size and the total space required for all - // scratch buffer handles. - if (max_head_buffer_usage_ < head_usage) { - max_head_buffer_usage_ = head_usage; - } - - // The head is used for storing scratch buffer allocations before finalizing a - // memory plan in this function. Ensure that the head is set to the largest - // memory plan sent through the allocator: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( - max_head_buffer_usage_, kBufferAlignment)); - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::AllocateScratchBufferHandles( - ScratchBufferHandle** scratch_buffer_handles, size_t handle_count) { - TFLITE_DCHECK(scratch_buffer_handles != nullptr); - - if (scratch_buffer_request_count_ == 0) { - // No scratch buffer requests were requested during model allocation. - return kTfLiteOk; - } - - // Allocate a consecutive block of memory store the scratch buffer handles. - // This alignment ensures quick lookup during inference time for the model: - *scratch_buffer_handles = reinterpret_cast( - memory_allocator_->AllocateFromTail( - sizeof(ScratchBufferHandle) * handle_count, - alignof(ScratchBufferHandle))); - - return kTfLiteOk; -} - -TfLiteStatus MicroAllocator::InitScratchBufferData() { - // A model is preparing to allocate resources, ensure that scratch buffer - // request counter is cleared: - scratch_buffer_request_count_ = 0; - - // All requests will be stored in the head section. Each kernel is allowed at - // most kMaxScratchBuffersPerOp requests. Adjust the head to reserve at most - // that many requests to begin: - TF_LITE_ENSURE_STATUS(memory_allocator_->SetHeadBufferSize( - sizeof(internal::ScratchBufferRequest) * kMaxScratchBuffersPerOp, - alignof(internal::ScratchBufferRequest))); - - return kTfLiteOk; -} - -internal::ScratchBufferRequest* MicroAllocator::GetScratchBufferRequests() { - return reinterpret_cast( - AlignPointerUp(memory_allocator_->GetHeadBuffer(), - alignof(internal::ScratchBufferRequest))); -} - -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h index 49294b8..ca2e27e 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
@@ -18,16 +18,18 @@ limitations under the License. #include #include -#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers #include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h" #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/micro_memory_planner.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" namespace tflite { +// TODO(b/199402574): rename to tflite_internal or just remove internal +// namespace. namespace internal { // Sets up all of the data structure members for a TfLiteTensor based on the @@ -35,10 +37,11 @@ namespace internal { // TODO(b/162311891): Drop this method when the interpreter has an API for // returning buffers on TfLiteEvalTensor. TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( - SimpleMemoryAllocator* allocator, bool allocate_temp, - const tflite::Tensor& flatbuffer_tensor, + IPersistentBufferAllocator* persistent_buffer_allocator, + INonPersistentBufferAllocator* non_persistent_buffer_allocator, + bool allocate_temp, const tflite::Tensor& flatbuffer_tensor, const flatbuffers::Vector>* buffers, - ErrorReporter* error_reporter, TfLiteTensor* result); + TfLiteTensor* result); // Holds placeholder information for a scratch buffer request from a kernel. // This struct is only used during the model prepare stage. Each request from a @@ -50,7 +53,7 @@ TfLiteStatus InitializeTfLiteTensorFromFlatbuffer( // of a sequential, array of ScratchBufferHandle allocations in the tail // section. These allocations are indexed by the request API defined in the // TfLiteContext struct. -typedef struct { +struct ScratchBufferRequest { // Number of bytes required by the buffer. The actual allocated size might be // greater than `bytes` due to buffer alignment. size_t bytes; @@ -58,22 +61,30 @@ typedef struct { // determine the lifetime of the buffer. In AllocationInfo, this buffer will // have `before` = node_idx and `after` = node_idx. int node_idx; -} ScratchBufferRequest; + int subgraph_idx; +}; } // namespace internal -typedef struct { +struct NodeAndRegistration { TfLiteNode node; const TfLiteRegistration* registration; -} NodeAndRegistration; +}; // Holds a pointer to a buffer for a scratch buffer requested by a kernel during // the model prepare stage. This struct is allocated in-place and allows for // quick pointer-indexed lookup for speed during model inference. -typedef struct { +struct ScratchBufferHandle { // Pointer to location of the scratch buffer: uint8_t* data; -} ScratchBufferHandle; +}; + +// Stores all per-subgraph allocations. This includes the node and registration +// array, and tensor list for each subgraph. +struct SubgraphAllocations { + NodeAndRegistration* node_and_registrations; + TfLiteEvalTensor* tensors; +}; // Allocator responsible for allocating memory for all intermediate tensors // necessary to invoke a model. 
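The new SubgraphAllocations struct introduced in this hunk replaces the flat eval_tensors array with one node/registration block and one tensor list per subgraph. A minimal caller-side sketch of how that array is indexed, mirroring MicroGraph::GetEvalTensor later in this patch (the helper names are illustrative only):

#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h"

// Subgraph index first, then the per-subgraph tensor or operator index.
inline TfLiteEvalTensor* EvalTensorAt(tflite::SubgraphAllocations* allocations,
                                      int subgraph_idx, int tensor_idx) {
  return &allocations[subgraph_idx].tensors[tensor_idx];
}

inline TfLiteNode* NodeAt(tflite::SubgraphAllocations* allocations,
                          int subgraph_idx, int op_idx) {
  return &allocations[subgraph_idx].node_and_registrations[op_idx].node;
}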
@@ -84,9 +95,9 @@ typedef struct { // // The MicroAllocator simply plans out additional allocations that are required // to standup a model for inference in TF Micro. This class currently relies on -// an additional allocator - SimpleMemoryAllocator - for all allocations from an -// arena. These allocations are divided into head (non-persistent) and tail -// (persistent) regions: +// an additional allocator - SingleArenaBufferAllocator - for all allocations +// from an arena. These allocations are divided into head (non-persistent) and +// tail (persistent) regions: // // Memory layout to help understand how it works // This information could change in the future version. @@ -101,41 +112,64 @@ typedef struct { class MicroAllocator { public: // Creates a MicroAllocator instance from a given tensor arena. This arena - // will be managed by the created instance. - // Note: Please use __declspec(align(16)) to make sure tensor_arena is 16 + // will be managed by the created instance. The GreedyMemoryPlanner will + // by default be used and created on the arena. + // Note: Please use alignas(16) to make sure tensor_arena is 16 // bytes aligned, otherwise some head room will be wasted. // TODO(b/157615197): Cleanup constructor + factory usage. - static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size, - ErrorReporter* error_reporter); - - // Creates a MicroAllocator instance using the provided SimpleMemoryAllocator - // intance. This allocator instance will use the SimpleMemoryAllocator - // instance to manage allocations internally. - static MicroAllocator* Create(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); + static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size); - // Begin allocating internal resources required for model inference. + // Creates a MicroAllocator instance from a given tensor arena and a given + // MemoryPlanner. This arena will be managed by the created instance. Note: + // Please use alignas(16) to make sure tensor_arena is 16 bytes + // aligned, otherwise some head room will be wasted. + static MicroAllocator* Create(uint8_t* tensor_arena, size_t arena_size, + MicroMemoryPlanner* memory_planner); + + // Creates a MicroAllocator instance using the provided + // SingleArenaBufferAllocator instance and the MemoryPlanner. This allocator + // instance will use the SingleArenaBufferAllocator instance to manage + // allocations internally. + static MicroAllocator* Create(SingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner); + + // Creates a MicroAllocator instance using the provided + // SingleArenaBufferAllocator instance and the MemoryPlanner. This allocator + // instance will use the SingleArenaBufferAllocator instance to manage + // allocations internally. + static MicroAllocator* Create(uint8_t* persistent_tensor_arena, + size_t persistent_arena_size, + uint8_t* non_persistent_tensor_arena, + size_t non_persistent_arena_size); + + // Returns the fixed amount of memory overhead of MicroAllocator. + static size_t GetDefaultTailUsage(bool is_memory_planner_given); + + // Allocates internal resources required for model inference for each subgraph + // from the arena. + // // This method will run through the flatbuffer data supplied in the model to // properly allocate tensor, node, and op registration data. This method is - // expected to be followed with a call to FinishModelAllocation() before - // resuming allocation with another model. 
All persistent tensor buffers are - // stored in the out-param eval_tensors. This value is allocated from the - // persistent memory arena and will be used to host runtime tensor buffers. - TfLiteStatus StartModelAllocation( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration** node_and_registrations, - TfLiteEvalTensor** eval_tensors); + // expected to be followed with a call to FinishModelAllocation() Returns a + // pointer to an array of SubgraphAllocations (also stored in the tail of the + // arena) where each index corresponds to a different subgraph in the model. + // Return value is nullptr if the allocations failed. + SubgraphAllocations* StartModelAllocation(const Model* model); // Finish allocating internal resources required for model inference. - // This method will plan non-persistent buffers and commit a memory plan to - // the 'head' section of the memory arena. All variable tensor data will also - // be allocated. This method should be called after assigning model resources - // in StartModelAllocation(). The eval_tensors pointer should be the value - // passed into this class during StartModelAllocation(). Scratch buffer - // handles are stored in the out-param `scratch_buffer_handles`. This value - // will be used in `GetScratchBuffer` call to retrieve scratch buffers. + // + // -Plan the memory for activation tensors and scratch buffers. + // -Update eval tensors for each subgraph based on planned offsets. + // -Allocate scratch buffer handles array and update based on planned offsets. + // + // This method should be called after assigning model resources + // in StartModelAllocation(). The subgraph_allocations pointer should be the + // value passed into this class during StartModelAllocation(). Scratch buffer + // handles are stored in the out-param `scratch_buffer_handles` array which is + // allocated in this method. This value will be used in `GetScratchBuffer` + // call to retrieve scratch buffers. TfLiteStatus FinishModelAllocation( - const Model* model, TfLiteEvalTensor* eval_tensors, + const Model* model, SubgraphAllocations* subgraph_allocations, ScratchBufferHandle** scratch_buffer_handles); // Allocates a TfLiteTensor struct and populates the returned value with @@ -145,22 +179,30 @@ class MicroAllocator { // class during StartModelAllocation() and contains the source-of-truth for // buffers. virtual TfLiteTensor* AllocatePersistentTfLiteTensor( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index); + const Model* model, const SubgraphAllocations* subgraph_allocations, + int tensor_index, int subgraph_index); // Allocates a TfLiteTensor struct and populates the returned value with // properties from the model flatbuffer. This struct is allocated from // temporary arena memory is only guaranteed until a call is made to - // ResetTempAllocations(). The eval_tensors pointer should be the value passed - // into this class during StartModelAllocation() and contains the - // source-of-truth for buffers. - virtual TfLiteTensor* AllocateTempTfLiteTensor(const Model* model, - TfLiteEvalTensor* eval_tensors, - int tensor_index); + // ResetTempAllocations(). Subgraph_allocaitons contains the array of + // TfLiteEvalTensors. If the newly allocated temp at the specified subgraph + // and tensor index is already present int the TfLiteEvalTensor array, its + // data buffer will be re-used. 
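Taken together, the reworked StartModelAllocation()/FinishModelAllocation() pair documented above gives the caller the flow sketched below. This is a usage sketch only, with error handling trimmed and a placeholder arena size; it mirrors what MicroInterpreter::AllocateTensors() does later in this patch.

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h"

TfLiteStatus AllocateForModel(const tflite::Model* model) {
  alignas(16) static uint8_t arena[16 * 1024];  // placeholder size
  tflite::MicroAllocator* allocator =
      tflite::MicroAllocator::Create(arena, sizeof(arena));

  // Reserves node/registration and eval-tensor arrays for every subgraph.
  tflite::SubgraphAllocations* allocations =
      allocator->StartModelAllocation(model);
  if (allocations == nullptr) {
    return kTfLiteError;
  }

  // ... kernel Init/Prepare runs here and may request scratch buffers ...

  // Plans activation memory and materializes the scratch buffer handles.
  tflite::ScratchBufferHandle* scratch_handles = nullptr;
  return allocator->FinishModelAllocation(model, allocations, &scratch_handles);
}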
+ virtual TfLiteTensor* AllocateTempTfLiteTensor( + const Model* model, const SubgraphAllocations* subgraph_allocations, + int tensor_index, int subgraph_index); + + virtual void DeallocateTempTfLiteTensor(TfLiteTensor*); // Resets all temporary allocations. This method should be called after a // chain of temp allocations (e.g. chain of TfLiteTensor objects via // AllocateTfLiteTensor()). - virtual void ResetTempAllocations(); + virtual TfLiteStatus ResetTempAllocations(); + + // Returns true if all temporary buffers including temp TfLiteTensor are + // already deallocated. + virtual bool IsAllTempDeallocated(); // Allocates persistent buffer which has the same life time as the allocator. // The memory is immediately available and is allocated from the tail of the @@ -171,7 +213,8 @@ class MicroAllocator { // This method only requests a buffer with a given size to be used after a // model has finished allocation via FinishModelAllocation(). All requested // buffers will be accessible by the out-param in that method. - TfLiteStatus RequestScratchBufferInArena(size_t bytes, int* buffer_idx); + TfLiteStatus RequestScratchBufferInArena(size_t bytes, int subgraph_idx, + int* buffer_idx); // Finish allocating a specific NodeAndRegistration prepare block (kernel // entry for a model) with a given node ID. This call ensures that any scratch @@ -183,53 +226,48 @@ class MicroAllocator { // `FinishModelAllocation`. Otherwise, it will return 0. size_t used_bytes() const; + TfLiteBridgeBuiltinDataAllocator* GetBuiltinDataAllocator(); + protected: - MicroAllocator(SimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); + MicroAllocator(SingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner); + MicroAllocator(IPersistentBufferAllocator* persistent_buffer_allocator, + INonPersistentBufferAllocator* non_persistent_buffer_allocator, + MicroMemoryPlanner* memory_planner); virtual ~MicroAllocator(); // Allocates an array in the arena to hold pointers to the node and // registration pointers required to represent the inference graph of the // model. virtual TfLiteStatus AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations); - - // Populates node and registration pointers representing the inference graph - // of the model from values inside the flatbuffer (loaded from the TfLiteModel - // instance). Persistent data (e.g. operator data) is allocated from the - // arena. - virtual TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations); + const Model* model, SubgraphAllocations* subgraph_allocations); // Allocates the list of persistent TfLiteEvalTensors that are used for the // "eval" phase of model inference. These structs will be the source of truth - // for all tensor buffers. Allocation results are stored in the out-param - // eval_tensors. + // for all tensor buffers. virtual TfLiteStatus AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors); + const Model* model, SubgraphAllocations* subgraph_allocations); // Allocates persistent tensor buffers for variable tensors in the subgraph. - virtual TfLiteStatus AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors); + // Online and offline variable tensors are handled differently hence the + // offline_planner_offsets parameter is needed. 
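The temp-tensor side of this API now comes in matched pairs: each AllocateTempTfLiteTensor() is released with DeallocateTempTfLiteTensor(), and ResetTempAllocations() (which now reports a status) is only expected to succeed once IsAllTempDeallocated() is true. A hedged sketch of that discipline, assuming a valid allocator, model and allocations:

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h"

// Inspect tensor 0 of subgraph 0 through a temporary TfLiteTensor, then
// release it so the temp section of the arena can be reset.
void InspectFirstTensor(tflite::MicroAllocator* allocator,
                        const tflite::Model* model,
                        const tflite::SubgraphAllocations* allocations) {
  TfLiteTensor* tensor = allocator->AllocateTempTfLiteTensor(
      model, allocations, /*tensor_index=*/0, /*subgraph_index=*/0);
  if (tensor == nullptr) {
    return;
  }
  // ... read tensor->type, tensor->dims, quantization params, etc. ...
  allocator->DeallocateTempTfLiteTensor(tensor);
  if (allocator->IsAllTempDeallocated()) {
    allocator->ResetTempAllocations();
  }
}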
+ virtual TfLiteStatus AllocateVariables( + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors, + const int32_t* offline_planner_offsets); // Allocate and return a persistent TfLiteTensor. // TODO(b/162311891): Drop this method when the interpreter has an API for // accessing TfLiteEvalTensor structs. - virtual TfLiteTensor* AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index); + virtual TfLiteTensor* AllocatePersistentTfLiteTensorInternal(); // Populates a TfLiteTensor struct with data from the model flatbuffer. Any // quantization data is allocated from either the tail (persistent) or temp // sections of the arena based on the allocation flag. - virtual TfLiteStatus PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp); - - ErrorReporter* error_reporter() const; - - // Returns the first subgraph from the model. - const SubGraph* GetSubGraphFromModel(const Model* model); + virtual TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(const Model* model, + TfLiteTensor* tensor, + int tensor_index, + int subgraph_idx, + bool allocate_temp); private: // Commits a memory plan for all non-persistent buffer allocations in the @@ -240,8 +278,7 @@ class MicroAllocator { // ScratchBufferHandle structs that will point to allocated buffers also in // the head section. virtual TfLiteStatus CommitStaticMemoryPlan( - const Model* model, const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors, + const Model* model, SubgraphAllocations* allocations, ScratchBufferHandle* scratch_buffer_handles); // Allocates an array of ScratchBufferHandle structs in the tail section for a @@ -259,15 +296,24 @@ class MicroAllocator { internal::ScratchBufferRequest* GetScratchBufferRequests(); // A simple memory allocator that always allocate from the arena tail or head. - SimpleMemoryAllocator* memory_allocator_; + INonPersistentBufferAllocator* non_persistent_buffer_allocator_; + IPersistentBufferAllocator* persistent_buffer_allocator_; + + // Allocator used to allocate persistent builtin data. + TfLiteBridgeBuiltinDataAllocator* builtin_data_allocator_; + + // Activation buffer memory planner. + MicroMemoryPlanner* memory_planner_; - ErrorReporter* error_reporter_; bool model_is_allocating_; // Holds the number of ScratchBufferRequest instances stored in the head // section when a model is allocating. size_t scratch_buffer_request_count_ = 0; + // Holds ScratchBufferRequest when a model is allocating + uint8_t* scratch_buffer_head_ = nullptr; + // Holds the byte length of the memory plan with the largest head usage. Used // to ensure that multi-tenant allocations can share the head for buffers. size_t max_head_buffer_usage_ = 0; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_arena_constants.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_arena_constants.h new file mode 100644 index 0000000..8282817 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_arena_constants.h @@ -0,0 +1,28 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_ARENA_CONSTANTS_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_ARENA_CONSTANTS_H_ + +namespace tflite { + +// The default buffer alignment requirement. +// We align tensor buffers to 16-byte boundaries, since this is a common +// requirement for SIMD extensions. +constexpr int MicroArenaBufferAlignment() { return 16; } + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_ARENA_CONSTANTS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_context.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_context.cc new file mode 100644 index 0000000..b0a4244 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_context.cc @@ -0,0 +1,129 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { +MicroContext::MicroContext(MicroAllocator* allocator, const Model* model, + MicroGraph* graph) + : allocator_(*allocator), graph_(*graph), model_(model) {} + +MicroContext::~MicroContext() {} + +void* MicroContext::AllocatePersistentBuffer(size_t bytes) { + return allocator_.AllocatePersistentBuffer(bytes); +} + +TfLiteStatus MicroContext::RequestScratchBufferInArena(size_t bytes, + int* buffer_idx) { + return allocator_.RequestScratchBufferInArena( + bytes, graph_.GetCurrentSubgraphIndex(), buffer_idx); +} + +void* MicroContext::GetScratchBuffer(int buffer_idx) { + ScratchBufferHandle* handle = scratch_buffer_handles_ + buffer_idx; + return handle->data; +} + +TfLiteTensor* MicroContext::AllocateTempTfLiteTensor(int tensor_idx) { + return allocator_.AllocateTempTfLiteTensor(model_, graph_.GetAllocations(), + tensor_idx, + graph_.GetCurrentSubgraphIndex()); +} + +int MicroContext::GetTensorIndex(int index, int max_size, + const int* tensor_indices) { + if (index >= 0 && index < max_size) { + const int tensor_index = tensor_indices[index]; + if (tensor_index != kTfLiteOptionalTensor) { + return tensor_index; + } + } + return -1; +} + +TfLiteTensor* MicroContext::AllocateTempInputTensor(const TfLiteNode* node, + int index) { + const int tensor_index = + GetTensorIndex(index, node->inputs->size, node->inputs->data); + if (tensor_index < 0) { + return nullptr; + } + return AllocateTempTfLiteTensor(tensor_index); +} + +TfLiteTensor* MicroContext::AllocateTempOutputTensor(const TfLiteNode* node, + int index) { + const int tensor_index = + GetTensorIndex(index, node->outputs->size, node->outputs->data); + if (tensor_index < 0) { + return nullptr; + } + return AllocateTempTfLiteTensor(tensor_index); +} + +TfLiteTensor* MicroContext::AllocateTempIntermediateTensor( + const TfLiteNode* node, int index) { + const int tensor_index = GetTensorIndex(index, node->intermediates->size, + node->intermediates->data); + if (tensor_index < 0) { + return nullptr; + } + return AllocateTempTfLiteTensor(tensor_index); +} + +void MicroContext::DeallocateTempTfLiteTensor(TfLiteTensor* tensor) { + return allocator_.DeallocateTempTfLiteTensor(tensor); +} + +TfLiteEvalTensor* MicroContext::GetEvalTensor(int tensor_idx) { + return &graph_.GetAllocations()[graph_.GetCurrentSubgraphIndex()] + .tensors[tensor_idx]; +} + +void MicroContext::SetScratchBufferHandles( + ScratchBufferHandle* scratch_buffer_handles) { + scratch_buffer_handles_ = scratch_buffer_handles; +} + +TfLiteStatus MicroContext::set_external_context( + void* external_context_payload) { + if (external_context_payload == nullptr || + external_context_payload_ != nullptr) { + MicroPrintf( + "Attempting to set external context to %x but it was %x already", + external_context_payload, external_context_payload_); + return kTfLiteError; + } + + external_context_payload_ = external_context_payload; + return kTfLiteOk; +} + +void MicroContextReportOpError(struct TfLiteContext* context, + const char* format, ...) 
{ + va_list args; + va_start(args, format); + Log(format, args); + va_end(args); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_context.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_context.h new file mode 100644 index 0000000..65a64b2 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_context.h @@ -0,0 +1,161 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_CONTEXT_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_CONTEXT_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" + +namespace tflite { +// MicroContext is eventually going to become the API between TFLM and the +// kernels, replacing all the functions in TfLiteContext. The end state is code +// kernels to have code like: +// +// MicroContext* micro_context = GetMicroContext(context); +// micro_context-> +class MicroContext { + public: + // Does not take any ownership, and all pointers must refer to valid objects + // that outlive the one constructed. + explicit MicroContext(MicroAllocator* allocator, const Model* model, + MicroGraph* graph); + virtual ~MicroContext(); + + // Allocate persistent buffer which has the same life time as the interpreter. + // Returns nullptr on failure. + // The memory is allocated from the tail. + // This method is only available in Init or Prepare stage. + // Virtual so that it can be faked for kernel tests. + virtual void* AllocatePersistentBuffer(size_t bytes); + + // Request a scratch buffer in the arena through static memory planning. + // This method is only available in Prepare stage and the buffer is allocated + // by the interpreter between Prepare and Eval stage. In Eval stage, + // GetScratchBuffer API can be used to fetch the address. + // Virtual so that it can be faked for kernel tests. + virtual TfLiteStatus RequestScratchBufferInArena(size_t bytes, + int* buffer_idx); + + // Get the scratch buffer pointer. + // This method is only available in Eval stage. + // Virtual so that it can be faked for kernel tests. + virtual void* GetScratchBuffer(int buffer_idx); + + // Returns a temporary TfLiteTensor struct for a given index. + // Virtual so that it can be faked for kernel tests. + virtual TfLiteTensor* AllocateTempTfLiteTensor(int tensor_idx); + + // Returns a temporary TfLiteTensor struct for the specified input tensor of a + // given mode. This is the recommended API over the deprecated + // GetInput/GetInputSafe to get a temp input tensor. The returned tensor shall + // be freed via calling DeallocateTempTfLiteTensor. 
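The MicroContext class comment above sketches the intended kernel-side pattern ("GetMicroContext(context); micro_context->..."). A minimal, hypothetical Prepare() using the temp input/output helpers from this header; the kernel itself and its checks are assumptions, only the MicroContext calls come from this patch.

#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h"

TfLiteStatus ExampleKernelPrepare(TfLiteContext* context, TfLiteNode* node) {
  tflite::MicroContext* micro_context = tflite::GetMicroContext(context);

  TfLiteTensor* input = micro_context->AllocateTempInputTensor(node, 0);
  TfLiteTensor* output = micro_context->AllocateTempOutputTensor(node, 0);
  if (input == nullptr || output == nullptr) {
    return kTfLiteError;
  }

  // ... validate shapes/types and allocate any persistent op data here ...

  // Temp tensors must be handed back before Prepare() returns.
  micro_context->DeallocateTempTfLiteTensor(input);
  micro_context->DeallocateTempTfLiteTensor(output);
  return kTfLiteOk;
}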
+ virtual TfLiteTensor* AllocateTempInputTensor(const TfLiteNode* node, + int index); + + // Returns a temporary TfLiteTensor struct for the specified output tensor of + // a given mode. This is the recommended API over the deprecated + // GetOutput/GetOutputSafe to get a temp output tensor. The returned tensor + // shall be freed via calling DeallocateTempTfLiteTensor. + virtual TfLiteTensor* AllocateTempOutputTensor(const TfLiteNode* node, + int index); + + // Returns a temporary TfLiteTensor struct for the specified intermediate + // tensor of a given mode. This is the recommended API over the deprecated + // GetIntermediates/GetIntermediatesSafe to get a temp intermediate tensor. + // The returned tensor shall be freed via calling DeallocateTempTfLiteTensor. + virtual TfLiteTensor* AllocateTempIntermediateTensor(const TfLiteNode* node, + int index); + + // Deallocates a temp TfLiteTensor. + // Virtual so that it can be faked for kernel tests. + virtual void DeallocateTempTfLiteTensor(TfLiteTensor* tensor); + + // Returns a TfLiteEvalTensor struct for a given index. + // Virtual so that it can be faked for kernel tests. + virtual TfLiteEvalTensor* GetEvalTensor(int tensor_idx); + + // Does not take ownership of the pointer and the pointer must refer to valid + // an object that outlive this class instance. + // This can only be called once to set one external context. + TfLiteStatus set_external_context(void* external_context_payload); + + void* external_context() { return external_context_payload_; } + + MicroGraph& graph() { return graph_; } + + // Sets the pointer to a list of ScratchBufferHandle instances. + // Not API between TFLM and kernels. Primarily used by the framework for + // housekeeping in MicroContext. + void SetScratchBufferHandles(ScratchBufferHandle* scratch_buffer_handles); + + private: + // Return the tensor index as tensor_indices[index]. tensor_indices is of + // max_size. Return -1 if index is not in the valid range of tensor_indices. + int GetTensorIndex(int index, int max_size, const int* tensor_indices); + + MicroAllocator& allocator_; + MicroGraph& graph_; + const Model* model_; + + ScratchBufferHandle* scratch_buffer_handles_ = nullptr; + void* external_context_payload_ = nullptr; + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +inline MicroContext* GetMicroContext(const struct TfLiteContext* context) { + return reinterpret_cast(context->impl_); +} + +// Deprecated API. Prefer to using the MicroContext API directly from the +// kernels. +// TODO(b/213010668): migrate all existing kernels to use MicroContext, delete +// these functions, and remove corresponding members from the TfLiteContext +// struct for TFLM. 
+inline void* MicroContextAllocatePersistentBuffer(TfLiteContext* ctx, + size_t bytes) { + return GetMicroContext(ctx)->AllocatePersistentBuffer(bytes); +} +inline TfLiteStatus MicroContextRequestScratchBufferInArena(TfLiteContext* ctx, + size_t bytes, + int* buffer_idx) { + return GetMicroContext(ctx)->RequestScratchBufferInArena(bytes, buffer_idx); +} +inline void* MicroContextGetScratchBuffer(TfLiteContext* ctx, int buffer_idx) { + return GetMicroContext(ctx)->GetScratchBuffer(buffer_idx); +} +inline TfLiteTensor* MicroContextGetTensor(const struct TfLiteContext* context, + int tensor_idx) { + return GetMicroContext(context)->AllocateTempTfLiteTensor(tensor_idx); +} +inline TfLiteEvalTensor* MicroContextGetEvalTensor( + const struct TfLiteContext* context, int tensor_idx) { + return GetMicroContext(context)->GetEvalTensor(tensor_idx); +} +inline TfLiteExternalContext* MicroContextGetExternalContext( + TfLiteContext* context, TfLiteExternalContextType unused) { + return reinterpret_cast( + GetMicroContext(context)->external_context()); +} + +// Requests that an error be reported with format string msg. +void MicroContextReportOpError(struct TfLiteContext* context, + const char* format, ...); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_CONTEXT_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.cc similarity index 53% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_utils.h rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.cc index e406ac1..f15cfcc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_utils.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.cc @@ -12,29 +12,32 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ -#define TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h" + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace { +uint8_t micro_error_reporter_buffer[sizeof(tflite::MicroErrorReporter)]; +tflite::MicroErrorReporter* error_reporter_ = nullptr; + +} // namespace + namespace tflite { -namespace ops { -namespace micro { - -// Same as gtl::Greater but defined here to reduce dependencies and -// binary size for micro environment. 
-struct Greater { - template - bool operator()(const T& x, const T& y) const { - return x > y; +ErrorReporter* GetMicroErrorReporter() { + if (error_reporter_ == nullptr) { + error_reporter_ = new (micro_error_reporter_buffer) MicroErrorReporter(); } -}; + return error_reporter_; +} -struct Less { - template - bool operator()(const T& x, const T& y) const { - return x < y; - } -}; +int MicroErrorReporter::Report(const char* format, va_list args) { + Log(format, args); + return 0; +} -} // namespace micro -} // namespace ops } // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_KERNELS_MICRO_UTILS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h index d2fd174..20a2423 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h @@ -12,29 +12,17 @@ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ -#define TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ +#ifndef TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_MICRO_ERROR_REPORTER_H_ +#define TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_MICRO_ERROR_REPORTER_H_ #include #include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" -#if !defined(TF_LITE_STRIP_ERROR_STRINGS) -// This function can be used independent of the MicroErrorReporter to get -// printf-like functionalitys and are common to all target platforms. -void MicroPrintf(const char* format, ...); -#else -// We use a #define to ensure that the strings are completely stripped, to -// prevent an unnecessary increase in the binary size. -#define MicroPrintf(format, ...) -#endif - namespace tflite { - // Get a pointer to a singleton global error reporter. ErrorReporter* GetMicroErrorReporter(); - class MicroErrorReporter : public ErrorReporter { public: ~MicroErrorReporter() override {} @@ -46,4 +34,4 @@ class MicroErrorReporter : public ErrorReporter { } // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_MICRO_ERROR_REPORTER_H_ +#endif // TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_MICRO_ERROR_REPORTER_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.cc new file mode 100644 index 0000000..fa43d6c --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.cc @@ -0,0 +1,258 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" + +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { +namespace { + +const char* OpNameFromRegistration(const TfLiteRegistration* registration) { + if (registration->builtin_code == BuiltinOperator_CUSTOM) { + return registration->custom_name; + } else { + return EnumNameBuiltinOperator(BuiltinOperator(registration->builtin_code)); + } +} + +} // namespace + +MicroGraph::MicroGraph(TfLiteContext* context, const Model* model, + MicroAllocator* allocator, + MicroResourceVariables* resource_variables) + : context_(context), + model_(model), + allocator_(allocator), + current_subgraph_index_(0), + resource_variables_(resource_variables) { + if (model != nullptr) { + subgraphs_ = model->subgraphs(); + } +} + +MicroGraph::~MicroGraph() {} + +TfLiteStatus MicroGraph::InitSubgraphs() { + int previous_subgraph_idx = current_subgraph_index_; + + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + current_subgraph_index_ = subgraph_idx; + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (size_t i = 0; i < operators_size; ++i) { + TfLiteNode* node = + &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node); + const TfLiteRegistration* registration = + subgraph_allocations_[subgraph_idx] + .node_and_registrations[i] + .registration; + size_t init_data_size; + const char* init_data; + if (registration->builtin_code == BuiltinOperator_CUSTOM) { + init_data = reinterpret_cast(node->custom_initial_data); + init_data_size = node->custom_initial_data_size; + } else { + init_data = reinterpret_cast(node->builtin_data); + init_data_size = 0; + } + if (registration->init) { + node->user_data = + registration->init(context_, init_data, init_data_size); + } + } + } + current_subgraph_index_ = previous_subgraph_idx; + + return kTfLiteOk; +} + +TfLiteStatus MicroGraph::PrepareSubgraphs(bool run_all_prep_ops) { + int previous_subgraph_idx = current_subgraph_index_; + bool all_prep_ops_ok = true; + + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + current_subgraph_index_ = subgraph_idx; + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (size_t i = 0; i < operators_size; ++i) { + TfLiteNode* node = + &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node); + const TfLiteRegistration* registration = + subgraph_allocations_[subgraph_idx] + .node_and_registrations[i] + .registration; + if (registration->prepare != nullptr) { + TfLiteStatus prepare_status = registration->prepare(context_, node); + if (prepare_status != kTfLiteOk) { + MicroPrintf("Node %s (number %df) failed to prepare with status %d", + OpNameFromRegistration(registration), i, prepare_status); + + all_prep_ops_ok 
= false; + if (!run_all_prep_ops) { + return kTfLiteError; + } + } + } + allocator_->FinishPrepareNodeAllocations(/*node_id=*/i); + } + + if (!all_prep_ops_ok) { + return kTfLiteError; + } + + } + current_subgraph_index_ = previous_subgraph_idx; + + return kTfLiteOk; +} + +TfLiteStatus MicroGraph::FreeSubgraphs() { + int previous_subgraph_idx = current_subgraph_index_; + + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + current_subgraph_index_ = subgraph_idx; + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (size_t i = 0; i < operators_size; ++i) { + TfLiteNode* node = + &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node); + const TfLiteRegistration* registration = + subgraph_allocations_[subgraph_idx] + .node_and_registrations[i] + .registration; + // registration is allocated outside the interpreter, so double check to + // make sure it's not nullptr; + if (registration != nullptr && registration->free != nullptr) { + registration->free(context_, node->user_data); + } + } + } + current_subgraph_index_ = previous_subgraph_idx; + + return kTfLiteOk; +} + +TfLiteStatus MicroGraph::InvokeSubgraph(int subgraph_idx) { + int previous_subgraph_idx = current_subgraph_index_; + current_subgraph_index_ = subgraph_idx; + + if (static_cast(subgraph_idx) >= subgraphs_->size()) { + MicroPrintf("Accessing subgraph %d but only %d subgraphs found", + subgraph_idx, subgraphs_->size()); + return kTfLiteError; + } + uint32_t operators_size = NumSubgraphOperators(model_, subgraph_idx); + for (size_t i = 0; i < operators_size; ++i) { + TfLiteNode* node = + &(subgraph_allocations_[subgraph_idx].node_and_registrations[i].node); + const TfLiteRegistration* registration = subgraph_allocations_[subgraph_idx] + .node_and_registrations[i] + .registration; + +// This ifdef is needed (even though ScopedMicroProfiler itself is a no-op with +// -DTF_LITE_STRIP_ERROR_STRINGS) because the function OpNameFromRegistration is +// only defined for builds with the error strings. +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + ScopedMicroProfiler scoped_profiler( + OpNameFromRegistration(registration), + reinterpret_cast(context_->profiler)); +#endif + + TFLITE_DCHECK(registration->invoke); + TfLiteStatus invoke_status = registration->invoke(context_, node); + + // All TfLiteTensor structs used in the kernel are allocated from temp + // memory in the allocator. This creates a chain of allocations in the + // temp section. The call below resets the chain of allocations to + // prepare for the next call. 
+ allocator_->ResetTempAllocations(); + + if (invoke_status == kTfLiteError) { + MicroPrintf("Node %s (number %d) failed to invoke with status %d", + OpNameFromRegistration(registration), i, invoke_status); + return kTfLiteError; + } else if (invoke_status != kTfLiteOk) { + return invoke_status; + } + } + current_subgraph_index_ = previous_subgraph_idx; + return kTfLiteOk; +} + +TfLiteStatus MicroGraph::ResetVariableTensors() { + for (size_t subgraph_idx = 0; subgraph_idx < subgraphs_->size(); + subgraph_idx++) { + const SubGraph* subgraph = (*subgraphs_)[subgraph_idx]; + for (size_t i = 0; i < subgraph->tensors()->size(); ++i) { + auto* tensor = subgraph->tensors()->Get(i); + if (tensor->is_variable()) { + size_t buffer_size; + TF_LITE_ENSURE_STATUS(TfLiteEvalTensorByteLength( + &subgraph_allocations_[subgraph_idx].tensors[i], &buffer_size)); + + int value = 0; + if (tensor->type() == tflite::TensorType_INT8) { + value = tensor->quantization()->zero_point()->Get(0); + } + memset(subgraph_allocations_[subgraph_idx].tensors[i].data.raw, value, + buffer_size); + } + } + } + if (resource_variables_ != nullptr) { + resource_variables_->ResetAll(); + } + + return kTfLiteOk; +} + +int MicroGraph::NumSubgraphs() { return model_->subgraphs()->size(); } + +void MicroGraph::SetSubgraphAllocations( + SubgraphAllocations* subgraph_allocations) { + subgraph_allocations_ = subgraph_allocations; +} + +size_t MicroGraph::NumSubgraphInputs(int subgraph_idx) { + return model_->subgraphs()->Get(subgraph_idx)->inputs()->size(); +} + +TfLiteEvalTensor* MicroGraph::GetSubgraphInput(int subgraph_idx, + int input_idx) { + int tensor_idx = + model_->subgraphs()->Get(subgraph_idx)->inputs()->Get(input_idx); + return &subgraph_allocations_[subgraph_idx].tensors[tensor_idx]; +} + +size_t MicroGraph::NumSubgraphOutputs(int subgraph_idx) { + return model_->subgraphs()->Get(subgraph_idx)->outputs()->size(); +} + +TfLiteEvalTensor* MicroGraph::GetSubgraphOutput(int subgraph_idx, + int output_idx) { + int tensor_idx = + model_->subgraphs()->Get(subgraph_idx)->outputs()->Get(output_idx); + return &subgraph_allocations_[subgraph_idx].tensors[tensor_idx]; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h new file mode 100644 index 0000000..0e096c7 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h @@ -0,0 +1,110 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_GRAPH_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_GRAPH_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +// Abstracts the details of interacting with the tflite::Model. +// +// Provides methods to access, initialize, prepare, invoke and free any +// subgraph in the tflite::Graph. +class MicroGraph { + public: + // The lifetime of the context, model, allocator and resource_variables must + // be at least as long as that of the graph object, since the this class may + // need to access them at any time. If resource_variables is a nullptr, + // GetResourceVariables will return a nullptr. + MicroGraph(TfLiteContext* context, const Model* model, + MicroAllocator* allocator, + MicroResourceVariables* resource_variables); + virtual ~MicroGraph(); + + // Sets up builtin data and calls TfLiteRegistration->Init for every operator + // in every subgraph in the model. + virtual TfLiteStatus InitSubgraphs(); + + // Calls TfLiteRegistration->Prepare for every operator in every subgraph in + // the model. + virtual TfLiteStatus PrepareSubgraphs(bool run_all_prep_ops); + + // Calls TfLiteRegistration->Free for every operator in every subgraph in the + // model. + virtual TfLiteStatus FreeSubgraphs(); + + // Calls TfLiteRegistration->Invoke for every operator in a single subgraph in + // the model. + virtual TfLiteStatus InvokeSubgraph(int subgraph_idx); + + // Zeros out all variable tensors in all subgraphs in the model. + virtual TfLiteStatus ResetVariableTensors(); + + // Number of tensor inputs to a specified subgraph in the model. + virtual size_t NumSubgraphInputs(int subgraph_idx); + + // Get the specified input tensor of a specified subgraph in the model. + virtual TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int input_idx); + + // Number of tensor outputs from a specified subgraph in the model. + virtual size_t NumSubgraphOutputs(int subgraph_idx); + + // Get the specified output tensor of a specified subgraph in the model. + virtual TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx, int output_idx); + + // Number of subgraphs in the model. + virtual int NumSubgraphs(); + + // Hook to pass in subgraph allocations tracked within the interpreter, + // allowing MicroGraph to init / prepare / invoke subgraphs in the model. + void SetSubgraphAllocations(SubgraphAllocations* subgraph_allocations); + + // Get the current subgraph index. Within an on operator, this is guaranteed + // to be the subgraph of that operator. + int GetCurrentSubgraphIndex() { return current_subgraph_index_; } + + // Set the current subgraph index. + void SetCurrentSubgraphIndex(int subgraph_idx) + { + current_subgraph_index_ = subgraph_idx; + } + + // Gets the list of alloctions for each subgraph. This is the source of truth + // for all per-subgraph allocation data. + SubgraphAllocations* GetAllocations() { return subgraph_allocations_; } + + // Get the resource variables for this TFLM graph. 
+ MicroResourceVariables* GetResourceVariables() { return resource_variables_; } + + private: + TfLiteContext* context_; + const Model* model_; + MicroAllocator* allocator_; + SubgraphAllocations* subgraph_allocations_ = nullptr; + int current_subgraph_index_; + MicroResourceVariables* resource_variables_; + const flatbuffers::Vector>* subgraphs_; + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_GRAPH_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cc new file mode 100644 index 0000000..3c734fb --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cc @@ -0,0 +1,347 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h" + +#include +#include +#include + +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler_interface.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_utils.h" + +namespace tflite { + +MicroInterpreter::MicroInterpreter(const Model* model, + const MicroOpResolver& op_resolver, + uint8_t* tensor_arena, + size_t tensor_arena_size, + MicroResourceVariables* resource_variables, + MicroProfilerInterface* profiler) + : model_(model), + op_resolver_(op_resolver), + allocator_(*MicroAllocator::Create(tensor_arena, tensor_arena_size)), + + graph_(&context_, model, &allocator_, resource_variables), + tensors_allocated_(false), + initialization_status_(kTfLiteError), + input_tensors_(nullptr), + output_tensors_(nullptr), + micro_context_(&allocator_, model_, &graph_) { + Init(profiler); +} + +MicroInterpreter::MicroInterpreter(const Model* model, + const MicroOpResolver& op_resolver, + MicroAllocator* allocator, + MicroResourceVariables* resource_variables, + MicroProfilerInterface* profiler) + : model_(model), + op_resolver_(op_resolver), + allocator_(*allocator), + graph_(&context_, model, allocator, 
resource_variables), + tensors_allocated_(false), + initialization_status_(kTfLiteError), + input_tensors_(nullptr), + output_tensors_(nullptr), + micro_context_(&allocator_, model_, &graph_) { + Init(profiler); +} + +MicroInterpreter::~MicroInterpreter() { + if (graph_.GetAllocations() != nullptr) { + graph_.FreeSubgraphs(); + } +#ifdef EON_COMPILER_RUN + if (node_and_registrations_ != nullptr) { + for (size_t i = 0; i < model_->subgraphs()->Get(0)->operators()->size(); ++i) { + TfLiteNode* node = &(node_and_registrations_[i].node); + const TfLiteRegistration* registration = + node_and_registrations_[i].registration; + // registration is allocated outside the interpreter, so double check to + // make sure it's not nullptr; + if (registration != nullptr && registration->free != nullptr) { + registration->free(&context_, node->user_data); + } + } + } +#endif +} + +void MicroInterpreter::Init(MicroProfilerInterface* profiler) { + context_.impl_ = static_cast(µ_context_); + context_.ReportError = MicroContextReportOpError; + context_.GetTensor = MicroContextGetTensor; + context_.GetEvalTensor = MicroContextGetEvalTensor; + context_.profiler = profiler; + + initialization_status_ = kTfLiteOk; +} + +TfLiteStatus MicroInterpreter::PrepareNodeAndRegistrationDataFromFlatbuffer() { + for (int subgraph_idx = 0; subgraph_idx < graph_.NumSubgraphs(); + subgraph_idx++) { + const SubGraph* subgraph = model_->subgraphs()->Get(subgraph_idx); + TFLITE_DCHECK(subgraph != nullptr); + + auto* opcodes = model_->operator_codes(); + TfLiteBridgeBuiltinDataAllocator* builtin_data_allocator = + allocator_.GetBuiltinDataAllocator(); + uint32_t operators_size = NumSubgraphOperators(subgraph); + for (size_t i = 0; i < operators_size; ++i) { + const auto* op = subgraph->operators()->Get(i); + const size_t index = op->opcode_index(); + if (index >= opcodes->size()) { + MicroPrintf("Missing registration for opcode_index %d\n", index); + return kTfLiteError; + } + const auto* opcode = opcodes->Get(index); + TfLiteStatus status = + GetRegistrationFromOpCode(opcode, op_resolver_, + &(graph_.GetAllocations()[subgraph_idx] + .node_and_registrations[i] + .registration)); + if (status != kTfLiteOk) { + MicroPrintf("Failed to get registration from op code %s\n ", + EnumNameBuiltinOperator(GetBuiltinCode(opcode))); + return status; + } + const auto* registration = graph_.GetAllocations()[subgraph_idx] + .node_and_registrations[i] + .registration; + if (registration == nullptr) { + MicroPrintf("Skipping op for opcode_index %d\n", index); + return kTfLiteError; + } + BuiltinOperator op_type = + static_cast(registration->builtin_code); + + const char* custom_data = nullptr; + size_t custom_data_size = 0; + unsigned char* builtin_data = nullptr; + + if (op_type == BuiltinOperator_CUSTOM) { + // Custom Ops may or may not have a non-null custom_options field. 
+ if (op->custom_options() != nullptr) { + custom_data = + reinterpret_cast(op->custom_options()->data()); + custom_data_size = op->custom_options()->size(); + } + } else { + if (op->custom_options() != nullptr) { + MicroPrintf( + "Unsupported behavior: found builtin operator %s with custom " + "options.\n", + EnumNameBuiltinOperator(op_type)); + return kTfLiteError; + } + + TfLiteBridgeBuiltinParseFunction parser = + op_resolver_.GetOpDataParser(op_type); + if (parser == nullptr) { + MicroPrintf("Did not find a parser for %s", + EnumNameBuiltinOperator(op_type)); + + return kTfLiteError; + } + TF_LITE_ENSURE_STATUS(CallBuiltinParseFunction( + parser, op, builtin_data_allocator, (void**)(&builtin_data))); + } + + TfLiteIntArray* inputs_array = + FlatBufferVectorToTfLiteTypeArray(op->inputs()); + TfLiteIntArray* outputs_array = + FlatBufferVectorToTfLiteTypeArray(op->outputs()); + + TfLiteNode* node = &( + graph_.GetAllocations()[subgraph_idx].node_and_registrations[i].node); + *node = {}; + node->inputs = inputs_array; + node->outputs = outputs_array; + node->builtin_data = reinterpret_cast(builtin_data); + node->custom_initial_data = custom_data; + node->custom_initial_data_size = custom_data_size; + + if (op->intermediates() && (op->intermediates()->size() > 0)) { + node->intermediates = + FlatBufferVectorToTfLiteTypeArray(op->intermediates()); + } + } + } + return kTfLiteOk; +} + +TfLiteStatus MicroInterpreter::AllocateTensors(bool run_all_prep_ops) { + SubgraphAllocations* allocations = allocator_.StartModelAllocation(model_); + + if (allocations == nullptr) { + MicroPrintf("Failed starting model allocation.\n"); + initialization_status_ = kTfLiteError; + return kTfLiteError; + } + + graph_.SetSubgraphAllocations(allocations); + + TF_LITE_ENSURE_STATUS(PrepareNodeAndRegistrationDataFromFlatbuffer()); + + // Only allow AllocatePersistentBuffer in Init stage. + context_.AllocatePersistentBuffer = MicroContextAllocatePersistentBuffer; + context_.RequestScratchBufferInArena = nullptr; + context_.GetScratchBuffer = nullptr; + context_.GetExternalContext = nullptr; + TF_LITE_ENSURE_STATUS(graph_.InitSubgraphs()); + + // Both AllocatePersistentBuffer and RequestScratchBufferInArena is + // available in Prepare stage. + context_.RequestScratchBufferInArena = + MicroContextRequestScratchBufferInArena; + // external_context become available in Prepare stage. + context_.GetExternalContext = MicroContextGetExternalContext; + + TF_LITE_ENSURE_STATUS(graph_.PrepareSubgraphs(run_all_prep_ops)); + + // Prepare is done, we're ready for Invoke. Memory allocation is no longer + // allowed. Kernels can only fetch scratch buffers via GetScratchBuffer. + context_.AllocatePersistentBuffer = nullptr; + context_.RequestScratchBufferInArena = nullptr; + context_.GetScratchBuffer = MicroContextGetScratchBuffer; + + TF_LITE_ENSURE_OK(&context_, allocator_.FinishModelAllocation( + model_, graph_.GetAllocations(), + &scratch_buffer_handles_)); + + micro_context_.SetScratchBufferHandles(scratch_buffer_handles_); + + // TODO(b/162311891): Drop these allocations when the interpreter supports + // handling buffers from TfLiteEvalTensor. 
+ input_tensors_ = + reinterpret_cast(allocator_.AllocatePersistentBuffer( + sizeof(TfLiteTensor*) * inputs_size())); + if (input_tensors_ == nullptr) { + MicroPrintf( + "Failed to allocate memory for context->input_tensors_, " + "%d bytes required", + sizeof(TfLiteTensor*) * inputs_size()); + return kTfLiteError; + } + + for (size_t i = 0; i < inputs_size(); ++i) { + input_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( + model_, graph_.GetAllocations(), inputs().Get(i), 0); + if (input_tensors_[i] == nullptr) { + MicroPrintf("Failed to initialize input tensor %d", i); + return kTfLiteError; + } + } + + // TODO(b/162311891): Drop these allocations when the interpreter supports + // handling buffers from TfLiteEvalTensor. + output_tensors_ = + reinterpret_cast(allocator_.AllocatePersistentBuffer( + sizeof(TfLiteTensor*) * outputs_size())); + if (output_tensors_ == nullptr) { + MicroPrintf( + "Failed to allocate memory for context->output_tensors_, " + "%d bytes required", + sizeof(TfLiteTensor*) * outputs_size()); + return kTfLiteError; + } + + for (size_t i = 0; i < outputs_size(); ++i) { + output_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( + model_, graph_.GetAllocations(), outputs().Get(i), 0); + if (output_tensors_[i] == nullptr) { + MicroPrintf("Failed to initialize output tensor %d", i); + return kTfLiteError; + } + } + + TF_LITE_ENSURE_STATUS(Reset()); + +#ifdef EON_COMPILER_RUN + node_and_registrations_ = allocations->node_and_registrations; +#endif + + tensors_allocated_ = true; + return kTfLiteOk; +} + +TfLiteStatus MicroInterpreter::Invoke() { + if (initialization_status_ != kTfLiteOk) { + MicroPrintf("Invoke() called after initialization failed\n"); + return kTfLiteError; + } + + // Ensure tensors are allocated before the interpreter is invoked to avoid + // difficult to debug segfaults. + if (!tensors_allocated_) { + TF_LITE_ENSURE_OK(&context_, AllocateTensors(true)); + } + return graph_.InvokeSubgraph(0); +} + +TfLiteTensor* MicroInterpreter::input(size_t index) { + const size_t length = inputs_size(); + if (index >= length) { + MicroPrintf("Input index %d out of range (length is %d)", index, length); + return nullptr; + } + return input_tensors_[index]; +} + +TfLiteTensor* MicroInterpreter::output(size_t index) { + const size_t length = outputs_size(); + if (index >= length) { + MicroPrintf("Output index %d out of range (length is %d)", index, length); + return nullptr; + } + return output_tensors_[index]; +} + +TfLiteTensor* MicroInterpreter::tensor(size_t index, size_t subgraph_idx) { + const size_t length = tensors_size(subgraph_idx); + if (index >= length) { + MicroPrintf("Tensor index %d out of range (length is %d)", index, length); + return nullptr; + } + return allocator_.AllocatePersistentTfLiteTensor(model_, graph_.GetAllocations(), index, subgraph_idx); +} + +// Repurposing free subgraphs to reset state for some ops for now +// will reset api is made. See b/220940833#comment25 for more context. 
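For orientation, this is how a caller is expected to drive the interpreter interface implemented above. The sketch below is not part of this patch: the arena size, the registered op list, and the RunInference helper name are placeholder assumptions.

#include <cstddef>
#include <cstdint>

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h"

namespace {
constexpr size_t kArenaSize = 32 * 1024;  // placeholder; tune per model
alignas(16) uint8_t tensor_arena[kArenaSize];
}  // namespace

TfLiteStatus RunInference(const void* model_data, const float* features,
                          size_t feature_count, float* scores,
                          size_t score_count) {
  const tflite::Model* model = tflite::GetModel(model_data);
  if (model->version() != TFLITE_SCHEMA_VERSION) {
    return kTfLiteError;
  }

  // Op list is illustrative; register whatever the model actually uses.
  // Return statuses of the Add* calls are elided for brevity.
  static tflite::MicroMutableOpResolver<4> resolver;
  resolver.AddFullyConnected();
  resolver.AddRelu();
  resolver.AddReshape();
  resolver.AddSoftmax();

  // No ErrorReporter parameter anymore; resource variables and profiler are
  // optional and default to nullptr.
  tflite::MicroInterpreter interpreter(model, resolver, tensor_arena,
                                       kArenaSize);
  if (interpreter.AllocateTensors(/*run_all_prep_ops=*/true) != kTfLiteOk) {
    return kTfLiteError;
  }

  TfLiteTensor* input = interpreter.input(0);
  for (size_t i = 0; i < feature_count; ++i) {
    input->data.f[i] = features[i];
  }

  if (interpreter.Invoke() != kTfLiteOk) {
    return kTfLiteError;
  }

  TfLiteTensor* output = interpreter.output(0);
  for (size_t i = 0; i < score_count; ++i) {
    scores[i] = output->data.f[i];
  }

  // Optional: clear op and variable-tensor state before an unrelated run.
  return interpreter.Reset();
}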
+TfLiteStatus MicroInterpreter::Reset() { + TfLiteStatus status = graph_.FreeSubgraphs(); + if (status != kTfLiteOk) { + return status; + } + return graph_.ResetVariableTensors(); +} + +TfLiteStatus MicroInterpreter::SetMicroExternalContext( + void* external_context_payload) { + return micro_context_.set_external_context(external_context_payload); +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cpp deleted file mode 100644 index ca4397e..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.cpp +++ /dev/null @@ -1,409 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h" - -#include -#include -#include - -#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/tensor_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h" -#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" - -namespace tflite { -namespace { - -#ifndef TF_LITE_STRIP_ERROR_STRINGS -const char* OpNameFromRegistration(const TfLiteRegistration* registration) { - if (registration->builtin_code == BuiltinOperator_CUSTOM) { - return registration->custom_name; - } else { - return EnumNameBuiltinOperator(BuiltinOperator(registration->builtin_code)); - } -} -#endif // !defined(TF_LITE_STRIP_ERROR_STRINGS) - -} // namespace - -namespace internal { - -ContextHelper::ContextHelper(ErrorReporter* error_reporter, - MicroAllocator* allocator, const Model* model) - : allocator_(allocator), error_reporter_(error_reporter), model_(model) {} - -void* ContextHelper::AllocatePersistentBuffer(TfLiteContext* ctx, - size_t bytes) { - return reinterpret_cast(ctx->impl_) - ->allocator_->AllocatePersistentBuffer(bytes); -} - -TfLiteStatus ContextHelper::RequestScratchBufferInArena(TfLiteContext* ctx, - size_t bytes, - int* buffer_idx) { - ContextHelper* helper = reinterpret_cast(ctx->impl_); - return helper->allocator_->RequestScratchBufferInArena(bytes, buffer_idx); -} - -void* ContextHelper::GetScratchBuffer(TfLiteContext* ctx, int buffer_idx) { - ContextHelper* helper = reinterpret_cast(ctx->impl_); - ScratchBufferHandle* handle = 
helper->scratch_buffer_handles_ + buffer_idx; - return handle->data; -} - -void ContextHelper::ReportOpError(struct TfLiteContext* context, - const char* format, ...) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - ContextHelper* helper = static_cast(context->impl_); - va_list args; - va_start(args, format); - TF_LITE_REPORT_ERROR(helper->error_reporter_, format, args); - va_end(args); -#endif -} - -TfLiteTensor* ContextHelper::GetTensor(const struct TfLiteContext* context, - int tensor_idx) { - ContextHelper* helper = static_cast(context->impl_); - return helper->allocator_->AllocateTempTfLiteTensor( - helper->model_, helper->eval_tensors_, tensor_idx); -} - -TfLiteEvalTensor* ContextHelper::GetEvalTensor( - const struct TfLiteContext* context, int tensor_idx) { - ContextHelper* helper = reinterpret_cast(context->impl_); - return &helper->eval_tensors_[tensor_idx]; -} - -void ContextHelper::SetTfLiteEvalTensors(TfLiteEvalTensor* eval_tensors) { - eval_tensors_ = eval_tensors; -} - -void ContextHelper::SetScratchBufferHandles( - ScratchBufferHandle* scratch_buffer_handles) { - scratch_buffer_handles_ = scratch_buffer_handles; -} - -} // namespace internal - -MicroInterpreter::MicroInterpreter(const Model* model, - const MicroOpResolver& op_resolver, - uint8_t* tensor_arena, - size_t tensor_arena_size, - ErrorReporter* error_reporter, - MicroProfiler* profiler) - : model_(model), - op_resolver_(op_resolver), - error_reporter_(error_reporter), - allocator_(*MicroAllocator::Create(tensor_arena, tensor_arena_size, - error_reporter)), - tensors_allocated_(false), - initialization_status_(kTfLiteError), - eval_tensors_(nullptr), - context_helper_(error_reporter_, &allocator_, model), - input_tensors_(nullptr), - output_tensors_(nullptr) { - Init(profiler); -} - -MicroInterpreter::MicroInterpreter(const Model* model, - const MicroOpResolver& op_resolver, - MicroAllocator* allocator, - ErrorReporter* error_reporter, - MicroProfiler* profiler) - : model_(model), - op_resolver_(op_resolver), - error_reporter_(error_reporter), - allocator_(*allocator), - tensors_allocated_(false), - initialization_status_(kTfLiteError), - eval_tensors_(nullptr), - context_helper_(error_reporter_, &allocator_, model), - input_tensors_(nullptr), - output_tensors_(nullptr) { - Init(profiler); -} - -MicroInterpreter::~MicroInterpreter() { - if (node_and_registrations_ != nullptr) { - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - TfLiteNode* node = &(node_and_registrations_[i].node); - const TfLiteRegistration* registration = - node_and_registrations_[i].registration; - // registration is allocated outside the interpreter, so double check to - // make sure it's not nullptr; - if (registration != nullptr && registration->free != nullptr) { - registration->free(&context_, node->user_data); - } - } - } -} - -void MicroInterpreter::Init(MicroProfiler* profiler) { - const flatbuffers::Vector>* subgraphs = - model_->subgraphs(); - if (subgraphs->size() != 1) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Only 1 subgraph is currently supported.\n"); - initialization_status_ = kTfLiteError; - return; - } - subgraph_ = (*subgraphs)[0]; - - context_.impl_ = static_cast(&context_helper_); - context_.ReportError = context_helper_.ReportOpError; - context_.GetTensor = context_helper_.GetTensor; - context_.GetEvalTensor = context_helper_.GetEvalTensor; - context_.recommended_num_threads = 1; - context_.profiler = profiler; - - initialization_status_ = kTfLiteOk; -} - -TfLiteStatus MicroInterpreter::AllocateTensors() { - if 
(allocator_.StartModelAllocation(model_, op_resolver_, - &node_and_registrations_, - &eval_tensors_) != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed starting model allocation.\n"); - initialization_status_ = kTfLiteError; - return kTfLiteError; - } - - // Update the pointer now that TfLiteEvalTensor allocation has completed on - // the context helper. - // TODO(b/16157777): This call would not be needed if ContextHelper rolled - // into the interpreter. - context_helper_.SetTfLiteEvalTensors(eval_tensors_); - context_.tensors_size = subgraph_->tensors()->size(); - - // Only allow AllocatePersistentBuffer in Init stage. - context_.AllocatePersistentBuffer = context_helper_.AllocatePersistentBuffer; - context_.RequestScratchBufferInArena = nullptr; - context_.GetScratchBuffer = nullptr; - - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - size_t init_data_size; - const char* init_data; - if (registration->builtin_code == BuiltinOperator_CUSTOM) { - init_data = reinterpret_cast(node->custom_initial_data); - init_data_size = node->custom_initial_data_size; - } else { - init_data = reinterpret_cast(node->builtin_data); - init_data_size = 0; - } - if (registration->init) { - node->user_data = - registration->init(&context_, init_data, init_data_size); - } - } - - // Both AllocatePersistentBuffer and RequestScratchBufferInArena is - // available in Prepare stage. - context_.RequestScratchBufferInArena = - context_helper_.RequestScratchBufferInArena; - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - if (registration->prepare) { - TfLiteStatus prepare_status = registration->prepare(&context_, node); - if (prepare_status != kTfLiteOk) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Node %s (number %df) failed to prepare with status %d", - OpNameFromRegistration(registration), i, prepare_status); - return kTfLiteError; - } - } - allocator_.FinishPrepareNodeAllocations(/*node_id=*/i); - } - - // Prepare is done, we're ready for Invoke. Memory allocation is no longer - // allowed. Kernels can only fetch scratch buffers via GetScratchBuffer. - context_.AllocatePersistentBuffer = nullptr; - context_.RequestScratchBufferInArena = nullptr; - context_.GetScratchBuffer = context_helper_.GetScratchBuffer; - - TF_LITE_ENSURE_OK(&context_, - allocator_.FinishModelAllocation(model_, eval_tensors_, - &scratch_buffer_handles_)); - // TODO(b/16157777): Remove this when ContextHelper is rolled into this class. - context_helper_.SetScratchBufferHandles(scratch_buffer_handles_); - - // TODO(b/162311891): Drop these allocations when the interpreter supports - // handling buffers from TfLiteEvalTensor. 
- input_tensors_ = - reinterpret_cast(allocator_.AllocatePersistentBuffer( - sizeof(TfLiteTensor*) * inputs_size())); - if (input_tensors_ == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for context->input_tensors_, " - "%d bytes required", - sizeof(TfLiteTensor*) * inputs_size()); - return kTfLiteError; - } - - for (size_t i = 0; i < inputs_size(); ++i) { - input_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( - model_, eval_tensors_, inputs().Get(i)); - if (input_tensors_[i] == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to initialize input tensor %d", i); - return kTfLiteError; - } - } - - // TODO(b/162311891): Drop these allocations when the interpreter supports - // handling buffers from TfLiteEvalTensor. - output_tensors_ = - reinterpret_cast(allocator_.AllocatePersistentBuffer( - sizeof(TfLiteTensor*) * outputs_size())); - if (output_tensors_ == nullptr) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to allocate memory for context->output_tensors_, " - "%d bytes required", - sizeof(TfLiteTensor*) * outputs_size()); - return kTfLiteError; - } - - for (size_t i = 0; i < outputs_size(); ++i) { - output_tensors_[i] = allocator_.AllocatePersistentTfLiteTensor( - model_, eval_tensors_, outputs().Get(i)); - if (output_tensors_[i] == nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to initialize output tensor %d", i); - return kTfLiteError; - } - } - - TF_LITE_ENSURE_STATUS(ResetVariableTensors()); - - tensors_allocated_ = true; - return kTfLiteOk; -} - -TfLiteStatus MicroInterpreter::Invoke() { - if (initialization_status_ != kTfLiteOk) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Invoke() called after initialization failed\n"); - return kTfLiteError; - } - - // Ensure tensors are allocated before the interpreter is invoked to avoid - // difficult to debug segfaults. - if (!tensors_allocated_) { - TF_LITE_ENSURE_OK(&context_, AllocateTensors()); - } - - for (size_t i = 0; i < subgraph_->operators()->size(); ++i) { - auto* node = &(node_and_registrations_[i].node); - auto* registration = node_and_registrations_[i].registration; - -// This ifdef is needed (even though ScopedMicroProfiler itself is a no-op with -// -DTF_LITE_STRIP_ERROR_STRINGS) because the function OpNameFromRegistration is -// only defined for builds with the error strings. -#if !defined(TF_LITE_STRIP_ERROR_STRINGS) - ScopedMicroProfiler scoped_profiler( - OpNameFromRegistration(registration), - reinterpret_cast(context_.profiler)); -#endif - - TFLITE_DCHECK(registration->invoke); - TfLiteStatus invoke_status = registration->invoke(&context_, node); - - // All TfLiteTensor structs used in the kernel are allocated from temp - // memory in the allocator. This creates a chain of allocations in the - // temp section. The call below resets the chain of allocations to - // prepare for the next call. 
- allocator_.ResetTempAllocations(); - - if (invoke_status == kTfLiteError) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Node %s (number %d) failed to invoke with status %d", - OpNameFromRegistration(registration), i, invoke_status); - return kTfLiteError; - } else if (invoke_status != kTfLiteOk) { - return invoke_status; - } - } - - return kTfLiteOk; -} - -TfLiteTensor* MicroInterpreter::input(size_t index) { - const size_t length = inputs_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Input index %d out of range (length is %d)", index, - length); - return nullptr; - } - return input_tensors_[index]; -} - -TfLiteTensor* MicroInterpreter::output(size_t index) { - const size_t length = outputs_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Output index %d out of range (length is %d)", index, - length); - return nullptr; - } - return output_tensors_[index]; -} - -TfLiteTensor* MicroInterpreter::tensor(size_t index) { - const size_t length = tensors_size(); - if (index >= length) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Tensor index %d out of range (length is %d)", index, - length); - return nullptr; - } - return allocator_.AllocatePersistentTfLiteTensor(model_, eval_tensors_, - index); -} - -TfLiteStatus MicroInterpreter::ResetVariableTensors() { - for (size_t i = 0; i < subgraph_->tensors()->size(); ++i) { - auto* tensor = subgraph_->tensors()->Get(i); - if (tensor->is_variable()) { - size_t buffer_size; - TF_LITE_ENSURE_STATUS( - TfLiteEvalTensorByteLength(&eval_tensors_[i], &buffer_size)); - - int value = 0; - if (tensor->type() == tflite::TensorType_INT8) { - value = tensor->quantization()->zero_point()->Get(0); - } - memset(eval_tensors_[i].data.raw, value, buffer_size); - } - } - - return kTfLiteOk; -} - -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h index 93026c5..5901372 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,74 +19,39 @@ limitations under the License. 
#include #include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_context.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler_interface.h" #include "edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h" -// Copied from tensorflow/lite/version.h to avoid a dependency chain into +/// Copied from tensorflow/lite/version.h to avoid a dependency chain into // tensorflow/core. #define TFLITE_SCHEMA_VERSION (3) namespace tflite { -namespace internal { - -// A helper class to encapsulate the implementation of APIs in Context. -// context->impl_ points to an instance of this class. -// Check tensorflow/lite/c/common.h for detailed descriptions. -// TODO(b/16157777): Consider rolling this class into MicroInterpreter. -class ContextHelper { - public: - explicit ContextHelper(ErrorReporter* error_reporter, - MicroAllocator* allocator, const Model* model); - - // Functions that will be assigned to function pointers on TfLiteContext: - static void* AllocatePersistentBuffer(TfLiteContext* ctx, size_t bytes); - static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* ctx, - size_t bytes, - int* buffer_idx); - static void* GetScratchBuffer(TfLiteContext* ctx, int buffer_idx); - static void ReportOpError(struct TfLiteContext* context, const char* format, - ...); - static TfLiteTensor* GetTensor(const struct TfLiteContext* context, - int tensor_idx); - static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context, - int tensor_idx); - - // Sets the pointer to a list of TfLiteEvalTensor instances. - void SetTfLiteEvalTensors(TfLiteEvalTensor* eval_tensors); - - // Sets the pointer to a list of ScratchBufferHandle instances. - void SetScratchBufferHandles(ScratchBufferHandle* scratch_buffer_handles); - - private: - MicroAllocator* allocator_ = nullptr; - ErrorReporter* error_reporter_ = nullptr; - const Model* model_ = nullptr; - TfLiteEvalTensor* eval_tensors_ = nullptr; - ScratchBufferHandle* scratch_buffer_handles_ = nullptr; -}; - -} // namespace internal - class MicroInterpreter { public: - // The lifetime of the model, op resolver, tensor arena, error reporter and - // profiler must be at least as long as that of the interpreter object, since - // the interpreter may need to access them at any time. This means that you - // should usually create them with the same scope as each other, for example - // having them all allocated on the stack as local variables through a - // top-level function. The interpreter doesn't do any deallocation of any of - // the pointed-to objects, ownership remains with the caller. 
+ // The lifetime of the model, op resolver, tensor arena, error reporter, + // resource variables, and profiler must be at least as long as that of the + // interpreter object, since the interpreter may need to access them at any + // time. This means that you should usually create them with the same scope as + // each other, for example having them all allocated on the stack as local + // variables through a top-level function. The interpreter doesn't do any + // deallocation of any of the pointed-to objects, ownership remains with the + // caller. MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, uint8_t* tensor_arena, size_t tensor_arena_size, - ErrorReporter* error_reporter, - MicroProfiler* profiler = nullptr); + MicroResourceVariables* resource_variables = nullptr, + MicroProfilerInterface* profiler = nullptr); // Create an interpreter instance using an existing MicroAllocator instance. // This constructor should be used when creating an allocator that needs to @@ -94,22 +59,31 @@ class MicroInterpreter { // allocations inside the interpreter. The lifetime of the allocator must be // as long as that of the interpreter object. MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, - MicroAllocator* allocator, ErrorReporter* error_reporter, - MicroProfiler* profiler = nullptr); + MicroAllocator* allocator, + MicroResourceVariables* resource_variables = nullptr, + MicroProfilerInterface* profiler = nullptr); ~MicroInterpreter(); // Runs through the model and allocates all necessary input, output and // intermediate tensors. - TfLiteStatus AllocateTensors(); + TfLiteStatus AllocateTensors(bool run_all_prep_ops); // In order to support partial graph runs for strided models, this can return // values other than kTfLiteOk and kTfLiteError. // TODO(b/149795762): Add this to the TfLiteStatus enum. TfLiteStatus Invoke(); - size_t tensors_size() const { return context_.tensors_size; } - TfLiteTensor* tensor(size_t tensor_index); + // This is the recommended API for an application to pass an external payload + // pointer as an external context to kernels. The life time of the payload + // pointer should be at least as long as this interpreter. TFLM supports only + // one external context. 
+ TfLiteStatus SetMicroExternalContext(void* external_context_payload); + + size_t tensors_size(size_t subgraph_idx = 0) const { return model_->subgraphs()->Get(subgraph_idx)->tensors()->size(); } + + TfLiteTensor* tensor(size_t tensor_index, size_t subgraph_idx = 0); + template T* typed_tensor(int tensor_index) { if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) { @@ -121,9 +95,11 @@ class MicroInterpreter { } TfLiteTensor* input(size_t index); - size_t inputs_size() const { return subgraph_->inputs()->Length(); } + size_t inputs_size() const { + return model_->subgraphs()->Get(0)->inputs()->size(); + } const flatbuffers::Vector& inputs() const { - return *subgraph_->inputs(); + return *model_->subgraphs()->Get(0)->inputs(); } TfLiteTensor* input_tensor(size_t index) { return input(index); } template @@ -137,9 +113,11 @@ class MicroInterpreter { } TfLiteTensor* output(size_t index); - size_t outputs_size() const { return subgraph_->outputs()->Length(); } + size_t outputs_size() const { + return model_->subgraphs()->Get(0)->outputs()->size(); + } const flatbuffers::Vector& outputs() const { - return *subgraph_->outputs(); + return *model_->subgraphs()->Get(0)->outputs(); } TfLiteTensor* output_tensor(size_t index) { return output(index); } template @@ -152,17 +130,31 @@ class MicroInterpreter { return nullptr; } - // Reset all variable tensors to the default value. - TfLiteStatus ResetVariableTensors(); + // Reset the state to be what you would expect when the interpreter is first + // created. i.e. after Init and Prepare is called for the very first time. + TfLiteStatus Reset(); TfLiteStatus initialization_status() const { return initialization_status_; } - size_t operators_size() const { return subgraph_->operators()->size(); } +#ifdef EON_COMPILER_RUN + NodeAndRegistration* node_and_registrations_ = nullptr; - // For debugging only. - const NodeAndRegistration node_and_registration(int node_index) const { - return node_and_registrations_[node_index]; + size_t operators_size(uint32_t subgraph_idx = 0) const + { + return model_->subgraphs()->Get(subgraph_idx)->operators()->size(); + } + + const NodeAndRegistration node_and_registration(int node_index, int sg) + { + return graph_.GetAllocations()[sg].node_and_registrations[node_index]; } +#endif + + // Populates node and registration pointers representing the inference graph + // of the model from values inside the flatbuffer (loaded from the TfLiteModel + // instance). Persistent data (e.g. operator data) is allocated from the + // arena. + TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer(); // For debugging only. // Returns the actual used arena in bytes. This method gives the optimal arena @@ -179,30 +171,28 @@ class MicroInterpreter { private: // TODO(b/158263161): Consider switching to Create() function to enable better // error reporting during initialization. - void Init(MicroProfiler* profiler); + void Init(MicroProfilerInterface* profiler); - NodeAndRegistration* node_and_registrations_ = nullptr; + // Gets the current subgraph index used from within context methods. 
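As an aside on the accessors declared here, SetMicroExternalContext() and the subgraph-aware tensors_size()/tensor() can be exercised as in the hedged fragment below; the InferenceContext struct and AttachContext helper are hypothetical names, and the interpreter is assumed to already exist.

#include <cstddef>

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h"

struct InferenceContext {  // hypothetical application-owned payload
  int invocation_count = 0;
};

void AttachContext(tflite::MicroInterpreter& interpreter,
                   InferenceContext& ctx) {
  // TFLM keeps a single external context; ctx must outlive the interpreter.
  interpreter.SetMicroExternalContext(&ctx);

  // tensors_size() and tensor() are now subgraph-aware (0 = main subgraph).
  const size_t count = interpreter.tensors_size(/*subgraph_idx=*/0);
  if (count > 0) {
    // Note: each tensor() call allocates a persistent TfLiteTensor from the
    // arena (see micro_interpreter.cc earlier in this patch).
    TfLiteTensor* first = interpreter.tensor(/*tensor_index=*/0,
                                             /*subgraph_idx=*/0);
    (void)first;
  }
}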
+ int get_subgraph_index() { return graph_.GetCurrentSubgraphIndex(); } const Model* model_; const MicroOpResolver& op_resolver_; - ErrorReporter* error_reporter_; TfLiteContext context_ = {}; MicroAllocator& allocator_; + MicroGraph graph_; bool tensors_allocated_; TfLiteStatus initialization_status_; - const SubGraph* subgraph_ = nullptr; - TfLiteEvalTensor* eval_tensors_ = nullptr; ScratchBufferHandle* scratch_buffer_handles_ = nullptr; - // TODO(b/16157777): Drop this reference: - internal::ContextHelper context_helper_; - // TODO(b/162311891): Clean these pointers up when this class supports buffers // from TfLiteEvalTensor. TfLiteTensor** input_tensors_; TfLiteTensor** output_tensors_; + + MicroContext micro_context_; }; } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_log.cc similarity index 72% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_log.cc index 00a88be..26282ca 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_log.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" #include #include @@ -24,10 +24,6 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/micro/micro_string.h" #endif -namespace { -uint8_t micro_error_reporter_buffer[sizeof(tflite::MicroErrorReporter)]; -tflite::MicroErrorReporter* error_reporter_ = nullptr; - void Log(const char* format, va_list args) { #if !defined(TF_LITE_STRIP_ERROR_STRINGS) // Only pulling in the implementation of this function for builds where we @@ -41,8 +37,6 @@ void Log(const char* format, va_list args) { #endif } -} // namespace - #if !defined(TF_LITE_STRIP_ERROR_STRINGS) void MicroPrintf(const char* format, ...) { va_list args; @@ -51,18 +45,3 @@ void MicroPrintf(const char* format, ...) { va_end(args); } #endif - -namespace tflite { -ErrorReporter* GetMicroErrorReporter() { - if (error_reporter_ == nullptr) { - error_reporter_ = new (micro_error_reporter_buffer) MicroErrorReporter(); - } - return error_reporter_; -} - -int MicroErrorReporter::Report(const char* format, va_list args) { - Log(format, args); - return 0; -} - -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_log.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_log.h new file mode 100644 index 0000000..22cceb2 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_log.h @@ -0,0 +1,49 @@ +/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_MICRO_LOG_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_LOG_H_ + +#include + +// do this by default except when running EON compiler +#ifndef EON_COMPILER_RUN +#define TF_LITE_STRIP_ERROR_STRINGS +#endif + +// This is a free function used to perform the actual logging. +// This function will be used by MicroPrintf and MicroErrorReporter::Report() +void Log(const char* format, va_list args); + +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) +// This function can be used independent of the MicroErrorReporter to get +// printf-like functionalitys and are common to all target platforms. +void MicroPrintf(const char* format, ...); +#else +// We use a #define to ensure that the strings are completely stripped, to +// prevent an unnecessary increase in the binary size. +#define MicroPrintf(...) tflite::Unused(__VA_ARGS__) +#endif + +namespace tflite { + +// From +// https://stackoverflow.com/questions/23235910/variadic-unused-function-macro +template +void Unused(Args&&... args) { + (void)(sizeof...(args)); +} +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_LOG_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h index db2fc10..798787a 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -19,27 +19,33 @@ limitations under the License. 
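With the error-reporter singleton gone, logging call sites move to MicroPrintf from the micro_log.h header above. A minimal hedged sketch, with an illustrative message and helper name:

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h"

void ReportArenaFailure(int requested_bytes) {
  // Previously this went through TF_LITE_REPORT_ERROR(error_reporter, ...).
  // Now no reporter object is needed. Unless EON_COMPILER_RUN is defined,
  // micro_log.h defines TF_LITE_STRIP_ERROR_STRINGS, so this call expands to
  // tflite::Unused(...) and the format string is not linked into the binary.
  MicroPrintf("alloc of %d bytes failed", requested_bytes);
}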
#include #include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" #include "edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/add.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/conv.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/depthwise_conv.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/ethosu.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/tree_ensemble_classifier.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/fully_connected.h" #include "edge-impulse-sdk/tensorflow/lite/micro/kernels/micro_ops.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/pooling.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/reduce.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/softmax.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" namespace tflite { -// TfLiteRegistration* Register_DETECTION_POSTPROCESS(); +TfLiteRegistration* Register_DETECTION_POSTPROCESS(); template class MicroMutableOpResolver : public MicroOpResolver { public: TF_LITE_REMOVE_VIRTUAL_DELETE - explicit MicroMutableOpResolver(ErrorReporter* error_reporter = nullptr) - : error_reporter_(error_reporter) {} + explicit MicroMutableOpResolver() {} const TfLiteRegistration* FindOp(tflite::BuiltinOperator op) const override { if (op == BuiltinOperator_CUSTOM) return nullptr; @@ -64,7 +70,7 @@ class MicroMutableOpResolver : public MicroOpResolver { return nullptr; } - MicroOpResolver::BuiltinParseFunction GetOpDataParser( + TfLiteBridgeBuiltinParseFunction GetOpDataParser( BuiltinOperator op) const override { TFLITE_DCHECK(num_buitin_ops_ <= tOpCount); for (unsigned int i = 0; i < num_buitin_ops_; ++i) { @@ -81,22 +87,16 @@ class MicroMutableOpResolver : public MicroOpResolver { // kTfLiteError. 
TfLiteStatus AddCustom(const char* name, TfLiteRegistration* registration) { if (registrations_len_ >= tOpCount) { - if (error_reporter_) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Couldn't register custom op '%s', resolver size is too small (%d)", - name, tOpCount); - } + MicroPrintf( + "Couldn't register custom op '%s', resolver size is too" + "small (%d)", + name, tOpCount); return kTfLiteError; } if (FindOp(name) != nullptr) { - if (error_reporter_ != nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Calling AddCustom for the same op more than once " - "is not supported (Op: %s).", - name); - } + MicroPrintf("Calling AddCustom for the same op more than once "); + MicroPrintf("is not supported (Op: %s).", name); return kTfLiteError; } @@ -117,9 +117,8 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseAbs); } - TfLiteStatus AddAdd() { - return AddBuiltin(BuiltinOperator_ADD, tflite::ops::micro::Register_ADD(), - ParseAdd); + TfLiteStatus AddAdd(const TfLiteRegistration& registration = Register_ADD()) { + return AddBuiltin(BuiltinOperator_ADD, registration, ParseAdd); } TfLiteStatus AddAddN() { @@ -128,19 +127,26 @@ class MicroMutableOpResolver : public MicroOpResolver { } TfLiteStatus AddArgMax() { - return AddBuiltin(BuiltinOperator_ARG_MAX, - tflite::ops::micro::Register_ARG_MAX(), ParseArgMax); + return AddBuiltin(BuiltinOperator_ARG_MAX, Register_ARG_MAX(), ParseArgMax); } TfLiteStatus AddArgMin() { - return AddBuiltin(BuiltinOperator_ARG_MIN, - tflite::ops::micro::Register_ARG_MIN(), ParseArgMin); + return AddBuiltin(BuiltinOperator_ARG_MIN, Register_ARG_MIN(), ParseArgMin); } - TfLiteStatus AddAveragePool2D() { - return AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, - tflite::ops::micro::Register_AVERAGE_POOL_2D(), - ParsePool); + TfLiteStatus AddAssignVariable() { + return AddBuiltin(BuiltinOperator_ASSIGN_VARIABLE, + tflite::Register_ASSIGN_VARIABLE(), ParseAssignVariable); + } + + TfLiteStatus AddAveragePool2D( + const TfLiteRegistration& registration = Register_AVERAGE_POOL_2D()) { + return AddBuiltin(BuiltinOperator_AVERAGE_POOL_2D, registration, ParsePool); + } + + TfLiteStatus AddBatchMatMul() { + return AddBuiltin(BuiltinOperator_BATCH_MATMUL, + Register_BATCH_MATMUL(), ParseBatchMatMul); } TfLiteStatus AddBatchToSpaceNd() { @@ -148,28 +154,46 @@ class MicroMutableOpResolver : public MicroOpResolver { Register_BATCH_TO_SPACE_ND(), ParseBatchToSpaceNd); } + TfLiteStatus AddBroadcastArgs() { + return AddBuiltin(BuiltinOperator_BROADCAST_ARGS, Register_BROADCAST_ARGS(), + ParseBroadcastArgs); + } + + TfLiteStatus AddBroadcastTo() { + return AddBuiltin(BuiltinOperator_BROADCAST_TO, Register_BROADCAST_TO(), + ParseBroadcastTo); + } + + TfLiteStatus AddCallOnce() { + return AddBuiltin(BuiltinOperator_CALL_ONCE, Register_CALL_ONCE(), + ParseCallOnce); + } + TfLiteStatus AddCast() { return AddBuiltin(BuiltinOperator_CAST, Register_CAST(), ParseCast); } TfLiteStatus AddCeil() { - return AddBuiltin(BuiltinOperator_CEIL, tflite::ops::micro::Register_CEIL(), - ParseCeil); + return AddBuiltin(BuiltinOperator_CEIL, Register_CEIL(), ParseCeil); + } + + TfLiteStatus AddComplexAbs() { + return AddBuiltin(BuiltinOperator_COMPLEX_ABS, Register_COMPLEX_ABS(), + ParseComplexAbs); } TfLiteStatus AddCircularBuffer() { - return AddCustom("CIRCULAR_BUFFER", - tflite::ops::micro::Register_CIRCULAR_BUFFER()); + return AddCustom("CIRCULAR_BUFFER", tflite::Register_CIRCULAR_BUFFER()); } TfLiteStatus AddConcatenation() { - return AddBuiltin(BuiltinOperator_CONCATENATION, - 
tflite::ops::micro::Register_CONCATENATION(), + return AddBuiltin(BuiltinOperator_CONCATENATION, Register_CONCATENATION(), ParseConcatenation); } - TfLiteStatus AddConv2D() { - return AddBuiltin(BuiltinOperator_CONV_2D, Register_CONV_2D(), ParseConv2D); + TfLiteStatus AddConv2D( + const TfLiteRegistration& registration = Register_CONV_2D()) { + return AddBuiltin(BuiltinOperator_CONV_2D, registration, ParseConv2D); } TfLiteStatus AddCos() { @@ -177,21 +201,31 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseCos); } - TfLiteStatus AddDepthwiseConv2D() { - return AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, - Register_DEPTHWISE_CONV_2D(), ParseDepthwiseConv2D); + TfLiteStatus AddCumSum() { + return AddBuiltin(BuiltinOperator_CUMSUM, tflite::Register_CUMSUM(), + ParseCumsum); + } + + TfLiteStatus AddDepthToSpace() { + return AddBuiltin(BuiltinOperator_DEPTH_TO_SPACE, + tflite::Register_DEPTH_TO_SPACE(), ParseDepthToSpace); + } + + TfLiteStatus AddDepthwiseConv2D( + const TfLiteRegistration& registration = Register_DEPTHWISE_CONV_2D()) { + return AddBuiltin(BuiltinOperator_DEPTHWISE_CONV_2D, registration, + ParseDepthwiseConv2D); } TfLiteStatus AddDequantize() { - return AddBuiltin(BuiltinOperator_DEQUANTIZE, - tflite::ops::micro::Register_DEQUANTIZE(), + return AddBuiltin(BuiltinOperator_DEQUANTIZE, tflite::Register_DEQUANTIZE(), ParseDequantize); } - // TfLiteStatus AddDetectionPostprocess() { - // return AddCustom("TFLite_Detection_PostProcess", - // tflite::Register_DETECTION_POSTPROCESS()); - // } + TfLiteStatus AddDetectionPostprocess() { + return AddCustom("TFLite_Detection_PostProcess", + tflite::Register_DETECTION_POSTPROCESS()); + } TfLiteStatus AddDiv() { return AddBuiltin(BuiltinOperator_DIV, tflite::Register_DIV(), ParseDiv); @@ -202,8 +236,7 @@ class MicroMutableOpResolver : public MicroOpResolver { } TfLiteStatus AddEqual() { - return AddBuiltin(BuiltinOperator_EQUAL, - tflite::ops::micro::Register_EQUAL(), ParseEqual); + return AddBuiltin(BuiltinOperator_EQUAL, Register_EQUAL(), ParseEqual); } TfLiteStatus AddEthosU() { @@ -223,9 +256,22 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseExpandDims); } + TfLiteStatus AddFill() { + return AddBuiltin(BuiltinOperator_FILL, tflite::Register_FILL(), ParseFill); + } + TfLiteStatus AddFloor() { - return AddBuiltin(BuiltinOperator_FLOOR, - tflite::ops::micro::Register_FLOOR(), ParseFloor); + return AddBuiltin(BuiltinOperator_FLOOR, Register_FLOOR(), ParseFloor); + } + + TfLiteStatus AddFloorDiv() { + return AddBuiltin(BuiltinOperator_FLOOR_DIV, tflite::Register_FLOOR_DIV(), + ParseFloorDiv); + } + + TfLiteStatus AddFloorMod() { + return AddBuiltin(BuiltinOperator_FLOOR_MOD, tflite::Register_FLOOR_MOD(), + ParseFloorMod); } TfLiteStatus AddFullyConnected( @@ -234,23 +280,42 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseFullyConnected); } +#ifndef TF_LITE_STATIC_MEMORY + TfLiteStatus AddGather() { + return AddBuiltin(BuiltinOperator_GATHER, tflite::Register_GATHER(), + ParseGather); + } +#endif + + TfLiteStatus AddGatherNd() { + return AddBuiltin(BuiltinOperator_GATHER_ND, tflite::Register_GATHER_ND(), + ParseGatherNd); + } + TfLiteStatus AddGreater() { - return AddBuiltin(BuiltinOperator_GREATER, - tflite::ops::micro::Register_GREATER(), ParseGreater); + return AddBuiltin(BuiltinOperator_GREATER, Register_GREATER(), + ParseGreater); } TfLiteStatus AddGreaterEqual() { - return AddBuiltin(BuiltinOperator_GREATER_EQUAL, - tflite::ops::micro::Register_GREATER_EQUAL(), + return 
AddBuiltin(BuiltinOperator_GREATER_EQUAL, Register_GREATER_EQUAL(), ParseGreaterEqual); } TfLiteStatus AddHardSwish() { - return AddBuiltin(BuiltinOperator_HARD_SWISH, - tflite::ops::micro::Register_HARD_SWISH(), + return AddBuiltin(BuiltinOperator_HARD_SWISH, tflite::Register_HARD_SWISH(), ParseHardSwish); } + TfLiteStatus AddImag() { + return AddBuiltin(BuiltinOperator_IMAG, Register_IMAG(), + ParseImag); + } + + TfLiteStatus AddIf() { + return AddBuiltin(BuiltinOperator_IF, tflite::Register_IF(), ParseIf); + } + TfLiteStatus AddL2Normalization() { return AddBuiltin(BuiltinOperator_L2_NORMALIZATION, tflite::ops::micro::Register_L2_NORMALIZATION(), @@ -268,13 +333,11 @@ class MicroMutableOpResolver : public MicroOpResolver { } TfLiteStatus AddLess() { - return AddBuiltin(BuiltinOperator_LESS, tflite::ops::micro::Register_LESS(), - ParseLess); + return AddBuiltin(BuiltinOperator_LESS, Register_LESS(), ParseLess); } TfLiteStatus AddLessEqual() { - return AddBuiltin(BuiltinOperator_LESS_EQUAL, - tflite::ops::micro::Register_LESS_EQUAL(), + return AddBuiltin(BuiltinOperator_LESS_EQUAL, Register_LESS_EQUAL(), ParseLessEqual); } @@ -285,8 +348,7 @@ class MicroMutableOpResolver : public MicroOpResolver { TfLiteStatus AddLogicalAnd() { return AddBuiltin(BuiltinOperator_LOGICAL_AND, - tflite::ops::micro::Register_LOGICAL_AND(), - ParseLogicalAnd); + tflite::Register_LOGICAL_AND(), ParseLogicalAnd); } TfLiteStatus AddLogicalNot() { @@ -296,69 +358,72 @@ class MicroMutableOpResolver : public MicroOpResolver { } TfLiteStatus AddLogicalOr() { - return AddBuiltin(BuiltinOperator_LOGICAL_OR, - tflite::ops::micro::Register_LOGICAL_OR(), + return AddBuiltin(BuiltinOperator_LOGICAL_OR, tflite::Register_LOGICAL_OR(), ParseLogicalOr); } TfLiteStatus AddLogistic() { - return AddBuiltin(BuiltinOperator_LOGISTIC, - tflite::ops::micro::Register_LOGISTIC(), ParseLogistic); + return AddBuiltin(BuiltinOperator_LOGISTIC, tflite::Register_LOGISTIC(), + ParseLogistic); + } + + TfLiteStatus AddLogSoftmax() { + return AddBuiltin(BuiltinOperator_LOG_SOFTMAX, + tflite::Register_LOG_SOFTMAX(), ParseLogSoftmax); } TfLiteStatus AddMaximum() { - return AddBuiltin(BuiltinOperator_MAXIMUM, - tflite::ops::micro::Register_MAXIMUM(), ParseMaximum); + return AddBuiltin(BuiltinOperator_MAXIMUM, Register_MAXIMUM(), + ParseMaximum); + } + + TfLiteStatus AddMaxPool2D( + const TfLiteRegistration& registration = Register_MAX_POOL_2D()) { + return AddBuiltin(BuiltinOperator_MAX_POOL_2D, registration, ParsePool); } - TfLiteStatus AddMaxPool2D() { - return AddBuiltin(BuiltinOperator_MAX_POOL_2D, - tflite::ops::micro::Register_MAX_POOL_2D(), ParsePool); + TfLiteStatus AddMirrorPad() { + return AddBuiltin(BuiltinOperator_MIRROR_PAD, tflite::Register_MIRROR_PAD(), + ParseMirrorPad); } TfLiteStatus AddMean() { - return AddBuiltin(BuiltinOperator_MEAN, tflite::ops::micro::Register_MEAN(), - ParseReducer); + return AddBuiltin(BuiltinOperator_MEAN, Register_MEAN(), ParseReducer); } TfLiteStatus AddMinimum() { - return AddBuiltin(BuiltinOperator_MINIMUM, - tflite::ops::micro::Register_MINIMUM(), ParseMinimum); + return AddBuiltin(BuiltinOperator_MINIMUM, Register_MINIMUM(), + ParseMinimum); } - TfLiteStatus AddMul() { - return AddBuiltin(BuiltinOperator_MUL, tflite::ops::micro::Register_MUL(), - ParseMul); + TfLiteStatus AddMul(const TfLiteRegistration& registration = Register_MUL()) { + return AddBuiltin(BuiltinOperator_MUL, registration, ParseMul); } TfLiteStatus AddNeg() { - return AddBuiltin(BuiltinOperator_NEG, 
tflite::ops::micro::Register_NEG(), - ParseNeg); + return AddBuiltin(BuiltinOperator_NEG, Register_NEG(), ParseNeg); } TfLiteStatus AddNotEqual() { - return AddBuiltin(BuiltinOperator_NOT_EQUAL, - tflite::ops::micro::Register_NOT_EQUAL(), ParseNotEqual); + return AddBuiltin(BuiltinOperator_NOT_EQUAL, Register_NOT_EQUAL(), + ParseNotEqual); } TfLiteStatus AddPack() { - return AddBuiltin(BuiltinOperator_PACK, tflite::ops::micro::Register_PACK(), - ParsePack); + return AddBuiltin(BuiltinOperator_PACK, Register_PACK(), ParsePack); } - TfLiteStatus AddPad() { - return AddBuiltin(BuiltinOperator_PAD, tflite::ops::micro::Register_PAD(), - ParsePad); + TfLiteStatus AddPad(const TfLiteRegistration& registration = Register_PAD()) { + return AddBuiltin(BuiltinOperator_PAD, registration, ParsePad); } TfLiteStatus AddPadV2() { - return AddBuiltin(BuiltinOperator_PADV2, - tflite::ops::micro::Register_PADV2(), ParsePadV2); + return AddBuiltin(BuiltinOperator_PADV2, Register_PADV2(), ParsePadV2); } TfLiteStatus AddPrelu() { - return AddBuiltin(BuiltinOperator_PRELU, - tflite::ops::micro::Register_PRELU(), ParsePrelu); + return AddBuiltin(BuiltinOperator_PRELU, tflite::Register_PRELU(), + ParsePrelu); } TfLiteStatus AddQuantize() { @@ -366,19 +431,33 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseQuantize); } + TfLiteStatus AddReal() { + return AddBuiltin(BuiltinOperator_REAL, Register_REAL(), + ParseReal); + } + + TfLiteStatus AddReadVariable() { + return AddBuiltin(BuiltinOperator_READ_VARIABLE, + tflite::Register_READ_VARIABLE(), ParseReadVariable); + } + TfLiteStatus AddReduceMax() { - return AddBuiltin(BuiltinOperator_REDUCE_MAX, - tflite::ops::micro::Register_REDUCE_MAX(), ParseReducer); + return AddBuiltin(BuiltinOperator_REDUCE_MAX, Register_REDUCE_MAX(), + ParseReducer); + } + + TfLiteStatus AddReduceMin() { + return AddBuiltin(BuiltinOperator_REDUCE_MIN, Register_REDUCE_MIN(), + ParseReducer); } TfLiteStatus AddRelu() { - return AddBuiltin(BuiltinOperator_RELU, tflite::ops::micro::Register_RELU(), - ParseRelu); + return AddBuiltin(BuiltinOperator_RELU, tflite::Register_RELU(), ParseRelu); } TfLiteStatus AddRelu6() { - return AddBuiltin(BuiltinOperator_RELU6, - tflite::ops::micro::Register_RELU6(), ParseRelu6); + return AddBuiltin(BuiltinOperator_RELU6, tflite::Register_RELU6(), + ParseRelu6); } TfLiteStatus AddReshape() { @@ -386,12 +465,22 @@ class MicroMutableOpResolver : public MicroOpResolver { tflite::ops::micro::Register_RESHAPE(), ParseReshape); } + TfLiteStatus AddResizeBilinear() { + return AddBuiltin(BuiltinOperator_RESIZE_BILINEAR, + Register_RESIZE_BILINEAR(), ParseResizeBilinear); + } + TfLiteStatus AddResizeNearestNeighbor() { return AddBuiltin(BuiltinOperator_RESIZE_NEAREST_NEIGHBOR, tflite::ops::micro::Register_RESIZE_NEAREST_NEIGHBOR(), ParseResizeNearestNeighbor); } + TfLiteStatus AddRfft2D() { + return AddBuiltin(BuiltinOperator_RFFT2D, Register_RFFT2D(), + ParseRfft2D); + } + TfLiteStatus AddRound() { return AddBuiltin(BuiltinOperator_ROUND, tflite::ops::micro::Register_ROUND(), ParseRound); @@ -402,6 +491,18 @@ class MicroMutableOpResolver : public MicroOpResolver { tflite::ops::micro::Register_RSQRT(), ParseRsqrt); } +#ifndef TF_LITE_STATIC_MEMORY + TfLiteStatus AddSelect() { + return AddBuiltin(BuiltinOperator_SELECT, Register_SELECT(), + ParseSelect); + } + + TfLiteStatus AddSelectV2() { + return AddBuiltin(BuiltinOperator_SELECT_V2, Register_SELECT_V2(), + ParseSelect); + } +#endif // TF_LITE_STATIC_MEMORY + TfLiteStatus AddShape() { return 
AddBuiltin(BuiltinOperator_SHAPE, Register_SHAPE(), ParseShape); } @@ -411,9 +512,13 @@ class MicroMutableOpResolver : public MicroOpResolver { ParseSin); } - TfLiteStatus AddSoftmax() { - return AddBuiltin(BuiltinOperator_SOFTMAX, Register_SOFTMAX(), - ParseSoftmax); + TfLiteStatus AddSlice() { + return AddBuiltin(BuiltinOperator_SLICE, Register_SLICE(), ParseSlice); + } + + TfLiteStatus AddSoftmax( + const TfLiteRegistration& registration = Register_SOFTMAX()) { + return AddBuiltin(BuiltinOperator_SOFTMAX, registration, ParseSoftmax); } TfLiteStatus AddSpaceToBatchNd() { @@ -421,14 +526,18 @@ class MicroMutableOpResolver : public MicroOpResolver { Register_SPACE_TO_BATCH_ND(), ParseSpaceToBatchNd); } + TfLiteStatus AddSpaceToDepth() { + return AddBuiltin(BuiltinOperator_SPACE_TO_DEPTH, Register_SPACE_TO_DEPTH(), + ParseSpaceToDepth); + } + TfLiteStatus AddSplit() { return AddBuiltin(BuiltinOperator_SPLIT, tflite::ops::micro::Register_SPLIT(), ParseSplit); } TfLiteStatus AddSplitV() { - return AddBuiltin(BuiltinOperator_SPLIT_V, - tflite::ops::micro::Register_SPLIT_V(), ParseSplitV); + return AddBuiltin(BuiltinOperator_SPLIT_V, Register_SPLIT_V(), ParseSplitV); } TfLiteStatus AddSqueeze() { @@ -446,19 +555,28 @@ class MicroMutableOpResolver : public MicroOpResolver { tflite::ops::micro::Register_SQUARE(), ParseSquare); } + TfLiteStatus AddSquaredDifference() { + return AddBuiltin(BuiltinOperator_SQUARED_DIFFERENCE, + tflite::Register_SQUARED_DIFFERENCE(), + ParseSquaredDifference); + } + TfLiteStatus AddStridedSlice() { - return AddBuiltin(BuiltinOperator_STRIDED_SLICE, - tflite::ops::micro::Register_STRIDED_SLICE(), + return AddBuiltin(BuiltinOperator_STRIDED_SLICE, Register_STRIDED_SLICE(), ParseStridedSlice); } TfLiteStatus AddSub() { - return AddBuiltin(BuiltinOperator_SUB, tflite::ops::micro::Register_SUB(), - ParseSub); + return AddBuiltin(BuiltinOperator_SUB, tflite::Register_SUB(), ParseSub); + } + + TfLiteStatus AddSum() { + return AddBuiltin(BuiltinOperator_SUM, Register_SUM(), ParseReducer); } - TfLiteStatus AddSvdf() { - return AddBuiltin(BuiltinOperator_SVDF, Register_SVDF(), ParseSvdf); + TfLiteStatus AddSvdf( + const TfLiteRegistration& registration = Register_SVDF()) { + return AddBuiltin(BuiltinOperator_SVDF, registration, ParseSvdf); } TfLiteStatus AddTanh() { @@ -471,11 +589,36 @@ class MicroMutableOpResolver : public MicroOpResolver { tflite::Register_TRANSPOSE_CONV(), ParseTransposeConv); } + TfLiteStatus AddTranspose() { + return AddBuiltin(BuiltinOperator_TRANSPOSE, Register_TRANSPOSE(), + ParseTranspose); + } + + TfLiteStatus AddTreeEnsembleClassifier() { + return AddCustom(tflite::GetString_TreeEnsembleClassifier(), + tflite::Register_TreeEnsembleClassifier()); + } + TfLiteStatus AddUnpack() { return AddBuiltin(BuiltinOperator_UNPACK, tflite::ops::micro::Register_UNPACK(), ParseUnpack); } + TfLiteStatus AddUnidirectionalSequenceLstm() { + return AddBuiltin(BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM, + Register_UNIDIRECTIONAL_SEQUENCE_LSTM(), + ParseUnidirectionalSequenceLSTM); + } + + TfLiteStatus AddVarHandle() { + return AddBuiltin(BuiltinOperator_VAR_HANDLE, Register_VAR_HANDLE(), + ParseVarHandle); + } + + TfLiteStatus AddWhile() { + return AddBuiltin(BuiltinOperator_WHILE, Register_WHILE(), ParseWhile); + } + TfLiteStatus AddZerosLike() { return AddBuiltin(BuiltinOperator_ZEROS_LIKE, Register_ZEROS_LIKE(), ParseZerosLike); @@ -486,33 +629,22 @@ class MicroMutableOpResolver : public MicroOpResolver { private: TfLiteStatus AddBuiltin(tflite::BuiltinOperator 
op, const TfLiteRegistration& registration, - MicroOpResolver::BuiltinParseFunction parser) { + TfLiteBridgeBuiltinParseFunction parser) { if (op == BuiltinOperator_CUSTOM) { - if (error_reporter_ != nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Invalid parameter BuiltinOperator_CUSTOM to the " - "AddBuiltin function."); - } + MicroPrintf("Invalid parameter BuiltinOperator_CUSTOM to the "); + MicroPrintf("AddBuiltin function."); return kTfLiteError; } if (FindOp(op) != nullptr) { - if (error_reporter_ != nullptr) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Calling AddBuiltin with the same op more than " - "once is not supported (Op: #%d).", - op); - } + MicroPrintf("Calling AddBuiltin with the same op more than "); + MicroPrintf("once is not supported (Op: #%d).", op); return kTfLiteError; } if (registrations_len_ >= tOpCount) { - if (error_reporter_) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Couldn't register builtin op #%d, resolver size " - "is too small (%d).", - op, tOpCount); - } + MicroPrintf("Couldn't register builtin op #%d, resolver size ", op); + MicroPrintf("is too small (%d).", tOpCount); return kTfLiteError; } @@ -535,10 +667,8 @@ class MicroMutableOpResolver : public MicroOpResolver { // Arrays (and counter) to store the builtin codes and their corresponding // parse functions as these are registered with the Op Resolver. BuiltinOperator builtin_codes_[tOpCount]; - MicroOpResolver::BuiltinParseFunction builtin_parsers_[tOpCount]; + TfLiteBridgeBuiltinParseFunction builtin_parsers_[tOpCount]; unsigned int num_buitin_ops_ = 0; - - ErrorReporter* error_reporter_; }; }; // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h index af8bb67..1bd3f4b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h @@ -16,9 +16,8 @@ limitations under the License. #define TENSORFLOW_LITE_MICRO_MICRO_OP_RESOLVER_H_ #include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/flatbuffer_conversions.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/flatbuffer_conversions_bridge.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.h" #include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" namespace tflite { @@ -32,13 +31,8 @@ namespace tflite { // We need an interface class instead of directly using MicroMutableOpResolver // because MicroMutableOpResolver is a class template with the number of // registered Ops as the template parameter. -class MicroOpResolver : public OpResolver { +class MicroOpResolver : public TfLiteBridgeOpResolver { public: - typedef TfLiteStatus (*BuiltinParseFunction)(const Operator* op, - ErrorReporter* error_reporter, - BuiltinDataAllocator* allocator, - void** builtin_data); - // Returns the Op registration struct corresponding to the enum code from the // flatbuffer schema. Returns nullptr if the op is not found or if op == // BuiltinOperator_CUSTOM. 
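As a quick orientation for the resolver changes above, here is a minimal usage sketch of the renamed Add* helpers. The include path, op count, and op selection are illustrative only and are not part of this patch; it assumes the default MicroMutableOpResolver constructor.

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h"

// Four ops are registered below, so the template count must be at least 4.
static tflite::MicroMutableOpResolver<4> op_resolver;

void RegisterOps() {
  op_resolver.AddSoftmax();     // defaults to Register_SOFTMAX()
  op_resolver.AddSvdf();        // an alternate TfLiteRegistration can be passed instead
  op_resolver.AddReduceMax();
  op_resolver.AddRelu();
}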
@@ -63,7 +57,8 @@ class MicroOpResolver : public OpResolver { // Returns the operator specific parsing function for the OpData for a // BuiltinOperator (if registered), else nullptr. - virtual BuiltinParseFunction GetOpDataParser(BuiltinOperator op) const = 0; + virtual TfLiteBridgeBuiltinParseFunction GetOpDataParser( + BuiltinOperator op) const = 0; ~MicroOpResolver() override {} }; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.cc new file mode 100644 index 0000000..63306ce --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.cc @@ -0,0 +1,115 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h" + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_time.h" + +namespace tflite { + +uint32_t MicroProfiler::BeginEvent(const char* tag) { + if (num_events_ == kMaxEvents) { + num_events_ = 0; + } + + tags_[num_events_] = tag; + start_ticks_[num_events_] = GetCurrentTimeTicks(); + end_ticks_[num_events_] = start_ticks_[num_events_] - 1; + return num_events_++; +} + +void MicroProfiler::EndEvent(uint32_t event_handle) { + TFLITE_DCHECK(event_handle < kMaxEvents); + end_ticks_[event_handle] = GetCurrentTimeTicks(); +} + +uint32_t MicroProfiler::GetTotalTicks() const { + int32_t ticks = 0; + for (int i = 0; i < num_events_; ++i) { + ticks += end_ticks_[i] - start_ticks_[i]; + } + return ticks; +} + +void MicroProfiler::Log() const { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + for (int i = 0; i < num_events_; ++i) { + uint32_t ticks = end_ticks_[i] - start_ticks_[i]; + MicroPrintf("%s took %" PRIu32 " ticks (%d ms).", tags_[i], ticks, + TicksToMs(ticks)); + } +#endif +} + +void MicroProfiler::LogCsv() const { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + MicroPrintf("\"Event\",\"Tag\",\"Ticks\""); + for (int i = 0; i < num_events_; ++i) { + uint32_t ticks = end_ticks_[i] - start_ticks_[i]; + MicroPrintf("%d,%s,%" PRIu32, i, tags_[i], ticks); + } +#endif +} + +void MicroProfiler::LogTicksPerTagCsv() { +#if !defined(TF_LITE_STRIP_ERROR_STRINGS) + MicroPrintf( + "\"Unique Tag\",\"Total ticks across all events with that tag.\""); + int total_ticks = 0; + for (int i = 0; i < num_events_; ++i) { + uint32_t ticks = end_ticks_[i] - start_ticks_[i]; + TFLITE_DCHECK(tags_[i] != nullptr); + int position = FindExistingOrNextPosition(tags_[i]); + TFLITE_DCHECK(position >= 0); + total_ticks_per_tag[position].tag = tags_[i]; + total_ticks_per_tag[position].ticks = + total_ticks_per_tag[position].ticks + ticks; + total_ticks += 
ticks; + } + + for (int i = 0; i < num_events_; ++i) { + TicksPerTag each_tag_entry = total_ticks_per_tag[i]; + if (each_tag_entry.tag == nullptr) { + break; + } + MicroPrintf("%s, %d", each_tag_entry.tag, each_tag_entry.ticks); + } + MicroPrintf("total number of ticks, %d", total_ticks); +#endif +} + +// This method finds a particular array element in the total_ticks_per_tag array +// with the matching tag_name passed in the method. If it can find a +// matching array element that has the same tag_name, then it will return the +// position of the matching element. But if it unable to find a matching element +// with the given tag_name, it will return the next available empty position +// from the array. +int MicroProfiler::FindExistingOrNextPosition(const char* tag_name) { + int pos = 0; + for (; pos < num_events_; pos++) { + TicksPerTag each_tag_entry = total_ticks_per_tag[pos]; + if (each_tag_entry.tag == nullptr || + strcmp(each_tag_entry.tag, tag_name) == 0) { + return pos; + } + } + return pos < num_events_ ? pos : -1; +} +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.cpp deleted file mode 100644 index 1af7a7f..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.cpp +++ /dev/null @@ -1,58 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h" - -#include - -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_time.h" - -namespace tflite { - -uint32_t MicroProfiler::BeginEvent(const char* tag) { - if (num_events_ == kMaxEvents) { - num_events_ = 0; - } - - tags_[num_events_] = tag; - start_ticks_[num_events_] = GetCurrentTimeTicks(); - end_ticks_[num_events_] = start_ticks_[num_events_] - 1; - return num_events_++; -} - -void MicroProfiler::EndEvent(uint32_t event_handle) { - TFLITE_DCHECK(event_handle < kMaxEvents); - end_ticks_[event_handle] = GetCurrentTimeTicks(); -} - -int32_t MicroProfiler::GetTotalTicks() const { - int32_t ticks = 0; - for (int i = 0; i < num_events_; ++i) { - ticks += end_ticks_[i] - start_ticks_[i]; - } - return ticks; -} - -void MicroProfiler::Log() const { -#if !defined(TF_LITE_STRIP_ERROR_STRINGS) - for (int i = 0; i < num_events_; ++i) { - int32_t ticks = end_ticks_[i] - start_ticks_[i]; - MicroPrintf("%s took %d ticks (%d ms).", tags_[i], ticks, TicksToMs(ticks)); - } -#endif -} - -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h index 3f285b2..d940398 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,9 +16,8 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ #define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_H_ -#include - #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler_interface.h" namespace tflite { @@ -26,7 +25,7 @@ namespace tflite { // performance. Bottleck operators can be identified along with slow code // sections. This can be used in conjunction with running the relevant micro // benchmark to evaluate end-to-end performance. -class MicroProfiler { +class MicroProfiler : public MicroProfilerInterface { public: MicroProfiler() = default; virtual ~MicroProfiler() = default; @@ -34,7 +33,7 @@ class MicroProfiler { // Marks the start of a new event and returns an event handle that can be used // to mark the end of the event via EndEvent. The lifetime of the tag // parameter must exceed that of the MicroProfiler. - virtual uint32_t BeginEvent(const char* tag); + virtual uint32_t BeginEvent(const char* tag) override; // Marks the end of an event associated with event_handle. It is the // responsibility of the caller to ensure than EndEvent is called once and @@ -43,7 +42,7 @@ class MicroProfiler { // If EndEvent is called more than once for the same event_handle, the last // call will be used as the end of event marker.If EndEvent is called 0 times // for a particular event_handle, the duration of that event will be 0 ticks. 
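For reference, a hedged sketch of how the profiler API above is typically driven; the function name and event tag are placeholders, and the interpreter wiring is omitted.

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h"

void ProfileOnce(tflite::MicroProfiler& profiler) {
  const uint32_t handle = profiler.BeginEvent("inference");  // tag must outlive the profiler
  // ... run the code being timed ...
  profiler.EndEvent(handle);
  profiler.Log();     // one human-readable line per event
  profiler.LogCsv();  // the same events as CSV rows
}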
- virtual void EndEvent(uint32_t event_handle); + virtual void EndEvent(uint32_t event_handle) override; // Clears all the events that have been currently profiled. void ClearEvents() { num_events_ = 0; } @@ -51,33 +50,55 @@ class MicroProfiler { // Returns the sum of the ticks taken across all the events. This number // is only meaningful if all of the events are disjoint (the end time of // event[i] <= start time of event[i+1]). - int32_t GetTotalTicks() const; + uint32_t GetTotalTicks() const; - // Prints the profiling information of each of the events. + // Prints the profiling information of each of the events in human readable + // form. void Log() const; + // Prints the profiling information of each of the events in CSV (Comma + // Separated Value) form. + void LogCsv() const; + + // Prints total ticks for each unique tag in CSV format. + // Output will have one row for each unique tag along with the + // total ticks summed across all events with that particular tag. + void LogTicksPerTagCsv(); + private: // Maximum number of events that this class can keep track of. If we call // AddEvent more than kMaxEvents number of times, then the oldest event's // profiling information will be overwritten. - static constexpr int kMaxEvents = 50; + static constexpr int kMaxEvents = 1024; const char* tags_[kMaxEvents]; - int32_t start_ticks_[kMaxEvents]; - int32_t end_ticks_[kMaxEvents]; + uint32_t start_ticks_[kMaxEvents]; + uint32_t end_ticks_[kMaxEvents]; int num_events_ = 0; + struct TicksPerTag { + const char* tag; + uint32_t ticks; + }; + // In practice, the number of tags will be much lower than the number of + // events. But it is theoretically possible that each event to be unique and + // hence we allow total_ticks_per_tag to have kMaxEvents entries. + TicksPerTag total_ticks_per_tag[kMaxEvents] = {}; + + int FindExistingOrNextPosition(const char* tag_name); + TF_LITE_REMOVE_VIRTUAL_DELETE; }; -#if defined(NDEBUG) +#if defined(TF_LITE_STRIP_ERROR_STRINGS) // For release builds, the ScopedMicroProfiler is a noop. // // This is done because the ScipedProfiler is used as part of the // MicroInterpreter and we want to ensure zero overhead for the release builds. class ScopedMicroProfiler { public: - explicit ScopedMicroProfiler(const char* tag, MicroProfiler* profiler) {} + explicit ScopedMicroProfiler(const char* tag, + MicroProfilerInterface* profiler) {} }; #else @@ -94,7 +115,8 @@ class ScopedMicroProfiler { // } class ScopedMicroProfiler { public: - explicit ScopedMicroProfiler(const char* tag, MicroProfiler* profiler) + explicit ScopedMicroProfiler(const char* tag, + MicroProfilerInterface* profiler) : profiler_(profiler) { if (profiler_ != nullptr) { event_handle_ = profiler_->BeginEvent(tag); @@ -109,9 +131,9 @@ class ScopedMicroProfiler { private: uint32_t event_handle_ = 0; - MicroProfiler* profiler_ = nullptr; + MicroProfilerInterface* profiler_ = nullptr; }; -#endif // !defined(NDEBUG) +#endif // !defined(TF_LITE_STRIP_ERROR_STRINGS) } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler_interface.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler_interface.h new file mode 100644 index 0000000..f839a74 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_profiler_interface.h @@ -0,0 +1,38 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_ +#define TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_ + +#include + +namespace tflite { + +// Interface class that the TFLM framework relies on for profiling. +class MicroProfilerInterface { + public: + virtual ~MicroProfilerInterface() {} + + // Marks the start of a new event and returns an event handle that can be used + // to mark the end of the event via EndEvent. + virtual uint32_t BeginEvent(const char* tag) = 0; + + // Marks the end of an event associated with event_handle. + virtual void EndEvent(uint32_t event_handle) = 0; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MICRO_PROFILER_INTERFACE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.cc new file mode 100644 index 0000000..c07d111 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.cc @@ -0,0 +1,148 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h" + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" + +namespace tflite { + +namespace {} // namespace + +MicroResourceVariables* MicroResourceVariables::Create( + MicroAllocator* allocator, int max_num_variables) { + TFLITE_DCHECK(allocator != nullptr); + + uint8_t* allocator_buffer = static_cast( + allocator->AllocatePersistentBuffer(sizeof(MicroResourceVariables))); + MicroResourceVariable* variable_array = + static_cast(allocator->AllocatePersistentBuffer( + sizeof(MicroResourceVariable) * max_num_variables)); + MicroResourceVariables* variables = new (allocator_buffer) + MicroResourceVariables(variable_array, max_num_variables); + return variables; +} + +int MicroResourceVariables::CreateIdIfNoneFound(const char* container, + const char* shared_name) { + int resource_id = FindId(container, shared_name); + if (resource_id >= 0) { + return resource_id; + } + + // no existing variable found for the given container and shared name pair. + if (num_resource_variables_ >= max_variable_count_) { + MicroPrintf( + "Failed to allocate resource variable. Maximum resource variable count " + "(%d) " + "reached.", + max_variable_count_); + return -1; + } + + resource_id = num_resource_variables_++; + resource_variables_[resource_id].container = container; + resource_variables_[resource_id].shared_name = shared_name; + resource_variables_[resource_id].resource_buffer = nullptr; + resource_variables_[resource_id].bytes = 0; + return resource_id; +} + +TfLiteStatus MicroResourceVariables::Read(int id, + const TfLiteEvalTensor* tensor) { + if (id < 0 || id >= num_resource_variables_) { + MicroPrintf("Attempting to read non-existent resource variable %d", id); + return kTfLiteError; + } + MicroResourceVariable variable = resource_variables_[id]; + TFLITE_DCHECK(EvalTensorBytes(tensor) == variable.bytes); + TFLITE_DCHECK(variable.resource_buffer != nullptr); + memcpy(tensor->data.raw, variable.resource_buffer, variable.bytes); + return kTfLiteOk; +} + +TfLiteStatus MicroResourceVariables::Allocate(int id, TfLiteContext* context, + const TfLiteTensor* tensor) { + if (id < 0 || id >= num_resource_variables_) { + MicroPrintf("Attempting to read non-existent resource variable %d", id); + return kTfLiteError; + } + + MicroResourceVariable& variable = resource_variables_[id]; + + if (variable.resource_buffer == nullptr) { + variable.bytes = tensor->bytes; + variable.resource_buffer = + context->AllocatePersistentBuffer(context, tensor->bytes); + if (variable.resource_buffer == nullptr) { + MicroPrintf("Failed to allocate resource buffer."); + return kTfLiteError; + } + // Zero out resource buffers by deafult. Buffers can be initialized to + // nonzero values using ASSIGN_VARIABLE. 
+ memset(variable.resource_buffer, 0, variable.bytes); + } + + return kTfLiteOk; +} + +TfLiteStatus MicroResourceVariables::Assign(int id, + const TfLiteEvalTensor* tensor) { + if (id < 0 || id >= num_resource_variables_) { + MicroPrintf("Attempting to read non-existent resource variable %d", id); + return kTfLiteError; + } + MicroResourceVariable variable = resource_variables_[id]; + + if (variable.resource_buffer == nullptr) { + MicroPrintf( + "Attempting to assign from a TfLiteEvalTensor before the resource " + "buffer has been allocated. Make sure to call AssignResourceVariable " + "with a TfLiteTensor first."); + return kTfLiteError; + } + TFLITE_DCHECK(EvalTensorBytes(tensor) == variable.bytes); + memcpy(variable.resource_buffer, tensor->data.raw, variable.bytes); + return kTfLiteOk; +} + +TfLiteStatus MicroResourceVariables::ResetAll() { + for (int i = 0; i < num_resource_variables_; i++) { + MicroResourceVariable variable = resource_variables_[i]; + memset(variable.resource_buffer, 0, variable.bytes); + } + return kTfLiteOk; +} + +int MicroResourceVariables::FindId(const char* container, + const char* shared_name) { + for (int i = 0; i < num_resource_variables_; i++) { + // Some TFLite flatbuffers contain null container names to save space. + if ((container == nullptr || + !strcmp(container, resource_variables_[i].container)) && + !strcmp(shared_name, resource_variables_[i].shared_name)) { + return i; + } + } + return -1; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h new file mode 100644 index 0000000..d2ebb35 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h @@ -0,0 +1,87 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_ +#define TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_ + +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" + +namespace tflite { + +class MicroResourceVariables { + public: + // Create + static MicroResourceVariables* Create(MicroAllocator* allocator, + int num_variables); + + // Creates a resource variable if none is available for the given container + // and shared name pair. Returns the resource ID corresponding to the + // container and shared name pair. If allocation fails, the returned resource + // ID will be negative. The the container and shared_name must outlive this + // class. + int CreateIdIfNoneFound(const char* container, const char* shared_name); + + // Read the resource buffer associated with the given ID into the given + // tensor. 
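A small sketch of how the resource-variable API above is exercised by the VAR_HANDLE / ASSIGN_VARIABLE / READ_VARIABLE kernels; the allocator, container, and shared name below are placeholders.

#include "edge-impulse-sdk/tensorflow/lite/micro/micro_resource_variable.h"

void ResourceVariableSketch(tflite::MicroAllocator* allocator) {
  tflite::MicroResourceVariables* vars =
      tflite::MicroResourceVariables::Create(allocator, /*num_variables=*/4);
  // The same container/shared_name pair always resolves to the same id.
  const int id = vars->CreateIdIfNoneFound("", "accumulator");
  if (id < 0) {
    // Maximum variable count reached; no variable was created.
  }
}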
+ TfLiteStatus Read(int id, const TfLiteEvalTensor* tensor); + + // Allocates the resource buffer if none has been allocated, based on the + // length of the input tensor. Copies input tensor contents to the resource + // buffer. + TfLiteStatus Allocate(int id, TfLiteContext* context, + const TfLiteTensor* tensor); + + // Copies input tensor contents to the resource buffer. + // AllocateResourceVariable with a TFLite tensor must have been called first + // in order to allocate the resource buffer. + TfLiteStatus Assign(int id, const TfLiteEvalTensor* tensor); + + // Zeros out all resource buffers. + TfLiteStatus ResetAll(); + + private: + int FindId(const char* container, const char* shared_name); + + // Micro resource contains the mapping between resource container/name strings + // and resouce IDs. Each resource ID corresponds to a resource buffer pointer. + // The resouce ID is created during the VAR_HANDLE operator preparation stage. + // The resource buffer pointer is created during ASSIGN_VARIABLE preparation + // stage based on the size of the TFLiteTensor being assigned. + struct MicroResourceVariable { + const char* container; + const char* shared_name; + void* resource_buffer; + + // This is only for verifying read size. + size_t bytes; + }; + + MicroResourceVariables(MicroResourceVariable* variables, + int max_variable_count) + : resource_variables_(variables), + max_variable_count_(max_variable_count), + num_resource_variables_(0) {} + + MicroResourceVariable* resource_variables_; + int max_variable_count_; + int num_resource_variables_; +}; + +} // namespace tflite + +#endif // TFLITE_MICRO_TENSORFLOW_LITE_MICRO_MICRO_RESOURCE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_string.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_string.cc similarity index 97% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_string.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_string.cc index 30b60a9..39746f9 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_string.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_string.cc @@ -283,6 +283,14 @@ extern "C" int MicroVsnprintf(char* output, int len, const char* format, case '%': output[output_index++] = *current++; break; + case 'c': + if (usable_length - output_index < 1) { + output[output_index++] = '\0'; + return output_index; + } + output[output_index++] = va_arg(args, int32_t); + current++; + break; case 's': char* string = va_arg(args, char*); int string_idx = 0; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.cc similarity index 87% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.cc index b769851..d418509 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.cc @@ -22,8 +22,7 @@ limitations under the License. 
// To add an equivalent function for your own platform, create your own // implementation file, and place it in a subfolder with named after the OS // you're targeting. For example, see the Cortex M bare metal version in -// tensorflow/lite/micro/bluepill/micro_time.cc or the mbed one on -// tensorflow/lite/micro/mbed/micro_time.cc. +// tensorflow/lite/micro/bluepill/micro_time.cc #include "edge-impulse-sdk/tensorflow/lite/micro/micro_time.h" @@ -39,21 +38,21 @@ namespace tflite { // for a platform to support Tensorflow Lite for Microcontrollers profiling. // This returns 0 by default because timing is an optional feature that builds // without errors on platforms that do not need it. -int32_t ticks_per_second() { return 0; } +uint32_t ticks_per_second() { return 0; } // Reference implementation of the GetCurrentTimeTicks() function that's // required for a platform to support Tensorflow Lite for Microcontrollers // profiling. This returns 0 by default because timing is an optional feature // that builds without errors on platforms that do not need it. -int32_t GetCurrentTimeTicks() { return 0; } +uint32_t GetCurrentTimeTicks() { return 0; } #else // defined(TF_LITE_USE_CTIME) // For platforms that support ctime, we implment the micro_time interface in // this central location. -int32_t ticks_per_second() { return CLOCKS_PER_SEC; } +uint32_t ticks_per_second() { return CLOCKS_PER_SEC; } -int32_t GetCurrentTimeTicks() { return clock(); } +uint32_t GetCurrentTimeTicks() { return clock(); } #endif } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.h index fac9069..7a8ab45 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_time.h @@ -21,14 +21,14 @@ namespace tflite { // These functions should be implemented by each target platform, and provide an // accurate tick count along with how many ticks there are per second. -int32_t ticks_per_second(); +uint32_t ticks_per_second(); // Return time in ticks. The meaning of a tick varies per platform. -int32_t GetCurrentTimeTicks(); +uint32_t GetCurrentTimeTicks(); -inline int32_t TicksToMs(int32_t ticks) { - return static_cast(1000.0f * static_cast(ticks) / - static_cast(ticks_per_second())); +inline uint32_t TicksToMs(int32_t ticks) { + return static_cast(1000.0f * static_cast(ticks) / + static_cast(ticks_per_second())); } } // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.cc similarity index 69% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.cc index 3d21aaf..4f7eba7 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.cc @@ -20,7 +20,10 @@ limitations under the License. 
#include #include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { @@ -32,17 +35,26 @@ int ElementCount(const TfLiteIntArray& dims) { return result; } -void SignedSymmetricPerChannelQuantize(const float* values, - TfLiteIntArray* dims, - int quantized_dimension, - int8_t* quantized_values, - float* scaling_factors) { +size_t EvalTensorBytes(const TfLiteEvalTensor* tensor) { + size_t bytes_per_element; + TFLITE_DCHECK(kTfLiteOk == + TfLiteTypeSizeOf(tensor->type, &bytes_per_element)); + return ElementCount(*tensor->dims) * bytes_per_element; +} + +void SignedSymmetricPerChannelQuantize( + const float* values, TfLiteIntArray* dims, int quantized_dimension, + int8_t* quantized_values, float* scaling_factors, TfLiteType type) { int input_size = ElementCount(*dims); int channel_count = dims->data[quantized_dimension]; int per_channel_size = input_size / channel_count; int stride; int channel_stride; + + int qmin = QMinFromTfLiteType(type); + int qmax = QMaxFromTfLiteType(type); + if (quantized_dimension == 0) { stride = 1; channel_stride = per_channel_size; @@ -50,7 +62,8 @@ void SignedSymmetricPerChannelQuantize(const float* values, stride = channel_count; channel_stride = 1; } else { - TF_LITE_FATAL("quantized dimension must be 0 or 3"); + MicroPrintf("quantized dimension must be 0 or 3"); + TFLITE_ABORT; } // Calculate scales for each channel. @@ -63,16 +76,13 @@ void SignedSymmetricPerChannelQuantize(const float* values, min = fminf(min, values[idx]); max = fmaxf(max, values[idx]); } - scaling_factors[channel] = - fmaxf(fabs(min), fabs(max)) / std::numeric_limits::max(); + scaling_factors[channel] = fmaxf(fabs(min), fabs(max)) / qmax; for (int i = 0; i < per_channel_size; i++) { int idx = channel * channel_stride + i * stride; const int32_t quantized_value = static_cast(roundf(values[idx] / scaling_factors[channel])); // Clamp: just in case some odd numeric offset. - quantized_values[idx] = - fminf(std::numeric_limits::max(), - fmaxf(std::numeric_limits::min() + 1, quantized_value)); + quantized_values[idx] = fminf(qmax, fmaxf(qmin + 1, quantized_value)); } } } diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h index e30c8fa..73de1dc 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h @@ -16,8 +16,10 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ #define TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ +// Patched by Edge Impulse // Arduino build defines abs as a macro here. That is invalid C++, and breaks // libc++'s header, undefine it. +// TODO investigate if this belongs to global patch or Arduino lib one #ifdef abs #undef abs #endif @@ -25,7 +27,7 @@ limitations under the License. 
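The quantization change above parameterizes the clamp range by tensor type (int8 by default, int4 via kTfLiteInt4). A standalone sketch of the per-channel scale computation, with illustrative names:

#include <cmath>
#include <cstdint>
#include <limits>

// qmax is 127 for int8 and 7 for the int4 path.
float SymmetricScale(float channel_min, float channel_max, bool int4 = false) {
  const int qmax = int4 ? 7 : std::numeric_limits<int8_t>::max();
  return std::fmax(std::fabs(channel_min), std::fabs(channel_max)) / qmax;
}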
#include #include #include -#include "edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h" +#include #include "edge-impulse-sdk/tensorflow/lite/c/common.h" @@ -35,6 +37,12 @@ namespace tflite { int ElementCount(const TfLiteIntArray& dims); +size_t EvalTensorBytes(const TfLiteEvalTensor* tensor); + +// C++11 does not support constexpr max; hence, use ternary conditional to +// create our own constexpr Max function. +constexpr int Max(int a, int b) { return a >= b ? a : b; } + // Converts a float value into a quantized value. Note that large values (close // to max int and min int) may see significant error due to a lack of floating // point granularity for large values. @@ -50,11 +58,13 @@ T FloatToQuantizedType(const float value, const float scale, int zero_point) { template T FloatToSymmetricQuantizedType(const float value, const float scale) { - int32_t result = round(value / scale); - result = - std::max(static_cast(std::numeric_limits::min() + 1), result); - result = - std::min(static_cast(std::numeric_limits::max()), result); + // 64-bit values are required since 8x16 conv accumulates to int64, meaning + // an int64 bias is required. + std::int64_t result = round(value / scale); + result = std::max( + static_cast(std::numeric_limits::min() + 1), result); + result = std::min(static_cast(std::numeric_limits::max()), + result); return result; } @@ -102,7 +112,8 @@ void SignedSymmetricPerChannelQuantize(const float* values, TfLiteIntArray* dims, int quantized_dimension, int8_t* quantized_values, - float* scaling_factor); + float* scaling_factor, + TfLiteType type = kTfLiteNoType); // Quantizes inputs based on the values provided, choosing the smallest range // which includes all input values. @@ -136,6 +147,24 @@ void Dequantize(const T* values, const int size, const float scale, } } +// based on TfLiteType passed in to these functions the corresponding max / min +// int for that type are returned +inline int QMinFromTfLiteType(TfLiteType type) { + if (type == kTfLiteInt4) { + return -8; + } else { + return std::numeric_limits::min(); + } +} + +inline int QMaxFromTfLiteType(TfLiteType type) { + if (type == kTfLiteInt4) { + return 7; + } else { + return std::numeric_limits::max(); + } +} + } // namespace tflite #endif // TENSORFLOW_LITE_MICRO_MICRO_UTILS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.cc new file mode 100644 index 0000000..8ad3864 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.cc @@ -0,0 +1,66 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.h" + +#include "edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h" + +namespace tflite { + +MockMicroGraph::MockMicroGraph(SingleArenaBufferAllocator* allocator) + : MicroGraph(nullptr, nullptr, nullptr, nullptr), + allocator_(allocator), + init_count_(0), + prepare_count_(0), + free_count_(0) { + memset(invoke_counts_, 0, sizeof(invoke_counts_)); + mock_tensor_ = + reinterpret_cast(allocator_->AllocatePersistentBuffer( + sizeof(TfLiteEvalTensor), alignof(TfLiteEvalTensor))); + int* dims_array = reinterpret_cast( + allocator_->AllocatePersistentBuffer(3 * sizeof(int), alignof(int))); + float* data_array = reinterpret_cast( + allocator_->AllocatePersistentBuffer(2 * sizeof(float), alignof(float))); + int dims[] = {2, 1, 2}; + memcpy(dims_array, dims, 3 * sizeof(int)); + mock_tensor_->dims = testing::IntArrayFromInts(dims_array); + mock_tensor_->data.f = data_array; + mock_tensor_->type = kTfLiteFloat32; +} + +TfLiteStatus MockMicroGraph::InvokeSubgraph(int subgraph_idx) { + invoke_counts_[subgraph_idx]++; + return kTfLiteOk; +} + +TfLiteStatus MockMicroGraph::ResetVariableTensors() { return kTfLiteOk; } + +size_t MockMicroGraph::NumSubgraphInputs(int subgraph_idx) { return 1; } + +TfLiteEvalTensor* MockMicroGraph::GetSubgraphInput(int subgraph_idx, + int tensor_idx) { + return mock_tensor_; +} + +size_t MockMicroGraph::NumSubgraphOutputs(int subgraph_idx) { return 1; } + +TfLiteEvalTensor* MockMicroGraph::GetSubgraphOutput(int subgraph_idx, + int tensor_idx) { + return mock_tensor_; +} + +int MockMicroGraph::NumSubgraphs() { return kMaxSubgraphs; } + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.h new file mode 100644 index 0000000..b1aeb20 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.h @@ -0,0 +1,60 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_ +#define TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_graph.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { + +// MockMicroGraph stubs out all MicroGraph methods used during invoke. A count +// of the number of calls to invoke for each subgraph is maintained for +// validation of control flow operators. 
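A test-style sketch of the mock graph above; the allocator is assumed to be created over a test arena elsewhere, and the subgraph index is illustrative.

#include "edge-impulse-sdk/tensorflow/lite/micro/mock_micro_graph.h"

void MockGraphSketch(tflite::SingleArenaBufferAllocator* allocator) {
  tflite::MockMicroGraph mock_graph(allocator);
  mock_graph.InvokeSubgraph(0);
  mock_graph.InvokeSubgraph(0);
  const int calls = mock_graph.get_invoke_count(0);  // 2, one per InvokeSubgraph call
  (void)calls;
}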
+class MockMicroGraph : public MicroGraph { + public: + explicit MockMicroGraph(SingleArenaBufferAllocator* allocator); + TfLiteStatus InvokeSubgraph(int subgraph_idx) override; + TfLiteStatus ResetVariableTensors() override; + size_t NumSubgraphInputs(int subgraph_idx) override; + TfLiteEvalTensor* GetSubgraphInput(int subgraph_idx, int tensor_idx) override; + size_t NumSubgraphOutputs(int subgraph_idx) override; + TfLiteEvalTensor* GetSubgraphOutput(int subgraph_idx, + int tensor_idx) override; + int NumSubgraphs() override; + int get_init_count() const { return init_count_; } + int get_prepare_count() const { return prepare_count_; } + int get_free_count() const { return free_count_; } + int get_invoke_count(int subgraph_idx) const { + return invoke_counts_[subgraph_idx]; + } + + private: + static constexpr int kMaxSubgraphs = 10; + SingleArenaBufferAllocator* allocator_; + TfLiteEvalTensor* mock_tensor_; + int init_count_; + int prepare_count_; + int free_count_; + int invoke_counts_[kMaxSubgraphs]; + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_MOCK_MICRO_GRAPH_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.cc new file mode 100644 index 0000000..65515ff --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.cc @@ -0,0 +1,170 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.h" + +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +NonPersistentArenaBufferAllocator::NonPersistentArenaBufferAllocator( + uint8_t* buffer, size_t buffer_size) + : buffer_head_(buffer), + buffer_tail_(buffer + buffer_size), + head_temp_(buffer), + next_temp_(buffer) {} + +NonPersistentArenaBufferAllocator::~NonPersistentArenaBufferAllocator() {} + +// Allocates a temporary buffer. This buffer is not resizable. +uint8_t* NonPersistentArenaBufferAllocator::AllocateTemp(size_t size, + size_t alignment) { + uint8_t* const aligned_result = AlignPointerUp(next_temp_, alignment); + const size_t available_memory = buffer_tail_ - aligned_result; + if (available_memory < size) { + MicroPrintf( + "Failed to allocate temp memory. Requested: %u, " + "available %u, missing: %u", + size, available_memory, size - available_memory); + return nullptr; + } + next_temp_ = aligned_result + size; + temp_buffer_ptr_check_sum_ ^= reinterpret_cast(aligned_result); + temp_buffer_count_++; + return aligned_result; +} + +// Signals that a temporary buffer is no longer needed. 
+void NonPersistentArenaBufferAllocator::DeallocateTemp(uint8_t* temp_buf) { + temp_buffer_ptr_check_sum_ ^= reinterpret_cast(temp_buf); + temp_buffer_count_--; +} + +// Returns true if all temporary buffers are already deallocated. +bool NonPersistentArenaBufferAllocator::IsAllTempDeallocated() { + if (temp_buffer_count_ != 0 || temp_buffer_ptr_check_sum_ != 0) { + MicroPrintf( + "Number of allocated temp buffers: %d. Checksum passing status: %d", + temp_buffer_count_, !temp_buffer_ptr_check_sum_); + return false; + } + return true; +} + +// Signals that all temporary allocations can be reclaimed. TFLM calls this +// API when it knows that all temporary buffers that it requested has been +// deallocated. The goal of API is to facilitate implementations of +// INonPersistentBufferAllocator can reuse buffer with some reasonable +// complexity. +TfLiteStatus NonPersistentArenaBufferAllocator::ResetTempAllocations() { + if (!IsAllTempDeallocated()) { + MicroPrintf( + "All temp buffers must be freed before calling ResetTempAllocations()"); + return kTfLiteError; + } + next_temp_ = head_temp_; + return kTfLiteOk; +} + +// Returns a buffer that is resizable viable ResizeBuffer(). +uint8_t* NonPersistentArenaBufferAllocator::AllocateResizableBuffer( + size_t size, size_t alignment) { + // Only supports one resizable buffer, which starts at the buffer head. + uint8_t* expected_resizable_buf = AlignPointerUp(buffer_head_, alignment); + + if (resizable_buffer_allocated_) { + MicroPrintf( + "Cannot allocate a new resizable buffer when one is already allocated"); + return nullptr; + } + + if (ResizeBuffer(expected_resizable_buf, size, alignment) == kTfLiteOk) { + resizable_buffer_allocated_ = true; + return expected_resizable_buf; + } + return nullptr; +} + +// Resizes a buffer that is previously returned by the AllocateResizableBuffer. +// Note that ResizeBuffer(old_resizable_buf, 0, 1) effectively deallocates +// a previous allocated resizable buffer. +TfLiteStatus NonPersistentArenaBufferAllocator::ResizeBuffer( + uint8_t* resizable_buf, size_t size, size_t alignment) { + // Only supports one resizable buffer, which starts at the buffer head. + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + if (resizable_buf != expect_resizable_buf) { + MicroPrintf("Internal error: buffer is not resizable"); + return kTfLiteError; + } + if (head_temp_ != next_temp_) { + MicroPrintf("ResetTempAllocations() is not called before ResizeBuffer()."); + return kTfLiteError; + } + + const size_t available_memory = buffer_tail_ - expect_resizable_buf; + if (available_memory < size) { + MicroPrintf( + "Failed to resize buffer. Requested: %u, available %u, missing: %u", + size, available_memory, size - available_memory); + return kTfLiteError; + } + head_temp_ = expect_resizable_buf + size; + next_temp_ = head_temp_; + + return kTfLiteOk; +} + +// Frees up the memory occupied by the resizable buffer. +TfLiteStatus NonPersistentArenaBufferAllocator::DeallocateResizableBuffer( + uint8_t* resizable_buf) { + TfLiteStatus status = ResizeBuffer(resizable_buf, 0, 1); + if (status == kTfLiteOk) { + resizable_buffer_allocated_ = false; + } + return status; +} + +// Returns a pointer pointing to the start of the overlay memory, which is +// used for activation tensors and scratch buffers by kernels at Invoke stage. +uint8_t* NonPersistentArenaBufferAllocator::GetOverlayMemoryAddress() const { + return buffer_head_; +} + +// Reserves the size of the overlay memory. 
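A hedged sketch of the paired temp-buffer calls above; the arena size, alignment, and buffer use are illustrative.

#include <cstdint>

#include "edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.h"

void TempBufferSketch() {
  static uint8_t arena[1024];
  tflite::NonPersistentArenaBufferAllocator allocator(arena, sizeof(arena));
  uint8_t* scratch = allocator.AllocateTemp(/*size=*/256, /*alignment=*/16);
  if (scratch != nullptr) {
    // ... use the scratch buffer ...
    allocator.DeallocateTemp(scratch);  // keeps the checksum and count balanced
  }
  // Legal only once every AllocateTemp has a matching DeallocateTemp.
  allocator.ResetTempAllocations();
}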
This overlay is reserved for the +// kernels at Invoke stage. This is referred to as the overlay because before +// Invoket state, the same memory can be used for temp buffers. The layout of +// the memory is planned by the memory planner separately at Invoke stage. +TfLiteStatus +NonPersistentArenaBufferAllocator::ReserveNonPersistentOverlayMemory( + size_t size, size_t alignment) { + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + return ResizeBuffer(expect_resizable_buf, size, alignment); +} + +// Returns the size of non-persistent buffer in use. +size_t NonPersistentArenaBufferAllocator::GetNonPersistentUsedBytes() const { + return (next_temp_ - buffer_head_); +} + +// Returns the number of bytes available with a given alignment. This number +// takes in account any temporary allocations. +size_t NonPersistentArenaBufferAllocator::GetAvailableMemory( + size_t alignment) const { + uint8_t* const aligned_temp = AlignPointerUp(next_temp_, alignment); + uint8_t* const aligned_tail = AlignPointerDown(buffer_tail_, alignment); + return aligned_tail - aligned_temp; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.h new file mode 100644 index 0000000..2a3d639 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.h @@ -0,0 +1,104 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/ibuffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" + +namespace tflite { + +// Implement INonPersistentBufferAllocator on an arena that is dedicated for +// non-persistent buffers. +class NonPersistentArenaBufferAllocator : public INonPersistentBufferAllocator { + public: + NonPersistentArenaBufferAllocator(uint8_t* buffer, size_t buffer_size); + virtual ~NonPersistentArenaBufferAllocator(); + + // Allocates a temporary buffer. This buffer is not resizable. + uint8_t* AllocateTemp(size_t size, size_t alignment) override; + + // Signals that a temporary buffer is no longer needed. + void DeallocateTemp(uint8_t* buf) override; + + // Returns true if all temporary buffers are already deallocated. + bool IsAllTempDeallocated() override; + + // Signals that all temporary allocations can be reclaimed. TFLM calls this + // API when it knows that all temporary buffers that it requested has been + // deallocated. 
+ TfLiteStatus ResetTempAllocations() override; + + // Returns a buffer that is resizable viable ResizeBuffer(). + uint8_t* AllocateResizableBuffer(size_t size, size_t alignment) override; + + // Resizes a buffer that is previously returned by the + // AllocateResizableBuffer. + TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size, + size_t alignment) override; + + // Frees up the memory occupied by the resizable buffer. + TfLiteStatus DeallocateResizableBuffer(uint8_t* resizable_buf) override; + + // Returns a pointer pointing to the start of the overlay memory, which is + // used for activation tensors and scratch buffers by kernels at Invoke stage. + uint8_t* GetOverlayMemoryAddress() const override; + + // Reserves the size of the overlay memory. This overlay is reserved for the + // kernels at Invoke stage. This is referred to as the overlay because before + // Invoket state, the same memory can be used for temp buffers. The layout of + // the memory is planned by the memory planner separately at Invoke stage. + TfLiteStatus ReserveNonPersistentOverlayMemory(size_t size, + size_t alignment) override; + + // Returns the size of non-persistent buffer in use. + size_t GetNonPersistentUsedBytes() const override; + + // Returns the number of bytes available with a given alignment. This number + // takes in account any temporary allocations. + size_t GetAvailableMemory(size_t alignment) const override; + + TF_LITE_REMOVE_VIRTUAL_DELETE + + private: + // The memory arena that this allocator manages. + uint8_t* const buffer_head_; + uint8_t* const buffer_tail_; + + // The whole region is split into two parts: + // buffer_head_ to head_temp_ - 1 belongs to the only resizable buffer. + // head_temp_ to buffer_tail_ can be used for (non-resizable) temp buffers. + uint8_t* head_temp_; + + // next_temp_ points to the next available temp buffer allocation address and + // its range is between head_temp_ and buffer_tail_ + uint8_t* next_temp_; + + // XOR Check sum for outstanding temp buffers. + // If all temp buffers are deallocated OR no temp buffers are allocated, + // temp_buffer_ptr_check_sum_ == nullptr. + intptr_t temp_buffer_ptr_check_sum_ = 0; + // Count of outstanding temp buffers. + int temp_buffer_count_ = 0; + bool resizable_buffer_allocated_ = false; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_NON_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.cc new file mode 100644 index 0000000..a60b626 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.cc @@ -0,0 +1,32 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
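For the resizable-buffer half of this interface, a sketch under the same assumptions (sizes and alignment are illustrative; only one resizable buffer can exist and it occupies the head of the arena):

#include <cstdint>

#include "edge-impulse-sdk/tensorflow/lite/micro/non_persistent_arena_buffer_allocator.h"

void ResizableBufferSketch() {
  static uint8_t arena[1024];
  tflite::NonPersistentArenaBufferAllocator allocator(arena, sizeof(arena));
  uint8_t* buf = allocator.AllocateResizableBuffer(/*size=*/256, /*alignment=*/16);
  if (buf != nullptr) {
    allocator.ResizeBuffer(buf, /*size=*/512, /*alignment=*/16);  // grows in place
    allocator.DeallocateResizableBuffer(buf);                     // same as resizing to 0
  }
}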
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.h" + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_error_reporter.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_utils.h" + +namespace tflite { + +TfLiteStatus GetRegistrationFromOpCode( + const OperatorCode* opcode, const OpResolver& op_resolver, + const TfLiteRegistration** registration) { + return GetRegistrationFromOpCode( + opcode, op_resolver, tflite::GetMicroErrorReporter(), registration); +} +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.h new file mode 100644 index 0000000..bf6a2db --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/op_resolver_bridge.h @@ -0,0 +1,38 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_OP_RESOLVER_BRIDGE_H_ +#define TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_OP_RESOLVER_BRIDGE_H_ + +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/api/op_resolver.h" // needed for the Using declarative + +namespace tflite { + +// Forward declaration of the classes and structs used here. +struct OperatorCode; + +using TfLiteBridgeOpResolver = OpResolver; + +// Handles the logic for converting between an OperatorCode structure extracted +// from a flatbuffer and information about a registered operator +// implementation. +TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode, + const OpResolver& op_resolver, + const TfLiteRegistration** registration); + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_TFLITE_BRIDGE_OP_RESOLVER_BRIDGE_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.cc new file mode 100644 index 0000000..9237691 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.cc @@ -0,0 +1,52 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#include "edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.h" + +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +PersistentArenaBufferAllocator::PersistentArenaBufferAllocator( + uint8_t* buffer, size_t buffer_size) + : buffer_head_(buffer), + buffer_tail_(buffer + buffer_size), + tail_temp_(buffer_tail_) {} + +PersistentArenaBufferAllocator::~PersistentArenaBufferAllocator() {} + +uint8_t* PersistentArenaBufferAllocator::AllocatePersistentBuffer( + size_t size, size_t alignment) { + uint8_t* const aligned_result = + AlignPointerDown(tail_temp_ - size, alignment); + if (aligned_result < buffer_head_) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS + const size_t missing_memory = buffer_head_ - aligned_result; + MicroPrintf( + "Failed to allocate tail memory. Requested: %u, " + "available %u, missing: %u", + size, size - missing_memory, missing_memory); +#endif + return nullptr; + } + tail_temp_ = aligned_result; + return aligned_result; +} + +size_t PersistentArenaBufferAllocator::GetPersistentUsedBytes() const { + return buffer_tail_ - tail_temp_; +} + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.h new file mode 100644 index 0000000..911c486 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/persistent_arena_buffer_allocator.h @@ -0,0 +1,58 @@ +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/ibuffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" + +namespace tflite { + +// PersistentArenaBufferAllocator is an implementatation of +// IPersistentBufferAllocator interface on an arena that is dedicated for +// persistent buffers. 
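AllocatePersistentBuffer() above carves persistent allocations from the high end of the arena by aligning the candidate pointer down and moving the tail; nothing is ever freed. A minimal standalone sketch of that tail-bump scheme (names are illustrative, and the alignment is assumed to be a power of two):

    #include <cstddef>
    #include <cstdint>

    class TailBumpAllocator {
     public:
      TailBumpAllocator(uint8_t* buffer, size_t buffer_size)
          : head_(buffer), end_(buffer + buffer_size), tail_(buffer + buffer_size) {}

      // Returns nullptr when the aligned allocation would cross the arena head.
      uint8_t* AllocatePersistent(size_t size, size_t alignment) {
        const uintptr_t candidate = reinterpret_cast<uintptr_t>(tail_ - size);
        uint8_t* const aligned = reinterpret_cast<uint8_t*>(
            candidate & ~(static_cast<uintptr_t>(alignment) - 1));
        if (aligned < head_) {
          return nullptr;  // arena exhausted
        }
        tail_ = aligned;  // everything at or above tail_ is now allocated
        return aligned;
      }

      size_t UsedBytes() const { return static_cast<size_t>(end_ - tail_); }

     private:
      uint8_t* const head_;  // low end of the arena
      uint8_t* const end_;   // one past the high end of the arena
      uint8_t* tail_;        // start of the allocated (persistent) region
    };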
+class PersistentArenaBufferAllocator : public IPersistentBufferAllocator { + public: + PersistentArenaBufferAllocator(uint8_t* buffer, size_t buffer_size); + virtual ~PersistentArenaBufferAllocator(); + + // Allocates persistent memory. The persistent buffer is never freed. + // Returns nullptr if errors occured. + uint8_t* AllocatePersistentBuffer(size_t size, size_t alignment) override; + + // Returns the size of all persistent allocations in bytes. + size_t GetPersistentUsedBytes() const override; + + TF_LITE_REMOVE_VIRTUAL_DELETE + private: + // The memory arena that this allocator manages. + uint8_t* const buffer_head_; + uint8_t* const buffer_tail_; + + // The whole region is split into two parts: + // tail_temp_ to buffer_tail_ contains allocated buffers; + // buffer_head_ to tail_temp_ - 1 belongs to still available spaces. + // So in essence, the allocated region grows from the bottom and emulates + // SingleArenaBufferAllocator's persistent part. + uint8_t* tail_temp_; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_PERSISTENT_ARENA_BUFFER_ALLOCATOR_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.cc similarity index 57% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.cc index b108d13..11e4d1b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.cc @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -15,33 +15,49 @@ limitations under the License. #include "edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.h" #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_planner/greedy_memory_planner.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" namespace tflite { +size_t RecordingMicroAllocator::GetDefaultTailUsage() { + // RecordingMicroAllocator inherits from MicroAllocator and its tail usage is + // similar with MicroAllocator with SingleArenaBufferAllocator and + // MicroAllocator being replaced. 
+ return MicroAllocator::GetDefaultTailUsage( + /*is_memory_planner_given=*/false) + + AlignSizeUp() - + AlignSizeUp() + + AlignSizeUp() - AlignSizeUp(); +} + RecordingMicroAllocator::RecordingMicroAllocator( - RecordingSimpleMemoryAllocator* recording_memory_allocator, - ErrorReporter* error_reporter) - : MicroAllocator(recording_memory_allocator, error_reporter), + RecordingSingleArenaBufferAllocator* recording_memory_allocator, + MicroMemoryPlanner* memory_planner) + : MicroAllocator(recording_memory_allocator, memory_planner), recording_memory_allocator_(recording_memory_allocator) {} -RecordingMicroAllocator* RecordingMicroAllocator::Create( - uint8_t* tensor_arena, size_t arena_size, ErrorReporter* error_reporter) { - TFLITE_DCHECK(error_reporter != nullptr); - - RecordingSimpleMemoryAllocator* simple_memory_allocator = - RecordingSimpleMemoryAllocator::Create(error_reporter, tensor_arena, - arena_size); +RecordingMicroAllocator* RecordingMicroAllocator::Create(uint8_t* tensor_arena, + size_t arena_size) { + RecordingSingleArenaBufferAllocator* simple_memory_allocator = + RecordingSingleArenaBufferAllocator::Create(tensor_arena, arena_size); TFLITE_DCHECK(simple_memory_allocator != nullptr); - uint8_t* allocator_buffer = simple_memory_allocator->AllocateFromTail( + uint8_t* memory_planner_buffer = + simple_memory_allocator->AllocatePersistentBuffer( + sizeof(GreedyMemoryPlanner), alignof(GreedyMemoryPlanner)); + GreedyMemoryPlanner* memory_planner = + new (memory_planner_buffer) GreedyMemoryPlanner(); + + uint8_t* allocator_buffer = simple_memory_allocator->AllocatePersistentBuffer( sizeof(RecordingMicroAllocator), alignof(RecordingMicroAllocator)); RecordingMicroAllocator* allocator = new (allocator_buffer) - RecordingMicroAllocator(simple_memory_allocator, error_reporter); + RecordingMicroAllocator(simple_memory_allocator, memory_planner); return allocator; } @@ -63,29 +79,22 @@ RecordedAllocation RecordingMicroAllocator::GetRecordedAllocation( case RecordedAllocationType::kOpData: return recorded_op_data_; } - TF_LITE_REPORT_ERROR(error_reporter(), "Invalid allocation type supplied: %d", - allocation_type); + MicroPrintf("Invalid allocation type supplied: %d", allocation_type); return RecordedAllocation(); } -const RecordingSimpleMemoryAllocator* +const RecordingSingleArenaBufferAllocator* RecordingMicroAllocator::GetSimpleMemoryAllocator() const { return recording_memory_allocator_; } void RecordingMicroAllocator::PrintAllocations() const { - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation total %d bytes", - recording_memory_allocator_->GetUsedBytes()); - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation head %d bytes", - recording_memory_allocator_->GetHeadUsedBytes()); - TF_LITE_REPORT_ERROR( - error_reporter(), - "[RecordingMicroAllocator] Arena allocation tail %d bytes", - recording_memory_allocator_->GetTailUsedBytes()); + MicroPrintf("[RecordingMicroAllocator] Arena allocation total %d bytes", + recording_memory_allocator_->GetUsedBytes()); + MicroPrintf("[RecordingMicroAllocator] Arena allocation head %d bytes", + recording_memory_allocator_->GetNonPersistentUsedBytes()); + MicroPrintf("[RecordingMicroAllocator] Arena allocation tail %d bytes", + recording_memory_allocator_->GetPersistentUsedBytes()); PrintRecordedAllocation(RecordedAllocationType::kTfLiteEvalTensorData, "TfLiteEvalTensor data", "allocations"); PrintRecordedAllocation(RecordedAllocationType::kPersistentTfLiteTensorData, @@ -119,8 
+128,7 @@ void RecordingMicroAllocator::PrintRecordedAllocation( #ifndef TF_LITE_STRIP_ERROR_STRINGS RecordedAllocation allocation = GetRecordedAllocation(allocation_type); if (allocation.used_bytes > 0 || allocation.requested_bytes > 0) { - TF_LITE_REPORT_ERROR( - error_reporter(), + MicroPrintf( "[RecordingMicroAllocator] '%s' used %d bytes with alignment overhead " "(requested %d bytes for %d %s)", allocation_name, allocation.used_bytes, allocation.requested_bytes, @@ -130,91 +138,90 @@ void RecordingMicroAllocator::PrintRecordedAllocation( } TfLiteStatus RecordingMicroAllocator::AllocateNodeAndRegistrations( - const Model* model, NodeAndRegistration** node_and_registrations) { + const Model* model, SubgraphAllocations* subgraph_allocations) { RecordedAllocation allocations = SnapshotAllocationUsage(); - TfLiteStatus status = MicroAllocator::AllocateNodeAndRegistrations( - model, node_and_registrations); + TfLiteStatus status = + MicroAllocator::AllocateNodeAndRegistrations(model, subgraph_allocations); RecordAllocationUsage(allocations, recorded_node_and_registration_array_data_); - // The allocation count in SimpleMemoryAllocator will only be 1. To provide - // better logging, decrement by 1 and add in the actual number of operators - // used in the graph: - // The allocation for this recording will always be 1. This is because the - // parent class mallocs one large allocation for the number of nodes in the - // graph (e.g. sizeof(NodeAndRegistration) * num_nodes). - // To prevent extra overhead and potential for fragmentation, manually adjust - // the accounting by decrementing by 1 and adding the actual number of nodes - // used in the graph: - recorded_node_and_registration_array_data_.count += - GetSubGraphFromModel(model)->operators()->size() - 1; - return status; -} -TfLiteStatus -RecordingMicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations) { - RecordedAllocation allocations = SnapshotAllocationUsage(); - - TfLiteStatus status = - MicroAllocator::PrepareNodeAndRegistrationDataFromFlatbuffer( - model, op_resolver, node_and_registrations); - - RecordAllocationUsage(allocations, recorded_op_data_); + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + // The allocation count in SingleArenaBufferAllocator will only be 1. To + // provide better logging, decrement by 1 and add in the actual number of + // operators used in the graph: The allocation for this recording will + // always be 1. This is because the parent class mallocs one large + // allocation for the number of nodes in the graph (e.g. + // sizeof(NodeAndRegistration) * num_nodes). 
To prevent extra overhead and + // potential for fragmentation, manually adjust the accounting by + // decrementing by 1 and adding the actual number of nodes used in the + // graph: + if (model->subgraphs()->Get(subgraph_idx)->operators()) { + recorded_node_and_registration_array_data_.count += + model->subgraphs()->Get(subgraph_idx)->operators()->size() - 1; + } else { + recorded_node_and_registration_array_data_.count -= 1; + } + } return status; } TfLiteStatus RecordingMicroAllocator::AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) { + const Model* model, SubgraphAllocations* subgraph_allocations) { RecordedAllocation allocations = SnapshotAllocationUsage(); TfLiteStatus status = - MicroAllocator::AllocateTfLiteEvalTensors(model, eval_tensors); + MicroAllocator::AllocateTfLiteEvalTensors(model, subgraph_allocations); RecordAllocationUsage(allocations, recorded_tflite_eval_tensor_data_); - // The allocation for this recording will always be 1. This is because the - // parent class mallocs one large allocation for the number of tensors in the - // graph (e.g. sizeof(TfLiteEvalTensor) * num_tensors). - // To prevent extra overhead and potential for fragmentation, manually adjust - // the accounting by decrementing by 1 and adding the actual number of tensors - // used in the graph: - recorded_tflite_eval_tensor_data_.count += - GetSubGraphFromModel(model)->tensors()->size() - 1; + + for (size_t subgraph_idx = 0; subgraph_idx < model->subgraphs()->size(); + subgraph_idx++) { + // The allocation for this recording will always be 1. This is because the + // parent class mallocs one large allocation for the number of tensors in + // the graph (e.g. sizeof(TfLiteEvalTensor) * num_tensors). To prevent extra + // overhead and potential for fragmentation, manually adjust the accounting + // by decrementing by 1 and adding the actual number of tensors used in the + // graph: + recorded_tflite_eval_tensor_data_.count += + model->subgraphs()->Get(subgraph_idx)->tensors()->size() - 1; + } return status; } TfLiteStatus RecordingMicroAllocator::AllocateVariables( - const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors) { + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors, + const int32_t* offline_planner_offsets) { RecordedAllocation allocations = SnapshotAllocationUsage(); - TfLiteStatus status = - MicroAllocator::AllocateVariables(subgraph, eval_tensors); + TfLiteStatus status = MicroAllocator::AllocateVariables( + subgraph, eval_tensors, offline_planner_offsets); RecordAllocationUsage(allocations, recorded_tflite_tensor_variable_buffer_data_); return status; } -TfLiteTensor* RecordingMicroAllocator::AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, int tensor_index) { +TfLiteTensor* +RecordingMicroAllocator::AllocatePersistentTfLiteTensorInternal() { RecordedAllocation allocations = SnapshotAllocationUsage(); - TfLiteTensor* result = MicroAllocator::AllocatePersistentTfLiteTensorInternal( - model, eval_tensors, tensor_index); + TfLiteTensor* result = + MicroAllocator::AllocatePersistentTfLiteTensorInternal(); RecordAllocationUsage(allocations, recorded_persistent_tflite_tensor_data_); return result; } TfLiteStatus RecordingMicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - const Model* model, const SubGraph* subgraph, TfLiteTensor* tensor, - int tensor_index, bool allocate_temp) { + const Model* model, TfLiteTensor* tensor, int tensor_index, + int subgraph_index, bool allocate_temp) { RecordedAllocation 
allocations = SnapshotAllocationUsage(); TfLiteStatus status = MicroAllocator::PopulateTfLiteTensorFromFlatbuffer( - model, subgraph, tensor, tensor_index, allocate_temp); + model, tensor, tensor_index, subgraph_index, allocate_temp); RecordAllocationUsage(allocations, recorded_persistent_tflite_tensor_quantization_data_); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.h index c8470c1..9d694af 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2023 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -16,9 +16,9 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_RECORDING_MICRO_ALLOCATOR_H_ #define TENSORFLOW_LITE_MICRO_RECORDING_MICRO_ALLOCATOR_H_ +#include "edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.h" #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.h" namespace tflite { @@ -48,21 +48,23 @@ struct RecordedAllocation { // Utility subclass of MicroAllocator that records all allocations // inside the arena. A summary of allocations can be logged through the // ErrorReporter by invoking LogAllocations(). This special allocator requires -// an instance of RecordingSimpleMemoryAllocator to capture allocations in the -// head and tail. Arena allocation recording can be retrieved by type through -// the GetRecordedAllocation() function. This class should only be used for -// auditing memory usage or integration testing. +// an instance of RecordingSingleArenaBufferAllocator to capture allocations in +// the head and tail. Arena allocation recording can be retrieved by type +// through the GetRecordedAllocation() function. This class should only be used +// for auditing memory usage or integration testing. class RecordingMicroAllocator : public MicroAllocator { public: static RecordingMicroAllocator* Create(uint8_t* tensor_arena, - size_t arena_size, - ErrorReporter* error_reporter); + size_t arena_size); + + // Returns the fixed amount of memory overhead of RecordingMicroAllocator. + static size_t GetDefaultTailUsage(); // Returns the recorded allocations information for a given allocation type. RecordedAllocation GetRecordedAllocation( RecordedAllocationType allocation_type) const; - const RecordingSimpleMemoryAllocator* GetSimpleMemoryAllocator() const; + const RecordingSingleArenaBufferAllocator* GetSimpleMemoryAllocator() const; // Logs out through the ErrorReporter all allocation recordings by type // defined in RecordedAllocationType. 
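Each overridden allocation hook in RecordingMicroAllocator above snapshots the underlying allocator's counters, delegates to the parent class, and records the delta against a per-type bucket. A simplified sketch of that snapshot/delta pattern, generic over any allocator exposing the three counters (not the SDK's API):

    #include <cstddef>
    #include <utility>

    struct Recorded {
      size_t requested_bytes = 0;
      size_t used_bytes = 0;
      int count = 0;
    };

    // Run `do_alloc`, then add whatever it consumed from `alloc` to `bucket`.
    template <typename Allocator, typename Fn>
    void RecordDelta(const Allocator& alloc, Recorded& bucket, Fn&& do_alloc) {
      const size_t requested_before = alloc.GetRequestedBytes();
      const size_t used_before = alloc.GetUsedBytes();
      const int count_before = alloc.GetAllocatedCount();

      std::forward<Fn>(do_alloc)();  // the parent-class allocation being measured

      bucket.requested_bytes += alloc.GetRequestedBytes() - requested_before;
      bucket.used_bytes += alloc.GetUsedBytes() - used_before;
      bucket.count += alloc.GetAllocatedCount() - count_before;
    }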
@@ -72,32 +74,28 @@ class RecordingMicroAllocator : public MicroAllocator { protected: TfLiteStatus AllocateNodeAndRegistrations( - const Model* model, - NodeAndRegistration** node_and_registrations) override; - TfLiteStatus PrepareNodeAndRegistrationDataFromFlatbuffer( - const Model* model, const MicroOpResolver& op_resolver, - NodeAndRegistration* node_and_registrations) override; + const Model* model, SubgraphAllocations* subgraph_allocations) override; TfLiteStatus AllocateTfLiteEvalTensors( - const Model* model, TfLiteEvalTensor** eval_tensors) override; - TfLiteStatus AllocateVariables(const SubGraph* subgraph, - TfLiteEvalTensor* eval_tensors) override; + const Model* model, SubgraphAllocations* subgraph_allocations) override; + TfLiteStatus AllocateVariables( + const SubGraph* subgraph, TfLiteEvalTensor* eval_tensors, + const int32_t* offline_planner_offsets) override; // TODO(b/162311891): Once all kernels have been updated to the new API drop // this method. It is only used to record TfLiteTensor persistent allocations. - TfLiteTensor* AllocatePersistentTfLiteTensorInternal( - const Model* model, TfLiteEvalTensor* eval_tensors, - int tensor_index) override; + TfLiteTensor* AllocatePersistentTfLiteTensorInternal() override; + // TODO(b/162311891): Once all kernels have been updated to the new API drop // this function since all allocations for quantized data will take place in // the temp section. TfLiteStatus PopulateTfLiteTensorFromFlatbuffer(const Model* model, - const SubGraph* subgraph, TfLiteTensor* tensor, int tensor_index, + int subgraph_index, bool allocate_temp) override; private: - RecordingMicroAllocator(RecordingSimpleMemoryAllocator* memory_allocator, - ErrorReporter* error_reporter); + RecordingMicroAllocator(RecordingSingleArenaBufferAllocator* memory_allocator, + MicroMemoryPlanner* memory_planner); void PrintRecordedAllocation(RecordedAllocationType allocation_type, const char* allocation_name, @@ -107,7 +105,7 @@ class RecordingMicroAllocator : public MicroAllocator { void RecordAllocationUsage(const RecordedAllocation& snapshotted_allocation, RecordedAllocation& recorded_allocation); - const RecordingSimpleMemoryAllocator* recording_memory_allocator_; + const RecordingSingleArenaBufferAllocator* recording_memory_allocator_; RecordedAllocation recorded_tflite_eval_tensor_data_ = {}; RecordedAllocation recorded_persistent_tflite_tensor_data_ = {}; @@ -115,6 +113,8 @@ class RecordingMicroAllocator : public MicroAllocator { RecordedAllocation recorded_persistent_buffer_data_ = {}; RecordedAllocation recorded_tflite_tensor_variable_buffer_data_ = {}; RecordedAllocation recorded_node_and_registration_array_data_ = {}; + + // TODO(b/187993291): Re-enable OpData allocating tracking. RecordedAllocation recorded_op_data_ = {}; TF_LITE_REMOVE_VIRTUAL_DELETE diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_interpreter.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_interpreter.h index 90f27a4..ce44fbd 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_interpreter.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_micro_interpreter.h @@ -1,4 +1,4 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2022 The TensorFlow Authors. All Rights Reserved. 
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -17,6 +17,7 @@ limitations under the License. #define TENSORFLOW_LITE_MICRO_RECORDING_MICRO_INTERPRETER_H_ #include "edge-impulse-sdk/tensorflow/lite/micro/micro_interpreter.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler_interface.h" #include "edge-impulse-sdk/tensorflow/lite/micro/recording_micro_allocator.h" namespace tflite { @@ -37,19 +38,22 @@ class RecordingMicroInterpreter : public MicroInterpreter { RecordingMicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, uint8_t* tensor_arena, size_t tensor_arena_size, - ErrorReporter* error_reporter) - : MicroInterpreter(model, op_resolver, - RecordingMicroAllocator::Create( - tensor_arena, tensor_arena_size, error_reporter), - error_reporter), + MicroResourceVariables* resource_variable = nullptr, + MicroProfilerInterface* profiler = nullptr) + : MicroInterpreter( + model, op_resolver, + RecordingMicroAllocator::Create(tensor_arena, tensor_arena_size), + resource_variable, profiler), recording_micro_allocator_( static_cast(allocator())) {} RecordingMicroInterpreter(const Model* model, const MicroOpResolver& op_resolver, RecordingMicroAllocator* allocator, - ErrorReporter* error_reporter) - : MicroInterpreter(model, op_resolver, allocator, error_reporter), + MicroResourceVariables* resource_variable = nullptr, + MicroProfilerInterface* profiler = nullptr) + : MicroInterpreter(model, op_resolver, allocator, resource_variable, + profiler), recording_micro_allocator_(*allocator) {} const RecordingMicroAllocator& GetMicroAllocator() const { diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.cc similarity index 51% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.cc index d8e9910..746561c 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.cpp +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.cc @@ -13,7 +13,7 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#include "edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.h" #include @@ -21,47 +21,47 @@ limitations under the License. 
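The RecordingMicroInterpreter constructors above drop the ErrorReporter argument and instead take optional MicroResourceVariables and MicroProfilerInterface pointers. A hedged usage sketch; the model and op resolver are assumed to be set up elsewhere and the arena size is arbitrary:

    #include <cstddef>
    #include <cstdint>

    #include "edge-impulse-sdk/tensorflow/lite/micro/recording_micro_interpreter.h"

    constexpr size_t kArenaSize = 16 * 1024;
    alignas(16) static uint8_t tensor_arena[kArenaSize];

    void ReportArenaUsage(const tflite::Model* model,
                          const tflite::MicroOpResolver& op_resolver) {
      // Resource variables and profiler default to nullptr.
      tflite::RecordingMicroInterpreter interpreter(model, op_resolver,
                                                    tensor_arena, kArenaSize);
      if (interpreter.AllocateTensors() == kTfLiteOk) {
        // Logs the per-type allocation records via MicroPrintf.
        interpreter.GetMicroAllocator().PrintAllocations();
      }
    }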
namespace tflite { -RecordingSimpleMemoryAllocator::RecordingSimpleMemoryAllocator( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) - : SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size), +RecordingSingleArenaBufferAllocator::RecordingSingleArenaBufferAllocator( + uint8_t* buffer_head, size_t buffer_size) + : SingleArenaBufferAllocator(buffer_head, buffer_size), requested_head_bytes_(0), requested_tail_bytes_(0), used_bytes_(0), alloc_count_(0) {} -RecordingSimpleMemoryAllocator::~RecordingSimpleMemoryAllocator() {} +RecordingSingleArenaBufferAllocator::~RecordingSingleArenaBufferAllocator() {} -RecordingSimpleMemoryAllocator* RecordingSimpleMemoryAllocator::Create( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) { - TFLITE_DCHECK(error_reporter != nullptr); +RecordingSingleArenaBufferAllocator* +RecordingSingleArenaBufferAllocator::Create(uint8_t* buffer_head, + size_t buffer_size) { TFLITE_DCHECK(buffer_head != nullptr); - RecordingSimpleMemoryAllocator tmp = - RecordingSimpleMemoryAllocator(error_reporter, buffer_head, buffer_size); + RecordingSingleArenaBufferAllocator tmp = + RecordingSingleArenaBufferAllocator(buffer_head, buffer_size); - uint8_t* allocator_buffer = - tmp.AllocateFromTail(sizeof(RecordingSimpleMemoryAllocator), - alignof(RecordingSimpleMemoryAllocator)); + uint8_t* allocator_buffer = tmp.AllocatePersistentBuffer( + sizeof(RecordingSingleArenaBufferAllocator), + alignof(RecordingSingleArenaBufferAllocator)); // Use the default copy constructor to populate internal states. - return new (allocator_buffer) RecordingSimpleMemoryAllocator(tmp); + return new (allocator_buffer) RecordingSingleArenaBufferAllocator(tmp); } -size_t RecordingSimpleMemoryAllocator::GetRequestedBytes() const { +size_t RecordingSingleArenaBufferAllocator::GetRequestedBytes() const { return requested_head_bytes_ + requested_tail_bytes_; } -size_t RecordingSimpleMemoryAllocator::GetUsedBytes() const { +size_t RecordingSingleArenaBufferAllocator::GetUsedBytes() const { return used_bytes_; } -size_t RecordingSimpleMemoryAllocator::GetAllocatedCount() const { +size_t RecordingSingleArenaBufferAllocator::GetAllocatedCount() const { return alloc_count_; } -TfLiteStatus RecordingSimpleMemoryAllocator::SetHeadBufferSize( - size_t size, size_t alignment) { +TfLiteStatus RecordingSingleArenaBufferAllocator::ResizeBuffer( + uint8_t* resizable_buf, size_t size, size_t alignment) { const uint8_t* previous_head = head(); TfLiteStatus status = - SimpleMemoryAllocator::SetHeadBufferSize(size, alignment); + SingleArenaBufferAllocator::ResizeBuffer(resizable_buf, size, alignment); if (status == kTfLiteOk) { used_bytes_ += head() - previous_head; requested_head_bytes_ = size; @@ -69,10 +69,11 @@ TfLiteStatus RecordingSimpleMemoryAllocator::SetHeadBufferSize( return status; } -uint8_t* RecordingSimpleMemoryAllocator::AllocateFromTail(size_t size, - size_t alignment) { +uint8_t* RecordingSingleArenaBufferAllocator::AllocatePersistentBuffer( + size_t size, size_t alignment) { const uint8_t* previous_tail = tail(); - uint8_t* result = SimpleMemoryAllocator::AllocateFromTail(size, alignment); + uint8_t* result = + SingleArenaBufferAllocator::AllocatePersistentBuffer(size, alignment); if (result != nullptr) { used_bytes_ += previous_tail - tail(); requested_tail_bytes_ += size; diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.h 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.h similarity index 57% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.h rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.h index e8ea581..cb58a8b 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_simple_memory_allocator.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/recording_single_arena_buffer_allocator.h @@ -13,28 +13,26 @@ See the License for the specific language governing permissions and limitations under the License. ==============================================================================*/ -#ifndef TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ +#include "edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h" #include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h" namespace tflite { -// Utility class used to log allocations of a SimpleMemoryAllocator. Should only -// be used in debug/evaluation settings or unit tests to evaluate allocation -// usage. -class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator { +// Utility class used to log allocations of a SingleArenaBufferAllocator. Should +// only be used in debug/evaluation settings or unit tests to evaluate +// allocation usage. +class RecordingSingleArenaBufferAllocator : public SingleArenaBufferAllocator { public: - RecordingSimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer_head, size_t buffer_size); + RecordingSingleArenaBufferAllocator(uint8_t* buffer_head, size_t buffer_size); // TODO(b/157615197): Cleanup constructors/destructor and use factory // functions. - ~RecordingSimpleMemoryAllocator() override; + ~RecordingSingleArenaBufferAllocator() override; - static RecordingSimpleMemoryAllocator* Create(ErrorReporter* error_reporter, - uint8_t* buffer_head, - size_t buffer_size); + static RecordingSingleArenaBufferAllocator* Create(uint8_t* buffer_head, + size_t buffer_size); // Returns the number of bytes requested from the head or tail. size_t GetRequestedBytes() const; @@ -47,8 +45,9 @@ class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator { // Returns the number of alloc calls from the head or tail. 
size_t GetAllocatedCount() const; - TfLiteStatus SetHeadBufferSize(size_t size, size_t alignment) override; - uint8_t* AllocateFromTail(size_t size, size_t alignment) override; + TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size, + size_t alignment) override; + uint8_t* AllocatePersistentBuffer(size_t size, size_t alignment) override; private: size_t requested_head_bytes_; @@ -61,4 +60,4 @@ class RecordingSimpleMemoryAllocator : public SimpleMemoryAllocator { } // namespace tflite -#endif // TENSORFLOW_LITE_MICRO_RECORDING_SIMPLE_MEMORY_ALLOCATOR_H_ +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_RECORDING_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/schema_utils.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/schema_utils.cc similarity index 100% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/schema_utils.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/schema_utils.cc diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.cpp deleted file mode 100644 index 97ef4f5..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.cpp +++ /dev/null @@ -1,149 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h" - -#include -#include -#include - -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" - -namespace tflite { - -SimpleMemoryAllocator::SimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer_head, - uint8_t* buffer_tail) - : error_reporter_(error_reporter), - buffer_head_(buffer_head), - buffer_tail_(buffer_tail), - head_(buffer_head), - tail_(buffer_tail), - temp_(buffer_head_) {} - -SimpleMemoryAllocator::SimpleMemoryAllocator(ErrorReporter* error_reporter, - uint8_t* buffer, - size_t buffer_size) - : SimpleMemoryAllocator(error_reporter, buffer, buffer + buffer_size) {} - -/* static */ -SimpleMemoryAllocator* SimpleMemoryAllocator::Create( - ErrorReporter* error_reporter, uint8_t* buffer_head, size_t buffer_size) { - TFLITE_DCHECK(error_reporter != nullptr); - TFLITE_DCHECK(buffer_head != nullptr); - SimpleMemoryAllocator tmp = - SimpleMemoryAllocator(error_reporter, buffer_head, buffer_size); - - // Allocate enough bytes from the buffer to create a SimpleMemoryAllocator. 
- // The new instance will use the current adjusted tail buffer from the tmp - // allocator instance. - uint8_t* allocator_buffer = tmp.AllocateFromTail( - sizeof(SimpleMemoryAllocator), alignof(SimpleMemoryAllocator)); - // Use the default copy constructor to populate internal states. - return new (allocator_buffer) SimpleMemoryAllocator(tmp); -} - -SimpleMemoryAllocator::~SimpleMemoryAllocator() {} - -TfLiteStatus SimpleMemoryAllocator::SetHeadBufferSize(size_t size, - size_t alignment) { - if (head_ != temp_) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Internal error: SetHeadBufferSize() needs to be called " - "after ResetTempAllocations()."); - return kTfLiteError; - } - - uint8_t* const aligned_result = AlignPointerUp(buffer_head_, alignment); - const size_t available_memory = tail_ - aligned_result; - if (available_memory < size) { - TF_LITE_REPORT_ERROR( - error_reporter_, - "Failed to set head size. Requested: %u, available %u, missing: %u", - size, available_memory, size - available_memory); - return kTfLiteError; - } - head_ = aligned_result + size; - temp_ = head_; - - return kTfLiteOk; -} - -uint8_t* SimpleMemoryAllocator::AllocateFromTail(size_t size, - size_t alignment) { - uint8_t* const aligned_result = AlignPointerDown(tail_ - size, alignment); - if (aligned_result < head_) { -#ifndef TF_LITE_STRIP_ERROR_STRINGS - const size_t missing_memory = head_ - aligned_result; - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate tail memory. Requested: %u, " - "available %u, missing: %u", - size, size - missing_memory, missing_memory); -#endif - return nullptr; - } - tail_ = aligned_result; - return aligned_result; -} - -uint8_t* SimpleMemoryAllocator::AllocateTemp(size_t size, size_t alignment) { - uint8_t* const aligned_result = AlignPointerUp(temp_, alignment); - const size_t available_memory = tail_ - aligned_result; - if (available_memory < size) { - TF_LITE_REPORT_ERROR(error_reporter_, - "Failed to allocate temp memory. 
Requested: %u, " - "available %u, missing: %u", - size, available_memory, size - available_memory); - return nullptr; - } - temp_ = aligned_result + size; - return aligned_result; -} - -void SimpleMemoryAllocator::ResetTempAllocations() { temp_ = head_; } - -uint8_t* SimpleMemoryAllocator::GetHeadBuffer() const { return buffer_head_; } - -size_t SimpleMemoryAllocator::GetHeadUsedBytes() const { - return head_ - buffer_head_; -} - -size_t SimpleMemoryAllocator::GetTailUsedBytes() const { - return buffer_tail_ - tail_; -} - -size_t SimpleMemoryAllocator::GetAvailableMemory(size_t alignment) const { - uint8_t* const aligned_temp = AlignPointerUp(temp_, alignment); - uint8_t* const aligned_tail = AlignPointerDown(tail_, alignment); - return aligned_tail - aligned_temp; -} - -size_t SimpleMemoryAllocator::GetUsedBytes() const { - return GetBufferSize() - (tail_ - temp_); -} - -size_t SimpleMemoryAllocator::GetBufferSize() const { - return buffer_tail_ - buffer_head_; -} - -uint8_t* SimpleMemoryAllocator::head() const { return head_; } - -uint8_t* SimpleMemoryAllocator::tail() const { return tail_; } - -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h deleted file mode 100644 index 3ee2f36..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/simple_memory_allocator.h +++ /dev/null @@ -1,112 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#ifndef TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ -#define TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ - -#include -#include - -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" - -namespace tflite { - -// TODO(petewarden): This allocator never frees up or reuses any memory, even -// though we have enough information about lifetimes of the tensors to do so. -// This makes it pretty wasteful, so we should use a more intelligent method. -class SimpleMemoryAllocator { - public: - // TODO(b/157615197): Cleanup constructors/destructor and use factory - // functions. - SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer_head, - uint8_t* buffer_tail); - SimpleMemoryAllocator(ErrorReporter* error_reporter, uint8_t* buffer, - size_t buffer_size); - virtual ~SimpleMemoryAllocator(); - - // Creates a new SimpleMemoryAllocator from a given buffer head and size. - static SimpleMemoryAllocator* Create(ErrorReporter* error_reporter, - uint8_t* buffer_head, - size_t buffer_size); - - // Adjusts the head (lowest address and moving upwards) memory allocation to a - // given size. 
Calls to this method will also invalidate all temporary - // allocation values (it sets the location of temp space at the end of the - // head section). This call will fail if a chain of allocations through - // AllocateTemp() have not been cleaned up with a call to - // ResetTempAllocations(). - virtual TfLiteStatus SetHeadBufferSize(size_t size, size_t alignment); - - // Allocates memory starting at the tail of the arena (highest address and - // moving downwards). - virtual uint8_t* AllocateFromTail(size_t size, size_t alignment); - - // Allocates a temporary buffer from the head of the arena (lowest address and - // moving upwards) but does not update the actual head allocation size or - // position. The returned buffer is guaranteed until either - // ResetTempAllocations() is called or another call to AllocateFromHead(). - // Repeat calls to this function will create a chain of temp allocations. All - // calls to AllocateTemp() must end with a call to ResetTempAllocations(). If - // AllocateFromHead() is called before a call to ResetTempAllocations(), it - // will fail with an error message. - virtual uint8_t* AllocateTemp(size_t size, size_t alignment); - - // Resets a chain of temporary allocations back to the current head of the - // arena (lowest address). - virtual void ResetTempAllocations(); - - // Returns a pointer to the buffer currently assigned to the head section. - // This buffer is set by calling SetHeadSize(). - uint8_t* GetHeadBuffer() const; - - // Returns the size of the head section in bytes. - size_t GetHeadUsedBytes() const; - - // Returns the size of all allocations in the tail section in bytes. - size_t GetTailUsedBytes() const; - - // Returns the number of bytes available with a given alignment. This number - // takes in account any temporary allocations. - size_t GetAvailableMemory(size_t alignment) const; - - // Returns the number of used bytes in the allocator. This number takes in - // account any temporary allocations. - size_t GetUsedBytes() const; - - protected: - // Returns a pointer to the current end of the head buffer. - uint8_t* head() const; - - // Returns a pointer to the current end of the tail buffer. - uint8_t* tail() const; - - private: - size_t GetBufferSize() const; - - ErrorReporter* error_reporter_; - uint8_t* buffer_head_; - uint8_t* buffer_tail_; - uint8_t* head_; - uint8_t* tail_; - uint8_t* temp_; - - TF_LITE_REMOVE_VIRTUAL_DELETE -}; - -} // namespace tflite - -#endif // TENSORFLOW_LITE_MICRO_SIMPLE_MEMORY_ALLOCATOR_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.cc new file mode 100644 index 0000000..1015b53 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.cc @@ -0,0 +1,199 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h" + +#include +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/c_api_types.h" +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/op_macros.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_log.h" + +namespace tflite { + +SingleArenaBufferAllocator::SingleArenaBufferAllocator(uint8_t* buffer_head, + uint8_t* buffer_tail) + : buffer_head_(buffer_head), + buffer_tail_(buffer_tail), + head_(buffer_head), + tail_(buffer_tail), + temp_(buffer_head_) {} + +SingleArenaBufferAllocator::SingleArenaBufferAllocator(uint8_t* buffer, + size_t buffer_size) + : SingleArenaBufferAllocator(buffer, buffer + buffer_size) {} + +/* static */ +SingleArenaBufferAllocator* SingleArenaBufferAllocator::Create( + uint8_t* buffer_head, size_t buffer_size) { + TFLITE_DCHECK(buffer_head != nullptr); + SingleArenaBufferAllocator tmp = + SingleArenaBufferAllocator(buffer_head, buffer_size); + + // Allocate enough bytes from the buffer to create a + // SingleArenaBufferAllocator. The new instance will use the current adjusted + // tail buffer from the tmp allocator instance. + uint8_t* allocator_buffer = tmp.AllocatePersistentBuffer( + sizeof(SingleArenaBufferAllocator), alignof(SingleArenaBufferAllocator)); + // Use the default copy constructor to populate internal states. + return new (allocator_buffer) SingleArenaBufferAllocator(tmp); +} + +SingleArenaBufferAllocator::~SingleArenaBufferAllocator() {} + +uint8_t* SingleArenaBufferAllocator::AllocateResizableBuffer(size_t size, + size_t alignment) { + // Only supports one resizable buffer, which starts at the buffer head. + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + if (ResizeBuffer(expect_resizable_buf, size, alignment) == kTfLiteOk) { + return expect_resizable_buf; + } + return nullptr; +} + +TfLiteStatus SingleArenaBufferAllocator::DeallocateResizableBuffer( + uint8_t* resizable_buf) { + return ResizeBuffer(resizable_buf, 0, 1); +} + +TfLiteStatus SingleArenaBufferAllocator::ReserveNonPersistentOverlayMemory( + size_t size, size_t alignment) { + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + return ResizeBuffer(expect_resizable_buf, size, alignment); +} + +TfLiteStatus SingleArenaBufferAllocator::ResizeBuffer(uint8_t* resizable_buf, + size_t size, + size_t alignment) { + // Only supports one resizable buffer, which starts at the buffer head. + uint8_t* expect_resizable_buf = AlignPointerUp(buffer_head_, alignment); + if (head_ != temp_ || resizable_buf != expect_resizable_buf) { + MicroPrintf( + "Internal error: either buffer is not resizable or " + "ResetTempAllocations() is not called before ResizeBuffer()."); + return kTfLiteError; + } + + uint8_t* const aligned_result = AlignPointerUp(buffer_head_, alignment); + const size_t available_memory = tail_ - aligned_result; + if (available_memory < size) { + MicroPrintf( + "Failed to resize buffer. 
Requested: %u, available %u, missing: %u", + size, available_memory, size - available_memory); + return kTfLiteError; + } + head_ = aligned_result + size; + temp_ = head_; + + return kTfLiteOk; +} + +uint8_t* SingleArenaBufferAllocator::AllocatePersistentBuffer( + size_t size, size_t alignment) { + uint8_t* const aligned_result = AlignPointerDown(tail_ - size, alignment); + if (aligned_result < head_) { +#ifndef TF_LITE_STRIP_ERROR_STRINGS + const size_t missing_memory = head_ - aligned_result; + MicroPrintf( + "Failed to allocate tail memory. Requested: %u, " + "available %u, missing: %u", + size, size - missing_memory, missing_memory); +#endif + return nullptr; + } + tail_ = aligned_result; + return aligned_result; +} + +uint8_t* SingleArenaBufferAllocator::AllocateTemp(size_t size, + size_t alignment) { + uint8_t* const aligned_result = AlignPointerUp(temp_, alignment); + const size_t available_memory = tail_ - aligned_result; + if (available_memory < size) { + MicroPrintf( + "Failed to allocate temp memory. Requested: %u, " + "available %u, missing: %u", + size, available_memory, size - available_memory); + return nullptr; + } + temp_ = aligned_result + size; + temp_buffer_ptr_check_sum_ ^= (reinterpret_cast(aligned_result)); + temp_buffer_count_++; + return aligned_result; +} + +void SingleArenaBufferAllocator::DeallocateTemp(uint8_t* temp_buf) { + temp_buffer_ptr_check_sum_ ^= (reinterpret_cast(temp_buf)); + temp_buffer_count_--; +} + +bool SingleArenaBufferAllocator::IsAllTempDeallocated() { + if (temp_buffer_count_ != 0 || temp_buffer_ptr_check_sum_ != 0) { + MicroPrintf( + "Number of allocated temp buffers: %d. Checksum passing status: %d", + temp_buffer_count_, !temp_buffer_ptr_check_sum_); + return false; + } + return true; +} + +TfLiteStatus SingleArenaBufferAllocator::ResetTempAllocations() { + // TODO(b/209453859): enable error check based on IsAllTempDeallocated after + // all AllocateTemp have been paird with DeallocateTemp + if (!IsAllTempDeallocated()) { + MicroPrintf( + "All temp buffers must be freed before calling ResetTempAllocations()"); + return kTfLiteError; + } + temp_ = head_; + return kTfLiteOk; +} + +uint8_t* SingleArenaBufferAllocator::GetOverlayMemoryAddress() const { + return buffer_head_; +} + +size_t SingleArenaBufferAllocator::GetNonPersistentUsedBytes() const { + return std::max(head_ - buffer_head_, temp_ - buffer_head_); +} + +size_t SingleArenaBufferAllocator::GetPersistentUsedBytes() const { + return buffer_tail_ - tail_; +} + +size_t SingleArenaBufferAllocator::GetAvailableMemory(size_t alignment) const { + uint8_t* const aligned_temp = AlignPointerUp(temp_, alignment); + uint8_t* const aligned_tail = AlignPointerDown(tail_, alignment); + return aligned_tail - aligned_temp; +} + +size_t SingleArenaBufferAllocator::GetUsedBytes() const { + return GetPersistentUsedBytes() + GetNonPersistentUsedBytes(); +} + +size_t SingleArenaBufferAllocator::GetBufferSize() const { + return buffer_tail_ - buffer_head_; +} + +uint8_t* SingleArenaBufferAllocator::head() const { return head_; } + +uint8_t* SingleArenaBufferAllocator::tail() const { return tail_; } + +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h new file mode 100644 index 0000000..730ee73 --- /dev/null +++ 
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h @@ -0,0 +1,144 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ +#define TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ + +#include +#include + +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/ibuffer_allocator.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/compatibility.h" + +namespace tflite { + +// TODO(petewarden): This allocator never frees up or reuses any memory, even +// though we have enough information about lifetimes of the tensors to do so. +// This makes it pretty wasteful, so we should use a more intelligent method. +class SingleArenaBufferAllocator : public INonPersistentBufferAllocator, + public IPersistentBufferAllocator { + public: + // TODO(b/157615197): Cleanup constructors/destructor and use factory + // functions. + SingleArenaBufferAllocator(uint8_t* buffer_head, uint8_t* buffer_tail); + SingleArenaBufferAllocator(uint8_t* buffer, size_t buffer_size); + virtual ~SingleArenaBufferAllocator(); + + // Creates a new SingleArenaBufferAllocator from a given buffer head and size. + static SingleArenaBufferAllocator* Create(uint8_t* buffer_head, + size_t buffer_size); + + // Resizes a buffer that is previously returned by the + // AllocateResizableBuffer. In current implementation, it Adjusts the head + // (lowest address and moving upwards) memory allocation to a given size. + // Calls to this method will also invalidate all temporary allocation values + // (it sets the location of temp space at the end of the head section). This + // call will fail if a chain of allocations through AllocateTemp() have not + // been cleaned up with a call to ResetTempAllocations(). + virtual TfLiteStatus ResizeBuffer(uint8_t* resizable_buf, size_t size, + size_t alignment) override; + + // Returns a buffer that is resizable viable ResizeBuffer(). Only one + // resizable buffer is currently supported. + virtual uint8_t* AllocateResizableBuffer(size_t size, + size_t alignment) override; + + // Frees up the memory occupied by the resizable buffer + virtual TfLiteStatus DeallocateResizableBuffer( + uint8_t* resizable_buf) override; + + // Reserves the non-persistent memory that is planned by the memory planner. + virtual TfLiteStatus ReserveNonPersistentOverlayMemory( + size_t size, size_t alignment) override; + + // Allocates persistent memory starting at the tail of the arena (highest + // address and moving downwards). + virtual uint8_t* AllocatePersistentBuffer(size_t size, + size_t alignment) override; + + // Allocates a temporary buffer from the head of the arena (lowest address and + // moving upwards) but does not update the actual head allocation size or + // position. 
The returned buffer is guaranteed until either + // ResetTempAllocations() is called or another call to AllocateFromHead(). + // Repeat calls to this function will create a chain of temp allocations. All + // calls to AllocateTemp() must end with a call to ResetTempAllocations(). If + // AllocateFromHead() is called before a call to ResetTempAllocations(), it + // will fail with an error message. + virtual uint8_t* AllocateTemp(size_t size, size_t alignment) override; + + // Signals that a temporary buffer is no longer needed. This is currently for + // book-keeping purpose and the memory region are not immediately available + // for re-use. The deallocated memory region are only reclaimed after + // ResetTempAllocations is called as it is right now. + virtual void DeallocateTemp(uint8_t* buf) override; + + // Returns true if all temporary buffers are already deallocated. + virtual bool IsAllTempDeallocated() override; + + // Resets a chain of temporary allocations back to the current head of the + // arena (lowest address). + virtual TfLiteStatus ResetTempAllocations() override; + + // Returns a pointer to the buffer currently assigned to the head section. + // This buffer is set by calling SetHeadSize(). + uint8_t* GetOverlayMemoryAddress() const override; + + // Returns the size of the head section in bytes. + size_t GetNonPersistentUsedBytes() const override; + + // Returns the size of all allocations in the tail section in bytes. + size_t GetPersistentUsedBytes() const override; + + // Returns the number of bytes available with a given alignment. This number + // takes in account any temporary allocations. + size_t GetAvailableMemory(size_t alignment) const override; + + // Returns the number of used bytes in the allocator. This number takes in + // account any temporary allocations. + size_t GetUsedBytes() const; + + TF_LITE_REMOVE_VIRTUAL_DELETE + + protected: + // Returns a pointer to the current end of the head buffer. + uint8_t* head() const; + + // Returns a pointer to the current end of the tail buffer. + uint8_t* tail() const; + + private: + size_t GetBufferSize() const; + uint8_t* buffer_head_; + uint8_t* buffer_tail_; + uint8_t* head_; + uint8_t* tail_; + uint8_t* temp_; + + // The combination of the checksum of outstanding temporary buffer pointers + // AND the count of outstanding temporary buffer provide a low cost mechanism + // to audit temporary buffers' allocation and deallocation. + // + // XOR Check sum for outstanding temp buffers. + // If all temp buffers are deallocated OR no temp buffers are allocated, + // temp_buffer_ptr_check_sum_ == nullptr. + intptr_t temp_buffer_ptr_check_sum_ = 0; + // Count of outstanding temp buffers. 
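The comments above spell out the temp-buffer discipline for SingleArenaBufferAllocator: every AllocateTemp() is eventually paired with DeallocateTemp(), and the chain is only reclaimed by ResetTempAllocations(). A hedged usage sketch of that discipline; the arena and request sizes are arbitrary:

    #include <cstdint>

    #include "edge-impulse-sdk/tensorflow/lite/micro/single_arena_buffer_allocator.h"

    alignas(16) static uint8_t arena[4 * 1024];

    bool TempDisciplineExample() {
      tflite::SingleArenaBufferAllocator* allocator =
          tflite::SingleArenaBufferAllocator::Create(arena, sizeof(arena));
      uint8_t* scratch = allocator->AllocateTemp(/*size=*/256, /*alignment=*/16);
      if (scratch == nullptr) {
        return false;
      }
      // ... use the scratch buffer ...
      allocator->DeallocateTemp(scratch);  // bookkeeping only, memory not yet reclaimed
      return allocator->ResetTempAllocations() == kTfLiteOk;  // reclaims the chain
    }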
+ int temp_buffer_count_ = 0; +}; + +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_ARENA_ALLOCATOR_SINGLE_ARENA_BUFFER_ALLOCATOR_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/system_setup.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/system_setup.cc similarity index 100% rename from firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/system_setup.cpp rename to firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/system_setup.cc diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.cc new file mode 100644 index 0000000..fe4c836 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.cc @@ -0,0 +1,112 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.h" + +#include +#include +#include +#include +#include + +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +// TODO(b/170464050): Use TFLM test only version of schema_utils. + +namespace tflite { +namespace testing { + +const TfLiteRegistration* PackerOp::getRegistration() { + return GetMutableRegistration(); +} + +TfLiteRegistration* PackerOp::GetMutableRegistration() { + static TfLiteRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + r.free = Free; + return &r; +} + +void* PackerOp::Init(TfLiteContext* context, const char* buffer, + size_t length) { + freed_ = false; + // Do nothing. 
+ return nullptr; +} + +void PackerOp::Free(TfLiteContext* context, void* buffer) { freed_ = true; } + +TfLiteStatus PackerOp::Prepare(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +TfLiteStatus PackerOp::Invoke(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, 0); + TF_LITE_ENSURE(context, input1 != nullptr); + const int32_t* input1_data = input1->data.i32; + TF_LITE_ENSURE_EQ(context, input1->dims->size, 1); + const int32_t input1_len = input1->dims->data[0]; + + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, 1); + TF_LITE_ENSURE(context, input2 != nullptr); + const int32_t* input2_data = input2->data.i32; + TF_LITE_ENSURE_EQ(context, input2->dims->size, 1); + const int32_t input2_len = input2->dims->data[0]; + + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); + int32_t* output_data = output->data.i32; + int32_t output_len = output->dims->data[0]; + + // Fill output with input: first with the first tensor, then with the second + // tensor up to the size of the output tensor. + int cnt = 0; + int i; + for (i = 0; i < input1_len && cnt < output_len; i++, cnt++) { + output_data[cnt] = input1_data[i]; + } + if (cnt >= output_len) { + return kTfLiteOk; + } + + for (i = 0; i < input2_len && cnt < output_len; i++, cnt++) { + output_data[cnt] = input2_data[i]; + } + if (cnt >= output_len) { + return kTfLiteOk; + } + + for (; cnt < output_len; cnt++) { + output_data[cnt] = 0; + } + return kTfLiteOk; +} + +bool PackerOp::freed_ = false; + +} // namespace testing +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.h new file mode 100644 index 0000000..cbbbcec --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.h @@ -0,0 +1,50 @@ +/* Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ + +#ifndef TENSORFLOW_LITE_MICRO_TEST_HELPER_CUSTOM_OPS_H_ +#define TENSORFLOW_LITE_MICRO_TEST_HELPER_CUSTOM_OPS_H_ + +#include +#include + +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +namespace tflite { +namespace testing { + +class PackerOp { + public: + static const TfLiteRegistration* getRegistration(); + static TfLiteRegistration* GetMutableRegistration(); + static void* Init(TfLiteContext* context, const char* buffer, size_t length); + static void Free(TfLiteContext* context, void* buffer); + static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); + static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); + + private: + static bool freed_; +}; + +} // namespace testing +} // namespace tflite + +#endif // TENSORFLOW_LITE_MICRO_TEST_HELPER_CUSTOM_OPS_H_ diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.cc b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.cc new file mode 100644 index 0000000..d97caca --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.cc @@ -0,0 +1,2035 @@ +/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
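PackerOp::Invoke in the .cc added above concatenates its two int32 inputs into the output tensor and zero-fills whatever remains. The same packing rule, pulled out of the TFLite Micro kernel machinery into a standalone sketch with plain arrays (the helper name pack_two is invented):

    #include <cstdint>
    #include <cstdio>

    // Copy as much of `a` as fits, then as much of `b` as fits, then zero-fill.
    static void pack_two(const int32_t* a, int a_len,
                         const int32_t* b, int b_len,
                         int32_t* out, int out_len) {
      int cnt = 0;
      for (int i = 0; i < a_len && cnt < out_len; ++i) out[cnt++] = a[i];
      for (int i = 0; i < b_len && cnt < out_len; ++i) out[cnt++] = b[i];
      while (cnt < out_len) out[cnt++] = 0;
    }

    int main() {
      const int32_t a[] = {1, 2};
      const int32_t b[] = {3, 4, 5};
      int32_t out[7];
      pack_two(a, 2, b, 3, out, 7);
      for (int32_t v : out) printf("%d ", static_cast<int>(v));  // 1 2 3 4 5 0 0
      printf("\n");
      return 0;
    }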
+==============================================================================*/ + +#include "edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h" + +#include +#include +#include +#include +#include + +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers +#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/kernels/kernel_util.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/memory_helpers.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_arena_constants.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" +#include "edge-impulse-sdk/tensorflow/lite/micro/test_helper_custom_ops.h" +#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" + +// TODO(b/170464050): Use TFLM test only version of schema_utils. + +namespace tflite { +namespace testing { +namespace { + +class StackAllocator : public flatbuffers::Allocator { + public: + StackAllocator(size_t alignment) : data_size_(0) { + data_ = AlignPointerUp(data_backing_, alignment); + } + + uint8_t* allocate(size_t size) override { + TFLITE_DCHECK((data_size_ + size) <= kStackAllocatorSize); + uint8_t* result = data_; + data_ += size; + data_size_ += size; + return result; + } + + void deallocate(uint8_t* p, size_t) override {} + + static StackAllocator& instance(size_t alignment = 1) { + // Avoid using true dynamic memory allocation to be portable to bare metal. + static char inst_memory[sizeof(StackAllocator)]; + static StackAllocator* inst = new (inst_memory) StackAllocator(alignment); + return *inst; + } + + static constexpr size_t kStackAllocatorSize = 8192; + + private: + uint8_t data_backing_[kStackAllocatorSize]; + uint8_t* data_; + int data_size_; + + TF_LITE_REMOVE_VIRTUAL_DELETE +}; + +flatbuffers::FlatBufferBuilder* BuilderInstance() { + static char inst_memory[sizeof(flatbuffers::FlatBufferBuilder)]; + static flatbuffers::FlatBufferBuilder* inst = + new (inst_memory) flatbuffers::FlatBufferBuilder( + StackAllocator::kStackAllocatorSize, + &StackAllocator::instance(MicroArenaBufferAlignment())); + return inst; +} + +// A wrapper around FlatBuffer API to help build model easily. +class ModelBuilder { + public: + typedef int32_t Tensor; + typedef int Operator; + typedef int Node; + + // `builder` needs to be available until BuildModel is called. + explicit ModelBuilder(flatbuffers::FlatBufferBuilder* builder) + : builder_(builder) {} + + // Registers an operator that will be used in the model. + Operator RegisterOp(BuiltinOperator op, const char* custom_code); + + // Adds a tensor to the model. + Tensor AddTensor(TensorType type, std::initializer_list shape) { + return AddTensorImpl(type, /* is_variable */ false, shape); + } + + // Adds a variable tensor to the model. + Tensor AddVariableTensor(TensorType type, + std::initializer_list shape) { + return AddTensorImpl(type, /* is_variable */ true, shape); + } + + // Adds a node to the model with given input and output Tensors. 
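StackAllocator::instance() and BuilderInstance() above both avoid heap allocation by constructing their singleton with placement new inside a static character buffer, which keeps these test helpers usable on bare-metal targets without a heap. A reduced sketch of the same idiom (the Counter type and CounterInstance() name are invented):

    #include <new>  // placement new

    class Counter {
     public:
      int Next() { return ++value_; }

     private:
      int value_ = 0;
    };

    // Singleton built in static storage: no malloc / heap operator new.
    // The explicit alignas is added here for clarity; the raw char buffers in
    // the patch rely on the storage being suitably aligned.
    Counter& CounterInstance() {
      alignas(Counter) static char inst_memory[sizeof(Counter)];
      static Counter* inst = new (inst_memory) Counter();
      return *inst;
    }

Because the pointer is a function-local static, the constructor runs exactly once on first use, matching how the allocator and FlatBufferBuilder instances above are initialized.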
+ Node AddNode(Operator op, std::initializer_list inputs, + std::initializer_list outputs, + std::initializer_list intermediates = + std::initializer_list{}); + + void AddMetadata(const char* description_string, + const int32_t* metadata_buffer_data, size_t num_elements); + + // Constructs the flatbuffer model using `builder_` and return a pointer to + // it. The returned model has the same lifetime as `builder_`. + // Note the default value of 0 for num_subgraph_inputs means all tensor inputs + // are in subgraph input list. + const Model* BuildModel(std::initializer_list inputs, + std::initializer_list outputs, + size_t num_subgraph_inputs = 0); + + private: + // Adds a tensor to the model. + Tensor AddTensorImpl(TensorType type, bool is_variable, + std::initializer_list shape); + + flatbuffers::FlatBufferBuilder* builder_; + + static constexpr int kMaxOperatorCodes = 10; + flatbuffers::Offset operator_codes_[kMaxOperatorCodes]; + int next_operator_code_id_ = 0; + + static constexpr int kMaxOperators = 50; + flatbuffers::Offset operators_[kMaxOperators]; + int next_operator_id_ = 0; + + static constexpr int kMaxTensors = 50; + flatbuffers::Offset tensors_[kMaxTensors]; + + static constexpr int kMaxMetadataBuffers = 10; + + static constexpr int kMaxMetadatas = 10; + flatbuffers::Offset metadata_[kMaxMetadatas]; + + flatbuffers::Offset metadata_buffers_[kMaxMetadataBuffers]; + + int nbr_of_metadata_buffers_ = 0; + + int next_tensor_id_ = 0; +}; + +ModelBuilder::Operator ModelBuilder::RegisterOp(BuiltinOperator op, + const char* custom_code) { + TFLITE_DCHECK(next_operator_code_id_ <= kMaxOperatorCodes); + operator_codes_[next_operator_code_id_] = tflite::CreateOperatorCodeDirect( + *builder_, /*deprecated_builtin_code=*/0, custom_code, /*version=*/0, op); + next_operator_code_id_++; + return next_operator_code_id_ - 1; +} + +ModelBuilder::Node ModelBuilder::AddNode( + ModelBuilder::Operator op, + std::initializer_list inputs, + std::initializer_list outputs, + std::initializer_list intermediates) { + TFLITE_DCHECK(next_operator_id_ <= kMaxOperators); + operators_[next_operator_id_] = tflite::CreateOperator( + *builder_, op, builder_->CreateVector(inputs.begin(), inputs.size()), + builder_->CreateVector(outputs.begin(), outputs.size()), + BuiltinOptions_NONE, + /*builtin_options=*/0, + /*custom_options=*/0, tflite::CustomOptionsFormat_FLEXBUFFERS, + /*mutating_variable_inputs =*/0, + builder_->CreateVector(intermediates.begin(), intermediates.size())); + next_operator_id_++; + return next_operator_id_ - 1; +} + +void ModelBuilder::AddMetadata(const char* description_string, + const int32_t* metadata_buffer_data, + size_t num_elements) { + metadata_[ModelBuilder::nbr_of_metadata_buffers_] = + CreateMetadata(*builder_, builder_->CreateString(description_string), + 1 + ModelBuilder::nbr_of_metadata_buffers_); + + metadata_buffers_[nbr_of_metadata_buffers_] = tflite::CreateBuffer( + *builder_, builder_->CreateVector((uint8_t*)metadata_buffer_data, + sizeof(uint32_t) * num_elements)); + + ModelBuilder::nbr_of_metadata_buffers_++; +} + +const Model* ModelBuilder::BuildModel( + std::initializer_list inputs, + std::initializer_list outputs, + size_t num_subgraph_inputs) { + // Model schema requires an empty buffer at idx 0. 
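ModelBuilder above keeps operator codes, operators and tensors in fixed-size arrays bounded by the kMax* constants, guards overflow with TFLITE_DCHECK, and hands the array index back as the caller's handle, so building a test model needs no dynamic allocation. A reduced sketch of that append-into-a-fixed-array idiom (FixedRegistry is an invented name; assert stands in for TFLITE_DCHECK):

    #include <cassert>
    #include <cstddef>

    // Append-only registry with a compile-time capacity; Add() returns the
    // index that later calls use as a handle, mirroring ModelBuilder's
    // RegisterOp() / AddTensor() style.
    template <typename T, size_t kCapacity>
    class FixedRegistry {
     public:
      int Add(const T& value) {
        assert(count_ < kCapacity);  // stands in for TFLITE_DCHECK
        items_[count_] = value;
        return static_cast<int>(count_++);
      }
      const T& Get(int index) const { return items_[index]; }
      size_t size() const { return count_; }

     private:
      T items_[kCapacity] = {};
      size_t count_ = 0;
    };

    // Usage: FixedRegistry<int, 10> ops; int handle = ops.Add(42); ops.Get(handle);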
+ size_t buffer_size = 1 + ModelBuilder::nbr_of_metadata_buffers_; + flatbuffers::Offset buffers[kMaxMetadataBuffers]; + buffers[0] = tflite::CreateBuffer(*builder_); + + // Place the metadata buffers first in the buffer since the indices for them + // have already been set in AddMetadata() + for (int i = 1; i < ModelBuilder::nbr_of_metadata_buffers_ + 1; ++i) { + buffers[i] = metadata_buffers_[i - 1]; + } + + // Default to single subgraph model. + constexpr size_t subgraphs_size = 1; + + // Find out number of subgraph inputs. + if (num_subgraph_inputs == 0) { + // This is the default case. + num_subgraph_inputs = inputs.size(); + } else { + // A non-zero value of num_subgraph_inputs means that some of + // the operator input tensors are not subgraph inputs. + TFLITE_DCHECK(num_subgraph_inputs <= inputs.size()); + } + + const flatbuffers::Offset subgraphs[subgraphs_size] = { + tflite::CreateSubGraph( + *builder_, builder_->CreateVector(tensors_, next_tensor_id_), + builder_->CreateVector(inputs.begin(), num_subgraph_inputs), + builder_->CreateVector(outputs.begin(), outputs.size()), + builder_->CreateVector(operators_, next_operator_id_), + builder_->CreateString("test_subgraph"))}; + + flatbuffers::Offset model_offset; + if (ModelBuilder::nbr_of_metadata_buffers_ > 0) { + model_offset = tflite::CreateModel( + *builder_, 0, + builder_->CreateVector(operator_codes_, next_operator_code_id_), + builder_->CreateVector(subgraphs, subgraphs_size), + builder_->CreateString("teset_model"), + builder_->CreateVector(buffers, buffer_size), 0, + builder_->CreateVector(metadata_, + ModelBuilder::nbr_of_metadata_buffers_)); + } else { + model_offset = tflite::CreateModel( + *builder_, 0, + builder_->CreateVector(operator_codes_, next_operator_code_id_), + builder_->CreateVector(subgraphs, subgraphs_size), + builder_->CreateString("teset_model"), + builder_->CreateVector(buffers, buffer_size)); + } + + tflite::FinishModelBuffer(*builder_, model_offset); + void* model_pointer = builder_->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +ModelBuilder::Tensor ModelBuilder::AddTensorImpl( + TensorType type, bool is_variable, std::initializer_list shape) { + TFLITE_DCHECK(next_tensor_id_ <= kMaxTensors); + tensors_[next_tensor_id_] = tflite::CreateTensor( + *builder_, builder_->CreateVector(shape.begin(), shape.size()), type, + /* buffer */ 0, /* name */ 0, /* quantization */ 0, + /* is_variable */ is_variable, + /* sparsity */ 0); + next_tensor_id_++; + return next_tensor_id_ - 1; +} + +const Model* BuildSimpleStatefulModel() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); + + ModelBuilder model_builder(fb_builder); + + const int op_id = + model_builder.RegisterOp(BuiltinOperator_CUSTOM, "simple_stateful_op"); + const int input_tensor = model_builder.AddTensor(TensorType_INT8, {3}); + const int median_tensor = model_builder.AddTensor(TensorType_INT8, {3}); + const int invoke_count_tensor = + model_builder.AddTensor(TensorType_INT32, {1}); + const int intermediate_tensor = + model_builder.AddTensor(TensorType_FLOAT32, {0}); + + model_builder.AddNode(op_id, {input_tensor}, + {median_tensor, invoke_count_tensor}, + {intermediate_tensor}); + return model_builder.BuildModel({input_tensor}, + {median_tensor, invoke_count_tensor}); +} + +const Model* BuildSimpleModelWithBranch() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); + + ModelBuilder 
model_builder(fb_builder); + /* Model structure + | t0 + +------| + | v + | +---------+ + | | n0 | + | | | + | +---------+ + v + + | + +---------+ | t1 + | n1 | | + | | | + +---------+ | + | | + t2 | v + | +---------+ + +-->| n2 | + | | + +-------|-+ + |t3 + v + */ + const int op_id = + model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom"); + const int t0 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); + const int t1 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); + const int t2 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); + const int t3 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); + model_builder.AddNode(op_id, {t0}, {t1}); // n0 + model_builder.AddNode(op_id, {t0}, {t2}); // n1 + model_builder.AddNode(op_id, {t1, t2}, {t3}); // n2 + return model_builder.BuildModel({t0}, {t3}); +} + +const Model* BuildModelWithOfflinePlanning(int number_of_tensors, + const int32_t* metadata_buffer, + NodeConnection* node_conn, + int num_conns, + int num_subgraph_inputs) { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); + + ModelBuilder model_builder(fb_builder); + + const int op_id = + model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom"); + + for (int i = 0; i < number_of_tensors; ++i) { + model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); + } + + for (int i = 0; i < num_conns; ++i) { + model_builder.AddNode(op_id, node_conn[i].input, node_conn[i].output); + } + + model_builder.AddMetadata( + "OfflineMemoryAllocation", metadata_buffer, + number_of_tensors + tflite::testing::kOfflinePlannerHeaderSize); + + return model_builder.BuildModel( + node_conn[0].input, node_conn[num_conns - 1].output, num_subgraph_inputs); +} + +const Model* BuildModelWithUnusedInputs() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = {CreateBuffer(*builder)}; + constexpr size_t tensor_shape_size = 2; + const int32_t tensor_shape[tensor_shape_size] = {1, 64}; + constexpr size_t tensors_size = 4; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_input_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_unused_input_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_output_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_unused_tensor"), 0, false), + }; + constexpr size_t inputs_size = 2; + const int32_t inputs[inputs_size] = {0, 1}; + constexpr size_t outputs_size = 1; + const int32_t outputs[outputs_size] = {2}; + constexpr size_t operator_inputs_size = 1; + const int32_t operator_inputs[operator_inputs_size] = {0}; + constexpr size_t operator_outputs_size = 1; + const int32_t operator_outputs[operator_outputs_size] = {2}; + constexpr size_t operators_size = 1; + const Offset operators[operators_size] = { + CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 1; + const Offset 
subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(outputs, outputs_size), + builder->CreateVector(operators, operators_size), + builder->CreateString("test_subgraph"))}; + constexpr size_t operator_codes_size = 1; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "mock_custom", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildModelWithUnusedOperatorOutputs() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = {CreateBuffer(*builder)}; + constexpr size_t tensor_shape_size = 2; + const int32_t tensor_shape[tensor_shape_size] = {1, 64}; + constexpr size_t tensors_size = 2; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_input_tensor"), 0, false), + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_unused_output_tensor"), 0, false)}; + constexpr size_t inputs_size = 1; + const int32_t inputs[inputs_size] = {}; + constexpr size_t outputs_size = 1; + const int32_t outputs[outputs_size] = {0}; + constexpr size_t operator_inputs_size = 1; + const int32_t operator_inputs[operator_inputs_size] = {}; + constexpr size_t operator_outputs_size = 2; + const int32_t operator_outputs[operator_outputs_size] = {0, 1}; + constexpr size_t operators_size = 1; + const Offset operators[operators_size] = { + CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 1; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(outputs, outputs_size), + builder->CreateVector(operators, operators_size), + builder->CreateString("test_subgraph"))}; + constexpr size_t operator_codes_size = 1; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "mock_custom", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildModelWith256x256Tensor() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); + + ModelBuilder 
model_builder(fb_builder); + + const int op_id = + model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom"); + const int input1_tensor = + model_builder.AddTensor(TensorType_INT8, {256, 256}); + const int input2_tensor = + model_builder.AddTensor(TensorType_INT8, {256, 256}); + const int output_tensor = + model_builder.AddTensor(TensorType_INT8, {256, 256}); + + model_builder.AddNode(op_id, {input1_tensor, input2_tensor}, {output_tensor}); + return model_builder.BuildModel({input1_tensor, input2_tensor}, + {output_tensor}); +} + +const Model* BuildSimpleMockModel() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffer_data_size = 1; + const uint8_t buffer_data[buffer_data_size] = {21}; + constexpr size_t buffers_size = 2; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + CreateBuffer(*builder, + builder->CreateVector(buffer_data, buffer_data_size))}; + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {1}; + constexpr size_t tensors_size = 4; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_input_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 1, + builder->CreateString("test_weight_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_output_tensor"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_output2_tensor"), 0, false), + }; + constexpr size_t inputs_size = 1; + const int32_t inputs[inputs_size] = {0}; + constexpr size_t outputs_size = 2; + const int32_t outputs[outputs_size] = {2, 3}; + constexpr size_t operator_inputs_size = 2; + const int32_t operator_inputs[operator_inputs_size] = {0, 1}; + constexpr size_t operator_outputs_size = 1; + const int32_t operator_outputs[operator_outputs_size] = {2}; + const int32_t operator2_outputs[operator_outputs_size] = {3}; + constexpr size_t operators_size = 2; + const Offset operators[operators_size] = { + CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE), + CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator2_outputs, operator_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 1; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(outputs, outputs_size), + builder->CreateVector(operators, operators_size), + builder->CreateString("test_subgraph"))}; + constexpr size_t operator_codes_size = 1; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "mock_custom", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + 
FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildComplexMockModel() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffer_data_size = 1; + const uint8_t buffer_data_1[buffer_data_size] = {21}; + const uint8_t buffer_data_2[buffer_data_size] = {21}; + const uint8_t buffer_data_3[buffer_data_size] = {21}; + constexpr size_t buffers_size = 7; + const Offset buffers[buffers_size] = { + // Op 1 buffers: + CreateBuffer(*builder), + CreateBuffer(*builder), + CreateBuffer(*builder, + builder->CreateVector(buffer_data_1, buffer_data_size)), + // Op 2 buffers: + CreateBuffer(*builder), + CreateBuffer(*builder, + builder->CreateVector(buffer_data_2, buffer_data_size)), + // Op 3 buffers: + CreateBuffer(*builder), + CreateBuffer(*builder, + builder->CreateVector(buffer_data_3, buffer_data_size)), + }; + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {1}; + + constexpr size_t tensors_size = 10; + const Offset tensors[tensors_size] = { + // Op 1 inputs: + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, builder->CreateString("test_input_tensor_1"), 0, + false /* is_variable */), + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 1, builder->CreateString("test_variable_tensor_1"), + 0, true /* is_variable */), + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 2, builder->CreateString("test_weight_tensor_1"), 0, + false /* is_variable */), + // Op 1 output / Op 2 input: + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, builder->CreateString("test_output_tensor_1"), 0, + false /* is_variable */), + // Op 2 inputs: + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 1, builder->CreateString("test_variable_tensor_2"), + 0, true /* is_variable */), + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 2, builder->CreateString("test_weight_tensor_2"), 0, + false /* is_variable */), + // Op 2 output / Op 3 input: + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, builder->CreateString("test_output_tensor_2"), 0, + false /* is_variable */), + // Op 3 inputs: + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 1, builder->CreateString("test_variable_tensor_3"), + 0, true /* is_variable */), + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 2, builder->CreateString("test_weight_tensor_3"), 0, + false /* is_variable */), + // Op 3 output: + CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, builder->CreateString("test_output_tensor_3"), 0, + false /* is_variable */), + }; + + constexpr size_t operators_size = 3; + Offset operators[operators_size]; + { + // Set Op 1 attributes: + constexpr size_t operator_inputs_size = 3; + const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2}; + constexpr size_t operator_outputs_size = 1; + const int32_t operator_outputs[operator_outputs_size] = {3}; + + operators[0] = {CreateOperator( + 
*builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE)}; + } + + { + // Set Op 2 attributes + constexpr size_t operator_inputs_size = 3; + const int32_t operator_inputs[operator_inputs_size] = {3, 4, 5}; + constexpr size_t operator_outputs_size = 1; + const int32_t operator_outputs[operator_outputs_size] = {6}; + + operators[1] = {CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE)}; + } + + { + // Set Op 3 attributes + constexpr size_t operator_inputs_size = 3; + const int32_t operator_inputs[operator_inputs_size] = {6, 7, 8}; + constexpr size_t operator_outputs_size = 1; + const int32_t operator_outputs[operator_outputs_size] = {9}; + + operators[2] = {CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE)}; + } + + constexpr size_t inputs_size = 1; + const int32_t inputs[inputs_size] = {0}; + constexpr size_t outputs_size = 1; + const int32_t outputs[outputs_size] = {9}; + + constexpr size_t subgraphs_size = 1; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(outputs, outputs_size), + builder->CreateVector(operators, operators_size), + builder->CreateString("test_subgraph"))}; + + constexpr size_t operator_codes_size = 1; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "mock_custom", + /*version=*/0, BuiltinOperator_CUSTOM)}; + + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildSimpleMultipleInputsModel() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {1}; + constexpr size_t tensors_size = 4; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_input_tensor1"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT8, 0, + builder->CreateString("test_input_tensor2"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_input_tensor3"), 0, false), + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_output_tensor"), 0, false), + }; + constexpr size_t inputs_size = 3; + const int32_t inputs[inputs_size] = {0, 1, 2}; + constexpr size_t outputs_size = 1; + const int32_t outputs[outputs_size] = {3}; + constexpr size_t 
operator_inputs_size = 3; + const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2}; + constexpr size_t operator_outputs_size = 1; + const int32_t operator_outputs[operator_outputs_size] = {3}; + constexpr size_t operators_size = 1; + const Offset operators[operators_size] = { + CreateOperator( + *builder, 0, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, operator_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 1; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(outputs, outputs_size), + builder->CreateVector(operators, operators_size), + builder->CreateString("test_subgraph"))}; + constexpr size_t operator_codes_size = 1; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildSimpleModelWithSubgraphsAndIf() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + const int32_t condition_tensor_shape[] = {1}; + const int32_t data_tensor_shape[] = {1, 2}; + constexpr size_t tensors_size = 4; + const Offset subgraph1_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(condition_tensor_shape, 1), + TensorType_BOOL, 0, + builder->CreateString("condition tensor"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + const Offset subgraph2_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + const Offset subgraph3_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + + constexpr 
size_t if_inputs_size = 3; + const int32_t if_inputs[if_inputs_size] = {0, 1, 2}; + constexpr size_t outputs_size = 1; + const int32_t if_outputs[outputs_size] = {3}; + constexpr size_t operator_inputs_size = 2; + const int32_t operator_inputs[operator_inputs_size] = {0, 1}; + const int32_t operator_outputs[outputs_size] = {2}; + constexpr size_t operators_size = 1; + const Offset subgraph1_operators[operators_size] = { + CreateOperator( + *builder, 0, builder->CreateVector(if_inputs, if_inputs_size), + builder->CreateVector(if_outputs, outputs_size), + BuiltinOptions_IfOptions, CreateIfOptions(*builder, 1, 2).Union()), + }; + const Offset subgraph2_operators[operators_size] = { + CreateOperator( + *builder, 1, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + BuiltinOptions_NONE), + }; + const Offset subgraph3_operators[operators_size] = { + CreateOperator( + *builder, 2, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 3; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(subgraph1_tensors, 4), + builder->CreateVector(if_inputs, if_inputs_size), + builder->CreateVector(if_outputs, outputs_size), + builder->CreateVector(subgraph1_operators, operators_size), + builder->CreateString("if_subgraph")), + CreateSubGraph( + *builder, builder->CreateVector(subgraph2_tensors, 3), + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + builder->CreateVector(subgraph2_operators, operators_size), + builder->CreateString("then_subgraph")), + CreateSubGraph( + *builder, builder->CreateVector(subgraph3_tensors, 3), + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + builder->CreateVector(subgraph3_operators, operators_size), + builder->CreateString("else_subgraph")), + }; + constexpr size_t operator_codes_size = 3; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_IF), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_ADD), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_MUL), + }; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildSimpleModelWithIfAndEmptySubgraph() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + const int32_t condition_tensor_shape[] = {1}; + const int32_t data_tensor_shape[] = {1, 2}; + constexpr size_t tensors_size = 4; + const Offset subgraph1_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(condition_tensor_shape, 1), + TensorType_BOOL, 0, + 
builder->CreateString("condition tensor"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + const Offset subgraph2_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + const Offset subgraph3_tensors[tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 2), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor"), 0, false), + }; + + constexpr size_t if_inputs_size = 3; + const int32_t if_inputs[if_inputs_size] = {0, 1, 2}; + constexpr size_t outputs_size = 1; + const int32_t if_outputs[outputs_size] = {3}; + constexpr size_t operator_inputs_size = 2; + const int32_t operator_inputs[operator_inputs_size] = {0, 1}; + const int32_t operator_outputs[outputs_size] = {2}; + constexpr size_t operators_size = 1; + const Offset subgraph1_operators[operators_size] = { + CreateOperator( + *builder, 0, builder->CreateVector(if_inputs, if_inputs_size), + builder->CreateVector(if_outputs, outputs_size), + BuiltinOptions_IfOptions, CreateIfOptions(*builder, 1, 2).Union()), + }; + const Offset subgraph2_operators[operators_size] = { + CreateOperator( + *builder, 1, + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 3; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(subgraph1_tensors, 4), + builder->CreateVector(if_inputs, if_inputs_size), + builder->CreateVector(if_outputs, outputs_size), + builder->CreateVector(subgraph1_operators, operators_size), + builder->CreateString("if_subgraph")), + CreateSubGraph( + *builder, builder->CreateVector(subgraph2_tensors, 3), + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), + builder->CreateVector(subgraph2_operators, operators_size), + builder->CreateString("then_subgraph")), + CreateSubGraph( + *builder, builder->CreateVector(subgraph3_tensors, 3), + builder->CreateVector(operator_inputs, operator_inputs_size), + builder->CreateVector(operator_outputs, outputs_size), 0, + builder->CreateString("else_subgraph")), + }; + constexpr size_t operator_codes_size = 3; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_IF), + 
CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_ADD), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_MUL), + }; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +const Model* BuildSimpleModelWithSubgraphsAndWhile() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + const int32_t data_tensor_shape[] = {1, 1}; + constexpr size_t while_tensors_size = 4; + constexpr size_t op_tensors_size = 3; + const Offset subgraph0_tensors[while_tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor0"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor0"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor1"), 0, false), + }; + const Offset subgraph1_tensors[op_tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_BOOL, 0, + builder->CreateString("condition_tensor"), 0, false), + }; + const Offset subgraph2_tensors[op_tensors_size] = { + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor0"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(data_tensor_shape, 1), + TensorType_FLOAT32, 0, + builder->CreateString("output_tensor0"), 0, false), + }; + + constexpr size_t inputs_size = 2; + const int32_t inputs[inputs_size] = {0, 1}; + constexpr size_t while_outputs_size = 2; + const int32_t while_outputs[while_outputs_size] = {2, 3}; + constexpr size_t cond_outputs_size = 1; + const int32_t cond_outputs[cond_outputs_size] = {2}; + constexpr size_t add_outputs_size = 1; + const int32_t add_outputs[add_outputs_size] = {2}; + constexpr size_t add_subgraph_outputs_size = 2; + const int32_t add_subgraph_outputs[add_subgraph_outputs_size] = {2, 1}; + constexpr size_t operators_size = 1; + const Offset subgraph0_operators[operators_size] = { + CreateOperator(*builder, 0, builder->CreateVector(inputs, inputs_size), + builder->CreateVector(while_outputs, while_outputs_size), + BuiltinOptions_WhileOptions, + CreateWhileOptions(*builder, 1, 
2).Union()), + }; + const Offset subgraph1_operators[operators_size] = { + CreateOperator(*builder, 1, builder->CreateVector(inputs, inputs_size), + builder->CreateVector(cond_outputs, cond_outputs_size), + BuiltinOptions_NONE), + }; + const Offset subgraph2_operators[operators_size] = { + CreateOperator(*builder, 2, builder->CreateVector(inputs, inputs_size), + builder->CreateVector(add_outputs, add_outputs_size), + BuiltinOptions_NONE), + }; + constexpr size_t subgraphs_size = 3; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph(*builder, builder->CreateVector(subgraph0_tensors, 4), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(while_outputs, while_outputs_size), + builder->CreateVector(subgraph0_operators, operators_size), + builder->CreateString("while_subgraph")), + CreateSubGraph(*builder, builder->CreateVector(subgraph1_tensors, 3), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(cond_outputs, cond_outputs_size), + builder->CreateVector(subgraph1_operators, operators_size), + builder->CreateString("cond_subgraph")), + CreateSubGraph(*builder, builder->CreateVector(subgraph2_tensors, 3), + builder->CreateVector(inputs, inputs_size), + builder->CreateVector(add_subgraph_outputs, + add_subgraph_outputs_size), + builder->CreateVector(subgraph2_operators, operators_size), + builder->CreateString("body_subgraph")), + }; + constexpr size_t operator_codes_size = 3; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_WHILE), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_LESS), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "multiple_inputs_op", + /*version=*/0, BuiltinOperator_ADD), + }; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +// Build a model with If and two subgraphs: two data tensors A1 of size 2, A2 of +// size 4 are first concatenated, then cut to a new tensor A3 of size 3; the new +// tensor A3 of size 3 is then concatenated with A2 tensor of size 4 to produce +// a final output tensor A4. This model is specially crafted to capture the +// corner case outlined in go/avoid-memory-corruption-in-if-operator. 
+// +// Subgraph0 +// A0(1) A2_0(4) A1_0(2) +// | | | ---+ +// v v v | +// +--------------+ | +// | IF | | +// +------+-------+ | +// | A3_0(3) | +// v | +// +--------------+ | +// | CUSTOM |<---+ +// +------+-------+ +// | +// v +// A4_0(8) +// +// Subgraph1/2 +// A1_1(2) A2_1(4) +// | | +// v v +// +---------------+ +// | CUSTOM | +// +-------+-------+ +// | +// v A3_1(3) +// +// And it leads to memory plan as below +// +// Subgraph0 Layout +// +// +// <------------A4_0 -------------> <----- A2_0-------> <----A3_0 ---> +// +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+ +// | | | | | | | | | 3 | 4 | 5 | 6 | | | | +// +----+----+----+----+----+----+----+----+----+----+----+----+----+----+----+ +// +// +----+----+----+ +// | 1 | 2 | A0 | +// +----+----+----+ +// <---A1_0--> +// +// Subgraph 1 Layout +// +// +----+----+----+----+----+----+----+----+----+ +// | | | | | | | | | | +// +----+----+----+----+----+----+----+----+----+ +// +// +// <------A2_1 -------><----A3_1 ---><--A1_1---> +// +// +// A1_1 of subgraph 1 will overlap with A2_0 of subgraph 0. +// In a buggy implementation of IF, two overwrite may happen: +// 1. copying input from A1_0 to A1_1 overwrites A2_0 before A2_0 is copied to +// A2_1; thus subgraph 1 produce incorrect output. +// 2. copying output from A3_1 to A4_0 overwrites A1_0, which should remain +// intact so that it can be used by the OP after the IF operator in subgraph 0 +// + +const Model* BuildModelWithIfAndSubgraphInputTensorOverlap() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr TensorType kTensorType = TensorType_INT32; + constexpr int kBlockSize = + tflite::MicroArenaBufferAlignment() / sizeof(int32_t); + constexpr size_t kBuffersCount = 1; + const Offset buffers[kBuffersCount] = { + CreateBuffer(*builder), + }; + const int32_t kConditionTensorShape[] = {1}; + const int32_t kIfInput1TensorShape[] = {2 * kBlockSize}; + const int32_t kIfInput2TensorShape[] = {4 * kBlockSize}; + const int32_t kIfOutputTensorShape[] = {3 * kBlockSize}; + const int32_t kFinalOutputTensorShape[] = {8 * kBlockSize}; + constexpr size_t kSubgraph0TensorsCount = 5; + const Offset kSubgraph0Tensors[kSubgraph0TensorsCount] = { + CreateTensor(*builder, builder->CreateVector(kConditionTensorShape, 1), + TensorType_BOOL, 0, + builder->CreateString("condition tensor"), 0, false), + CreateTensor(*builder, builder->CreateVector(kIfInput1TensorShape, 1), + kTensorType, 0, builder->CreateString("if_input_tensor1"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kIfInput2TensorShape, 1), + kTensorType, 0, builder->CreateString("if_input_tensor2"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kIfOutputTensorShape, 1), + kTensorType, 0, builder->CreateString("if_output_tensor"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kFinalOutputTensorShape, 1), + kTensorType, 0, builder->CreateString("final_output_tensor"), + 0, false), + }; + + // Subgraph 1 is the chosen path if condition tensor in IF is true. 
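The memory-plan comment above explains how a buggy IF implementation can clobber one caller tensor while copying another into the callee, because the planner lets a callee input overlap a caller input. The toy program below is not part of the patch; all offsets and names are invented. It only reproduces the ordering hazard with two overlapping regions of one small arena, to show why the copy order into overlapping regions must be chosen deliberately:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Toy memory plan: the callee's input A1_1 overlaps the tail of the
    // caller's input A2_0, similar to the layout sketched above.
    static void run(bool copy_overlapping_input_first, uint8_t* arena) {
      uint8_t* A1_0 = arena + 0;  // caller input 1, 2 bytes
      uint8_t* A2_0 = arena + 2;  // caller input 2, 4 bytes
      uint8_t* A1_1 = arena + 4;  // callee input 1, 2 bytes (aliases A2_0[2..3])
      uint8_t* A2_1 = arena + 6;  // callee input 2, 4 bytes

      A1_0[0] = 1; A1_0[1] = 2;
      A2_0[0] = 3; A2_0[1] = 4; A2_0[2] = 5; A2_0[3] = 6;

      if (copy_overlapping_input_first) {
        memcpy(A1_1, A1_0, 2);  // clobbers A2_0[2..3] before it is saved
        memcpy(A2_1, A2_0, 4);
      } else {
        memcpy(A2_1, A2_0, 4);  // save the overlapped tensor first
        memcpy(A1_1, A1_0, 2);
      }
      printf("A2_1 = %d %d %d %d (expected 3 4 5 6)\n",
             A2_1[0], A2_1[1], A2_1[2], A2_1[3]);
    }

    int main() {
      uint8_t arena[10];
      run(true, arena);   // prints 3 4 1 2 -- corrupted
      run(false, arena);  // prints 3 4 5 6 -- correct
      return 0;
    }

Copying the overlapped tensor (A2_0 in the sketch) out before its tail is overwritten is what keeps the second run correct; the model added by this patch is crafted to exercise exactly that situation.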
+ constexpr size_t kSubgraph1TensorsCount = 3; + const Offset kSubgraph1Tensors[kSubgraph1TensorsCount] = { + CreateTensor(*builder, builder->CreateVector(kIfInput1TensorShape, 1), + kTensorType, 0, + builder->CreateString("subgraph1_input_tensor1"), 0, false), + CreateTensor(*builder, builder->CreateVector(kIfInput2TensorShape, 1), + kTensorType, 0, + builder->CreateString("subgraph1_input_tensor2"), 0, false), + CreateTensor(*builder, builder->CreateVector(kIfOutputTensorShape, 1), + kTensorType, 0, + builder->CreateString("subgraph1_output_tensor"), 0, false), + }; + + // Subgraph 2 is the chosen path if condition tensor in IF is false + constexpr size_t kSubgraph2TensorsCount = 3; + const Offset kSubgraph2Tensors[kSubgraph2TensorsCount] = { + CreateTensor(*builder, builder->CreateVector(kIfInput1TensorShape, 1), + kTensorType, 0, builder->CreateString("if_input_tensor1"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kIfInput2TensorShape, 1), + kTensorType, 0, builder->CreateString("if_input_tensor2"), 0, + false), + CreateTensor(*builder, builder->CreateVector(kIfOutputTensorShape, 1), + kTensorType, 0, builder->CreateString("if_output_tensor"), 0, + false), + }; + + constexpr int kIfOpCodeIndex = 0; + constexpr int kCustomOpCodeIndex = 1; + + constexpr size_t kIfInputsCount = 3; + const int32_t kIfInputs[kIfInputsCount] = {0, 1, 2}; + constexpr size_t kOutputsCount = 1; + const int32_t kIfOutputs[kOutputsCount] = {3}; + constexpr size_t kOpAfterIfInputsCount = 2; + const int32_t kOpAfterIfInputs[kOpAfterIfInputsCount] = {3, 2}; + const int32_t kOpAfterIfOutputs[kOutputsCount] = {4}; + constexpr size_t kOperatorsCount = 2; + const Offset kSubgraph0Operators[kOperatorsCount] = { + CreateOperator(*builder, kIfOpCodeIndex, + builder->CreateVector(kIfInputs, kIfInputsCount), + builder->CreateVector(kIfOutputs, kOutputsCount), + BuiltinOptions_IfOptions, + CreateIfOptions(*builder, 1, 2).Union()), + CreateOperator( + *builder, kCustomOpCodeIndex, + builder->CreateVector(kOpAfterIfInputs, kOpAfterIfInputsCount), + builder->CreateVector(kOpAfterIfOutputs, kOutputsCount)), + }; + + constexpr size_t kSubgraph1InputsCount = 2; + const int32_t kSubgraph1Inputs[kSubgraph1InputsCount] = {0, 1}; + constexpr size_t kSubgraph1OutputsCount = 1; + const int32_t kSubgraph1Outputs[kSubgraph1OutputsCount] = {2}; + constexpr size_t kSubgraph1OperatorsCount = 1; + const Offset kSubgraph1Operators[kSubgraph1OperatorsCount] = { + CreateOperator( + *builder, kCustomOpCodeIndex, + builder->CreateVector(kSubgraph1Inputs, kSubgraph1InputsCount), + builder->CreateVector(kSubgraph1Outputs, kSubgraph1OutputsCount), + BuiltinOptions_NONE), + }; + + constexpr size_t kSubgraph2InputsCount = 2; + const int32_t kSubgraph2Inputs[kSubgraph2InputsCount] = {0, 1}; + constexpr size_t kSubgraph2OutputsCount = 1; + const int32_t kSubgraph2Outputs[kSubgraph2OutputsCount] = {2}; + constexpr size_t kSubgraph2OperatorsCount = 1; + const Offset kSubgraph2Operators[kSubgraph2OperatorsCount] = { + CreateOperator( + *builder, kCustomOpCodeIndex, + builder->CreateVector(kSubgraph2Inputs, kSubgraph2InputsCount), + builder->CreateVector(kSubgraph2Outputs, kSubgraph2OutputsCount), + BuiltinOptions_NONE), + }; + + constexpr size_t kSubgraphsCount = 3; + const Offset kSubgraphs[kSubgraphsCount] = { + CreateSubGraph( + *builder, + builder->CreateVector(kSubgraph0Tensors, kSubgraph0TensorsCount), + builder->CreateVector(kIfInputs, kIfInputsCount), + builder->CreateVector(kOpAfterIfOutputs, kOutputsCount), + 
builder->CreateVector(kSubgraph0Operators, kOperatorsCount), + builder->CreateString("if_subgraph")), + CreateSubGraph( + *builder, + builder->CreateVector(kSubgraph1Tensors, kSubgraph1TensorsCount), + builder->CreateVector(kSubgraph1Inputs, kSubgraph1InputsCount), + builder->CreateVector(kSubgraph1Outputs, kSubgraph1OutputsCount), + builder->CreateVector(kSubgraph1Operators, kSubgraph1OperatorsCount), + builder->CreateString("then_subgraph")), + CreateSubGraph( + *builder, + builder->CreateVector(kSubgraph2Tensors, kSubgraph2TensorsCount), + builder->CreateVector(kSubgraph2Inputs, kSubgraph2InputsCount), + builder->CreateVector(kSubgraph2Outputs, kSubgraph2OutputsCount), + builder->CreateVector(kSubgraph2Operators, kSubgraph2OperatorsCount), + builder->CreateString("else_subgraph")), + }; + + constexpr size_t kOperatorCodesCount = 2; + const Offset kOperatorCodes[kOperatorCodesCount] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, "if", + /*version=*/0, BuiltinOperator_IF), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "custom_packer_op", + /*version=*/0, BuiltinOperator_CUSTOM), + }; + const Offset kModelOffset = CreateModel( + *builder, 0, builder->CreateVector(kOperatorCodes, kOperatorCodesCount), + builder->CreateVector(kSubgraphs, kSubgraphsCount), + builder->CreateString("test_model"), + builder->CreateVector(buffers, kBuffersCount)); + FinishModelBuffer(*builder, kModelOffset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +// Mock model with one main subgraph containing a single CALL_ONCE op (with null +// inputs and outputs) which invokes a second subgraph which has null inputs and +// outputs. +const Model* BuildSimpleMockModelWithNullInputsOutputs() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {0}; + constexpr size_t tensors_size = 1; + const Offset tensors[tensors_size] = { + CreateTensor(*builder, + builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, + builder->CreateString("test_input_tensor1"), 0, false), + }; + constexpr size_t subgraph0_inputs_size = 1; + const int32_t subgraph0_inputs[subgraph0_inputs_size] = {0}; + constexpr size_t subgraph0_outputs_size = 1; + const int32_t subgraph0_outputs[subgraph0_outputs_size] = {0}; + constexpr size_t operators_size = 1; + const Offset subgraph0_operators[operators_size] = { + CreateOperator(*builder, 0, {}, {}, BuiltinOptions_CallOnceOptions, + CreateCallOnceOptions(*builder, 1).Union()), + }; + const Offset subgraph1_operators[operators_size] = { + CreateOperator(*builder, 1, {}, {}, BuiltinOptions_NONE)}; + constexpr size_t subgraphs_size = 2; + const Offset subgraphs[subgraphs_size] = { + CreateSubGraph( + *builder, builder->CreateVector(tensors, tensors_size), + builder->CreateVector(subgraph0_inputs, subgraph0_inputs_size), + builder->CreateVector(subgraph0_outputs, subgraph0_outputs_size), + builder->CreateVector(subgraph0_operators, operators_size), + builder->CreateString("main_subgraph")), + CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), {}, + {}, + builder->CreateVector(subgraph1_operators, operators_size), + builder->CreateString("secondary subgraph")), + }; + constexpr size_t 
operator_codes_size = 2; + const Offset operator_codes[operator_codes_size] = { + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, + "call_once_op", + /*version=*/0, BuiltinOperator_CALL_ONCE), + CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, "no_op", + /*version=*/0, BuiltinOperator_CUSTOM)}; + const Offset model_offset = CreateModel( + *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), + builder->CreateVector(subgraphs, subgraphs_size), + builder->CreateString("test_model"), + builder->CreateVector(buffers, buffers_size)); + FinishModelBuffer(*builder, model_offset); + void* model_pointer = builder->GetBufferPointer(); + const Model* model = flatbuffers::GetRoot(model_pointer); + return model; +} + +} // namespace + +const TfLiteRegistration* SimpleStatefulOp::getRegistration() { + return GetMutableRegistration(); +} + +TfLiteRegistration* SimpleStatefulOp::GetMutableRegistration() { + static TfLiteRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + return &r; +} + +void* SimpleStatefulOp::Init(TfLiteContext* context, const char* buffer, + size_t length) { + TFLITE_DCHECK(context->AllocateBufferForEval == nullptr); + TFLITE_DCHECK(context->GetScratchBuffer == nullptr); + TFLITE_DCHECK(context->RequestScratchBufferInArena == nullptr); + + void* raw = context->AllocatePersistentBuffer(context, sizeof(OpData)); + OpData* data = reinterpret_cast(raw); + *data = {}; + return raw; +} + +TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context, + TfLiteNode* node) { + OpData* data = reinterpret_cast(node->user_data); + + // Make sure that the input is in uint8_t with at least 1 data entry. + MicroContext* micro_context = GetMicroContext(context); + TfLiteTensor* input = + micro_context->AllocateTempInputTensor(node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + + if (input->type != kTfLiteInt8) return kTfLiteError; + if (NumElements(input->dims) == 0) return kTfLiteError; + + // Allocate a temporary buffer with the same size of input for sorting. + TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( + context, sizeof(uint8_t) * NumElements(input->dims), + &data->sorting_buffer)); + // We can interleave scratch / persistent buffer allocation. + data->invoke_count = reinterpret_cast( + context->AllocatePersistentBuffer(context, sizeof(int))); + *data->invoke_count = 0; + + micro_context->DeallocateTempTfLiteTensor(input); + return kTfLiteOk; +} + +TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context, + TfLiteNode* node) { + OpData* data = reinterpret_cast(node->user_data); + *data->invoke_count += 1; + + const TfLiteEvalTensor* input = + tflite::micro::GetEvalInput(context, node, kInputTensor); + TF_LITE_ENSURE(context, input != nullptr); + const uint8_t* input_data = input->data.uint8; + int size = NumElements(input->dims); + + uint8_t* sorting_buffer = reinterpret_cast( + context->GetScratchBuffer(context, data->sorting_buffer)); + // Copy inputs data to the sorting buffer. We don't want to mutate the input + // tensor as it might be used by a another node. + for (int i = 0; i < size; i++) { + sorting_buffer[i] = input_data[i]; + } + + // In place insertion sort on `sorting_buffer`. 
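+  // (Example with a hypothetical input of {5, 1, 9}: the sort below yields
+  // {1, 5, 9}, and the element at index size / 2 -- here the value 5 -- is
+  // emitted as the median further down.)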
+ for (int i = 1; i < size; i++) { + for (int j = i; j > 0 && sorting_buffer[j] < sorting_buffer[j - 1]; j--) { + std::swap(sorting_buffer[j], sorting_buffer[j - 1]); + } + } + + TfLiteEvalTensor* median = + tflite::micro::GetEvalOutput(context, node, kMedianTensor); + TF_LITE_ENSURE(context, median != nullptr); + uint8_t* median_data = median->data.uint8; + TfLiteEvalTensor* invoke_count = + tflite::micro::GetEvalOutput(context, node, kInvokeCount); + TF_LITE_ENSURE(context, invoke_count != nullptr); + int32_t* invoke_count_data = invoke_count->data.i32; + + median_data[0] = sorting_buffer[size / 2]; + invoke_count_data[0] = *data->invoke_count; + return kTfLiteOk; +} + +const TfLiteRegistration* MockCustom::getRegistration() { + return GetMutableRegistration(); +} + +TfLiteRegistration* MockCustom::GetMutableRegistration() { + static TfLiteRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + r.free = Free; + return &r; +} + +void* MockCustom::Init(TfLiteContext* context, const char* buffer, + size_t length) { + // We don't support delegate in TFL micro. This is a weak check to test if + // context struct being zero-initialized. + TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr); + freed_ = false; + // Do nothing. + return nullptr; +} + +void MockCustom::Free(TfLiteContext* context, void* buffer) { freed_ = true; } + +TfLiteStatus MockCustom::Prepare(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TF_LITE_ENSURE(context, input != nullptr); + const int32_t* input_data = input->data.i32; + const TfLiteEvalTensor* weight = + tflite::micro::GetEvalInput(context, node, 1); + TF_LITE_ENSURE(context, weight != nullptr); + const uint8_t* weight_data = weight->data.uint8; + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); + int32_t* output_data = output->data.i32; + output_data[0] = + 0; // Catch output tensor sharing memory with an input tensor + output_data[0] = input_data[0] + weight_data[0]; + return kTfLiteOk; +} + +bool MockCustom::freed_ = false; + +const TfLiteRegistration* MultipleInputs::getRegistration() { + return GetMutableRegistration(); +} + +TfLiteRegistration* MultipleInputs::GetMutableRegistration() { + static TfLiteRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + r.free = Free; + return &r; +} + +void* MultipleInputs::Init(TfLiteContext* context, const char* buffer, + size_t length) { + // We don't support delegate in TFL micro. This is a weak check to test if + // context struct being zero-initialized. + TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr); + freed_ = false; + // Do nothing. 
+ return nullptr; +} + +void MultipleInputs::Free(TfLiteContext* context, void* buffer) { + freed_ = true; +} + +TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) { + const TfLiteEvalTensor* input = tflite::micro::GetEvalInput(context, node, 0); + TF_LITE_ENSURE(context, input != nullptr); + const int32_t* input_data = input->data.i32; + const TfLiteEvalTensor* input1 = + tflite::micro::GetEvalInput(context, node, 1); + TF_LITE_ENSURE(context, input1 != nullptr); + const int32_t* input_data1 = input1->data.i32; + const TfLiteEvalTensor* input2 = + tflite::micro::GetEvalInput(context, node, 2); + TF_LITE_ENSURE(context, input2 != nullptr); + const int32_t* input_data2 = input2->data.i32; + + TfLiteEvalTensor* output = tflite::micro::GetEvalOutput(context, node, 0); + TF_LITE_ENSURE(context, output != nullptr); + int32_t* output_data = output->data.i32; + output_data[0] = + 0; // Catch output tensor sharing memory with an input tensor + output_data[0] = input_data[0] + input_data1[0] + input_data2[0]; + return kTfLiteOk; +} + +bool MultipleInputs::freed_ = false; + +const TfLiteRegistration* NoOp::getRegistration() { + return GetMutableRegistration(); +} + +TfLiteRegistration* NoOp::GetMutableRegistration() { + static TfLiteRegistration r; + r.init = Init; + r.prepare = Prepare; + r.invoke = Invoke; + r.free = Free; + return &r; +} + +void* NoOp::Init(TfLiteContext* context, const char* buffer, size_t length) { + // We don't support delegate in TFL micro. This is a weak check to test if + // context struct being zero-initialized. + TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr); + freed_ = false; + // Do nothing. 
+ return nullptr; +} + +void NoOp::Free(TfLiteContext* context, void* buffer) { freed_ = true; } + +TfLiteStatus NoOp::Prepare(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +TfLiteStatus NoOp::Invoke(TfLiteContext* context, TfLiteNode* node) { + return kTfLiteOk; +} + +bool NoOp::freed_ = false; + +AllOpsResolver GetOpResolver() { + AllOpsResolver op_resolver; + op_resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration()); + op_resolver.AddCustom("simple_stateful_op", + SimpleStatefulOp::GetMutableRegistration()); + op_resolver.AddCustom("multiple_inputs_op", + MultipleInputs::GetMutableRegistration()); + op_resolver.AddCustom("no_op", NoOp::GetMutableRegistration()); + op_resolver.AddCustom("custom_packer_op", PackerOp::GetMutableRegistration()); + return op_resolver; +} + +const Model* GetModelWithUnusedInputs() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildModelWithUnusedInputs()); + } + return model; +} + +const Model* GetModelWithUnusedOperatorOutputs() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildModelWithUnusedOperatorOutputs()); + } + return model; +} + +const Model* GetModelWith256x256Tensor() { + static const Model* model = BuildModelWith256x256Tensor(); + return model; +} + +const Model* GetSimpleMockModel() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleMockModel()); + } + return model; +} + +const Model* GetSimpleMultipleInputsModel() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleMultipleInputsModel()); + } + return model; +} + +const Model* GetSimpleModelWithSubgraphsAndIf() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleModelWithSubgraphsAndIf()); + } + return model; +} + +const Model* GetSimpleModelWithIfAndEmptySubgraph() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleModelWithIfAndEmptySubgraph()); + } + return model; +} + +const Model* GetSimpleModelWithSubgraphsAndWhile() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleModelWithSubgraphsAndWhile()); + } + return model; +} + +const Model* GetModelWithIfAndSubgraphInputTensorOverlap() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildModelWithIfAndSubgraphInputTensorOverlap()); + } + return model; +} + +const Model* GetSimpleModelWithNullInputsAndOutputs() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleMockModelWithNullInputsOutputs()); + } + return model; +} + +const Model* GetComplexMockModel() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildComplexMockModel()); + } + return model; +} + +const Model* GetSimpleModelWithBranch() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleModelWithBranch()); + } + return model; +} + +const Model* GetModelWithOfflinePlanning(int num_tensors, + const int32_t* metadata_buffer, + NodeConnection* node_conn, + int num_conns, + int num_subgraph_inputs) { + const Model* model = BuildModelWithOfflinePlanning( + num_tensors, metadata_buffer, node_conn, num_conns, num_subgraph_inputs); + return model; +} + +const Model* GetSimpleStatefulModel() { + static Model* model = nullptr; + if (!model) { + model = const_cast(BuildSimpleStatefulModel()); + } + return model; +} + +const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) { + using flatbuffers::Offset; + 
flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {size}; + const Offset tensor_offset = CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, builder->CreateString("test_tensor"), 0, + is_variable); + builder->Finish(tensor_offset); + void* tensor_pointer = builder->GetBufferPointer(); + const Tensor* tensor = flatbuffers::GetRoot(tensor_pointer); + return tensor; +} + +const Tensor* CreateQuantizedFlatbufferTensor(int size) { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + constexpr size_t quant_params_size = 1; + const float min_array[quant_params_size] = {0.1f}; + const float max_array[quant_params_size] = {0.2f}; + const float scale_array[quant_params_size] = {0.3f}; + const int64_t zero_point_array[quant_params_size] = {100ll}; + + const Offset quant_params = + CreateQuantizationParameters( + *builder, + /*min=*/builder->CreateVector(min_array, quant_params_size), + /*max=*/builder->CreateVector(max_array, quant_params_size), + /*scale=*/ + builder->CreateVector(scale_array, quant_params_size), + /*zero_point=*/ + builder->CreateVector(zero_point_array, quant_params_size)); + + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {size}; + const Offset tensor_offset = CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params, + false); + builder->Finish(tensor_offset); + void* tensor_pointer = builder->GetBufferPointer(); + const Tensor* tensor = flatbuffers::GetRoot(tensor_pointer); + return tensor; +} + +const Tensor* CreateMissingQuantizationFlatbufferTensor(int size) { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + const Offset quant_params = + CreateQuantizationParameters(*builder, 0, 0, 0, 0, + QuantizationDetails_NONE, 0, 0); + constexpr size_t tensor_shape_size = 1; + const int32_t tensor_shape[tensor_shape_size] = {size}; + const Offset tensor_offset = CreateTensor( + *builder, builder->CreateVector(tensor_shape, tensor_shape_size), + TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params, + false); + builder->Finish(tensor_offset); + void* tensor_pointer = builder->GetBufferPointer(); + const Tensor* tensor = flatbuffers::GetRoot(tensor_pointer); + return tensor; +} + +const flatbuffers::Vector>* +CreateFlatbufferBuffers() { + using flatbuffers::Offset; + flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); + constexpr size_t buffers_size = 1; + const Offset buffers[buffers_size] = { + CreateBuffer(*builder), + }; + const flatbuffers::Offset>> + buffers_offset = builder->CreateVector(buffers, buffers_size); + builder->Finish(buffers_offset); + void* buffers_pointer = builder->GetBufferPointer(); + const flatbuffers::Vector>* result = + flatbuffers::GetRoot>>( + buffers_pointer); + return result; +} + +int TestStrcmp(const char* a, const char* b) { + if ((a == nullptr) || (b == nullptr)) { + return -1; + } + while ((*a != 0) && (*a == *b)) { + a++; + b++; + } + return *reinterpret_cast(a) - + *reinterpret_cast(b); +} + +// Create a TfLiteIntArray from an array of ints. The first element in the +// supplied array must be the size of the array expressed as an int. 
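+// Hypothetical usage, e.g. for tensor dims: {2, 3, 4} describes an array of
+// length 2 holding {3, 4}:
+//   int dims_data[] = {2, 3, 4};
+//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);
+// FloatArrayFromFloats below follows the same first-element-is-the-length
+// convention.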
+TfLiteIntArray* IntArrayFromInts(int* int_array) { + return reinterpret_cast(int_array); +} + +// Create a TfLiteFloatArray from an array of floats. The first element in the +// supplied array must be the size of the array expressed as a float. +TfLiteFloatArray* FloatArrayFromFloats(const float* floats) { + static_assert(sizeof(float) == sizeof(int), + "assumes sizeof(float) == sizeof(int) to perform casting"); + int size = static_cast(floats[0]); + *reinterpret_cast(const_cast(floats)) = size; + return reinterpret_cast(const_cast(floats)); +} + +TfLiteTensor CreateQuantizedBiasTensor(const float* data, int16_t* quantized, + TfLiteIntArray* dims, float input_scale, + float weights_scale, bool is_variable) { + float bias_scale = input_scale * weights_scale; + tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale); + + // Quantized int16_t tensors always have a zero point of 0, since the range of + // int16_t values is large, and because zero point costs extra cycles during + // processing. + TfLiteTensor result = + CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable); + return result; +} + +TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized, + TfLiteIntArray* dims, float input_scale, + float weights_scale, bool is_variable) { + float bias_scale = input_scale * weights_scale; + tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale); + + // Quantized int32_t tensors always have a zero point of 0, since the range of + // int32_t values is large, and because zero point costs extra cycles during + // processing. + TfLiteTensor result = + CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable); + return result; +} + +TfLiteTensor CreateQuantizedBiasTensor(const float* data, + std::int64_t* quantized, + TfLiteIntArray* dims, float input_scale, + float weights_scale, bool is_variable) { + float bias_scale = input_scale * weights_scale; + tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale); + + // Quantized int32_t tensors always have a zero point of 0, since the range of + // int32_t values is large, and because zero point costs extra cycles during + // processing. + TfLiteTensor result = + CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable); + return result; +} + +// Quantizes int32_t bias tensor with per-channel weights determined by input +// scale multiplied by weight scale for each channel. 
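+// For channel c the effective bias scale is
+//   bias_scale[c] = input_scale * weight_scale[c]
+// with a zero point of 0, and each bias value is quantized (approximately)
+// as q = round(b / bias_scale[c]). Worked example with assumed numbers:
+// input_scale = 0.5f and weight_scale[c] = 0.25f give bias_scale[c] = 0.125f,
+// so a float bias of 1.0f quantizes to 8.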
+template +TfLiteTensor CreatePerChannelQuantizedBiasTensor( + const float* input, T* quantized, TfLiteIntArray* dims, float input_scale, + float* weight_scales, float* scales, int* zero_points, + TfLiteAffineQuantization* affine_quant, int quantized_dimension, + bool is_variable) { + int input_size = ElementCount(*dims); + int num_channels = dims->data[quantized_dimension]; + // First element is reserved for array length + zero_points[0] = num_channels; + scales[0] = static_cast(num_channels); + float* scales_array = &scales[1]; + for (int i = 0; i < num_channels; i++) { + scales_array[i] = input_scale * weight_scales[i]; + zero_points[i + 1] = 0; + } + + SymmetricPerChannelQuantize(input, quantized, input_size, num_channels, + scales_array); + + affine_quant->scale = FloatArrayFromFloats(scales); + affine_quant->zero_point = IntArrayFromInts(zero_points); + affine_quant->quantized_dimension = quantized_dimension; + + TfLiteTensor result = CreateTensor(quantized, dims, is_variable); + result.quantization = {kTfLiteAffineQuantization, affine_quant}; + return result; +} + +TfLiteTensor CreatePerChannelQuantizedBiasTensor( + const float* input, int32_t* quantized, TfLiteIntArray* dims, + float input_scale, float* weight_scales, float* scales, int* zero_points, + TfLiteAffineQuantization* affine_quant, int quantized_dimension, + bool is_variable) { + return CreatePerChannelQuantizedBiasTensor( + input, quantized, dims, input_scale, weight_scales, scales, zero_points, + affine_quant, quantized_dimension, is_variable); +} + +TfLiteTensor CreatePerChannelQuantizedBiasTensor( + const float* input, std::int64_t* quantized, TfLiteIntArray* dims, + float input_scale, float* weight_scales, float* scales, int* zero_points, + TfLiteAffineQuantization* affine_quant, int quantized_dimension, + bool is_variable) { + return CreatePerChannelQuantizedBiasTensor( + input, quantized, dims, input_scale, weight_scales, scales, zero_points, + affine_quant, quantized_dimension, is_variable); +} + +TfLiteTensor CreateSymmetricPerChannelQuantizedTensor( + const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales, + int* zero_points, TfLiteAffineQuantization* affine_quant, + int quantized_dimension, bool is_variable, TfLiteType tensor_weight_type) { + int channel_count = dims->data[quantized_dimension]; + + scales[0] = static_cast(channel_count); + zero_points[0] = channel_count; + + SignedSymmetricPerChannelQuantize(input, dims, quantized_dimension, quantized, + &scales[1], tensor_weight_type); + + for (int i = 0; i < channel_count; i++) { + zero_points[i + 1] = 0; + } + + affine_quant->scale = FloatArrayFromFloats(scales); + affine_quant->zero_point = IntArrayFromInts(zero_points); + affine_quant->quantized_dimension = quantized_dimension; + TfLiteTensor result = + CreateTensor(quantized, dims, is_variable, tensor_weight_type); + result.quantization = {kTfLiteAffineQuantization, affine_quant}; + return result; +} + +size_t GetModelTensorCount(const Model* model) { + auto* subgraphs = model->subgraphs(); + if (subgraphs) { + return (*subgraphs)[0]->tensors()->size(); + } + return 0; +} + +void PackInt4ValuesDenselyInPlace(uint8_t* src_buffer, int buffer_size) { + for (int i = 0; i < buffer_size; ++i) { + if (i % 2 == 0) { + src_buffer[i / 2] = src_buffer[i] & 0x0F; + } else { + src_buffer[i / 2] |= src_buffer[i] << 4; + } + } + // the rest of the buffer should be empty since half of it is packed with the + // values + memset(src_buffer + (buffer_size + 1) / 2, 0, buffer_size / 2); +} + +} // 
namespace testing +} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.cpp b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.cpp deleted file mode 100644 index 6fb3685..0000000 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.cpp +++ /dev/null @@ -1,1079 +0,0 @@ -/* Copyright 2019 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ - -#include "edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h" - -#include -#include -#include -#include -#include - -#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" -#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" -#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" -#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" -#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" - -// TODO(b/170464050): Use TFLM test only version of schema_utils. - -namespace tflite { -namespace testing { -namespace { - -class StackAllocator : public flatbuffers::Allocator { - public: - StackAllocator() : data_(data_backing_), data_size_(0) {} - - uint8_t* allocate(size_t size) override { - TFLITE_DCHECK((data_size_ + size) <= kStackAllocatorSize); - uint8_t* result = data_; - data_ += size; - data_size_ += size; - return result; - } - - void deallocate(uint8_t* p, size_t) override {} - - static StackAllocator& instance() { - // Avoid using true dynamic memory allocation to be portable to bare metal. - static char inst_memory[sizeof(StackAllocator)]; - static StackAllocator* inst = new (inst_memory) StackAllocator; - return *inst; - } - - static constexpr size_t kStackAllocatorSize = 8192; - - private: - uint8_t data_backing_[kStackAllocatorSize]; - uint8_t* data_; - int data_size_; -}; - -flatbuffers::FlatBufferBuilder* BuilderInstance() { - static char inst_memory[sizeof(flatbuffers::FlatBufferBuilder)]; - static flatbuffers::FlatBufferBuilder* inst = - new (inst_memory) flatbuffers::FlatBufferBuilder( - StackAllocator::kStackAllocatorSize, &StackAllocator::instance()); - return inst; -} - -// A wrapper around FlatBuffer API to help build model easily. -class ModelBuilder { - public: - typedef int32_t Tensor; - typedef int Operator; - typedef int Node; - - // `builder` needs to be available until BuildModel is called. - explicit ModelBuilder(flatbuffers::FlatBufferBuilder* builder) - : builder_(builder) {} - - // Registers an operator that will be used in the model. 
- Operator RegisterOp(BuiltinOperator op, const char* custom_code); - - // Adds a tensor to the model. - Tensor AddTensor(TensorType type, std::initializer_list shape) { - return AddTensorImpl(type, /* is_variable */ false, shape); - } - - // Adds a variable tensor to the model. - Tensor AddVariableTensor(TensorType type, - std::initializer_list shape) { - return AddTensorImpl(type, /* is_variable */ true, shape); - } - - // Adds a node to the model with given input and output Tensors. - Node AddNode(Operator op, std::initializer_list inputs, - std::initializer_list outputs); - - void AddMetadata(const char* description_string, - const int32_t* metadata_buffer_data, size_t num_elements); - - // Constructs the flatbuffer model using `builder_` and return a pointer to - // it. The returned model has the same lifetime as `builder_`. - // Note the default value of 0 for num_subgraph_inputs means all tensor inputs - // are in subgraph input list. - const Model* BuildModel(std::initializer_list inputs, - std::initializer_list outputs, - size_t num_subgraph_inputs = 0); - - private: - // Adds a tensor to the model. - Tensor AddTensorImpl(TensorType type, bool is_variable, - std::initializer_list shape); - - flatbuffers::FlatBufferBuilder* builder_; - - static constexpr int kMaxOperatorCodes = 10; - flatbuffers::Offset operator_codes_[kMaxOperatorCodes]; - int next_operator_code_id_ = 0; - - static constexpr int kMaxOperators = 50; - flatbuffers::Offset operators_[kMaxOperators]; - int next_operator_id_ = 0; - - static constexpr int kMaxTensors = 50; - flatbuffers::Offset tensors_[kMaxTensors]; - - static constexpr int kMaxMetadataBuffers = 10; - - static constexpr int kMaxMetadatas = 10; - flatbuffers::Offset metadata_[kMaxMetadatas]; - - flatbuffers::Offset metadata_buffers_[kMaxMetadataBuffers]; - - int nbr_of_metadata_buffers_ = 0; - - int next_tensor_id_ = 0; -}; - -ModelBuilder::Operator ModelBuilder::RegisterOp(BuiltinOperator op, - const char* custom_code) { - TFLITE_DCHECK(next_operator_code_id_ <= kMaxOperatorCodes); - operator_codes_[next_operator_code_id_] = tflite::CreateOperatorCodeDirect( - *builder_, /*deprecated_builtin_code=*/0, custom_code, /*version=*/0, op); - next_operator_code_id_++; - return next_operator_code_id_ - 1; -} - -ModelBuilder::Node ModelBuilder::AddNode( - ModelBuilder::Operator op, - std::initializer_list inputs, - std::initializer_list outputs) { - TFLITE_DCHECK(next_operator_id_ <= kMaxOperators); - operators_[next_operator_id_] = tflite::CreateOperator( - *builder_, op, builder_->CreateVector(inputs.begin(), inputs.size()), - builder_->CreateVector(outputs.begin(), outputs.size()), - BuiltinOptions_NONE); - next_operator_id_++; - return next_operator_id_ - 1; -} - -void ModelBuilder::AddMetadata(const char* description_string, - const int32_t* metadata_buffer_data, - size_t num_elements) { - metadata_[ModelBuilder::nbr_of_metadata_buffers_] = - CreateMetadata(*builder_, builder_->CreateString(description_string), - 1 + ModelBuilder::nbr_of_metadata_buffers_); - - metadata_buffers_[nbr_of_metadata_buffers_] = tflite::CreateBuffer( - *builder_, builder_->CreateVector((uint8_t*)metadata_buffer_data, - sizeof(uint32_t) * num_elements)); - - ModelBuilder::nbr_of_metadata_buffers_++; -} - -const Model* ModelBuilder::BuildModel( - std::initializer_list inputs, - std::initializer_list outputs, - size_t num_subgraph_inputs) { - // Model schema requires an empty buffer at idx 0. 
- size_t buffer_size = 1 + ModelBuilder::nbr_of_metadata_buffers_; - flatbuffers::Offset buffers[kMaxMetadataBuffers]; - buffers[0] = tflite::CreateBuffer(*builder_); - - // Place the metadata buffers first in the buffer since the indices for them - // have already been set in AddMetadata() - for (int i = 1; i < ModelBuilder::nbr_of_metadata_buffers_ + 1; ++i) { - buffers[i] = metadata_buffers_[i - 1]; - } - - // TFLM only supports single subgraph. - constexpr size_t subgraphs_size = 1; - - // Find out number of subgraph inputs. - if (num_subgraph_inputs == 0) { - // This is the default case. - num_subgraph_inputs = inputs.size(); - } else { - // A non-zero value of num_subgraph_inputs means that some of - // the operator input tensors are not subgraph inputs. - TFLITE_DCHECK(num_subgraph_inputs <= inputs.size()); - } - - const flatbuffers::Offset subgraphs[subgraphs_size] = { - tflite::CreateSubGraph( - *builder_, builder_->CreateVector(tensors_, next_tensor_id_), - builder_->CreateVector(inputs.begin(), num_subgraph_inputs), - builder_->CreateVector(outputs.begin(), outputs.size()), - builder_->CreateVector(operators_, next_operator_id_), - builder_->CreateString("test_subgraph"))}; - - flatbuffers::Offset model_offset; - if (ModelBuilder::nbr_of_metadata_buffers_ > 0) { - model_offset = tflite::CreateModel( - *builder_, 0, - builder_->CreateVector(operator_codes_, next_operator_code_id_), - builder_->CreateVector(subgraphs, subgraphs_size), - builder_->CreateString("teset_model"), - builder_->CreateVector(buffers, buffer_size), 0, - builder_->CreateVector(metadata_, - ModelBuilder::nbr_of_metadata_buffers_)); - } else { - model_offset = tflite::CreateModel( - *builder_, 0, - builder_->CreateVector(operator_codes_, next_operator_code_id_), - builder_->CreateVector(subgraphs, subgraphs_size), - builder_->CreateString("teset_model"), - builder_->CreateVector(buffers, buffer_size)); - } - - tflite::FinishModelBuffer(*builder_, model_offset); - void* model_pointer = builder_->GetBufferPointer(); - const Model* model = flatbuffers::GetRoot(model_pointer); - return model; -} - -ModelBuilder::Tensor ModelBuilder::AddTensorImpl( - TensorType type, bool is_variable, std::initializer_list shape) { - TFLITE_DCHECK(next_tensor_id_ <= kMaxTensors); - tensors_[next_tensor_id_] = tflite::CreateTensor( - *builder_, builder_->CreateVector(shape.begin(), shape.size()), type, - /* buffer */ 0, /* name */ 0, /* quantization */ 0, - /* is_variable */ is_variable, - /* sparsity */ 0); - next_tensor_id_++; - return next_tensor_id_ - 1; -} - -const Model* BuildSimpleStatefulModel() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); - - ModelBuilder model_builder(fb_builder); - - const int op_id = - model_builder.RegisterOp(BuiltinOperator_CUSTOM, "simple_stateful_op"); - const int input_tensor = model_builder.AddTensor(TensorType_UINT8, {3}); - const int median_tensor = model_builder.AddTensor(TensorType_UINT8, {3}); - const int invoke_count_tensor = - model_builder.AddTensor(TensorType_INT32, {1}); - - model_builder.AddNode(op_id, {input_tensor}, - {median_tensor, invoke_count_tensor}); - return model_builder.BuildModel({input_tensor}, - {median_tensor, invoke_count_tensor}); -} - -const Model* BuildSimpleModelWithBranch() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); - - ModelBuilder model_builder(fb_builder); - /* Model structure - | t0 - +------| - | v - | +---------+ - | | n0 | - | | | - | +---------+ - v + - | 
- +---------+ | t1 - | n1 | | - | | | - +---------+ | - | | - t2 | v - | +---------+ - +-->| n2 | - | | - +-------|-+ - |t3 - v - */ - const int op_id = - model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom"); - const int t0 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - const int t1 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - const int t2 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - const int t3 = model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - model_builder.AddNode(op_id, {t0}, {t1}); // n0 - model_builder.AddNode(op_id, {t0}, {t2}); // n1 - model_builder.AddNode(op_id, {t1, t2}, {t3}); // n2 - return model_builder.BuildModel({t0}, {t3}); -} - -const Model* BuildModelWithOfflinePlanning(int number_of_tensors, - const int32_t* metadata_buffer, - NodeConnection* node_conn, - int num_conns, - int num_subgraph_inputs) { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* fb_builder = BuilderInstance(); - - ModelBuilder model_builder(fb_builder); - - const int op_id = - model_builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom"); - - for (int i = 0; i < number_of_tensors; ++i) { - model_builder.AddTensor(TensorType_FLOAT32, {2, 2, 3}); - } - - for (int i = 0; i < num_conns; ++i) { - model_builder.AddNode(op_id, node_conn[i].input, node_conn[i].output); - } - - model_builder.AddMetadata( - "OfflineMemoryAllocation", metadata_buffer, - number_of_tensors + tflite::testing::kOfflinePlannerHeaderSize); - - return model_builder.BuildModel( - node_conn[0].input, node_conn[num_conns - 1].output, num_subgraph_inputs); -} - -const Model* BuildSimpleMockModel() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - - constexpr size_t buffer_data_size = 1; - const uint8_t buffer_data[buffer_data_size] = {21}; - constexpr size_t buffers_size = 2; - const Offset buffers[buffers_size] = { - CreateBuffer(*builder), - CreateBuffer(*builder, - builder->CreateVector(buffer_data, buffer_data_size))}; - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {1}; - constexpr size_t tensors_size = 4; - const Offset tensors[tensors_size] = { - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_input_tensor"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 1, - builder->CreateString("test_weight_tensor"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_output_tensor"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_output2_tensor"), 0, false), - }; - constexpr size_t inputs_size = 1; - const int32_t inputs[inputs_size] = {0}; - constexpr size_t outputs_size = 2; - const int32_t outputs[outputs_size] = {2, 3}; - constexpr size_t operator_inputs_size = 2; - const int32_t operator_inputs[operator_inputs_size] = {0, 1}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {2}; - const int32_t operator2_outputs[operator_outputs_size] = {3}; - constexpr size_t operators_size = 2; - const Offset operators[operators_size] = { - CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, 
operator_outputs_size), - BuiltinOptions_NONE), - CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator2_outputs, operator_outputs_size), - BuiltinOptions_NONE), - }; - constexpr size_t subgraphs_size = 1; - const Offset subgraphs[subgraphs_size] = { - CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), - builder->CreateVector(inputs, inputs_size), - builder->CreateVector(outputs, outputs_size), - builder->CreateVector(operators, operators_size), - builder->CreateString("test_subgraph"))}; - constexpr size_t operator_codes_size = 1; - const Offset operator_codes[operator_codes_size] = { - CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, - "mock_custom", - /*version=*/0, BuiltinOperator_CUSTOM)}; - const Offset model_offset = CreateModel( - *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), - builder->CreateVector(subgraphs, subgraphs_size), - builder->CreateString("test_model"), - builder->CreateVector(buffers, buffers_size)); - FinishModelBuffer(*builder, model_offset); - void* model_pointer = builder->GetBufferPointer(); - const Model* model = flatbuffers::GetRoot(model_pointer); - return model; -} - -const Model* BuildComplexMockModel() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - - constexpr size_t buffer_data_size = 1; - const uint8_t buffer_data_1[buffer_data_size] = {21}; - const uint8_t buffer_data_2[buffer_data_size] = {21}; - const uint8_t buffer_data_3[buffer_data_size] = {21}; - constexpr size_t buffers_size = 7; - const Offset buffers[buffers_size] = { - // Op 1 buffers: - CreateBuffer(*builder), - CreateBuffer(*builder), - CreateBuffer(*builder, - builder->CreateVector(buffer_data_1, buffer_data_size)), - // Op 2 buffers: - CreateBuffer(*builder), - CreateBuffer(*builder, - builder->CreateVector(buffer_data_2, buffer_data_size)), - // Op 3 buffers: - CreateBuffer(*builder), - CreateBuffer(*builder, - builder->CreateVector(buffer_data_3, buffer_data_size)), - }; - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {1}; - - constexpr size_t tensors_size = 10; - const Offset tensors[tensors_size] = { - // Op 1 inputs: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_input_tensor_1"), 0, - false /* is_variable */), - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 1, builder->CreateString("test_variable_tensor_1"), - 0, true /* is_variable */), - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_1"), 0, - false /* is_variable */), - // Op 1 output / Op 2 input: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_output_tensor_1"), 0, - false /* is_variable */), - // Op 2 inputs: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 1, builder->CreateString("test_variable_tensor_2"), - 0, true /* is_variable */), - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_2"), 0, - false /* is_variable */), - // Op 2 output / Op 3 input: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, 
tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_output_tensor_2"), 0, - false /* is_variable */), - // Op 3 inputs: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 1, builder->CreateString("test_variable_tensor_3"), - 0, true /* is_variable */), - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_3"), 0, - false /* is_variable */), - // Op 3 output: - CreateTensor( - *builder, builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, builder->CreateString("test_output_tensor_3"), 0, - false /* is_variable */), - }; - - constexpr size_t operators_size = 3; - Offset operators[operators_size]; - { - // Set Op 1 attributes: - constexpr size_t operator_inputs_size = 3; - const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {3}; - - operators[0] = {CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE)}; - } - - { - // Set Op 2 attributes - constexpr size_t operator_inputs_size = 3; - const int32_t operator_inputs[operator_inputs_size] = {3, 4, 5}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {6}; - - operators[1] = {CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE)}; - } - - { - // Set Op 3 attributes - constexpr size_t operator_inputs_size = 3; - const int32_t operator_inputs[operator_inputs_size] = {6, 7, 8}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {9}; - - operators[2] = {CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE)}; - } - - constexpr size_t inputs_size = 1; - const int32_t inputs[inputs_size] = {0}; - constexpr size_t outputs_size = 1; - const int32_t outputs[outputs_size] = {9}; - - constexpr size_t subgraphs_size = 1; - const Offset subgraphs[subgraphs_size] = { - CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), - builder->CreateVector(inputs, inputs_size), - builder->CreateVector(outputs, outputs_size), - builder->CreateVector(operators, operators_size), - builder->CreateString("test_subgraph"))}; - - constexpr size_t operator_codes_size = 1; - const Offset operator_codes[operator_codes_size] = { - CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, - "mock_custom", - /*version=*/0, BuiltinOperator_CUSTOM)}; - - const Offset model_offset = CreateModel( - *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), - builder->CreateVector(subgraphs, subgraphs_size), - builder->CreateString("test_model"), - builder->CreateVector(buffers, buffers_size)); - - FinishModelBuffer(*builder, model_offset); - void* model_pointer = builder->GetBufferPointer(); - const Model* model = flatbuffers::GetRoot(model_pointer); - return model; -} - -const Model* BuildSimpleMultipleInputsModel() { - using flatbuffers::Offset; - flatbuffers::FlatBufferBuilder* builder = BuilderInstance(); - - constexpr size_t buffers_size = 1; - const 
Offset buffers[buffers_size] = { - CreateBuffer(*builder), - }; - constexpr size_t tensor_shape_size = 1; - const int32_t tensor_shape[tensor_shape_size] = {1}; - constexpr size_t tensors_size = 4; - const Offset tensors[tensors_size] = { - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_input_tensor1"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT8, 0, - builder->CreateString("test_input_tensor2"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_input_tensor3"), 0, false), - CreateTensor(*builder, - builder->CreateVector(tensor_shape, tensor_shape_size), - TensorType_INT32, 0, - builder->CreateString("test_output_tensor"), 0, false), - }; - constexpr size_t inputs_size = 3; - const int32_t inputs[inputs_size] = {0, 1, 2}; - constexpr size_t outputs_size = 1; - const int32_t outputs[outputs_size] = {3}; - constexpr size_t operator_inputs_size = 3; - const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2}; - constexpr size_t operator_outputs_size = 1; - const int32_t operator_outputs[operator_outputs_size] = {3}; - constexpr size_t operators_size = 1; - const Offset operators[operators_size] = { - CreateOperator( - *builder, 0, - builder->CreateVector(operator_inputs, operator_inputs_size), - builder->CreateVector(operator_outputs, operator_outputs_size), - BuiltinOptions_NONE), - }; - constexpr size_t subgraphs_size = 1; - const Offset subgraphs[subgraphs_size] = { - CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size), - builder->CreateVector(inputs, inputs_size), - builder->CreateVector(outputs, outputs_size), - builder->CreateVector(operators, operators_size), - builder->CreateString("test_subgraph"))}; - constexpr size_t operator_codes_size = 1; - const Offset operator_codes[operator_codes_size] = { - CreateOperatorCodeDirect(*builder, /*deprecated_builtin_code=*/0, - "multiple_inputs_op", - /*version=*/0, BuiltinOperator_CUSTOM)}; - const Offset model_offset = CreateModel( - *builder, 0, builder->CreateVector(operator_codes, operator_codes_size), - builder->CreateVector(subgraphs, subgraphs_size), - builder->CreateString("test_model"), - builder->CreateVector(buffers, buffers_size)); - FinishModelBuffer(*builder, model_offset); - void* model_pointer = builder->GetBufferPointer(); - const Model* model = flatbuffers::GetRoot(model_pointer); - return model; -} - -} // namespace - -const TfLiteRegistration* SimpleStatefulOp::getRegistration() { - return GetMutableRegistration(); -} - -TfLiteRegistration* SimpleStatefulOp::GetMutableRegistration() { - static TfLiteRegistration r; - r.init = Init; - r.prepare = Prepare; - r.invoke = Invoke; - return &r; -} - -void* SimpleStatefulOp::Init(TfLiteContext* context, const char* buffer, - size_t length) { - TFLITE_DCHECK(context->AllocateBufferForEval == nullptr); - TFLITE_DCHECK(context->GetScratchBuffer == nullptr); - TFLITE_DCHECK(context->RequestScratchBufferInArena == nullptr); - - void* raw = context->AllocatePersistentBuffer(context, sizeof(OpData)); - OpData* data = reinterpret_cast(raw); - *data = {}; - return raw; -} - -TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context, - TfLiteNode* node) { - OpData* data = reinterpret_cast(node->user_data); - - // Make sure that the input is in uint8_t with at least 1 data entry. 
- const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - if (input->type != kTfLiteUInt8) return kTfLiteError; - if (NumElements(input->dims) == 0) return kTfLiteError; - - // Allocate a temporary buffer with the same size of input for sorting. - TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena( - context, sizeof(uint8_t) * NumElements(input->dims), - &data->sorting_buffer)); - // We can interleave scratch / persistent buffer allocation. - data->invoke_count = reinterpret_cast( - context->AllocatePersistentBuffer(context, sizeof(int))); - *data->invoke_count = 0; - - return kTfLiteOk; -} - -TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context, - TfLiteNode* node) { - OpData* data = reinterpret_cast(node->user_data); - *data->invoke_count += 1; - - const TfLiteTensor* input; - TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input)); - const uint8_t* input_data = GetTensorData(input); - int size = NumElements(input->dims); - - uint8_t* sorting_buffer = reinterpret_cast( - context->GetScratchBuffer(context, data->sorting_buffer)); - // Copy inputs data to the sorting buffer. We don't want to mutate the input - // tensor as it might be used by a another node. - for (int i = 0; i < size; i++) { - sorting_buffer[i] = input_data[i]; - } - - // In place insertion sort on `sorting_buffer`. - for (int i = 1; i < size; i++) { - for (int j = i; j > 0 && sorting_buffer[j] < sorting_buffer[j - 1]; j--) { - std::swap(sorting_buffer[j], sorting_buffer[j - 1]); - } - } - - TfLiteTensor* median; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kMedianTensor, &median)); - uint8_t* median_data = GetTensorData(median); - TfLiteTensor* invoke_count; - TF_LITE_ENSURE_OK(context, - GetOutputSafe(context, node, kInvokeCount, &invoke_count)); - int32_t* invoke_count_data = GetTensorData(invoke_count); - - median_data[0] = sorting_buffer[size / 2]; - invoke_count_data[0] = *data->invoke_count; - return kTfLiteOk; -} - -const TfLiteRegistration* MockCustom::getRegistration() { - return GetMutableRegistration(); -} - -TfLiteRegistration* MockCustom::GetMutableRegistration() { - static TfLiteRegistration r; - r.init = Init; - r.prepare = Prepare; - r.invoke = Invoke; - r.free = Free; - return &r; -} - -void* MockCustom::Init(TfLiteContext* context, const char* buffer, - size_t length) { - // We don't support delegate in TFL micro. This is a weak check to test if - // context struct being zero-initialized. - TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr); - freed_ = false; - // Do nothing. 
-  return nullptr;
-}
-
-void MockCustom::Free(TfLiteContext* context, void* buffer) { freed_ = true; }
-
-TfLiteStatus MockCustom::Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteTensor* input;
-  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
-  const int32_t* input_data = input->data.i32;
-  const TfLiteTensor* weight;
-  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &weight));
-  const uint8_t* weight_data = weight->data.uint8;
-  TfLiteTensor* output;
-  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
-  int32_t* output_data = output->data.i32;
-  output_data[0] =
-      0;  // Catch output tensor sharing memory with an input tensor
-  output_data[0] = input_data[0] + weight_data[0];
-  return kTfLiteOk;
-}
-
-bool MockCustom::freed_ = false;
-
-const TfLiteRegistration* MultipleInputs::getRegistration() {
-  return GetMutableRegistration();
-}
-
-TfLiteRegistration* MultipleInputs::GetMutableRegistration() {
-  static TfLiteRegistration r;
-  r.init = Init;
-  r.prepare = Prepare;
-  r.invoke = Invoke;
-  r.free = Free;
-  return &r;
-}
-
-void* MultipleInputs::Init(TfLiteContext* context, const char* buffer,
-                           size_t length) {
-  // We don't support delegate in TFL micro. This is a weak check to test if
-  // context struct being zero-initialized.
-  TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr);
-  freed_ = false;
-  // Do nothing.
-  return nullptr;
-}
-
-void MultipleInputs::Free(TfLiteContext* context, void* buffer) {
-  freed_ = true;
-}
-
-TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) {
-  return kTfLiteOk;
-}
-
-TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) {
-  const TfLiteTensor* input;
-  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
-  const int32_t* input_data = input->data.i32;
-  const TfLiteTensor* input1;
-  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input1));
-  const int32_t* input_data1 = input1->data.i32;
-  const TfLiteTensor* input2;
-  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input2));
-  const int32_t* input_data2 = input2->data.i32;
-
-  TfLiteTensor* output;
-  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
-  int32_t* output_data = output->data.i32;
-  output_data[0] =
-      0;  // Catch output tensor sharing memory with an input tensor
-  output_data[0] = input_data[0] + input_data1[0] + input_data2[0];
-  return kTfLiteOk;
-}
-
-bool MultipleInputs::freed_ = false;
-
-AllOpsResolver GetOpResolver() {
-  AllOpsResolver op_resolver;
-  op_resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration());
-  op_resolver.AddCustom("simple_stateful_op",
-                        SimpleStatefulOp::GetMutableRegistration());
-  op_resolver.AddCustom("multiple_inputs_op",
-                        MultipleInputs::GetMutableRegistration());
-  return op_resolver;
-}
-
-const Model* GetSimpleMockModel() {
-  static Model* model = nullptr;
-  if (!model) {
-    model = const_cast<Model*>(BuildSimpleMockModel());
-  }
-  return model;
-}
-
-const Model* GetSimpleMultipleInputsModel() {
-  static Model* model = nullptr;
-  if (!model) {
-    model = const_cast<Model*>(BuildSimpleMultipleInputsModel());
-  }
-  return model;
-}
-
-const Model* GetComplexMockModel() {
-  static Model* model = nullptr;
-  if (!model) {
-    model = const_cast<Model*>(BuildComplexMockModel());
-  }
-  return model;
-}
-
-const Model* GetSimpleModelWithBranch() {
-  static Model* model = nullptr;
-  if (!model) {
-    model = const_cast<Model*>(BuildSimpleModelWithBranch());
-  }
-  return model;
-}
-
-const Model* GetModelWithOfflinePlanning(int num_tensors,
-                                         const int32_t* metadata_buffer,
-                                         NodeConnection* node_conn,
-                                         int num_conns,
-                                         int num_subgraph_inputs) {
-  const Model* model = BuildModelWithOfflinePlanning(
-      num_tensors, metadata_buffer, node_conn, num_conns, num_subgraph_inputs);
-  return model;
-}
-
-const Model* GetSimpleStatefulModel() {
-  static Model* model = nullptr;
-  if (!model) {
-    model = const_cast<Model*>(BuildSimpleStatefulModel());
-  }
-  return model;
-}
-
-const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) {
-  using flatbuffers::Offset;
-  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
-  constexpr size_t tensor_shape_size = 1;
-  const int32_t tensor_shape[tensor_shape_size] = {size};
-  const Offset<Tensor> tensor_offset = CreateTensor(
-      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
-      TensorType_INT32, 0, builder->CreateString("test_tensor"), 0,
-      is_variable);
-  builder->Finish(tensor_offset);
-  void* tensor_pointer = builder->GetBufferPointer();
-  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
-  return tensor;
-}
-
-const Tensor* CreateQuantizedFlatbufferTensor(int size) {
-  using flatbuffers::Offset;
-  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
-  const Offset<QuantizationParameters> quant_params =
-      CreateQuantizationParameters(
-          *builder,
-          /*min=*/builder->CreateVector<float>({0.1f}),
-          /*max=*/builder->CreateVector<float>({0.2f}),
-          /*scale=*/builder->CreateVector<float>({0.3f}),
-          /*zero_point=*/builder->CreateVector<int64_t>({100ll}));
-
-  constexpr size_t tensor_shape_size = 1;
-  const int32_t tensor_shape[tensor_shape_size] = {size};
-  const Offset<Tensor> tensor_offset = CreateTensor(
-      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
-      TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params,
-      false);
-  builder->Finish(tensor_offset);
-  void* tensor_pointer = builder->GetBufferPointer();
-  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
-  return tensor;
-}
-
-const Tensor* CreateMissingQuantizationFlatbufferTensor(int size) {
-  using flatbuffers::Offset;
-  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
-  const Offset<QuantizationParameters> quant_params =
-      CreateQuantizationParameters(*builder, 0, 0, 0, 0,
-                                   QuantizationDetails_NONE, 0, 0);
-  constexpr size_t tensor_shape_size = 1;
-  const int32_t tensor_shape[tensor_shape_size] = {size};
-  const Offset<Tensor> tensor_offset = CreateTensor(
-      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
-      TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params,
-      false);
-  builder->Finish(tensor_offset);
-  void* tensor_pointer = builder->GetBufferPointer();
-  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
-  return tensor;
-}
-
-const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
-CreateFlatbufferBuffers() {
-  using flatbuffers::Offset;
-  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
-  constexpr size_t buffers_size = 1;
-  const Offset<Buffer> buffers[buffers_size] = {
-      CreateBuffer(*builder),
-  };
-  const flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
-      buffers_offset = builder->CreateVector(buffers, buffers_size);
-  builder->Finish(buffers_offset);
-  void* buffers_pointer = builder->GetBufferPointer();
-  const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* result =
-      flatbuffers::GetRoot<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>(
-          buffers_pointer);
-  return result;
-}
-
-int TestStrcmp(const char* a, const char* b) {
-  if ((a == nullptr) || (b == nullptr)) {
-    return -1;
-  }
-  while ((*a != 0) && (*a == *b)) {
-    a++;
-    b++;
-  }
-  return *reinterpret_cast<const unsigned char*>(a) -
-         *reinterpret_cast<const unsigned char*>(b);
-}
-
-// Wrapper to forward kernel errors to the interpreter's error reporter.
-void ReportOpError(struct TfLiteContext* context, const char* format, ...) {
-#ifndef TF_LITE_STRIP_ERROR_STRINGS
-  ErrorReporter* error_reporter = static_cast<ErrorReporter*>(context->impl_);
-  va_list args;
-  va_start(args, format);
-  TF_LITE_REPORT_ERROR(error_reporter, format, args);
-  va_end(args);
-#endif
-}
-
-// Create a TfLiteIntArray from an array of ints. The first element in the
-// supplied array must be the size of the array expressed as an int.
-TfLiteIntArray* IntArrayFromInts(const int* int_array) {
-  return const_cast<TfLiteIntArray*>(
-      reinterpret_cast<const TfLiteIntArray*>(int_array));
-}
-
-// Create a TfLiteFloatArray from an array of floats. The first element in the
-// supplied array must be the size of the array expressed as a float.
-TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
-  static_assert(sizeof(float) == sizeof(int),
-                "assumes sizeof(float) == sizeof(int) to perform casting");
-  int size = static_cast<int>(floats[0]);
-  *reinterpret_cast<int32_t*>(const_cast<float*>(floats)) = size;
-  return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
-}
-
-TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
-                                       TfLiteIntArray* dims, float input_scale,
-                                       float weights_scale, bool is_variable) {
-  float bias_scale = input_scale * weights_scale;
-  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
-
-  // Quantized int32_t tensors always have a zero point of 0, since the range of
-  // int32_t values is large, and because zero point costs extra cycles during
-  // processing.
-  TfLiteTensor result =
-      CreateQuantizedTensor(quantized, dims, bias_scale, 0, is_variable);
-  return result;
-}
-
-// Quantizes int32_t bias tensor with per-channel weights determined by input
-// scale multiplied by weight scale for each channel.
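Two conventions in the helpers above are easy to miss: IntArrayFromInts and FloatArrayFromFloats expect element 0 of the supplied array to hold the number of elements that follow, and CreateQuantizedBiasTensor (like the per-channel variant that follows below) derives the bias scale as input_scale * weights_scale with a zero point of 0. A minimal standalone C++ sketch of both conventions, using hypothetical scale values that are not taken from this patch:

// Sketch only: demonstrates the length-prefix and bias-scale conventions
// used by the test helpers; all numeric values here are made up.
#include <cstdint>
#include <cstdio>

int main() {
  // Length-prefixed dims: a rank-2 shape {2, 3} is encoded as {2, 2, 3}.
  int dims_data[] = {2, 2, 3};
  std::printf("rank=%d dims=[%d, %d]\n", dims_data[0], dims_data[1],
              dims_data[2]);

  // Bias quantization: real_value ~= bias_scale * quantized_value.
  const float input_scale = 0.5f;     // hypothetical
  const float weights_scale = 0.25f;  // hypothetical
  const float bias_scale = input_scale * weights_scale;  // 0.125
  const float real_bias = 1.0f;
  const int32_t quantized_bias =
      static_cast<int32_t>(real_bias / bias_scale + 0.5f);  // rounds to 8
  std::printf("bias_scale=%f quantized=%d\n", static_cast<double>(bias_scale),
              static_cast<int>(quantized_bias));
  return 0;
}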
-TfLiteTensor CreatePerChannelQuantizedBiasTensor( - const float* input, int32_t* quantized, TfLiteIntArray* dims, - float input_scale, float* weight_scales, float* scales, int* zero_points, - TfLiteAffineQuantization* affine_quant, int quantized_dimension, - bool is_variable) { - int input_size = ElementCount(*dims); - int num_channels = dims->data[quantized_dimension]; - // First element is reserved for array length - zero_points[0] = num_channels; - scales[0] = static_cast(num_channels); - float* scales_array = &scales[1]; - for (int i = 0; i < num_channels; i++) { - scales_array[i] = input_scale * weight_scales[i]; - zero_points[i + 1] = 0; - } - - SymmetricPerChannelQuantize(input, quantized, input_size, - num_channels, scales_array); - - affine_quant->scale = FloatArrayFromFloats(scales); - affine_quant->zero_point = IntArrayFromInts(zero_points); - affine_quant->quantized_dimension = quantized_dimension; - - TfLiteTensor result = CreateTensor(quantized, dims, is_variable); - result.quantization = {kTfLiteAffineQuantization, affine_quant}; - return result; -} - -TfLiteTensor CreateSymmetricPerChannelQuantizedTensor( - const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales, - int* zero_points, TfLiteAffineQuantization* affine_quant, - int quantized_dimension, bool is_variable) { - int channel_count = dims->data[quantized_dimension]; - scales[0] = static_cast(channel_count); - zero_points[0] = channel_count; - - SignedSymmetricPerChannelQuantize(input, dims, quantized_dimension, quantized, - &scales[1]); - - for (int i = 0; i < channel_count; i++) { - zero_points[i + 1] = 0; - } - - affine_quant->scale = FloatArrayFromFloats(scales); - affine_quant->zero_point = IntArrayFromInts(zero_points); - affine_quant->quantized_dimension = quantized_dimension; - - TfLiteTensor result = CreateTensor(quantized, dims, is_variable); - result.quantization = {kTfLiteAffineQuantization, affine_quant}; - return result; -} - -size_t GetModelTensorCount(const Model* model) { - auto* subgraphs = model->subgraphs(); - if (subgraphs) { - return (*subgraphs)[0]->tensors()->size(); - } - return 0; -} - -} // namespace testing -} // namespace tflite diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h index 03c0872..544181d 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h @@ -16,15 +16,13 @@ limitations under the License. #ifndef TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ #define TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ -// Useful functions for writing tests. - #include #include #include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" // from @flatbuffers -#include "edge-impulse-sdk/tensorflow/lite//kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/c/common.h" #include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" +#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" #include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" #include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" #include "edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h" @@ -90,6 +88,19 @@ class MultipleInputs { static bool freed_; }; +// A simple no-op operator. 
+class NoOp { + public: + static const TfLiteRegistration* getRegistration(); + static TfLiteRegistration* GetMutableRegistration(); + static void* Init(TfLiteContext* context, const char* buffer, size_t length); + static void Free(TfLiteContext* context, void* buffer); + static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node); + static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node); + + static bool freed_; +}; + // Returns an Op Resolver that can be used in the testing code. AllOpsResolver GetOpResolver(); @@ -101,6 +112,12 @@ const Model* GetSimpleMockModel(); // tensors, and operators. const Model* GetComplexMockModel(); +// Returns a simple example flatbuffer TensorFlow Lite model. Contains 1 input, +// 1 layer of weights, 1 output Tensor, and 1 operator. +// The size of all three tensors is 256 x 256, which is larger than what other +// models provide from this test helper. +const Model* GetModelWith256x256Tensor(); + // Returns a simple flatbuffer model with two branches. const Model* GetSimpleModelWithBranch(); @@ -126,9 +143,33 @@ const Model* GetModelWithOfflinePlanning(int num_tensors, int num_conns, int num_subgraph_inputs = 0); +// Returns a flatbuffer with a single operator, two inputs (one unused) and one +// output. +const Model* GetModelWithUnusedInputs(); + +// Returns a flatbuffer with a single operator, zero inputs and two outputs +// (one unused). +const Model* GetModelWithUnusedOperatorOutputs(); + // Returns a flatbuffer model with `simple_stateful_op` const Model* GetSimpleStatefulModel(); +// Returns a flatbuffer model with "if" and two subgraphs. +const Model* GetSimpleModelWithSubgraphsAndIf(); + +// Returns a flatbuffer model with "if" and two subgraphs one of which is empty. +const Model* GetSimpleModelWithIfAndEmptySubgraph(); + +// Returns a flatbuffer model with "while" and three subgraphs. +const Model* GetSimpleModelWithSubgraphsAndWhile(); + +// Returns a flatbuffer model with "if" and two subgraphs and the input tensor 1 +// of "if" subgraph overlaps with the input tensor 2 of subgraph 1. +const Model* GetModelWithIfAndSubgraphInputTensorOverlap(); + +// Returns a flatbuffer model with null subgraph/operator inputs and outputs. +const Model* GetSimpleModelWithNullInputsAndOutputs(); + // Builds a one-dimensional flatbuffer tensor of the given size. const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false); @@ -146,45 +187,60 @@ CreateFlatbufferBuffers(); // Performs a simple string comparison without requiring standard C library. int TestStrcmp(const char* a, const char* b); -// Wrapper to forward kernel errors to the interpreter's error reporter. -void ReportOpError(struct TfLiteContext* context, const char* format, ...); - void PopulateContext(TfLiteTensor* tensors, int tensors_size, TfLiteContext* context); // Create a TfLiteIntArray from an array of ints. The first element in the // supplied array must be the size of the array expressed as an int. -TfLiteIntArray* IntArrayFromInts(const int* int_array); +TfLiteIntArray* IntArrayFromInts(int* int_array); // Create a TfLiteFloatArray from an array of floats. The first element in the // supplied array must be the size of the array expressed as a float. TfLiteFloatArray* FloatArrayFromFloats(const float* floats); +// Assumes that `src_tensor` is a buffer where each element is a 4-bit value +// stored in 8-bit. +// Returns a new buffer that is packed densely with 2 4-bit values in a byte. +// The packing format is low-bits-first, i.e. 
the lower nibble of a byte is +// filled first, followed by the upper nibble. +void PackInt4ValuesDenselyInPlace(uint8_t* src_buffer, int buffer_size); + template TfLiteTensor CreateTensor(const T* data, TfLiteIntArray* dims, - const bool is_variable = false) { + const bool is_variable = false, + TfLiteType type = kTfLiteNoType) { TfLiteTensor result; result.dims = dims; result.params = {}; result.quantization = {kTfLiteNoQuantization, nullptr}; result.is_variable = is_variable; result.allocation_type = kTfLiteMemNone; - result.type = typeToTfLiteType(); - // Const cast is used to allow passing in const and non-const arrays within a - // single CreateTensor method. A Const array should be used for immutable - // input tensors and non-const array should be used for mutable and output - // tensors. result.data.data = const_cast(data); result.quantization = {kTfLiteAffineQuantization, nullptr}; result.bytes = ElementCount(*dims) * sizeof(T); + result.data.data = const_cast(data); + + if (type == kTfLiteInt4) { + result.type = kTfLiteInt4; + PackInt4ValuesDenselyInPlace(tflite::GetTensorData(&result), + ElementCount(*dims)); + result.bytes = ((ElementCount(*dims) + 1) / 2); + } else { + // Const cast is used to allow passing in const and non-const arrays within + // a single CreateTensor method. A Const array should be used for immutable + // input tensors and non-const array should be used for mutable and output + // tensors. + result.type = typeToTfLiteType(); + } return result; } template TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims, const float scale, const int zero_point = 0, - const bool is_variable = false) { - TfLiteTensor result = CreateTensor(data, dims, is_variable); + const bool is_variable = false, + TfLiteType type = kTfLiteNoType) { + TfLiteTensor result = CreateTensor(data, dims, is_variable, type); result.params = {scale, zero_point}; result.quantization = {kTfLiteAffineQuantization, nullptr}; return result; @@ -193,17 +249,30 @@ TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims, template TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized, TfLiteIntArray* dims, float scale, - int zero_point, bool is_variable = false) { + int zero_point, bool is_variable = false, + TfLiteType type = kTfLiteNoType) { int input_size = ElementCount(*dims); tflite::Quantize(input, quantized, input_size, scale, zero_point); - return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable); + return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable, + type); } +TfLiteTensor CreateQuantizedBiasTensor(const float* data, int16_t* quantized, + TfLiteIntArray* dims, float input_scale, + float weights_scale, + bool is_variable = false); + TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized, TfLiteIntArray* dims, float input_scale, float weights_scale, bool is_variable = false); +TfLiteTensor CreateQuantizedBiasTensor(const float* data, + std::int64_t* quantized, + TfLiteIntArray* dims, float input_scale, + float weights_scale, + bool is_variable = false); + // Quantizes int32_t bias tensor with per-channel weights determined by input // scale multiplied by weight scale for each channel. 
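The new kTfLiteInt4 branch in CreateTensor above repacks the buffer so that two 4-bit values share one byte, which is why result.bytes becomes (ElementCount(*dims) + 1) / 2. A standalone sketch of the low-nibble-first packing convention described in the comment (illustrative only, not the SDK's PackInt4ValuesDenselyInPlace implementation); the per-channel bias helpers continue below:

// Sketch only: packs 4-bit values two per byte, lower nibble first.
#include <cassert>
#include <cstdint>

void PackInt4LowNibbleFirst(uint8_t* buffer, int element_count) {
  for (int i = 0; i < element_count; ++i) {
    const uint8_t nibble = buffer[i] & 0x0F;
    if (i % 2 == 0) {
      buffer[i / 2] = nibble;        // fill the lower nibble first
    } else {
      buffer[i / 2] |= nibble << 4;  // then the upper nibble
    }
  }
}

int main() {
  uint8_t values[4] = {0x1, 0x2, 0x3, 0x4};
  PackInt4LowNibbleFirst(values, 4);
  assert(values[0] == 0x21);  // 0x2 in the upper nibble, 0x1 in the lower
  assert(values[1] == 0x43);
  return 0;
}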
TfLiteTensor CreatePerChannelQuantizedBiasTensor( @@ -212,10 +281,19 @@ TfLiteTensor CreatePerChannelQuantizedBiasTensor( TfLiteAffineQuantization* affine_quant, int quantized_dimension, bool is_variable = false); +// Quantizes int64_t bias tensor with per-channel weights determined by input +// scale multiplied by weight scale for each channel. +TfLiteTensor CreatePerChannelQuantizedBiasTensor( + const float* input, std::int64_t* quantized, TfLiteIntArray* dims, + float input_scale, float* weight_scales, float* scales, int* zero_points, + TfLiteAffineQuantization* affine_quant, int quantized_dimension, + bool is_variable = false); + TfLiteTensor CreateSymmetricPerChannelQuantizedTensor( const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales, int* zero_points, TfLiteAffineQuantization* affine_quant, - int quantized_dimension, bool is_variable = false); + int quantized_dimension, bool is_variable = false, + TfLiteType tensor_weight_type = kTfLiteNoType); // Returns the number of tensors in the default subgraph for a tflite::Model. size_t GetModelTensorCount(const Model* model); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h index 82cf1c0..28d2bf8 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h @@ -27,7 +27,7 @@ limitations under the License. #include -#include "edge-impulse-sdk/tensorflow/lite/c/common.h" +#include "edge-impulse-sdk/tensorflow/lite/core/c/common.h" namespace tflite { @@ -61,6 +61,7 @@ struct TfLiteTypeToType {}; // Specializations below MATCH_TYPE_AND_TFLITE_TYPE(int32_t, kTfLiteInt32); MATCH_TYPE_AND_TFLITE_TYPE(uint32_t, kTfLiteUInt32); MATCH_TYPE_AND_TFLITE_TYPE(int16_t, kTfLiteInt16); +MATCH_TYPE_AND_TFLITE_TYPE(uint16_t, kTfLiteUInt16); MATCH_TYPE_AND_TFLITE_TYPE(int64_t, kTfLiteInt64); MATCH_TYPE_AND_TFLITE_TYPE(float, kTfLiteFloat32); MATCH_TYPE_AND_TFLITE_TYPE(unsigned char, kTfLiteUInt8); diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h index f46e84d..416029f 100644 --- a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h @@ -1,406 +1,578 @@ -/* Copyright 2018 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-==============================================================================*/ -// automatically generated by the FlatBuffers compiler, do not modify - - -#ifndef FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ -#define FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ +#ifndef FLATBUFFERS_GENERATED_SCHEMA_SUPPL_TFLITE_H_ +#define FLATBUFFERS_GENERATED_SCHEMA_SUPPL_TFLITE_H_ #include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" +// Ensure the included flatbuffers.h is the same version as when this file was +// generated, otherwise it may not be compatible. +static_assert(FLATBUFFERS_VERSION_MAJOR == 2 && + FLATBUFFERS_VERSION_MINOR == 0 && + FLATBUFFERS_VERSION_REVISION == 6, + "Non-compatible flatbuffers version included"); + namespace tflite { struct CustomQuantization; +struct CustomQuantizationBuilder; struct CustomQuantizationT; struct QuantizationParameters; +struct QuantizationParametersBuilder; struct QuantizationParametersT; struct Int32Vector; +struct Int32VectorBuilder; struct Int32VectorT; struct Uint16Vector; +struct Uint16VectorBuilder; struct Uint16VectorT; struct Uint8Vector; +struct Uint8VectorBuilder; struct Uint8VectorT; struct DimensionMetadata; +struct DimensionMetadataBuilder; struct DimensionMetadataT; struct SparsityParameters; +struct SparsityParametersBuilder; struct SparsityParametersT; +struct VariantSubType; +struct VariantSubTypeBuilder; +struct VariantSubTypeT; + struct Tensor; +struct TensorBuilder; struct TensorT; struct Conv2DOptions; +struct Conv2DOptionsBuilder; struct Conv2DOptionsT; struct Conv3DOptions; +struct Conv3DOptionsBuilder; struct Conv3DOptionsT; struct Pool2DOptions; +struct Pool2DOptionsBuilder; struct Pool2DOptionsT; struct DepthwiseConv2DOptions; +struct DepthwiseConv2DOptionsBuilder; struct DepthwiseConv2DOptionsT; struct ConcatEmbeddingsOptions; +struct ConcatEmbeddingsOptionsBuilder; struct ConcatEmbeddingsOptionsT; struct LSHProjectionOptions; +struct LSHProjectionOptionsBuilder; struct LSHProjectionOptionsT; struct SVDFOptions; +struct SVDFOptionsBuilder; struct SVDFOptionsT; struct RNNOptions; +struct RNNOptionsBuilder; struct RNNOptionsT; struct SequenceRNNOptions; +struct SequenceRNNOptionsBuilder; struct SequenceRNNOptionsT; struct BidirectionalSequenceRNNOptions; +struct BidirectionalSequenceRNNOptionsBuilder; struct BidirectionalSequenceRNNOptionsT; struct FullyConnectedOptions; +struct FullyConnectedOptionsBuilder; struct FullyConnectedOptionsT; struct SoftmaxOptions; +struct SoftmaxOptionsBuilder; struct SoftmaxOptionsT; struct ConcatenationOptions; +struct ConcatenationOptionsBuilder; struct ConcatenationOptionsT; struct AddOptions; +struct AddOptionsBuilder; struct AddOptionsT; struct MulOptions; +struct MulOptionsBuilder; struct MulOptionsT; struct L2NormOptions; +struct L2NormOptionsBuilder; struct L2NormOptionsT; struct LocalResponseNormalizationOptions; +struct LocalResponseNormalizationOptionsBuilder; struct LocalResponseNormalizationOptionsT; struct LSTMOptions; +struct LSTMOptionsBuilder; struct LSTMOptionsT; struct UnidirectionalSequenceLSTMOptions; +struct UnidirectionalSequenceLSTMOptionsBuilder; struct UnidirectionalSequenceLSTMOptionsT; struct BidirectionalSequenceLSTMOptions; +struct BidirectionalSequenceLSTMOptionsBuilder; struct BidirectionalSequenceLSTMOptionsT; struct ResizeBilinearOptions; +struct ResizeBilinearOptionsBuilder; struct ResizeBilinearOptionsT; struct ResizeNearestNeighborOptions; +struct ResizeNearestNeighborOptionsBuilder; struct ResizeNearestNeighborOptionsT; struct 
CallOptions; +struct CallOptionsBuilder; struct CallOptionsT; struct PadOptions; +struct PadOptionsBuilder; struct PadOptionsT; struct PadV2Options; +struct PadV2OptionsBuilder; struct PadV2OptionsT; struct ReshapeOptions; +struct ReshapeOptionsBuilder; struct ReshapeOptionsT; struct SpaceToBatchNDOptions; +struct SpaceToBatchNDOptionsBuilder; struct SpaceToBatchNDOptionsT; struct BatchToSpaceNDOptions; +struct BatchToSpaceNDOptionsBuilder; struct BatchToSpaceNDOptionsT; struct SkipGramOptions; +struct SkipGramOptionsBuilder; struct SkipGramOptionsT; struct SpaceToDepthOptions; +struct SpaceToDepthOptionsBuilder; struct SpaceToDepthOptionsT; struct DepthToSpaceOptions; +struct DepthToSpaceOptionsBuilder; struct DepthToSpaceOptionsT; struct SubOptions; +struct SubOptionsBuilder; struct SubOptionsT; struct DivOptions; +struct DivOptionsBuilder; struct DivOptionsT; struct TopKV2Options; +struct TopKV2OptionsBuilder; struct TopKV2OptionsT; struct EmbeddingLookupSparseOptions; +struct EmbeddingLookupSparseOptionsBuilder; struct EmbeddingLookupSparseOptionsT; struct GatherOptions; +struct GatherOptionsBuilder; struct GatherOptionsT; struct TransposeOptions; +struct TransposeOptionsBuilder; struct TransposeOptionsT; struct ExpOptions; +struct ExpOptionsBuilder; struct ExpOptionsT; struct CosOptions; +struct CosOptionsBuilder; struct CosOptionsT; struct ReducerOptions; +struct ReducerOptionsBuilder; struct ReducerOptionsT; struct SqueezeOptions; +struct SqueezeOptionsBuilder; struct SqueezeOptionsT; struct SplitOptions; +struct SplitOptionsBuilder; struct SplitOptionsT; struct SplitVOptions; +struct SplitVOptionsBuilder; struct SplitVOptionsT; struct StridedSliceOptions; +struct StridedSliceOptionsBuilder; struct StridedSliceOptionsT; struct LogSoftmaxOptions; +struct LogSoftmaxOptionsBuilder; struct LogSoftmaxOptionsT; struct CastOptions; +struct CastOptionsBuilder; struct CastOptionsT; struct DequantizeOptions; +struct DequantizeOptionsBuilder; struct DequantizeOptionsT; struct MaximumMinimumOptions; +struct MaximumMinimumOptionsBuilder; struct MaximumMinimumOptionsT; struct TileOptions; +struct TileOptionsBuilder; struct TileOptionsT; struct ArgMaxOptions; +struct ArgMaxOptionsBuilder; struct ArgMaxOptionsT; struct ArgMinOptions; +struct ArgMinOptionsBuilder; struct ArgMinOptionsT; struct GreaterOptions; +struct GreaterOptionsBuilder; struct GreaterOptionsT; struct GreaterEqualOptions; +struct GreaterEqualOptionsBuilder; struct GreaterEqualOptionsT; struct LessOptions; +struct LessOptionsBuilder; struct LessOptionsT; struct LessEqualOptions; +struct LessEqualOptionsBuilder; struct LessEqualOptionsT; struct NegOptions; +struct NegOptionsBuilder; struct NegOptionsT; struct SelectOptions; +struct SelectOptionsBuilder; struct SelectOptionsT; struct SliceOptions; +struct SliceOptionsBuilder; struct SliceOptionsT; struct TransposeConvOptions; +struct TransposeConvOptionsBuilder; struct TransposeConvOptionsT; struct ExpandDimsOptions; +struct ExpandDimsOptionsBuilder; struct ExpandDimsOptionsT; struct SparseToDenseOptions; +struct SparseToDenseOptionsBuilder; struct SparseToDenseOptionsT; struct EqualOptions; +struct EqualOptionsBuilder; struct EqualOptionsT; struct NotEqualOptions; +struct NotEqualOptionsBuilder; struct NotEqualOptionsT; struct ShapeOptions; +struct ShapeOptionsBuilder; struct ShapeOptionsT; struct RankOptions; +struct RankOptionsBuilder; struct RankOptionsT; struct PowOptions; +struct PowOptionsBuilder; struct PowOptionsT; struct FakeQuantOptions; +struct FakeQuantOptionsBuilder; 
struct FakeQuantOptionsT; struct PackOptions; +struct PackOptionsBuilder; struct PackOptionsT; struct LogicalOrOptions; +struct LogicalOrOptionsBuilder; struct LogicalOrOptionsT; struct OneHotOptions; +struct OneHotOptionsBuilder; struct OneHotOptionsT; struct AbsOptions; +struct AbsOptionsBuilder; struct AbsOptionsT; struct HardSwishOptions; +struct HardSwishOptionsBuilder; struct HardSwishOptionsT; struct LogicalAndOptions; +struct LogicalAndOptionsBuilder; struct LogicalAndOptionsT; struct LogicalNotOptions; +struct LogicalNotOptionsBuilder; struct LogicalNotOptionsT; struct UnpackOptions; +struct UnpackOptionsBuilder; struct UnpackOptionsT; struct FloorDivOptions; +struct FloorDivOptionsBuilder; struct FloorDivOptionsT; struct SquareOptions; +struct SquareOptionsBuilder; struct SquareOptionsT; struct ZerosLikeOptions; +struct ZerosLikeOptionsBuilder; struct ZerosLikeOptionsT; struct FillOptions; +struct FillOptionsBuilder; struct FillOptionsT; struct FloorModOptions; +struct FloorModOptionsBuilder; struct FloorModOptionsT; struct RangeOptions; +struct RangeOptionsBuilder; struct RangeOptionsT; struct LeakyReluOptions; +struct LeakyReluOptionsBuilder; struct LeakyReluOptionsT; struct SquaredDifferenceOptions; +struct SquaredDifferenceOptionsBuilder; struct SquaredDifferenceOptionsT; struct MirrorPadOptions; +struct MirrorPadOptionsBuilder; struct MirrorPadOptionsT; struct UniqueOptions; +struct UniqueOptionsBuilder; struct UniqueOptionsT; struct ReverseV2Options; +struct ReverseV2OptionsBuilder; struct ReverseV2OptionsT; struct AddNOptions; +struct AddNOptionsBuilder; struct AddNOptionsT; struct GatherNdOptions; +struct GatherNdOptionsBuilder; struct GatherNdOptionsT; struct WhereOptions; +struct WhereOptionsBuilder; struct WhereOptionsT; struct ReverseSequenceOptions; +struct ReverseSequenceOptionsBuilder; struct ReverseSequenceOptionsT; struct MatrixDiagOptions; +struct MatrixDiagOptionsBuilder; struct MatrixDiagOptionsT; struct QuantizeOptions; +struct QuantizeOptionsBuilder; struct QuantizeOptionsT; struct MatrixSetDiagOptions; +struct MatrixSetDiagOptionsBuilder; struct MatrixSetDiagOptionsT; struct IfOptions; +struct IfOptionsBuilder; struct IfOptionsT; struct CallOnceOptions; +struct CallOnceOptionsBuilder; struct CallOnceOptionsT; struct WhileOptions; +struct WhileOptionsBuilder; struct WhileOptionsT; struct NonMaxSuppressionV4Options; +struct NonMaxSuppressionV4OptionsBuilder; struct NonMaxSuppressionV4OptionsT; struct NonMaxSuppressionV5Options; +struct NonMaxSuppressionV5OptionsBuilder; struct NonMaxSuppressionV5OptionsT; struct ScatterNdOptions; +struct ScatterNdOptionsBuilder; struct ScatterNdOptionsT; struct SelectV2Options; +struct SelectV2OptionsBuilder; struct SelectV2OptionsT; struct DensifyOptions; +struct DensifyOptionsBuilder; struct DensifyOptionsT; struct SegmentSumOptions; +struct SegmentSumOptionsBuilder; struct SegmentSumOptionsT; struct BatchMatMulOptions; +struct BatchMatMulOptionsBuilder; struct BatchMatMulOptionsT; struct CumsumOptions; +struct CumsumOptionsBuilder; struct CumsumOptionsT; struct BroadcastToOptions; +struct BroadcastToOptionsBuilder; struct BroadcastToOptionsT; struct Rfft2dOptions; +struct Rfft2dOptionsBuilder; struct Rfft2dOptionsT; struct HashtableOptions; +struct HashtableOptionsBuilder; struct HashtableOptionsT; struct HashtableFindOptions; +struct HashtableFindOptionsBuilder; struct HashtableFindOptionsT; struct HashtableImportOptions; +struct HashtableImportOptionsBuilder; struct HashtableImportOptionsT; struct HashtableSizeOptions; 
+struct HashtableSizeOptionsBuilder; struct HashtableSizeOptionsT; +struct VarHandleOptions; +struct VarHandleOptionsBuilder; +struct VarHandleOptionsT; + +struct ReadVariableOptions; +struct ReadVariableOptionsBuilder; +struct ReadVariableOptionsT; + +struct AssignVariableOptions; +struct AssignVariableOptionsBuilder; +struct AssignVariableOptionsT; + +struct RandomOptions; +struct RandomOptionsBuilder; +struct RandomOptionsT; + +struct BucketizeOptions; +struct BucketizeOptionsBuilder; +struct BucketizeOptionsT; + +struct GeluOptions; +struct GeluOptionsBuilder; +struct GeluOptionsT; + +struct DynamicUpdateSliceOptions; +struct DynamicUpdateSliceOptionsBuilder; +struct DynamicUpdateSliceOptionsT; + +struct UnsortedSegmentProdOptions; +struct UnsortedSegmentProdOptionsBuilder; +struct UnsortedSegmentProdOptionsT; + +struct UnsortedSegmentMaxOptions; +struct UnsortedSegmentMaxOptionsBuilder; +struct UnsortedSegmentMaxOptionsT; + +struct UnsortedSegmentSumOptions; +struct UnsortedSegmentSumOptionsBuilder; +struct UnsortedSegmentSumOptionsT; + +struct ATan2Options; +struct ATan2OptionsBuilder; +struct ATan2OptionsT; + +struct UnsortedSegmentMinOptions; +struct UnsortedSegmentMinOptionsBuilder; +struct UnsortedSegmentMinOptionsT; + +struct SignOptions; +struct SignOptionsBuilder; +struct SignOptionsT; + struct OperatorCode; +struct OperatorCodeBuilder; struct OperatorCodeT; struct Operator; +struct OperatorBuilder; struct OperatorT; struct SubGraph; +struct SubGraphBuilder; struct SubGraphT; struct Buffer; +struct BufferBuilder; struct BufferT; struct Metadata; +struct MetadataBuilder; struct MetadataT; struct TensorMap; +struct TensorMapBuilder; struct TensorMapT; struct SignatureDef; +struct SignatureDefBuilder; struct SignatureDefT; struct Model; +struct ModelBuilder; struct ModelT; -enum TensorType { +enum TensorType : int8_t { TensorType_FLOAT32 = 0, TensorType_FLOAT16 = 1, TensorType_INT32 = 2, @@ -417,11 +589,13 @@ enum TensorType { TensorType_RESOURCE = 13, TensorType_VARIANT = 14, TensorType_UINT32 = 15, + TensorType_UINT16 = 16, + TensorType_INT4 = 17, TensorType_MIN = TensorType_FLOAT32, - TensorType_MAX = TensorType_UINT32 + TensorType_MAX = TensorType_INT4 }; -inline const TensorType (&EnumValuesTensorType())[16] { +inline const TensorType (&EnumValuesTensorType())[18] { static const TensorType values[] = { TensorType_FLOAT32, TensorType_FLOAT16, @@ -438,13 +612,15 @@ inline const TensorType (&EnumValuesTensorType())[16] { TensorType_UINT64, TensorType_RESOURCE, TensorType_VARIANT, - TensorType_UINT32 + TensorType_UINT32, + TensorType_UINT16, + TensorType_INT4 }; return values; } inline const char * const *EnumNamesTensorType() { - static const char * const names[17] = { + static const char * const names[19] = { "FLOAT32", "FLOAT16", "INT32", @@ -461,244 +637,21 @@ inline const char * const *EnumNamesTensorType() { "RESOURCE", "VARIANT", "UINT32", + "UINT16", + "INT4", nullptr }; return names; } inline const char *EnumNameTensorType(TensorType e) { - if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_UINT32)) return ""; + if (flatbuffers::IsOutRange(e, TensorType_FLOAT32, TensorType_INT4)) return ""; const size_t index = static_cast(e); return EnumNamesTensorType()[index]; } -enum QuantizationDetails { - QuantizationDetails_NONE = 0, - QuantizationDetails_CustomQuantization = 1, - QuantizationDetails_MIN = QuantizationDetails_NONE, - QuantizationDetails_MAX = QuantizationDetails_CustomQuantization -}; - -inline const QuantizationDetails 
(&EnumValuesQuantizationDetails())[2] { - static const QuantizationDetails values[] = { - QuantizationDetails_NONE, - QuantizationDetails_CustomQuantization - }; - return values; -} - -inline const char * const *EnumNamesQuantizationDetails() { - static const char * const names[3] = { - "NONE", - "CustomQuantization", - nullptr - }; - return names; -} - -inline const char *EnumNameQuantizationDetails(QuantizationDetails e) { - if (flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_CustomQuantization)) return ""; - const size_t index = static_cast(e); - return EnumNamesQuantizationDetails()[index]; -} - -template struct QuantizationDetailsTraits { - static const QuantizationDetails enum_value = QuantizationDetails_NONE; -}; - -template<> struct QuantizationDetailsTraits { - static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; -}; - -struct QuantizationDetailsUnion { - QuantizationDetails type; - void *value; - - QuantizationDetailsUnion() : type(QuantizationDetails_NONE), value(nullptr) {} - QuantizationDetailsUnion(QuantizationDetailsUnion&& u) FLATBUFFERS_NOEXCEPT : - type(QuantizationDetails_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - QuantizationDetailsUnion(const QuantizationDetailsUnion &) FLATBUFFERS_NOEXCEPT; - QuantizationDetailsUnion &operator=(const QuantizationDetailsUnion &u) FLATBUFFERS_NOEXCEPT - { QuantizationDetailsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - QuantizationDetailsUnion &operator=(QuantizationDetailsUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~QuantizationDetailsUnion() { Reset(); } - - void Reset(); - -#ifndef FLATBUFFERS_CPP98_STL - template - void Set(T&& val) { - using RT = typename std::remove_reference::type; - Reset(); - type = QuantizationDetailsTraits::enum_value; - if (type != QuantizationDetails_NONE) { - value = new RT(std::forward(val)); - } - } -#endif // FLATBUFFERS_CPP98_STL - - static void *UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - tflite::CustomQuantizationT *AsCustomQuantization() { - return type == QuantizationDetails_CustomQuantization ? - reinterpret_cast(value) : nullptr; - } - const tflite::CustomQuantizationT *AsCustomQuantization() const { - return type == QuantizationDetails_CustomQuantization ? 
- reinterpret_cast(value) : nullptr; - } -}; - -bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type); -bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); - -enum DimensionType { - DimensionType_DENSE = 0, - DimensionType_SPARSE_CSR = 1, - DimensionType_MIN = DimensionType_DENSE, - DimensionType_MAX = DimensionType_SPARSE_CSR -}; - -inline const DimensionType (&EnumValuesDimensionType())[2] { - static const DimensionType values[] = { - DimensionType_DENSE, - DimensionType_SPARSE_CSR - }; - return values; -} - -inline const char * const *EnumNamesDimensionType() { - static const char * const names[3] = { - "DENSE", - "SPARSE_CSR", - nullptr - }; - return names; -} - -inline const char *EnumNameDimensionType(DimensionType e) { - if (flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR)) return ""; - const size_t index = static_cast(e); - return EnumNamesDimensionType()[index]; -} - -enum SparseIndexVector { - SparseIndexVector_NONE = 0, - SparseIndexVector_Int32Vector = 1, - SparseIndexVector_Uint16Vector = 2, - SparseIndexVector_Uint8Vector = 3, - SparseIndexVector_MIN = SparseIndexVector_NONE, - SparseIndexVector_MAX = SparseIndexVector_Uint8Vector -}; - -inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4] { - static const SparseIndexVector values[] = { - SparseIndexVector_NONE, - SparseIndexVector_Int32Vector, - SparseIndexVector_Uint16Vector, - SparseIndexVector_Uint8Vector - }; - return values; -} - -inline const char * const *EnumNamesSparseIndexVector() { - static const char * const names[5] = { - "NONE", - "Int32Vector", - "Uint16Vector", - "Uint8Vector", - nullptr - }; - return names; -} - -inline const char *EnumNameSparseIndexVector(SparseIndexVector e) { - if (flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector)) return ""; - const size_t index = static_cast(e); - return EnumNamesSparseIndexVector()[index]; -} - -template struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_NONE; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; -}; - -template<> struct SparseIndexVectorTraits { - static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; -}; - -struct SparseIndexVectorUnion { - SparseIndexVector type; - void *value; - - SparseIndexVectorUnion() : type(SparseIndexVector_NONE), value(nullptr) {} - SparseIndexVectorUnion(SparseIndexVectorUnion&& u) FLATBUFFERS_NOEXCEPT : - type(SparseIndexVector_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - SparseIndexVectorUnion(const SparseIndexVectorUnion &) FLATBUFFERS_NOEXCEPT; - SparseIndexVectorUnion &operator=(const SparseIndexVectorUnion &u) FLATBUFFERS_NOEXCEPT - { SparseIndexVectorUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - SparseIndexVectorUnion &operator=(SparseIndexVectorUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~SparseIndexVectorUnion() { Reset(); } - - void Reset(); - -#ifndef FLATBUFFERS_CPP98_STL - template - void Set(T&& val) { - using RT = typename std::remove_reference::type; - Reset(); - type = 
SparseIndexVectorTraits::enum_value; - if (type != SparseIndexVector_NONE) { - value = new RT(std::forward(val)); - } - } -#endif // FLATBUFFERS_CPP98_STL - - static void *UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; - - tflite::Int32VectorT *AsInt32Vector() { - return type == SparseIndexVector_Int32Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Int32VectorT *AsInt32Vector() const { - return type == SparseIndexVector_Int32Vector ? - reinterpret_cast(value) : nullptr; - } - tflite::Uint16VectorT *AsUint16Vector() { - return type == SparseIndexVector_Uint16Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Uint16VectorT *AsUint16Vector() const { - return type == SparseIndexVector_Uint16Vector ? - reinterpret_cast(value) : nullptr; - } - tflite::Uint8VectorT *AsUint8Vector() { - return type == SparseIndexVector_Uint8Vector ? - reinterpret_cast(value) : nullptr; - } - const tflite::Uint8VectorT *AsUint8Vector() const { - return type == SparseIndexVector_Uint8Vector ? - reinterpret_cast(value) : nullptr; - } -}; -bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type); -bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); - -enum BuiltinOperator { +enum BuiltinOperator : int32_t { BuiltinOperator_ADD = 0, BuiltinOperator_AVERAGE_POOL_2D = 1, BuiltinOperator_CONCATENATION = 2, @@ -839,11 +792,30 @@ enum BuiltinOperator { BuiltinOperator_HASHTABLE_FIND = 137, BuiltinOperator_HASHTABLE_IMPORT = 138, BuiltinOperator_HASHTABLE_SIZE = 139, + BuiltinOperator_REDUCE_ALL = 140, + BuiltinOperator_CONV_3D_TRANSPOSE = 141, + BuiltinOperator_VAR_HANDLE = 142, + BuiltinOperator_READ_VARIABLE = 143, + BuiltinOperator_ASSIGN_VARIABLE = 144, + BuiltinOperator_BROADCAST_ARGS = 145, + BuiltinOperator_RANDOM_STANDARD_NORMAL = 146, + BuiltinOperator_BUCKETIZE = 147, + BuiltinOperator_RANDOM_UNIFORM = 148, + BuiltinOperator_MULTINOMIAL = 149, + BuiltinOperator_GELU = 150, + BuiltinOperator_DYNAMIC_UPDATE_SLICE = 151, + BuiltinOperator_RELU_0_TO_1 = 152, + BuiltinOperator_UNSORTED_SEGMENT_PROD = 153, + BuiltinOperator_UNSORTED_SEGMENT_MAX = 154, + BuiltinOperator_UNSORTED_SEGMENT_SUM = 155, + BuiltinOperator_ATAN2 = 156, + BuiltinOperator_UNSORTED_SEGMENT_MIN = 157, + BuiltinOperator_SIGN = 158, BuiltinOperator_MIN = BuiltinOperator_ADD, - BuiltinOperator_MAX = BuiltinOperator_HASHTABLE_SIZE + BuiltinOperator_MAX = BuiltinOperator_SIGN }; -inline const BuiltinOperator (&EnumValuesBuiltinOperator())[140] { +inline const BuiltinOperator (&EnumValuesBuiltinOperator())[159] { static const BuiltinOperator values[] = { BuiltinOperator_ADD, BuiltinOperator_AVERAGE_POOL_2D, @@ -984,13 +956,32 @@ inline const BuiltinOperator (&EnumValuesBuiltinOperator())[140] { BuiltinOperator_HASHTABLE, BuiltinOperator_HASHTABLE_FIND, BuiltinOperator_HASHTABLE_IMPORT, - BuiltinOperator_HASHTABLE_SIZE + BuiltinOperator_HASHTABLE_SIZE, + BuiltinOperator_REDUCE_ALL, + BuiltinOperator_CONV_3D_TRANSPOSE, + BuiltinOperator_VAR_HANDLE, + BuiltinOperator_READ_VARIABLE, + BuiltinOperator_ASSIGN_VARIABLE, + BuiltinOperator_BROADCAST_ARGS, + BuiltinOperator_RANDOM_STANDARD_NORMAL, + BuiltinOperator_BUCKETIZE, + BuiltinOperator_RANDOM_UNIFORM, + BuiltinOperator_MULTINOMIAL, + BuiltinOperator_GELU, + 
BuiltinOperator_DYNAMIC_UPDATE_SLICE, + BuiltinOperator_RELU_0_TO_1, + BuiltinOperator_UNSORTED_SEGMENT_PROD, + BuiltinOperator_UNSORTED_SEGMENT_MAX, + BuiltinOperator_UNSORTED_SEGMENT_SUM, + BuiltinOperator_ATAN2, + BuiltinOperator_UNSORTED_SEGMENT_MIN, + BuiltinOperator_SIGN }; return values; } inline const char * const *EnumNamesBuiltinOperator() { - static const char * const names[141] = { + static const char * const names[160] = { "ADD", "AVERAGE_POOL_2D", "CONCATENATION", @@ -1131,18 +1122,37 @@ inline const char * const *EnumNamesBuiltinOperator() { "HASHTABLE_FIND", "HASHTABLE_IMPORT", "HASHTABLE_SIZE", + "REDUCE_ALL", + "CONV_3D_TRANSPOSE", + "VAR_HANDLE", + "READ_VARIABLE", + "ASSIGN_VARIABLE", + "BROADCAST_ARGS", + "RANDOM_STANDARD_NORMAL", + "BUCKETIZE", + "RANDOM_UNIFORM", + "MULTINOMIAL", + "GELU", + "DYNAMIC_UPDATE_SLICE", + "RELU_0_TO_1", + "UNSORTED_SEGMENT_PROD", + "UNSORTED_SEGMENT_MAX", + "UNSORTED_SEGMENT_SUM", + "ATAN2", + "UNSORTED_SEGMENT_MIN", + "SIGN", nullptr }; return names; } inline const char *EnumNameBuiltinOperator(BuiltinOperator e) { - if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_HASHTABLE_SIZE)) return ""; + if (flatbuffers::IsOutRange(e, BuiltinOperator_ADD, BuiltinOperator_SIGN)) return ""; const size_t index = static_cast(e); return EnumNamesBuiltinOperator()[index]; } -enum BuiltinOptions { +enum BuiltinOptions : uint8_t { BuiltinOptions_NONE = 0, BuiltinOptions_Conv2DOptions = 1, BuiltinOptions_DepthwiseConv2DOptions = 2, @@ -1254,11 +1264,24 @@ enum BuiltinOptions { BuiltinOptions_HashtableFindOptions = 108, BuiltinOptions_HashtableImportOptions = 109, BuiltinOptions_HashtableSizeOptions = 110, + BuiltinOptions_VarHandleOptions = 111, + BuiltinOptions_ReadVariableOptions = 112, + BuiltinOptions_AssignVariableOptions = 113, + BuiltinOptions_RandomOptions = 114, + BuiltinOptions_BucketizeOptions = 115, + BuiltinOptions_GeluOptions = 116, + BuiltinOptions_DynamicUpdateSliceOptions = 117, + BuiltinOptions_UnsortedSegmentProdOptions = 118, + BuiltinOptions_UnsortedSegmentMaxOptions = 119, + BuiltinOptions_UnsortedSegmentMinOptions = 120, + BuiltinOptions_UnsortedSegmentSumOptions = 121, + BuiltinOptions_ATan2Options = 122, + BuiltinOptions_SignOptions = 123, BuiltinOptions_MIN = BuiltinOptions_NONE, - BuiltinOptions_MAX = BuiltinOptions_HashtableSizeOptions + BuiltinOptions_MAX = BuiltinOptions_SignOptions }; -inline const BuiltinOptions (&EnumValuesBuiltinOptions())[111] { +inline const BuiltinOptions (&EnumValuesBuiltinOptions())[124] { static const BuiltinOptions values[] = { BuiltinOptions_NONE, BuiltinOptions_Conv2DOptions, @@ -1370,13 +1393,26 @@ inline const BuiltinOptions (&EnumValuesBuiltinOptions())[111] { BuiltinOptions_HashtableOptions, BuiltinOptions_HashtableFindOptions, BuiltinOptions_HashtableImportOptions, - BuiltinOptions_HashtableSizeOptions + BuiltinOptions_HashtableSizeOptions, + BuiltinOptions_VarHandleOptions, + BuiltinOptions_ReadVariableOptions, + BuiltinOptions_AssignVariableOptions, + BuiltinOptions_RandomOptions, + BuiltinOptions_BucketizeOptions, + BuiltinOptions_GeluOptions, + BuiltinOptions_DynamicUpdateSliceOptions, + BuiltinOptions_UnsortedSegmentProdOptions, + BuiltinOptions_UnsortedSegmentMaxOptions, + BuiltinOptions_UnsortedSegmentMinOptions, + BuiltinOptions_UnsortedSegmentSumOptions, + BuiltinOptions_ATan2Options, + BuiltinOptions_SignOptions }; return values; } inline const char * const *EnumNamesBuiltinOptions() { - static const char * const names[112] = { + static const char * const 
names[125] = { "NONE", "Conv2DOptions", "DepthwiseConv2DOptions", @@ -1488,13 +1524,26 @@ inline const char * const *EnumNamesBuiltinOptions() { "HashtableFindOptions", "HashtableImportOptions", "HashtableSizeOptions", + "VarHandleOptions", + "ReadVariableOptions", + "AssignVariableOptions", + "RandomOptions", + "BucketizeOptions", + "GeluOptions", + "DynamicUpdateSliceOptions", + "UnsortedSegmentProdOptions", + "UnsortedSegmentMaxOptions", + "UnsortedSegmentMinOptions", + "UnsortedSegmentSumOptions", + "ATan2Options", + "SignOptions", nullptr }; return names; } inline const char *EnumNameBuiltinOptions(BuiltinOptions e) { - if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_HashtableSizeOptions)) return ""; + if (flatbuffers::IsOutRange(e, BuiltinOptions_NONE, BuiltinOptions_SignOptions)) return ""; const size_t index = static_cast(e); return EnumNamesBuiltinOptions()[index]; } @@ -1943,15946 +1992,596 @@ template<> struct BuiltinOptionsTraits { static const BuiltinOptions enum_value = BuiltinOptions_HashtableSizeOptions; }; -struct BuiltinOptionsUnion { - BuiltinOptions type; - void *value; - - BuiltinOptionsUnion() : type(BuiltinOptions_NONE), value(nullptr) {} - BuiltinOptionsUnion(BuiltinOptionsUnion&& u) FLATBUFFERS_NOEXCEPT : - type(BuiltinOptions_NONE), value(nullptr) - { std::swap(type, u.type); std::swap(value, u.value); } - BuiltinOptionsUnion(const BuiltinOptionsUnion &) FLATBUFFERS_NOEXCEPT; - BuiltinOptionsUnion &operator=(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT - { BuiltinOptionsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } - BuiltinOptionsUnion &operator=(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT - { std::swap(type, u.type); std::swap(value, u.value); return *this; } - ~BuiltinOptionsUnion() { Reset(); } - - void Reset(); - -#ifndef FLATBUFFERS_CPP98_STL - template - void Set(T&& val) { - using RT = typename std::remove_reference::type; - Reset(); - type = BuiltinOptionsTraits::enum_value; - if (type != BuiltinOptions_NONE) { - value = new RT(std::forward(val)); - } - } -#endif // FLATBUFFERS_CPP98_STL +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_VarHandleOptions; +}; - static void *UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver); - flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReadVariableOptions; +}; - tflite::Conv2DOptionsT *AsConv2DOptions() { - return type == BuiltinOptions_Conv2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Conv2DOptionsT *AsConv2DOptions() const { - return type == BuiltinOptions_Conv2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() { - return type == BuiltinOptions_DepthwiseConv2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() const { - return type == BuiltinOptions_DepthwiseConv2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() { - return type == BuiltinOptions_ConcatEmbeddingsOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() const { - return type == BuiltinOptions_ConcatEmbeddingsOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() { - return type == BuiltinOptions_LSHProjectionOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() const { - return type == BuiltinOptions_LSHProjectionOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::Pool2DOptionsT *AsPool2DOptions() { - return type == BuiltinOptions_Pool2DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Pool2DOptionsT *AsPool2DOptions() const { - return type == BuiltinOptions_Pool2DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SVDFOptionsT *AsSVDFOptions() { - return type == BuiltinOptions_SVDFOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SVDFOptionsT *AsSVDFOptions() const { - return type == BuiltinOptions_SVDFOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::RNNOptionsT *AsRNNOptions() { - return type == BuiltinOptions_RNNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::RNNOptionsT *AsRNNOptions() const { - return type == BuiltinOptions_RNNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() { - return type == BuiltinOptions_FullyConnectedOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() const { - return type == BuiltinOptions_FullyConnectedOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SoftmaxOptionsT *AsSoftmaxOptions() { - return type == BuiltinOptions_SoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SoftmaxOptionsT *AsSoftmaxOptions() const { - return type == BuiltinOptions_SoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ConcatenationOptionsT *AsConcatenationOptions() { - return type == BuiltinOptions_ConcatenationOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ConcatenationOptionsT *AsConcatenationOptions() const { - return type == BuiltinOptions_ConcatenationOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::AddOptionsT *AsAddOptions() { - return type == BuiltinOptions_AddOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::AddOptionsT *AsAddOptions() const { - return type == BuiltinOptions_AddOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::L2NormOptionsT *AsL2NormOptions() { - return type == BuiltinOptions_L2NormOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::L2NormOptionsT *AsL2NormOptions() const { - return type == BuiltinOptions_L2NormOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() { - return type == BuiltinOptions_LocalResponseNormalizationOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() const { - return type == BuiltinOptions_LocalResponseNormalizationOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LSTMOptionsT *AsLSTMOptions() { - return type == BuiltinOptions_LSTMOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LSTMOptionsT *AsLSTMOptions() const { - return type == BuiltinOptions_LSTMOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() { - return type == BuiltinOptions_ResizeBilinearOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() const { - return type == BuiltinOptions_ResizeBilinearOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CallOptionsT *AsCallOptions() { - return type == BuiltinOptions_CallOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CallOptionsT *AsCallOptions() const { - return type == BuiltinOptions_CallOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReshapeOptionsT *AsReshapeOptions() { - return type == BuiltinOptions_ReshapeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReshapeOptionsT *AsReshapeOptions() const { - return type == BuiltinOptions_ReshapeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SkipGramOptionsT *AsSkipGramOptions() { - return type == BuiltinOptions_SkipGramOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SkipGramOptionsT *AsSkipGramOptions() const { - return type == BuiltinOptions_SkipGramOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() { - return type == BuiltinOptions_SpaceToDepthOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() const { - return type == BuiltinOptions_SpaceToDepthOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() { - return type == BuiltinOptions_EmbeddingLookupSparseOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() const { - return type == BuiltinOptions_EmbeddingLookupSparseOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MulOptionsT *AsMulOptions() { - return type == BuiltinOptions_MulOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MulOptionsT *AsMulOptions() const { - return type == BuiltinOptions_MulOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::PadOptionsT *AsPadOptions() { - return type == BuiltinOptions_PadOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::PadOptionsT *AsPadOptions() const { - return type == BuiltinOptions_PadOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::GatherOptionsT *AsGatherOptions() { - return type == BuiltinOptions_GatherOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::GatherOptionsT *AsGatherOptions() const { - return type == BuiltinOptions_GatherOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() { - return type == BuiltinOptions_BatchToSpaceNDOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() const { - return type == BuiltinOptions_BatchToSpaceNDOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() { - return type == BuiltinOptions_SpaceToBatchNDOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() const { - return type == BuiltinOptions_SpaceToBatchNDOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::TransposeOptionsT *AsTransposeOptions() { - return type == BuiltinOptions_TransposeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::TransposeOptionsT *AsTransposeOptions() const { - return type == BuiltinOptions_TransposeOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::ReducerOptionsT *AsReducerOptions() { - return type == BuiltinOptions_ReducerOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReducerOptionsT *AsReducerOptions() const { - return type == BuiltinOptions_ReducerOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SubOptionsT *AsSubOptions() { - return type == BuiltinOptions_SubOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SubOptionsT *AsSubOptions() const { - return type == BuiltinOptions_SubOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DivOptionsT *AsDivOptions() { - return type == BuiltinOptions_DivOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DivOptionsT *AsDivOptions() const { - return type == BuiltinOptions_DivOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SqueezeOptionsT *AsSqueezeOptions() { - return type == BuiltinOptions_SqueezeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SqueezeOptionsT *AsSqueezeOptions() const { - return type == BuiltinOptions_SqueezeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() { - return type == BuiltinOptions_SequenceRNNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() const { - return type == BuiltinOptions_SequenceRNNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::StridedSliceOptionsT *AsStridedSliceOptions() { - return type == BuiltinOptions_StridedSliceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::StridedSliceOptionsT *AsStridedSliceOptions() const { - return type == BuiltinOptions_StridedSliceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ExpOptionsT *AsExpOptions() { - return type == BuiltinOptions_ExpOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ExpOptionsT *AsExpOptions() const { - return type == BuiltinOptions_ExpOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::TopKV2OptionsT *AsTopKV2Options() { - return type == BuiltinOptions_TopKV2Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::TopKV2OptionsT *AsTopKV2Options() const { - return type == BuiltinOptions_TopKV2Options ? - reinterpret_cast(value) : nullptr; - } - tflite::SplitOptionsT *AsSplitOptions() { - return type == BuiltinOptions_SplitOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SplitOptionsT *AsSplitOptions() const { - return type == BuiltinOptions_SplitOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() { - return type == BuiltinOptions_LogSoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() const { - return type == BuiltinOptions_LogSoftmaxOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CastOptionsT *AsCastOptions() { - return type == BuiltinOptions_CastOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CastOptionsT *AsCastOptions() const { - return type == BuiltinOptions_CastOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DequantizeOptionsT *AsDequantizeOptions() { - return type == BuiltinOptions_DequantizeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DequantizeOptionsT *AsDequantizeOptions() const { - return type == BuiltinOptions_DequantizeOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() { - return type == BuiltinOptions_MaximumMinimumOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() const { - return type == BuiltinOptions_MaximumMinimumOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ArgMaxOptionsT *AsArgMaxOptions() { - return type == BuiltinOptions_ArgMaxOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ArgMaxOptionsT *AsArgMaxOptions() const { - return type == BuiltinOptions_ArgMaxOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LessOptionsT *AsLessOptions() { - return type == BuiltinOptions_LessOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LessOptionsT *AsLessOptions() const { - return type == BuiltinOptions_LessOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::NegOptionsT *AsNegOptions() { - return type == BuiltinOptions_NegOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::NegOptionsT *AsNegOptions() const { - return type == BuiltinOptions_NegOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::PadV2OptionsT *AsPadV2Options() { - return type == BuiltinOptions_PadV2Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::PadV2OptionsT *AsPadV2Options() const { - return type == BuiltinOptions_PadV2Options ? - reinterpret_cast(value) : nullptr; - } - tflite::GreaterOptionsT *AsGreaterOptions() { - return type == BuiltinOptions_GreaterOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::GreaterOptionsT *AsGreaterOptions() const { - return type == BuiltinOptions_GreaterOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() { - return type == BuiltinOptions_GreaterEqualOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() const { - return type == BuiltinOptions_GreaterEqualOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LessEqualOptionsT *AsLessEqualOptions() { - return type == BuiltinOptions_LessEqualOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LessEqualOptionsT *AsLessEqualOptions() const { - return type == BuiltinOptions_LessEqualOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SelectOptionsT *AsSelectOptions() { - return type == BuiltinOptions_SelectOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SelectOptionsT *AsSelectOptions() const { - return type == BuiltinOptions_SelectOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SliceOptionsT *AsSliceOptions() { - return type == BuiltinOptions_SliceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SliceOptionsT *AsSliceOptions() const { - return type == BuiltinOptions_SliceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::TransposeConvOptionsT *AsTransposeConvOptions() { - return type == BuiltinOptions_TransposeConvOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::TransposeConvOptionsT *AsTransposeConvOptions() const { - return type == BuiltinOptions_TransposeConvOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() { - return type == BuiltinOptions_SparseToDenseOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() const { - return type == BuiltinOptions_SparseToDenseOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::TileOptionsT *AsTileOptions() { - return type == BuiltinOptions_TileOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::TileOptionsT *AsTileOptions() const { - return type == BuiltinOptions_TileOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ExpandDimsOptionsT *AsExpandDimsOptions() { - return type == BuiltinOptions_ExpandDimsOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ExpandDimsOptionsT *AsExpandDimsOptions() const { - return type == BuiltinOptions_ExpandDimsOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::EqualOptionsT *AsEqualOptions() { - return type == BuiltinOptions_EqualOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::EqualOptionsT *AsEqualOptions() const { - return type == BuiltinOptions_EqualOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::NotEqualOptionsT *AsNotEqualOptions() { - return type == BuiltinOptions_NotEqualOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::NotEqualOptionsT *AsNotEqualOptions() const { - return type == BuiltinOptions_NotEqualOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ShapeOptionsT *AsShapeOptions() { - return type == BuiltinOptions_ShapeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ShapeOptionsT *AsShapeOptions() const { - return type == BuiltinOptions_ShapeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::PowOptionsT *AsPowOptions() { - return type == BuiltinOptions_PowOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::PowOptionsT *AsPowOptions() const { - return type == BuiltinOptions_PowOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ArgMinOptionsT *AsArgMinOptions() { - return type == BuiltinOptions_ArgMinOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ArgMinOptionsT *AsArgMinOptions() const { - return type == BuiltinOptions_ArgMinOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FakeQuantOptionsT *AsFakeQuantOptions() { - return type == BuiltinOptions_FakeQuantOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FakeQuantOptionsT *AsFakeQuantOptions() const { - return type == BuiltinOptions_FakeQuantOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::PackOptionsT *AsPackOptions() { - return type == BuiltinOptions_PackOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::PackOptionsT *AsPackOptions() const { - return type == BuiltinOptions_PackOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LogicalOrOptionsT *AsLogicalOrOptions() { - return type == BuiltinOptions_LogicalOrOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LogicalOrOptionsT *AsLogicalOrOptions() const { - return type == BuiltinOptions_LogicalOrOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::OneHotOptionsT *AsOneHotOptions() { - return type == BuiltinOptions_OneHotOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::OneHotOptionsT *AsOneHotOptions() const { - return type == BuiltinOptions_OneHotOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LogicalAndOptionsT *AsLogicalAndOptions() { - return type == BuiltinOptions_LogicalAndOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LogicalAndOptionsT *AsLogicalAndOptions() const { - return type == BuiltinOptions_LogicalAndOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::LogicalNotOptionsT *AsLogicalNotOptions() { - return type == BuiltinOptions_LogicalNotOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LogicalNotOptionsT *AsLogicalNotOptions() const { - return type == BuiltinOptions_LogicalNotOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::UnpackOptionsT *AsUnpackOptions() { - return type == BuiltinOptions_UnpackOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::UnpackOptionsT *AsUnpackOptions() const { - return type == BuiltinOptions_UnpackOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FloorDivOptionsT *AsFloorDivOptions() { - return type == BuiltinOptions_FloorDivOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FloorDivOptionsT *AsFloorDivOptions() const { - return type == BuiltinOptions_FloorDivOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SquareOptionsT *AsSquareOptions() { - return type == BuiltinOptions_SquareOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SquareOptionsT *AsSquareOptions() const { - return type == BuiltinOptions_SquareOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ZerosLikeOptionsT *AsZerosLikeOptions() { - return type == BuiltinOptions_ZerosLikeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ZerosLikeOptionsT *AsZerosLikeOptions() const { - return type == BuiltinOptions_ZerosLikeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FillOptionsT *AsFillOptions() { - return type == BuiltinOptions_FillOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FillOptionsT *AsFillOptions() const { - return type == BuiltinOptions_FillOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() { - return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() const { - return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() { - return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() const { - return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() { - return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() const { - return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::FloorModOptionsT *AsFloorModOptions() { - return type == BuiltinOptions_FloorModOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::FloorModOptionsT *AsFloorModOptions() const { - return type == BuiltinOptions_FloorModOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::RangeOptionsT *AsRangeOptions() { - return type == BuiltinOptions_RangeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::RangeOptionsT *AsRangeOptions() const { - return type == BuiltinOptions_RangeOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() { - return type == BuiltinOptions_ResizeNearestNeighborOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() const { - return type == BuiltinOptions_ResizeNearestNeighborOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::LeakyReluOptionsT *AsLeakyReluOptions() { - return type == BuiltinOptions_LeakyReluOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::LeakyReluOptionsT *AsLeakyReluOptions() const { - return type == BuiltinOptions_LeakyReluOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() { - return type == BuiltinOptions_SquaredDifferenceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() const { - return type == BuiltinOptions_SquaredDifferenceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MirrorPadOptionsT *AsMirrorPadOptions() { - return type == BuiltinOptions_MirrorPadOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MirrorPadOptionsT *AsMirrorPadOptions() const { - return type == BuiltinOptions_MirrorPadOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::AbsOptionsT *AsAbsOptions() { - return type == BuiltinOptions_AbsOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::AbsOptionsT *AsAbsOptions() const { - return type == BuiltinOptions_AbsOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SplitVOptionsT *AsSplitVOptions() { - return type == BuiltinOptions_SplitVOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SplitVOptionsT *AsSplitVOptions() const { - return type == BuiltinOptions_SplitVOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::UniqueOptionsT *AsUniqueOptions() { - return type == BuiltinOptions_UniqueOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::UniqueOptionsT *AsUniqueOptions() const { - return type == BuiltinOptions_UniqueOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReverseV2OptionsT *AsReverseV2Options() { - return type == BuiltinOptions_ReverseV2Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReverseV2OptionsT *AsReverseV2Options() const { - return type == BuiltinOptions_ReverseV2Options ? - reinterpret_cast(value) : nullptr; - } - tflite::AddNOptionsT *AsAddNOptions() { - return type == BuiltinOptions_AddNOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::AddNOptionsT *AsAddNOptions() const { - return type == BuiltinOptions_AddNOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::GatherNdOptionsT *AsGatherNdOptions() { - return type == BuiltinOptions_GatherNdOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::GatherNdOptionsT *AsGatherNdOptions() const { - return type == BuiltinOptions_GatherNdOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CosOptionsT *AsCosOptions() { - return type == BuiltinOptions_CosOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CosOptionsT *AsCosOptions() const { - return type == BuiltinOptions_CosOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::WhereOptionsT *AsWhereOptions() { - return type == BuiltinOptions_WhereOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::WhereOptionsT *AsWhereOptions() const { - return type == BuiltinOptions_WhereOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::RankOptionsT *AsRankOptions() { - return type == BuiltinOptions_RankOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::RankOptionsT *AsRankOptions() const { - return type == BuiltinOptions_RankOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() { - return type == BuiltinOptions_ReverseSequenceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() const { - return type == BuiltinOptions_ReverseSequenceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() { - return type == BuiltinOptions_MatrixDiagOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() const { - return type == BuiltinOptions_MatrixDiagOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::QuantizeOptionsT *AsQuantizeOptions() { - return type == BuiltinOptions_QuantizeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::QuantizeOptionsT *AsQuantizeOptions() const { - return type == BuiltinOptions_QuantizeOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() { - return type == BuiltinOptions_MatrixSetDiagOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() const { - return type == BuiltinOptions_MatrixSetDiagOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::HardSwishOptionsT *AsHardSwishOptions() { - return type == BuiltinOptions_HardSwishOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::HardSwishOptionsT *AsHardSwishOptions() const { - return type == BuiltinOptions_HardSwishOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::IfOptionsT *AsIfOptions() { - return type == BuiltinOptions_IfOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::IfOptionsT *AsIfOptions() const { - return type == BuiltinOptions_IfOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::WhileOptionsT *AsWhileOptions() { - return type == BuiltinOptions_WhileOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::WhileOptionsT *AsWhileOptions() const { - return type == BuiltinOptions_WhileOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() { - return type == BuiltinOptions_DepthToSpaceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() const { - return type == BuiltinOptions_DepthToSpaceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() { - return type == BuiltinOptions_NonMaxSuppressionV4Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() const { - return type == BuiltinOptions_NonMaxSuppressionV4Options ? - reinterpret_cast(value) : nullptr; - } - tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() { - return type == BuiltinOptions_NonMaxSuppressionV5Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() const { - return type == BuiltinOptions_NonMaxSuppressionV5Options ? - reinterpret_cast(value) : nullptr; - } - tflite::ScatterNdOptionsT *AsScatterNdOptions() { - return type == BuiltinOptions_ScatterNdOptions ? 
- reinterpret_cast(value) : nullptr; - } - const tflite::ScatterNdOptionsT *AsScatterNdOptions() const { - return type == BuiltinOptions_ScatterNdOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SelectV2OptionsT *AsSelectV2Options() { - return type == BuiltinOptions_SelectV2Options ? - reinterpret_cast(value) : nullptr; - } - const tflite::SelectV2OptionsT *AsSelectV2Options() const { - return type == BuiltinOptions_SelectV2Options ? - reinterpret_cast(value) : nullptr; - } - tflite::DensifyOptionsT *AsDensifyOptions() { - return type == BuiltinOptions_DensifyOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::DensifyOptionsT *AsDensifyOptions() const { - return type == BuiltinOptions_DensifyOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::SegmentSumOptionsT *AsSegmentSumOptions() { - return type == BuiltinOptions_SegmentSumOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::SegmentSumOptionsT *AsSegmentSumOptions() const { - return type == BuiltinOptions_SegmentSumOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() { - return type == BuiltinOptions_BatchMatMulOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() const { - return type == BuiltinOptions_BatchMatMulOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CumsumOptionsT *AsCumsumOptions() { - return type == BuiltinOptions_CumsumOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CumsumOptionsT *AsCumsumOptions() const { - return type == BuiltinOptions_CumsumOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::CallOnceOptionsT *AsCallOnceOptions() { - return type == BuiltinOptions_CallOnceOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::CallOnceOptionsT *AsCallOnceOptions() const { - return type == BuiltinOptions_CallOnceOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::BroadcastToOptionsT *AsBroadcastToOptions() { - return type == BuiltinOptions_BroadcastToOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::BroadcastToOptionsT *AsBroadcastToOptions() const { - return type == BuiltinOptions_BroadcastToOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::Rfft2dOptionsT *AsRfft2dOptions() { - return type == BuiltinOptions_Rfft2dOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Rfft2dOptionsT *AsRfft2dOptions() const { - return type == BuiltinOptions_Rfft2dOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::Conv3DOptionsT *AsConv3DOptions() { - return type == BuiltinOptions_Conv3DOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::Conv3DOptionsT *AsConv3DOptions() const { - return type == BuiltinOptions_Conv3DOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::HashtableOptionsT *AsHashtableOptions() { - return type == BuiltinOptions_HashtableOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::HashtableOptionsT *AsHashtableOptions() const { - return type == BuiltinOptions_HashtableOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::HashtableFindOptionsT *AsHashtableFindOptions() { - return type == BuiltinOptions_HashtableFindOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::HashtableFindOptionsT *AsHashtableFindOptions() const { - return type == BuiltinOptions_HashtableFindOptions ? 
- reinterpret_cast(value) : nullptr; - } - tflite::HashtableImportOptionsT *AsHashtableImportOptions() { - return type == BuiltinOptions_HashtableImportOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::HashtableImportOptionsT *AsHashtableImportOptions() const { - return type == BuiltinOptions_HashtableImportOptions ? - reinterpret_cast(value) : nullptr; - } - tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() { - return type == BuiltinOptions_HashtableSizeOptions ? - reinterpret_cast(value) : nullptr; - } - const tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() const { - return type == BuiltinOptions_HashtableSizeOptions ? - reinterpret_cast(value) : nullptr; - } +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AssignVariableOptions; }; -bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); -bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); - -enum Padding { - Padding_SAME = 0, - Padding_VALID = 1, - Padding_MIN = Padding_SAME, - Padding_MAX = Padding_VALID +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RandomOptions; }; -inline const Padding (&EnumValuesPadding())[2] { - static const Padding values[] = { - Padding_SAME, - Padding_VALID - }; - return values; -} +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BucketizeOptions; +}; -inline const char * const *EnumNamesPadding() { - static const char * const names[3] = { - "SAME", - "VALID", - nullptr - }; - return names; -} +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GeluOptions; +}; -inline const char *EnumNamePadding(Padding e) { - if (flatbuffers::IsOutRange(e, Padding_SAME, Padding_VALID)) return ""; - const size_t index = static_cast(e); - return EnumNamesPadding()[index]; -} +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DynamicUpdateSliceOptions; +}; -enum ActivationFunctionType { - ActivationFunctionType_NONE = 0, - ActivationFunctionType_RELU = 1, - ActivationFunctionType_RELU_N1_TO_1 = 2, - ActivationFunctionType_RELU6 = 3, - ActivationFunctionType_TANH = 4, - ActivationFunctionType_SIGN_BIT = 5, - ActivationFunctionType_MIN = ActivationFunctionType_NONE, - ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentProdOptions; }; -inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6] { - static const ActivationFunctionType values[] = { - ActivationFunctionType_NONE, - ActivationFunctionType_RELU, - ActivationFunctionType_RELU_N1_TO_1, - ActivationFunctionType_RELU6, - ActivationFunctionType_TANH, - ActivationFunctionType_SIGN_BIT - }; - return values; -} +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentMaxOptions; +}; -inline const char * const *EnumNamesActivationFunctionType() { - static const char * const names[7] = { - "NONE", - "RELU", - "RELU_N1_TO_1", - "RELU6", - "TANH", - "SIGN_BIT", - nullptr - }; - return names; -} +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentMinOptions; +}; -inline const char 
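// The BuiltinOptionsTraits specializations added above replace part of the
// hand-written union plumbing being deleted in this hunk: each specialization
// maps an options type to its BuiltinOptions enum tag at compile time. A
// minimal sketch of the pattern, assuming the template argument is the
// generated options table type (that argument is an assumption here):
//
//   template<typename T> struct BuiltinOptionsTraits {
//     static const BuiltinOptions enum_value = BuiltinOptions_NONE;
//   };
//   template<> struct BuiltinOptionsTraits<tflite::AddOptions> {
//     static const BuiltinOptions enum_value = BuiltinOptions_AddOptions;
//   };
//
// Generic code can then look up the tag as
// BuiltinOptionsTraits<tflite::AddOptions>::enum_value instead of going
// through the per-type As*Options() accessors removed above.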
*EnumNameActivationFunctionType(ActivationFunctionType e) { - if (flatbuffers::IsOutRange(e, ActivationFunctionType_NONE, ActivationFunctionType_SIGN_BIT)) return ""; - const size_t index = static_cast(e); - return EnumNamesActivationFunctionType()[index]; -} +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentSumOptions; +}; -enum LSHProjectionType { - LSHProjectionType_UNKNOWN = 0, - LSHProjectionType_SPARSE = 1, - LSHProjectionType_DENSE = 2, - LSHProjectionType_MIN = LSHProjectionType_UNKNOWN, - LSHProjectionType_MAX = LSHProjectionType_DENSE +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ATan2Options; }; -inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3] { - static const LSHProjectionType values[] = { - LSHProjectionType_UNKNOWN, - LSHProjectionType_SPARSE, - LSHProjectionType_DENSE - }; - return values; -} +template<> struct BuiltinOptionsTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SignOptions; +}; -inline const char * const *EnumNamesLSHProjectionType() { - static const char * const names[4] = { - "UNKNOWN", - "SPARSE", - "DENSE", - nullptr - }; - return names; -} +template struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NONE; +}; -inline const char *EnumNameLSHProjectionType(LSHProjectionType e) { - if (flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN, LSHProjectionType_DENSE)) return ""; - const size_t index = static_cast(e); - return EnumNamesLSHProjectionType()[index]; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Conv2DOptions; +}; -enum FullyConnectedOptionsWeightsFormat { - FullyConnectedOptionsWeightsFormat_DEFAULT = 0, - FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1, - FullyConnectedOptionsWeightsFormat_MIN = FullyConnectedOptionsWeightsFormat_DEFAULT, - FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DepthwiseConv2DOptions; }; -inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2] { - static const FullyConnectedOptionsWeightsFormat values[] = { - FullyConnectedOptionsWeightsFormat_DEFAULT, - FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 - }; - return values; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ConcatEmbeddingsOptions; +}; -inline const char * const *EnumNamesFullyConnectedOptionsWeightsFormat() { - static const char * const names[3] = { - "DEFAULT", - "SHUFFLED4x16INT8", - nullptr - }; - return names; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LSHProjectionOptions; +}; -inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) { - if (flatbuffers::IsOutRange(e, FullyConnectedOptionsWeightsFormat_DEFAULT, FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8)) return ""; - const size_t index = static_cast(e); - return EnumNamesFullyConnectedOptionsWeightsFormat()[index]; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Pool2DOptions; +}; -enum LSTMKernelType { - LSTMKernelType_FULL = 0, - LSTMKernelType_BASIC = 1, - LSTMKernelType_MIN = LSTMKernelType_FULL, - 
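// For reference, every EnumValues*/EnumNames*/EnumName* triple deleted in this
// region follows the same generated shape: a values array, a nullptr-terminated
// names array, and a bounds-checked name lookup. Using LSHProjectionType from
// the hunk above as the example (a sketch of the already-shown pattern, not
// new behaviour):
//
//   inline const char *EnumNameLSHProjectionType(LSHProjectionType e) {
//     if (flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN,
//                                 LSHProjectionType_DENSE)) return "";
//     return EnumNamesLSHProjectionType()[static_cast<size_t>(e)];
//   }
//
// so EnumNameLSHProjectionType(LSHProjectionType_DENSE) yields "DENSE" and any
// out-of-range value safely yields an empty string.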
LSTMKernelType_MAX = LSTMKernelType_BASIC +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SVDFOptions; }; -inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2] { - static const LSTMKernelType values[] = { - LSTMKernelType_FULL, - LSTMKernelType_BASIC - }; - return values; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RNNOptions; +}; -inline const char * const *EnumNamesLSTMKernelType() { - static const char * const names[3] = { - "FULL", - "BASIC", - nullptr - }; - return names; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FullyConnectedOptions; +}; -inline const char *EnumNameLSTMKernelType(LSTMKernelType e) { - if (flatbuffers::IsOutRange(e, LSTMKernelType_FULL, LSTMKernelType_BASIC)) return ""; - const size_t index = static_cast(e); - return EnumNamesLSTMKernelType()[index]; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SoftmaxOptions; +}; -enum CombinerType { - CombinerType_SUM = 0, - CombinerType_MEAN = 1, - CombinerType_SQRTN = 2, - CombinerType_MIN = CombinerType_SUM, - CombinerType_MAX = CombinerType_SQRTN +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ConcatenationOptions; }; -inline const CombinerType (&EnumValuesCombinerType())[3] { - static const CombinerType values[] = { - CombinerType_SUM, - CombinerType_MEAN, - CombinerType_SQRTN - }; - return values; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AddOptions; +}; -inline const char * const *EnumNamesCombinerType() { - static const char * const names[4] = { - "SUM", - "MEAN", - "SQRTN", - nullptr - }; - return names; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_L2NormOptions; +}; -inline const char *EnumNameCombinerType(CombinerType e) { - if (flatbuffers::IsOutRange(e, CombinerType_SUM, CombinerType_SQRTN)) return ""; - const size_t index = static_cast(e); - return EnumNamesCombinerType()[index]; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LocalResponseNormalizationOptions; +}; -enum MirrorPadMode { - MirrorPadMode_REFLECT = 0, - MirrorPadMode_SYMMETRIC = 1, - MirrorPadMode_MIN = MirrorPadMode_REFLECT, - MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LSTMOptions; }; -inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2] { - static const MirrorPadMode values[] = { - MirrorPadMode_REFLECT, - MirrorPadMode_SYMMETRIC - }; - return values; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ResizeBilinearOptions; +}; -inline const char * const *EnumNamesMirrorPadMode() { - static const char * const names[3] = { - "REFLECT", - "SYMMETRIC", - nullptr - }; - return names; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CallOptions; +}; -inline const char *EnumNameMirrorPadMode(MirrorPadMode e) { - if (flatbuffers::IsOutRange(e, MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC)) return ""; - const size_t index = static_cast(e); - return EnumNamesMirrorPadMode()[index]; -} +template<> struct BuiltinOptionsUnionTraits { + static 
const BuiltinOptions enum_value = BuiltinOptions_ReshapeOptions; +}; -enum CustomOptionsFormat { - CustomOptionsFormat_FLEXBUFFERS = 0, - CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS, - CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SkipGramOptions; }; -inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] { - static const CustomOptionsFormat values[] = { - CustomOptionsFormat_FLEXBUFFERS - }; - return values; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SpaceToDepthOptions; +}; -inline const char * const *EnumNamesCustomOptionsFormat() { - static const char * const names[2] = { - "FLEXBUFFERS", - nullptr - }; - return names; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_EmbeddingLookupSparseOptions; +}; -inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) { - if (flatbuffers::IsOutRange(e, CustomOptionsFormat_FLEXBUFFERS, CustomOptionsFormat_FLEXBUFFERS)) return ""; - const size_t index = static_cast(e); - return EnumNamesCustomOptionsFormat()[index]; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MulOptions; +}; -struct CustomQuantizationT : public flatbuffers::NativeTable { - typedef CustomQuantization TableType; - std::vector custom; - CustomQuantizationT() { - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_PadOptions; }; -struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CustomQuantizationT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_CUSTOM = 4 - }; - const flatbuffers::Vector *custom() const { - return GetPointer *>(VT_CUSTOM); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_CUSTOM) && - verifier.VerifyVector(custom()) && - verifier.EndTable(); - } - CustomQuantizationT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GatherOptions; }; -struct CustomQuantizationBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_custom(flatbuffers::Offset> custom) { - fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom); - } - explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CustomQuantizationBuilder &operator=(const CustomQuantizationBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BatchToSpaceNDOptions; }; -inline flatbuffers::Offset CreateCustomQuantization( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> custom = 0) { - CustomQuantizationBuilder builder_(_fbb); - builder_.add_custom(custom); - return 
builder_.Finish(); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SpaceToBatchNDOptions; +}; -inline flatbuffers::Offset CreateCustomQuantizationDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *custom = nullptr) { - if (custom) { _fbb.ForceVectorAlignment(custom->size(), sizeof(uint8_t), 16); } - auto custom__ = custom ? _fbb.CreateVector(*custom) : 0; - return tflite::CreateCustomQuantization( - _fbb, - custom__); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_TransposeOptions; +}; -flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizationParametersT : public flatbuffers::NativeTable { - typedef QuantizationParameters TableType; - std::vector min; - std::vector max; - std::vector scale; - std::vector zero_point; - tflite::QuantizationDetailsUnion details; - int32_t quantized_dimension; - QuantizationParametersT() - : quantized_dimension(0) { - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReducerOptions; }; -struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizationParametersT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MIN = 4, - VT_MAX = 6, - VT_SCALE = 8, - VT_ZERO_POINT = 10, - VT_DETAILS_TYPE = 12, - VT_DETAILS = 14, - VT_QUANTIZED_DIMENSION = 16 - }; - const flatbuffers::Vector *min() const { - return GetPointer *>(VT_MIN); - } - const flatbuffers::Vector *max() const { - return GetPointer *>(VT_MAX); - } - const flatbuffers::Vector *scale() const { - return GetPointer *>(VT_SCALE); - } - const flatbuffers::Vector *zero_point() const { - return GetPointer *>(VT_ZERO_POINT); - } - tflite::QuantizationDetails details_type() const { - return static_cast(GetField(VT_DETAILS_TYPE, 0)); - } - const void *details() const { - return GetPointer(VT_DETAILS); - } - template const T *details_as() const; - const tflite::CustomQuantization *details_as_CustomQuantization() const { - return details_type() == tflite::QuantizationDetails_CustomQuantization ? 
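// A short usage sketch for the CreateCustomQuantizationDirect() helper shown
// above; the function and parameter names come from this hunk, while the
// surrounding serialization flow is an assumption:
//
//   flatbuffers::FlatBufferBuilder fbb;
//   std::vector<uint8_t> blob = {0x01, 0x02, 0x03};   // opaque custom payload
//   auto custom_q = tflite::CreateCustomQuantizationDirect(fbb, &blob);
//   // custom_q.Union() would then be stored as the details member of a
//   // QuantizationParameters table, with details_type set to
//   // QuantizationDetails_CustomQuantization.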
static_cast(details()) : nullptr; - } - int32_t quantized_dimension() const { - return GetField(VT_QUANTIZED_DIMENSION, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_MIN) && - verifier.VerifyVector(min()) && - VerifyOffset(verifier, VT_MAX) && - verifier.VerifyVector(max()) && - VerifyOffset(verifier, VT_SCALE) && - verifier.VerifyVector(scale()) && - VerifyOffset(verifier, VT_ZERO_POINT) && - verifier.VerifyVector(zero_point()) && - VerifyField(verifier, VT_DETAILS_TYPE) && - VerifyOffset(verifier, VT_DETAILS) && - VerifyQuantizationDetails(verifier, details(), details_type()) && - VerifyField(verifier, VT_QUANTIZED_DIMENSION) && - verifier.EndTable(); - } - QuantizationParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SubOptions; }; -template<> inline const tflite::CustomQuantization *QuantizationParameters::details_as() const { - return details_as_CustomQuantization(); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DivOptions; +}; -struct QuantizationParametersBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_min(flatbuffers::Offset> min) { - fbb_.AddOffset(QuantizationParameters::VT_MIN, min); - } - void add_max(flatbuffers::Offset> max) { - fbb_.AddOffset(QuantizationParameters::VT_MAX, max); - } - void add_scale(flatbuffers::Offset> scale) { - fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale); - } - void add_zero_point(flatbuffers::Offset> zero_point) { - fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, zero_point); - } - void add_details_type(tflite::QuantizationDetails details_type) { - fbb_.AddElement(QuantizationParameters::VT_DETAILS_TYPE, static_cast(details_type), 0); - } - void add_details(flatbuffers::Offset details) { - fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details); - } - void add_quantized_dimension(int32_t quantized_dimension) { - fbb_.AddElement(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, 0); - } - explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizationParametersBuilder &operator=(const QuantizationParametersBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SqueezeOptions; }; -inline flatbuffers::Offset CreateQuantizationParameters( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> min = 0, - flatbuffers::Offset> max = 0, - flatbuffers::Offset> scale = 0, - flatbuffers::Offset> zero_point = 0, - tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE, - flatbuffers::Offset details = 0, - int32_t quantized_dimension = 0) { - QuantizationParametersBuilder builder_(_fbb); - builder_.add_quantized_dimension(quantized_dimension); - builder_.add_details(details); - builder_.add_zero_point(zero_point); - 
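// QuantizationParameters::Verify() above shows the standard generated
// verification chain: VerifyTableStart, one VerifyOffset/VerifyField plus
// element check per member, then verifier.EndTable(). Callers normally run the
// whole buffer through a flatbuffers::Verifier before trusting it; a hedged
// sketch of that call site (the buffer variables and the root-level verify
// helper are assumptions, not taken from this hunk):
//
//   flatbuffers::Verifier verifier(buf_ptr, buf_len);
//   bool ok = tflite::VerifyModelBuffer(verifier);
//
// Only after such verification is it safe to walk nested tables like
// QuantizationParameters with the accessors deleted here.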
builder_.add_scale(scale); - builder_.add_max(max); - builder_.add_min(min); - builder_.add_details_type(details_type); - return builder_.Finish(); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SequenceRNNOptions; +}; -inline flatbuffers::Offset CreateQuantizationParametersDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *min = nullptr, - const std::vector *max = nullptr, - const std::vector *scale = nullptr, - const std::vector *zero_point = nullptr, - tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE, - flatbuffers::Offset details = 0, - int32_t quantized_dimension = 0) { - auto min__ = min ? _fbb.CreateVector(*min) : 0; - auto max__ = max ? _fbb.CreateVector(*max) : 0; - auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; - auto zero_point__ = zero_point ? _fbb.CreateVector(*zero_point) : 0; - return tflite::CreateQuantizationParameters( - _fbb, - min__, - max__, - scale__, - zero_point__, - details_type, - details, - quantized_dimension); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_StridedSliceOptions; +}; -flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ExpOptions; +}; -struct Int32VectorT : public flatbuffers::NativeTable { - typedef Int32Vector TableType; - std::vector values; - Int32VectorT() { - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_TopKV2Options; }; -struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Int32VectorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES = 4 - }; - const flatbuffers::Vector *values() const { - return GetPointer *>(VT_VALUES); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_VALUES) && - verifier.VerifyVector(values()) && - verifier.EndTable(); - } - Int32VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SplitOptions; }; -struct Int32VectorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values(flatbuffers::Offset> values) { - fbb_.AddOffset(Int32Vector::VT_VALUES, values); - } - explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Int32VectorBuilder &operator=(const Int32VectorBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateInt32Vector( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> values = 0) { - Int32VectorBuilder builder_(_fbb); - builder_.add_values(values); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateInt32VectorDirect( - flatbuffers::FlatBufferBuilder 
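// The QuantizationParameters table removed above encodes per-tensor or
// per-channel affine quantization: parallel scale()/zero_point() vectors plus
// quantized_dimension() selecting the channel axis. Dequantization follows the
// usual rule; a minimal sketch (the helper name is hypothetical):
//
//   inline float DequantizeInt8(int8_t q, float scale, int32_t zero_point) {
//     return scale * (static_cast<int32_t>(q) - zero_point);
//   }
//
// For per-tensor quantization the vectors hold a single entry; for per-channel
// quantization they hold one entry per index along quantized_dimension().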
&_fbb, - const std::vector *values = nullptr) { - auto values__ = values ? _fbb.CreateVector(*values) : 0; - return tflite::CreateInt32Vector( - _fbb, - values__); -} - -flatbuffers::Offset CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Uint16VectorT : public flatbuffers::NativeTable { - typedef Uint16Vector TableType; - std::vector values; - Uint16VectorT() { - } -}; - -struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Uint16VectorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES = 4 - }; - const flatbuffers::Vector *values() const { - return GetPointer *>(VT_VALUES); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_VALUES) && - verifier.VerifyVector(values()) && - verifier.EndTable(); - } - Uint16VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Uint16VectorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values(flatbuffers::Offset> values) { - fbb_.AddOffset(Uint16Vector::VT_VALUES, values); - } - explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Uint16VectorBuilder &operator=(const Uint16VectorBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateUint16Vector( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> values = 0) { - Uint16VectorBuilder builder_(_fbb); - builder_.add_values(values); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateUint16VectorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *values = nullptr) { - if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint16_t), 4); } - auto values__ = values ? 
_fbb.CreateVector(*values) : 0; - return tflite::CreateUint16Vector( - _fbb, - values__); -} - -flatbuffers::Offset CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Uint8VectorT : public flatbuffers::NativeTable { - typedef Uint8Vector TableType; - std::vector values; - Uint8VectorT() { - } -}; - -struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Uint8VectorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES = 4 - }; - const flatbuffers::Vector *values() const { - return GetPointer *>(VT_VALUES); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_VALUES) && - verifier.VerifyVector(values()) && - verifier.EndTable(); - } - Uint8VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Uint8VectorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values(flatbuffers::Offset> values) { - fbb_.AddOffset(Uint8Vector::VT_VALUES, values); - } - explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Uint8VectorBuilder &operator=(const Uint8VectorBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateUint8Vector( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> values = 0) { - Uint8VectorBuilder builder_(_fbb); - builder_.add_values(values); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateUint8VectorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *values = nullptr) { - if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint8_t), 4); } - auto values__ = values ? 
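// Note the ForceVectorAlignment() calls in the *VectorDirect creators above:
// they pad the serialized element data so it can later be read in place at a
// stronger-than-default alignment. A hedged sketch of the same idea for a raw
// byte vector (the 16-byte figure mirrors the CustomQuantization creator
// earlier in this hunk):
//
//   if (values) {
//     fbb.ForceVectorAlignment(values->size(), sizeof(uint8_t), 16);
//   }
//   auto vec = fbb.CreateVector(*values);
//
// Without the hint, FlatBuffers only guarantees element-size alignment, which
// may be too weak for consumers that read the payload in place (for example
// with wide SIMD loads).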
_fbb.CreateVector(*values) : 0; - return tflite::CreateUint8Vector( - _fbb, - values__); -} - -flatbuffers::Offset CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DimensionMetadataT : public flatbuffers::NativeTable { - typedef DimensionMetadata TableType; - tflite::DimensionType format; - int32_t dense_size; - tflite::SparseIndexVectorUnion array_segments; - tflite::SparseIndexVectorUnion array_indices; - DimensionMetadataT() - : format(tflite::DimensionType_DENSE), - dense_size(0) { - } -}; - -struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DimensionMetadataT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FORMAT = 4, - VT_DENSE_SIZE = 6, - VT_ARRAY_SEGMENTS_TYPE = 8, - VT_ARRAY_SEGMENTS = 10, - VT_ARRAY_INDICES_TYPE = 12, - VT_ARRAY_INDICES = 14 - }; - tflite::DimensionType format() const { - return static_cast(GetField(VT_FORMAT, 0)); - } - int32_t dense_size() const { - return GetField(VT_DENSE_SIZE, 0); - } - tflite::SparseIndexVector array_segments_type() const { - return static_cast(GetField(VT_ARRAY_SEGMENTS_TYPE, 0)); - } - const void *array_segments() const { - return GetPointer(VT_ARRAY_SEGMENTS); - } - template const T *array_segments_as() const; - const tflite::Int32Vector *array_segments_as_Int32Vector() const { - return array_segments_type() == tflite::SparseIndexVector_Int32Vector ? static_cast(array_segments()) : nullptr; - } - const tflite::Uint16Vector *array_segments_as_Uint16Vector() const { - return array_segments_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast(array_segments()) : nullptr; - } - const tflite::Uint8Vector *array_segments_as_Uint8Vector() const { - return array_segments_type() == tflite::SparseIndexVector_Uint8Vector ? static_cast(array_segments()) : nullptr; - } - tflite::SparseIndexVector array_indices_type() const { - return static_cast(GetField(VT_ARRAY_INDICES_TYPE, 0)); - } - const void *array_indices() const { - return GetPointer(VT_ARRAY_INDICES); - } - template const T *array_indices_as() const; - const tflite::Int32Vector *array_indices_as_Int32Vector() const { - return array_indices_type() == tflite::SparseIndexVector_Int32Vector ? static_cast(array_indices()) : nullptr; - } - const tflite::Uint16Vector *array_indices_as_Uint16Vector() const { - return array_indices_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast(array_indices()) : nullptr; - } - const tflite::Uint8Vector *array_indices_as_Uint8Vector() const { - return array_indices_type() == tflite::SparseIndexVector_Uint8Vector ? 
static_cast(array_indices()) : nullptr; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FORMAT) && - VerifyField(verifier, VT_DENSE_SIZE) && - VerifyField(verifier, VT_ARRAY_SEGMENTS_TYPE) && - VerifyOffset(verifier, VT_ARRAY_SEGMENTS) && - VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) && - VerifyField(verifier, VT_ARRAY_INDICES_TYPE) && - VerifyOffset(verifier, VT_ARRAY_INDICES) && - VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) && - verifier.EndTable(); - } - DimensionMetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -template<> inline const tflite::Int32Vector *DimensionMetadata::array_segments_as() const { - return array_segments_as_Int32Vector(); -} - -template<> inline const tflite::Uint16Vector *DimensionMetadata::array_segments_as() const { - return array_segments_as_Uint16Vector(); -} - -template<> inline const tflite::Uint8Vector *DimensionMetadata::array_segments_as() const { - return array_segments_as_Uint8Vector(); -} - -template<> inline const tflite::Int32Vector *DimensionMetadata::array_indices_as() const { - return array_indices_as_Int32Vector(); -} - -template<> inline const tflite::Uint16Vector *DimensionMetadata::array_indices_as() const { - return array_indices_as_Uint16Vector(); -} - -template<> inline const tflite::Uint8Vector *DimensionMetadata::array_indices_as() const { - return array_indices_as_Uint8Vector(); -} - -struct DimensionMetadataBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_format(tflite::DimensionType format) { - fbb_.AddElement(DimensionMetadata::VT_FORMAT, static_cast(format), 0); - } - void add_dense_size(int32_t dense_size) { - fbb_.AddElement(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0); - } - void add_array_segments_type(tflite::SparseIndexVector array_segments_type) { - fbb_.AddElement(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE, static_cast(array_segments_type), 0); - } - void add_array_segments(flatbuffers::Offset array_segments) { - fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments); - } - void add_array_indices_type(tflite::SparseIndexVector array_indices_type) { - fbb_.AddElement(DimensionMetadata::VT_ARRAY_INDICES_TYPE, static_cast(array_indices_type), 0); - } - void add_array_indices(flatbuffers::Offset array_indices) { - fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices); - } - explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DimensionMetadataBuilder &operator=(const DimensionMetadataBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDimensionMetadata( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::DimensionType format = tflite::DimensionType_DENSE, - int32_t dense_size = 0, - tflite::SparseIndexVector array_segments_type = tflite::SparseIndexVector_NONE, - flatbuffers::Offset array_segments = 0, - tflite::SparseIndexVector array_indices_type = tflite::SparseIndexVector_NONE, - flatbuffers::Offset array_indices = 
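// DimensionMetadata stores its sparse indexing arrays behind the
// SparseIndexVector union, so readers dispatch on the stored width before
// touching the data. A sketch using the accessors defined above (the dm
// pointer and the non-empty-vector assumption are illustrative only):
//
//   int32_t first_segment = 0;
//   if (auto *v32 = dm->array_segments_as_Int32Vector()) {
//     first_segment = v32->values()->Get(0);
//   } else if (auto *v16 = dm->array_segments_as_Uint16Vector()) {
//     first_segment = v16->values()->Get(0);
//   } else if (auto *v8 = dm->array_segments_as_Uint8Vector()) {
//     first_segment = v8->values()->Get(0);
//   }
//
// The narrower Uint16/Uint8 encodings exist purely to shrink serialized models
// whose index values are small.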
0) { - DimensionMetadataBuilder builder_(_fbb); - builder_.add_array_indices(array_indices); - builder_.add_array_segments(array_segments); - builder_.add_dense_size(dense_size); - builder_.add_array_indices_type(array_indices_type); - builder_.add_array_segments_type(array_segments_type); - builder_.add_format(format); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SparsityParametersT : public flatbuffers::NativeTable { - typedef SparsityParameters TableType; - std::vector traversal_order; - std::vector block_map; - std::vector> dim_metadata; - SparsityParametersT() { - } -}; - -struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SparsityParametersT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TRAVERSAL_ORDER = 4, - VT_BLOCK_MAP = 6, - VT_DIM_METADATA = 8 - }; - const flatbuffers::Vector *traversal_order() const { - return GetPointer *>(VT_TRAVERSAL_ORDER); - } - const flatbuffers::Vector *block_map() const { - return GetPointer *>(VT_BLOCK_MAP); - } - const flatbuffers::Vector> *dim_metadata() const { - return GetPointer> *>(VT_DIM_METADATA); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_TRAVERSAL_ORDER) && - verifier.VerifyVector(traversal_order()) && - VerifyOffset(verifier, VT_BLOCK_MAP) && - verifier.VerifyVector(block_map()) && - VerifyOffset(verifier, VT_DIM_METADATA) && - verifier.VerifyVector(dim_metadata()) && - verifier.VerifyVectorOfTables(dim_metadata()) && - verifier.EndTable(); - } - SparsityParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LogSoftmaxOptions; }; -struct SparsityParametersBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_traversal_order(flatbuffers::Offset> traversal_order) { - fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order); - } - void add_block_map(flatbuffers::Offset> block_map) { - fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map); - } - void add_dim_metadata(flatbuffers::Offset>> dim_metadata) { - fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata); - } - explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SparsityParametersBuilder &operator=(const SparsityParametersBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CastOptions; }; -inline flatbuffers::Offset CreateSparsityParameters( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> traversal_order = 0, - flatbuffers::Offset> block_map = 0, - flatbuffers::Offset>> dim_metadata = 0) { - SparsityParametersBuilder builder_(_fbb); - builder_.add_dim_metadata(dim_metadata); - 
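// SparsityParameters, defined above, describes the sparse tensor layout:
// traversal_order gives the dimension iteration order, block_map relates block
// dimensions back to original tensor dimensions, and dim_metadata holds one
// DimensionMetadata entry per (possibly blocked) dimension. A hedged
// object-API sketch using the native types from this hunk:
//
//   tflite::SparsityParametersT sp;
//   sp.traversal_order = {0, 1};   // iterate rows, then columns
//   sp.block_map = {};             // no blocking
//   // each entry of sp.dim_metadata would then describe DENSE or sparse
//   // (segments + indices) storage for the corresponding dimension.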
builder_.add_block_map(block_map); - builder_.add_traversal_order(traversal_order); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSparsityParametersDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *traversal_order = nullptr, - const std::vector *block_map = nullptr, - const std::vector> *dim_metadata = nullptr) { - auto traversal_order__ = traversal_order ? _fbb.CreateVector(*traversal_order) : 0; - auto block_map__ = block_map ? _fbb.CreateVector(*block_map) : 0; - auto dim_metadata__ = dim_metadata ? _fbb.CreateVector>(*dim_metadata) : 0; - return tflite::CreateSparsityParameters( - _fbb, - traversal_order__, - block_map__, - dim_metadata__); -} - -flatbuffers::Offset CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TensorT : public flatbuffers::NativeTable { - typedef Tensor TableType; - std::vector shape; - tflite::TensorType type; - uint32_t buffer; - std::string name; - std::unique_ptr quantization; - bool is_variable; - std::unique_ptr sparsity; - std::vector shape_signature; - TensorT() - : type(tflite::TensorType_FLOAT32), - buffer(0), - is_variable(false) { - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DequantizeOptions; }; -struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TensorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SHAPE = 4, - VT_TYPE = 6, - VT_BUFFER = 8, - VT_NAME = 10, - VT_QUANTIZATION = 12, - VT_IS_VARIABLE = 14, - VT_SPARSITY = 16, - VT_SHAPE_SIGNATURE = 18 - }; - const flatbuffers::Vector *shape() const { - return GetPointer *>(VT_SHAPE); - } - tflite::TensorType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - uint32_t buffer() const { - return GetField(VT_BUFFER, 0); - } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - const tflite::QuantizationParameters *quantization() const { - return GetPointer(VT_QUANTIZATION); - } - bool is_variable() const { - return GetField(VT_IS_VARIABLE, 0) != 0; - } - const tflite::SparsityParameters *sparsity() const { - return GetPointer(VT_SPARSITY); - } - const flatbuffers::Vector *shape_signature() const { - return GetPointer *>(VT_SHAPE_SIGNATURE); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_SHAPE) && - verifier.VerifyVector(shape()) && - VerifyField(verifier, VT_TYPE) && - VerifyField(verifier, VT_BUFFER) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - VerifyOffset(verifier, VT_QUANTIZATION) && - verifier.VerifyTable(quantization()) && - VerifyField(verifier, VT_IS_VARIABLE) && - VerifyOffset(verifier, VT_SPARSITY) && - verifier.VerifyTable(sparsity()) && - VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && - verifier.VerifyVector(shape_signature()) && - verifier.EndTable(); - } - TensorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MaximumMinimumOptions; }; -struct TensorBuilder { - 
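// The Tensor table above is what a reader consults for every tensor in a
// subgraph: shape, element type, owning buffer index, optional quantization
// and sparsity tables, and shape_signature for dynamic dimensions. A hedged
// inspection sketch (model loading, bounds and null checks are abbreviated;
// GetModel() is the generated root accessor and is assumed here):
//
//   const tflite::Model *model = tflite::GetModel(model_data);
//   const tflite::Tensor *t = model->subgraphs()->Get(0)->tensors()->Get(0);
//   int rank = t->shape()->size();
//   tflite::TensorType ty = t->type();
//   float scale0 = (t->quantization() && t->quantization()->scale())
//                      ? t->quantization()->scale()->Get(0) : 1.0f;
//
// is_variable() marks tensors whose contents persist across invocations
// (for example RNN state), as opposed to ordinary activations.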
flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_shape(flatbuffers::Offset> shape) { - fbb_.AddOffset(Tensor::VT_SHAPE, shape); - } - void add_type(tflite::TensorType type) { - fbb_.AddElement(Tensor::VT_TYPE, static_cast(type), 0); - } - void add_buffer(uint32_t buffer) { - fbb_.AddElement(Tensor::VT_BUFFER, buffer, 0); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(Tensor::VT_NAME, name); - } - void add_quantization(flatbuffers::Offset quantization) { - fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization); - } - void add_is_variable(bool is_variable) { - fbb_.AddElement(Tensor::VT_IS_VARIABLE, static_cast(is_variable), 0); - } - void add_sparsity(flatbuffers::Offset sparsity) { - fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity); - } - void add_shape_signature(flatbuffers::Offset> shape_signature) { - fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature); - } - explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TensorBuilder &operator=(const TensorBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ArgMaxOptions; }; -inline flatbuffers::Offset CreateTensor( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> shape = 0, - tflite::TensorType type = tflite::TensorType_FLOAT32, - uint32_t buffer = 0, - flatbuffers::Offset name = 0, - flatbuffers::Offset quantization = 0, - bool is_variable = false, - flatbuffers::Offset sparsity = 0, - flatbuffers::Offset> shape_signature = 0) { - TensorBuilder builder_(_fbb); - builder_.add_shape_signature(shape_signature); - builder_.add_sparsity(sparsity); - builder_.add_quantization(quantization); - builder_.add_name(name); - builder_.add_buffer(buffer); - builder_.add_shape(shape); - builder_.add_is_variable(is_variable); - builder_.add_type(type); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateTensorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *shape = nullptr, - tflite::TensorType type = tflite::TensorType_FLOAT32, - uint32_t buffer = 0, - const char *name = nullptr, - flatbuffers::Offset quantization = 0, - bool is_variable = false, - flatbuffers::Offset sparsity = 0, - const std::vector *shape_signature = nullptr) { - auto shape__ = shape ? _fbb.CreateVector(*shape) : 0; - auto name__ = name ? _fbb.CreateString(name) : 0; - auto shape_signature__ = shape_signature ? 
_fbb.CreateVector(*shape_signature) : 0; - return tflite::CreateTensor( - _fbb, - shape__, - type, - buffer, - name__, - quantization, - is_variable, - sparsity, - shape_signature__); -} - -flatbuffers::Offset CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Conv2DOptionsT : public flatbuffers::NativeTable { - typedef Conv2DOptions TableType; - tflite::Padding padding; - int32_t stride_w; - int32_t stride_h; - tflite::ActivationFunctionType fused_activation_function; - int32_t dilation_w_factor; - int32_t dilation_h_factor; - Conv2DOptionsT() - : padding(tflite::Padding_SAME), - stride_w(0), - stride_h(0), - fused_activation_function(tflite::ActivationFunctionType_NONE), - dilation_w_factor(1), - dilation_h_factor(1) { - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LessOptions; }; -struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Conv2DOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8, - VT_FUSED_ACTIVATION_FUNCTION = 10, - VT_DILATION_W_FACTOR = 12, - VT_DILATION_H_FACTOR = 14 - }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - int32_t dilation_w_factor() const { - return GetField(VT_DILATION_W_FACTOR, 1); - } - int32_t dilation_h_factor() const { - return GetField(VT_DILATION_H_FACTOR, 1); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADDING) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_DILATION_W_FACTOR) && - VerifyField(verifier, VT_DILATION_H_FACTOR) && - verifier.EndTable(); - } - Conv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NegOptions; }; -struct Conv2DOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(Conv2DOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(Conv2DOptions::VT_STRIDE_W, stride_w, 0); - } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(Conv2DOptions::VT_STRIDE_H, stride_h, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_dilation_w_factor(int32_t dilation_w_factor) { - fbb_.AddElement(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); - } - void add_dilation_h_factor(int32_t dilation_h_factor) { - 
fbb_.AddElement(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); - } - explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Conv2DOptionsBuilder &operator=(const Conv2DOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_PadV2Options; }; -inline flatbuffers::Offset CreateConv2DOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - int32_t dilation_w_factor = 1, - int32_t dilation_h_factor = 1) { - Conv2DOptionsBuilder builder_(_fbb); - builder_.add_dilation_h_factor(dilation_h_factor); - builder_.add_dilation_w_factor(dilation_w_factor); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_padding(padding); - return builder_.Finish(); -} - -flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Conv3DOptionsT : public flatbuffers::NativeTable { - typedef Conv3DOptions TableType; - tflite::Padding padding; - int32_t stride_d; - int32_t stride_w; - int32_t stride_h; - tflite::ActivationFunctionType fused_activation_function; - int32_t dilation_d_factor; - int32_t dilation_w_factor; - int32_t dilation_h_factor; - Conv3DOptionsT() - : padding(tflite::Padding_SAME), - stride_d(0), - stride_w(0), - stride_h(0), - fused_activation_function(tflite::ActivationFunctionType_NONE), - dilation_d_factor(1), - dilation_w_factor(1), - dilation_h_factor(1) { - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GreaterOptions; }; -struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Conv3DOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_D = 6, - VT_STRIDE_W = 8, - VT_STRIDE_H = 10, - VT_FUSED_ACTIVATION_FUNCTION = 12, - VT_DILATION_D_FACTOR = 14, - VT_DILATION_W_FACTOR = 16, - VT_DILATION_H_FACTOR = 18 - }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_d() const { - return GetField(VT_STRIDE_D, 0); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - int32_t dilation_d_factor() const { - return GetField(VT_DILATION_D_FACTOR, 1); - } - int32_t dilation_w_factor() const { - return GetField(VT_DILATION_W_FACTOR, 1); - } - int32_t dilation_h_factor() const { - return GetField(VT_DILATION_H_FACTOR, 1); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADDING) && - VerifyField(verifier, VT_STRIDE_D) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_DILATION_D_FACTOR) && - VerifyField(verifier, 
VT_DILATION_W_FACTOR) && - VerifyField(verifier, VT_DILATION_H_FACTOR) && - verifier.EndTable(); - } - Conv3DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GreaterEqualOptions; }; -struct Conv3DOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(Conv3DOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_d(int32_t stride_d) { - fbb_.AddElement(Conv3DOptions::VT_STRIDE_D, stride_d, 0); - } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(Conv3DOptions::VT_STRIDE_W, stride_w, 0); - } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(Conv3DOptions::VT_STRIDE_H, stride_h, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(Conv3DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_dilation_d_factor(int32_t dilation_d_factor) { - fbb_.AddElement(Conv3DOptions::VT_DILATION_D_FACTOR, dilation_d_factor, 1); - } - void add_dilation_w_factor(int32_t dilation_w_factor) { - fbb_.AddElement(Conv3DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); - } - void add_dilation_h_factor(int32_t dilation_h_factor) { - fbb_.AddElement(Conv3DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); - } - explicit Conv3DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Conv3DOptionsBuilder &operator=(const Conv3DOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LessEqualOptions; }; -inline flatbuffers::Offset CreateConv3DOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_d = 0, - int32_t stride_w = 0, - int32_t stride_h = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - int32_t dilation_d_factor = 1, - int32_t dilation_w_factor = 1, - int32_t dilation_h_factor = 1) { - Conv3DOptionsBuilder builder_(_fbb); - builder_.add_dilation_h_factor(dilation_h_factor); - builder_.add_dilation_w_factor(dilation_w_factor); - builder_.add_dilation_d_factor(dilation_d_factor); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_stride_d(stride_d); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_padding(padding); - return builder_.Finish(); -} - -flatbuffers::Offset CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Pool2DOptionsT : public flatbuffers::NativeTable { - typedef Pool2DOptions TableType; - tflite::Padding padding; - int32_t stride_w; - int32_t stride_h; - int32_t filter_width; - int32_t filter_height; - tflite::ActivationFunctionType fused_activation_function; - Pool2DOptionsT() - : padding(tflite::Padding_SAME), - stride_w(0), - 
stride_h(0), - filter_width(0), - filter_height(0), - fused_activation_function(tflite::ActivationFunctionType_NONE) { - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SelectOptions; }; -struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Pool2DOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8, - VT_FILTER_WIDTH = 10, - VT_FILTER_HEIGHT = 12, - VT_FUSED_ACTIVATION_FUNCTION = 14 - }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - int32_t filter_width() const { - return GetField(VT_FILTER_WIDTH, 0); - } - int32_t filter_height() const { - return GetField(VT_FILTER_HEIGHT, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADDING) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - VerifyField(verifier, VT_FILTER_WIDTH) && - VerifyField(verifier, VT_FILTER_HEIGHT) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - Pool2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SliceOptions; }; -struct Pool2DOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(Pool2DOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(Pool2DOptions::VT_STRIDE_W, stride_w, 0); - } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(Pool2DOptions::VT_STRIDE_H, stride_h, 0); - } - void add_filter_width(int32_t filter_width) { - fbb_.AddElement(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0); - } - void add_filter_height(int32_t filter_height) { - fbb_.AddElement(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Pool2DOptionsBuilder &operator=(const Pool2DOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_TransposeConvOptions; }; -inline flatbuffers::Offset CreatePool2DOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0, - int32_t filter_width = 0, - int32_t filter_height = 0, - 
tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - Pool2DOptionsBuilder builder_(_fbb); - builder_.add_filter_height(filter_height); - builder_.add_filter_width(filter_width); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_padding(padding); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DepthwiseConv2DOptionsT : public flatbuffers::NativeTable { - typedef DepthwiseConv2DOptions TableType; - tflite::Padding padding; - int32_t stride_w; - int32_t stride_h; - int32_t depth_multiplier; - tflite::ActivationFunctionType fused_activation_function; - int32_t dilation_w_factor; - int32_t dilation_h_factor; - DepthwiseConv2DOptionsT() - : padding(tflite::Padding_SAME), - stride_w(0), - stride_h(0), - depth_multiplier(0), - fused_activation_function(tflite::ActivationFunctionType_NONE), - dilation_w_factor(1), - dilation_h_factor(1) { - } +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SparseToDenseOptions; }; -struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DepthwiseConv2DOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8, - VT_DEPTH_MULTIPLIER = 10, - VT_FUSED_ACTIVATION_FUNCTION = 12, - VT_DILATION_W_FACTOR = 14, - VT_DILATION_H_FACTOR = 16 - }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - int32_t depth_multiplier() const { - return GetField(VT_DEPTH_MULTIPLIER, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - int32_t dilation_w_factor() const { - return GetField(VT_DILATION_W_FACTOR, 1); - } - int32_t dilation_h_factor() const { - return GetField(VT_DILATION_H_FACTOR, 1); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADDING) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - VerifyField(verifier, VT_DEPTH_MULTIPLIER) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_DILATION_W_FACTOR) && - VerifyField(verifier, VT_DILATION_H_FACTOR) && - verifier.EndTable(); - } - DepthwiseConv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_TileOptions; }; -struct DepthwiseConv2DOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_w(int32_t stride_w) { - 
fbb_.AddElement(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0); - } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0); - } - void add_depth_multiplier(int32_t depth_multiplier) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_dilation_w_factor(int32_t dilation_w_factor) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); - } - void add_dilation_h_factor(int32_t dilation_h_factor) { - fbb_.AddElement(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); - } - explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DepthwiseConv2DOptionsBuilder &operator=(const DepthwiseConv2DOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDepthwiseConv2DOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0, - int32_t depth_multiplier = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - int32_t dilation_w_factor = 1, - int32_t dilation_h_factor = 1) { - DepthwiseConv2DOptionsBuilder builder_(_fbb); - builder_.add_dilation_h_factor(dilation_h_factor); - builder_.add_dilation_w_factor(dilation_w_factor); - builder_.add_depth_multiplier(depth_multiplier); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_padding(padding); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ConcatEmbeddingsOptionsT : public flatbuffers::NativeTable { - typedef ConcatEmbeddingsOptions TableType; - int32_t num_channels; - std::vector num_columns_per_channel; - std::vector embedding_dim_per_channel; - ConcatEmbeddingsOptionsT() - : num_channels(0) { - } -}; - -struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ConcatEmbeddingsOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_CHANNELS = 4, - VT_NUM_COLUMNS_PER_CHANNEL = 6, - VT_EMBEDDING_DIM_PER_CHANNEL = 8 - }; - int32_t num_channels() const { - return GetField(VT_NUM_CHANNELS, 0); - } - const flatbuffers::Vector *num_columns_per_channel() const { - return GetPointer *>(VT_NUM_COLUMNS_PER_CHANNEL); - } - const flatbuffers::Vector *embedding_dim_per_channel() const { - return GetPointer *>(VT_EMBEDDING_DIM_PER_CHANNEL); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_CHANNELS) && - VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) && - verifier.VerifyVector(num_columns_per_channel()) && - VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) && - verifier.VerifyVector(embedding_dim_per_channel()) && - verifier.EndTable(); - } - ConcatEmbeddingsOptionsT *UnPack(const flatbuffers::resolver_function_t 
*_resolver = nullptr) const; - void UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ConcatEmbeddingsOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_channels(int32_t num_channels) { - fbb_.AddElement(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0); - } - void add_num_columns_per_channel(flatbuffers::Offset> num_columns_per_channel) { - fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel); - } - void add_embedding_dim_per_channel(flatbuffers::Offset> embedding_dim_per_channel) { - fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, embedding_dim_per_channel); - } - explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ConcatEmbeddingsOptionsBuilder &operator=(const ConcatEmbeddingsOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateConcatEmbeddingsOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_channels = 0, - flatbuffers::Offset> num_columns_per_channel = 0, - flatbuffers::Offset> embedding_dim_per_channel = 0) { - ConcatEmbeddingsOptionsBuilder builder_(_fbb); - builder_.add_embedding_dim_per_channel(embedding_dim_per_channel); - builder_.add_num_columns_per_channel(num_columns_per_channel); - builder_.add_num_channels(num_channels); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateConcatEmbeddingsOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_channels = 0, - const std::vector *num_columns_per_channel = nullptr, - const std::vector *embedding_dim_per_channel = nullptr) { - auto num_columns_per_channel__ = num_columns_per_channel ? _fbb.CreateVector(*num_columns_per_channel) : 0; - auto embedding_dim_per_channel__ = embedding_dim_per_channel ? 
_fbb.CreateVector(*embedding_dim_per_channel) : 0; - return tflite::CreateConcatEmbeddingsOptions( - _fbb, - num_channels, - num_columns_per_channel__, - embedding_dim_per_channel__); -} - -flatbuffers::Offset CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LSHProjectionOptionsT : public flatbuffers::NativeTable { - typedef LSHProjectionOptions TableType; - tflite::LSHProjectionType type; - LSHProjectionOptionsT() - : type(tflite::LSHProjectionType_UNKNOWN) { - } -}; - -struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LSHProjectionOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TYPE = 4 - }; - tflite::LSHProjectionType type() const { - return static_cast(GetField(VT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TYPE) && - verifier.EndTable(); - } - LSHProjectionOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LSHProjectionOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_type(tflite::LSHProjectionType type) { - fbb_.AddElement(LSHProjectionOptions::VT_TYPE, static_cast(type), 0); - } - explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LSHProjectionOptionsBuilder &operator=(const LSHProjectionOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLSHProjectionOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN) { - LSHProjectionOptionsBuilder builder_(_fbb); - builder_.add_type(type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SVDFOptionsT : public flatbuffers::NativeTable { - typedef SVDFOptions TableType; - int32_t rank; - tflite::ActivationFunctionType fused_activation_function; - bool asymmetric_quantize_inputs; - SVDFOptionsT() - : rank(0), - fused_activation_function(tflite::ActivationFunctionType_NONE), - asymmetric_quantize_inputs(false) { - } -}; - -struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SVDFOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_RANK = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 - }; - int32_t rank() const { - return GetField(VT_RANK, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_RANK) && - 
VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - SVDFOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SVDFOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_rank(int32_t rank) { - fbb_.AddElement(SVDFOptions::VT_RANK, rank, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SVDFOptionsBuilder &operator=(const SVDFOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSVDFOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t rank = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool asymmetric_quantize_inputs = false) { - SVDFOptionsBuilder builder_(_fbb); - builder_.add_rank(rank); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RNNOptionsT : public flatbuffers::NativeTable { - typedef RNNOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - bool asymmetric_quantize_inputs; - RNNOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - asymmetric_quantize_inputs(false) { - } -}; - -struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RNNOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 6 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - RNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RNNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - 
void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RNNOptionsBuilder &operator=(const RNNOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRNNOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool asymmetric_quantize_inputs = false) { - RNNOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SequenceRNNOptionsT : public flatbuffers::NativeTable { - typedef SequenceRNNOptions TableType; - bool time_major; - tflite::ActivationFunctionType fused_activation_function; - bool asymmetric_quantize_inputs; - SequenceRNNOptionsT() - : time_major(false), - fused_activation_function(tflite::ActivationFunctionType_NONE), - asymmetric_quantize_inputs(false) { - } -}; - -struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SequenceRNNOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TIME_MAJOR = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 - }; - bool time_major() const { - return GetField(VT_TIME_MAJOR, 0) != 0; - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - SequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SequenceRNNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_time_major(bool time_major) { - fbb_.AddElement(SequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, 
static_cast(asymmetric_quantize_inputs), 0); - } - explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SequenceRNNOptionsBuilder &operator=(const SequenceRNNOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSequenceRNNOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool time_major = false, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool asymmetric_quantize_inputs = false) { - SequenceRNNOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_time_major(time_major); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BidirectionalSequenceRNNOptionsT : public flatbuffers::NativeTable { - typedef BidirectionalSequenceRNNOptions TableType; - bool time_major; - tflite::ActivationFunctionType fused_activation_function; - bool merge_outputs; - bool asymmetric_quantize_inputs; - BidirectionalSequenceRNNOptionsT() - : time_major(false), - fused_activation_function(tflite::ActivationFunctionType_NONE), - merge_outputs(false), - asymmetric_quantize_inputs(false) { - } -}; - -struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BidirectionalSequenceRNNOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TIME_MAJOR = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6, - VT_MERGE_OUTPUTS = 8, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 - }; - bool time_major() const { - return GetField(VT_TIME_MAJOR, 0) != 0; - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool merge_outputs() const { - return GetField(VT_MERGE_OUTPUTS, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_MERGE_OUTPUTS) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - BidirectionalSequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BidirectionalSequenceRNNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_time_major(bool time_major) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_merge_outputs(bool 
merge_outputs) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BidirectionalSequenceRNNOptionsBuilder &operator=(const BidirectionalSequenceRNNOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool time_major = false, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool merge_outputs = false, - bool asymmetric_quantize_inputs = false) { - BidirectionalSequenceRNNOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_merge_outputs(merge_outputs); - builder_.add_fused_activation_function(fused_activation_function); - builder_.add_time_major(time_major); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FullyConnectedOptionsT : public flatbuffers::NativeTable { - typedef FullyConnectedOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - tflite::FullyConnectedOptionsWeightsFormat weights_format; - bool keep_num_dims; - bool asymmetric_quantize_inputs; - FullyConnectedOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - weights_format(tflite::FullyConnectedOptionsWeightsFormat_DEFAULT), - keep_num_dims(false), - asymmetric_quantize_inputs(false) { - } -}; - -struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FullyConnectedOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_WEIGHTS_FORMAT = 6, - VT_KEEP_NUM_DIMS = 8, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - tflite::FullyConnectedOptionsWeightsFormat weights_format() const { - return static_cast(GetField(VT_WEIGHTS_FORMAT, 0)); - } - bool keep_num_dims() const { - return GetField(VT_KEEP_NUM_DIMS, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_WEIGHTS_FORMAT) && - VerifyField(verifier, VT_KEEP_NUM_DIMS) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct 
FullyConnectedOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_weights_format(tflite::FullyConnectedOptionsWeightsFormat weights_format) { - fbb_.AddElement(FullyConnectedOptions::VT_WEIGHTS_FORMAT, static_cast(weights_format), 0); - } - void add_keep_num_dims(bool keep_num_dims) { - fbb_.AddElement(FullyConnectedOptions::VT_KEEP_NUM_DIMS, static_cast(keep_num_dims), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FullyConnectedOptionsBuilder &operator=(const FullyConnectedOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFullyConnectedOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT, - bool keep_num_dims = false, - bool asymmetric_quantize_inputs = false) { - FullyConnectedOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_keep_num_dims(keep_num_dims); - builder_.add_weights_format(weights_format); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SoftmaxOptionsT : public flatbuffers::NativeTable { - typedef SoftmaxOptions TableType; - float beta; - SoftmaxOptionsT() - : beta(0.0f) { - } -}; - -struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SoftmaxOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BETA = 4 - }; - float beta() const { - return GetField(VT_BETA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BETA) && - verifier.EndTable(); - } - SoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SoftmaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_beta(float beta) { - fbb_.AddElement(SoftmaxOptions::VT_BETA, beta, 0.0f); - } - explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SoftmaxOptionsBuilder &operator=(const SoftmaxOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline 
flatbuffers::Offset CreateSoftmaxOptions( - flatbuffers::FlatBufferBuilder &_fbb, - float beta = 0.0f) { - SoftmaxOptionsBuilder builder_(_fbb); - builder_.add_beta(beta); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ConcatenationOptionsT : public flatbuffers::NativeTable { - typedef ConcatenationOptions TableType; - int32_t axis; - tflite::ActivationFunctionType fused_activation_function; - ConcatenationOptionsT() - : axis(0), - fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ConcatenationOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4, - VT_FUSED_ACTIVATION_FUNCTION = 6 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - ConcatenationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ConcatenationOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(ConcatenationOptions::VT_AXIS, axis, 0); - } - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ConcatenationOptionsBuilder &operator=(const ConcatenationOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateConcatenationOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - ConcatenationOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct AddOptionsT : public flatbuffers::NativeTable { - typedef AddOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - bool pot_scale_int16; - AddOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - pot_scale_int16(true) { - } -}; - -struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AddOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - 
VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_POT_SCALE_INT16 = 6 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool pot_scale_int16() const { - return GetField(VT_POT_SCALE_INT16, 1) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_POT_SCALE_INT16) && - verifier.EndTable(); - } - AddOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct AddOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_pot_scale_int16(bool pot_scale_int16) { - fbb_.AddElement(AddOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); - } - explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - AddOptionsBuilder &operator=(const AddOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateAddOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool pot_scale_int16 = true) { - AddOptionsBuilder builder_(_fbb); - builder_.add_pot_scale_int16(pot_scale_int16); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MulOptionsT : public flatbuffers::NativeTable { - typedef MulOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - MulOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MulOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - MulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MulOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - 
fbb_.AddElement(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MulOptionsBuilder &operator=(const MulOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMulOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - MulOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct L2NormOptionsT : public flatbuffers::NativeTable { - typedef L2NormOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - L2NormOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef L2NormOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - L2NormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct L2NormOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - L2NormOptionsBuilder &operator=(const L2NormOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateL2NormOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - L2NormOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LocalResponseNormalizationOptionsT : public flatbuffers::NativeTable { - typedef LocalResponseNormalizationOptions TableType; - int32_t radius; - float bias; - float alpha; - float beta; - LocalResponseNormalizationOptionsT() - : radius(0), - bias(0.0f), - alpha(0.0f), - beta(0.0f) { - } -}; - -struct LocalResponseNormalizationOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LocalResponseNormalizationOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_RADIUS = 4, - VT_BIAS = 6, - VT_ALPHA = 8, - VT_BETA = 10 - }; - int32_t radius() const { - return GetField(VT_RADIUS, 0); - } - float bias() const { - return GetField(VT_BIAS, 0.0f); - } - float alpha() const { - return GetField(VT_ALPHA, 0.0f); - } - float beta() const { - return GetField(VT_BETA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_RADIUS) && - VerifyField(verifier, VT_BIAS) && - VerifyField(verifier, VT_ALPHA) && - VerifyField(verifier, VT_BETA) && - verifier.EndTable(); - } - LocalResponseNormalizationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LocalResponseNormalizationOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_radius(int32_t radius) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0); - } - void add_bias(float bias) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f); - } - void add_alpha(float alpha) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f); - } - void add_beta(float beta) { - fbb_.AddElement(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f); - } - explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LocalResponseNormalizationOptionsBuilder &operator=(const LocalResponseNormalizationOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLocalResponseNormalizationOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t radius = 0, - float bias = 0.0f, - float alpha = 0.0f, - float beta = 0.0f) { - LocalResponseNormalizationOptionsBuilder builder_(_fbb); - builder_.add_beta(beta); - builder_.add_alpha(alpha); - builder_.add_bias(bias); - builder_.add_radius(radius); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LSTMOptionsT : public flatbuffers::NativeTable { - typedef LSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - tflite::LSTMKernelType kernel_type; - bool asymmetric_quantize_inputs; - LSTMOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - kernel_type(tflite::LSTMKernelType_FULL), - asymmetric_quantize_inputs(false) { - } -}; - -struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LSTMOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_KERNEL_TYPE = 10, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 - 
}; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); - } - tflite::LSTMKernelType kernel_type() const { - return static_cast(GetField(VT_KERNEL_TYPE, 0)); - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_KERNEL_TYPE) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - LSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_kernel_type(tflite::LSTMKernelType kernel_type) { - fbb_.AddElement(LSTMOptions::VT_KERNEL_TYPE, static_cast(kernel_type), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LSTMOptionsBuilder &operator=(const LSTMOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL, - bool asymmetric_quantize_inputs = false) { - LSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_kernel_type(kernel_type); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct UnidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { - typedef UnidirectionalSequenceLSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - bool time_major; - bool asymmetric_quantize_inputs; - UnidirectionalSequenceLSTMOptionsT() - : 
fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - time_major(false), - asymmetric_quantize_inputs(false) { - } -}; - -struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef UnidirectionalSequenceLSTMOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_TIME_MAJOR = 10, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); - } - bool time_major() const { - return GetField(VT_TIME_MAJOR, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - UnidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct UnidirectionalSequenceLSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_time_major(bool time_major) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - UnidirectionalSequenceLSTMOptionsBuilder &operator=(const UnidirectionalSequenceLSTMOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - bool time_major = false, - bool asymmetric_quantize_inputs = false) { - UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - 
builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_time_major(time_major); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { - typedef BidirectionalSequenceLSTMOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - float cell_clip; - float proj_clip; - bool merge_outputs; - bool time_major; - bool asymmetric_quantize_inputs; - BidirectionalSequenceLSTMOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - cell_clip(0.0f), - proj_clip(0.0f), - merge_outputs(false), - time_major(true), - asymmetric_quantize_inputs(false) { - } -}; - -struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BidirectionalSequenceLSTMOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_CELL_CLIP = 6, - VT_PROJ_CLIP = 8, - VT_MERGE_OUTPUTS = 10, - VT_TIME_MAJOR = 12, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 14 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - float cell_clip() const { - return GetField(VT_CELL_CLIP, 0.0f); - } - float proj_clip() const { - return GetField(VT_PROJ_CLIP, 0.0f); - } - bool merge_outputs() const { - return GetField(VT_MERGE_OUTPUTS, 0) != 0; - } - bool time_major() const { - return GetField(VT_TIME_MAJOR, 1) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_CELL_CLIP) && - VerifyField(verifier, VT_PROJ_CLIP) && - VerifyField(verifier, VT_MERGE_OUTPUTS) && - VerifyField(verifier, VT_TIME_MAJOR) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - verifier.EndTable(); - } - BidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BidirectionalSequenceLSTMOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_cell_clip(float cell_clip) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); - } - void add_proj_clip(float proj_clip) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); - } - void add_merge_outputs(bool merge_outputs) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); - } - void add_time_major(bool time_major) { - 
fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 1); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BidirectionalSequenceLSTMOptionsBuilder &operator=(const BidirectionalSequenceLSTMOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - float cell_clip = 0.0f, - float proj_clip = 0.0f, - bool merge_outputs = false, - bool time_major = true, - bool asymmetric_quantize_inputs = false) { - BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); - builder_.add_proj_clip(proj_clip); - builder_.add_cell_clip(cell_clip); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_time_major(time_major); - builder_.add_merge_outputs(merge_outputs); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ResizeBilinearOptionsT : public flatbuffers::NativeTable { - typedef ResizeBilinearOptions TableType; - bool align_corners; - bool half_pixel_centers; - ResizeBilinearOptionsT() - : align_corners(false), - half_pixel_centers(false) { - } -}; - -struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeBilinearOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALIGN_CORNERS = 8, - VT_HALF_PIXEL_CENTERS = 10 - }; - bool align_corners() const { - return GetField(VT_ALIGN_CORNERS, 0) != 0; - } - bool half_pixel_centers() const { - return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALIGN_CORNERS) && - VerifyField(verifier, VT_HALF_PIXEL_CENTERS) && - verifier.EndTable(); - } - ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ResizeBilinearOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_align_corners(bool align_corners) { - fbb_.AddElement(ResizeBilinearOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); - } - void add_half_pixel_centers(bool half_pixel_centers) { - fbb_.AddElement(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); - } - explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ResizeBilinearOptionsBuilder &operator=(const ResizeBilinearOptionsBuilder &); - 
flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateResizeBilinearOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool align_corners = false, - bool half_pixel_centers = false) { - ResizeBilinearOptionsBuilder builder_(_fbb); - builder_.add_half_pixel_centers(half_pixel_centers); - builder_.add_align_corners(align_corners); - return builder_.Finish(); -} - -flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ResizeNearestNeighborOptionsT : public flatbuffers::NativeTable { - typedef ResizeNearestNeighborOptions TableType; - bool align_corners; - bool half_pixel_centers; - ResizeNearestNeighborOptionsT() - : align_corners(false), - half_pixel_centers(false) { - } -}; - -struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ResizeNearestNeighborOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALIGN_CORNERS = 4, - VT_HALF_PIXEL_CENTERS = 6 - }; - bool align_corners() const { - return GetField(VT_ALIGN_CORNERS, 0) != 0; - } - bool half_pixel_centers() const { - return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALIGN_CORNERS) && - VerifyField(verifier, VT_HALF_PIXEL_CENTERS) && - verifier.EndTable(); - } - ResizeNearestNeighborOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ResizeNearestNeighborOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_align_corners(bool align_corners) { - fbb_.AddElement(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); - } - void add_half_pixel_centers(bool half_pixel_centers) { - fbb_.AddElement(ResizeNearestNeighborOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); - } - explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ResizeNearestNeighborOptionsBuilder &operator=(const ResizeNearestNeighborOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateResizeNearestNeighborOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool align_corners = false, - bool half_pixel_centers = false) { - ResizeNearestNeighborOptionsBuilder builder_(_fbb); - builder_.add_half_pixel_centers(half_pixel_centers); - builder_.add_align_corners(align_corners); - return builder_.Finish(); -} - -flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CallOptionsT : public flatbuffers::NativeTable { - typedef CallOptions TableType; - uint32_t subgraph; - CallOptionsT() - : subgraph(0) { - } -}; - -struct CallOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef CallOptionsT NativeTableType;
-  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
-    VT_SUBGRAPH = 4
-  };
-  uint32_t subgraph() const {
-    return GetField<uint32_t>(VT_SUBGRAPH, 0);
-  }
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           VerifyField<uint32_t>(verifier, VT_SUBGRAPH) &&
-           verifier.EndTable();
-  }
-  CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<CallOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct CallOptionsBuilder {
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  void add_subgraph(uint32_t subgraph) {
-    fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
-  }
-  explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  CallOptionsBuilder &operator=(const CallOptionsBuilder &);
-  flatbuffers::Offset<CallOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<CallOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<CallOptions> CreateCallOptions(
-    flatbuffers::FlatBufferBuilder &_fbb,
-    uint32_t subgraph = 0) {
-  CallOptionsBuilder builder_(_fbb);
-  builder_.add_subgraph(subgraph);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct PadOptionsT : public flatbuffers::NativeTable {
-  typedef PadOptions TableType;
-  PadOptionsT() {
-  }
-};
-
-struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef PadOptionsT NativeTableType;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  PadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  void UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
-  static flatbuffers::Offset<PadOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-};
-
-struct PadOptionsBuilder {
-  flatbuffers::FlatBufferBuilder &fbb_;
-  flatbuffers::uoffset_t start_;
-  explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
-        : fbb_(_fbb) {
-    start_ = fbb_.StartTable();
-  }
-  PadOptionsBuilder &operator=(const PadOptionsBuilder &);
-  flatbuffers::Offset<PadOptions> Finish() {
-    const auto end = fbb_.EndTable(start_);
-    auto o = flatbuffers::Offset<PadOptions>(end);
-    return o;
-  }
-};
-
-inline flatbuffers::Offset<PadOptions> CreatePadOptions(
-    flatbuffers::FlatBufferBuilder &_fbb) {
-  PadOptionsBuilder builder_(_fbb);
-  return builder_.Finish();
-}
-
-flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
-
-struct PadV2OptionsT : public flatbuffers::NativeTable {
-  typedef PadV2Options TableType;
-  PadV2OptionsT() {
-  }
-};
-
-struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
-  typedef PadV2OptionsT NativeTableType;
-  bool Verify(flatbuffers::Verifier &verifier) const {
-    return VerifyTableStart(verifier) &&
-           verifier.EndTable();
-  }
-  PadV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver =
nullptr) const; - void UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PadV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PadV2OptionsBuilder &operator=(const PadV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePadV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - PadV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReshapeOptionsT : public flatbuffers::NativeTable { - typedef ReshapeOptions TableType; - std::vector new_shape; - ReshapeOptionsT() { - } -}; - -struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReshapeOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NEW_SHAPE = 4 - }; - const flatbuffers::Vector *new_shape() const { - return GetPointer *>(VT_NEW_SHAPE); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_NEW_SHAPE) && - verifier.VerifyVector(new_shape()) && - verifier.EndTable(); - } - ReshapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReshapeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_new_shape(flatbuffers::Offset> new_shape) { - fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape); - } - explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReshapeOptionsBuilder &operator=(const ReshapeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReshapeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> new_shape = 0) { - ReshapeOptionsBuilder builder_(_fbb); - builder_.add_new_shape(new_shape); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateReshapeOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *new_shape = nullptr) { - auto new_shape__ = new_shape ? 
_fbb.CreateVector(*new_shape) : 0; - return tflite::CreateReshapeOptions( - _fbb, - new_shape__); -} - -flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable { - typedef SpaceToBatchNDOptions TableType; - SpaceToBatchNDOptionsT() { - } -}; - -struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceToBatchNDOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SpaceToBatchNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SpaceToBatchNDOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SpaceToBatchNDOptionsBuilder &operator=(const SpaceToBatchNDOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSpaceToBatchNDOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SpaceToBatchNDOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BatchToSpaceNDOptionsT : public flatbuffers::NativeTable { - typedef BatchToSpaceNDOptions TableType; - BatchToSpaceNDOptionsT() { - } -}; - -struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchToSpaceNDOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - BatchToSpaceNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BatchToSpaceNDOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BatchToSpaceNDOptionsBuilder &operator=(const BatchToSpaceNDOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBatchToSpaceNDOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - BatchToSpaceNDOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SkipGramOptionsT : public 
flatbuffers::NativeTable { - typedef SkipGramOptions TableType; - int32_t ngram_size; - int32_t max_skip_size; - bool include_all_ngrams; - SkipGramOptionsT() - : ngram_size(0), - max_skip_size(0), - include_all_ngrams(false) { - } -}; - -struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SkipGramOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NGRAM_SIZE = 4, - VT_MAX_SKIP_SIZE = 6, - VT_INCLUDE_ALL_NGRAMS = 8 - }; - int32_t ngram_size() const { - return GetField(VT_NGRAM_SIZE, 0); - } - int32_t max_skip_size() const { - return GetField(VT_MAX_SKIP_SIZE, 0); - } - bool include_all_ngrams() const { - return GetField(VT_INCLUDE_ALL_NGRAMS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NGRAM_SIZE) && - VerifyField(verifier, VT_MAX_SKIP_SIZE) && - VerifyField(verifier, VT_INCLUDE_ALL_NGRAMS) && - verifier.EndTable(); - } - SkipGramOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SkipGramOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_ngram_size(int32_t ngram_size) { - fbb_.AddElement(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0); - } - void add_max_skip_size(int32_t max_skip_size) { - fbb_.AddElement(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0); - } - void add_include_all_ngrams(bool include_all_ngrams) { - fbb_.AddElement(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, static_cast(include_all_ngrams), 0); - } - explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SkipGramOptionsBuilder &operator=(const SkipGramOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSkipGramOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t ngram_size = 0, - int32_t max_skip_size = 0, - bool include_all_ngrams = false) { - SkipGramOptionsBuilder builder_(_fbb); - builder_.add_max_skip_size(max_skip_size); - builder_.add_ngram_size(ngram_size); - builder_.add_include_all_ngrams(include_all_ngrams); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SpaceToDepthOptionsT : public flatbuffers::NativeTable { - typedef SpaceToDepthOptions TableType; - int32_t block_size; - SpaceToDepthOptionsT() - : block_size(0) { - } -}; - -struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SpaceToDepthOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCK_SIZE = 4 - }; - int32_t block_size() const { - return GetField(VT_BLOCK_SIZE, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BLOCK_SIZE) && - verifier.EndTable(); - } - SpaceToDepthOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SpaceToDepthOptionsT *_o, 
const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SpaceToDepthOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_block_size(int32_t block_size) { - fbb_.AddElement(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0); - } - explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SpaceToDepthOptionsBuilder &operator=(const SpaceToDepthOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSpaceToDepthOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t block_size = 0) { - SpaceToDepthOptionsBuilder builder_(_fbb); - builder_.add_block_size(block_size); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DepthToSpaceOptionsT : public flatbuffers::NativeTable { - typedef DepthToSpaceOptions TableType; - int32_t block_size; - DepthToSpaceOptionsT() - : block_size(0) { - } -}; - -struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DepthToSpaceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BLOCK_SIZE = 4 - }; - int32_t block_size() const { - return GetField(VT_BLOCK_SIZE, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BLOCK_SIZE) && - verifier.EndTable(); - } - DepthToSpaceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DepthToSpaceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_block_size(int32_t block_size) { - fbb_.AddElement(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); - } - explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DepthToSpaceOptionsBuilder &operator=(const DepthToSpaceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDepthToSpaceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t block_size = 0) { - DepthToSpaceOptionsBuilder builder_(_fbb); - builder_.add_block_size(block_size); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SubOptionsT : public flatbuffers::NativeTable { - typedef SubOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - bool pot_scale_int16; - SubOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE), - pot_scale_int16(true) { - } -}; - -struct SubOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SubOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4, - VT_POT_SCALE_INT16 = 6 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool pot_scale_int16() const { - return GetField(VT_POT_SCALE_INT16, 1) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - VerifyField(verifier, VT_POT_SCALE_INT16) && - verifier.EndTable(); - } - SubOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SubOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - void add_pot_scale_int16(bool pot_scale_int16) { - fbb_.AddElement(SubOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); - } - explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SubOptionsBuilder &operator=(const SubOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSubOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, - bool pot_scale_int16 = true) { - SubOptionsBuilder builder_(_fbb); - builder_.add_pot_scale_int16(pot_scale_int16); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DivOptionsT : public flatbuffers::NativeTable { - typedef DivOptions TableType; - tflite::ActivationFunctionType fused_activation_function; - DivOptionsT() - : fused_activation_function(tflite::ActivationFunctionType_NONE) { - } -}; - -struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DivOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_FUSED_ACTIVATION_FUNCTION = 4 - }; - tflite::ActivationFunctionType fused_activation_function() const { - return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION) && - verifier.EndTable(); - } - DivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DivOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; 
- flatbuffers::uoffset_t start_; - void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { - fbb_.AddElement(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); - } - explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DivOptionsBuilder &operator=(const DivOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDivOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { - DivOptionsBuilder builder_(_fbb); - builder_.add_fused_activation_function(fused_activation_function); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TopKV2OptionsT : public flatbuffers::NativeTable { - typedef TopKV2Options TableType; - TopKV2OptionsT() { - } -}; - -struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TopKV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TopKV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TopKV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TopKV2OptionsBuilder &operator=(const TopKV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTopKV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - TopKV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable { - typedef EmbeddingLookupSparseOptions TableType; - tflite::CombinerType combiner; - EmbeddingLookupSparseOptionsT() - : combiner(tflite::CombinerType_SUM) { - } -}; - -struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EmbeddingLookupSparseOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COMBINER = 4 - }; - tflite::CombinerType combiner() const { - return static_cast(GetField(VT_COMBINER, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_COMBINER) && - verifier.EndTable(); - } - EmbeddingLookupSparseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder 
&_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct EmbeddingLookupSparseOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_combiner(tflite::CombinerType combiner) { - fbb_.AddElement(EmbeddingLookupSparseOptions::VT_COMBINER, static_cast(combiner), 0); - } - explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - EmbeddingLookupSparseOptionsBuilder &operator=(const EmbeddingLookupSparseOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::CombinerType combiner = tflite::CombinerType_SUM) { - EmbeddingLookupSparseOptionsBuilder builder_(_fbb); - builder_.add_combiner(combiner); - return builder_.Finish(); -} - -flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GatherOptionsT : public flatbuffers::NativeTable { - typedef GatherOptions TableType; - int32_t axis; - int32_t batch_dims; - GatherOptionsT() - : axis(0), - batch_dims(0) { - } -}; - -struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GatherOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4, - VT_BATCH_DIMS = 6 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - int32_t batch_dims() const { - return GetField(VT_BATCH_DIMS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - VerifyField(verifier, VT_BATCH_DIMS) && - verifier.EndTable(); - } - GatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GatherOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(GatherOptions::VT_AXIS, axis, 0); - } - void add_batch_dims(int32_t batch_dims) { - fbb_.AddElement(GatherOptions::VT_BATCH_DIMS, batch_dims, 0); - } - explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GatherOptionsBuilder &operator=(const GatherOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGatherOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0, - int32_t batch_dims = 0) { - GatherOptionsBuilder builder_(_fbb); - builder_.add_batch_dims(batch_dims); - builder_.add_axis(axis); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TransposeOptionsT : public flatbuffers::NativeTable { - typedef TransposeOptions TableType; - TransposeOptionsT() { - } -}; - -struct 
TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TransposeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TransposeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TransposeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TransposeOptionsBuilder &operator=(const TransposeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTransposeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - TransposeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ExpOptionsT : public flatbuffers::NativeTable { - typedef ExpOptions TableType; - ExpOptionsT() { - } -}; - -struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ExpOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ExpOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ExpOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ExpOptionsBuilder &operator=(const ExpOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateExpOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ExpOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CosOptionsT : public flatbuffers::NativeTable { - typedef CosOptions TableType; - CosOptionsT() { - } -}; - -struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CosOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - CosOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct 
CosOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CosOptionsBuilder &operator=(const CosOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCosOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - CosOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReducerOptionsT : public flatbuffers::NativeTable { - typedef ReducerOptions TableType; - bool keep_dims; - ReducerOptionsT() - : keep_dims(false) { - } -}; - -struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReducerOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_KEEP_DIMS = 4 - }; - bool keep_dims() const { - return GetField(VT_KEEP_DIMS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_KEEP_DIMS) && - verifier.EndTable(); - } - ReducerOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReducerOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_keep_dims(bool keep_dims) { - fbb_.AddElement(ReducerOptions::VT_KEEP_DIMS, static_cast(keep_dims), 0); - } - explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReducerOptionsBuilder &operator=(const ReducerOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReducerOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool keep_dims = false) { - ReducerOptionsBuilder builder_(_fbb); - builder_.add_keep_dims(keep_dims); - return builder_.Finish(); -} - -flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SqueezeOptionsT : public flatbuffers::NativeTable { - typedef SqueezeOptions TableType; - std::vector squeeze_dims; - SqueezeOptionsT() { - } -}; - -struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SqueezeOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SQUEEZE_DIMS = 4 - }; - const flatbuffers::Vector *squeeze_dims() const { - return GetPointer *>(VT_SQUEEZE_DIMS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_SQUEEZE_DIMS) && - verifier.VerifyVector(squeeze_dims()) && - verifier.EndTable(); - } - SqueezeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static 
flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SqueezeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_squeeze_dims(flatbuffers::Offset> squeeze_dims) { - fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims); - } - explicit SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SqueezeOptionsBuilder &operator=(const SqueezeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSqueezeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> squeeze_dims = 0) { - SqueezeOptionsBuilder builder_(_fbb); - builder_.add_squeeze_dims(squeeze_dims); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSqueezeOptionsDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *squeeze_dims = nullptr) { - auto squeeze_dims__ = squeeze_dims ? _fbb.CreateVector(*squeeze_dims) : 0; - return tflite::CreateSqueezeOptions( - _fbb, - squeeze_dims__); -} - -flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SplitOptionsT : public flatbuffers::NativeTable { - typedef SplitOptions TableType; - int32_t num_splits; - SplitOptionsT() - : num_splits(0) { - } -}; - -struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SplitOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_SPLITS = 4 - }; - int32_t num_splits() const { - return GetField(VT_NUM_SPLITS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_SPLITS) && - verifier.EndTable(); - } - SplitOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SplitOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_splits(int32_t num_splits) { - fbb_.AddElement(SplitOptions::VT_NUM_SPLITS, num_splits, 0); - } - explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SplitOptionsBuilder &operator=(const SplitOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSplitOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_splits = 0) { - SplitOptionsBuilder builder_(_fbb); - builder_.add_num_splits(num_splits); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SplitVOptionsT : public flatbuffers::NativeTable { - typedef SplitVOptions TableType; - int32_t num_splits; - SplitVOptionsT() - : num_splits(0) { - } -}; - -struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private 
flatbuffers::Table { - typedef SplitVOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM_SPLITS = 4 - }; - int32_t num_splits() const { - return GetField(VT_NUM_SPLITS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM_SPLITS) && - verifier.EndTable(); - } - SplitVOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SplitVOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num_splits(int32_t num_splits) { - fbb_.AddElement(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); - } - explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SplitVOptionsBuilder &operator=(const SplitVOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSplitVOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num_splits = 0) { - SplitVOptionsBuilder builder_(_fbb); - builder_.add_num_splits(num_splits); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct StridedSliceOptionsT : public flatbuffers::NativeTable { - typedef StridedSliceOptions TableType; - int32_t begin_mask; - int32_t end_mask; - int32_t ellipsis_mask; - int32_t new_axis_mask; - int32_t shrink_axis_mask; - StridedSliceOptionsT() - : begin_mask(0), - end_mask(0), - ellipsis_mask(0), - new_axis_mask(0), - shrink_axis_mask(0) { - } -}; - -struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef StridedSliceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_BEGIN_MASK = 4, - VT_END_MASK = 6, - VT_ELLIPSIS_MASK = 8, - VT_NEW_AXIS_MASK = 10, - VT_SHRINK_AXIS_MASK = 12 - }; - int32_t begin_mask() const { - return GetField(VT_BEGIN_MASK, 0); - } - int32_t end_mask() const { - return GetField(VT_END_MASK, 0); - } - int32_t ellipsis_mask() const { - return GetField(VT_ELLIPSIS_MASK, 0); - } - int32_t new_axis_mask() const { - return GetField(VT_NEW_AXIS_MASK, 0); - } - int32_t shrink_axis_mask() const { - return GetField(VT_SHRINK_AXIS_MASK, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_BEGIN_MASK) && - VerifyField(verifier, VT_END_MASK) && - VerifyField(verifier, VT_ELLIPSIS_MASK) && - VerifyField(verifier, VT_NEW_AXIS_MASK) && - VerifyField(verifier, VT_SHRINK_AXIS_MASK) && - verifier.EndTable(); - } - StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct StridedSliceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - 
flatbuffers::uoffset_t start_; - void add_begin_mask(int32_t begin_mask) { - fbb_.AddElement(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); - } - void add_end_mask(int32_t end_mask) { - fbb_.AddElement(StridedSliceOptions::VT_END_MASK, end_mask, 0); - } - void add_ellipsis_mask(int32_t ellipsis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); - } - void add_new_axis_mask(int32_t new_axis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); - } - void add_shrink_axis_mask(int32_t shrink_axis_mask) { - fbb_.AddElement(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); - } - explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - StridedSliceOptionsBuilder &operator=(const StridedSliceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateStridedSliceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t begin_mask = 0, - int32_t end_mask = 0, - int32_t ellipsis_mask = 0, - int32_t new_axis_mask = 0, - int32_t shrink_axis_mask = 0) { - StridedSliceOptionsBuilder builder_(_fbb); - builder_.add_shrink_axis_mask(shrink_axis_mask); - builder_.add_new_axis_mask(new_axis_mask); - builder_.add_ellipsis_mask(ellipsis_mask); - builder_.add_end_mask(end_mask); - builder_.add_begin_mask(begin_mask); - return builder_.Finish(); -} - -flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogSoftmaxOptionsT : public flatbuffers::NativeTable { - typedef LogSoftmaxOptions TableType; - LogSoftmaxOptionsT() { - } -}; - -struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogSoftmaxOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LogSoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogSoftmaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LogSoftmaxOptionsBuilder &operator=(const LogSoftmaxOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLogSoftmaxOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogSoftmaxOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CastOptionsT : public flatbuffers::NativeTable { - typedef CastOptions TableType; - tflite::TensorType in_data_type; - tflite::TensorType out_data_type; - CastOptionsT() - : in_data_type(tflite::TensorType_FLOAT32), - out_data_type(tflite::TensorType_FLOAT32) { - } -}; - -struct 
CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CastOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_IN_DATA_TYPE = 4, - VT_OUT_DATA_TYPE = 6 - }; - tflite::TensorType in_data_type() const { - return static_cast(GetField(VT_IN_DATA_TYPE, 0)); - } - tflite::TensorType out_data_type() const { - return static_cast(GetField(VT_OUT_DATA_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_IN_DATA_TYPE) && - VerifyField(verifier, VT_OUT_DATA_TYPE) && - verifier.EndTable(); - } - CastOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CastOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_in_data_type(tflite::TensorType in_data_type) { - fbb_.AddElement(CastOptions::VT_IN_DATA_TYPE, static_cast(in_data_type), 0); - } - void add_out_data_type(tflite::TensorType out_data_type) { - fbb_.AddElement(CastOptions::VT_OUT_DATA_TYPE, static_cast(out_data_type), 0); - } - explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CastOptionsBuilder &operator=(const CastOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCastOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType in_data_type = tflite::TensorType_FLOAT32, - tflite::TensorType out_data_type = tflite::TensorType_FLOAT32) { - CastOptionsBuilder builder_(_fbb); - builder_.add_out_data_type(out_data_type); - builder_.add_in_data_type(in_data_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DequantizeOptionsT : public flatbuffers::NativeTable { - typedef DequantizeOptions TableType; - DequantizeOptionsT() { - } -}; - -struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DequantizeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - DequantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DequantizeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DequantizeOptionsBuilder &operator=(const DequantizeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDequantizeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - 
DequantizeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MaximumMinimumOptionsT : public flatbuffers::NativeTable { - typedef MaximumMinimumOptions TableType; - MaximumMinimumOptionsT() { - } -}; - -struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MaximumMinimumOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - MaximumMinimumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MaximumMinimumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MaximumMinimumOptionsBuilder &operator=(const MaximumMinimumOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMaximumMinimumOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - MaximumMinimumOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TileOptionsT : public flatbuffers::NativeTable { - typedef TileOptions TableType; - TileOptionsT() { - } -}; - -struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TileOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - TileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TileOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TileOptionsBuilder &operator=(const TileOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTileOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - TileOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ArgMaxOptionsT : public flatbuffers::NativeTable { - typedef ArgMaxOptions TableType; - tflite::TensorType output_type; - ArgMaxOptionsT() - : output_type(tflite::TensorType_FLOAT32) { - } -}; - -struct ArgMaxOptions 
FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMaxOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUT_TYPE = 4 - }; - tflite::TensorType output_type() const { - return static_cast(GetField(VT_OUTPUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUT_TYPE) && - verifier.EndTable(); - } - ArgMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ArgMaxOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_output_type(tflite::TensorType output_type) { - fbb_.AddElement(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); - } - explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ArgMaxOptionsBuilder &operator=(const ArgMaxOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateArgMaxOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType output_type = tflite::TensorType_FLOAT32) { - ArgMaxOptionsBuilder builder_(_fbb); - builder_.add_output_type(output_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ArgMinOptionsT : public flatbuffers::NativeTable { - typedef ArgMinOptions TableType; - tflite::TensorType output_type; - ArgMinOptionsT() - : output_type(tflite::TensorType_FLOAT32) { - } -}; - -struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ArgMinOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUTPUT_TYPE = 4 - }; - tflite::TensorType output_type() const { - return static_cast(GetField(VT_OUTPUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUTPUT_TYPE) && - verifier.EndTable(); - } - ArgMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ArgMinOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_output_type(tflite::TensorType output_type) { - fbb_.AddElement(ArgMinOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); - } - explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ArgMinOptionsBuilder &operator=(const ArgMinOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateArgMinOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType output_type = 
tflite::TensorType_FLOAT32) { - ArgMinOptionsBuilder builder_(_fbb); - builder_.add_output_type(output_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GreaterOptionsT : public flatbuffers::NativeTable { - typedef GreaterOptions TableType; - GreaterOptionsT() { - } -}; - -struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GreaterOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - GreaterOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GreaterOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GreaterOptionsBuilder &operator=(const GreaterOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGreaterOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GreaterOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GreaterEqualOptionsT : public flatbuffers::NativeTable { - typedef GreaterEqualOptions TableType; - GreaterEqualOptionsT() { - } -}; - -struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GreaterEqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - GreaterEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GreaterEqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GreaterEqualOptionsBuilder &operator=(const GreaterEqualOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGreaterEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GreaterEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LessOptionsT : public flatbuffers::NativeTable { - typedef LessOptions TableType; - LessOptionsT() { - } -}; - -struct LessOptions FLATBUFFERS_FINAL_CLASS 
: private flatbuffers::Table { - typedef LessOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LessOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LessOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LessOptionsBuilder &operator=(const LessOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLessOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LessOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LessEqualOptionsT : public flatbuffers::NativeTable { - typedef LessEqualOptions TableType; - LessEqualOptionsT() { - } -}; - -struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LessEqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LessEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LessEqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LessEqualOptionsBuilder &operator=(const LessEqualOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLessEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LessEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NegOptionsT : public flatbuffers::NativeTable { - typedef NegOptions TableType; - NegOptionsT() { - } -}; - -struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NegOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NegOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NegOptionsBuilder { - 
flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NegOptionsBuilder &operator=(const NegOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateNegOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - NegOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SelectOptionsT : public flatbuffers::NativeTable { - typedef SelectOptions TableType; - SelectOptionsT() { - } -}; - -struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SelectOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SelectOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SelectOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SelectOptionsBuilder &operator=(const SelectOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSelectOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SelectOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SliceOptionsT : public flatbuffers::NativeTable { - typedef SliceOptions TableType; - SliceOptionsT() { - } -}; - -struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SliceOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SliceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SliceOptionsBuilder &operator=(const SliceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSliceOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SliceOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset 
CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TransposeConvOptionsT : public flatbuffers::NativeTable { - typedef TransposeConvOptions TableType; - tflite::Padding padding; - int32_t stride_w; - int32_t stride_h; - TransposeConvOptionsT() - : padding(tflite::Padding_SAME), - stride_w(0), - stride_h(0) { - } -}; - -struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TransposeConvOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_PADDING = 4, - VT_STRIDE_W = 6, - VT_STRIDE_H = 8 - }; - tflite::Padding padding() const { - return static_cast(GetField(VT_PADDING, 0)); - } - int32_t stride_w() const { - return GetField(VT_STRIDE_W, 0); - } - int32_t stride_h() const { - return GetField(VT_STRIDE_H, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_PADDING) && - VerifyField(verifier, VT_STRIDE_W) && - VerifyField(verifier, VT_STRIDE_H) && - verifier.EndTable(); - } - TransposeConvOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TransposeConvOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_padding(tflite::Padding padding) { - fbb_.AddElement(TransposeConvOptions::VT_PADDING, static_cast(padding), 0); - } - void add_stride_w(int32_t stride_w) { - fbb_.AddElement(TransposeConvOptions::VT_STRIDE_W, stride_w, 0); - } - void add_stride_h(int32_t stride_h) { - fbb_.AddElement(TransposeConvOptions::VT_STRIDE_H, stride_h, 0); - } - explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TransposeConvOptionsBuilder &operator=(const TransposeConvOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTransposeConvOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::Padding padding = tflite::Padding_SAME, - int32_t stride_w = 0, - int32_t stride_h = 0) { - TransposeConvOptionsBuilder builder_(_fbb); - builder_.add_stride_h(stride_h); - builder_.add_stride_w(stride_w); - builder_.add_padding(padding); - return builder_.Finish(); -} - -flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ExpandDimsOptionsT : public flatbuffers::NativeTable { - typedef ExpandDimsOptions TableType; - ExpandDimsOptionsT() { - } -}; - -struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ExpandDimsOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ExpandDimsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder 
&_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ExpandDimsOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ExpandDimsOptionsBuilder &operator=(const ExpandDimsOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateExpandDimsOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ExpandDimsOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SparseToDenseOptionsT : public flatbuffers::NativeTable { - typedef SparseToDenseOptions TableType; - bool validate_indices; - SparseToDenseOptionsT() - : validate_indices(false) { - } -}; - -struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SparseToDenseOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALIDATE_INDICES = 4 - }; - bool validate_indices() const { - return GetField(VT_VALIDATE_INDICES, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VALIDATE_INDICES) && - verifier.EndTable(); - } - SparseToDenseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SparseToDenseOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_validate_indices(bool validate_indices) { - fbb_.AddElement(SparseToDenseOptions::VT_VALIDATE_INDICES, static_cast(validate_indices), 0); - } - explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SparseToDenseOptionsBuilder &operator=(const SparseToDenseOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSparseToDenseOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool validate_indices = false) { - SparseToDenseOptionsBuilder builder_(_fbb); - builder_.add_validate_indices(validate_indices); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct EqualOptionsT : public flatbuffers::NativeTable { - typedef EqualOptions TableType; - EqualOptionsT() { - } -}; - -struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef EqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - EqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = 
nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct EqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - EqualOptionsBuilder &operator=(const EqualOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - EqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NotEqualOptionsT : public flatbuffers::NativeTable { - typedef NotEqualOptions TableType; - NotEqualOptionsT() { - } -}; - -struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NotEqualOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NotEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NotEqualOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NotEqualOptionsBuilder &operator=(const NotEqualOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateNotEqualOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - NotEqualOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ShapeOptionsT : public flatbuffers::NativeTable { - typedef ShapeOptions TableType; - tflite::TensorType out_type; - ShapeOptionsT() - : out_type(tflite::TensorType_FLOAT32) { - } -}; - -struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ShapeOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OUT_TYPE = 4 - }; - tflite::TensorType out_type() const { - return static_cast(GetField(VT_OUT_TYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OUT_TYPE) && - verifier.EndTable(); - } - ShapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ShapeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t 
start_; - void add_out_type(tflite::TensorType out_type) { - fbb_.AddElement(ShapeOptions::VT_OUT_TYPE, static_cast(out_type), 0); - } - explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ShapeOptionsBuilder &operator=(const ShapeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateShapeOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType out_type = tflite::TensorType_FLOAT32) { - ShapeOptionsBuilder builder_(_fbb); - builder_.add_out_type(out_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RankOptionsT : public flatbuffers::NativeTable { - typedef RankOptions TableType; - RankOptionsT() { - } -}; - -struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RankOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - RankOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RankOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RankOptionsBuilder &operator=(const RankOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRankOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - RankOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PowOptionsT : public flatbuffers::NativeTable { - typedef PowOptions TableType; - PowOptionsT() { - } -}; - -struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PowOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - PowOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PowOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PowOptionsBuilder &operator=(const PowOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePowOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - PowOptionsBuilder 
builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FakeQuantOptionsT : public flatbuffers::NativeTable { - typedef FakeQuantOptions TableType; - float min; - float max; - int32_t num_bits; - bool narrow_range; - FakeQuantOptionsT() - : min(0.0f), - max(0.0f), - num_bits(0), - narrow_range(false) { - } -}; - -struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FakeQuantOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MIN = 4, - VT_MAX = 6, - VT_NUM_BITS = 8, - VT_NARROW_RANGE = 10 - }; - float min() const { - return GetField(VT_MIN, 0.0f); - } - float max() const { - return GetField(VT_MAX, 0.0f); - } - int32_t num_bits() const { - return GetField(VT_NUM_BITS, 0); - } - bool narrow_range() const { - return GetField(VT_NARROW_RANGE, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_MIN) && - VerifyField(verifier, VT_MAX) && - VerifyField(verifier, VT_NUM_BITS) && - VerifyField(verifier, VT_NARROW_RANGE) && - verifier.EndTable(); - } - FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FakeQuantOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_min(float min) { - fbb_.AddElement(FakeQuantOptions::VT_MIN, min, 0.0f); - } - void add_max(float max) { - fbb_.AddElement(FakeQuantOptions::VT_MAX, max, 0.0f); - } - void add_num_bits(int32_t num_bits) { - fbb_.AddElement(FakeQuantOptions::VT_NUM_BITS, num_bits, 0); - } - void add_narrow_range(bool narrow_range) { - fbb_.AddElement(FakeQuantOptions::VT_NARROW_RANGE, static_cast(narrow_range), 0); - } - explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FakeQuantOptionsBuilder &operator=(const FakeQuantOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFakeQuantOptions( - flatbuffers::FlatBufferBuilder &_fbb, - float min = 0.0f, - float max = 0.0f, - int32_t num_bits = 0, - bool narrow_range = false) { - FakeQuantOptionsBuilder builder_(_fbb); - builder_.add_num_bits(num_bits); - builder_.add_max(max); - builder_.add_min(min); - builder_.add_narrow_range(narrow_range); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct PackOptionsT : public flatbuffers::NativeTable { - typedef PackOptions TableType; - int32_t values_count; - int32_t axis; - PackOptionsT() - : values_count(0), - axis(0) { - } -}; - -struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef PackOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VALUES_COUNT = 4, - VT_AXIS = 6 - }; - int32_t values_count() const { - return 
GetField(VT_VALUES_COUNT, 0); - } - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VALUES_COUNT) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); - } - PackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct PackOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_values_count(int32_t values_count) { - fbb_.AddElement(PackOptions::VT_VALUES_COUNT, values_count, 0); - } - void add_axis(int32_t axis) { - fbb_.AddElement(PackOptions::VT_AXIS, axis, 0); - } - explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - PackOptionsBuilder &operator=(const PackOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreatePackOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t values_count = 0, - int32_t axis = 0) { - PackOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - builder_.add_values_count(values_count); - return builder_.Finish(); -} - -flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogicalOrOptionsT : public flatbuffers::NativeTable { - typedef LogicalOrOptions TableType; - LogicalOrOptionsT() { - } -}; - -struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogicalOrOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LogicalOrOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogicalOrOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LogicalOrOptionsBuilder &operator=(const LogicalOrOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLogicalOrOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogicalOrOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct OneHotOptionsT : public flatbuffers::NativeTable { - typedef OneHotOptions TableType; - int32_t axis; - OneHotOptionsT() - : axis(0) { - } -}; - -struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OneHotOptionsT NativeTableType; - enum FlatBuffersVTableOffset 
FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_AXIS = 4 - }; - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); - } - OneHotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct OneHotOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_axis(int32_t axis) { - fbb_.AddElement(OneHotOptions::VT_AXIS, axis, 0); - } - explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OneHotOptionsBuilder &operator=(const OneHotOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateOneHotOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t axis = 0) { - OneHotOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - return builder_.Finish(); -} - -flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct AbsOptionsT : public flatbuffers::NativeTable { - typedef AbsOptions TableType; - AbsOptionsT() { - } -}; - -struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AbsOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - AbsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct AbsOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - AbsOptionsBuilder &operator=(const AbsOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateAbsOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - AbsOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct HardSwishOptionsT : public flatbuffers::NativeTable { - typedef HardSwishOptions TableType; - HardSwishOptionsT() { - } -}; - -struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef HardSwishOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - HardSwishOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static 
flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct HardSwishOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - HardSwishOptionsBuilder &operator=(const HardSwishOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateHardSwishOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - HardSwishOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogicalAndOptionsT : public flatbuffers::NativeTable { - typedef LogicalAndOptions TableType; - LogicalAndOptionsT() { - } -}; - -struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogicalAndOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LogicalAndOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogicalAndOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LogicalAndOptionsBuilder &operator=(const LogicalAndOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLogicalAndOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogicalAndOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LogicalNotOptionsT : public flatbuffers::NativeTable { - typedef LogicalNotOptions TableType; - LogicalNotOptionsT() { - } -}; - -struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LogicalNotOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - LogicalNotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LogicalNotOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LogicalNotOptionsBuilder &operator=(const LogicalNotOptionsBuilder &); 
- flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLogicalNotOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - LogicalNotOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct UnpackOptionsT : public flatbuffers::NativeTable { - typedef UnpackOptions TableType; - int32_t num; - int32_t axis; - UnpackOptionsT() - : num(0), - axis(0) { - } -}; - -struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef UnpackOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NUM = 4, - VT_AXIS = 6 - }; - int32_t num() const { - return GetField(VT_NUM, 0); - } - int32_t axis() const { - return GetField(VT_AXIS, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_NUM) && - VerifyField(verifier, VT_AXIS) && - verifier.EndTable(); - } - UnpackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct UnpackOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_num(int32_t num) { - fbb_.AddElement(UnpackOptions::VT_NUM, num, 0); - } - void add_axis(int32_t axis) { - fbb_.AddElement(UnpackOptions::VT_AXIS, axis, 0); - } - explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - UnpackOptionsBuilder &operator=(const UnpackOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateUnpackOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t num = 0, - int32_t axis = 0) { - UnpackOptionsBuilder builder_(_fbb); - builder_.add_axis(axis); - builder_.add_num(num); - return builder_.Finish(); -} - -flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FloorDivOptionsT : public flatbuffers::NativeTable { - typedef FloorDivOptions TableType; - FloorDivOptionsT() { - } -}; - -struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FloorDivOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - FloorDivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FloorDivOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - 
FloorDivOptionsBuilder &operator=(const FloorDivOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFloorDivOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - FloorDivOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SquareOptionsT : public flatbuffers::NativeTable { - typedef SquareOptions TableType; - SquareOptionsT() { - } -}; - -struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SquareOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SquareOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SquareOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SquareOptionsBuilder &operator=(const SquareOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSquareOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SquareOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ZerosLikeOptionsT : public flatbuffers::NativeTable { - typedef ZerosLikeOptions TableType; - ZerosLikeOptionsT() { - } -}; - -struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ZerosLikeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ZerosLikeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ZerosLikeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ZerosLikeOptionsBuilder &operator=(const ZerosLikeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateZerosLikeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ZerosLikeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); 
- -struct FillOptionsT : public flatbuffers::NativeTable { - typedef FillOptions TableType; - FillOptionsT() { - } -}; - -struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FillOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - FillOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FillOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FillOptionsBuilder &operator=(const FillOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFillOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - FillOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct FloorModOptionsT : public flatbuffers::NativeTable { - typedef FloorModOptions TableType; - FloorModOptionsT() { - } -}; - -struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef FloorModOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - FloorModOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct FloorModOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - FloorModOptionsBuilder &operator=(const FloorModOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateFloorModOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - FloorModOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct RangeOptionsT : public flatbuffers::NativeTable { - typedef RangeOptions TableType; - RangeOptionsT() { - } -}; - -struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef RangeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - RangeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct RangeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - RangeOptionsBuilder &operator=(const RangeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRangeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - RangeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct LeakyReluOptionsT : public flatbuffers::NativeTable { - typedef LeakyReluOptions TableType; - float alpha; - LeakyReluOptionsT() - : alpha(0.0f) { - } -}; - -struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef LeakyReluOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ALPHA = 4 - }; - float alpha() const { - return GetField(VT_ALPHA, 0.0f); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ALPHA) && - verifier.EndTable(); - } - LeakyReluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct LeakyReluOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_alpha(float alpha) { - fbb_.AddElement(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); - } - explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - LeakyReluOptionsBuilder &operator=(const LeakyReluOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateLeakyReluOptions( - flatbuffers::FlatBufferBuilder &_fbb, - float alpha = 0.0f) { - LeakyReluOptionsBuilder builder_(_fbb); - builder_.add_alpha(alpha); - return builder_.Finish(); -} - -flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SquaredDifferenceOptionsT : public flatbuffers::NativeTable { - typedef SquaredDifferenceOptions TableType; - SquaredDifferenceOptionsT() { - } -}; - -struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SquaredDifferenceOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SquaredDifferenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SquaredDifferenceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SquaredDifferenceOptionsBuilder &operator=(const SquaredDifferenceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSquaredDifferenceOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SquaredDifferenceOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MirrorPadOptionsT : public flatbuffers::NativeTable { - typedef MirrorPadOptions TableType; - tflite::MirrorPadMode mode; - MirrorPadOptionsT() - : mode(tflite::MirrorPadMode_REFLECT) { - } -}; - -struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MirrorPadOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_MODE = 4 - }; - tflite::MirrorPadMode mode() const { - return static_cast(GetField(VT_MODE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_MODE) && - verifier.EndTable(); - } - MirrorPadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MirrorPadOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_mode(tflite::MirrorPadMode mode) { - fbb_.AddElement(MirrorPadOptions::VT_MODE, static_cast(mode), 0); - } - explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MirrorPadOptionsBuilder &operator=(const MirrorPadOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMirrorPadOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT) { - MirrorPadOptionsBuilder builder_(_fbb); - builder_.add_mode(mode); - return builder_.Finish(); -} - -flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct UniqueOptionsT : public flatbuffers::NativeTable { - typedef UniqueOptions TableType; - tflite::TensorType idx_out_type; - UniqueOptionsT() - : idx_out_type(tflite::TensorType_INT32) { - } -}; - -struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef UniqueOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_IDX_OUT_TYPE = 4 - }; - tflite::TensorType idx_out_type() const { - return static_cast(GetField(VT_IDX_OUT_TYPE, 2)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - 
VerifyField(verifier, VT_IDX_OUT_TYPE) && - verifier.EndTable(); - } - UniqueOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct UniqueOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_idx_out_type(tflite::TensorType idx_out_type) { - fbb_.AddElement(UniqueOptions::VT_IDX_OUT_TYPE, static_cast(idx_out_type), 2); - } - explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - UniqueOptionsBuilder &operator=(const UniqueOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateUniqueOptions( - flatbuffers::FlatBufferBuilder &_fbb, - tflite::TensorType idx_out_type = tflite::TensorType_INT32) { - UniqueOptionsBuilder builder_(_fbb); - builder_.add_idx_out_type(idx_out_type); - return builder_.Finish(); -} - -flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReverseV2OptionsT : public flatbuffers::NativeTable { - typedef ReverseV2Options TableType; - ReverseV2OptionsT() { - } -}; - -struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReverseV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ReverseV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReverseV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReverseV2OptionsBuilder &operator=(const ReverseV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReverseV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - ReverseV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct AddNOptionsT : public flatbuffers::NativeTable { - typedef AddNOptions TableType; - AddNOptionsT() { - } -}; - -struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef AddNOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - AddNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct AddNOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - AddNOptionsBuilder &operator=(const AddNOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateAddNOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - AddNOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct GatherNdOptionsT : public flatbuffers::NativeTable { - typedef GatherNdOptions TableType; - GatherNdOptionsT() { - } -}; - -struct GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef GatherNdOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - GatherNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct GatherNdOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - GatherNdOptionsBuilder &operator=(const GatherNdOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateGatherNdOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - GatherNdOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct WhereOptionsT : public flatbuffers::NativeTable { - typedef WhereOptions TableType; - WhereOptionsT() { - } -}; - -struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef WhereOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - WhereOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct WhereOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - WhereOptionsBuilder &operator=(const WhereOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline 
flatbuffers::Offset CreateWhereOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - WhereOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ReverseSequenceOptionsT : public flatbuffers::NativeTable { - typedef ReverseSequenceOptions TableType; - int32_t seq_dim; - int32_t batch_dim; - ReverseSequenceOptionsT() - : seq_dim(0), - batch_dim(0) { - } -}; - -struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ReverseSequenceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_SEQ_DIM = 4, - VT_BATCH_DIM = 6 - }; - int32_t seq_dim() const { - return GetField(VT_SEQ_DIM, 0); - } - int32_t batch_dim() const { - return GetField(VT_BATCH_DIM, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_SEQ_DIM) && - VerifyField(verifier, VT_BATCH_DIM) && - verifier.EndTable(); - } - ReverseSequenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ReverseSequenceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_seq_dim(int32_t seq_dim) { - fbb_.AddElement(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0); - } - void add_batch_dim(int32_t batch_dim) { - fbb_.AddElement(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0); - } - explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ReverseSequenceOptionsBuilder &operator=(const ReverseSequenceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateReverseSequenceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t seq_dim = 0, - int32_t batch_dim = 0) { - ReverseSequenceOptionsBuilder builder_(_fbb); - builder_.add_batch_dim(batch_dim); - builder_.add_seq_dim(seq_dim); - return builder_.Finish(); -} - -flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MatrixDiagOptionsT : public flatbuffers::NativeTable { - typedef MatrixDiagOptions TableType; - MatrixDiagOptionsT() { - } -}; - -struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MatrixDiagOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - MatrixDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MatrixDiagOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t 
start_; - explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MatrixDiagOptionsBuilder &operator=(const MatrixDiagOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMatrixDiagOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - MatrixDiagOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct QuantizeOptionsT : public flatbuffers::NativeTable { - typedef QuantizeOptions TableType; - QuantizeOptionsT() { - } -}; - -struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef QuantizeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - QuantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct QuantizeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - QuantizeOptionsBuilder &operator=(const QuantizeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateQuantizeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - QuantizeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MatrixSetDiagOptionsT : public flatbuffers::NativeTable { - typedef MatrixSetDiagOptions TableType; - MatrixSetDiagOptionsT() { - } -}; - -struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MatrixSetDiagOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - MatrixSetDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MatrixSetDiagOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MatrixSetDiagOptionsBuilder &operator=(const MatrixSetDiagOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMatrixSetDiagOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - 
MatrixSetDiagOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct IfOptionsT : public flatbuffers::NativeTable { - typedef IfOptions TableType; - int32_t then_subgraph_index; - int32_t else_subgraph_index; - IfOptionsT() - : then_subgraph_index(0), - else_subgraph_index(0) { - } -}; - -struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef IfOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_THEN_SUBGRAPH_INDEX = 4, - VT_ELSE_SUBGRAPH_INDEX = 6 - }; - int32_t then_subgraph_index() const { - return GetField(VT_THEN_SUBGRAPH_INDEX, 0); - } - int32_t else_subgraph_index() const { - return GetField(VT_ELSE_SUBGRAPH_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_THEN_SUBGRAPH_INDEX) && - VerifyField(verifier, VT_ELSE_SUBGRAPH_INDEX) && - verifier.EndTable(); - } - IfOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct IfOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_then_subgraph_index(int32_t then_subgraph_index) { - fbb_.AddElement(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0); - } - void add_else_subgraph_index(int32_t else_subgraph_index) { - fbb_.AddElement(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0); - } - explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - IfOptionsBuilder &operator=(const IfOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateIfOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t then_subgraph_index = 0, - int32_t else_subgraph_index = 0) { - IfOptionsBuilder builder_(_fbb); - builder_.add_else_subgraph_index(else_subgraph_index); - builder_.add_then_subgraph_index(then_subgraph_index); - return builder_.Finish(); -} - -flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CallOnceOptionsT : public flatbuffers::NativeTable { - typedef CallOnceOptions TableType; - int32_t init_subgraph_index; - CallOnceOptionsT() - : init_subgraph_index(0) { - } -}; - -struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CallOnceOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INIT_SUBGRAPH_INDEX = 4 - }; - int32_t init_subgraph_index() const { - return GetField(VT_INIT_SUBGRAPH_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_INIT_SUBGRAPH_INDEX) && - verifier.EndTable(); - } - CallOnceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) 
const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CallOnceOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_init_subgraph_index(int32_t init_subgraph_index) { - fbb_.AddElement(CallOnceOptions::VT_INIT_SUBGRAPH_INDEX, init_subgraph_index, 0); - } - explicit CallOnceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CallOnceOptionsBuilder &operator=(const CallOnceOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCallOnceOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t init_subgraph_index = 0) { - CallOnceOptionsBuilder builder_(_fbb); - builder_.add_init_subgraph_index(init_subgraph_index); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct WhileOptionsT : public flatbuffers::NativeTable { - typedef WhileOptions TableType; - int32_t cond_subgraph_index; - int32_t body_subgraph_index; - WhileOptionsT() - : cond_subgraph_index(0), - body_subgraph_index(0) { - } -}; - -struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef WhileOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_COND_SUBGRAPH_INDEX = 4, - VT_BODY_SUBGRAPH_INDEX = 6 - }; - int32_t cond_subgraph_index() const { - return GetField(VT_COND_SUBGRAPH_INDEX, 0); - } - int32_t body_subgraph_index() const { - return GetField(VT_BODY_SUBGRAPH_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_COND_SUBGRAPH_INDEX) && - VerifyField(verifier, VT_BODY_SUBGRAPH_INDEX) && - verifier.EndTable(); - } - WhileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct WhileOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_cond_subgraph_index(int32_t cond_subgraph_index) { - fbb_.AddElement(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0); - } - void add_body_subgraph_index(int32_t body_subgraph_index) { - fbb_.AddElement(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0); - } - explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - WhileOptionsBuilder &operator=(const WhileOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateWhileOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t cond_subgraph_index = 0, - int32_t body_subgraph_index = 0) { - WhileOptionsBuilder builder_(_fbb); - builder_.add_body_subgraph_index(body_subgraph_index); - builder_.add_cond_subgraph_index(cond_subgraph_index); - return builder_.Finish(); -} - -flatbuffers::Offset 
CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NonMaxSuppressionV4OptionsT : public flatbuffers::NativeTable { - typedef NonMaxSuppressionV4Options TableType; - NonMaxSuppressionV4OptionsT() { - } -}; - -struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NonMaxSuppressionV4OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NonMaxSuppressionV4OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NonMaxSuppressionV4OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NonMaxSuppressionV4OptionsBuilder &operator=(const NonMaxSuppressionV4OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateNonMaxSuppressionV4Options( - flatbuffers::FlatBufferBuilder &_fbb) { - NonMaxSuppressionV4OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct NonMaxSuppressionV5OptionsT : public flatbuffers::NativeTable { - typedef NonMaxSuppressionV5Options TableType; - NonMaxSuppressionV5OptionsT() { - } -}; - -struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef NonMaxSuppressionV5OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - NonMaxSuppressionV5OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct NonMaxSuppressionV5OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - NonMaxSuppressionV5OptionsBuilder &operator=(const NonMaxSuppressionV5OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateNonMaxSuppressionV5Options( - flatbuffers::FlatBufferBuilder &_fbb) { - NonMaxSuppressionV5OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct 
ScatterNdOptionsT : public flatbuffers::NativeTable { - typedef ScatterNdOptions TableType; - ScatterNdOptionsT() { - } -}; - -struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ScatterNdOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - ScatterNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ScatterNdOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ScatterNdOptionsBuilder &operator=(const ScatterNdOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateScatterNdOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - ScatterNdOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SelectV2OptionsT : public flatbuffers::NativeTable { - typedef SelectV2Options TableType; - SelectV2OptionsT() { - } -}; - -struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SelectV2OptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SelectV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SelectV2OptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SelectV2OptionsBuilder &operator=(const SelectV2OptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSelectV2Options( - flatbuffers::FlatBufferBuilder &_fbb) { - SelectV2OptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct DensifyOptionsT : public flatbuffers::NativeTable { - typedef DensifyOptions TableType; - DensifyOptionsT() { - } -}; - -struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef DensifyOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - DensifyOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(DensifyOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct DensifyOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - DensifyOptionsBuilder &operator=(const DensifyOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateDensifyOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - DensifyOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SegmentSumOptionsT : public flatbuffers::NativeTable { - typedef SegmentSumOptions TableType; - SegmentSumOptionsT() { - } -}; - -struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SegmentSumOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - SegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SegmentSumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SegmentSumOptionsBuilder &operator=(const SegmentSumOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSegmentSumOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - SegmentSumOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BatchMatMulOptionsT : public flatbuffers::NativeTable { - typedef BatchMatMulOptions TableType; - bool adj_x; - bool adj_y; - bool asymmetric_quantize_inputs; - BatchMatMulOptionsT() - : adj_x(false), - adj_y(false), - asymmetric_quantize_inputs(false) { - } -}; - -struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BatchMatMulOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_ADJ_X = 4, - VT_ADJ_Y = 6, - VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 - }; - bool adj_x() const { - return GetField(VT_ADJ_X, 0) != 0; - } - bool adj_y() const { - return GetField(VT_ADJ_Y, 0) != 0; - } - bool asymmetric_quantize_inputs() const { - return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_ADJ_X) && - VerifyField(verifier, VT_ADJ_Y) && - VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS) && - 
verifier.EndTable(); - } - BatchMatMulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BatchMatMulOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_adj_x(bool adj_x) { - fbb_.AddElement(BatchMatMulOptions::VT_ADJ_X, static_cast(adj_x), 0); - } - void add_adj_y(bool adj_y) { - fbb_.AddElement(BatchMatMulOptions::VT_ADJ_Y, static_cast(adj_y), 0); - } - void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { - fbb_.AddElement(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); - } - explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BatchMatMulOptionsBuilder &operator=(const BatchMatMulOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBatchMatMulOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool adj_x = false, - bool adj_y = false, - bool asymmetric_quantize_inputs = false) { - BatchMatMulOptionsBuilder builder_(_fbb); - builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); - builder_.add_adj_y(adj_y); - builder_.add_adj_x(adj_x); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct CumsumOptionsT : public flatbuffers::NativeTable { - typedef CumsumOptions TableType; - bool exclusive; - bool reverse; - CumsumOptionsT() - : exclusive(false), - reverse(false) { - } -}; - -struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef CumsumOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_EXCLUSIVE = 4, - VT_REVERSE = 6 - }; - bool exclusive() const { - return GetField(VT_EXCLUSIVE, 0) != 0; - } - bool reverse() const { - return GetField(VT_REVERSE, 0) != 0; - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_EXCLUSIVE) && - VerifyField(verifier, VT_REVERSE) && - verifier.EndTable(); - } - CumsumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct CumsumOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_exclusive(bool exclusive) { - fbb_.AddElement(CumsumOptions::VT_EXCLUSIVE, static_cast(exclusive), 0); - } - void add_reverse(bool reverse) { - fbb_.AddElement(CumsumOptions::VT_REVERSE, static_cast(reverse), 0); - } - explicit CumsumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - CumsumOptionsBuilder &operator=(const CumsumOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = 
flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateCumsumOptions( - flatbuffers::FlatBufferBuilder &_fbb, - bool exclusive = false, - bool reverse = false) { - CumsumOptionsBuilder builder_(_fbb); - builder_.add_reverse(reverse); - builder_.add_exclusive(exclusive); - return builder_.Finish(); -} - -flatbuffers::Offset CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BroadcastToOptionsT : public flatbuffers::NativeTable { - typedef BroadcastToOptions TableType; - BroadcastToOptionsT() { - } -}; - -struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BroadcastToOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - BroadcastToOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BroadcastToOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit BroadcastToOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BroadcastToOptionsBuilder &operator=(const BroadcastToOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBroadcastToOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - BroadcastToOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct Rfft2dOptionsT : public flatbuffers::NativeTable { - typedef Rfft2dOptions TableType; - Rfft2dOptionsT() { - } -}; - -struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef Rfft2dOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - Rfft2dOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct Rfft2dOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit Rfft2dOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - Rfft2dOptionsBuilder &operator=(const Rfft2dOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateRfft2dOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - Rfft2dOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - 
-struct HashtableOptionsT : public flatbuffers::NativeTable { - typedef HashtableOptions TableType; - int32_t table_id; - tflite::TensorType key_dtype; - tflite::TensorType value_dtype; - HashtableOptionsT() - : table_id(0), - key_dtype(tflite::TensorType_FLOAT32), - value_dtype(tflite::TensorType_FLOAT32) { - } -}; - -struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef HashtableOptionsT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TABLE_ID = 4, - VT_KEY_DTYPE = 6, - VT_VALUE_DTYPE = 8 - }; - int32_t table_id() const { - return GetField(VT_TABLE_ID, 0); - } - tflite::TensorType key_dtype() const { - return static_cast(GetField(VT_KEY_DTYPE, 0)); - } - tflite::TensorType value_dtype() const { - return static_cast(GetField(VT_VALUE_DTYPE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_TABLE_ID) && - VerifyField(verifier, VT_KEY_DTYPE) && - VerifyField(verifier, VT_VALUE_DTYPE) && - verifier.EndTable(); - } - HashtableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct HashtableOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_table_id(int32_t table_id) { - fbb_.AddElement(HashtableOptions::VT_TABLE_ID, table_id, 0); - } - void add_key_dtype(tflite::TensorType key_dtype) { - fbb_.AddElement(HashtableOptions::VT_KEY_DTYPE, static_cast(key_dtype), 0); - } - void add_value_dtype(tflite::TensorType value_dtype) { - fbb_.AddElement(HashtableOptions::VT_VALUE_DTYPE, static_cast(value_dtype), 0); - } - explicit HashtableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - HashtableOptionsBuilder &operator=(const HashtableOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateHashtableOptions( - flatbuffers::FlatBufferBuilder &_fbb, - int32_t table_id = 0, - tflite::TensorType key_dtype = tflite::TensorType_FLOAT32, - tflite::TensorType value_dtype = tflite::TensorType_FLOAT32) { - HashtableOptionsBuilder builder_(_fbb); - builder_.add_table_id(table_id); - builder_.add_value_dtype(value_dtype); - builder_.add_key_dtype(key_dtype); - return builder_.Finish(); -} - -flatbuffers::Offset CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct HashtableFindOptionsT : public flatbuffers::NativeTable { - typedef HashtableFindOptions TableType; - HashtableFindOptionsT() { - } -}; - -struct HashtableFindOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef HashtableFindOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - HashtableFindOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct HashtableFindOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit HashtableFindOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - HashtableFindOptionsBuilder &operator=(const HashtableFindOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateHashtableFindOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - HashtableFindOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct HashtableImportOptionsT : public flatbuffers::NativeTable { - typedef HashtableImportOptions TableType; - HashtableImportOptionsT() { - } -}; - -struct HashtableImportOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef HashtableImportOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - HashtableImportOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct HashtableImportOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit HashtableImportOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - HashtableImportOptionsBuilder &operator=(const HashtableImportOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateHashtableImportOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - HashtableImportOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct HashtableSizeOptionsT : public flatbuffers::NativeTable { - typedef HashtableSizeOptions TableType; - HashtableSizeOptionsT() { - } -}; - -struct HashtableSizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef HashtableSizeOptionsT NativeTableType; - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - verifier.EndTable(); - } - HashtableSizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct HashtableSizeOptionsBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - explicit HashtableSizeOptionsBuilder(flatbuffers::FlatBufferBuilder 
&_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - HashtableSizeOptionsBuilder &operator=(const HashtableSizeOptionsBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateHashtableSizeOptions( - flatbuffers::FlatBufferBuilder &_fbb) { - HashtableSizeOptionsBuilder builder_(_fbb); - return builder_.Finish(); -} - -flatbuffers::Offset CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct OperatorCodeT : public flatbuffers::NativeTable { - typedef OperatorCode TableType; - int8_t deprecated_builtin_code; - std::string custom_code; - int32_t version; - tflite::BuiltinOperator builtin_code; - OperatorCodeT() - : deprecated_builtin_code(0), - version(1), - builtin_code(tflite::BuiltinOperator_ADD) { - } -}; - -struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OperatorCodeT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DEPRECATED_BUILTIN_CODE = 4, - VT_CUSTOM_CODE = 6, - VT_VERSION = 8, - VT_BUILTIN_CODE = 10 - }; - int8_t deprecated_builtin_code() const { - return GetField(VT_DEPRECATED_BUILTIN_CODE, 0); - } - const flatbuffers::String *custom_code() const { - return GetPointer(VT_CUSTOM_CODE); - } - int32_t version() const { - return GetField(VT_VERSION, 1); - } - tflite::BuiltinOperator builtin_code() const { - return static_cast(GetField(VT_BUILTIN_CODE, 0)); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_DEPRECATED_BUILTIN_CODE) && - VerifyOffset(verifier, VT_CUSTOM_CODE) && - verifier.VerifyString(custom_code()) && - VerifyField(verifier, VT_VERSION) && - VerifyField(verifier, VT_BUILTIN_CODE) && - verifier.EndTable(); - } - OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct OperatorCodeBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_deprecated_builtin_code(int8_t deprecated_builtin_code) { - fbb_.AddElement(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0); - } - void add_custom_code(flatbuffers::Offset custom_code) { - fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code); - } - void add_version(int32_t version) { - fbb_.AddElement(OperatorCode::VT_VERSION, version, 1); - } - void add_builtin_code(tflite::BuiltinOperator builtin_code) { - fbb_.AddElement(OperatorCode::VT_BUILTIN_CODE, static_cast(builtin_code), 0); - } - explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OperatorCodeBuilder &operator=(const OperatorCodeBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateOperatorCode( - flatbuffers::FlatBufferBuilder &_fbb, - int8_t deprecated_builtin_code = 0, - flatbuffers::Offset custom_code = 0, - int32_t version = 1, - tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { - OperatorCodeBuilder 
builder_(_fbb); - builder_.add_builtin_code(builtin_code); - builder_.add_version(version); - builder_.add_custom_code(custom_code); - builder_.add_deprecated_builtin_code(deprecated_builtin_code); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateOperatorCodeDirect( - flatbuffers::FlatBufferBuilder &_fbb, - int8_t deprecated_builtin_code = 0, - const char *custom_code = nullptr, - int32_t version = 1, - tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { - auto custom_code__ = custom_code ? _fbb.CreateString(custom_code) : 0; - return tflite::CreateOperatorCode( - _fbb, - deprecated_builtin_code, - custom_code__, - version, - builtin_code); -} - -flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct OperatorT : public flatbuffers::NativeTable { - typedef Operator TableType; - uint32_t opcode_index; - std::vector inputs; - std::vector outputs; - tflite::BuiltinOptionsUnion builtin_options; - std::vector custom_options; - tflite::CustomOptionsFormat custom_options_format; - std::vector mutating_variable_inputs; - std::vector intermediates; - OperatorT() - : opcode_index(0), - custom_options_format(tflite::CustomOptionsFormat_FLEXBUFFERS) { - } -}; - -struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef OperatorT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_OPCODE_INDEX = 4, - VT_INPUTS = 6, - VT_OUTPUTS = 8, - VT_BUILTIN_OPTIONS_TYPE = 10, - VT_BUILTIN_OPTIONS = 12, - VT_CUSTOM_OPTIONS = 14, - VT_CUSTOM_OPTIONS_FORMAT = 16, - VT_MUTATING_VARIABLE_INPUTS = 18, - VT_INTERMEDIATES = 20 - }; - uint32_t opcode_index() const { - return GetField(VT_OPCODE_INDEX, 0); - } - const flatbuffers::Vector *inputs() const { - return GetPointer *>(VT_INPUTS); - } - const flatbuffers::Vector *outputs() const { - return GetPointer *>(VT_OUTPUTS); - } - tflite::BuiltinOptions builtin_options_type() const { - return static_cast(GetField(VT_BUILTIN_OPTIONS_TYPE, 0)); - } - const void *builtin_options() const { - return GetPointer(VT_BUILTIN_OPTIONS); - } - template const T *builtin_options_as() const; - const tflite::Conv2DOptions *builtin_options_as_Conv2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Conv2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DepthwiseConv2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ConcatEmbeddingsOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LSHProjectionOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::Pool2DOptions *builtin_options_as_Pool2DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Pool2DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SVDFOptions *builtin_options_as_SVDFOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SVDFOptions ? 
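For orientation, a minimal sketch of how the generated OperatorCode API above is typically used, assuming the FlatBuffers runtime and this generated schema header (name assumed here as "schema_generated.h") are available. builtin_code() carries the full 32-bit enum, while deprecated_builtin_code() appears to be retained only so readers limited to the original 8-bit field keep working; nothing below is part of the firmware patch itself.

#include <cassert>
#include "flatbuffers/flatbuffers.h"
// #include "schema_generated.h"   // header name assumed

int main() {
  flatbuffers::FlatBufferBuilder fbb;
  auto opcode = tflite::CreateOperatorCodeDirect(
      fbb,
      /*deprecated_builtin_code=*/0,
      /*custom_code=*/nullptr,
      /*version=*/1,
      /*builtin_code=*/tflite::BuiltinOperator_ADD);
  fbb.Finish(opcode);

  // Read the table back out of the finished buffer.
  auto *code = flatbuffers::GetRoot<tflite::OperatorCode>(fbb.GetBufferPointer());
  assert(code->builtin_code() == tflite::BuiltinOperator_ADD);
  assert(code->version() == 1);
  return 0;
}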
static_cast(builtin_options()) : nullptr; - } - const tflite::RNNOptions *builtin_options_as_RNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RNNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FullyConnectedOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SoftmaxOptions *builtin_options_as_SoftmaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SoftmaxOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ConcatenationOptions *builtin_options_as_ConcatenationOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ConcatenationOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::AddOptions *builtin_options_as_AddOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AddOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::L2NormOptions *builtin_options_as_L2NormOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_L2NormOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LocalResponseNormalizationOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LSTMOptions *builtin_options_as_LSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LSTMOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ResizeBilinearOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CallOptions *builtin_options_as_CallOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CallOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReshapeOptions *builtin_options_as_ReshapeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReshapeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SkipGramOptions *builtin_options_as_SkipGramOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SkipGramOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SpaceToDepthOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_EmbeddingLookupSparseOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MulOptions *builtin_options_as_MulOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MulOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::PadOptions *builtin_options_as_PadOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PadOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::GatherOptions *builtin_options_as_GatherOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GatherOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BatchToSpaceNDOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SpaceToBatchNDOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TransposeOptions *builtin_options_as_TransposeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TransposeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReducerOptions *builtin_options_as_ReducerOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReducerOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SubOptions *builtin_options_as_SubOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SubOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DivOptions *builtin_options_as_DivOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DivOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SqueezeOptions *builtin_options_as_SqueezeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SqueezeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SequenceRNNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::StridedSliceOptions *builtin_options_as_StridedSliceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_StridedSliceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ExpOptions *builtin_options_as_ExpOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ExpOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TopKV2Options *builtin_options_as_TopKV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_TopKV2Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::SplitOptions *builtin_options_as_SplitOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SplitOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogSoftmaxOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CastOptions *builtin_options_as_CastOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CastOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DequantizeOptions *builtin_options_as_DequantizeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DequantizeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MaximumMinimumOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ArgMaxOptions *builtin_options_as_ArgMaxOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ArgMaxOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::LessOptions *builtin_options_as_LessOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LessOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::NegOptions *builtin_options_as_NegOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_NegOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::PadV2Options *builtin_options_as_PadV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_PadV2Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::GreaterOptions *builtin_options_as_GreaterOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GreaterOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GreaterEqualOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LessEqualOptions *builtin_options_as_LessEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LessEqualOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SelectOptions *builtin_options_as_SelectOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SelectOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SliceOptions *builtin_options_as_SliceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SliceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TransposeConvOptions *builtin_options_as_TransposeConvOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TransposeConvOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SparseToDenseOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::TileOptions *builtin_options_as_TileOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_TileOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ExpandDimsOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::EqualOptions *builtin_options_as_EqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_EqualOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::NotEqualOptions *builtin_options_as_NotEqualOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_NotEqualOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ShapeOptions *builtin_options_as_ShapeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ShapeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::PowOptions *builtin_options_as_PowOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PowOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ArgMinOptions *builtin_options_as_ArgMinOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ArgMinOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FakeQuantOptions *builtin_options_as_FakeQuantOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FakeQuantOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::PackOptions *builtin_options_as_PackOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_PackOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LogicalOrOptions *builtin_options_as_LogicalOrOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalOrOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::OneHotOptions *builtin_options_as_OneHotOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_OneHotOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LogicalAndOptions *builtin_options_as_LogicalAndOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalAndOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::LogicalNotOptions *builtin_options_as_LogicalNotOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LogicalNotOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::UnpackOptions *builtin_options_as_UnpackOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UnpackOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FloorDivOptions *builtin_options_as_FloorDivOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FloorDivOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SquareOptions *builtin_options_as_SquareOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SquareOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ZerosLikeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FillOptions *builtin_options_as_FillOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FillOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BidirectionalSequenceLSTMOptions *builtin_options_as_BidirectionalSequenceLSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceRNNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::UnidirectionalSequenceLSTMOptions *builtin_options_as_UnidirectionalSequenceLSTMOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UnidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::FloorModOptions *builtin_options_as_FloorModOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_FloorModOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::RangeOptions *builtin_options_as_RangeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RangeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ResizeNearestNeighborOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::LeakyReluOptions *builtin_options_as_LeakyReluOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_LeakyReluOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SquaredDifferenceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MirrorPadOptions *builtin_options_as_MirrorPadOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MirrorPadOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::AbsOptions *builtin_options_as_AbsOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AbsOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SplitVOptions *builtin_options_as_SplitVOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SplitVOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::UniqueOptions *builtin_options_as_UniqueOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_UniqueOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReverseV2Options *builtin_options_as_ReverseV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_ReverseV2Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::AddNOptions *builtin_options_as_AddNOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_AddNOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::GatherNdOptions *builtin_options_as_GatherNdOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_GatherNdOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CosOptions *builtin_options_as_CosOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CosOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::WhereOptions *builtin_options_as_WhereOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_WhereOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::RankOptions *builtin_options_as_RankOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_RankOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ReverseSequenceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MatrixDiagOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::QuantizeOptions *builtin_options_as_QuantizeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_QuantizeOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_MatrixSetDiagOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::HardSwishOptions *builtin_options_as_HardSwishOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_HardSwishOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::IfOptions *builtin_options_as_IfOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_IfOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::WhileOptions *builtin_options_as_WhileOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_WhileOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DepthToSpaceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const { - return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV4Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const { - return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV5Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::ScatterNdOptions *builtin_options_as_ScatterNdOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_ScatterNdOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SelectV2Options *builtin_options_as_SelectV2Options() const { - return builtin_options_type() == tflite::BuiltinOptions_SelectV2Options ? static_cast(builtin_options()) : nullptr; - } - const tflite::DensifyOptions *builtin_options_as_DensifyOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_DensifyOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::SegmentSumOptions *builtin_options_as_SegmentSumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_SegmentSumOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BatchMatMulOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CumsumOptions *builtin_options_as_CumsumOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CumsumOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::CallOnceOptions *builtin_options_as_CallOnceOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_CallOnceOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::BroadcastToOptions *builtin_options_as_BroadcastToOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_BroadcastToOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::Rfft2dOptions *builtin_options_as_Rfft2dOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Rfft2dOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::Conv3DOptions *builtin_options_as_Conv3DOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_Conv3DOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::HashtableOptions *builtin_options_as_HashtableOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_HashtableOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::HashtableFindOptions *builtin_options_as_HashtableFindOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_HashtableFindOptions ? 
static_cast(builtin_options()) : nullptr; - } - const tflite::HashtableImportOptions *builtin_options_as_HashtableImportOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_HashtableImportOptions ? static_cast(builtin_options()) : nullptr; - } - const tflite::HashtableSizeOptions *builtin_options_as_HashtableSizeOptions() const { - return builtin_options_type() == tflite::BuiltinOptions_HashtableSizeOptions ? static_cast(builtin_options()) : nullptr; - } - const flatbuffers::Vector *custom_options() const { - return GetPointer *>(VT_CUSTOM_OPTIONS); - } - tflite::CustomOptionsFormat custom_options_format() const { - return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); - } - const flatbuffers::Vector *mutating_variable_inputs() const { - return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); - } - const flatbuffers::Vector *intermediates() const { - return GetPointer *>(VT_INTERMEDIATES); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_OPCODE_INDEX) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - VerifyField(verifier, VT_BUILTIN_OPTIONS_TYPE) && - VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && - VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && - VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && - verifier.VerifyVector(custom_options()) && - VerifyField(verifier, VT_CUSTOM_OPTIONS_FORMAT) && - VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && - verifier.VerifyVector(mutating_variable_inputs()) && - VerifyOffset(verifier, VT_INTERMEDIATES) && - verifier.VerifyVector(intermediates()) && - verifier.EndTable(); - } - OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -template<> inline const tflite::Conv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Conv2DOptions(); -} - -template<> inline const tflite::DepthwiseConv2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_DepthwiseConv2DOptions(); -} - -template<> inline const tflite::ConcatEmbeddingsOptions *Operator::builtin_options_as() const { - return builtin_options_as_ConcatEmbeddingsOptions(); -} - -template<> inline const tflite::LSHProjectionOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSHProjectionOptions(); -} - -template<> inline const tflite::Pool2DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Pool2DOptions(); -} - -template<> inline const tflite::SVDFOptions *Operator::builtin_options_as() const { - return builtin_options_as_SVDFOptions(); -} - -template<> inline const tflite::RNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_RNNOptions(); -} - -template<> inline const tflite::FullyConnectedOptions *Operator::builtin_options_as() const { - return builtin_options_as_FullyConnectedOptions(); -} - -template<> inline const tflite::SoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_SoftmaxOptions(); -} - -template<> inline const tflite::ConcatenationOptions *Operator::builtin_options_as() const { - return 
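The builtin_options accessors above follow the usual FlatBuffers union pattern: check the tag returned by builtin_options_type(), then downcast with the matching typed helper (or the templated overload, which returns nullptr on a tag mismatch). A small read-side sketch, assuming Conv2DOptions and AddOptions from the same generated schema, which are declared elsewhere in this header:

// #include "schema_generated.h"   // header name assumed

void DescribeOperator(const tflite::Operator *op) {
  switch (op->builtin_options_type()) {
    case tflite::BuiltinOptions_Conv2DOptions: {
      const tflite::Conv2DOptions *conv = op->builtin_options_as_Conv2DOptions();
      (void)conv;  // inspect convolution parameters here
      break;
    }
    case tflite::BuiltinOptions_AddOptions: {
      // Templated form; returns nullptr when the tag does not match.
      const tflite::AddOptions *add = op->builtin_options_as<tflite::AddOptions>();
      (void)add;
      break;
    }
    default:
      break;  // custom operators keep their parameters in custom_options()
  }
}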
builtin_options_as_ConcatenationOptions(); -} - -template<> inline const tflite::AddOptions *Operator::builtin_options_as() const { - return builtin_options_as_AddOptions(); -} - -template<> inline const tflite::L2NormOptions *Operator::builtin_options_as() const { - return builtin_options_as_L2NormOptions(); -} - -template<> inline const tflite::LocalResponseNormalizationOptions *Operator::builtin_options_as() const { - return builtin_options_as_LocalResponseNormalizationOptions(); -} - -template<> inline const tflite::LSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_LSTMOptions(); -} - -template<> inline const tflite::ResizeBilinearOptions *Operator::builtin_options_as() const { - return builtin_options_as_ResizeBilinearOptions(); -} - -template<> inline const tflite::CallOptions *Operator::builtin_options_as() const { - return builtin_options_as_CallOptions(); -} - -template<> inline const tflite::ReshapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReshapeOptions(); -} - -template<> inline const tflite::SkipGramOptions *Operator::builtin_options_as() const { - return builtin_options_as_SkipGramOptions(); -} - -template<> inline const tflite::SpaceToDepthOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToDepthOptions(); -} - -template<> inline const tflite::EmbeddingLookupSparseOptions *Operator::builtin_options_as() const { - return builtin_options_as_EmbeddingLookupSparseOptions(); -} - -template<> inline const tflite::MulOptions *Operator::builtin_options_as() const { - return builtin_options_as_MulOptions(); -} - -template<> inline const tflite::PadOptions *Operator::builtin_options_as() const { - return builtin_options_as_PadOptions(); -} - -template<> inline const tflite::GatherOptions *Operator::builtin_options_as() const { - return builtin_options_as_GatherOptions(); -} - -template<> inline const tflite::BatchToSpaceNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_BatchToSpaceNDOptions(); -} - -template<> inline const tflite::SpaceToBatchNDOptions *Operator::builtin_options_as() const { - return builtin_options_as_SpaceToBatchNDOptions(); -} - -template<> inline const tflite::TransposeOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeOptions(); -} - -template<> inline const tflite::ReducerOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReducerOptions(); -} - -template<> inline const tflite::SubOptions *Operator::builtin_options_as() const { - return builtin_options_as_SubOptions(); -} - -template<> inline const tflite::DivOptions *Operator::builtin_options_as() const { - return builtin_options_as_DivOptions(); -} - -template<> inline const tflite::SqueezeOptions *Operator::builtin_options_as() const { - return builtin_options_as_SqueezeOptions(); -} - -template<> inline const tflite::SequenceRNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_SequenceRNNOptions(); -} - -template<> inline const tflite::StridedSliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_StridedSliceOptions(); -} - -template<> inline const tflite::ExpOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpOptions(); -} - -template<> inline const tflite::TopKV2Options *Operator::builtin_options_as() const { - return builtin_options_as_TopKV2Options(); -} - -template<> inline const tflite::SplitOptions *Operator::builtin_options_as() const { - return 
builtin_options_as_SplitOptions(); -} - -template<> inline const tflite::LogSoftmaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogSoftmaxOptions(); -} - -template<> inline const tflite::CastOptions *Operator::builtin_options_as() const { - return builtin_options_as_CastOptions(); -} - -template<> inline const tflite::DequantizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_DequantizeOptions(); -} - -template<> inline const tflite::MaximumMinimumOptions *Operator::builtin_options_as() const { - return builtin_options_as_MaximumMinimumOptions(); -} - -template<> inline const tflite::ArgMaxOptions *Operator::builtin_options_as() const { - return builtin_options_as_ArgMaxOptions(); -} - -template<> inline const tflite::LessOptions *Operator::builtin_options_as() const { - return builtin_options_as_LessOptions(); -} - -template<> inline const tflite::NegOptions *Operator::builtin_options_as() const { - return builtin_options_as_NegOptions(); -} - -template<> inline const tflite::PadV2Options *Operator::builtin_options_as() const { - return builtin_options_as_PadV2Options(); -} - -template<> inline const tflite::GreaterOptions *Operator::builtin_options_as() const { - return builtin_options_as_GreaterOptions(); -} - -template<> inline const tflite::GreaterEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_GreaterEqualOptions(); -} - -template<> inline const tflite::LessEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_LessEqualOptions(); -} - -template<> inline const tflite::SelectOptions *Operator::builtin_options_as() const { - return builtin_options_as_SelectOptions(); -} - -template<> inline const tflite::SliceOptions *Operator::builtin_options_as() const { - return builtin_options_as_SliceOptions(); -} - -template<> inline const tflite::TransposeConvOptions *Operator::builtin_options_as() const { - return builtin_options_as_TransposeConvOptions(); -} - -template<> inline const tflite::SparseToDenseOptions *Operator::builtin_options_as() const { - return builtin_options_as_SparseToDenseOptions(); -} - -template<> inline const tflite::TileOptions *Operator::builtin_options_as() const { - return builtin_options_as_TileOptions(); -} - -template<> inline const tflite::ExpandDimsOptions *Operator::builtin_options_as() const { - return builtin_options_as_ExpandDimsOptions(); -} - -template<> inline const tflite::EqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_EqualOptions(); -} - -template<> inline const tflite::NotEqualOptions *Operator::builtin_options_as() const { - return builtin_options_as_NotEqualOptions(); -} - -template<> inline const tflite::ShapeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ShapeOptions(); -} - -template<> inline const tflite::PowOptions *Operator::builtin_options_as() const { - return builtin_options_as_PowOptions(); -} - -template<> inline const tflite::ArgMinOptions *Operator::builtin_options_as() const { - return builtin_options_as_ArgMinOptions(); -} - -template<> inline const tflite::FakeQuantOptions *Operator::builtin_options_as() const { - return builtin_options_as_FakeQuantOptions(); -} - -template<> inline const tflite::PackOptions *Operator::builtin_options_as() const { - return builtin_options_as_PackOptions(); -} - -template<> inline const tflite::LogicalOrOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalOrOptions(); -} - -template<> inline 
const tflite::OneHotOptions *Operator::builtin_options_as() const { - return builtin_options_as_OneHotOptions(); -} - -template<> inline const tflite::LogicalAndOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalAndOptions(); -} - -template<> inline const tflite::LogicalNotOptions *Operator::builtin_options_as() const { - return builtin_options_as_LogicalNotOptions(); -} - -template<> inline const tflite::UnpackOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnpackOptions(); -} - -template<> inline const tflite::FloorDivOptions *Operator::builtin_options_as() const { - return builtin_options_as_FloorDivOptions(); -} - -template<> inline const tflite::SquareOptions *Operator::builtin_options_as() const { - return builtin_options_as_SquareOptions(); -} - -template<> inline const tflite::ZerosLikeOptions *Operator::builtin_options_as() const { - return builtin_options_as_ZerosLikeOptions(); -} - -template<> inline const tflite::FillOptions *Operator::builtin_options_as() const { - return builtin_options_as_FillOptions(); -} - -template<> inline const tflite::BidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_BidirectionalSequenceLSTMOptions(); -} - -template<> inline const tflite::BidirectionalSequenceRNNOptions *Operator::builtin_options_as() const { - return builtin_options_as_BidirectionalSequenceRNNOptions(); -} - -template<> inline const tflite::UnidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { - return builtin_options_as_UnidirectionalSequenceLSTMOptions(); -} - -template<> inline const tflite::FloorModOptions *Operator::builtin_options_as() const { - return builtin_options_as_FloorModOptions(); -} - -template<> inline const tflite::RangeOptions *Operator::builtin_options_as() const { - return builtin_options_as_RangeOptions(); -} - -template<> inline const tflite::ResizeNearestNeighborOptions *Operator::builtin_options_as() const { - return builtin_options_as_ResizeNearestNeighborOptions(); -} - -template<> inline const tflite::LeakyReluOptions *Operator::builtin_options_as() const { - return builtin_options_as_LeakyReluOptions(); -} - -template<> inline const tflite::SquaredDifferenceOptions *Operator::builtin_options_as() const { - return builtin_options_as_SquaredDifferenceOptions(); -} - -template<> inline const tflite::MirrorPadOptions *Operator::builtin_options_as() const { - return builtin_options_as_MirrorPadOptions(); -} - -template<> inline const tflite::AbsOptions *Operator::builtin_options_as() const { - return builtin_options_as_AbsOptions(); -} - -template<> inline const tflite::SplitVOptions *Operator::builtin_options_as() const { - return builtin_options_as_SplitVOptions(); -} - -template<> inline const tflite::UniqueOptions *Operator::builtin_options_as() const { - return builtin_options_as_UniqueOptions(); -} - -template<> inline const tflite::ReverseV2Options *Operator::builtin_options_as() const { - return builtin_options_as_ReverseV2Options(); -} - -template<> inline const tflite::AddNOptions *Operator::builtin_options_as() const { - return builtin_options_as_AddNOptions(); -} - -template<> inline const tflite::GatherNdOptions *Operator::builtin_options_as() const { - return builtin_options_as_GatherNdOptions(); -} - -template<> inline const tflite::CosOptions *Operator::builtin_options_as() const { - return builtin_options_as_CosOptions(); -} - -template<> inline const tflite::WhereOptions *Operator::builtin_options_as() const { - 
return builtin_options_as_WhereOptions(); -} - -template<> inline const tflite::RankOptions *Operator::builtin_options_as() const { - return builtin_options_as_RankOptions(); -} - -template<> inline const tflite::ReverseSequenceOptions *Operator::builtin_options_as() const { - return builtin_options_as_ReverseSequenceOptions(); -} - -template<> inline const tflite::MatrixDiagOptions *Operator::builtin_options_as() const { - return builtin_options_as_MatrixDiagOptions(); -} - -template<> inline const tflite::QuantizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_QuantizeOptions(); -} - -template<> inline const tflite::MatrixSetDiagOptions *Operator::builtin_options_as() const { - return builtin_options_as_MatrixSetDiagOptions(); -} - -template<> inline const tflite::HardSwishOptions *Operator::builtin_options_as() const { - return builtin_options_as_HardSwishOptions(); -} - -template<> inline const tflite::IfOptions *Operator::builtin_options_as() const { - return builtin_options_as_IfOptions(); -} - -template<> inline const tflite::WhileOptions *Operator::builtin_options_as() const { - return builtin_options_as_WhileOptions(); -} - -template<> inline const tflite::DepthToSpaceOptions *Operator::builtin_options_as() const { - return builtin_options_as_DepthToSpaceOptions(); -} - -template<> inline const tflite::NonMaxSuppressionV4Options *Operator::builtin_options_as() const { - return builtin_options_as_NonMaxSuppressionV4Options(); -} - -template<> inline const tflite::NonMaxSuppressionV5Options *Operator::builtin_options_as() const { - return builtin_options_as_NonMaxSuppressionV5Options(); -} - -template<> inline const tflite::ScatterNdOptions *Operator::builtin_options_as() const { - return builtin_options_as_ScatterNdOptions(); -} - -template<> inline const tflite::SelectV2Options *Operator::builtin_options_as() const { - return builtin_options_as_SelectV2Options(); -} - -template<> inline const tflite::DensifyOptions *Operator::builtin_options_as() const { - return builtin_options_as_DensifyOptions(); -} - -template<> inline const tflite::SegmentSumOptions *Operator::builtin_options_as() const { - return builtin_options_as_SegmentSumOptions(); -} - -template<> inline const tflite::BatchMatMulOptions *Operator::builtin_options_as() const { - return builtin_options_as_BatchMatMulOptions(); -} - -template<> inline const tflite::CumsumOptions *Operator::builtin_options_as() const { - return builtin_options_as_CumsumOptions(); -} - -template<> inline const tflite::CallOnceOptions *Operator::builtin_options_as() const { - return builtin_options_as_CallOnceOptions(); -} - -template<> inline const tflite::BroadcastToOptions *Operator::builtin_options_as() const { - return builtin_options_as_BroadcastToOptions(); -} - -template<> inline const tflite::Rfft2dOptions *Operator::builtin_options_as() const { - return builtin_options_as_Rfft2dOptions(); -} - -template<> inline const tflite::Conv3DOptions *Operator::builtin_options_as() const { - return builtin_options_as_Conv3DOptions(); -} - -template<> inline const tflite::HashtableOptions *Operator::builtin_options_as() const { - return builtin_options_as_HashtableOptions(); -} - -template<> inline const tflite::HashtableFindOptions *Operator::builtin_options_as() const { - return builtin_options_as_HashtableFindOptions(); -} - -template<> inline const tflite::HashtableImportOptions *Operator::builtin_options_as() const { - return builtin_options_as_HashtableImportOptions(); -} - -template<> inline const 
tflite::HashtableSizeOptions *Operator::builtin_options_as() const { - return builtin_options_as_HashtableSizeOptions(); -} - -struct OperatorBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_opcode_index(uint32_t opcode_index) { - fbb_.AddElement(Operator::VT_OPCODE_INDEX, opcode_index, 0); - } - void add_inputs(flatbuffers::Offset> inputs) { - fbb_.AddOffset(Operator::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset> outputs) { - fbb_.AddOffset(Operator::VT_OUTPUTS, outputs); - } - void add_builtin_options_type(tflite::BuiltinOptions builtin_options_type) { - fbb_.AddElement(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast(builtin_options_type), 0); - } - void add_builtin_options(flatbuffers::Offset builtin_options) { - fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options); - } - void add_custom_options(flatbuffers::Offset> custom_options) { - fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options); - } - void add_custom_options_format(tflite::CustomOptionsFormat custom_options_format) { - fbb_.AddElement(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast(custom_options_format), 0); - } - void add_mutating_variable_inputs(flatbuffers::Offset> mutating_variable_inputs) { - fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs); - } - void add_intermediates(flatbuffers::Offset> intermediates) { - fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates); - } - explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - OperatorBuilder &operator=(const OperatorBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateOperator( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t opcode_index = 0, - flatbuffers::Offset> inputs = 0, - flatbuffers::Offset> outputs = 0, - tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, - flatbuffers::Offset builtin_options = 0, - flatbuffers::Offset> custom_options = 0, - tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, - flatbuffers::Offset> mutating_variable_inputs = 0, - flatbuffers::Offset> intermediates = 0) { - OperatorBuilder builder_(_fbb); - builder_.add_intermediates(intermediates); - builder_.add_mutating_variable_inputs(mutating_variable_inputs); - builder_.add_custom_options(custom_options); - builder_.add_builtin_options(builtin_options); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - builder_.add_opcode_index(opcode_index); - builder_.add_custom_options_format(custom_options_format); - builder_.add_builtin_options_type(builtin_options_type); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateOperatorDirect( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t opcode_index = 0, - const std::vector *inputs = nullptr, - const std::vector *outputs = nullptr, - tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, - flatbuffers::Offset builtin_options = 0, - const std::vector *custom_options = nullptr, - tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, - const std::vector *mutating_variable_inputs = nullptr, - const std::vector *intermediates = nullptr) { - auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; - auto custom_options__ = custom_options ? 
_fbb.CreateVector(*custom_options) : 0; - auto mutating_variable_inputs__ = mutating_variable_inputs ? _fbb.CreateVector(*mutating_variable_inputs) : 0; - auto intermediates__ = intermediates ? _fbb.CreateVector(*intermediates) : 0; - return tflite::CreateOperator( - _fbb, - opcode_index, - inputs__, - outputs__, - builtin_options_type, - builtin_options, - custom_options__, - custom_options_format, - mutating_variable_inputs__, - intermediates__); -} - -flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SubGraphT : public flatbuffers::NativeTable { - typedef SubGraph TableType; - std::vector> tensors; - std::vector inputs; - std::vector outputs; - std::vector> operators; - std::string name; - SubGraphT() { - } -}; - -struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SubGraphT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_TENSORS = 4, - VT_INPUTS = 6, - VT_OUTPUTS = 8, - VT_OPERATORS = 10, - VT_NAME = 12 - }; - const flatbuffers::Vector> *tensors() const { - return GetPointer> *>(VT_TENSORS); - } - const flatbuffers::Vector *inputs() const { - return GetPointer *>(VT_INPUTS); - } - const flatbuffers::Vector *outputs() const { - return GetPointer *>(VT_OUTPUTS); - } - const flatbuffers::Vector> *operators() const { - return GetPointer> *>(VT_OPERATORS); - } - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_TENSORS) && - verifier.VerifyVector(tensors()) && - verifier.VerifyVectorOfTables(tensors()) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - VerifyOffset(verifier, VT_OPERATORS) && - verifier.VerifyVector(operators()) && - verifier.VerifyVectorOfTables(operators()) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - verifier.EndTable(); - } - SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SubGraphBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_tensors(flatbuffers::Offset>> tensors) { - fbb_.AddOffset(SubGraph::VT_TENSORS, tensors); - } - void add_inputs(flatbuffers::Offset> inputs) { - fbb_.AddOffset(SubGraph::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset> outputs) { - fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs); - } - void add_operators(flatbuffers::Offset>> operators) { - fbb_.AddOffset(SubGraph::VT_OPERATORS, operators); - } - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(SubGraph::VT_NAME, name); - } - explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SubGraphBuilder &operator=(const SubGraphBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSubGraph( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> tensors = 0, - flatbuffers::Offset> 
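On the write side, CreateOperatorDirect above takes the union tag and a type-erased options offset as separate arguments. A sketch assuming tflite::CreateAddOptions from the same generated header (not shown in this hunk); the tensor indices are placeholders, and opcode_index must point at a matching entry in the enclosing model's list of OperatorCodes:

#include <cstdint>
#include <vector>
// #include "schema_generated.h"   // header name assumed

flatbuffers::Offset<tflite::Operator> MakeAddOperator(
    flatbuffers::FlatBufferBuilder &fbb, uint32_t opcode_index) {
  std::vector<int32_t> inputs{0, 1};   // tensor indices within the SubGraph
  std::vector<int32_t> outputs{2};
  // .Union() type-erases the options table into the Offset<void> slot; the
  // BuiltinOptions tag passed alongside records which table the union holds.
  flatbuffers::Offset<void> options = tflite::CreateAddOptions(fbb).Union();
  return tflite::CreateOperatorDirect(fbb, opcode_index, &inputs, &outputs,
                                      tflite::BuiltinOptions_AddOptions, options);
}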
inputs = 0, - flatbuffers::Offset> outputs = 0, - flatbuffers::Offset>> operators = 0, - flatbuffers::Offset name = 0) { - SubGraphBuilder builder_(_fbb); - builder_.add_name(name); - builder_.add_operators(operators); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - builder_.add_tensors(tensors); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSubGraphDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *tensors = nullptr, - const std::vector *inputs = nullptr, - const std::vector *outputs = nullptr, - const std::vector> *operators = nullptr, - const char *name = nullptr) { - auto tensors__ = tensors ? _fbb.CreateVector>(*tensors) : 0; - auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; - auto operators__ = operators ? _fbb.CreateVector>(*operators) : 0; - auto name__ = name ? _fbb.CreateString(name) : 0; - return tflite::CreateSubGraph( - _fbb, - tensors__, - inputs__, - outputs__, - operators__, - name__); -} - -flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct BufferT : public flatbuffers::NativeTable { - typedef Buffer TableType; - std::vector data; - BufferT() { - } -}; - -struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef BufferT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_DATA = 4 - }; - const flatbuffers::Vector *data() const { - return GetPointer *>(VT_DATA); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_DATA) && - verifier.VerifyVector(data()) && - verifier.EndTable(); - } - BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct BufferBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_data(flatbuffers::Offset> data) { - fbb_.AddOffset(Buffer::VT_DATA, data); - } - explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - BufferBuilder &operator=(const BufferBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateBuffer( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset> data = 0) { - BufferBuilder builder_(_fbb); - builder_.add_data(data); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateBufferDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector *data = nullptr) { - if (data) { _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 16); } - auto data__ = data ? 
_fbb.CreateVector(*data) : 0; - return tflite::CreateBuffer( - _fbb, - data__); -} - -flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct MetadataT : public flatbuffers::NativeTable { - typedef Metadata TableType; - std::string name; - uint32_t buffer; - MetadataT() - : buffer(0) { - } -}; - -struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef MetadataT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NAME = 4, - VT_BUFFER = 6 - }; - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - uint32_t buffer() const { - return GetField(VT_BUFFER, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - VerifyField(verifier, VT_BUFFER) && - verifier.EndTable(); - } - MetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct MetadataBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(Metadata::VT_NAME, name); - } - void add_buffer(uint32_t buffer) { - fbb_.AddElement(Metadata::VT_BUFFER, buffer, 0); - } - explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - MetadataBuilder &operator=(const MetadataBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateMetadata( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset name = 0, - uint32_t buffer = 0) { - MetadataBuilder builder_(_fbb); - builder_.add_buffer(buffer); - builder_.add_name(name); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateMetadataDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const char *name = nullptr, - uint32_t buffer = 0) { - auto name__ = name ? 
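CreateBufferDirect above force-aligns its payload to 16 bytes before serializing it, so the data can later be read with aligned accesses. A sketch of pairing a Buffer with a Metadata entry, assuming the usual convention that Metadata.buffer is an index into the model's buffers vector; the metadata name is a placeholder:

#include <cstdint>
#include <vector>
// #include "schema_generated.h"   // header name assumed

flatbuffers::Offset<tflite::Metadata> AddMetadataBlob(
    flatbuffers::FlatBufferBuilder &fbb,
    std::vector<flatbuffers::Offset<tflite::Buffer>> &buffers,
    const std::vector<uint8_t> &blob) {
  // Store the blob as a Buffer, then reference it by index from Metadata.
  buffers.push_back(tflite::CreateBufferDirect(fbb, &blob));
  const auto buffer_index = static_cast<uint32_t>(buffers.size() - 1);
  return tflite::CreateMetadataDirect(fbb, "my_metadata", buffer_index);
}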
_fbb.CreateString(name) : 0; - return tflite::CreateMetadata( - _fbb, - name__, - buffer); -} - -flatbuffers::Offset CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct TensorMapT : public flatbuffers::NativeTable { - typedef TensorMap TableType; - std::string name; - uint32_t tensor_index; - TensorMapT() - : tensor_index(0) { - } -}; - -struct TensorMap FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef TensorMapT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_NAME = 4, - VT_TENSOR_INDEX = 6 - }; - const flatbuffers::String *name() const { - return GetPointer(VT_NAME); - } - uint32_t tensor_index() const { - return GetField(VT_TENSOR_INDEX, 0); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_NAME) && - verifier.VerifyString(name()) && - VerifyField(verifier, VT_TENSOR_INDEX) && - verifier.EndTable(); - } - TensorMapT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct TensorMapBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_name(flatbuffers::Offset name) { - fbb_.AddOffset(TensorMap::VT_NAME, name); - } - void add_tensor_index(uint32_t tensor_index) { - fbb_.AddElement(TensorMap::VT_TENSOR_INDEX, tensor_index, 0); - } - explicit TensorMapBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - TensorMapBuilder &operator=(const TensorMapBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateTensorMap( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset name = 0, - uint32_t tensor_index = 0) { - TensorMapBuilder builder_(_fbb); - builder_.add_tensor_index(tensor_index); - builder_.add_name(name); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateTensorMapDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const char *name = nullptr, - uint32_t tensor_index = 0) { - auto name__ = name ? 
_fbb.CreateString(name) : 0; - return tflite::CreateTensorMap( - _fbb, - name__, - tensor_index); -} - -flatbuffers::Offset CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct SignatureDefT : public flatbuffers::NativeTable { - typedef SignatureDef TableType; - std::vector> inputs; - std::vector> outputs; - std::string method_name; - std::string key; - SignatureDefT() { - } -}; - -struct SignatureDef FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef SignatureDefT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_INPUTS = 4, - VT_OUTPUTS = 6, - VT_METHOD_NAME = 8, - VT_KEY = 10 - }; - const flatbuffers::Vector> *inputs() const { - return GetPointer> *>(VT_INPUTS); - } - const flatbuffers::Vector> *outputs() const { - return GetPointer> *>(VT_OUTPUTS); - } - const flatbuffers::String *method_name() const { - return GetPointer(VT_METHOD_NAME); - } - const flatbuffers::String *key() const { - return GetPointer(VT_KEY); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyOffset(verifier, VT_INPUTS) && - verifier.VerifyVector(inputs()) && - verifier.VerifyVectorOfTables(inputs()) && - VerifyOffset(verifier, VT_OUTPUTS) && - verifier.VerifyVector(outputs()) && - verifier.VerifyVectorOfTables(outputs()) && - VerifyOffset(verifier, VT_METHOD_NAME) && - verifier.VerifyString(method_name()) && - VerifyOffset(verifier, VT_KEY) && - verifier.VerifyString(key()) && - verifier.EndTable(); - } - SignatureDefT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct SignatureDefBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_inputs(flatbuffers::Offset>> inputs) { - fbb_.AddOffset(SignatureDef::VT_INPUTS, inputs); - } - void add_outputs(flatbuffers::Offset>> outputs) { - fbb_.AddOffset(SignatureDef::VT_OUTPUTS, outputs); - } - void add_method_name(flatbuffers::Offset method_name) { - fbb_.AddOffset(SignatureDef::VT_METHOD_NAME, method_name); - } - void add_key(flatbuffers::Offset key) { - fbb_.AddOffset(SignatureDef::VT_KEY, key); - } - explicit SignatureDefBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - SignatureDefBuilder &operator=(const SignatureDefBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateSignatureDef( - flatbuffers::FlatBufferBuilder &_fbb, - flatbuffers::Offset>> inputs = 0, - flatbuffers::Offset>> outputs = 0, - flatbuffers::Offset method_name = 0, - flatbuffers::Offset key = 0) { - SignatureDefBuilder builder_(_fbb); - builder_.add_key(key); - builder_.add_method_name(method_name); - builder_.add_outputs(outputs); - builder_.add_inputs(inputs); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateSignatureDefDirect( - flatbuffers::FlatBufferBuilder &_fbb, - const std::vector> *inputs = nullptr, - const std::vector> *outputs = nullptr, - const char *method_name = nullptr, - const char *key = nullptr) { - auto inputs__ = inputs ? 
_fbb.CreateVector>(*inputs) : 0; - auto outputs__ = outputs ? _fbb.CreateVector>(*outputs) : 0; - auto method_name__ = method_name ? _fbb.CreateString(method_name) : 0; - auto key__ = key ? _fbb.CreateString(key) : 0; - return tflite::CreateSignatureDef( - _fbb, - inputs__, - outputs__, - method_name__, - key__); -} - -flatbuffers::Offset CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -struct ModelT : public flatbuffers::NativeTable { - typedef Model TableType; - uint32_t version; - std::vector> operator_codes; - std::vector> subgraphs; - std::string description; - std::vector> buffers; - std::vector metadata_buffer; - std::vector> metadata; - std::vector> signature_defs; - ModelT() - : version(0) { - } -}; - -struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { - typedef ModelT NativeTableType; - enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { - VT_VERSION = 4, - VT_OPERATOR_CODES = 6, - VT_SUBGRAPHS = 8, - VT_DESCRIPTION = 10, - VT_BUFFERS = 12, - VT_METADATA_BUFFER = 14, - VT_METADATA = 16, - VT_SIGNATURE_DEFS = 18 - }; - uint32_t version() const { - return GetField(VT_VERSION, 0); - } - const flatbuffers::Vector> *operator_codes() const { - return GetPointer> *>(VT_OPERATOR_CODES); - } - const flatbuffers::Vector> *subgraphs() const { - return GetPointer> *>(VT_SUBGRAPHS); - } - const flatbuffers::String *description() const { - return GetPointer(VT_DESCRIPTION); - } - const flatbuffers::Vector> *buffers() const { - return GetPointer> *>(VT_BUFFERS); - } - const flatbuffers::Vector *metadata_buffer() const { - return GetPointer *>(VT_METADATA_BUFFER); - } - const flatbuffers::Vector> *metadata() const { - return GetPointer> *>(VT_METADATA); - } - const flatbuffers::Vector> *signature_defs() const { - return GetPointer> *>(VT_SIGNATURE_DEFS); - } - bool Verify(flatbuffers::Verifier &verifier) const { - return VerifyTableStart(verifier) && - VerifyField(verifier, VT_VERSION) && - VerifyOffset(verifier, VT_OPERATOR_CODES) && - verifier.VerifyVector(operator_codes()) && - verifier.VerifyVectorOfTables(operator_codes()) && - VerifyOffset(verifier, VT_SUBGRAPHS) && - verifier.VerifyVector(subgraphs()) && - verifier.VerifyVectorOfTables(subgraphs()) && - VerifyOffset(verifier, VT_DESCRIPTION) && - verifier.VerifyString(description()) && - VerifyOffset(verifier, VT_BUFFERS) && - verifier.VerifyVector(buffers()) && - verifier.VerifyVectorOfTables(buffers()) && - VerifyOffset(verifier, VT_METADATA_BUFFER) && - verifier.VerifyVector(metadata_buffer()) && - VerifyOffset(verifier, VT_METADATA) && - verifier.VerifyVector(metadata()) && - verifier.VerifyVectorOfTables(metadata()) && - VerifyOffset(verifier, VT_SIGNATURE_DEFS) && - verifier.VerifyVector(signature_defs()) && - verifier.VerifyVectorOfTables(signature_defs()) && - verifier.EndTable(); - } - ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; - void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; - static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); -}; - -struct ModelBuilder { - flatbuffers::FlatBufferBuilder &fbb_; - flatbuffers::uoffset_t start_; - void add_version(uint32_t version) { - fbb_.AddElement(Model::VT_VERSION, version, 0); - } - void add_operator_codes(flatbuffers::Offset>> operator_codes) { - 
fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes); - } - void add_subgraphs(flatbuffers::Offset>> subgraphs) { - fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs); - } - void add_description(flatbuffers::Offset description) { - fbb_.AddOffset(Model::VT_DESCRIPTION, description); - } - void add_buffers(flatbuffers::Offset>> buffers) { - fbb_.AddOffset(Model::VT_BUFFERS, buffers); - } - void add_metadata_buffer(flatbuffers::Offset> metadata_buffer) { - fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer); - } - void add_metadata(flatbuffers::Offset>> metadata) { - fbb_.AddOffset(Model::VT_METADATA, metadata); - } - void add_signature_defs(flatbuffers::Offset>> signature_defs) { - fbb_.AddOffset(Model::VT_SIGNATURE_DEFS, signature_defs); - } - explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) - : fbb_(_fbb) { - start_ = fbb_.StartTable(); - } - ModelBuilder &operator=(const ModelBuilder &); - flatbuffers::Offset Finish() { - const auto end = fbb_.EndTable(start_); - auto o = flatbuffers::Offset(end); - return o; - } -}; - -inline flatbuffers::Offset CreateModel( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t version = 0, - flatbuffers::Offset>> operator_codes = 0, - flatbuffers::Offset>> subgraphs = 0, - flatbuffers::Offset description = 0, - flatbuffers::Offset>> buffers = 0, - flatbuffers::Offset> metadata_buffer = 0, - flatbuffers::Offset>> metadata = 0, - flatbuffers::Offset>> signature_defs = 0) { - ModelBuilder builder_(_fbb); - builder_.add_signature_defs(signature_defs); - builder_.add_metadata(metadata); - builder_.add_metadata_buffer(metadata_buffer); - builder_.add_buffers(buffers); - builder_.add_description(description); - builder_.add_subgraphs(subgraphs); - builder_.add_operator_codes(operator_codes); - builder_.add_version(version); - return builder_.Finish(); -} - -inline flatbuffers::Offset CreateModelDirect( - flatbuffers::FlatBufferBuilder &_fbb, - uint32_t version = 0, - const std::vector> *operator_codes = nullptr, - const std::vector> *subgraphs = nullptr, - const char *description = nullptr, - const std::vector> *buffers = nullptr, - const std::vector *metadata_buffer = nullptr, - const std::vector> *metadata = nullptr, - const std::vector> *signature_defs = nullptr) { - auto operator_codes__ = operator_codes ? _fbb.CreateVector>(*operator_codes) : 0; - auto subgraphs__ = subgraphs ? _fbb.CreateVector>(*subgraphs) : 0; - auto description__ = description ? _fbb.CreateString(description) : 0; - auto buffers__ = buffers ? _fbb.CreateVector>(*buffers) : 0; - auto metadata_buffer__ = metadata_buffer ? _fbb.CreateVector(*metadata_buffer) : 0; - auto metadata__ = metadata ? _fbb.CreateVector>(*metadata) : 0; - auto signature_defs__ = signature_defs ? 
_fbb.CreateVector>(*signature_defs) : 0; - return tflite::CreateModel( - _fbb, - version, - operator_codes__, - subgraphs__, - description__, - buffers__, - metadata_buffer__, - metadata__, - signature_defs__); -} - -flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); - -inline CustomQuantizationT *CustomQuantization::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CustomQuantizationT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = custom(); if (_e) { _o->custom.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom.begin()); } } -} - -inline flatbuffers::Offset CustomQuantization::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCustomQuantization(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CustomQuantizationT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - _fbb.ForceVectorAlignment(_o->custom.size(), sizeof(uint8_t), 16); - auto _custom = _o->custom.size() ? _fbb.CreateVector(_o->custom) : 0; - return tflite::CreateCustomQuantization( - _fbb, - _custom); -} - -inline QuantizationParametersT *QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizationParametersT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } } - { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } } - { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } } - { auto _e = zero_point(); if (_e) { _o->zero_point.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } } - { auto _e = details_type(); _o->details.type = _e; } - { auto _e = details(); if (_e) _o->details.value = tflite::QuantizationDetailsUnion::UnPack(_e, details_type(), _resolver); } - { auto _e = quantized_dimension(); _o->quantized_dimension = _e; } -} - -inline flatbuffers::Offset QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizationParameters(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizationParametersT* __o; const 
flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0; - auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0; - auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0; - auto _zero_point = _o->zero_point.size() ? _fbb.CreateVector(_o->zero_point) : 0; - auto _details_type = _o->details.type; - auto _details = _o->details.Pack(_fbb); - auto _quantized_dimension = _o->quantized_dimension; - return tflite::CreateQuantizationParameters( - _fbb, - _min, - _max, - _scale, - _zero_point, - _details_type, - _details, - _quantized_dimension); -} - -inline Int32VectorT *Int32Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Int32VectorT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Int32Vector::UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset Int32Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateInt32Vector(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Int32VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0; - return tflite::CreateInt32Vector( - _fbb, - _values); -} - -inline Uint16VectorT *Uint16Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Uint16VectorT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Uint16Vector::UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset Uint16Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUint16Vector(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint16VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint16_t), 4); - auto _values = _o->values.size() ? 
_fbb.CreateVector(_o->values) : 0; - return tflite::CreateUint16Vector( - _fbb, - _values); -} - -inline Uint8VectorT *Uint8Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Uint8VectorT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Uint8Vector::UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = values(); if (_e) { _o->values.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->values.begin()); } } -} - -inline flatbuffers::Offset Uint8Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUint8Vector(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint8VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint8_t), 4); - auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0; - return tflite::CreateUint8Vector( - _fbb, - _values); -} - -inline DimensionMetadataT *DimensionMetadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DimensionMetadataT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DimensionMetadata::UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = format(); _o->format = _e; } - { auto _e = dense_size(); _o->dense_size = _e; } - { auto _e = array_segments_type(); _o->array_segments.type = _e; } - { auto _e = array_segments(); if (_e) _o->array_segments.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_segments_type(), _resolver); } - { auto _e = array_indices_type(); _o->array_indices.type = _e; } - { auto _e = array_indices(); if (_e) _o->array_indices.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_indices_type(), _resolver); } -} - -inline flatbuffers::Offset DimensionMetadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDimensionMetadata(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DimensionMetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _format = _o->format; - auto _dense_size = _o->dense_size; - auto _array_segments_type = _o->array_segments.type; - auto _array_segments = _o->array_segments.Pack(_fbb); - auto _array_indices_type = _o->array_indices.type; - auto _array_indices = _o->array_indices.Pack(_fbb); - return tflite::CreateDimensionMetadata( - _fbb, - _format, - _dense_size, - _array_segments_type, - _array_segments, - _array_indices_type, - _array_indices); -} - -inline SparsityParametersT *SparsityParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SparsityParametersT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SparsityParameters::UnPackTo(SparsityParametersT 
*_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = traversal_order(); if (_e) { _o->traversal_order.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->traversal_order[_i] = _e->Get(_i); } } } - { auto _e = block_map(); if (_e) { _o->block_map.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->block_map[_i] = _e->Get(_i); } } } - { auto _e = dim_metadata(); if (_e) { _o->dim_metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->dim_metadata[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } -} - -inline flatbuffers::Offset SparsityParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSparsityParameters(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparsityParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _traversal_order = _o->traversal_order.size() ? _fbb.CreateVector(_o->traversal_order) : 0; - auto _block_map = _o->block_map.size() ? _fbb.CreateVector(_o->block_map) : 0; - auto _dim_metadata = _o->dim_metadata.size() ? _fbb.CreateVector> (_o->dim_metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateDimensionMetadata(*__va->__fbb, __va->__o->dim_metadata[i].get(), __va->__rehasher); }, &_va ) : 0; - return tflite::CreateSparsityParameters( - _fbb, - _traversal_order, - _block_map, - _dim_metadata); -} - -inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TensorT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } } - { auto _e = type(); _o->type = _e; } - { auto _e = buffer(); _o->buffer = _e; } - { auto _e = name(); if (_e) _o->name = _e->str(); } - { auto _e = quantization(); if (_e) _o->quantization = std::unique_ptr(_e->UnPack(_resolver)); } - { auto _e = is_variable(); _o->is_variable = _e; } - { auto _e = sparsity(); if (_e) _o->sparsity = std::unique_ptr(_e->UnPack(_resolver)); } - { auto _e = shape_signature(); if (_e) { _o->shape_signature.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape_signature[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTensor(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _shape = _o->shape.size() ? 
_fbb.CreateVector(_o->shape) : 0; - auto _type = _o->type; - auto _buffer = _o->buffer; - auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); - auto _quantization = _o->quantization ? CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0; - auto _is_variable = _o->is_variable; - auto _sparsity = _o->sparsity ? CreateSparsityParameters(_fbb, _o->sparsity.get(), _rehasher) : 0; - auto _shape_signature = _o->shape_signature.size() ? _fbb.CreateVector(_o->shape_signature) : 0; - return tflite::CreateTensor( - _fbb, - _shape, - _type, - _buffer, - _name, - _quantization, - _is_variable, - _sparsity, - _shape_signature); -} - -inline Conv2DOptionsT *Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Conv2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } - { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } -} - -inline flatbuffers::Offset Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConv2DOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _fused_activation_function = _o->fused_activation_function; - auto _dilation_w_factor = _o->dilation_w_factor; - auto _dilation_h_factor = _o->dilation_h_factor; - return tflite::CreateConv2DOptions( - _fbb, - _padding, - _stride_w, - _stride_h, - _fused_activation_function, - _dilation_w_factor, - _dilation_h_factor); -} - -inline Conv3DOptionsT *Conv3DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Conv3DOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Conv3DOptions::UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_d(); _o->stride_d = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = dilation_d_factor(); _o->dilation_d_factor = _e; } - { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } - { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } -} - -inline flatbuffers::Offset Conv3DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConv3DOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t 
*_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv3DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_d = _o->stride_d; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _fused_activation_function = _o->fused_activation_function; - auto _dilation_d_factor = _o->dilation_d_factor; - auto _dilation_w_factor = _o->dilation_w_factor; - auto _dilation_h_factor = _o->dilation_h_factor; - return tflite::CreateConv3DOptions( - _fbb, - _padding, - _stride_d, - _stride_w, - _stride_h, - _fused_activation_function, - _dilation_d_factor, - _dilation_w_factor, - _dilation_h_factor); -} - -inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Pool2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } - { auto _e = filter_width(); _o->filter_width = _e; } - { auto _e = filter_height(); _o->filter_height = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePool2DOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _filter_width = _o->filter_width; - auto _filter_height = _o->filter_height; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreatePool2DOptions( - _fbb, - _padding, - _stride_w, - _stride_h, - _filter_width, - _filter_height, - _fused_activation_function); -} - -inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DepthwiseConv2DOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } - { auto _e = depth_multiplier(); _o->depth_multiplier = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } - { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } -} - -inline flatbuffers::Offset DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset 
CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - auto _depth_multiplier = _o->depth_multiplier; - auto _fused_activation_function = _o->fused_activation_function; - auto _dilation_w_factor = _o->dilation_w_factor; - auto _dilation_h_factor = _o->dilation_h_factor; - return tflite::CreateDepthwiseConv2DOptions( - _fbb, - _padding, - _stride_w, - _stride_h, - _depth_multiplier, - _fused_activation_function, - _dilation_w_factor, - _dilation_h_factor); -} - -inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ConcatEmbeddingsOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = num_channels(); _o->num_channels = _e; } - { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } } - { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_channels = _o->num_channels; - auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0; - auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? 
_fbb.CreateVector(_o->embedding_dim_per_channel) : 0; - return tflite::CreateConcatEmbeddingsOptions( - _fbb, - _num_channels, - _num_columns_per_channel, - _embedding_dim_per_channel); -} - -inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LSHProjectionOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = type(); _o->type = _e; } -} - -inline flatbuffers::Offset LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLSHProjectionOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _type = _o->type; - return tflite::CreateLSHProjectionOptions( - _fbb, - _type); -} - -inline SVDFOptionsT *SVDFOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SVDFOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = rank(); _o->rank = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSVDFOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _rank = _o->rank; - auto _fused_activation_function = _o->fused_activation_function; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateSVDFOptions( - _fbb, - _rank, - _fused_activation_function, - _asymmetric_quantize_inputs); -} - -inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRNNOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateRNNOptions( - _fbb, - _fused_activation_function, - _asymmetric_quantize_inputs); -} - -inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SequenceRNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset SequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSequenceRNNOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _time_major = _o->time_major; - auto _fused_activation_function = _o->fused_activation_function; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateSequenceRNNOptions( - _fbb, - _time_major, - _fused_activation_function, - _asymmetric_quantize_inputs); -} - -inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BidirectionalSequenceRNNOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = merge_outputs(); _o->merge_outputs = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset BidirectionalSequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _time_major = _o->time_major; - auto _fused_activation_function = _o->fused_activation_function; - auto _merge_outputs 
= _o->merge_outputs; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateBidirectionalSequenceRNNOptions( - _fbb, - _time_major, - _fused_activation_function, - _merge_outputs, - _asymmetric_quantize_inputs); -} - -inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FullyConnectedOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = weights_format(); _o->weights_format = _e; } - { auto _e = keep_num_dims(); _o->keep_num_dims = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFullyConnectedOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _weights_format = _o->weights_format; - auto _keep_num_dims = _o->keep_num_dims; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateFullyConnectedOptions( - _fbb, - _fused_activation_function, - _weights_format, - _keep_num_dims, - _asymmetric_quantize_inputs); -} - -inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SoftmaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = beta(); _o->beta = _e; } -} - -inline flatbuffers::Offset SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSoftmaxOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _beta = _o->beta; - return tflite::CreateSoftmaxOptions( - _fbb, - _beta); -} - -inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ConcatenationOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; } - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset 
ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateConcatenationOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateConcatenationOptions( - _fbb, - _axis, - _fused_activation_function); -} - -inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AddOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } -} - -inline flatbuffers::Offset AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAddOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _pot_scale_int16 = _o->pot_scale_int16; - return tflite::CreateAddOptions( - _fbb, - _fused_activation_function, - _pot_scale_int16); -} - -inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MulOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMulOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateMulOptions( - _fbb, - _fused_activation_function); -} - -inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new L2NormOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = 
fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateL2NormOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateL2NormOptions( - _fbb, - _fused_activation_function); -} - -inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LocalResponseNormalizationOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = radius(); _o->radius = _e; } - { auto _e = bias(); _o->bias = _e; } - { auto _e = alpha(); _o->alpha = _e; } - { auto _e = beta(); _o->beta = _e; } -} - -inline flatbuffers::Offset LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _radius = _o->radius; - auto _bias = _o->bias; - auto _alpha = _o->alpha; - auto _beta = _o->beta; - return tflite::CreateLocalResponseNormalizationOptions( - _fbb, - _radius, - _bias, - _alpha, - _beta); -} - -inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = cell_clip(); _o->cell_clip = _e; } - { auto _e = proj_clip(); _o->proj_clip = _e; } - { auto _e = kernel_type(); _o->kernel_type = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLSTMOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = 
{ &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _kernel_type = _o->kernel_type; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateLSTMOptions( - _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _kernel_type, - _asymmetric_quantize_inputs); -} - -inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UnidirectionalSequenceLSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = cell_clip(); _o->cell_clip = _e; } - { auto _e = proj_clip(); _o->proj_clip = _e; } - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset UnidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _time_major = _o->time_major; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateUnidirectionalSequenceLSTMOptions( - _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _time_major, - _asymmetric_quantize_inputs); -} - -inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BidirectionalSequenceLSTMOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = cell_clip(); _o->cell_clip = _e; } - { auto _e = proj_clip(); _o->proj_clip = _e; } - { auto _e = merge_outputs(); _o->merge_outputs = _e; } - { auto _e = time_major(); _o->time_major = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset BidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, 
const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _cell_clip = _o->cell_clip; - auto _proj_clip = _o->proj_clip; - auto _merge_outputs = _o->merge_outputs; - auto _time_major = _o->time_major; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateBidirectionalSequenceLSTMOptions( - _fbb, - _fused_activation_function, - _cell_clip, - _proj_clip, - _merge_outputs, - _time_major, - _asymmetric_quantize_inputs); -} - -inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ResizeBilinearOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = align_corners(); _o->align_corners = _e; } - { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } -} - -inline flatbuffers::Offset ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateResizeBilinearOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _align_corners = _o->align_corners; - auto _half_pixel_centers = _o->half_pixel_centers; - return tflite::CreateResizeBilinearOptions( - _fbb, - _align_corners, - _half_pixel_centers); -} - -inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ResizeNearestNeighborOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = align_corners(); _o->align_corners = _e; } - { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } -} - -inline flatbuffers::Offset ResizeNearestNeighborOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _align_corners = _o->align_corners; - auto _half_pixel_centers = _o->half_pixel_centers; - return tflite::CreateResizeNearestNeighborOptions( - _fbb, - _align_corners, - _half_pixel_centers); -} - -inline 
CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CallOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = subgraph(); _o->subgraph = _e; } -} - -inline flatbuffers::Offset CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCallOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _subgraph = _o->subgraph; - return tflite::CreateCallOptions( - _fbb, - _subgraph); -} - -inline PadOptionsT *PadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PadOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PadOptions::UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePadOptions( - _fbb); -} - -inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PadV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePadV2Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePadV2Options( - _fbb); -} - -inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReshapeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset 
ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReshapeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _new_shape = _o->new_shape.size() ? _fbb.CreateVector(_o->new_shape) : 0; - return tflite::CreateReshapeOptions( - _fbb, - _new_shape); -} - -inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceToBatchNDOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSpaceToBatchNDOptions( - _fbb); -} - -inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchToSpaceNDOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateBatchToSpaceNDOptions( - _fbb); -} - -inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SkipGramOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = ngram_size(); _o->ngram_size = _e; } - { auto _e = max_skip_size(); _o->max_skip_size = _e; } - { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; } -} - -inline flatbuffers::Offset SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const 
flatbuffers::rehasher_function_t *_rehasher) { - return CreateSkipGramOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _ngram_size = _o->ngram_size; - auto _max_skip_size = _o->max_skip_size; - auto _include_all_ngrams = _o->include_all_ngrams; - return tflite::CreateSkipGramOptions( - _fbb, - _ngram_size, - _max_skip_size, - _include_all_ngrams); -} - -inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SpaceToDepthOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = block_size(); _o->block_size = _e; } -} - -inline flatbuffers::Offset SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSpaceToDepthOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _block_size = _o->block_size; - return tflite::CreateSpaceToDepthOptions( - _fbb, - _block_size); -} - -inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DepthToSpaceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = block_size(); _o->block_size = _e; } -} - -inline flatbuffers::Offset DepthToSpaceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDepthToSpaceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _block_size = _o->block_size; - return tflite::CreateDepthToSpaceOptions( - _fbb, - _block_size); -} - -inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SubOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SubOptions::UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } - { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } -} - -inline 
flatbuffers::Offset SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSubOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - auto _pot_scale_int16 = _o->pot_scale_int16; - return tflite::CreateSubOptions( - _fbb, - _fused_activation_function, - _pot_scale_int16); -} - -inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DivOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DivOptions::UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } -} - -inline flatbuffers::Offset DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDivOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _fused_activation_function = _o->fused_activation_function; - return tflite::CreateDivOptions( - _fbb, - _fused_activation_function); -} - -inline TopKV2OptionsT *TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TopKV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTopKV2Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTopKV2Options( - _fbb); -} - -inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EmbeddingLookupSparseOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = combiner(); _o->combiner = _e; } -} - -inline flatbuffers::Offset EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t 
*_rehasher) { - return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _combiner = _o->combiner; - return tflite::CreateEmbeddingLookupSparseOptions( - _fbb, - _combiner); -} - -inline GatherOptionsT *GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; } - { auto _e = batch_dims(); _o->batch_dims = _e; } -} - -inline flatbuffers::Offset GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGatherOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - auto _batch_dims = _o->batch_dims; - return tflite::CreateGatherOptions( - _fbb, - _axis, - _batch_dims); -} - -inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTransposeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTransposeOptions( - _fbb); -} - -inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct 
_VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateExpOptions( - _fbb); -} - -inline CosOptionsT *CosOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CosOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CosOptions::UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset CosOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCosOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateCosOptions( - _fbb); -} - -inline ReducerOptionsT *ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReducerOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = keep_dims(); _o->keep_dims = _e; } -} - -inline flatbuffers::Offset ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReducerOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _keep_dims = _o->keep_dims; - return tflite::CreateReducerOptions( - _fbb, - _keep_dims); -} - -inline SqueezeOptionsT *SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SqueezeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } } -} - -inline flatbuffers::Offset SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSqueezeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _squeeze_dims = _o->squeeze_dims.size() ? 
_fbb.CreateVector(_o->squeeze_dims) : 0; - return tflite::CreateSqueezeOptions( - _fbb, - _squeeze_dims); -} - -inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SplitOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = num_splits(); _o->num_splits = _e; } -} - -inline flatbuffers::Offset SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSplitOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_splits = _o->num_splits; - return tflite::CreateSplitOptions( - _fbb, - _num_splits); -} - -inline SplitVOptionsT *SplitVOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SplitVOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = num_splits(); _o->num_splits = _e; } -} - -inline flatbuffers::Offset SplitVOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSplitVOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitVOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num_splits = _o->num_splits; - return tflite::CreateSplitVOptions( - _fbb, - _num_splits); -} - -inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new StridedSliceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = begin_mask(); _o->begin_mask = _e; } - { auto _e = end_mask(); _o->end_mask = _e; } - { auto _e = ellipsis_mask(); _o->ellipsis_mask = _e; } - { auto _e = new_axis_mask(); _o->new_axis_mask = _e; } - { auto _e = shrink_axis_mask(); _o->shrink_axis_mask = _e; } -} - -inline flatbuffers::Offset StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateStridedSliceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; 
(void)_va; - auto _begin_mask = _o->begin_mask; - auto _end_mask = _o->end_mask; - auto _ellipsis_mask = _o->ellipsis_mask; - auto _new_axis_mask = _o->new_axis_mask; - auto _shrink_axis_mask = _o->shrink_axis_mask; - return tflite::CreateStridedSliceOptions( - _fbb, - _begin_mask, - _end_mask, - _ellipsis_mask, - _new_axis_mask, - _shrink_axis_mask); -} - -inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogSoftmaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogSoftmaxOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogSoftmaxOptions( - _fbb); -} - -inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CastOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CastOptions::UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = in_data_type(); _o->in_data_type = _e; } - { auto _e = out_data_type(); _o->out_data_type = _e; } -} - -inline flatbuffers::Offset CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCastOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _in_data_type = _o->in_data_type; - auto _out_data_type = _o->out_data_type; - return tflite::CreateCastOptions( - _fbb, - _in_data_type, - _out_data_type); -} - -inline DequantizeOptionsT *DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DequantizeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDequantizeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; - return tflite::CreateDequantizeOptions( - _fbb); -} - -inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MaximumMinimumOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMaximumMinimumOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMaximumMinimumOptions( - _fbb); -} - -inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TileOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TileOptions::UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTileOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateTileOptions( - _fbb); -} - -inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMaxOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = output_type(); _o->output_type = _e; } -} - -inline flatbuffers::Offset ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMaxOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _output_type = _o->output_type; - return tflite::CreateArgMaxOptions( - _fbb, - _output_type); -} - -inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ArgMinOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - 
(void)_resolver; - { auto _e = output_type(); _o->output_type = _e; } -} - -inline flatbuffers::Offset ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateArgMinOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _output_type = _o->output_type; - return tflite::CreateArgMinOptions( - _fbb, - _output_type); -} - -inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GreaterOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGreaterOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGreaterOptions( - _fbb); -} - -inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GreaterEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGreaterEqualOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGreaterEqualOptions( - _fbb); -} - -inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LessOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LessOptions::UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLessOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - 
(void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLessOptions( - _fbb); -} - -inline LessEqualOptionsT *LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LessEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLessEqualOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLessEqualOptions( - _fbb); -} - -inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NegOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NegOptions::UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNegOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNegOptions( - _fbb); -} - -inline SelectOptionsT *SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SelectOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSelectOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSelectOptions( - _fbb); -} - -inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SliceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - 
(void)_resolver; -} - -inline flatbuffers::Offset SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSliceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSliceOptions( - _fbb); -} - -inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TransposeConvOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = padding(); _o->padding = _e; } - { auto _e = stride_w(); _o->stride_w = _e; } - { auto _e = stride_h(); _o->stride_h = _e; } -} - -inline flatbuffers::Offset TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTransposeConvOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _padding = _o->padding; - auto _stride_w = _o->stride_w; - auto _stride_h = _o->stride_h; - return tflite::CreateTransposeConvOptions( - _fbb, - _padding, - _stride_w, - _stride_h); -} - -inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ExpandDimsOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateExpandDimsOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateExpandDimsOptions( - _fbb); -} - -inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SparseToDenseOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = validate_indices(); _o->validate_indices = _e; } -} - -inline flatbuffers::Offset 
SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSparseToDenseOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _validate_indices = _o->validate_indices; - return tflite::CreateSparseToDenseOptions( - _fbb, - _validate_indices); -} - -inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new EqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateEqualOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateEqualOptions( - _fbb); -} - -inline NotEqualOptionsT *NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NotEqualOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNotEqualOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNotEqualOptions( - _fbb); -} - -inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ShapeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = out_type(); _o->out_type = _e; } -} - -inline flatbuffers::Offset ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateShapeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { 
flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _out_type = _o->out_type; - return tflite::CreateShapeOptions( - _fbb, - _out_type); -} - -inline RankOptionsT *RankOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RankOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void RankOptions::UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset RankOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRankOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateRankOptions( - _fbb); -} - -inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PowOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePowOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreatePowOptions( - _fbb); -} - -inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FakeQuantOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = min(); _o->min = _e; } - { auto _e = max(); _o->max = _e; } - { auto _e = num_bits(); _o->num_bits = _e; } - { auto _e = narrow_range(); _o->narrow_range = _e; } -} - -inline flatbuffers::Offset FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFakeQuantOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _min = _o->min; - auto _max = _o->max; - auto _num_bits = _o->num_bits; - auto _narrow_range = _o->narrow_range; - return tflite::CreateFakeQuantOptions( - _fbb, - _min, - _max, - _num_bits, - _narrow_range); -} - -inline 
PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new PackOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = values_count(); _o->values_count = _e; } - { auto _e = axis(); _o->axis = _e; } -} - -inline flatbuffers::Offset PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreatePackOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _values_count = _o->values_count; - auto _axis = _o->axis; - return tflite::CreatePackOptions( - _fbb, - _values_count, - _axis); -} - -inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalOrOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalOrOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalOrOptions( - _fbb); -} - -inline OneHotOptionsT *OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OneHotOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = axis(); _o->axis = _e; } -} - -inline flatbuffers::Offset OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOneHotOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _axis = _o->axis; - return tflite::CreateOneHotOptions( - _fbb, - _axis); -} - -inline AbsOptionsT *AbsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AbsOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void AbsOptions::UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - 
-inline flatbuffers::Offset AbsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAbsOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateAbsOptions( - _fbb); -} - -inline HardSwishOptionsT *HardSwishOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new HardSwishOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset HardSwishOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateHardSwishOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateHardSwishOptions( - _fbb); -} - -inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalAndOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalAndOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalAndOptions( - _fbb); -} - -inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LogicalNotOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLogicalNotOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; 
const LogicalNotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateLogicalNotOptions( - _fbb); -} - -inline UnpackOptionsT *UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UnpackOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = num(); _o->num = _e; } - { auto _e = axis(); _o->axis = _e; } -} - -inline flatbuffers::Offset UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUnpackOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _num = _o->num; - auto _axis = _o->axis; - return tflite::CreateUnpackOptions( - _fbb, - _num, - _axis); -} - -inline FloorDivOptionsT *FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FloorDivOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFloorDivOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFloorDivOptions( - _fbb); -} - -inline SquareOptionsT *SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SquareOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSquareOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSquareOptions( - _fbb); -} - -inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ZerosLikeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void 
ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateZerosLikeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateZerosLikeOptions( - _fbb); -} - -inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FillOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FillOptions::UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFillOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFillOptions( - _fbb); -} - -inline FloorModOptionsT *FloorModOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new FloorModOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset FloorModOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateFloorModOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorModOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateFloorModOptions( - _fbb); -} - -inline RangeOptionsT *RangeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new RangeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void RangeOptions::UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset RangeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRangeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct 
_VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateRangeOptions( - _fbb); -} - -inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new LeakyReluOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = alpha(); _o->alpha = _e; } -} - -inline flatbuffers::Offset LeakyReluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateLeakyReluOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _alpha = _o->alpha; - return tflite::CreateLeakyReluOptions( - _fbb, - _alpha); -} - -inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SquaredDifferenceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SquaredDifferenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSquaredDifferenceOptions( - _fbb); -} - -inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MirrorPadOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = mode(); _o->mode = _e; } -} - -inline flatbuffers::Offset MirrorPadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMirrorPadOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _mode = _o->mode; - return tflite::CreateMirrorPadOptions( - _fbb, - _mode); -} - 
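The lines added later in this hunk introduce BuiltinOptionsUnionTraits<T> specializations, which record each native option type's BuiltinOptions enum tag at compile time so generic code no longer needs a hand-written switch over every option type. Below is a minimal sketch of that traits idiom using hypothetical stand-in types rather than the real generated ones; it assumes nothing beyond standard C++ and is illustrative only.

#include <cassert>

// Hypothetical stand-ins for the generated union tag and native option types.
enum BuiltinOptions {
  BuiltinOptions_NONE = 0,
  BuiltinOptions_AbsOptions,
  BuiltinOptions_PackOptions
};
struct AbsOptionsT {};
struct PackOptionsT { int axis = 0; };

// Primary template is only declared: mapping an unknown type is a compile error.
template<typename T> struct BuiltinOptionsUnionTraits;

template<> struct BuiltinOptionsUnionTraits<AbsOptionsT> {
  static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions;
};
template<> struct BuiltinOptionsUnionTraits<PackOptionsT> {
  static const BuiltinOptions enum_value = BuiltinOptions_PackOptions;
};

// Generic code can recover the union tag from the static type alone.
template<typename T> BuiltinOptions TagOf(const T&) {
  return BuiltinOptionsUnionTraits<T>::enum_value;
}

int main() {
  PackOptionsT pack;
  assert(TagOf(pack) == BuiltinOptions_PackOptions);
  return 0;
}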
-inline UniqueOptionsT *UniqueOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new UniqueOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = idx_out_type(); _o->idx_out_type = _e; } -} - -inline flatbuffers::Offset UniqueOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateUniqueOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _idx_out_type = _o->idx_out_type; - return tflite::CreateUniqueOptions( - _fbb, - _idx_out_type); -} - -inline ReverseV2OptionsT *ReverseV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReverseV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ReverseV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReverseV2Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateReverseV2Options( - _fbb); -} - -inline AddNOptionsT *AddNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new AddNOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void AddNOptions::UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset AddNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateAddNOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateAddNOptions( - _fbb); -} - -inline GatherNdOptionsT *GatherNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new GatherNdOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset GatherNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* 
_o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateGatherNdOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateGatherNdOptions( - _fbb); -} - -inline WhereOptionsT *WhereOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new WhereOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset WhereOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateWhereOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateWhereOptions( - _fbb); -} - -inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ReverseSequenceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = seq_dim(); _o->seq_dim = _e; } - { auto _e = batch_dim(); _o->batch_dim = _e; } -} - -inline flatbuffers::Offset ReverseSequenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateReverseSequenceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _seq_dim = _o->seq_dim; - auto _batch_dim = _o->batch_dim; - return tflite::CreateReverseSequenceOptions( - _fbb, - _seq_dim, - _batch_dim); -} - -inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MatrixDiagOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset MatrixDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMatrixDiagOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMatrixDiagOptions( - _fbb); -} - -inline QuantizeOptionsT *QuantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new QuantizeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset QuantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateQuantizeOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateQuantizeOptions( - _fbb); -} - -inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MatrixSetDiagOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset MatrixSetDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixSetDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateMatrixSetDiagOptions( - _fbb); -} - -inline IfOptionsT *IfOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new IfOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void IfOptions::UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = then_subgraph_index(); _o->then_subgraph_index = _e; } - { auto _e = else_subgraph_index(); _o->else_subgraph_index = _e; } -} - -inline flatbuffers::Offset IfOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateIfOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _then_subgraph_index = _o->then_subgraph_index; - auto _else_subgraph_index = _o->else_subgraph_index; - 
return tflite::CreateIfOptions( - _fbb, - _then_subgraph_index, - _else_subgraph_index); -} - -inline CallOnceOptionsT *CallOnceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CallOnceOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = init_subgraph_index(); _o->init_subgraph_index = _e; } -} - -inline flatbuffers::Offset CallOnceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCallOnceOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _init_subgraph_index = _o->init_subgraph_index; - return tflite::CreateCallOnceOptions( - _fbb, - _init_subgraph_index); -} - -inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new WhileOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; } - { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; } -} - -inline flatbuffers::Offset WhileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateWhileOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _cond_subgraph_index = _o->cond_subgraph_index; - auto _body_subgraph_index = _o->body_subgraph_index; - return tflite::CreateWhileOptions( - _fbb, - _cond_subgraph_index, - _body_subgraph_index); -} - -inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NonMaxSuppressionV4OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NonMaxSuppressionV4Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const flatbuffers::rehasher_function_t 
*__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNonMaxSuppressionV4Options( - _fbb); -} - -inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new NonMaxSuppressionV5OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset NonMaxSuppressionV5Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateNonMaxSuppressionV5Options( - _fbb); -} - -inline ScatterNdOptionsT *ScatterNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ScatterNdOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset ScatterNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateScatterNdOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateScatterNdOptions( - _fbb); -} - -inline SelectV2OptionsT *SelectV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SelectV2OptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SelectV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSelectV2Options(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSelectV2Options( - _fbb); -} - -inline DensifyOptionsT *DensifyOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new DensifyOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void DensifyOptions::UnPackTo(DensifyOptionsT *_o, 
const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset DensifyOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateDensifyOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DensifyOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateDensifyOptions( - _fbb); -} - -inline SegmentSumOptionsT *SegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SegmentSumOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset SegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSegmentSumOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateSegmentSumOptions( - _fbb); -} - -inline BatchMatMulOptionsT *BatchMatMulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BatchMatMulOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = adj_x(); _o->adj_x = _e; } - { auto _e = adj_y(); _o->adj_y = _e; } - { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } -} - -inline flatbuffers::Offset BatchMatMulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBatchMatMulOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _adj_x = _o->adj_x; - auto _adj_y = _o->adj_y; - auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; - return tflite::CreateBatchMatMulOptions( - _fbb, - _adj_x, - _adj_y, - _asymmetric_quantize_inputs); -} - -inline CumsumOptionsT *CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new CumsumOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void CumsumOptions::UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = exclusive(); _o->exclusive 
= _e; } - { auto _e = reverse(); _o->reverse = _e; } -} - -inline flatbuffers::Offset CumsumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateCumsumOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _exclusive = _o->exclusive; - auto _reverse = _o->reverse; - return tflite::CreateCumsumOptions( - _fbb, - _exclusive, - _reverse); -} - -inline BroadcastToOptionsT *BroadcastToOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BroadcastToOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset BroadcastToOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBroadcastToOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BroadcastToOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateBroadcastToOptions( - _fbb); -} - -inline Rfft2dOptionsT *Rfft2dOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new Rfft2dOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void Rfft2dOptions::UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} - -inline flatbuffers::Offset Rfft2dOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateRfft2dOptions(_fbb, _o, _rehasher); -} - -inline flatbuffers::Offset CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Rfft2dOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateRfft2dOptions( - _fbb); -} - -inline HashtableOptionsT *HashtableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new HashtableOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} - -inline void HashtableOptions::UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = table_id(); _o->table_id = _e; } - { auto _e = key_dtype(); _o->key_dtype = _e; } - { auto _e = value_dtype(); _o->value_dtype = _e; } -} - -inline flatbuffers::Offset HashtableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateHashtableOptions(_fbb, 
_o, _rehasher); -} - -inline flatbuffers::Offset CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _table_id = _o->table_id; - auto _key_dtype = _o->key_dtype; - auto _value_dtype = _o->value_dtype; - return tflite::CreateHashtableOptions( - _fbb, - _table_id, - _key_dtype, - _value_dtype); -} - -inline HashtableFindOptionsT *HashtableFindOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new HashtableFindOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ExpandDimsOptions; +}; -inline void HashtableFindOptions::UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_EqualOptions; +}; -inline flatbuffers::Offset HashtableFindOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateHashtableFindOptions(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NotEqualOptions; +}; -inline flatbuffers::Offset CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableFindOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateHashtableFindOptions( - _fbb); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ShapeOptions; +}; -inline HashtableImportOptionsT *HashtableImportOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new HashtableImportOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_PowOptions; +}; -inline void HashtableImportOptions::UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ArgMinOptions; +}; -inline flatbuffers::Offset HashtableImportOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateHashtableImportOptions(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FakeQuantOptions; +}; -inline flatbuffers::Offset CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableImportOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateHashtableImportOptions( - _fbb); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_PackOptions; +}; -inline HashtableSizeOptionsT *HashtableSizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new HashtableSizeOptionsT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LogicalOrOptions; +}; -inline void HashtableSizeOptions::UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_OneHotOptions; +}; -inline flatbuffers::Offset HashtableSizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateHashtableSizeOptions(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LogicalAndOptions; +}; -inline flatbuffers::Offset CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableSizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - return tflite::CreateHashtableSizeOptions( - _fbb); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LogicalNotOptions; +}; -inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OperatorCodeT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnpackOptions; +}; -inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = deprecated_builtin_code(); _o->deprecated_builtin_code = _e; } - { auto _e = custom_code(); if (_e) _o->custom_code = _e->str(); } - { auto _e = version(); _o->version = _e; } - { auto _e = builtin_code(); _o->builtin_code = _e; } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FloorDivOptions; +}; -inline flatbuffers::Offset OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOperatorCode(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SquareOptions; +}; -inline flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _deprecated_builtin_code = _o->deprecated_builtin_code; - auto _custom_code = _o->custom_code.empty() ? 
0 : _fbb.CreateString(_o->custom_code); - auto _version = _o->version; - auto _builtin_code = _o->builtin_code; - return tflite::CreateOperatorCode( - _fbb, - _deprecated_builtin_code, - _custom_code, - _version, - _builtin_code); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ZerosLikeOptions; +}; -inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new OperatorT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FillOptions; +}; -inline void Operator::UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = opcode_index(); _o->opcode_index = _e; } - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } } - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } } - { auto _e = builtin_options_type(); _o->builtin_options.type = _e; } - { auto _e = builtin_options(); if (_e) _o->builtin_options.value = tflite::BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); } - { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom_options.begin()); } } - { auto _e = custom_options_format(); _o->custom_options_format = _e; } - { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } } - { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceLSTMOptions; +}; -inline flatbuffers::Offset Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateOperator(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BidirectionalSequenceRNNOptions; +}; -inline flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _opcode_index = _o->opcode_index; - auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; - auto _builtin_options_type = _o->builtin_options.type; - auto _builtin_options = _o->builtin_options.Pack(_fbb); - auto _custom_options = _o->custom_options.size() ? _fbb.CreateVector(_o->custom_options) : 0; - auto _custom_options_format = _o->custom_options_format; - auto _mutating_variable_inputs = _o->mutating_variable_inputs.size() ? _fbb.CreateVector(_o->mutating_variable_inputs) : 0; - auto _intermediates = _o->intermediates.size() ? 
_fbb.CreateVector(_o->intermediates) : 0; - return tflite::CreateOperator( - _fbb, - _opcode_index, - _inputs, - _outputs, - _builtin_options_type, - _builtin_options, - _custom_options, - _custom_options_format, - _mutating_variable_inputs, - _intermediates); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnidirectionalSequenceLSTMOptions; +}; -inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SubGraphT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_FloorModOptions; +}; -inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } } - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } } - { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operators[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = name(); if (_e) _o->name = _e->str(); } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RangeOptions; +}; -inline flatbuffers::Offset SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSubGraph(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ResizeNearestNeighborOptions; +}; -inline flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _tensors = _o->tensors.size() ? _fbb.CreateVector> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; - auto _operators = _o->operators.size() ? _fbb.CreateVector> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _name = _o->name.empty() ? 
0 : _fbb.CreateString(_o->name); - return tflite::CreateSubGraph( - _fbb, - _tensors, - _inputs, - _outputs, - _operators, - _name); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_LeakyReluOptions; +}; -inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new BufferT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SquaredDifferenceOptions; +}; -inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MirrorPadOptions; +}; -inline flatbuffers::Offset Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateBuffer(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AbsOptions; +}; -inline flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - _fbb.ForceVectorAlignment(_o->data.size(), sizeof(uint8_t), 16); - auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0; - return tflite::CreateBuffer( - _fbb, - _data); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SplitVOptions; +}; -inline MetadataT *Metadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new MetadataT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UniqueOptions; +}; -inline void Metadata::UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = name(); if (_e) _o->name = _e->str(); } - { auto _e = buffer(); _o->buffer = _e; } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReverseV2Options; +}; -inline flatbuffers::Offset Metadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateMetadata(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AddNOptions; +}; -inline flatbuffers::Offset CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _name = _o->name.empty() ? 
0 : _fbb.CreateString(_o->name); - auto _buffer = _o->buffer; - return tflite::CreateMetadata( - _fbb, - _name, - _buffer); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GatherNdOptions; +}; -inline TensorMapT *TensorMap::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new TensorMapT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CosOptions; +}; -inline void TensorMap::UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = name(); if (_e) _o->name = _e->str(); } - { auto _e = tensor_index(); _o->tensor_index = _e; } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_WhereOptions; +}; -inline flatbuffers::Offset TensorMap::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateTensorMap(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RankOptions; +}; -inline flatbuffers::Offset CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorMapT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); - auto _tensor_index = _o->tensor_index; - return tflite::CreateTensorMap( - _fbb, - _name, - _tensor_index); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReverseSequenceOptions; +}; -inline SignatureDefT *SignatureDef::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new SignatureDefT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MatrixDiagOptions; +}; -inline void SignatureDef::UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = method_name(); if (_e) _o->method_name = _e->str(); } - { auto _e = key(); if (_e) _o->key = _e->str(); } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_QuantizeOptions; +}; -inline flatbuffers::Offset SignatureDef::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateSignatureDef(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_MatrixSetDiagOptions; +}; -inline flatbuffers::Offset CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - 
(void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignatureDefT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; - auto _inputs = _o->inputs.size() ? _fbb.CreateVector> (_o->inputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _outputs = _o->outputs.size() ? _fbb.CreateVector> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _method_name = _o->method_name.empty() ? 0 : _fbb.CreateString(_o->method_name); - auto _key = _o->key.empty() ? 0 : _fbb.CreateString(_o->key); - return tflite::CreateSignatureDef( - _fbb, - _inputs, - _outputs, - _method_name, - _key); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HardSwishOptions; +}; -inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const { - auto _o = new ModelT(); - UnPackTo(_o, _resolver); - return _o; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_IfOptions; +}; -inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const { - (void)_o; - (void)_resolver; - { auto _e = version(); _o->version = _e; } - { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->operator_codes[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->subgraphs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = description(); if (_e) _o->description = _e->str(); } - { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->buffers[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } } - { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } - { auto _e = signature_defs(); if (_e) { _o->signature_defs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->signature_defs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); } } } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_WhileOptions; +}; -inline flatbuffers::Offset Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) { - return CreateModel(_fbb, _o, _rehasher); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DepthToSpaceOptions; +}; -inline flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher) { - (void)_rehasher; - (void)_o; - struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; - auto _version = _o->version; - auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _subgraphs = _o->subgraphs.size() ? _fbb.CreateVector> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description); - auto _buffers = _o->buffers.size() ? _fbb.CreateVector> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0; - auto _metadata = _o->metadata.size() ? _fbb.CreateVector> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0; - auto _signature_defs = _o->signature_defs.size() ? _fbb.CreateVector> (_o->signature_defs.size(), [](size_t i, _VectorArgs *__va) { return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(), __va->__rehasher); }, &_va ) : 0; - return tflite::CreateModel( - _fbb, - _version, - _operator_codes, - _subgraphs, - _description, - _buffers, - _metadata_buffer, - _metadata, - _signature_defs); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV4Options; +}; -inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) { - switch (type) { - case QuantizationDetails_NONE: { - return true; - } - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return true; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_NonMaxSuppressionV5Options; +}; -inline bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyQuantizationDetails( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ScatterNdOptions; +}; -inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver) { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SelectV2Options; +}; -inline flatbuffers::Offset QuantizationDetailsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(value); - return CreateCustomQuantization(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} +template<> 
struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DensifyOptions; +}; -inline QuantizationDetailsUnion::QuantizationDetailsUnion(const QuantizationDetailsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case QuantizationDetails_CustomQuantization: { - value = new tflite::CustomQuantizationT(*reinterpret_cast(u.value)); - break; - } - default: - break; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SegmentSumOptions; +}; -inline void QuantizationDetailsUnion::Reset() { - switch (type) { - case QuantizationDetails_CustomQuantization: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - default: break; - } - value = nullptr; - type = QuantizationDetails_NONE; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BatchMatMulOptions; +}; -inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type) { - switch (type) { - case SparseIndexVector_NONE: { - return true; - } - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return true; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CumsumOptions; +}; -inline bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifySparseIndexVector( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_CallOnceOptions; +}; -inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver) { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_BroadcastToOptions; +}; -inline flatbuffers::Offset SparseIndexVectorUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(value); - return CreateInt32Vector(_fbb, ptr, _rehasher).Union(); - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(value); - return CreateUint16Vector(_fbb, ptr, _rehasher).Union(); - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(value); - return CreateUint8Vector(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const 
BuiltinOptions enum_value = BuiltinOptions_Rfft2dOptions; +}; -inline SparseIndexVectorUnion::SparseIndexVectorUnion(const SparseIndexVectorUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case SparseIndexVector_Int32Vector: { - value = new tflite::Int32VectorT(*reinterpret_cast(u.value)); - break; - } - case SparseIndexVector_Uint16Vector: { - value = new tflite::Uint16VectorT(*reinterpret_cast(u.value)); - break; - } - case SparseIndexVector_Uint8Vector: { - value = new tflite::Uint8VectorT(*reinterpret_cast(u.value)); - break; - } - default: - break; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_Conv3DOptions; +}; -inline void SparseIndexVectorUnion::Reset() { - switch (type) { - case SparseIndexVector_Int32Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case SparseIndexVector_Uint16Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case SparseIndexVector_Uint8Vector: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - default: break; - } - value = nullptr; - type = SparseIndexVector_NONE; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableOptions; +}; -inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) { - switch (type) { - case BuiltinOptions_NONE: { - return true; - } - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = 
reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return 
verifier.VerifyTable(ptr); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AbsOptions: { - auto ptr = 
reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_Conv3DOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HashtableOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HashtableFindOptions: { - auto ptr = reinterpret_cast(obj); 
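// The reinterpret_cast calls throughout VerifyBuiltinOptions are likewise missing their
// template arguments; each case presumably casts obj to the table type named by its
// case label before verifying it. A minimal sketch of one case, assuming the standard
// generated form:
//
//   case BuiltinOptions_Conv2DOptions: {
//     auto ptr = reinterpret_cast<const tflite::Conv2DOptions *>(obj);
//     return verifier.VerifyTable(ptr);
//   }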
- return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HashtableImportOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - case BuiltinOptions_HashtableSizeOptions: { - auto ptr = reinterpret_cast(obj); - return verifier.VerifyTable(ptr); - } - default: return true; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableFindOptions; +}; -inline bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { - if (!values || !types) return !values && !types; - if (values->size() != types->size()) return false; - for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { - if (!VerifyBuiltinOptions( - verifier, values->Get(i), types->GetEnum(i))) { - return false; - } - } - return true; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableImportOptions; +}; -inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(obj); - return 
ptr->UnPack(resolver); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case 
BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case 
BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_Conv3DOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HashtableOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HashtableFindOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HashtableImportOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - case BuiltinOptions_HashtableSizeOptions: { - auto ptr = reinterpret_cast(obj); - return ptr->UnPack(resolver); - } - default: return nullptr; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_HashtableSizeOptions; +}; -inline flatbuffers::Offset BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateConv2DOptions(_fbb, ptr, _rehasher).Union(); 
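// In BuiltinOptionsUnion::Pack, each case presumably casts the owned value pointer to
// the corresponding object-API (suffix-T) type, rebuilds the table with its
// Create*Options helper, and returns the offset type-erased via .Union(). A minimal
// sketch of the Conv2DOptions case above, with the assumed cast restored:
//
//   case BuiltinOptions_Conv2DOptions: {
//     auto ptr = reinterpret_cast<const tflite::Conv2DOptionsT *>(value);
//     return CreateConv2DOptions(_fbb, ptr, _rehasher).Union();
//   }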
- } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); - return CreatePool2DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); - return CreateSVDFOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(value); - return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); - return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); - return CreateL2NormOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); - return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); - return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(value); - return CreateCallOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateReshapeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(value); - return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(value); - return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); - return CreateMulOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); - return CreatePadOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - return CreateSpaceToBatchNDOptions(_fbb, ptr, 
_rehasher).Union(); - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - return CreateReducerOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - return CreateSubOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - return CreateDivOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - return CreateTopKV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(value); - return CreateSplitOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - return CreateCastOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - return CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - return CreateNegOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - return CreatePadV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - return CreateSelectOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSliceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SparseToDenseOptions: { - auto ptr = reinterpret_cast(value); - 
return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - return CreateTileOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - return CreateShapeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(value); - return CreatePowOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - return CreateArgMinOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - return CreatePackOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - return CreateOneHotOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnpackOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquareOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - return CreateFillOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateBidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - return CreateBidirectionalSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - return CreateUnidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(value); - return CreateFloorModOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(value); - return CreateRangeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(value); - 
return CreateResizeNearestNeighborOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(value); - return CreateLeakyReluOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(value); - return CreateSquaredDifferenceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(value); - return CreateMirrorPadOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(value); - return CreateAbsOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(value); - return CreateSplitVOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(value); - return CreateUniqueOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(value); - return CreateReverseV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(value); - return CreateAddNOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = reinterpret_cast(value); - return CreateGatherNdOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(value); - return CreateCosOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(value); - return CreateWhereOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(value); - return CreateRankOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(value); - return CreateReverseSequenceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(value); - return CreateMatrixDiagOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateQuantizeOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(value); - return CreateMatrixSetDiagOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(value); - return CreateHardSwishOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(value); - return CreateIfOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(value); - return CreateWhileOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(value); - return CreateDepthToSpaceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(value); - return CreateNonMaxSuppressionV4Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(value); - return CreateNonMaxSuppressionV5Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(value); - return CreateScatterNdOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(value); - return 
CreateSelectV2Options(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(value); - return CreateDensifyOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(value); - return CreateSegmentSumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(value); - return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(value); - return CreateCumsumOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(value); - return CreateCallOnceOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(value); - return CreateBroadcastToOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(value); - return CreateRfft2dOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_Conv3DOptions: { - auto ptr = reinterpret_cast(value); - return CreateConv3DOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HashtableOptions: { - auto ptr = reinterpret_cast(value); - return CreateHashtableOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HashtableFindOptions: { - auto ptr = reinterpret_cast(value); - return CreateHashtableFindOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HashtableImportOptions: { - auto ptr = reinterpret_cast(value); - return CreateHashtableImportOptions(_fbb, ptr, _rehasher).Union(); - } - case BuiltinOptions_HashtableSizeOptions: { - auto ptr = reinterpret_cast(value); - return CreateHashtableSizeOptions(_fbb, ptr, _rehasher).Union(); - } - default: return 0; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_VarHandleOptions; +}; -inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) FLATBUFFERS_NOEXCEPT : type(u.type), value(nullptr) { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - value = new tflite::Conv2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DepthwiseConv2DOptions: { - value = new tflite::DepthwiseConv2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - value = new tflite::ConcatEmbeddingsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LSHProjectionOptions: { - value = new tflite::LSHProjectionOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_Pool2DOptions: { - value = new tflite::Pool2DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SVDFOptions: { - value = new tflite::SVDFOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_RNNOptions: { - value = new tflite::RNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FullyConnectedOptions: { - value = new tflite::FullyConnectedOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SoftmaxOptions: { - value = new tflite::SoftmaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ConcatenationOptions: { - value = new tflite::ConcatenationOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_AddOptions: { - value = new tflite::AddOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_L2NormOptions: { - value 
= new tflite::L2NormOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - value = new tflite::LocalResponseNormalizationOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LSTMOptions: { - value = new tflite::LSTMOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ResizeBilinearOptions: { - value = new tflite::ResizeBilinearOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CallOptions: { - value = new tflite::CallOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReshapeOptions: { - value = new tflite::ReshapeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SkipGramOptions: { - value = new tflite::SkipGramOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SpaceToDepthOptions: { - value = new tflite::SpaceToDepthOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - value = new tflite::EmbeddingLookupSparseOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MulOptions: { - value = new tflite::MulOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PadOptions: { - value = new tflite::PadOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GatherOptions: { - value = new tflite::GatherOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BatchToSpaceNDOptions: { - value = new tflite::BatchToSpaceNDOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SpaceToBatchNDOptions: { - value = new tflite::SpaceToBatchNDOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TransposeOptions: { - value = new tflite::TransposeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReducerOptions: { - value = new tflite::ReducerOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SubOptions: { - value = new tflite::SubOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DivOptions: { - value = new tflite::DivOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SqueezeOptions: { - value = new tflite::SqueezeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SequenceRNNOptions: { - value = new tflite::SequenceRNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_StridedSliceOptions: { - value = new tflite::StridedSliceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ExpOptions: { - value = new tflite::ExpOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TopKV2Options: { - value = new tflite::TopKV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SplitOptions: { - value = new tflite::SplitOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogSoftmaxOptions: { - value = new tflite::LogSoftmaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CastOptions: { - value = new tflite::CastOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DequantizeOptions: { - value = new tflite::DequantizeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MaximumMinimumOptions: { - value = new tflite::MaximumMinimumOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ArgMaxOptions: { - value = new tflite::ArgMaxOptionsT(*reinterpret_cast(u.value)); - break; - } - case 
BuiltinOptions_LessOptions: { - value = new tflite::LessOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NegOptions: { - value = new tflite::NegOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PadV2Options: { - value = new tflite::PadV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GreaterOptions: { - value = new tflite::GreaterOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GreaterEqualOptions: { - value = new tflite::GreaterEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LessEqualOptions: { - value = new tflite::LessEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SelectOptions: { - value = new tflite::SelectOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SliceOptions: { - value = new tflite::SliceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TransposeConvOptions: { - value = new tflite::TransposeConvOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SparseToDenseOptions: { - value = new tflite::SparseToDenseOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_TileOptions: { - value = new tflite::TileOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ExpandDimsOptions: { - value = new tflite::ExpandDimsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_EqualOptions: { - value = new tflite::EqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NotEqualOptions: { - value = new tflite::NotEqualOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ShapeOptions: { - value = new tflite::ShapeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PowOptions: { - value = new tflite::PowOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ArgMinOptions: { - value = new tflite::ArgMinOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FakeQuantOptions: { - value = new tflite::FakeQuantOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_PackOptions: { - value = new tflite::PackOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalOrOptions: { - value = new tflite::LogicalOrOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_OneHotOptions: { - value = new tflite::OneHotOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalAndOptions: { - value = new tflite::LogicalAndOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LogicalNotOptions: { - value = new tflite::LogicalNotOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_UnpackOptions: { - value = new tflite::UnpackOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FloorDivOptions: { - value = new tflite::FloorDivOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SquareOptions: { - value = new tflite::SquareOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ZerosLikeOptions: { - value = new tflite::ZerosLikeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FillOptions: { - value = new tflite::FillOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - value = new tflite::BidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); - break; - } - case 
BuiltinOptions_BidirectionalSequenceRNNOptions: { - value = new tflite::BidirectionalSequenceRNNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - value = new tflite::UnidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_FloorModOptions: { - value = new tflite::FloorModOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_RangeOptions: { - value = new tflite::RangeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - value = new tflite::ResizeNearestNeighborOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_LeakyReluOptions: { - value = new tflite::LeakyReluOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SquaredDifferenceOptions: { - value = new tflite::SquaredDifferenceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MirrorPadOptions: { - value = new tflite::MirrorPadOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_AbsOptions: { - value = new tflite::AbsOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SplitVOptions: { - value = new tflite::SplitVOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_UniqueOptions: { - value = new tflite::UniqueOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReverseV2Options: { - value = new tflite::ReverseV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_AddNOptions: { - value = new tflite::AddNOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_GatherNdOptions: { - value = new tflite::GatherNdOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CosOptions: { - value = new tflite::CosOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_WhereOptions: { - value = new tflite::WhereOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_RankOptions: { - value = new tflite::RankOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ReverseSequenceOptions: { - value = new tflite::ReverseSequenceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MatrixDiagOptions: { - value = new tflite::MatrixDiagOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_QuantizeOptions: { - value = new tflite::QuantizeOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_MatrixSetDiagOptions: { - value = new tflite::MatrixSetDiagOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_HardSwishOptions: { - value = new tflite::HardSwishOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_IfOptions: { - value = new tflite::IfOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_WhileOptions: { - value = new tflite::WhileOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DepthToSpaceOptions: { - value = new tflite::DepthToSpaceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - value = new tflite::NonMaxSuppressionV4OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - value = new tflite::NonMaxSuppressionV5OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_ScatterNdOptions: { - value = new tflite::ScatterNdOptionsT(*reinterpret_cast(u.value)); - break; - } - case 
BuiltinOptions_SelectV2Options: { - value = new tflite::SelectV2OptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_DensifyOptions: { - value = new tflite::DensifyOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_SegmentSumOptions: { - value = new tflite::SegmentSumOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BatchMatMulOptions: { - value = new tflite::BatchMatMulOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CumsumOptions: { - value = new tflite::CumsumOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_CallOnceOptions: { - value = new tflite::CallOnceOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_BroadcastToOptions: { - value = new tflite::BroadcastToOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_Rfft2dOptions: { - value = new tflite::Rfft2dOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_Conv3DOptions: { - value = new tflite::Conv3DOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_HashtableOptions: { - value = new tflite::HashtableOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_HashtableFindOptions: { - value = new tflite::HashtableFindOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_HashtableImportOptions: { - value = new tflite::HashtableImportOptionsT(*reinterpret_cast(u.value)); - break; - } - case BuiltinOptions_HashtableSizeOptions: { - value = new tflite::HashtableSizeOptionsT(*reinterpret_cast(u.value)); - break; - } - default: - break; - } -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ReadVariableOptions; +}; -inline void BuiltinOptionsUnion::Reset() { - switch (type) { - case BuiltinOptions_Conv2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DepthwiseConv2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ConcatEmbeddingsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LSHProjectionOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_Pool2DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SVDFOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_RNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FullyConnectedOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SoftmaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ConcatenationOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_AddOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_L2NormOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LocalResponseNormalizationOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ResizeBilinearOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CallOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; 
- } - case BuiltinOptions_ReshapeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SkipGramOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SpaceToDepthOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_EmbeddingLookupSparseOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MulOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PadOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GatherOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BatchToSpaceNDOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SpaceToBatchNDOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TransposeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReducerOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SubOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DivOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SqueezeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_StridedSliceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ExpOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TopKV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SplitOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogSoftmaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CastOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DequantizeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MaximumMinimumOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ArgMaxOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LessOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NegOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PadV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GreaterOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GreaterEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LessEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SelectOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SliceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TransposeConvOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SparseToDenseOptions: { - auto 
ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_TileOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ExpandDimsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_EqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NotEqualOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ShapeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PowOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ArgMinOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FakeQuantOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_PackOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogicalOrOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_OneHotOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogicalAndOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LogicalNotOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_UnpackOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FloorDivOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SquareOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ZerosLikeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FillOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BidirectionalSequenceRNNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_FloorModOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_RangeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ResizeNearestNeighborOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_LeakyReluOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SquaredDifferenceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MirrorPadOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_AbsOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SplitVOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_UniqueOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReverseV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_AddNOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_GatherNdOptions: { - auto ptr = 
reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CosOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_WhereOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_RankOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ReverseSequenceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MatrixDiagOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_QuantizeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_MatrixSetDiagOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_HardSwishOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_IfOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_WhileOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DepthToSpaceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NonMaxSuppressionV4Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_NonMaxSuppressionV5Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_ScatterNdOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SelectV2Options: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_DensifyOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_SegmentSumOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BatchMatMulOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CumsumOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_CallOnceOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_BroadcastToOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_Rfft2dOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_Conv3DOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_HashtableOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_HashtableFindOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_HashtableImportOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - case BuiltinOptions_HashtableSizeOptions: { - auto ptr = reinterpret_cast(value); - delete ptr; - break; - } - default: break; - } - value = nullptr; - type = BuiltinOptions_NONE; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_AssignVariableOptions; +}; -inline const tflite::Model *GetModel(const void *buf) { - return flatbuffers::GetRoot(buf); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_RandomOptions; +}; -inline const tflite::Model *GetSizePrefixedModel(const void *buf) { - return flatbuffers::GetSizePrefixedRoot(buf); -} +template<> struct BuiltinOptionsUnionTraits { + 
static const BuiltinOptions enum_value = BuiltinOptions_BucketizeOptions; +}; -inline const char *ModelIdentifier() { - return "TFL3"; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_GeluOptions; +}; -inline bool ModelBufferHasIdentifier(const void *buf) { - return flatbuffers::BufferHasIdentifier( - buf, ModelIdentifier()); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_DynamicUpdateSliceOptions; +}; -inline bool VerifyModelBuffer( - flatbuffers::Verifier &verifier) { - return verifier.VerifyBuffer(ModelIdentifier()); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentProdOptions; +}; -inline bool VerifySizePrefixedModelBuffer( - flatbuffers::Verifier &verifier) { - return verifier.VerifySizePrefixedBuffer(ModelIdentifier()); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentMaxOptions; +}; -inline const char *ModelExtension() { - return "tflite"; -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentMinOptions; +}; -inline void FinishModelBuffer( - flatbuffers::FlatBufferBuilder &fbb, - flatbuffers::Offset root) { - fbb.Finish(root, ModelIdentifier()); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_UnsortedSegmentSumOptions; +}; -inline void FinishSizePrefixedModelBuffer( - flatbuffers::FlatBufferBuilder &fbb, - flatbuffers::Offset root) { - fbb.FinishSizePrefixed(root, ModelIdentifier()); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_ATan2Options; +}; -inline std::unique_ptr UnPackModel( - const void *buf, - const flatbuffers::resolver_function_t *res = nullptr) { - return std::unique_ptr(GetModel(buf)->UnPack(res)); -} +template<> struct BuiltinOptionsUnionTraits { + static const BuiltinOptions enum_value = BuiltinOptions_SignOptions; +}; -inline std::unique_ptr UnPackSizePrefixedModel( - const void *buf, - const flatbuffers::resolver_function_t *res = nullptr) { - return std::unique_ptr(GetSizePrefixedModel(buf)->UnPack(res)); -} +struct OperatorCodeT : public flatbuffers::NativeTable { + typedef OperatorCode TableType; + int8_t deprecated_builtin_code = 0; + std::string custom_code{}; + int32_t version = 1; + tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD; +}; -} // namespace tflite +struct OperatorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OperatorCodeT NativeTableType; + typedef OperatorCodeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DEPRECATED_BUILTIN_CODE = 4, + VT_CUSTOM_CODE = 6, + VT_VERSION = 8, + VT_BUILTIN_CODE = 10 + }; + int8_t deprecated_builtin_code() const { + return GetField(VT_DEPRECATED_BUILTIN_CODE, 0); + } + const flatbuffers::String *custom_code() const { + return GetPointer(VT_CUSTOM_CODE); + } + int32_t version() const { + return GetField(VT_VERSION, 1); + } + tflite::BuiltinOperator builtin_code() const { + return static_cast(GetField(VT_BUILTIN_CODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_DEPRECATED_BUILTIN_CODE, 1) && + VerifyOffset(verifier, VT_CUSTOM_CODE) && + verifier.VerifyString(custom_code()) && + VerifyField(verifier, 
VT_VERSION, 4) && + VerifyField(verifier, VT_BUILTIN_CODE, 4) && + verifier.EndTable(); + } + OperatorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; -#endif // FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ +} +#endif // FLATBUFFERS_GENERATED_SCHEMA_SUPPL_TFLITE_H_ \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h new file mode 100644 index 0000000..aaa2252 --- /dev/null +++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/tensorflow/lite/schema/schema_generated_full.h @@ -0,0 +1,17601 @@ +// automatically generated by the FlatBuffers compiler, do not modify + + +#ifndef FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ +#define FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_ + +#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" + +// Ensure the included flatbuffers.h is the same version as when this file was +// generated, otherwise it may not be compatible. +static_assert(FLATBUFFERS_VERSION_MAJOR == 2 && + FLATBUFFERS_VERSION_MINOR == 0 && + FLATBUFFERS_VERSION_REVISION == 6, + "Non-compatible flatbuffers version included"); + +namespace tflite { + +enum QuantizationDetails : uint8_t { + QuantizationDetails_NONE = 0, + QuantizationDetails_CustomQuantization = 1, + QuantizationDetails_MIN = QuantizationDetails_NONE, + QuantizationDetails_MAX = QuantizationDetails_CustomQuantization +}; + +inline const QuantizationDetails (&EnumValuesQuantizationDetails())[2] { + static const QuantizationDetails values[] = { + QuantizationDetails_NONE, + QuantizationDetails_CustomQuantization + }; + return values; +} + +inline const char * const *EnumNamesQuantizationDetails() { + static const char * const names[3] = { + "NONE", + "CustomQuantization", + nullptr + }; + return names; +} + +inline const char *EnumNameQuantizationDetails(QuantizationDetails e) { + if (flatbuffers::IsOutRange(e, QuantizationDetails_NONE, QuantizationDetails_CustomQuantization)) return ""; + const size_t index = static_cast(e); + return EnumNamesQuantizationDetails()[index]; +} + +template struct QuantizationDetailsTraits { + static const QuantizationDetails enum_value = QuantizationDetails_NONE; +}; + +template<> struct QuantizationDetailsTraits { + static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; +}; + +template struct QuantizationDetailsUnionTraits { + static const QuantizationDetails enum_value = QuantizationDetails_NONE; +}; + +template<> struct QuantizationDetailsUnionTraits { + static const QuantizationDetails enum_value = QuantizationDetails_CustomQuantization; +}; + +struct QuantizationDetailsUnion { + QuantizationDetails type; + void *value; + + QuantizationDetailsUnion() : type(QuantizationDetails_NONE), value(nullptr) {} + QuantizationDetailsUnion(QuantizationDetailsUnion&& u) FLATBUFFERS_NOEXCEPT : + type(QuantizationDetails_NONE), value(nullptr) + { std::swap(type, u.type); std::swap(value, u.value); } + QuantizationDetailsUnion(const QuantizationDetailsUnion &); + QuantizationDetailsUnion &operator=(const QuantizationDetailsUnion &u) + { 
QuantizationDetailsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } + QuantizationDetailsUnion &operator=(QuantizationDetailsUnion &&u) FLATBUFFERS_NOEXCEPT + { std::swap(type, u.type); std::swap(value, u.value); return *this; } + ~QuantizationDetailsUnion() { Reset(); } + + void Reset(); + + template + void Set(T&& val) { + typedef typename std::remove_reference::type RT; + Reset(); + type = QuantizationDetailsUnionTraits::enum_value; + if (type != QuantizationDetails_NONE) { + value = new RT(std::forward(val)); + } + } + + static void *UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver); + flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + + tflite::CustomQuantizationT *AsCustomQuantization() { + return type == QuantizationDetails_CustomQuantization ? + reinterpret_cast(value) : nullptr; + } + const tflite::CustomQuantizationT *AsCustomQuantization() const { + return type == QuantizationDetails_CustomQuantization ? + reinterpret_cast(value) : nullptr; + } +}; + +bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type); +bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); + +enum DimensionType : int8_t { + DimensionType_DENSE = 0, + DimensionType_SPARSE_CSR = 1, + DimensionType_MIN = DimensionType_DENSE, + DimensionType_MAX = DimensionType_SPARSE_CSR +}; + +inline const DimensionType (&EnumValuesDimensionType())[2] { + static const DimensionType values[] = { + DimensionType_DENSE, + DimensionType_SPARSE_CSR + }; + return values; +} + +inline const char * const *EnumNamesDimensionType() { + static const char * const names[3] = { + "DENSE", + "SPARSE_CSR", + nullptr + }; + return names; +} + +inline const char *EnumNameDimensionType(DimensionType e) { + if (flatbuffers::IsOutRange(e, DimensionType_DENSE, DimensionType_SPARSE_CSR)) return ""; + const size_t index = static_cast(e); + return EnumNamesDimensionType()[index]; +} + +enum SparseIndexVector : uint8_t { + SparseIndexVector_NONE = 0, + SparseIndexVector_Int32Vector = 1, + SparseIndexVector_Uint16Vector = 2, + SparseIndexVector_Uint8Vector = 3, + SparseIndexVector_MIN = SparseIndexVector_NONE, + SparseIndexVector_MAX = SparseIndexVector_Uint8Vector +}; + +inline const SparseIndexVector (&EnumValuesSparseIndexVector())[4] { + static const SparseIndexVector values[] = { + SparseIndexVector_NONE, + SparseIndexVector_Int32Vector, + SparseIndexVector_Uint16Vector, + SparseIndexVector_Uint8Vector + }; + return values; +} + +inline const char * const *EnumNamesSparseIndexVector() { + static const char * const names[5] = { + "NONE", + "Int32Vector", + "Uint16Vector", + "Uint8Vector", + nullptr + }; + return names; +} + +inline const char *EnumNameSparseIndexVector(SparseIndexVector e) { + if (flatbuffers::IsOutRange(e, SparseIndexVector_NONE, SparseIndexVector_Uint8Vector)) return ""; + const size_t index = static_cast(e); + return EnumNamesSparseIndexVector()[index]; +} + +template struct SparseIndexVectorTraits { + static const SparseIndexVector enum_value = SparseIndexVector_NONE; +}; + +template<> struct SparseIndexVectorTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; +}; + +template<> struct SparseIndexVectorTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; +}; + 
+template<> struct SparseIndexVectorTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; +}; + +template struct SparseIndexVectorUnionTraits { + static const SparseIndexVector enum_value = SparseIndexVector_NONE; +}; + +template<> struct SparseIndexVectorUnionTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Int32Vector; +}; + +template<> struct SparseIndexVectorUnionTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Uint16Vector; +}; + +template<> struct SparseIndexVectorUnionTraits { + static const SparseIndexVector enum_value = SparseIndexVector_Uint8Vector; +}; + +struct SparseIndexVectorUnion { + SparseIndexVector type; + void *value; + + SparseIndexVectorUnion() : type(SparseIndexVector_NONE), value(nullptr) {} + SparseIndexVectorUnion(SparseIndexVectorUnion&& u) FLATBUFFERS_NOEXCEPT : + type(SparseIndexVector_NONE), value(nullptr) + { std::swap(type, u.type); std::swap(value, u.value); } + SparseIndexVectorUnion(const SparseIndexVectorUnion &); + SparseIndexVectorUnion &operator=(const SparseIndexVectorUnion &u) + { SparseIndexVectorUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } + SparseIndexVectorUnion &operator=(SparseIndexVectorUnion &&u) FLATBUFFERS_NOEXCEPT + { std::swap(type, u.type); std::swap(value, u.value); return *this; } + ~SparseIndexVectorUnion() { Reset(); } + + void Reset(); + + template + void Set(T&& val) { + typedef typename std::remove_reference::type RT; + Reset(); + type = SparseIndexVectorUnionTraits::enum_value; + if (type != SparseIndexVector_NONE) { + value = new RT(std::forward(val)); + } + } + + static void *UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver); + flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + + tflite::Int32VectorT *AsInt32Vector() { + return type == SparseIndexVector_Int32Vector ? + reinterpret_cast(value) : nullptr; + } + const tflite::Int32VectorT *AsInt32Vector() const { + return type == SparseIndexVector_Int32Vector ? + reinterpret_cast(value) : nullptr; + } + tflite::Uint16VectorT *AsUint16Vector() { + return type == SparseIndexVector_Uint16Vector ? + reinterpret_cast(value) : nullptr; + } + const tflite::Uint16VectorT *AsUint16Vector() const { + return type == SparseIndexVector_Uint16Vector ? + reinterpret_cast(value) : nullptr; + } + tflite::Uint8VectorT *AsUint8Vector() { + return type == SparseIndexVector_Uint8Vector ? + reinterpret_cast(value) : nullptr; + } + const tflite::Uint8VectorT *AsUint8Vector() const { + return type == SparseIndexVector_Uint8Vector ? 
+ reinterpret_cast(value) : nullptr; + } +}; + +bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type); +bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); + +struct BuiltinOptionsUnion { + BuiltinOptions type; + void *value; + + BuiltinOptionsUnion() : type(BuiltinOptions_NONE), value(nullptr) {} + BuiltinOptionsUnion(BuiltinOptionsUnion&& u) FLATBUFFERS_NOEXCEPT : + type(BuiltinOptions_NONE), value(nullptr) + { std::swap(type, u.type); std::swap(value, u.value); } + BuiltinOptionsUnion(const BuiltinOptionsUnion &); + BuiltinOptionsUnion &operator=(const BuiltinOptionsUnion &u) + { BuiltinOptionsUnion t(u); std::swap(type, t.type); std::swap(value, t.value); return *this; } + BuiltinOptionsUnion &operator=(BuiltinOptionsUnion &&u) FLATBUFFERS_NOEXCEPT + { std::swap(type, u.type); std::swap(value, u.value); return *this; } + ~BuiltinOptionsUnion() { Reset(); } + + void Reset(); + + template + void Set(T&& val) { + typedef typename std::remove_reference::type RT; + Reset(); + type = BuiltinOptionsUnionTraits::enum_value; + if (type != BuiltinOptions_NONE) { + value = new RT(std::forward(val)); + } + } + + static void *UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver); + flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher = nullptr) const; + + tflite::Conv2DOptionsT *AsConv2DOptions() { + return type == BuiltinOptions_Conv2DOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::Conv2DOptionsT *AsConv2DOptions() const { + return type == BuiltinOptions_Conv2DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() { + return type == BuiltinOptions_DepthwiseConv2DOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DepthwiseConv2DOptionsT *AsDepthwiseConv2DOptions() const { + return type == BuiltinOptions_DepthwiseConv2DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() { + return type == BuiltinOptions_ConcatEmbeddingsOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ConcatEmbeddingsOptionsT *AsConcatEmbeddingsOptions() const { + return type == BuiltinOptions_ConcatEmbeddingsOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() { + return type == BuiltinOptions_LSHProjectionOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LSHProjectionOptionsT *AsLSHProjectionOptions() const { + return type == BuiltinOptions_LSHProjectionOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::Pool2DOptionsT *AsPool2DOptions() { + return type == BuiltinOptions_Pool2DOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::Pool2DOptionsT *AsPool2DOptions() const { + return type == BuiltinOptions_Pool2DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SVDFOptionsT *AsSVDFOptions() { + return type == BuiltinOptions_SVDFOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SVDFOptionsT *AsSVDFOptions() const { + return type == BuiltinOptions_SVDFOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::RNNOptionsT *AsRNNOptions() { + return type == BuiltinOptions_RNNOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::RNNOptionsT *AsRNNOptions() const { + return type == BuiltinOptions_RNNOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() { + return type == BuiltinOptions_FullyConnectedOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FullyConnectedOptionsT *AsFullyConnectedOptions() const { + return type == BuiltinOptions_FullyConnectedOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SoftmaxOptionsT *AsSoftmaxOptions() { + return type == BuiltinOptions_SoftmaxOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SoftmaxOptionsT *AsSoftmaxOptions() const { + return type == BuiltinOptions_SoftmaxOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ConcatenationOptionsT *AsConcatenationOptions() { + return type == BuiltinOptions_ConcatenationOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ConcatenationOptionsT *AsConcatenationOptions() const { + return type == BuiltinOptions_ConcatenationOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::AddOptionsT *AsAddOptions() { + return type == BuiltinOptions_AddOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::AddOptionsT *AsAddOptions() const { + return type == BuiltinOptions_AddOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::L2NormOptionsT *AsL2NormOptions() { + return type == BuiltinOptions_L2NormOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::L2NormOptionsT *AsL2NormOptions() const { + return type == BuiltinOptions_L2NormOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() { + return type == BuiltinOptions_LocalResponseNormalizationOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LocalResponseNormalizationOptionsT *AsLocalResponseNormalizationOptions() const { + return type == BuiltinOptions_LocalResponseNormalizationOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LSTMOptionsT *AsLSTMOptions() { + return type == BuiltinOptions_LSTMOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LSTMOptionsT *AsLSTMOptions() const { + return type == BuiltinOptions_LSTMOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() { + return type == BuiltinOptions_ResizeBilinearOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ResizeBilinearOptionsT *AsResizeBilinearOptions() const { + return type == BuiltinOptions_ResizeBilinearOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CallOptionsT *AsCallOptions() { + return type == BuiltinOptions_CallOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CallOptionsT *AsCallOptions() const { + return type == BuiltinOptions_CallOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReshapeOptionsT *AsReshapeOptions() { + return type == BuiltinOptions_ReshapeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReshapeOptionsT *AsReshapeOptions() const { + return type == BuiltinOptions_ReshapeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SkipGramOptionsT *AsSkipGramOptions() { + return type == BuiltinOptions_SkipGramOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SkipGramOptionsT *AsSkipGramOptions() const { + return type == BuiltinOptions_SkipGramOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() { + return type == BuiltinOptions_SpaceToDepthOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SpaceToDepthOptionsT *AsSpaceToDepthOptions() const { + return type == BuiltinOptions_SpaceToDepthOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() { + return type == BuiltinOptions_EmbeddingLookupSparseOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::EmbeddingLookupSparseOptionsT *AsEmbeddingLookupSparseOptions() const { + return type == BuiltinOptions_EmbeddingLookupSparseOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MulOptionsT *AsMulOptions() { + return type == BuiltinOptions_MulOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MulOptionsT *AsMulOptions() const { + return type == BuiltinOptions_MulOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::PadOptionsT *AsPadOptions() { + return type == BuiltinOptions_PadOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::PadOptionsT *AsPadOptions() const { + return type == BuiltinOptions_PadOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::GatherOptionsT *AsGatherOptions() { + return type == BuiltinOptions_GatherOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GatherOptionsT *AsGatherOptions() const { + return type == BuiltinOptions_GatherOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() { + return type == BuiltinOptions_BatchToSpaceNDOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BatchToSpaceNDOptionsT *AsBatchToSpaceNDOptions() const { + return type == BuiltinOptions_BatchToSpaceNDOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() { + return type == BuiltinOptions_SpaceToBatchNDOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SpaceToBatchNDOptionsT *AsSpaceToBatchNDOptions() const { + return type == BuiltinOptions_SpaceToBatchNDOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::TransposeOptionsT *AsTransposeOptions() { + return type == BuiltinOptions_TransposeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::TransposeOptionsT *AsTransposeOptions() const { + return type == BuiltinOptions_TransposeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReducerOptionsT *AsReducerOptions() { + return type == BuiltinOptions_ReducerOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReducerOptionsT *AsReducerOptions() const { + return type == BuiltinOptions_ReducerOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SubOptionsT *AsSubOptions() { + return type == BuiltinOptions_SubOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SubOptionsT *AsSubOptions() const { + return type == BuiltinOptions_SubOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DivOptionsT *AsDivOptions() { + return type == BuiltinOptions_DivOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DivOptionsT *AsDivOptions() const { + return type == BuiltinOptions_DivOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SqueezeOptionsT *AsSqueezeOptions() { + return type == BuiltinOptions_SqueezeOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::SqueezeOptionsT *AsSqueezeOptions() const { + return type == BuiltinOptions_SqueezeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() { + return type == BuiltinOptions_SequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SequenceRNNOptionsT *AsSequenceRNNOptions() const { + return type == BuiltinOptions_SequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::StridedSliceOptionsT *AsStridedSliceOptions() { + return type == BuiltinOptions_StridedSliceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::StridedSliceOptionsT *AsStridedSliceOptions() const { + return type == BuiltinOptions_StridedSliceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ExpOptionsT *AsExpOptions() { + return type == BuiltinOptions_ExpOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ExpOptionsT *AsExpOptions() const { + return type == BuiltinOptions_ExpOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::TopKV2OptionsT *AsTopKV2Options() { + return type == BuiltinOptions_TopKV2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::TopKV2OptionsT *AsTopKV2Options() const { + return type == BuiltinOptions_TopKV2Options ? + reinterpret_cast(value) : nullptr; + } + tflite::SplitOptionsT *AsSplitOptions() { + return type == BuiltinOptions_SplitOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SplitOptionsT *AsSplitOptions() const { + return type == BuiltinOptions_SplitOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() { + return type == BuiltinOptions_LogSoftmaxOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LogSoftmaxOptionsT *AsLogSoftmaxOptions() const { + return type == BuiltinOptions_LogSoftmaxOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CastOptionsT *AsCastOptions() { + return type == BuiltinOptions_CastOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CastOptionsT *AsCastOptions() const { + return type == BuiltinOptions_CastOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DequantizeOptionsT *AsDequantizeOptions() { + return type == BuiltinOptions_DequantizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DequantizeOptionsT *AsDequantizeOptions() const { + return type == BuiltinOptions_DequantizeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() { + return type == BuiltinOptions_MaximumMinimumOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MaximumMinimumOptionsT *AsMaximumMinimumOptions() const { + return type == BuiltinOptions_MaximumMinimumOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ArgMaxOptionsT *AsArgMaxOptions() { + return type == BuiltinOptions_ArgMaxOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ArgMaxOptionsT *AsArgMaxOptions() const { + return type == BuiltinOptions_ArgMaxOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LessOptionsT *AsLessOptions() { + return type == BuiltinOptions_LessOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LessOptionsT *AsLessOptions() const { + return type == BuiltinOptions_LessOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::NegOptionsT *AsNegOptions() { + return type == BuiltinOptions_NegOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::NegOptionsT *AsNegOptions() const { + return type == BuiltinOptions_NegOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::PadV2OptionsT *AsPadV2Options() { + return type == BuiltinOptions_PadV2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::PadV2OptionsT *AsPadV2Options() const { + return type == BuiltinOptions_PadV2Options ? + reinterpret_cast(value) : nullptr; + } + tflite::GreaterOptionsT *AsGreaterOptions() { + return type == BuiltinOptions_GreaterOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GreaterOptionsT *AsGreaterOptions() const { + return type == BuiltinOptions_GreaterOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() { + return type == BuiltinOptions_GreaterEqualOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GreaterEqualOptionsT *AsGreaterEqualOptions() const { + return type == BuiltinOptions_GreaterEqualOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LessEqualOptionsT *AsLessEqualOptions() { + return type == BuiltinOptions_LessEqualOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LessEqualOptionsT *AsLessEqualOptions() const { + return type == BuiltinOptions_LessEqualOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SelectOptionsT *AsSelectOptions() { + return type == BuiltinOptions_SelectOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SelectOptionsT *AsSelectOptions() const { + return type == BuiltinOptions_SelectOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SliceOptionsT *AsSliceOptions() { + return type == BuiltinOptions_SliceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SliceOptionsT *AsSliceOptions() const { + return type == BuiltinOptions_SliceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::TransposeConvOptionsT *AsTransposeConvOptions() { + return type == BuiltinOptions_TransposeConvOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::TransposeConvOptionsT *AsTransposeConvOptions() const { + return type == BuiltinOptions_TransposeConvOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() { + return type == BuiltinOptions_SparseToDenseOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SparseToDenseOptionsT *AsSparseToDenseOptions() const { + return type == BuiltinOptions_SparseToDenseOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::TileOptionsT *AsTileOptions() { + return type == BuiltinOptions_TileOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::TileOptionsT *AsTileOptions() const { + return type == BuiltinOptions_TileOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ExpandDimsOptionsT *AsExpandDimsOptions() { + return type == BuiltinOptions_ExpandDimsOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ExpandDimsOptionsT *AsExpandDimsOptions() const { + return type == BuiltinOptions_ExpandDimsOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::EqualOptionsT *AsEqualOptions() { + return type == BuiltinOptions_EqualOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::EqualOptionsT *AsEqualOptions() const { + return type == BuiltinOptions_EqualOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::NotEqualOptionsT *AsNotEqualOptions() { + return type == BuiltinOptions_NotEqualOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::NotEqualOptionsT *AsNotEqualOptions() const { + return type == BuiltinOptions_NotEqualOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ShapeOptionsT *AsShapeOptions() { + return type == BuiltinOptions_ShapeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ShapeOptionsT *AsShapeOptions() const { + return type == BuiltinOptions_ShapeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::PowOptionsT *AsPowOptions() { + return type == BuiltinOptions_PowOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::PowOptionsT *AsPowOptions() const { + return type == BuiltinOptions_PowOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ArgMinOptionsT *AsArgMinOptions() { + return type == BuiltinOptions_ArgMinOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ArgMinOptionsT *AsArgMinOptions() const { + return type == BuiltinOptions_ArgMinOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FakeQuantOptionsT *AsFakeQuantOptions() { + return type == BuiltinOptions_FakeQuantOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FakeQuantOptionsT *AsFakeQuantOptions() const { + return type == BuiltinOptions_FakeQuantOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::PackOptionsT *AsPackOptions() { + return type == BuiltinOptions_PackOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::PackOptionsT *AsPackOptions() const { + return type == BuiltinOptions_PackOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LogicalOrOptionsT *AsLogicalOrOptions() { + return type == BuiltinOptions_LogicalOrOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LogicalOrOptionsT *AsLogicalOrOptions() const { + return type == BuiltinOptions_LogicalOrOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::OneHotOptionsT *AsOneHotOptions() { + return type == BuiltinOptions_OneHotOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::OneHotOptionsT *AsOneHotOptions() const { + return type == BuiltinOptions_OneHotOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LogicalAndOptionsT *AsLogicalAndOptions() { + return type == BuiltinOptions_LogicalAndOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LogicalAndOptionsT *AsLogicalAndOptions() const { + return type == BuiltinOptions_LogicalAndOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LogicalNotOptionsT *AsLogicalNotOptions() { + return type == BuiltinOptions_LogicalNotOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LogicalNotOptionsT *AsLogicalNotOptions() const { + return type == BuiltinOptions_LogicalNotOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnpackOptionsT *AsUnpackOptions() { + return type == BuiltinOptions_UnpackOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnpackOptionsT *AsUnpackOptions() const { + return type == BuiltinOptions_UnpackOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FloorDivOptionsT *AsFloorDivOptions() { + return type == BuiltinOptions_FloorDivOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FloorDivOptionsT *AsFloorDivOptions() const { + return type == BuiltinOptions_FloorDivOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SquareOptionsT *AsSquareOptions() { + return type == BuiltinOptions_SquareOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::SquareOptionsT *AsSquareOptions() const { + return type == BuiltinOptions_SquareOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ZerosLikeOptionsT *AsZerosLikeOptions() { + return type == BuiltinOptions_ZerosLikeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ZerosLikeOptionsT *AsZerosLikeOptions() const { + return type == BuiltinOptions_ZerosLikeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FillOptionsT *AsFillOptions() { + return type == BuiltinOptions_FillOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FillOptionsT *AsFillOptions() const { + return type == BuiltinOptions_FillOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() { + return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BidirectionalSequenceLSTMOptionsT *AsBidirectionalSequenceLSTMOptions() const { + return type == BuiltinOptions_BidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() { + return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BidirectionalSequenceRNNOptionsT *AsBidirectionalSequenceRNNOptions() const { + return type == BuiltinOptions_BidirectionalSequenceRNNOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() { + return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnidirectionalSequenceLSTMOptionsT *AsUnidirectionalSequenceLSTMOptions() const { + return type == BuiltinOptions_UnidirectionalSequenceLSTMOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::FloorModOptionsT *AsFloorModOptions() { + return type == BuiltinOptions_FloorModOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::FloorModOptionsT *AsFloorModOptions() const { + return type == BuiltinOptions_FloorModOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::RangeOptionsT *AsRangeOptions() { + return type == BuiltinOptions_RangeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::RangeOptionsT *AsRangeOptions() const { + return type == BuiltinOptions_RangeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() { + return type == BuiltinOptions_ResizeNearestNeighborOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ResizeNearestNeighborOptionsT *AsResizeNearestNeighborOptions() const { + return type == BuiltinOptions_ResizeNearestNeighborOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::LeakyReluOptionsT *AsLeakyReluOptions() { + return type == BuiltinOptions_LeakyReluOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::LeakyReluOptionsT *AsLeakyReluOptions() const { + return type == BuiltinOptions_LeakyReluOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() { + return type == BuiltinOptions_SquaredDifferenceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SquaredDifferenceOptionsT *AsSquaredDifferenceOptions() const { + return type == BuiltinOptions_SquaredDifferenceOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::MirrorPadOptionsT *AsMirrorPadOptions() { + return type == BuiltinOptions_MirrorPadOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MirrorPadOptionsT *AsMirrorPadOptions() const { + return type == BuiltinOptions_MirrorPadOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::AbsOptionsT *AsAbsOptions() { + return type == BuiltinOptions_AbsOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::AbsOptionsT *AsAbsOptions() const { + return type == BuiltinOptions_AbsOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SplitVOptionsT *AsSplitVOptions() { + return type == BuiltinOptions_SplitVOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SplitVOptionsT *AsSplitVOptions() const { + return type == BuiltinOptions_SplitVOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UniqueOptionsT *AsUniqueOptions() { + return type == BuiltinOptions_UniqueOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UniqueOptionsT *AsUniqueOptions() const { + return type == BuiltinOptions_UniqueOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReverseV2OptionsT *AsReverseV2Options() { + return type == BuiltinOptions_ReverseV2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReverseV2OptionsT *AsReverseV2Options() const { + return type == BuiltinOptions_ReverseV2Options ? + reinterpret_cast(value) : nullptr; + } + tflite::AddNOptionsT *AsAddNOptions() { + return type == BuiltinOptions_AddNOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::AddNOptionsT *AsAddNOptions() const { + return type == BuiltinOptions_AddNOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::GatherNdOptionsT *AsGatherNdOptions() { + return type == BuiltinOptions_GatherNdOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GatherNdOptionsT *AsGatherNdOptions() const { + return type == BuiltinOptions_GatherNdOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CosOptionsT *AsCosOptions() { + return type == BuiltinOptions_CosOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CosOptionsT *AsCosOptions() const { + return type == BuiltinOptions_CosOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::WhereOptionsT *AsWhereOptions() { + return type == BuiltinOptions_WhereOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::WhereOptionsT *AsWhereOptions() const { + return type == BuiltinOptions_WhereOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::RankOptionsT *AsRankOptions() { + return type == BuiltinOptions_RankOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::RankOptionsT *AsRankOptions() const { + return type == BuiltinOptions_RankOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() { + return type == BuiltinOptions_ReverseSequenceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReverseSequenceOptionsT *AsReverseSequenceOptions() const { + return type == BuiltinOptions_ReverseSequenceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() { + return type == BuiltinOptions_MatrixDiagOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MatrixDiagOptionsT *AsMatrixDiagOptions() const { + return type == BuiltinOptions_MatrixDiagOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::QuantizeOptionsT *AsQuantizeOptions() { + return type == BuiltinOptions_QuantizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::QuantizeOptionsT *AsQuantizeOptions() const { + return type == BuiltinOptions_QuantizeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() { + return type == BuiltinOptions_MatrixSetDiagOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::MatrixSetDiagOptionsT *AsMatrixSetDiagOptions() const { + return type == BuiltinOptions_MatrixSetDiagOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HardSwishOptionsT *AsHardSwishOptions() { + return type == BuiltinOptions_HardSwishOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HardSwishOptionsT *AsHardSwishOptions() const { + return type == BuiltinOptions_HardSwishOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::IfOptionsT *AsIfOptions() { + return type == BuiltinOptions_IfOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::IfOptionsT *AsIfOptions() const { + return type == BuiltinOptions_IfOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::WhileOptionsT *AsWhileOptions() { + return type == BuiltinOptions_WhileOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::WhileOptionsT *AsWhileOptions() const { + return type == BuiltinOptions_WhileOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() { + return type == BuiltinOptions_DepthToSpaceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DepthToSpaceOptionsT *AsDepthToSpaceOptions() const { + return type == BuiltinOptions_DepthToSpaceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() { + return type == BuiltinOptions_NonMaxSuppressionV4Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::NonMaxSuppressionV4OptionsT *AsNonMaxSuppressionV4Options() const { + return type == BuiltinOptions_NonMaxSuppressionV4Options ? + reinterpret_cast(value) : nullptr; + } + tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() { + return type == BuiltinOptions_NonMaxSuppressionV5Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::NonMaxSuppressionV5OptionsT *AsNonMaxSuppressionV5Options() const { + return type == BuiltinOptions_NonMaxSuppressionV5Options ? + reinterpret_cast(value) : nullptr; + } + tflite::ScatterNdOptionsT *AsScatterNdOptions() { + return type == BuiltinOptions_ScatterNdOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ScatterNdOptionsT *AsScatterNdOptions() const { + return type == BuiltinOptions_ScatterNdOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SelectV2OptionsT *AsSelectV2Options() { + return type == BuiltinOptions_SelectV2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::SelectV2OptionsT *AsSelectV2Options() const { + return type == BuiltinOptions_SelectV2Options ? + reinterpret_cast(value) : nullptr; + } + tflite::DensifyOptionsT *AsDensifyOptions() { + return type == BuiltinOptions_DensifyOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DensifyOptionsT *AsDensifyOptions() const { + return type == BuiltinOptions_DensifyOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::SegmentSumOptionsT *AsSegmentSumOptions() { + return type == BuiltinOptions_SegmentSumOptions ? 
+ reinterpret_cast(value) : nullptr; + } + const tflite::SegmentSumOptionsT *AsSegmentSumOptions() const { + return type == BuiltinOptions_SegmentSumOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() { + return type == BuiltinOptions_BatchMatMulOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BatchMatMulOptionsT *AsBatchMatMulOptions() const { + return type == BuiltinOptions_BatchMatMulOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CumsumOptionsT *AsCumsumOptions() { + return type == BuiltinOptions_CumsumOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CumsumOptionsT *AsCumsumOptions() const { + return type == BuiltinOptions_CumsumOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::CallOnceOptionsT *AsCallOnceOptions() { + return type == BuiltinOptions_CallOnceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::CallOnceOptionsT *AsCallOnceOptions() const { + return type == BuiltinOptions_CallOnceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BroadcastToOptionsT *AsBroadcastToOptions() { + return type == BuiltinOptions_BroadcastToOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BroadcastToOptionsT *AsBroadcastToOptions() const { + return type == BuiltinOptions_BroadcastToOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::Rfft2dOptionsT *AsRfft2dOptions() { + return type == BuiltinOptions_Rfft2dOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::Rfft2dOptionsT *AsRfft2dOptions() const { + return type == BuiltinOptions_Rfft2dOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::Conv3DOptionsT *AsConv3DOptions() { + return type == BuiltinOptions_Conv3DOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::Conv3DOptionsT *AsConv3DOptions() const { + return type == BuiltinOptions_Conv3DOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableOptionsT *AsHashtableOptions() { + return type == BuiltinOptions_HashtableOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableOptionsT *AsHashtableOptions() const { + return type == BuiltinOptions_HashtableOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableFindOptionsT *AsHashtableFindOptions() { + return type == BuiltinOptions_HashtableFindOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableFindOptionsT *AsHashtableFindOptions() const { + return type == BuiltinOptions_HashtableFindOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableImportOptionsT *AsHashtableImportOptions() { + return type == BuiltinOptions_HashtableImportOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableImportOptionsT *AsHashtableImportOptions() const { + return type == BuiltinOptions_HashtableImportOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() { + return type == BuiltinOptions_HashtableSizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::HashtableSizeOptionsT *AsHashtableSizeOptions() const { + return type == BuiltinOptions_HashtableSizeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::VarHandleOptionsT *AsVarHandleOptions() { + return type == BuiltinOptions_VarHandleOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::VarHandleOptionsT *AsVarHandleOptions() const { + return type == BuiltinOptions_VarHandleOptions ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::ReadVariableOptionsT *AsReadVariableOptions() { + return type == BuiltinOptions_ReadVariableOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::ReadVariableOptionsT *AsReadVariableOptions() const { + return type == BuiltinOptions_ReadVariableOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::AssignVariableOptionsT *AsAssignVariableOptions() { + return type == BuiltinOptions_AssignVariableOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::AssignVariableOptionsT *AsAssignVariableOptions() const { + return type == BuiltinOptions_AssignVariableOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::RandomOptionsT *AsRandomOptions() { + return type == BuiltinOptions_RandomOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::RandomOptionsT *AsRandomOptions() const { + return type == BuiltinOptions_RandomOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::BucketizeOptionsT *AsBucketizeOptions() { + return type == BuiltinOptions_BucketizeOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::BucketizeOptionsT *AsBucketizeOptions() const { + return type == BuiltinOptions_BucketizeOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::GeluOptionsT *AsGeluOptions() { + return type == BuiltinOptions_GeluOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::GeluOptionsT *AsGeluOptions() const { + return type == BuiltinOptions_GeluOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::DynamicUpdateSliceOptionsT *AsDynamicUpdateSliceOptions() { + return type == BuiltinOptions_DynamicUpdateSliceOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::DynamicUpdateSliceOptionsT *AsDynamicUpdateSliceOptions() const { + return type == BuiltinOptions_DynamicUpdateSliceOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnsortedSegmentProdOptionsT *AsUnsortedSegmentProdOptions() { + return type == BuiltinOptions_UnsortedSegmentProdOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnsortedSegmentProdOptionsT *AsUnsortedSegmentProdOptions() const { + return type == BuiltinOptions_UnsortedSegmentProdOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnsortedSegmentMaxOptionsT *AsUnsortedSegmentMaxOptions() { + return type == BuiltinOptions_UnsortedSegmentMaxOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnsortedSegmentMaxOptionsT *AsUnsortedSegmentMaxOptions() const { + return type == BuiltinOptions_UnsortedSegmentMaxOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnsortedSegmentMinOptionsT *AsUnsortedSegmentMinOptions() { + return type == BuiltinOptions_UnsortedSegmentMinOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnsortedSegmentMinOptionsT *AsUnsortedSegmentMinOptions() const { + return type == BuiltinOptions_UnsortedSegmentMinOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::UnsortedSegmentSumOptionsT *AsUnsortedSegmentSumOptions() { + return type == BuiltinOptions_UnsortedSegmentSumOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::UnsortedSegmentSumOptionsT *AsUnsortedSegmentSumOptions() const { + return type == BuiltinOptions_UnsortedSegmentSumOptions ? + reinterpret_cast(value) : nullptr; + } + tflite::ATan2OptionsT *AsATan2Options() { + return type == BuiltinOptions_ATan2Options ? + reinterpret_cast(value) : nullptr; + } + const tflite::ATan2OptionsT *AsATan2Options() const { + return type == BuiltinOptions_ATan2Options ? 
+ reinterpret_cast(value) : nullptr; + } + tflite::SignOptionsT *AsSignOptions() { + return type == BuiltinOptions_SignOptions ? + reinterpret_cast(value) : nullptr; + } + const tflite::SignOptionsT *AsSignOptions() const { + return type == BuiltinOptions_SignOptions ? + reinterpret_cast(value) : nullptr; + } +}; + +bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type); +bool VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types); + +enum Padding : int8_t { + Padding_SAME = 0, + Padding_VALID = 1, + Padding_MIN = Padding_SAME, + Padding_MAX = Padding_VALID +}; + +inline const Padding (&EnumValuesPadding())[2] { + static const Padding values[] = { + Padding_SAME, + Padding_VALID + }; + return values; +} + +inline const char * const *EnumNamesPadding() { + static const char * const names[3] = { + "SAME", + "VALID", + nullptr + }; + return names; +} + +inline const char *EnumNamePadding(Padding e) { + if (flatbuffers::IsOutRange(e, Padding_SAME, Padding_VALID)) return ""; + const size_t index = static_cast(e); + return EnumNamesPadding()[index]; +} + +enum ActivationFunctionType : int8_t { + ActivationFunctionType_NONE = 0, + ActivationFunctionType_RELU = 1, + ActivationFunctionType_RELU_N1_TO_1 = 2, + ActivationFunctionType_RELU6 = 3, + ActivationFunctionType_TANH = 4, + ActivationFunctionType_SIGN_BIT = 5, + ActivationFunctionType_MIN = ActivationFunctionType_NONE, + ActivationFunctionType_MAX = ActivationFunctionType_SIGN_BIT +}; + +inline const ActivationFunctionType (&EnumValuesActivationFunctionType())[6] { + static const ActivationFunctionType values[] = { + ActivationFunctionType_NONE, + ActivationFunctionType_RELU, + ActivationFunctionType_RELU_N1_TO_1, + ActivationFunctionType_RELU6, + ActivationFunctionType_TANH, + ActivationFunctionType_SIGN_BIT + }; + return values; +} + +inline const char * const *EnumNamesActivationFunctionType() { + static const char * const names[7] = { + "NONE", + "RELU", + "RELU_N1_TO_1", + "RELU6", + "TANH", + "SIGN_BIT", + nullptr + }; + return names; +} + +inline const char *EnumNameActivationFunctionType(ActivationFunctionType e) { + if (flatbuffers::IsOutRange(e, ActivationFunctionType_NONE, ActivationFunctionType_SIGN_BIT)) return ""; + const size_t index = static_cast(e); + return EnumNamesActivationFunctionType()[index]; +} + +enum LSHProjectionType : int8_t { + LSHProjectionType_UNKNOWN = 0, + LSHProjectionType_SPARSE = 1, + LSHProjectionType_DENSE = 2, + LSHProjectionType_MIN = LSHProjectionType_UNKNOWN, + LSHProjectionType_MAX = LSHProjectionType_DENSE +}; + +inline const LSHProjectionType (&EnumValuesLSHProjectionType())[3] { + static const LSHProjectionType values[] = { + LSHProjectionType_UNKNOWN, + LSHProjectionType_SPARSE, + LSHProjectionType_DENSE + }; + return values; +} + +inline const char * const *EnumNamesLSHProjectionType() { + static const char * const names[4] = { + "UNKNOWN", + "SPARSE", + "DENSE", + nullptr + }; + return names; +} + +inline const char *EnumNameLSHProjectionType(LSHProjectionType e) { + if (flatbuffers::IsOutRange(e, LSHProjectionType_UNKNOWN, LSHProjectionType_DENSE)) return ""; + const size_t index = static_cast(e); + return EnumNamesLSHProjectionType()[index]; +} + +enum FullyConnectedOptionsWeightsFormat : int8_t { + FullyConnectedOptionsWeightsFormat_DEFAULT = 0, + FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 = 1, + FullyConnectedOptionsWeightsFormat_MIN = 
FullyConnectedOptionsWeightsFormat_DEFAULT, + FullyConnectedOptionsWeightsFormat_MAX = FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 +}; + +inline const FullyConnectedOptionsWeightsFormat (&EnumValuesFullyConnectedOptionsWeightsFormat())[2] { + static const FullyConnectedOptionsWeightsFormat values[] = { + FullyConnectedOptionsWeightsFormat_DEFAULT, + FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8 + }; + return values; +} + +inline const char * const *EnumNamesFullyConnectedOptionsWeightsFormat() { + static const char * const names[3] = { + "DEFAULT", + "SHUFFLED4x16INT8", + nullptr + }; + return names; +} + +inline const char *EnumNameFullyConnectedOptionsWeightsFormat(FullyConnectedOptionsWeightsFormat e) { + if (flatbuffers::IsOutRange(e, FullyConnectedOptionsWeightsFormat_DEFAULT, FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8)) return ""; + const size_t index = static_cast(e); + return EnumNamesFullyConnectedOptionsWeightsFormat()[index]; +} + +enum LSTMKernelType : int8_t { + LSTMKernelType_FULL = 0, + LSTMKernelType_BASIC = 1, + LSTMKernelType_MIN = LSTMKernelType_FULL, + LSTMKernelType_MAX = LSTMKernelType_BASIC +}; + +inline const LSTMKernelType (&EnumValuesLSTMKernelType())[2] { + static const LSTMKernelType values[] = { + LSTMKernelType_FULL, + LSTMKernelType_BASIC + }; + return values; +} + +inline const char * const *EnumNamesLSTMKernelType() { + static const char * const names[3] = { + "FULL", + "BASIC", + nullptr + }; + return names; +} + +inline const char *EnumNameLSTMKernelType(LSTMKernelType e) { + if (flatbuffers::IsOutRange(e, LSTMKernelType_FULL, LSTMKernelType_BASIC)) return ""; + const size_t index = static_cast(e); + return EnumNamesLSTMKernelType()[index]; +} + +enum CombinerType : int8_t { + CombinerType_SUM = 0, + CombinerType_MEAN = 1, + CombinerType_SQRTN = 2, + CombinerType_MIN = CombinerType_SUM, + CombinerType_MAX = CombinerType_SQRTN +}; + +inline const CombinerType (&EnumValuesCombinerType())[3] { + static const CombinerType values[] = { + CombinerType_SUM, + CombinerType_MEAN, + CombinerType_SQRTN + }; + return values; +} + +inline const char * const *EnumNamesCombinerType() { + static const char * const names[4] = { + "SUM", + "MEAN", + "SQRTN", + nullptr + }; + return names; +} + +inline const char *EnumNameCombinerType(CombinerType e) { + if (flatbuffers::IsOutRange(e, CombinerType_SUM, CombinerType_SQRTN)) return ""; + const size_t index = static_cast(e); + return EnumNamesCombinerType()[index]; +} + +enum MirrorPadMode : int8_t { + MirrorPadMode_REFLECT = 0, + MirrorPadMode_SYMMETRIC = 1, + MirrorPadMode_MIN = MirrorPadMode_REFLECT, + MirrorPadMode_MAX = MirrorPadMode_SYMMETRIC +}; + +inline const MirrorPadMode (&EnumValuesMirrorPadMode())[2] { + static const MirrorPadMode values[] = { + MirrorPadMode_REFLECT, + MirrorPadMode_SYMMETRIC + }; + return values; +} + +inline const char * const *EnumNamesMirrorPadMode() { + static const char * const names[3] = { + "REFLECT", + "SYMMETRIC", + nullptr + }; + return names; +} + +inline const char *EnumNameMirrorPadMode(MirrorPadMode e) { + if (flatbuffers::IsOutRange(e, MirrorPadMode_REFLECT, MirrorPadMode_SYMMETRIC)) return ""; + const size_t index = static_cast(e); + return EnumNamesMirrorPadMode()[index]; +} + +enum CustomOptionsFormat : int8_t { + CustomOptionsFormat_FLEXBUFFERS = 0, + CustomOptionsFormat_MIN = CustomOptionsFormat_FLEXBUFFERS, + CustomOptionsFormat_MAX = CustomOptionsFormat_FLEXBUFFERS +}; + +inline const CustomOptionsFormat (&EnumValuesCustomOptionsFormat())[1] { + 
  static const CustomOptionsFormat values[] = {
+    CustomOptionsFormat_FLEXBUFFERS
+  };
+  return values;
+}
+
+inline const char * const *EnumNamesCustomOptionsFormat() {
+  static const char * const names[2] = {
+    "FLEXBUFFERS",
+    nullptr
+  };
+  return names;
+}
+
+inline const char *EnumNameCustomOptionsFormat(CustomOptionsFormat e) {
+  if (flatbuffers::IsOutRange(e, CustomOptionsFormat_FLEXBUFFERS, CustomOptionsFormat_FLEXBUFFERS)) return "";
+  const size_t index = static_cast<size_t>(e);
+  return EnumNamesCustomOptionsFormat()[index];
+}
+
+struct CustomQuantizationT : public flatbuffers::NativeTable {
+  typedef CustomQuantization TableType;
+  std::vector<uint8_t> custom{};
+};
+
+struct CustomQuantization FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef CustomQuantizationT NativeTableType;
+  typedef CustomQuantizationBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_CUSTOM = 4
+  };
+  const flatbuffers::Vector<uint8_t> *custom() const {
+    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_CUSTOM);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_CUSTOM) &&
+           verifier.VerifyVector(custom()) &&
+           verifier.EndTable();
+  }
+  CustomQuantizationT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<CustomQuantization> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CustomQuantizationBuilder {
+  typedef CustomQuantization Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_custom(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom) {
+    fbb_.AddOffset(CustomQuantization::VT_CUSTOM, custom);
+  }
+  explicit CustomQuantizationBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<CustomQuantization> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<CustomQuantization>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<CustomQuantization> CreateCustomQuantization(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> custom = 0) {
+  CustomQuantizationBuilder builder_(_fbb);
+  builder_.add_custom(custom);
+  return builder_.Finish();
+}
+
+inline flatbuffers::Offset<CustomQuantization> CreateCustomQuantizationDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<uint8_t> *custom = nullptr) {
+  if (custom) { _fbb.ForceVectorAlignment(custom->size(), sizeof(uint8_t), 16); }
+  auto custom__ = custom ?
_fbb.CreateVector(*custom) : 0; + return tflite::CreateCustomQuantization( + _fbb, + custom__); +} + +flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizationParametersT : public flatbuffers::NativeTable { + typedef QuantizationParameters TableType; + std::vector min{}; + std::vector max{}; + std::vector scale{}; + std::vector zero_point{}; + tflite::QuantizationDetailsUnion details{}; + int32_t quantized_dimension = 0; +}; + +struct QuantizationParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizationParametersT NativeTableType; + typedef QuantizationParametersBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MIN = 4, + VT_MAX = 6, + VT_SCALE = 8, + VT_ZERO_POINT = 10, + VT_DETAILS_TYPE = 12, + VT_DETAILS = 14, + VT_QUANTIZED_DIMENSION = 16 + }; + const flatbuffers::Vector *min() const { + return GetPointer *>(VT_MIN); + } + const flatbuffers::Vector *max() const { + return GetPointer *>(VT_MAX); + } + const flatbuffers::Vector *scale() const { + return GetPointer *>(VT_SCALE); + } + const flatbuffers::Vector *zero_point() const { + return GetPointer *>(VT_ZERO_POINT); + } + tflite::QuantizationDetails details_type() const { + return static_cast(GetField(VT_DETAILS_TYPE, 0)); + } + const void *details() const { + return GetPointer(VT_DETAILS); + } + template const T *details_as() const; + const tflite::CustomQuantization *details_as_CustomQuantization() const { + return details_type() == tflite::QuantizationDetails_CustomQuantization ? static_cast(details()) : nullptr; + } + int32_t quantized_dimension() const { + return GetField(VT_QUANTIZED_DIMENSION, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_MIN) && + verifier.VerifyVector(min()) && + VerifyOffset(verifier, VT_MAX) && + verifier.VerifyVector(max()) && + VerifyOffset(verifier, VT_SCALE) && + verifier.VerifyVector(scale()) && + VerifyOffset(verifier, VT_ZERO_POINT) && + verifier.VerifyVector(zero_point()) && + VerifyField(verifier, VT_DETAILS_TYPE, 1) && + VerifyOffset(verifier, VT_DETAILS) && + VerifyQuantizationDetails(verifier, details(), details_type()) && + VerifyField(verifier, VT_QUANTIZED_DIMENSION, 4) && + verifier.EndTable(); + } + QuantizationParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +template<> inline const tflite::CustomQuantization *QuantizationParameters::details_as() const { + return details_as_CustomQuantization(); +} + +struct QuantizationParametersBuilder { + typedef QuantizationParameters Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_min(flatbuffers::Offset> min) { + fbb_.AddOffset(QuantizationParameters::VT_MIN, min); + } + void add_max(flatbuffers::Offset> max) { + fbb_.AddOffset(QuantizationParameters::VT_MAX, max); + } + void add_scale(flatbuffers::Offset> scale) { + fbb_.AddOffset(QuantizationParameters::VT_SCALE, scale); + } + void add_zero_point(flatbuffers::Offset> zero_point) { + fbb_.AddOffset(QuantizationParameters::VT_ZERO_POINT, 
zero_point); + } + void add_details_type(tflite::QuantizationDetails details_type) { + fbb_.AddElement(QuantizationParameters::VT_DETAILS_TYPE, static_cast(details_type), 0); + } + void add_details(flatbuffers::Offset details) { + fbb_.AddOffset(QuantizationParameters::VT_DETAILS, details); + } + void add_quantized_dimension(int32_t quantized_dimension) { + fbb_.AddElement(QuantizationParameters::VT_QUANTIZED_DIMENSION, quantized_dimension, 0); + } + explicit QuantizationParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizationParameters( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> min = 0, + flatbuffers::Offset> max = 0, + flatbuffers::Offset> scale = 0, + flatbuffers::Offset> zero_point = 0, + tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE, + flatbuffers::Offset details = 0, + int32_t quantized_dimension = 0) { + QuantizationParametersBuilder builder_(_fbb); + builder_.add_quantized_dimension(quantized_dimension); + builder_.add_details(details); + builder_.add_zero_point(zero_point); + builder_.add_scale(scale); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_details_type(details_type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateQuantizationParametersDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *min = nullptr, + const std::vector *max = nullptr, + const std::vector *scale = nullptr, + const std::vector *zero_point = nullptr, + tflite::QuantizationDetails details_type = tflite::QuantizationDetails_NONE, + flatbuffers::Offset details = 0, + int32_t quantized_dimension = 0) { + auto min__ = min ? _fbb.CreateVector(*min) : 0; + auto max__ = max ? _fbb.CreateVector(*max) : 0; + auto scale__ = scale ? _fbb.CreateVector(*scale) : 0; + auto zero_point__ = zero_point ? 
_fbb.CreateVector(*zero_point) : 0; + return tflite::CreateQuantizationParameters( + _fbb, + min__, + max__, + scale__, + zero_point__, + details_type, + details, + quantized_dimension); +} + +flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Int32VectorT : public flatbuffers::NativeTable { + typedef Int32Vector TableType; + std::vector values{}; +}; + +struct Int32Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Int32VectorT NativeTableType; + typedef Int32VectorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VALUES = 4 + }; + const flatbuffers::Vector *values() const { + return GetPointer *>(VT_VALUES); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_VALUES) && + verifier.VerifyVector(values()) && + verifier.EndTable(); + } + Int32VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Int32VectorBuilder { + typedef Int32Vector Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values(flatbuffers::Offset> values) { + fbb_.AddOffset(Int32Vector::VT_VALUES, values); + } + explicit Int32VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateInt32Vector( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> values = 0) { + Int32VectorBuilder builder_(_fbb); + builder_.add_values(values); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateInt32VectorDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *values = nullptr) { + auto values__ = values ? 
_fbb.CreateVector<int32_t>(*values) : 0;
+  return tflite::CreateInt32Vector(
+      _fbb,
+      values__);
+}
+
+flatbuffers::Offset<Int32Vector> CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Uint16VectorT : public flatbuffers::NativeTable {
+  typedef Uint16Vector TableType;
+  std::vector<uint16_t> values{};
+};
+
+struct Uint16Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef Uint16VectorT NativeTableType;
+  typedef Uint16VectorBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_VALUES = 4
+  };
+  const flatbuffers::Vector<uint16_t> *values() const {
+    return GetPointer<const flatbuffers::Vector<uint16_t> *>(VT_VALUES);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_VALUES) &&
+           verifier.VerifyVector(values()) &&
+           verifier.EndTable();
+  }
+  Uint16VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<Uint16Vector> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Uint16VectorBuilder {
+  typedef Uint16Vector Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_values(flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values) {
+    fbb_.AddOffset(Uint16Vector::VT_VALUES, values);
+  }
+  explicit Uint16VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<Uint16Vector> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<Uint16Vector>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<Uint16Vector> CreateUint16Vector(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<flatbuffers::Vector<uint16_t>> values = 0) {
+  Uint16VectorBuilder builder_(_fbb);
+  builder_.add_values(values);
+  return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Uint16Vector> CreateUint16VectorDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<uint16_t> *values = nullptr) {
+  if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint16_t), 4); }
+  auto values__ = values ? _fbb.CreateVector<uint16_t>(*values) : 0;
+  return tflite::CreateUint16Vector(
+      _fbb,
+      values__);
+}
+
+flatbuffers::Offset<Uint16Vector> CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct Uint8VectorT : public flatbuffers::NativeTable {
+  typedef Uint8Vector TableType;
+  std::vector<uint8_t> values{};
+};
+
+struct Uint8Vector FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef Uint8VectorT NativeTableType;
+  typedef Uint8VectorBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_VALUES = 4
+  };
+  const flatbuffers::Vector<uint8_t> *values() const {
+    return GetPointer<const flatbuffers::Vector<uint8_t> *>(VT_VALUES);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_VALUES) &&
+           verifier.VerifyVector(values()) &&
+           verifier.EndTable();
+  }
+  Uint8VectorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<Uint8Vector> Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct Uint8VectorBuilder {
+  typedef Uint8Vector Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_values(flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values) {
+    fbb_.AddOffset(Uint8Vector::VT_VALUES, values);
+  }
+  explicit Uint8VectorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<Uint8Vector> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<Uint8Vector>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<Uint8Vector> CreateUint8Vector(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<flatbuffers::Vector<uint8_t>> values = 0) {
+  Uint8VectorBuilder builder_(_fbb);
+  builder_.add_values(values);
+  return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Uint8Vector> CreateUint8VectorDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const std::vector<uint8_t> *values = nullptr) {
+  if (values) { _fbb.ForceVectorAlignment(values->size(), sizeof(uint8_t), 4); }
+  auto values__ = values ?
_fbb.CreateVector(*values) : 0; + return tflite::CreateUint8Vector( + _fbb, + values__); +} + +flatbuffers::Offset CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DimensionMetadataT : public flatbuffers::NativeTable { + typedef DimensionMetadata TableType; + tflite::DimensionType format = tflite::DimensionType_DENSE; + int32_t dense_size = 0; + tflite::SparseIndexVectorUnion array_segments{}; + tflite::SparseIndexVectorUnion array_indices{}; +}; + +struct DimensionMetadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DimensionMetadataT NativeTableType; + typedef DimensionMetadataBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FORMAT = 4, + VT_DENSE_SIZE = 6, + VT_ARRAY_SEGMENTS_TYPE = 8, + VT_ARRAY_SEGMENTS = 10, + VT_ARRAY_INDICES_TYPE = 12, + VT_ARRAY_INDICES = 14 + }; + tflite::DimensionType format() const { + return static_cast(GetField(VT_FORMAT, 0)); + } + int32_t dense_size() const { + return GetField(VT_DENSE_SIZE, 0); + } + tflite::SparseIndexVector array_segments_type() const { + return static_cast(GetField(VT_ARRAY_SEGMENTS_TYPE, 0)); + } + const void *array_segments() const { + return GetPointer(VT_ARRAY_SEGMENTS); + } + template const T *array_segments_as() const; + const tflite::Int32Vector *array_segments_as_Int32Vector() const { + return array_segments_type() == tflite::SparseIndexVector_Int32Vector ? static_cast(array_segments()) : nullptr; + } + const tflite::Uint16Vector *array_segments_as_Uint16Vector() const { + return array_segments_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast(array_segments()) : nullptr; + } + const tflite::Uint8Vector *array_segments_as_Uint8Vector() const { + return array_segments_type() == tflite::SparseIndexVector_Uint8Vector ? static_cast(array_segments()) : nullptr; + } + tflite::SparseIndexVector array_indices_type() const { + return static_cast(GetField(VT_ARRAY_INDICES_TYPE, 0)); + } + const void *array_indices() const { + return GetPointer(VT_ARRAY_INDICES); + } + template const T *array_indices_as() const; + const tflite::Int32Vector *array_indices_as_Int32Vector() const { + return array_indices_type() == tflite::SparseIndexVector_Int32Vector ? static_cast(array_indices()) : nullptr; + } + const tflite::Uint16Vector *array_indices_as_Uint16Vector() const { + return array_indices_type() == tflite::SparseIndexVector_Uint16Vector ? static_cast(array_indices()) : nullptr; + } + const tflite::Uint8Vector *array_indices_as_Uint8Vector() const { + return array_indices_type() == tflite::SparseIndexVector_Uint8Vector ? 
static_cast(array_indices()) : nullptr; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FORMAT, 1) && + VerifyField(verifier, VT_DENSE_SIZE, 4) && + VerifyField(verifier, VT_ARRAY_SEGMENTS_TYPE, 1) && + VerifyOffset(verifier, VT_ARRAY_SEGMENTS) && + VerifySparseIndexVector(verifier, array_segments(), array_segments_type()) && + VerifyField(verifier, VT_ARRAY_INDICES_TYPE, 1) && + VerifyOffset(verifier, VT_ARRAY_INDICES) && + VerifySparseIndexVector(verifier, array_indices(), array_indices_type()) && + verifier.EndTable(); + } + DimensionMetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +template<> inline const tflite::Int32Vector *DimensionMetadata::array_segments_as() const { + return array_segments_as_Int32Vector(); +} + +template<> inline const tflite::Uint16Vector *DimensionMetadata::array_segments_as() const { + return array_segments_as_Uint16Vector(); +} + +template<> inline const tflite::Uint8Vector *DimensionMetadata::array_segments_as() const { + return array_segments_as_Uint8Vector(); +} + +template<> inline const tflite::Int32Vector *DimensionMetadata::array_indices_as() const { + return array_indices_as_Int32Vector(); +} + +template<> inline const tflite::Uint16Vector *DimensionMetadata::array_indices_as() const { + return array_indices_as_Uint16Vector(); +} + +template<> inline const tflite::Uint8Vector *DimensionMetadata::array_indices_as() const { + return array_indices_as_Uint8Vector(); +} + +struct DimensionMetadataBuilder { + typedef DimensionMetadata Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_format(tflite::DimensionType format) { + fbb_.AddElement(DimensionMetadata::VT_FORMAT, static_cast(format), 0); + } + void add_dense_size(int32_t dense_size) { + fbb_.AddElement(DimensionMetadata::VT_DENSE_SIZE, dense_size, 0); + } + void add_array_segments_type(tflite::SparseIndexVector array_segments_type) { + fbb_.AddElement(DimensionMetadata::VT_ARRAY_SEGMENTS_TYPE, static_cast(array_segments_type), 0); + } + void add_array_segments(flatbuffers::Offset array_segments) { + fbb_.AddOffset(DimensionMetadata::VT_ARRAY_SEGMENTS, array_segments); + } + void add_array_indices_type(tflite::SparseIndexVector array_indices_type) { + fbb_.AddElement(DimensionMetadata::VT_ARRAY_INDICES_TYPE, static_cast(array_indices_type), 0); + } + void add_array_indices(flatbuffers::Offset array_indices) { + fbb_.AddOffset(DimensionMetadata::VT_ARRAY_INDICES, array_indices); + } + explicit DimensionMetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDimensionMetadata( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::DimensionType format = tflite::DimensionType_DENSE, + int32_t dense_size = 0, + tflite::SparseIndexVector array_segments_type = tflite::SparseIndexVector_NONE, + flatbuffers::Offset array_segments = 0, + tflite::SparseIndexVector array_indices_type = tflite::SparseIndexVector_NONE, + flatbuffers::Offset array_indices = 0) { + 
DimensionMetadataBuilder builder_(_fbb); + builder_.add_array_indices(array_indices); + builder_.add_array_segments(array_segments); + builder_.add_dense_size(dense_size); + builder_.add_array_indices_type(array_indices_type); + builder_.add_array_segments_type(array_segments_type); + builder_.add_format(format); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SparsityParametersT : public flatbuffers::NativeTable { + typedef SparsityParameters TableType; + std::vector traversal_order{}; + std::vector block_map{}; + std::vector> dim_metadata{}; + SparsityParametersT() = default; + SparsityParametersT(const SparsityParametersT &o); + SparsityParametersT(SparsityParametersT&&) FLATBUFFERS_NOEXCEPT = default; + SparsityParametersT &operator=(SparsityParametersT o) FLATBUFFERS_NOEXCEPT; +}; + +struct SparsityParameters FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SparsityParametersT NativeTableType; + typedef SparsityParametersBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TRAVERSAL_ORDER = 4, + VT_BLOCK_MAP = 6, + VT_DIM_METADATA = 8 + }; + const flatbuffers::Vector *traversal_order() const { + return GetPointer *>(VT_TRAVERSAL_ORDER); + } + const flatbuffers::Vector *block_map() const { + return GetPointer *>(VT_BLOCK_MAP); + } + const flatbuffers::Vector> *dim_metadata() const { + return GetPointer> *>(VT_DIM_METADATA); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TRAVERSAL_ORDER) && + verifier.VerifyVector(traversal_order()) && + VerifyOffset(verifier, VT_BLOCK_MAP) && + verifier.VerifyVector(block_map()) && + VerifyOffset(verifier, VT_DIM_METADATA) && + verifier.VerifyVector(dim_metadata()) && + verifier.VerifyVectorOfTables(dim_metadata()) && + verifier.EndTable(); + } + SparsityParametersT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SparsityParametersBuilder { + typedef SparsityParameters Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_traversal_order(flatbuffers::Offset> traversal_order) { + fbb_.AddOffset(SparsityParameters::VT_TRAVERSAL_ORDER, traversal_order); + } + void add_block_map(flatbuffers::Offset> block_map) { + fbb_.AddOffset(SparsityParameters::VT_BLOCK_MAP, block_map); + } + void add_dim_metadata(flatbuffers::Offset>> dim_metadata) { + fbb_.AddOffset(SparsityParameters::VT_DIM_METADATA, dim_metadata); + } + explicit SparsityParametersBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSparsityParameters( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> traversal_order = 0, + flatbuffers::Offset> block_map = 0, + flatbuffers::Offset>> dim_metadata = 0) { + SparsityParametersBuilder builder_(_fbb); + builder_.add_dim_metadata(dim_metadata); + builder_.add_block_map(block_map); + 
builder_.add_traversal_order(traversal_order); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSparsityParametersDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *traversal_order = nullptr, + const std::vector *block_map = nullptr, + const std::vector> *dim_metadata = nullptr) { + auto traversal_order__ = traversal_order ? _fbb.CreateVector(*traversal_order) : 0; + auto block_map__ = block_map ? _fbb.CreateVector(*block_map) : 0; + auto dim_metadata__ = dim_metadata ? _fbb.CreateVector>(*dim_metadata) : 0; + return tflite::CreateSparsityParameters( + _fbb, + traversal_order__, + block_map__, + dim_metadata__); +} + +flatbuffers::Offset CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct VariantSubTypeT : public flatbuffers::NativeTable { + typedef VariantSubType TableType; + std::vector shape{}; + tflite::TensorType type = tflite::TensorType_FLOAT32; + bool has_rank = false; +}; + +struct VariantSubType FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef VariantSubTypeT NativeTableType; + typedef VariantSubTypeBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SHAPE = 4, + VT_TYPE = 6, + VT_HAS_RANK = 8 + }; + const flatbuffers::Vector *shape() const { + return GetPointer *>(VT_SHAPE); + } + tflite::TensorType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool has_rank() const { + return GetField(VT_HAS_RANK, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SHAPE) && + verifier.VerifyVector(shape()) && + VerifyField(verifier, VT_TYPE, 1) && + VerifyField(verifier, VT_HAS_RANK, 1) && + verifier.EndTable(); + } + VariantSubTypeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(VariantSubTypeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct VariantSubTypeBuilder { + typedef VariantSubType Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_shape(flatbuffers::Offset> shape) { + fbb_.AddOffset(VariantSubType::VT_SHAPE, shape); + } + void add_type(tflite::TensorType type) { + fbb_.AddElement(VariantSubType::VT_TYPE, static_cast(type), 0); + } + void add_has_rank(bool has_rank) { + fbb_.AddElement(VariantSubType::VT_HAS_RANK, static_cast(has_rank), 0); + } + explicit VariantSubTypeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateVariantSubType( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> shape = 0, + tflite::TensorType type = tflite::TensorType_FLOAT32, + bool has_rank = false) { + VariantSubTypeBuilder builder_(_fbb); + builder_.add_shape(shape); + builder_.add_has_rank(has_rank); + builder_.add_type(type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateVariantSubTypeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *shape = nullptr, + tflite::TensorType type = tflite::TensorType_FLOAT32, + bool has_rank = false) { + auto shape__ = shape ? 
_fbb.CreateVector(*shape) : 0; + return tflite::CreateVariantSubType( + _fbb, + shape__, + type, + has_rank); +} + +flatbuffers::Offset CreateVariantSubType(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TensorT : public flatbuffers::NativeTable { + typedef Tensor TableType; + std::vector shape{}; + tflite::TensorType type = tflite::TensorType_FLOAT32; + uint32_t buffer = 0; + std::string name{}; + std::unique_ptr quantization{}; + bool is_variable = false; + std::unique_ptr sparsity{}; + std::vector shape_signature{}; + bool has_rank = false; + std::vector> variant_tensors{}; + TensorT() = default; + TensorT(const TensorT &o); + TensorT(TensorT&&) FLATBUFFERS_NOEXCEPT = default; + TensorT &operator=(TensorT o) FLATBUFFERS_NOEXCEPT; +}; + +struct Tensor FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TensorT NativeTableType; + typedef TensorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SHAPE = 4, + VT_TYPE = 6, + VT_BUFFER = 8, + VT_NAME = 10, + VT_QUANTIZATION = 12, + VT_IS_VARIABLE = 14, + VT_SPARSITY = 16, + VT_SHAPE_SIGNATURE = 18, + VT_HAS_RANK = 20, + VT_VARIANT_TENSORS = 22 + }; + const flatbuffers::Vector *shape() const { + return GetPointer *>(VT_SHAPE); + } + tflite::TensorType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + uint32_t buffer() const { + return GetField(VT_BUFFER, 0); + } + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + const tflite::QuantizationParameters *quantization() const { + return GetPointer(VT_QUANTIZATION); + } + bool is_variable() const { + return GetField(VT_IS_VARIABLE, 0) != 0; + } + const tflite::SparsityParameters *sparsity() const { + return GetPointer(VT_SPARSITY); + } + const flatbuffers::Vector *shape_signature() const { + return GetPointer *>(VT_SHAPE_SIGNATURE); + } + bool has_rank() const { + return GetField(VT_HAS_RANK, 0) != 0; + } + const flatbuffers::Vector> *variant_tensors() const { + return GetPointer> *>(VT_VARIANT_TENSORS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SHAPE) && + verifier.VerifyVector(shape()) && + VerifyField(verifier, VT_TYPE, 1) && + VerifyField(verifier, VT_BUFFER, 4) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + VerifyOffset(verifier, VT_QUANTIZATION) && + verifier.VerifyTable(quantization()) && + VerifyField(verifier, VT_IS_VARIABLE, 1) && + VerifyOffset(verifier, VT_SPARSITY) && + verifier.VerifyTable(sparsity()) && + VerifyOffset(verifier, VT_SHAPE_SIGNATURE) && + verifier.VerifyVector(shape_signature()) && + VerifyField(verifier, VT_HAS_RANK, 1) && + VerifyOffset(verifier, VT_VARIANT_TENSORS) && + verifier.VerifyVector(variant_tensors()) && + verifier.VerifyVectorOfTables(variant_tensors()) && + verifier.EndTable(); + } + TensorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TensorBuilder { + typedef Tensor Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_shape(flatbuffers::Offset> shape) { + fbb_.AddOffset(Tensor::VT_SHAPE, shape); + } + void add_type(tflite::TensorType 
type) { + fbb_.AddElement(Tensor::VT_TYPE, static_cast(type), 0); + } + void add_buffer(uint32_t buffer) { + fbb_.AddElement(Tensor::VT_BUFFER, buffer, 0); + } + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(Tensor::VT_NAME, name); + } + void add_quantization(flatbuffers::Offset quantization) { + fbb_.AddOffset(Tensor::VT_QUANTIZATION, quantization); + } + void add_is_variable(bool is_variable) { + fbb_.AddElement(Tensor::VT_IS_VARIABLE, static_cast(is_variable), 0); + } + void add_sparsity(flatbuffers::Offset sparsity) { + fbb_.AddOffset(Tensor::VT_SPARSITY, sparsity); + } + void add_shape_signature(flatbuffers::Offset> shape_signature) { + fbb_.AddOffset(Tensor::VT_SHAPE_SIGNATURE, shape_signature); + } + void add_has_rank(bool has_rank) { + fbb_.AddElement(Tensor::VT_HAS_RANK, static_cast(has_rank), 0); + } + void add_variant_tensors(flatbuffers::Offset>> variant_tensors) { + fbb_.AddOffset(Tensor::VT_VARIANT_TENSORS, variant_tensors); + } + explicit TensorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTensor( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> shape = 0, + tflite::TensorType type = tflite::TensorType_FLOAT32, + uint32_t buffer = 0, + flatbuffers::Offset name = 0, + flatbuffers::Offset quantization = 0, + bool is_variable = false, + flatbuffers::Offset sparsity = 0, + flatbuffers::Offset> shape_signature = 0, + bool has_rank = false, + flatbuffers::Offset>> variant_tensors = 0) { + TensorBuilder builder_(_fbb); + builder_.add_variant_tensors(variant_tensors); + builder_.add_shape_signature(shape_signature); + builder_.add_sparsity(sparsity); + builder_.add_quantization(quantization); + builder_.add_name(name); + builder_.add_buffer(buffer); + builder_.add_shape(shape); + builder_.add_has_rank(has_rank); + builder_.add_is_variable(is_variable); + builder_.add_type(type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateTensorDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *shape = nullptr, + tflite::TensorType type = tflite::TensorType_FLOAT32, + uint32_t buffer = 0, + const char *name = nullptr, + flatbuffers::Offset quantization = 0, + bool is_variable = false, + flatbuffers::Offset sparsity = 0, + const std::vector *shape_signature = nullptr, + bool has_rank = false, + const std::vector> *variant_tensors = nullptr) { + auto shape__ = shape ? _fbb.CreateVector(*shape) : 0; + auto name__ = name ? _fbb.CreateString(name) : 0; + auto shape_signature__ = shape_signature ? _fbb.CreateVector(*shape_signature) : 0; + auto variant_tensors__ = variant_tensors ? 
_fbb.CreateVector>(*variant_tensors) : 0; + return tflite::CreateTensor( + _fbb, + shape__, + type, + buffer, + name__, + quantization, + is_variable, + sparsity, + shape_signature__, + has_rank, + variant_tensors__); +} + +flatbuffers::Offset CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Conv2DOptionsT : public flatbuffers::NativeTable { + typedef Conv2DOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_w = 0; + int32_t stride_h = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + int32_t dilation_w_factor = 1; + int32_t dilation_h_factor = 1; +}; + +struct Conv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Conv2DOptionsT NativeTableType; + typedef Conv2DOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FUSED_ACTIVATION_FUNCTION = 10, + VT_DILATION_W_FACTOR = 12, + VT_DILATION_H_FACTOR = 14 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_w_factor() const { + return GetField(VT_DILATION_W_FACTOR, 1); + } + int32_t dilation_h_factor() const { + return GetField(VT_DILATION_H_FACTOR, 1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_DILATION_W_FACTOR, 4) && + VerifyField(verifier, VT_DILATION_H_FACTOR, 4) && + verifier.EndTable(); + } + Conv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Conv2DOptionsBuilder { + typedef Conv2DOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(Conv2DOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(Conv2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(Conv2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(Conv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_dilation_w_factor(int32_t dilation_w_factor) { + fbb_.AddElement(Conv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) { + fbb_.AddElement(Conv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit Conv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end 
= fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv2DOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_w = 0, + int32_t stride_h = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + int32_t dilation_w_factor = 1, + int32_t dilation_h_factor = 1) { + Conv2DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Conv3DOptionsT : public flatbuffers::NativeTable { + typedef Conv3DOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_d = 0; + int32_t stride_w = 0; + int32_t stride_h = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + int32_t dilation_d_factor = 1; + int32_t dilation_w_factor = 1; + int32_t dilation_h_factor = 1; +}; + +struct Conv3DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Conv3DOptionsT NativeTableType; + typedef Conv3DOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_D = 6, + VT_STRIDE_W = 8, + VT_STRIDE_H = 10, + VT_FUSED_ACTIVATION_FUNCTION = 12, + VT_DILATION_D_FACTOR = 14, + VT_DILATION_W_FACTOR = 16, + VT_DILATION_H_FACTOR = 18 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_d() const { + return GetField(VT_STRIDE_D, 0); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_d_factor() const { + return GetField(VT_DILATION_D_FACTOR, 1); + } + int32_t dilation_w_factor() const { + return GetField(VT_DILATION_W_FACTOR, 1); + } + int32_t dilation_h_factor() const { + return GetField(VT_DILATION_H_FACTOR, 1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_D, 4) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_DILATION_D_FACTOR, 4) && + VerifyField(verifier, VT_DILATION_W_FACTOR, 4) && + VerifyField(verifier, VT_DILATION_H_FACTOR, 4) && + verifier.EndTable(); + } + Conv3DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Conv3DOptionsBuilder { + typedef Conv3DOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + 
fbb_.AddElement(Conv3DOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_d(int32_t stride_d) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_D, stride_d, 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(Conv3DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(Conv3DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_dilation_d_factor(int32_t dilation_d_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_D_FACTOR, dilation_d_factor, 1); + } + void add_dilation_w_factor(int32_t dilation_w_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) { + fbb_.AddElement(Conv3DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit Conv3DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConv3DOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_d = 0, + int32_t stride_w = 0, + int32_t stride_h = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + int32_t dilation_d_factor = 1, + int32_t dilation_w_factor = 1, + int32_t dilation_h_factor = 1) { + Conv3DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + builder_.add_dilation_d_factor(dilation_d_factor); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_stride_d(stride_d); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +flatbuffers::Offset CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Pool2DOptionsT : public flatbuffers::NativeTable { + typedef Pool2DOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_w = 0; + int32_t stride_h = 0; + int32_t filter_width = 0; + int32_t filter_height = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct Pool2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Pool2DOptionsT NativeTableType; + typedef Pool2DOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FILTER_WIDTH = 10, + VT_FILTER_HEIGHT = 12, + VT_FUSED_ACTIVATION_FUNCTION = 14 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + int32_t filter_width() const { + return GetField(VT_FILTER_WIDTH, 0); + } + int32_t filter_height() const { + return GetField(VT_FILTER_HEIGHT, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool 
Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_FILTER_WIDTH, 4) && + VerifyField(verifier, VT_FILTER_HEIGHT, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); + } + Pool2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Pool2DOptionsBuilder { + typedef Pool2DOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(Pool2DOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(Pool2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(Pool2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_filter_width(int32_t filter_width) { + fbb_.AddElement(Pool2DOptions::VT_FILTER_WIDTH, filter_width, 0); + } + void add_filter_height(int32_t filter_height) { + fbb_.AddElement(Pool2DOptions::VT_FILTER_HEIGHT, filter_height, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(Pool2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + explicit Pool2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePool2DOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_w = 0, + int32_t stride_h = 0, + int32_t filter_width = 0, + int32_t filter_height = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + Pool2DOptionsBuilder builder_(_fbb); + builder_.add_filter_height(filter_height); + builder_.add_filter_width(filter_width); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +flatbuffers::Offset CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DepthwiseConv2DOptionsT : public flatbuffers::NativeTable { + typedef DepthwiseConv2DOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_w = 0; + int32_t stride_h = 0; + int32_t depth_multiplier = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + int32_t dilation_w_factor = 1; + int32_t dilation_h_factor = 1; +}; + +struct DepthwiseConv2DOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DepthwiseConv2DOptionsT NativeTableType; + typedef DepthwiseConv2DOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_DEPTH_MULTIPLIER = 10, + 
VT_FUSED_ACTIVATION_FUNCTION = 12, + VT_DILATION_W_FACTOR = 14, + VT_DILATION_H_FACTOR = 16 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return GetField(VT_STRIDE_H, 0); + } + int32_t depth_multiplier() const { + return GetField(VT_DEPTH_MULTIPLIER, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + int32_t dilation_w_factor() const { + return GetField(VT_DILATION_W_FACTOR, 1); + } + int32_t dilation_h_factor() const { + return GetField(VT_DILATION_H_FACTOR, 1); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_DEPTH_MULTIPLIER, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_DILATION_W_FACTOR, 4) && + VerifyField(verifier, VT_DILATION_H_FACTOR, 4) && + verifier.EndTable(); + } + DepthwiseConv2DOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DepthwiseConv2DOptionsBuilder { + typedef DepthwiseConv2DOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_STRIDE_H, stride_h, 0); + } + void add_depth_multiplier(int32_t depth_multiplier) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_DEPTH_MULTIPLIER, depth_multiplier, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_dilation_w_factor(int32_t dilation_w_factor) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_DILATION_W_FACTOR, dilation_w_factor, 1); + } + void add_dilation_h_factor(int32_t dilation_h_factor) { + fbb_.AddElement(DepthwiseConv2DOptions::VT_DILATION_H_FACTOR, dilation_h_factor, 1); + } + explicit DepthwiseConv2DOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDepthwiseConv2DOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_w = 0, + int32_t stride_h = 0, + int32_t depth_multiplier = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + int32_t dilation_w_factor = 1, + int32_t dilation_h_factor = 1) { + DepthwiseConv2DOptionsBuilder builder_(_fbb); + builder_.add_dilation_h_factor(dilation_h_factor); + builder_.add_dilation_w_factor(dilation_w_factor); + 
  builder_.add_depth_multiplier(depth_multiplier);
+  builder_.add_stride_h(stride_h);
+  builder_.add_stride_w(stride_w);
+  builder_.add_fused_activation_function(fused_activation_function);
+  builder_.add_padding(padding);
+  return builder_.Finish();
+}
+
+flatbuffers::Offset<DepthwiseConv2DOptions> CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ConcatEmbeddingsOptionsT : public flatbuffers::NativeTable {
+  typedef ConcatEmbeddingsOptions TableType;
+  int32_t num_channels = 0;
+  std::vector<int32_t> num_columns_per_channel{};
+  std::vector<int32_t> embedding_dim_per_channel{};
+};
+
+struct ConcatEmbeddingsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef ConcatEmbeddingsOptionsT NativeTableType;
+  typedef ConcatEmbeddingsOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_NUM_CHANNELS = 4,
+    VT_NUM_COLUMNS_PER_CHANNEL = 6,
+    VT_EMBEDDING_DIM_PER_CHANNEL = 8
+  };
+  int32_t num_channels() const {
+    return GetField<int32_t>(VT_NUM_CHANNELS, 0);
+  }
+  const flatbuffers::Vector<int32_t> *num_columns_per_channel() const {
+    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_NUM_COLUMNS_PER_CHANNEL);
+  }
+  const flatbuffers::Vector<int32_t> *embedding_dim_per_channel() const {
+    return GetPointer<const flatbuffers::Vector<int32_t> *>(VT_EMBEDDING_DIM_PER_CHANNEL);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<int32_t>(verifier, VT_NUM_CHANNELS, 4) &&
+           VerifyOffset(verifier, VT_NUM_COLUMNS_PER_CHANNEL) &&
+           verifier.VerifyVector(num_columns_per_channel()) &&
+           VerifyOffset(verifier, VT_EMBEDDING_DIM_PER_CHANNEL) &&
+           verifier.VerifyVector(embedding_dim_per_channel()) &&
+           verifier.EndTable();
+  }
+  ConcatEmbeddingsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<ConcatEmbeddingsOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ConcatEmbeddingsOptionsBuilder {
+  typedef ConcatEmbeddingsOptions Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_num_channels(int32_t num_channels) {
+    fbb_.AddElement<int32_t>(ConcatEmbeddingsOptions::VT_NUM_CHANNELS, num_channels, 0);
+  }
+  void add_num_columns_per_channel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel) {
+    fbb_.AddOffset(ConcatEmbeddingsOptions::VT_NUM_COLUMNS_PER_CHANNEL, num_columns_per_channel);
+  }
+  void add_embedding_dim_per_channel(flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel) {
+    fbb_.AddOffset(ConcatEmbeddingsOptions::VT_EMBEDDING_DIM_PER_CHANNEL, embedding_dim_per_channel);
+  }
+  explicit ConcatEmbeddingsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<ConcatEmbeddingsOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<ConcatEmbeddingsOptions>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<ConcatEmbeddingsOptions> CreateConcatEmbeddingsOptions(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    int32_t num_channels = 0,
+    flatbuffers::Offset<flatbuffers::Vector<int32_t>> num_columns_per_channel = 0,
+    flatbuffers::Offset<flatbuffers::Vector<int32_t>> embedding_dim_per_channel = 0) {
+  ConcatEmbeddingsOptionsBuilder builder_(_fbb);
+  builder_.add_embedding_dim_per_channel(embedding_dim_per_channel);
+  builder_.add_num_columns_per_channel(num_columns_per_channel);
+  builder_.add_num_channels(num_channels);
+  return builder_.Finish();
+}
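A minimal usage sketch of the generated ConcatEmbeddingsOptions API above, for orientation only; it is not part of the generated header or of this patch. The include paths and the GetRoot read-back are assumptions about the vendored FlatBuffers runtime, inferred from the signatures in this hunk.

// Sketch (assumption, not patch content): build a ConcatEmbeddingsOptions table
// with the generated builder API shown above, then read the fields back.
#include <cstdint>
#include <vector>
#include "flatbuffers/flatbuffers.h"   // assumed path to the vendored FlatBuffers runtime
#include "schema_generated.h"          // hypothetical include of the generated header in this hunk

int main() {
  flatbuffers::FlatBufferBuilder fbb;
  std::vector<int32_t> cols = {4, 8};
  std::vector<int32_t> dims = {16, 32};

  // Child vectors must be serialized before the table that references them.
  auto cols_off = fbb.CreateVector<int32_t>(cols);
  auto dims_off = fbb.CreateVector<int32_t>(dims);
  auto opts = tflite::CreateConcatEmbeddingsOptions(fbb, /*num_channels=*/2, cols_off, dims_off);
  fbb.Finish(opts);

  // Read back directly from the finished buffer (zero-copy accessors).
  const auto *read = flatbuffers::GetRoot<tflite::ConcatEmbeddingsOptions>(fbb.GetBufferPointer());
  int32_t channels = read->num_channels();                       // 2
  int32_t first_cols = read->num_columns_per_channel()->Get(0);  // 4
  (void)channels; (void)first_cols;
  return 0;
}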
+inline flatbuffers::Offset CreateConcatEmbeddingsOptionsDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_channels = 0, + const std::vector *num_columns_per_channel = nullptr, + const std::vector *embedding_dim_per_channel = nullptr) { + auto num_columns_per_channel__ = num_columns_per_channel ? _fbb.CreateVector(*num_columns_per_channel) : 0; + auto embedding_dim_per_channel__ = embedding_dim_per_channel ? _fbb.CreateVector(*embedding_dim_per_channel) : 0; + return tflite::CreateConcatEmbeddingsOptions( + _fbb, + num_channels, + num_columns_per_channel__, + embedding_dim_per_channel__); +} + +flatbuffers::Offset CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LSHProjectionOptionsT : public flatbuffers::NativeTable { + typedef LSHProjectionOptions TableType; + tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN; +}; + +struct LSHProjectionOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LSHProjectionOptionsT NativeTableType; + typedef LSHProjectionOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TYPE = 4 + }; + tflite::LSHProjectionType type() const { + return static_cast(GetField(VT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TYPE, 1) && + verifier.EndTable(); + } + LSHProjectionOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LSHProjectionOptionsBuilder { + typedef LSHProjectionOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_type(tflite::LSHProjectionType type) { + fbb_.AddElement(LSHProjectionOptions::VT_TYPE, static_cast(type), 0); + } + explicit LSHProjectionOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLSHProjectionOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::LSHProjectionType type = tflite::LSHProjectionType_UNKNOWN) { + LSHProjectionOptionsBuilder builder_(_fbb); + builder_.add_type(type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SVDFOptionsT : public flatbuffers::NativeTable { + typedef SVDFOptions TableType; + int32_t rank = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool asymmetric_quantize_inputs = false; +}; + +struct SVDFOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SVDFOptionsT NativeTableType; + typedef SVDFOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_RANK = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + int32_t rank() const { + return GetField(VT_RANK, 0); + } + tflite::ActivationFunctionType 
fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_RANK, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + SVDFOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SVDFOptionsBuilder { + typedef SVDFOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_rank(int32_t rank) { + fbb_.AddElement(SVDFOptions::VT_RANK, rank, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(SVDFOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(SVDFOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit SVDFOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSVDFOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t rank = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) { + SVDFOptionsBuilder builder_(_fbb); + builder_.add_rank(rank); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RNNOptionsT : public flatbuffers::NativeTable { + typedef RNNOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool asymmetric_quantize_inputs = false; +}; + +struct RNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RNNOptionsT NativeTableType; + typedef RNNOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 6 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + RNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = 
nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RNNOptionsBuilder { + typedef RNNOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(RNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(RNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit RNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRNNOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) { + RNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SequenceRNNOptionsT : public flatbuffers::NativeTable { + typedef SequenceRNNOptions TableType; + bool time_major = false; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool asymmetric_quantize_inputs = false; +}; + +struct SequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SequenceRNNOptionsT NativeTableType; + typedef SequenceRNNOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TIME_MAJOR = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + bool time_major() const { + return GetField(VT_TIME_MAJOR, 0) != 0; + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TIME_MAJOR, 1) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + SequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SequenceRNNOptionsBuilder { + typedef SequenceRNNOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_time_major(bool time_major) { + fbb_.AddElement(SequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + 
fbb_.AddElement(SequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(SequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit SequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSequenceRNNOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool time_major = false, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool asymmetric_quantize_inputs = false) { + SequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_time_major(time_major); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BidirectionalSequenceRNNOptionsT : public flatbuffers::NativeTable { + typedef BidirectionalSequenceRNNOptions TableType; + bool time_major = false; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool merge_outputs = false; + bool asymmetric_quantize_inputs = false; +}; + +struct BidirectionalSequenceRNNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BidirectionalSequenceRNNOptionsT NativeTableType; + typedef BidirectionalSequenceRNNOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TIME_MAJOR = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6, + VT_MERGE_OUTPUTS = 8, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 + }; + bool time_major() const { + return GetField(VT_TIME_MAJOR, 0) != 0; + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool merge_outputs() const { + return GetField(VT_MERGE_OUTPUTS, 0) != 0; + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TIME_MAJOR, 1) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_MERGE_OUTPUTS, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + BidirectionalSequenceRNNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BidirectionalSequenceRNNOptionsBuilder { + typedef BidirectionalSequenceRNNOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_time_major(bool time_major) { + fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_TIME_MAJOR, static_cast(time_major), 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType 
fused_activation_function) { + fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_merge_outputs(bool merge_outputs) { + fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(BidirectionalSequenceRNNOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit BidirectionalSequenceRNNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool time_major = false, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool merge_outputs = false, + bool asymmetric_quantize_inputs = false) { + BidirectionalSequenceRNNOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_time_major(time_major); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FullyConnectedOptionsT : public flatbuffers::NativeTable { + typedef FullyConnectedOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT; + bool keep_num_dims = false; + bool asymmetric_quantize_inputs = false; +}; + +struct FullyConnectedOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FullyConnectedOptionsT NativeTableType; + typedef FullyConnectedOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_WEIGHTS_FORMAT = 6, + VT_KEEP_NUM_DIMS = 8, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 10 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + tflite::FullyConnectedOptionsWeightsFormat weights_format() const { + return static_cast(GetField(VT_WEIGHTS_FORMAT, 0)); + } + bool keep_num_dims() const { + return GetField(VT_KEEP_NUM_DIMS, 0) != 0; + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_WEIGHTS_FORMAT, 1) && + VerifyField(verifier, VT_KEEP_NUM_DIMS, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + FullyConnectedOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = 
nullptr); +}; + +struct FullyConnectedOptionsBuilder { + typedef FullyConnectedOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(FullyConnectedOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_weights_format(tflite::FullyConnectedOptionsWeightsFormat weights_format) { + fbb_.AddElement(FullyConnectedOptions::VT_WEIGHTS_FORMAT, static_cast(weights_format), 0); + } + void add_keep_num_dims(bool keep_num_dims) { + fbb_.AddElement(FullyConnectedOptions::VT_KEEP_NUM_DIMS, static_cast(keep_num_dims), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(FullyConnectedOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit FullyConnectedOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFullyConnectedOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + tflite::FullyConnectedOptionsWeightsFormat weights_format = tflite::FullyConnectedOptionsWeightsFormat_DEFAULT, + bool keep_num_dims = false, + bool asymmetric_quantize_inputs = false) { + FullyConnectedOptionsBuilder builder_(_fbb); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_keep_num_dims(keep_num_dims); + builder_.add_weights_format(weights_format); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SoftmaxOptionsT : public flatbuffers::NativeTable { + typedef SoftmaxOptions TableType; + float beta = 0.0f; +}; + +struct SoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SoftmaxOptionsT NativeTableType; + typedef SoftmaxOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BETA = 4 + }; + float beta() const { + return GetField(VT_BETA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BETA, 4) && + verifier.EndTable(); + } + SoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SoftmaxOptionsBuilder { + typedef SoftmaxOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_beta(float beta) { + fbb_.AddElement(SoftmaxOptions::VT_BETA, beta, 0.0f); + } + explicit SoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSoftmaxOptions( + 
flatbuffers::FlatBufferBuilder &_fbb, + float beta = 0.0f) { + SoftmaxOptionsBuilder builder_(_fbb); + builder_.add_beta(beta); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ConcatenationOptionsT : public flatbuffers::NativeTable { + typedef ConcatenationOptions TableType; + int32_t axis = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct ConcatenationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ConcatenationOptionsT NativeTableType; + typedef ConcatenationOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_FUSED_ACTIVATION_FUNCTION = 6 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); + } + ConcatenationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ConcatenationOptionsBuilder { + typedef ConcatenationOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(ConcatenationOptions::VT_AXIS, axis, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(ConcatenationOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + explicit ConcatenationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateConcatenationOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + ConcatenationOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AddOptionsT : public flatbuffers::NativeTable { + typedef AddOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool pot_scale_int16 = true; +}; + +struct AddOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AddOptionsT NativeTableType; + typedef AddOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_POT_SCALE_INT16 = 6 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return 
static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool pot_scale_int16() const { + return GetField(VT_POT_SCALE_INT16, 1) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_POT_SCALE_INT16, 1) && + verifier.EndTable(); + } + AddOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AddOptionsBuilder { + typedef AddOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(AddOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_pot_scale_int16(bool pot_scale_int16) { + fbb_.AddElement(AddOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); + } + explicit AddOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAddOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool pot_scale_int16 = true) { + AddOptionsBuilder builder_(_fbb); + builder_.add_pot_scale_int16(pot_scale_int16); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MulOptionsT : public flatbuffers::NativeTable { + typedef MulOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct MulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MulOptionsT NativeTableType; + typedef MulOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); + } + MulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MulOptionsBuilder { + typedef MulOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(MulOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + explicit MulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : 
fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMulOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + MulOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct L2NormOptionsT : public flatbuffers::NativeTable { + typedef L2NormOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct L2NormOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef L2NormOptionsT NativeTableType; + typedef L2NormOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); + } + L2NormOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct L2NormOptionsBuilder { + typedef L2NormOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(L2NormOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + explicit L2NormOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateL2NormOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + L2NormOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LocalResponseNormalizationOptionsT : public flatbuffers::NativeTable { + typedef LocalResponseNormalizationOptions TableType; + int32_t radius = 0; + float bias = 0.0f; + float alpha = 0.0f; + float beta = 0.0f; +}; + +struct LocalResponseNormalizationOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LocalResponseNormalizationOptionsT NativeTableType; + typedef LocalResponseNormalizationOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_RADIUS = 4, + VT_BIAS = 6, + VT_ALPHA = 8, + VT_BETA = 10 + }; + int32_t radius() const { + return GetField(VT_RADIUS, 0); + 
} + float bias() const { + return GetField(VT_BIAS, 0.0f); + } + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + float beta() const { + return GetField(VT_BETA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_RADIUS, 4) && + VerifyField(verifier, VT_BIAS, 4) && + VerifyField(verifier, VT_ALPHA, 4) && + VerifyField(verifier, VT_BETA, 4) && + verifier.EndTable(); + } + LocalResponseNormalizationOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LocalResponseNormalizationOptionsBuilder { + typedef LocalResponseNormalizationOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_radius(int32_t radius) { + fbb_.AddElement(LocalResponseNormalizationOptions::VT_RADIUS, radius, 0); + } + void add_bias(float bias) { + fbb_.AddElement(LocalResponseNormalizationOptions::VT_BIAS, bias, 0.0f); + } + void add_alpha(float alpha) { + fbb_.AddElement(LocalResponseNormalizationOptions::VT_ALPHA, alpha, 0.0f); + } + void add_beta(float beta) { + fbb_.AddElement(LocalResponseNormalizationOptions::VT_BETA, beta, 0.0f); + } + explicit LocalResponseNormalizationOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLocalResponseNormalizationOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t radius = 0, + float bias = 0.0f, + float alpha = 0.0f, + float beta = 0.0f) { + LocalResponseNormalizationOptionsBuilder builder_(_fbb); + builder_.add_beta(beta); + builder_.add_alpha(alpha); + builder_.add_bias(bias); + builder_.add_radius(radius); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LSTMOptionsT : public flatbuffers::NativeTable { + typedef LSTMOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + float cell_clip = 0.0f; + float proj_clip = 0.0f; + tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL; + bool asymmetric_quantize_inputs = false; +}; + +struct LSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LSTMOptionsT NativeTableType; + typedef LSTMOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_KERNEL_TYPE = 10, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 12 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { + return GetField(VT_CELL_CLIP, 0.0f); + } + float proj_clip() const { + return GetField(VT_PROJ_CLIP, 0.0f); + } + tflite::LSTMKernelType kernel_type() const { + return static_cast(GetField(VT_KERNEL_TYPE, 0)); + } + bool asymmetric_quantize_inputs() const { + return 
GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_CELL_CLIP, 4) && + VerifyField(verifier, VT_PROJ_CLIP, 4) && + VerifyField(verifier, VT_KERNEL_TYPE, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + LSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LSTMOptionsBuilder { + typedef LSTMOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(LSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) { + fbb_.AddElement(LSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) { + fbb_.AddElement(LSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_kernel_type(tflite::LSTMKernelType kernel_type) { + fbb_.AddElement(LSTMOptions::VT_KERNEL_TYPE, static_cast(kernel_type), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(LSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit LSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLSTMOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + float cell_clip = 0.0f, + float proj_clip = 0.0f, + tflite::LSTMKernelType kernel_type = tflite::LSTMKernelType_FULL, + bool asymmetric_quantize_inputs = false) { + LSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_kernel_type(kernel_type); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UnidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { + typedef UnidirectionalSequenceLSTMOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + float cell_clip = 0.0f; + float proj_clip = 0.0f; + bool time_major = false; + bool asymmetric_quantize_inputs = false; + bool diagonal_recurrent_tensors = false; +}; + +struct UnidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnidirectionalSequenceLSTMOptionsT NativeTableType; + typedef UnidirectionalSequenceLSTMOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_TIME_MAJOR = 10, + 
VT_ASYMMETRIC_QUANTIZE_INPUTS = 12, + VT_DIAGONAL_RECURRENT_TENSORS = 14 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { + return GetField(VT_CELL_CLIP, 0.0f); + } + float proj_clip() const { + return GetField(VT_PROJ_CLIP, 0.0f); + } + bool time_major() const { + return GetField(VT_TIME_MAJOR, 0) != 0; + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool diagonal_recurrent_tensors() const { + return GetField(VT_DIAGONAL_RECURRENT_TENSORS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_CELL_CLIP, 4) && + VerifyField(verifier, VT_PROJ_CLIP, 4) && + VerifyField(verifier, VT_TIME_MAJOR, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + VerifyField(verifier, VT_DIAGONAL_RECURRENT_TENSORS, 1) && + verifier.EndTable(); + } + UnidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnidirectionalSequenceLSTMOptionsBuilder { + typedef UnidirectionalSequenceLSTMOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_time_major(bool time_major) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + void add_diagonal_recurrent_tensors(bool diagonal_recurrent_tensors) { + fbb_.AddElement(UnidirectionalSequenceLSTMOptions::VT_DIAGONAL_RECURRENT_TENSORS, static_cast(diagonal_recurrent_tensors), 0); + } + explicit UnidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + float cell_clip = 0.0f, + float proj_clip = 0.0f, + bool time_major = false, + bool asymmetric_quantize_inputs = false, + bool diagonal_recurrent_tensors = false) { + UnidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + 
builder_.add_diagonal_recurrent_tensors(diagonal_recurrent_tensors); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_time_major(time_major); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BidirectionalSequenceLSTMOptionsT : public flatbuffers::NativeTable { + typedef BidirectionalSequenceLSTMOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + float cell_clip = 0.0f; + float proj_clip = 0.0f; + bool merge_outputs = false; + bool time_major = true; + bool asymmetric_quantize_inputs = false; +}; + +struct BidirectionalSequenceLSTMOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BidirectionalSequenceLSTMOptionsT NativeTableType; + typedef BidirectionalSequenceLSTMOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_CELL_CLIP = 6, + VT_PROJ_CLIP = 8, + VT_MERGE_OUTPUTS = 10, + VT_TIME_MAJOR = 12, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 14 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + float cell_clip() const { + return GetField(VT_CELL_CLIP, 0.0f); + } + float proj_clip() const { + return GetField(VT_PROJ_CLIP, 0.0f); + } + bool merge_outputs() const { + return GetField(VT_MERGE_OUTPUTS, 0) != 0; + } + bool time_major() const { + return GetField(VT_TIME_MAJOR, 1) != 0; + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_CELL_CLIP, 4) && + VerifyField(verifier, VT_PROJ_CLIP, 4) && + VerifyField(verifier, VT_MERGE_OUTPUTS, 1) && + VerifyField(verifier, VT_TIME_MAJOR, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + BidirectionalSequenceLSTMOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BidirectionalSequenceLSTMOptionsBuilder { + typedef BidirectionalSequenceLSTMOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_cell_clip(float cell_clip) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_CELL_CLIP, cell_clip, 0.0f); + } + void add_proj_clip(float proj_clip) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_PROJ_CLIP, proj_clip, 0.0f); + } + void add_merge_outputs(bool merge_outputs) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_MERGE_OUTPUTS, static_cast(merge_outputs), 0); + } + void add_time_major(bool 
time_major) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_TIME_MAJOR, static_cast(time_major), 1); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(BidirectionalSequenceLSTMOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit BidirectionalSequenceLSTMOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + float cell_clip = 0.0f, + float proj_clip = 0.0f, + bool merge_outputs = false, + bool time_major = true, + bool asymmetric_quantize_inputs = false) { + BidirectionalSequenceLSTMOptionsBuilder builder_(_fbb); + builder_.add_proj_clip(proj_clip); + builder_.add_cell_clip(cell_clip); + builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_time_major(time_major); + builder_.add_merge_outputs(merge_outputs); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ResizeBilinearOptionsT : public flatbuffers::NativeTable { + typedef ResizeBilinearOptions TableType; + bool align_corners = false; + bool half_pixel_centers = false; +}; + +struct ResizeBilinearOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ResizeBilinearOptionsT NativeTableType; + typedef ResizeBilinearOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALIGN_CORNERS = 8, + VT_HALF_PIXEL_CENTERS = 10 + }; + bool align_corners() const { + return GetField(VT_ALIGN_CORNERS, 0) != 0; + } + bool half_pixel_centers() const { + return GetField(VT_HALF_PIXEL_CENTERS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALIGN_CORNERS, 1) && + VerifyField(verifier, VT_HALF_PIXEL_CENTERS, 1) && + verifier.EndTable(); + } + ResizeBilinearOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ResizeBilinearOptionsBuilder { + typedef ResizeBilinearOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_align_corners(bool align_corners) { + fbb_.AddElement(ResizeBilinearOptions::VT_ALIGN_CORNERS, static_cast(align_corners), 0); + } + void add_half_pixel_centers(bool half_pixel_centers) { + fbb_.AddElement(ResizeBilinearOptions::VT_HALF_PIXEL_CENTERS, static_cast(half_pixel_centers), 0); + } + explicit ResizeBilinearOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline 
flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    bool align_corners = false,
+    bool half_pixel_centers = false) {
+  ResizeBilinearOptionsBuilder builder_(_fbb);
+  builder_.add_half_pixel_centers(half_pixel_centers);
+  builder_.add_align_corners(align_corners);
+  return builder_.Finish();
+}
+
+flatbuffers::Offset<ResizeBilinearOptions> CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct ResizeNearestNeighborOptionsT : public flatbuffers::NativeTable {
+  typedef ResizeNearestNeighborOptions TableType;
+  bool align_corners = false;
+  bool half_pixel_centers = false;
+};
+
+struct ResizeNearestNeighborOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef ResizeNearestNeighborOptionsT NativeTableType;
+  typedef ResizeNearestNeighborOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_ALIGN_CORNERS = 4,
+    VT_HALF_PIXEL_CENTERS = 6
+  };
+  bool align_corners() const {
+    return GetField<uint8_t>(VT_ALIGN_CORNERS, 0) != 0;
+  }
+  bool half_pixel_centers() const {
+    return GetField<uint8_t>(VT_HALF_PIXEL_CENTERS, 0) != 0;
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<uint8_t>(verifier, VT_ALIGN_CORNERS, 1) &&
+           VerifyField<uint8_t>(verifier, VT_HALF_PIXEL_CENTERS, 1) &&
+           verifier.EndTable();
+  }
+  ResizeNearestNeighborOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<ResizeNearestNeighborOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct ResizeNearestNeighborOptionsBuilder {
+  typedef ResizeNearestNeighborOptions Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_align_corners(bool align_corners) {
+    fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_ALIGN_CORNERS, static_cast<uint8_t>(align_corners), 0);
+  }
+  void add_half_pixel_centers(bool half_pixel_centers) {
+    fbb_.AddElement<uint8_t>(ResizeNearestNeighborOptions::VT_HALF_PIXEL_CENTERS, static_cast<uint8_t>(half_pixel_centers), 0);
+  }
+  explicit ResizeNearestNeighborOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<ResizeNearestNeighborOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<ResizeNearestNeighborOptions>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    bool align_corners = false,
+    bool half_pixel_centers = false) {
+  ResizeNearestNeighborOptionsBuilder builder_(_fbb);
+  builder_.add_half_pixel_centers(half_pixel_centers);
+  builder_.add_align_corners(align_corners);
+  return builder_.Finish();
+}
+
+flatbuffers::Offset<ResizeNearestNeighborOptions> CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct CallOptionsT : public flatbuffers::NativeTable {
+  typedef CallOptions TableType;
+  uint32_t subgraph = 0;
+};
+
+struct CallOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef CallOptionsT NativeTableType;
+  typedef CallOptionsBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_SUBGRAPH = 4
+  };
+  uint32_t subgraph() const {
+    return GetField<uint32_t>(VT_SUBGRAPH, 0);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyField<uint32_t>(verifier, VT_SUBGRAPH, 4) &&
+           verifier.EndTable();
+  }
+  CallOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<CallOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct CallOptionsBuilder {
+  typedef CallOptions Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_subgraph(uint32_t subgraph) {
+    fbb_.AddElement<uint32_t>(CallOptions::VT_SUBGRAPH, subgraph, 0);
+  }
+  explicit CallOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<CallOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<CallOptions>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<CallOptions> CreateCallOptions(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    uint32_t subgraph = 0) {
+  CallOptionsBuilder builder_(_fbb);
+  builder_.add_subgraph(subgraph);
+  return builder_.Finish();
+}
+
+flatbuffers::Offset<CallOptions> CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct PadOptionsT : public flatbuffers::NativeTable {
+  typedef PadOptions TableType;
+};
+
+struct PadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef PadOptionsT NativeTableType;
+  typedef PadOptionsBuilder Builder;
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+  PadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<PadOptions> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct PadOptionsBuilder {
+  typedef PadOptions Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  explicit PadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<PadOptions> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<PadOptions>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<PadOptions> CreatePadOptions(
+    flatbuffers::FlatBufferBuilder &_fbb) {
+  PadOptionsBuilder builder_(_fbb);
+  return builder_.Finish();
+}
+
+flatbuffers::Offset<PadOptions> CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct PadV2OptionsT : public flatbuffers::NativeTable {
+  typedef PadV2Options TableType;
+};
+
+struct PadV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef PadV2OptionsT NativeTableType;
+  typedef PadV2OptionsBuilder Builder;
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           verifier.EndTable();
+  }
+  PadV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<PadV2Options> Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t
*_rehasher = nullptr); +}; + +struct PadV2OptionsBuilder { + typedef PadV2Options Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit PadV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePadV2Options( + flatbuffers::FlatBufferBuilder &_fbb) { + PadV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReshapeOptionsT : public flatbuffers::NativeTable { + typedef ReshapeOptions TableType; + std::vector new_shape{}; +}; + +struct ReshapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReshapeOptionsT NativeTableType; + typedef ReshapeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NEW_SHAPE = 4 + }; + const flatbuffers::Vector *new_shape() const { + return GetPointer *>(VT_NEW_SHAPE); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_NEW_SHAPE) && + verifier.VerifyVector(new_shape()) && + verifier.EndTable(); + } + ReshapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReshapeOptionsBuilder { + typedef ReshapeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_new_shape(flatbuffers::Offset> new_shape) { + fbb_.AddOffset(ReshapeOptions::VT_NEW_SHAPE, new_shape); + } + explicit ReshapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReshapeOptions( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> new_shape = 0) { + ReshapeOptionsBuilder builder_(_fbb); + builder_.add_new_shape(new_shape); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateReshapeOptionsDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *new_shape = nullptr) { + auto new_shape__ = new_shape ? 
_fbb.CreateVector(*new_shape) : 0; + return tflite::CreateReshapeOptions( + _fbb, + new_shape__); +} + +flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SpaceToBatchNDOptionsT : public flatbuffers::NativeTable { + typedef SpaceToBatchNDOptions TableType; +}; + +struct SpaceToBatchNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SpaceToBatchNDOptionsT NativeTableType; + typedef SpaceToBatchNDOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + SpaceToBatchNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SpaceToBatchNDOptionsBuilder { + typedef SpaceToBatchNDOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SpaceToBatchNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSpaceToBatchNDOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + SpaceToBatchNDOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BatchToSpaceNDOptionsT : public flatbuffers::NativeTable { + typedef BatchToSpaceNDOptions TableType; +}; + +struct BatchToSpaceNDOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BatchToSpaceNDOptionsT NativeTableType; + typedef BatchToSpaceNDOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + BatchToSpaceNDOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BatchToSpaceNDOptionsBuilder { + typedef BatchToSpaceNDOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit BatchToSpaceNDOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBatchToSpaceNDOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + BatchToSpaceNDOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SkipGramOptionsT : public flatbuffers::NativeTable { + typedef SkipGramOptions TableType; + int32_t 
ngram_size = 0; + int32_t max_skip_size = 0; + bool include_all_ngrams = false; +}; + +struct SkipGramOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SkipGramOptionsT NativeTableType; + typedef SkipGramOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NGRAM_SIZE = 4, + VT_MAX_SKIP_SIZE = 6, + VT_INCLUDE_ALL_NGRAMS = 8 + }; + int32_t ngram_size() const { + return GetField(VT_NGRAM_SIZE, 0); + } + int32_t max_skip_size() const { + return GetField(VT_MAX_SKIP_SIZE, 0); + } + bool include_all_ngrams() const { + return GetField(VT_INCLUDE_ALL_NGRAMS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NGRAM_SIZE, 4) && + VerifyField(verifier, VT_MAX_SKIP_SIZE, 4) && + VerifyField(verifier, VT_INCLUDE_ALL_NGRAMS, 1) && + verifier.EndTable(); + } + SkipGramOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SkipGramOptionsBuilder { + typedef SkipGramOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_ngram_size(int32_t ngram_size) { + fbb_.AddElement(SkipGramOptions::VT_NGRAM_SIZE, ngram_size, 0); + } + void add_max_skip_size(int32_t max_skip_size) { + fbb_.AddElement(SkipGramOptions::VT_MAX_SKIP_SIZE, max_skip_size, 0); + } + void add_include_all_ngrams(bool include_all_ngrams) { + fbb_.AddElement(SkipGramOptions::VT_INCLUDE_ALL_NGRAMS, static_cast(include_all_ngrams), 0); + } + explicit SkipGramOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSkipGramOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t ngram_size = 0, + int32_t max_skip_size = 0, + bool include_all_ngrams = false) { + SkipGramOptionsBuilder builder_(_fbb); + builder_.add_max_skip_size(max_skip_size); + builder_.add_ngram_size(ngram_size); + builder_.add_include_all_ngrams(include_all_ngrams); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SpaceToDepthOptionsT : public flatbuffers::NativeTable { + typedef SpaceToDepthOptions TableType; + int32_t block_size = 0; +}; + +struct SpaceToDepthOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SpaceToDepthOptionsT NativeTableType; + typedef SpaceToDepthOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { + return GetField(VT_BLOCK_SIZE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCK_SIZE, 4) && + verifier.EndTable(); + } + SpaceToDepthOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder 
&_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SpaceToDepthOptionsBuilder { + typedef SpaceToDepthOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) { + fbb_.AddElement(SpaceToDepthOptions::VT_BLOCK_SIZE, block_size, 0); + } + explicit SpaceToDepthOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSpaceToDepthOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t block_size = 0) { + SpaceToDepthOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DepthToSpaceOptionsT : public flatbuffers::NativeTable { + typedef DepthToSpaceOptions TableType; + int32_t block_size = 0; +}; + +struct DepthToSpaceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DepthToSpaceOptionsT NativeTableType; + typedef DepthToSpaceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BLOCK_SIZE = 4 + }; + int32_t block_size() const { + return GetField(VT_BLOCK_SIZE, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BLOCK_SIZE, 4) && + verifier.EndTable(); + } + DepthToSpaceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DepthToSpaceOptionsBuilder { + typedef DepthToSpaceOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_block_size(int32_t block_size) { + fbb_.AddElement(DepthToSpaceOptions::VT_BLOCK_SIZE, block_size, 0); + } + explicit DepthToSpaceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDepthToSpaceOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t block_size = 0) { + DepthToSpaceOptionsBuilder builder_(_fbb); + builder_.add_block_size(block_size); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SubOptionsT : public flatbuffers::NativeTable { + typedef SubOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; + bool pot_scale_int16 = true; +}; + +struct SubOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SubOptionsT NativeTableType; + typedef SubOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4, + VT_POT_SCALE_INT16 = 6 + }; + 
tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool pot_scale_int16() const { + return GetField(VT_POT_SCALE_INT16, 1) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + VerifyField(verifier, VT_POT_SCALE_INT16, 1) && + verifier.EndTable(); + } + SubOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SubOptionsBuilder { + typedef SubOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(SubOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + void add_pot_scale_int16(bool pot_scale_int16) { + fbb_.AddElement(SubOptions::VT_POT_SCALE_INT16, static_cast(pot_scale_int16), 1); + } + explicit SubOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSubOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE, + bool pot_scale_int16 = true) { + SubOptionsBuilder builder_(_fbb); + builder_.add_pot_scale_int16(pot_scale_int16); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DivOptionsT : public flatbuffers::NativeTable { + typedef DivOptions TableType; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct DivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DivOptionsT NativeTableType; + typedef DivOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_FUSED_ACTIVATION_FUNCTION = 4 + }; + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); + } + DivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DivOptionsBuilder { + typedef DivOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(DivOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } 
+ explicit DivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDivOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + DivOptionsBuilder builder_(_fbb); + builder_.add_fused_activation_function(fused_activation_function); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TopKV2OptionsT : public flatbuffers::NativeTable { + typedef TopKV2Options TableType; +}; + +struct TopKV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TopKV2OptionsT NativeTableType; + typedef TopKV2OptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + TopKV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TopKV2OptionsBuilder { + typedef TopKV2Options Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TopKV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTopKV2Options( + flatbuffers::FlatBufferBuilder &_fbb) { + TopKV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct EmbeddingLookupSparseOptionsT : public flatbuffers::NativeTable { + typedef EmbeddingLookupSparseOptions TableType; + tflite::CombinerType combiner = tflite::CombinerType_SUM; +}; + +struct EmbeddingLookupSparseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef EmbeddingLookupSparseOptionsT NativeTableType; + typedef EmbeddingLookupSparseOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_COMBINER = 4 + }; + tflite::CombinerType combiner() const { + return static_cast(GetField(VT_COMBINER, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_COMBINER, 1) && + verifier.EndTable(); + } + EmbeddingLookupSparseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct EmbeddingLookupSparseOptionsBuilder { + typedef EmbeddingLookupSparseOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void 
add_combiner(tflite::CombinerType combiner) { + fbb_.AddElement(EmbeddingLookupSparseOptions::VT_COMBINER, static_cast(combiner), 0); + } + explicit EmbeddingLookupSparseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::CombinerType combiner = tflite::CombinerType_SUM) { + EmbeddingLookupSparseOptionsBuilder builder_(_fbb); + builder_.add_combiner(combiner); + return builder_.Finish(); +} + +flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GatherOptionsT : public flatbuffers::NativeTable { + typedef GatherOptions TableType; + int32_t axis = 0; + int32_t batch_dims = 0; +}; + +struct GatherOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GatherOptionsT NativeTableType; + typedef GatherOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4, + VT_BATCH_DIMS = 6 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + int32_t batch_dims() const { + return GetField(VT_BATCH_DIMS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS, 4) && + VerifyField(verifier, VT_BATCH_DIMS, 4) && + verifier.EndTable(); + } + GatherOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GatherOptionsBuilder { + typedef GatherOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(GatherOptions::VT_AXIS, axis, 0); + } + void add_batch_dims(int32_t batch_dims) { + fbb_.AddElement(GatherOptions::VT_BATCH_DIMS, batch_dims, 0); + } + explicit GatherOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGatherOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0, + int32_t batch_dims = 0) { + GatherOptionsBuilder builder_(_fbb); + builder_.add_batch_dims(batch_dims); + builder_.add_axis(axis); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TransposeOptionsT : public flatbuffers::NativeTable { + typedef TransposeOptions TableType; +}; + +struct TransposeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TransposeOptionsT NativeTableType; + typedef TransposeOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + TransposeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void 
UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TransposeOptionsBuilder { + typedef TransposeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TransposeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTransposeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + TransposeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ExpOptionsT : public flatbuffers::NativeTable { + typedef ExpOptions TableType; +}; + +struct ExpOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ExpOptionsT NativeTableType; + typedef ExpOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + ExpOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ExpOptionsBuilder { + typedef ExpOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ExpOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateExpOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + ExpOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CosOptionsT : public flatbuffers::NativeTable { + typedef CosOptions TableType; +}; + +struct CosOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CosOptionsT NativeTableType; + typedef CosOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + CosOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CosOptionsBuilder { + typedef CosOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit CosOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline 
flatbuffers::Offset CreateCosOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + CosOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReducerOptionsT : public flatbuffers::NativeTable { + typedef ReducerOptions TableType; + bool keep_dims = false; +}; + +struct ReducerOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReducerOptionsT NativeTableType; + typedef ReducerOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_KEEP_DIMS = 4 + }; + bool keep_dims() const { + return GetField(VT_KEEP_DIMS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_KEEP_DIMS, 1) && + verifier.EndTable(); + } + ReducerOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReducerOptionsBuilder { + typedef ReducerOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_keep_dims(bool keep_dims) { + fbb_.AddElement(ReducerOptions::VT_KEEP_DIMS, static_cast(keep_dims), 0); + } + explicit ReducerOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReducerOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool keep_dims = false) { + ReducerOptionsBuilder builder_(_fbb); + builder_.add_keep_dims(keep_dims); + return builder_.Finish(); +} + +flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SqueezeOptionsT : public flatbuffers::NativeTable { + typedef SqueezeOptions TableType; + std::vector squeeze_dims{}; +}; + +struct SqueezeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SqueezeOptionsT NativeTableType; + typedef SqueezeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SQUEEZE_DIMS = 4 + }; + const flatbuffers::Vector *squeeze_dims() const { + return GetPointer *>(VT_SQUEEZE_DIMS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_SQUEEZE_DIMS) && + verifier.VerifyVector(squeeze_dims()) && + verifier.EndTable(); + } + SqueezeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SqueezeOptionsBuilder { + typedef SqueezeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_squeeze_dims(flatbuffers::Offset> squeeze_dims) { + fbb_.AddOffset(SqueezeOptions::VT_SQUEEZE_DIMS, squeeze_dims); + } + explicit 
SqueezeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSqueezeOptions( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> squeeze_dims = 0) { + SqueezeOptionsBuilder builder_(_fbb); + builder_.add_squeeze_dims(squeeze_dims); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSqueezeOptionsDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *squeeze_dims = nullptr) { + auto squeeze_dims__ = squeeze_dims ? _fbb.CreateVector(*squeeze_dims) : 0; + return tflite::CreateSqueezeOptions( + _fbb, + squeeze_dims__); +} + +flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SplitOptionsT : public flatbuffers::NativeTable { + typedef SplitOptions TableType; + int32_t num_splits = 0; +}; + +struct SplitOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SplitOptionsT NativeTableType; + typedef SplitOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM_SPLITS = 4 + }; + int32_t num_splits() const { + return GetField(VT_NUM_SPLITS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM_SPLITS, 4) && + verifier.EndTable(); + } + SplitOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SplitOptionsBuilder { + typedef SplitOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_splits(int32_t num_splits) { + fbb_.AddElement(SplitOptions::VT_NUM_SPLITS, num_splits, 0); + } + explicit SplitOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSplitOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_splits = 0) { + SplitOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SplitVOptionsT : public flatbuffers::NativeTable { + typedef SplitVOptions TableType; + int32_t num_splits = 0; +}; + +struct SplitVOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SplitVOptionsT NativeTableType; + typedef SplitVOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM_SPLITS = 4 + }; + int32_t num_splits() const { + return GetField(VT_NUM_SPLITS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM_SPLITS, 4) && + verifier.EndTable(); + } + SplitVOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void 
UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SplitVOptionsBuilder { + typedef SplitVOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num_splits(int32_t num_splits) { + fbb_.AddElement(SplitVOptions::VT_NUM_SPLITS, num_splits, 0); + } + explicit SplitVOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSplitVOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t num_splits = 0) { + SplitVOptionsBuilder builder_(_fbb); + builder_.add_num_splits(num_splits); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct StridedSliceOptionsT : public flatbuffers::NativeTable { + typedef StridedSliceOptions TableType; + int32_t begin_mask = 0; + int32_t end_mask = 0; + int32_t ellipsis_mask = 0; + int32_t new_axis_mask = 0; + int32_t shrink_axis_mask = 0; +}; + +struct StridedSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef StridedSliceOptionsT NativeTableType; + typedef StridedSliceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BEGIN_MASK = 4, + VT_END_MASK = 6, + VT_ELLIPSIS_MASK = 8, + VT_NEW_AXIS_MASK = 10, + VT_SHRINK_AXIS_MASK = 12 + }; + int32_t begin_mask() const { + return GetField(VT_BEGIN_MASK, 0); + } + int32_t end_mask() const { + return GetField(VT_END_MASK, 0); + } + int32_t ellipsis_mask() const { + return GetField(VT_ELLIPSIS_MASK, 0); + } + int32_t new_axis_mask() const { + return GetField(VT_NEW_AXIS_MASK, 0); + } + int32_t shrink_axis_mask() const { + return GetField(VT_SHRINK_AXIS_MASK, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_BEGIN_MASK, 4) && + VerifyField(verifier, VT_END_MASK, 4) && + VerifyField(verifier, VT_ELLIPSIS_MASK, 4) && + VerifyField(verifier, VT_NEW_AXIS_MASK, 4) && + VerifyField(verifier, VT_SHRINK_AXIS_MASK, 4) && + verifier.EndTable(); + } + StridedSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct StridedSliceOptionsBuilder { + typedef StridedSliceOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_begin_mask(int32_t begin_mask) { + fbb_.AddElement(StridedSliceOptions::VT_BEGIN_MASK, begin_mask, 0); + } + void add_end_mask(int32_t end_mask) { + fbb_.AddElement(StridedSliceOptions::VT_END_MASK, end_mask, 0); + } + void add_ellipsis_mask(int32_t ellipsis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_ELLIPSIS_MASK, ellipsis_mask, 0); + } + void add_new_axis_mask(int32_t new_axis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_NEW_AXIS_MASK, new_axis_mask, 0); + } + void 
add_shrink_axis_mask(int32_t shrink_axis_mask) { + fbb_.AddElement(StridedSliceOptions::VT_SHRINK_AXIS_MASK, shrink_axis_mask, 0); + } + explicit StridedSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateStridedSliceOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t begin_mask = 0, + int32_t end_mask = 0, + int32_t ellipsis_mask = 0, + int32_t new_axis_mask = 0, + int32_t shrink_axis_mask = 0) { + StridedSliceOptionsBuilder builder_(_fbb); + builder_.add_shrink_axis_mask(shrink_axis_mask); + builder_.add_new_axis_mask(new_axis_mask); + builder_.add_ellipsis_mask(ellipsis_mask); + builder_.add_end_mask(end_mask); + builder_.add_begin_mask(begin_mask); + return builder_.Finish(); +} + +flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LogSoftmaxOptionsT : public flatbuffers::NativeTable { + typedef LogSoftmaxOptions TableType; +}; + +struct LogSoftmaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogSoftmaxOptionsT NativeTableType; + typedef LogSoftmaxOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + LogSoftmaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LogSoftmaxOptionsBuilder { + typedef LogSoftmaxOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogSoftmaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLogSoftmaxOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + LogSoftmaxOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CastOptionsT : public flatbuffers::NativeTable { + typedef CastOptions TableType; + tflite::TensorType in_data_type = tflite::TensorType_FLOAT32; + tflite::TensorType out_data_type = tflite::TensorType_FLOAT32; +}; + +struct CastOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CastOptionsT NativeTableType; + typedef CastOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_IN_DATA_TYPE = 4, + VT_OUT_DATA_TYPE = 6 + }; + tflite::TensorType in_data_type() const { + return static_cast(GetField(VT_IN_DATA_TYPE, 0)); + } + tflite::TensorType out_data_type() const { + return static_cast(GetField(VT_OUT_DATA_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_IN_DATA_TYPE, 1) && + VerifyField(verifier, VT_OUT_DATA_TYPE, 1) && + verifier.EndTable(); + } + 
CastOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CastOptionsBuilder { + typedef CastOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_in_data_type(tflite::TensorType in_data_type) { + fbb_.AddElement(CastOptions::VT_IN_DATA_TYPE, static_cast(in_data_type), 0); + } + void add_out_data_type(tflite::TensorType out_data_type) { + fbb_.AddElement(CastOptions::VT_OUT_DATA_TYPE, static_cast(out_data_type), 0); + } + explicit CastOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCastOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType in_data_type = tflite::TensorType_FLOAT32, + tflite::TensorType out_data_type = tflite::TensorType_FLOAT32) { + CastOptionsBuilder builder_(_fbb); + builder_.add_out_data_type(out_data_type); + builder_.add_in_data_type(in_data_type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DequantizeOptionsT : public flatbuffers::NativeTable { + typedef DequantizeOptions TableType; +}; + +struct DequantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DequantizeOptionsT NativeTableType; + typedef DequantizeOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + DequantizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DequantizeOptionsBuilder { + typedef DequantizeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit DequantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDequantizeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + DequantizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MaximumMinimumOptionsT : public flatbuffers::NativeTable { + typedef MaximumMinimumOptions TableType; +}; + +struct MaximumMinimumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MaximumMinimumOptionsT NativeTableType; + typedef MaximumMinimumOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + MaximumMinimumOptionsT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MaximumMinimumOptionsBuilder { + typedef MaximumMinimumOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MaximumMinimumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMaximumMinimumOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + MaximumMinimumOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TileOptionsT : public flatbuffers::NativeTable { + typedef TileOptions TableType; +}; + +struct TileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TileOptionsT NativeTableType; + typedef TileOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + TileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TileOptionsBuilder { + typedef TileOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit TileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTileOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + TileOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ArgMaxOptionsT : public flatbuffers::NativeTable { + typedef ArgMaxOptions TableType; + tflite::TensorType output_type = tflite::TensorType_FLOAT32; +}; + +struct ArgMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ArgMaxOptionsT NativeTableType; + typedef ArgMaxOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT_TYPE = 4 + }; + tflite::TensorType output_type() const { + return static_cast(GetField(VT_OUTPUT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUT_TYPE, 1) && + verifier.EndTable(); + } + ArgMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ArgMaxOptionsBuilder { + typedef ArgMaxOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output_type(tflite::TensorType output_type) { + fbb_.AddElement(ArgMaxOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); + } + explicit ArgMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateArgMaxOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType output_type = tflite::TensorType_FLOAT32) { + ArgMaxOptionsBuilder builder_(_fbb); + builder_.add_output_type(output_type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ArgMinOptionsT : public flatbuffers::NativeTable { + typedef ArgMinOptions TableType; + tflite::TensorType output_type = tflite::TensorType_FLOAT32; +}; + +struct ArgMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ArgMinOptionsT NativeTableType; + typedef ArgMinOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUTPUT_TYPE = 4 + }; + tflite::TensorType output_type() const { + return static_cast(GetField(VT_OUTPUT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUTPUT_TYPE, 1) && + verifier.EndTable(); + } + ArgMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ArgMinOptionsBuilder { + typedef ArgMinOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_output_type(tflite::TensorType output_type) { + fbb_.AddElement(ArgMinOptions::VT_OUTPUT_TYPE, static_cast(output_type), 0); + } + explicit ArgMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateArgMinOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType output_type = tflite::TensorType_FLOAT32) { + ArgMinOptionsBuilder builder_(_fbb); + builder_.add_output_type(output_type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GreaterOptionsT : public flatbuffers::NativeTable { + typedef GreaterOptions TableType; +}; + +struct GreaterOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GreaterOptionsT NativeTableType; + typedef GreaterOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + GreaterOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GreaterOptionsT *_o, 
const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GreaterOptionsBuilder { + typedef GreaterOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGreaterOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + GreaterOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GreaterEqualOptionsT : public flatbuffers::NativeTable { + typedef GreaterEqualOptions TableType; +}; + +struct GreaterEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GreaterEqualOptionsT NativeTableType; + typedef GreaterEqualOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + GreaterEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GreaterEqualOptionsBuilder { + typedef GreaterEqualOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GreaterEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGreaterEqualOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + GreaterEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LessOptionsT : public flatbuffers::NativeTable { + typedef LessOptions TableType; +}; + +struct LessOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LessOptionsT NativeTableType; + typedef LessOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + LessOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LessOptionsBuilder { + typedef LessOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = 
fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLessOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + LessOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LessEqualOptionsT : public flatbuffers::NativeTable { + typedef LessEqualOptions TableType; +}; + +struct LessEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LessEqualOptionsT NativeTableType; + typedef LessEqualOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + LessEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LessEqualOptionsBuilder { + typedef LessEqualOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LessEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLessEqualOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + LessEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NegOptionsT : public flatbuffers::NativeTable { + typedef NegOptions TableType; +}; + +struct NegOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NegOptionsT NativeTableType; + typedef NegOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + NegOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NegOptionsBuilder { + typedef NegOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NegOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNegOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + NegOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SelectOptionsT : public flatbuffers::NativeTable { + typedef SelectOptions TableType; +}; + +struct SelectOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SelectOptionsT NativeTableType; 
+ typedef SelectOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + SelectOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SelectOptionsBuilder { + typedef SelectOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SelectOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSelectOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + SelectOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SliceOptionsT : public flatbuffers::NativeTable { + typedef SliceOptions TableType; +}; + +struct SliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SliceOptionsT NativeTableType; + typedef SliceOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + SliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SliceOptionsBuilder { + typedef SliceOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSliceOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + SliceOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct TransposeConvOptionsT : public flatbuffers::NativeTable { + typedef TransposeConvOptions TableType; + tflite::Padding padding = tflite::Padding_SAME; + int32_t stride_w = 0; + int32_t stride_h = 0; + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE; +}; + +struct TransposeConvOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef TransposeConvOptionsT NativeTableType; + typedef TransposeConvOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_PADDING = 4, + VT_STRIDE_W = 6, + VT_STRIDE_H = 8, + VT_FUSED_ACTIVATION_FUNCTION = 10 + }; + tflite::Padding padding() const { + return static_cast(GetField(VT_PADDING, 0)); + } + int32_t stride_w() const { + return GetField(VT_STRIDE_W, 0); + } + int32_t stride_h() const { + return 
GetField(VT_STRIDE_H, 0); + } + tflite::ActivationFunctionType fused_activation_function() const { + return static_cast(GetField(VT_FUSED_ACTIVATION_FUNCTION, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_PADDING, 1) && + VerifyField(verifier, VT_STRIDE_W, 4) && + VerifyField(verifier, VT_STRIDE_H, 4) && + VerifyField(verifier, VT_FUSED_ACTIVATION_FUNCTION, 1) && + verifier.EndTable(); + } + TransposeConvOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct TransposeConvOptionsBuilder { + typedef TransposeConvOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_padding(tflite::Padding padding) { + fbb_.AddElement(TransposeConvOptions::VT_PADDING, static_cast(padding), 0); + } + void add_stride_w(int32_t stride_w) { + fbb_.AddElement(TransposeConvOptions::VT_STRIDE_W, stride_w, 0); + } + void add_stride_h(int32_t stride_h) { + fbb_.AddElement(TransposeConvOptions::VT_STRIDE_H, stride_h, 0); + } + void add_fused_activation_function(tflite::ActivationFunctionType fused_activation_function) { + fbb_.AddElement(TransposeConvOptions::VT_FUSED_ACTIVATION_FUNCTION, static_cast(fused_activation_function), 0); + } + explicit TransposeConvOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateTransposeConvOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::Padding padding = tflite::Padding_SAME, + int32_t stride_w = 0, + int32_t stride_h = 0, + tflite::ActivationFunctionType fused_activation_function = tflite::ActivationFunctionType_NONE) { + TransposeConvOptionsBuilder builder_(_fbb); + builder_.add_stride_h(stride_h); + builder_.add_stride_w(stride_w); + builder_.add_fused_activation_function(fused_activation_function); + builder_.add_padding(padding); + return builder_.Finish(); +} + +flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ExpandDimsOptionsT : public flatbuffers::NativeTable { + typedef ExpandDimsOptions TableType; +}; + +struct ExpandDimsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ExpandDimsOptionsT NativeTableType; + typedef ExpandDimsOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + ExpandDimsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ExpandDimsOptionsBuilder { + typedef ExpandDimsOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ExpandDimsOptionsBuilder(flatbuffers::FlatBufferBuilder 
&_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateExpandDimsOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + ExpandDimsOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SparseToDenseOptionsT : public flatbuffers::NativeTable { + typedef SparseToDenseOptions TableType; + bool validate_indices = false; +}; + +struct SparseToDenseOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SparseToDenseOptionsT NativeTableType; + typedef SparseToDenseOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VALIDATE_INDICES = 4 + }; + bool validate_indices() const { + return GetField(VT_VALIDATE_INDICES, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_VALIDATE_INDICES, 1) && + verifier.EndTable(); + } + SparseToDenseOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SparseToDenseOptionsBuilder { + typedef SparseToDenseOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_validate_indices(bool validate_indices) { + fbb_.AddElement(SparseToDenseOptions::VT_VALIDATE_INDICES, static_cast(validate_indices), 0); + } + explicit SparseToDenseOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSparseToDenseOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool validate_indices = false) { + SparseToDenseOptionsBuilder builder_(_fbb); + builder_.add_validate_indices(validate_indices); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct EqualOptionsT : public flatbuffers::NativeTable { + typedef EqualOptions TableType; +}; + +struct EqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef EqualOptionsT NativeTableType; + typedef EqualOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + EqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct EqualOptionsBuilder { + typedef EqualOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit EqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + 
: fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateEqualOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + EqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NotEqualOptionsT : public flatbuffers::NativeTable { + typedef NotEqualOptions TableType; +}; + +struct NotEqualOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NotEqualOptionsT NativeTableType; + typedef NotEqualOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + NotEqualOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NotEqualOptionsBuilder { + typedef NotEqualOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NotEqualOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNotEqualOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + NotEqualOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ShapeOptionsT : public flatbuffers::NativeTable { + typedef ShapeOptions TableType; + tflite::TensorType out_type = tflite::TensorType_FLOAT32; +}; + +struct ShapeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ShapeOptionsT NativeTableType; + typedef ShapeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OUT_TYPE = 4 + }; + tflite::TensorType out_type() const { + return static_cast(GetField(VT_OUT_TYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OUT_TYPE, 1) && + verifier.EndTable(); + } + ShapeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ShapeOptionsBuilder { + typedef ShapeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_out_type(tflite::TensorType out_type) { + fbb_.AddElement(ShapeOptions::VT_OUT_TYPE, static_cast(out_type), 0); + } + explicit ShapeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline 
flatbuffers::Offset CreateShapeOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType out_type = tflite::TensorType_FLOAT32) { + ShapeOptionsBuilder builder_(_fbb); + builder_.add_out_type(out_type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RankOptionsT : public flatbuffers::NativeTable { + typedef RankOptions TableType; +}; + +struct RankOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RankOptionsT NativeTableType; + typedef RankOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + RankOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RankOptionsBuilder { + typedef RankOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RankOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRankOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + RankOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PowOptionsT : public flatbuffers::NativeTable { + typedef PowOptions TableType; +}; + +struct PowOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PowOptionsT NativeTableType; + typedef PowOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + PowOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PowOptionsBuilder { + typedef PowOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit PowOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePowOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + PowOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FakeQuantOptionsT : public flatbuffers::NativeTable { + typedef FakeQuantOptions TableType; + float min = 0.0f; + float max = 0.0f; + int32_t num_bits = 0; + bool narrow_range = false; +}; + +struct FakeQuantOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + 
typedef FakeQuantOptionsT NativeTableType; + typedef FakeQuantOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MIN = 4, + VT_MAX = 6, + VT_NUM_BITS = 8, + VT_NARROW_RANGE = 10 + }; + float min() const { + return GetField(VT_MIN, 0.0f); + } + float max() const { + return GetField(VT_MAX, 0.0f); + } + int32_t num_bits() const { + return GetField(VT_NUM_BITS, 0); + } + bool narrow_range() const { + return GetField(VT_NARROW_RANGE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MIN, 4) && + VerifyField(verifier, VT_MAX, 4) && + VerifyField(verifier, VT_NUM_BITS, 4) && + VerifyField(verifier, VT_NARROW_RANGE, 1) && + verifier.EndTable(); + } + FakeQuantOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct FakeQuantOptionsBuilder { + typedef FakeQuantOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_min(float min) { + fbb_.AddElement(FakeQuantOptions::VT_MIN, min, 0.0f); + } + void add_max(float max) { + fbb_.AddElement(FakeQuantOptions::VT_MAX, max, 0.0f); + } + void add_num_bits(int32_t num_bits) { + fbb_.AddElement(FakeQuantOptions::VT_NUM_BITS, num_bits, 0); + } + void add_narrow_range(bool narrow_range) { + fbb_.AddElement(FakeQuantOptions::VT_NARROW_RANGE, static_cast(narrow_range), 0); + } + explicit FakeQuantOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFakeQuantOptions( + flatbuffers::FlatBufferBuilder &_fbb, + float min = 0.0f, + float max = 0.0f, + int32_t num_bits = 0, + bool narrow_range = false) { + FakeQuantOptionsBuilder builder_(_fbb); + builder_.add_num_bits(num_bits); + builder_.add_max(max); + builder_.add_min(min); + builder_.add_narrow_range(narrow_range); + return builder_.Finish(); +} + +flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct PackOptionsT : public flatbuffers::NativeTable { + typedef PackOptions TableType; + int32_t values_count = 0; + int32_t axis = 0; +}; + +struct PackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef PackOptionsT NativeTableType; + typedef PackOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VALUES_COUNT = 4, + VT_AXIS = 6 + }; + int32_t values_count() const { + return GetField(VT_VALUES_COUNT, 0); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_VALUES_COUNT, 4) && + VerifyField(verifier, VT_AXIS, 4) && + verifier.EndTable(); + } + PackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const 
PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct PackOptionsBuilder { + typedef PackOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_values_count(int32_t values_count) { + fbb_.AddElement(PackOptions::VT_VALUES_COUNT, values_count, 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(PackOptions::VT_AXIS, axis, 0); + } + explicit PackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreatePackOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t values_count = 0, + int32_t axis = 0) { + PackOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_values_count(values_count); + return builder_.Finish(); +} + +flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LogicalOrOptionsT : public flatbuffers::NativeTable { + typedef LogicalOrOptions TableType; +}; + +struct LogicalOrOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogicalOrOptionsT NativeTableType; + typedef LogicalOrOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + LogicalOrOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LogicalOrOptionsBuilder { + typedef LogicalOrOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalOrOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLogicalOrOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + LogicalOrOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct OneHotOptionsT : public flatbuffers::NativeTable { + typedef OneHotOptions TableType; + int32_t axis = 0; +}; + +struct OneHotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OneHotOptionsT NativeTableType; + typedef OneHotOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_AXIS = 4 + }; + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_AXIS, 4) && + verifier.EndTable(); + } + OneHotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t 
*_rehasher = nullptr); +}; + +struct OneHotOptionsBuilder { + typedef OneHotOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_axis(int32_t axis) { + fbb_.AddElement(OneHotOptions::VT_AXIS, axis, 0); + } + explicit OneHotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateOneHotOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t axis = 0) { + OneHotOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + return builder_.Finish(); +} + +flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AbsOptionsT : public flatbuffers::NativeTable { + typedef AbsOptions TableType; +}; + +struct AbsOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AbsOptionsT NativeTableType; + typedef AbsOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + AbsOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AbsOptionsBuilder { + typedef AbsOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AbsOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAbsOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + AbsOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HardSwishOptionsT : public flatbuffers::NativeTable { + typedef HardSwishOptions TableType; +}; + +struct HardSwishOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HardSwishOptionsT NativeTableType; + typedef HardSwishOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + HardSwishOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HardSwishOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HardSwishOptionsBuilder { + typedef HardSwishOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HardSwishOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHardSwishOptions( + 
flatbuffers::FlatBufferBuilder &_fbb) { + HardSwishOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LogicalAndOptionsT : public flatbuffers::NativeTable { + typedef LogicalAndOptions TableType; +}; + +struct LogicalAndOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogicalAndOptionsT NativeTableType; + typedef LogicalAndOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + LogicalAndOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LogicalAndOptionsBuilder { + typedef LogicalAndOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalAndOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLogicalAndOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + LogicalAndOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LogicalNotOptionsT : public flatbuffers::NativeTable { + typedef LogicalNotOptions TableType; +}; + +struct LogicalNotOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LogicalNotOptionsT NativeTableType; + typedef LogicalNotOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + LogicalNotOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LogicalNotOptionsBuilder { + typedef LogicalNotOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit LogicalNotOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLogicalNotOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + LogicalNotOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UnpackOptionsT : public flatbuffers::NativeTable { + typedef UnpackOptions TableType; + int32_t num = 0; + int32_t axis = 0; +}; + +struct UnpackOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table 
{ + typedef UnpackOptionsT NativeTableType; + typedef UnpackOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_NUM = 4, + VT_AXIS = 6 + }; + int32_t num() const { + return GetField(VT_NUM, 0); + } + int32_t axis() const { + return GetField(VT_AXIS, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_NUM, 4) && + VerifyField(verifier, VT_AXIS, 4) && + verifier.EndTable(); + } + UnpackOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnpackOptionsBuilder { + typedef UnpackOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_num(int32_t num) { + fbb_.AddElement(UnpackOptions::VT_NUM, num, 0); + } + void add_axis(int32_t axis) { + fbb_.AddElement(UnpackOptions::VT_AXIS, axis, 0); + } + explicit UnpackOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnpackOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t num = 0, + int32_t axis = 0) { + UnpackOptionsBuilder builder_(_fbb); + builder_.add_axis(axis); + builder_.add_num(num); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FloorDivOptionsT : public flatbuffers::NativeTable { + typedef FloorDivOptions TableType; +}; + +struct FloorDivOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FloorDivOptionsT NativeTableType; + typedef FloorDivOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + FloorDivOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct FloorDivOptionsBuilder { + typedef FloorDivOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorDivOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFloorDivOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + FloorDivOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SquareOptionsT : public flatbuffers::NativeTable { + typedef SquareOptions TableType; +}; + +struct SquareOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SquareOptionsT 
NativeTableType; + typedef SquareOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + SquareOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SquareOptionsBuilder { + typedef SquareOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquareOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSquareOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + SquareOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ZerosLikeOptionsT : public flatbuffers::NativeTable { + typedef ZerosLikeOptions TableType; +}; + +struct ZerosLikeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ZerosLikeOptionsT NativeTableType; + typedef ZerosLikeOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + ZerosLikeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ZerosLikeOptionsBuilder { + typedef ZerosLikeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ZerosLikeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateZerosLikeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + ZerosLikeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FillOptionsT : public flatbuffers::NativeTable { + typedef FillOptions TableType; +}; + +struct FillOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FillOptionsT NativeTableType; + typedef FillOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + FillOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct FillOptionsBuilder { + typedef FillOptions 
Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FillOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFillOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + FillOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct FloorModOptionsT : public flatbuffers::NativeTable { + typedef FloorModOptions TableType; +}; + +struct FloorModOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef FloorModOptionsT NativeTableType; + typedef FloorModOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + FloorModOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct FloorModOptionsBuilder { + typedef FloorModOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit FloorModOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateFloorModOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + FloorModOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RangeOptionsT : public flatbuffers::NativeTable { + typedef RangeOptions TableType; +}; + +struct RangeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RangeOptionsT NativeTableType; + typedef RangeOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + RangeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RangeOptionsBuilder { + typedef RangeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit RangeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRangeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + RangeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct LeakyReluOptionsT : public flatbuffers::NativeTable { + typedef LeakyReluOptions TableType; + float alpha = 0.0f; +}; + +struct LeakyReluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef LeakyReluOptionsT NativeTableType; + typedef LeakyReluOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ALPHA = 4 + }; + float alpha() const { + return GetField(VT_ALPHA, 0.0f); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ALPHA, 4) && + verifier.EndTable(); + } + LeakyReluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct LeakyReluOptionsBuilder { + typedef LeakyReluOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_alpha(float alpha) { + fbb_.AddElement(LeakyReluOptions::VT_ALPHA, alpha, 0.0f); + } + explicit LeakyReluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateLeakyReluOptions( + flatbuffers::FlatBufferBuilder &_fbb, + float alpha = 0.0f) { + LeakyReluOptionsBuilder builder_(_fbb); + builder_.add_alpha(alpha); + return builder_.Finish(); +} + +flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SquaredDifferenceOptionsT : public flatbuffers::NativeTable { + typedef SquaredDifferenceOptions TableType; +}; + +struct SquaredDifferenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SquaredDifferenceOptionsT NativeTableType; + typedef SquaredDifferenceOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + SquaredDifferenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SquaredDifferenceOptionsBuilder { + typedef SquaredDifferenceOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SquaredDifferenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSquaredDifferenceOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + SquaredDifferenceOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = 
nullptr); + +struct MirrorPadOptionsT : public flatbuffers::NativeTable { + typedef MirrorPadOptions TableType; + tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT; +}; + +struct MirrorPadOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MirrorPadOptionsT NativeTableType; + typedef MirrorPadOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_MODE = 4 + }; + tflite::MirrorPadMode mode() const { + return static_cast(GetField(VT_MODE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_MODE, 1) && + verifier.EndTable(); + } + MirrorPadOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MirrorPadOptionsBuilder { + typedef MirrorPadOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_mode(tflite::MirrorPadMode mode) { + fbb_.AddElement(MirrorPadOptions::VT_MODE, static_cast(mode), 0); + } + explicit MirrorPadOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMirrorPadOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::MirrorPadMode mode = tflite::MirrorPadMode_REFLECT) { + MirrorPadOptionsBuilder builder_(_fbb); + builder_.add_mode(mode); + return builder_.Finish(); +} + +flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UniqueOptionsT : public flatbuffers::NativeTable { + typedef UniqueOptions TableType; + tflite::TensorType idx_out_type = tflite::TensorType_INT32; +}; + +struct UniqueOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UniqueOptionsT NativeTableType; + typedef UniqueOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_IDX_OUT_TYPE = 4 + }; + tflite::TensorType idx_out_type() const { + return static_cast(GetField(VT_IDX_OUT_TYPE, 2)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_IDX_OUT_TYPE, 1) && + verifier.EndTable(); + } + UniqueOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UniqueOptionsBuilder { + typedef UniqueOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_idx_out_type(tflite::TensorType idx_out_type) { + fbb_.AddElement(UniqueOptions::VT_IDX_OUT_TYPE, static_cast(idx_out_type), 2); + } + explicit UniqueOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUniqueOptions( + flatbuffers::FlatBufferBuilder &_fbb, + tflite::TensorType idx_out_type = tflite::TensorType_INT32) { + UniqueOptionsBuilder builder_(_fbb); + builder_.add_idx_out_type(idx_out_type); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReverseV2OptionsT : public flatbuffers::NativeTable { + typedef ReverseV2Options TableType; +}; + +struct ReverseV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReverseV2OptionsT NativeTableType; + typedef ReverseV2OptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + ReverseV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReverseV2OptionsBuilder { + typedef ReverseV2Options Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ReverseV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReverseV2Options( + flatbuffers::FlatBufferBuilder &_fbb) { + ReverseV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AddNOptionsT : public flatbuffers::NativeTable { + typedef AddNOptions TableType; +}; + +struct AddNOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AddNOptionsT NativeTableType; + typedef AddNOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + AddNOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AddNOptionsBuilder { + typedef AddNOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AddNOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAddNOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + AddNOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GatherNdOptionsT : public flatbuffers::NativeTable { + typedef GatherNdOptions TableType; +}; + +struct 
GatherNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GatherNdOptionsT NativeTableType; + typedef GatherNdOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + GatherNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GatherNdOptionsBuilder { + typedef GatherNdOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit GatherNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGatherNdOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + GatherNdOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct WhereOptionsT : public flatbuffers::NativeTable { + typedef WhereOptions TableType; +}; + +struct WhereOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef WhereOptionsT NativeTableType; + typedef WhereOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + WhereOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct WhereOptionsBuilder { + typedef WhereOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit WhereOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateWhereOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + WhereOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReverseSequenceOptionsT : public flatbuffers::NativeTable { + typedef ReverseSequenceOptions TableType; + int32_t seq_dim = 0; + int32_t batch_dim = 0; +}; + +struct ReverseSequenceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReverseSequenceOptionsT NativeTableType; + typedef ReverseSequenceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SEQ_DIM = 4, + VT_BATCH_DIM = 6 + }; + int32_t seq_dim() const { + return GetField(VT_SEQ_DIM, 0); + } + int32_t batch_dim() const { + return GetField(VT_BATCH_DIM, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, 
VT_SEQ_DIM, 4) && + VerifyField(verifier, VT_BATCH_DIM, 4) && + verifier.EndTable(); + } + ReverseSequenceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReverseSequenceOptionsBuilder { + typedef ReverseSequenceOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_seq_dim(int32_t seq_dim) { + fbb_.AddElement(ReverseSequenceOptions::VT_SEQ_DIM, seq_dim, 0); + } + void add_batch_dim(int32_t batch_dim) { + fbb_.AddElement(ReverseSequenceOptions::VT_BATCH_DIM, batch_dim, 0); + } + explicit ReverseSequenceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReverseSequenceOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t seq_dim = 0, + int32_t batch_dim = 0) { + ReverseSequenceOptionsBuilder builder_(_fbb); + builder_.add_batch_dim(batch_dim); + builder_.add_seq_dim(seq_dim); + return builder_.Finish(); +} + +flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MatrixDiagOptionsT : public flatbuffers::NativeTable { + typedef MatrixDiagOptions TableType; +}; + +struct MatrixDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MatrixDiagOptionsT NativeTableType; + typedef MatrixDiagOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + MatrixDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MatrixDiagOptionsBuilder { + typedef MatrixDiagOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MatrixDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMatrixDiagOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + MatrixDiagOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct QuantizeOptionsT : public flatbuffers::NativeTable { + typedef QuantizeOptions TableType; +}; + +struct QuantizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef QuantizeOptionsT NativeTableType; + typedef QuantizeOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + QuantizeOptionsT 
*UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct QuantizeOptionsBuilder { + typedef QuantizeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit QuantizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateQuantizeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + QuantizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct MatrixSetDiagOptionsT : public flatbuffers::NativeTable { + typedef MatrixSetDiagOptions TableType; +}; + +struct MatrixSetDiagOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef MatrixSetDiagOptionsT NativeTableType; + typedef MatrixSetDiagOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + MatrixSetDiagOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct MatrixSetDiagOptionsBuilder { + typedef MatrixSetDiagOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit MatrixSetDiagOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateMatrixSetDiagOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + MatrixSetDiagOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct IfOptionsT : public flatbuffers::NativeTable { + typedef IfOptions TableType; + int32_t then_subgraph_index = 0; + int32_t else_subgraph_index = 0; +}; + +struct IfOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef IfOptionsT NativeTableType; + typedef IfOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_THEN_SUBGRAPH_INDEX = 4, + VT_ELSE_SUBGRAPH_INDEX = 6 + }; + int32_t then_subgraph_index() const { + return GetField(VT_THEN_SUBGRAPH_INDEX, 0); + } + int32_t else_subgraph_index() const { + return GetField(VT_ELSE_SUBGRAPH_INDEX, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_THEN_SUBGRAPH_INDEX, 4) && + VerifyField(verifier, VT_ELSE_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + IfOptionsT *UnPack(const 
flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct IfOptionsBuilder { + typedef IfOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_then_subgraph_index(int32_t then_subgraph_index) { + fbb_.AddElement(IfOptions::VT_THEN_SUBGRAPH_INDEX, then_subgraph_index, 0); + } + void add_else_subgraph_index(int32_t else_subgraph_index) { + fbb_.AddElement(IfOptions::VT_ELSE_SUBGRAPH_INDEX, else_subgraph_index, 0); + } + explicit IfOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateIfOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t then_subgraph_index = 0, + int32_t else_subgraph_index = 0) { + IfOptionsBuilder builder_(_fbb); + builder_.add_else_subgraph_index(else_subgraph_index); + builder_.add_then_subgraph_index(then_subgraph_index); + return builder_.Finish(); +} + +flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CallOnceOptionsT : public flatbuffers::NativeTable { + typedef CallOnceOptions TableType; + int32_t init_subgraph_index = 0; +}; + +struct CallOnceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CallOnceOptionsT NativeTableType; + typedef CallOnceOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INIT_SUBGRAPH_INDEX = 4 + }; + int32_t init_subgraph_index() const { + return GetField(VT_INIT_SUBGRAPH_INDEX, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_INIT_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + CallOnceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CallOnceOptionsBuilder { + typedef CallOnceOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_init_subgraph_index(int32_t init_subgraph_index) { + fbb_.AddElement(CallOnceOptions::VT_INIT_SUBGRAPH_INDEX, init_subgraph_index, 0); + } + explicit CallOnceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCallOnceOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t init_subgraph_index = 0) { + CallOnceOptionsBuilder builder_(_fbb); + builder_.add_init_subgraph_index(init_subgraph_index); + return builder_.Finish(); +} + +flatbuffers::Offset CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct WhileOptionsT : public 
flatbuffers::NativeTable { + typedef WhileOptions TableType; + int32_t cond_subgraph_index = 0; + int32_t body_subgraph_index = 0; +}; + +struct WhileOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef WhileOptionsT NativeTableType; + typedef WhileOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_COND_SUBGRAPH_INDEX = 4, + VT_BODY_SUBGRAPH_INDEX = 6 + }; + int32_t cond_subgraph_index() const { + return GetField(VT_COND_SUBGRAPH_INDEX, 0); + } + int32_t body_subgraph_index() const { + return GetField(VT_BODY_SUBGRAPH_INDEX, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_COND_SUBGRAPH_INDEX, 4) && + VerifyField(verifier, VT_BODY_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + WhileOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct WhileOptionsBuilder { + typedef WhileOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_cond_subgraph_index(int32_t cond_subgraph_index) { + fbb_.AddElement(WhileOptions::VT_COND_SUBGRAPH_INDEX, cond_subgraph_index, 0); + } + void add_body_subgraph_index(int32_t body_subgraph_index) { + fbb_.AddElement(WhileOptions::VT_BODY_SUBGRAPH_INDEX, body_subgraph_index, 0); + } + explicit WhileOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateWhileOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t cond_subgraph_index = 0, + int32_t body_subgraph_index = 0) { + WhileOptionsBuilder builder_(_fbb); + builder_.add_body_subgraph_index(body_subgraph_index); + builder_.add_cond_subgraph_index(cond_subgraph_index); + return builder_.Finish(); +} + +flatbuffers::Offset CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NonMaxSuppressionV4OptionsT : public flatbuffers::NativeTable { + typedef NonMaxSuppressionV4Options TableType; +}; + +struct NonMaxSuppressionV4Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NonMaxSuppressionV4OptionsT NativeTableType; + typedef NonMaxSuppressionV4OptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + NonMaxSuppressionV4OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NonMaxSuppressionV4OptionsBuilder { + typedef NonMaxSuppressionV4Options Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NonMaxSuppressionV4OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset 
Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNonMaxSuppressionV4Options( + flatbuffers::FlatBufferBuilder &_fbb) { + NonMaxSuppressionV4OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct NonMaxSuppressionV5OptionsT : public flatbuffers::NativeTable { + typedef NonMaxSuppressionV5Options TableType; +}; + +struct NonMaxSuppressionV5Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef NonMaxSuppressionV5OptionsT NativeTableType; + typedef NonMaxSuppressionV5OptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + NonMaxSuppressionV5OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct NonMaxSuppressionV5OptionsBuilder { + typedef NonMaxSuppressionV5Options Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit NonMaxSuppressionV5OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateNonMaxSuppressionV5Options( + flatbuffers::FlatBufferBuilder &_fbb) { + NonMaxSuppressionV5OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ScatterNdOptionsT : public flatbuffers::NativeTable { + typedef ScatterNdOptions TableType; +}; + +struct ScatterNdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ScatterNdOptionsT NativeTableType; + typedef ScatterNdOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + ScatterNdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ScatterNdOptionsBuilder { + typedef ScatterNdOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ScatterNdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateScatterNdOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + ScatterNdOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset 
CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SelectV2OptionsT : public flatbuffers::NativeTable { + typedef SelectV2Options TableType; +}; + +struct SelectV2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SelectV2OptionsT NativeTableType; + typedef SelectV2OptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + SelectV2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SelectV2OptionsBuilder { + typedef SelectV2Options Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SelectV2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSelectV2Options( + flatbuffers::FlatBufferBuilder &_fbb) { + SelectV2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DensifyOptionsT : public flatbuffers::NativeTable { + typedef DensifyOptions TableType; +}; + +struct DensifyOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DensifyOptionsT NativeTableType; + typedef DensifyOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + DensifyOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DensifyOptionsBuilder { + typedef DensifyOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit DensifyOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDensifyOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + DensifyOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SegmentSumOptionsT : public flatbuffers::NativeTable { + typedef SegmentSumOptions TableType; +}; + +struct SegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SegmentSumOptionsT NativeTableType; + typedef SegmentSumOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + SegmentSumOptionsT 
*UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SegmentSumOptionsBuilder { + typedef SegmentSumOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSegmentSumOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + SegmentSumOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BatchMatMulOptionsT : public flatbuffers::NativeTable { + typedef BatchMatMulOptions TableType; + bool adj_x = false; + bool adj_y = false; + bool asymmetric_quantize_inputs = false; +}; + +struct BatchMatMulOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BatchMatMulOptionsT NativeTableType; + typedef BatchMatMulOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_ADJ_X = 4, + VT_ADJ_Y = 6, + VT_ASYMMETRIC_QUANTIZE_INPUTS = 8 + }; + bool adj_x() const { + return GetField(VT_ADJ_X, 0) != 0; + } + bool adj_y() const { + return GetField(VT_ADJ_Y, 0) != 0; + } + bool asymmetric_quantize_inputs() const { + return GetField(VT_ASYMMETRIC_QUANTIZE_INPUTS, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_ADJ_X, 1) && + VerifyField(verifier, VT_ADJ_Y, 1) && + VerifyField(verifier, VT_ASYMMETRIC_QUANTIZE_INPUTS, 1) && + verifier.EndTable(); + } + BatchMatMulOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BatchMatMulOptionsBuilder { + typedef BatchMatMulOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_adj_x(bool adj_x) { + fbb_.AddElement(BatchMatMulOptions::VT_ADJ_X, static_cast(adj_x), 0); + } + void add_adj_y(bool adj_y) { + fbb_.AddElement(BatchMatMulOptions::VT_ADJ_Y, static_cast(adj_y), 0); + } + void add_asymmetric_quantize_inputs(bool asymmetric_quantize_inputs) { + fbb_.AddElement(BatchMatMulOptions::VT_ASYMMETRIC_QUANTIZE_INPUTS, static_cast(asymmetric_quantize_inputs), 0); + } + explicit BatchMatMulOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBatchMatMulOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool adj_x = false, + bool adj_y = false, + bool asymmetric_quantize_inputs = false) { + BatchMatMulOptionsBuilder builder_(_fbb); + 
builder_.add_asymmetric_quantize_inputs(asymmetric_quantize_inputs); + builder_.add_adj_y(adj_y); + builder_.add_adj_x(adj_x); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct CumsumOptionsT : public flatbuffers::NativeTable { + typedef CumsumOptions TableType; + bool exclusive = false; + bool reverse = false; +}; + +struct CumsumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef CumsumOptionsT NativeTableType; + typedef CumsumOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_EXCLUSIVE = 4, + VT_REVERSE = 6 + }; + bool exclusive() const { + return GetField(VT_EXCLUSIVE, 0) != 0; + } + bool reverse() const { + return GetField(VT_REVERSE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_EXCLUSIVE, 1) && + VerifyField(verifier, VT_REVERSE, 1) && + verifier.EndTable(); + } + CumsumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct CumsumOptionsBuilder { + typedef CumsumOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_exclusive(bool exclusive) { + fbb_.AddElement(CumsumOptions::VT_EXCLUSIVE, static_cast(exclusive), 0); + } + void add_reverse(bool reverse) { + fbb_.AddElement(CumsumOptions::VT_REVERSE, static_cast(reverse), 0); + } + explicit CumsumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateCumsumOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool exclusive = false, + bool reverse = false) { + CumsumOptionsBuilder builder_(_fbb); + builder_.add_reverse(reverse); + builder_.add_exclusive(exclusive); + return builder_.Finish(); +} + +flatbuffers::Offset CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BroadcastToOptionsT : public flatbuffers::NativeTable { + typedef BroadcastToOptions TableType; +}; + +struct BroadcastToOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BroadcastToOptionsT NativeTableType; + typedef BroadcastToOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + BroadcastToOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BroadcastToOptionsBuilder { + typedef BroadcastToOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit BroadcastToOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : 
fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBroadcastToOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + BroadcastToOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct Rfft2dOptionsT : public flatbuffers::NativeTable { + typedef Rfft2dOptions TableType; +}; + +struct Rfft2dOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef Rfft2dOptionsT NativeTableType; + typedef Rfft2dOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + Rfft2dOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct Rfft2dOptionsBuilder { + typedef Rfft2dOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit Rfft2dOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRfft2dOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + Rfft2dOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HashtableOptionsT : public flatbuffers::NativeTable { + typedef HashtableOptions TableType; + int32_t table_id = 0; + tflite::TensorType key_dtype = tflite::TensorType_FLOAT32; + tflite::TensorType value_dtype = tflite::TensorType_FLOAT32; +}; + +struct HashtableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HashtableOptionsT NativeTableType; + typedef HashtableOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TABLE_ID = 4, + VT_KEY_DTYPE = 6, + VT_VALUE_DTYPE = 8 + }; + int32_t table_id() const { + return GetField(VT_TABLE_ID, 0); + } + tflite::TensorType key_dtype() const { + return static_cast(GetField(VT_KEY_DTYPE, 0)); + } + tflite::TensorType value_dtype() const { + return static_cast(GetField(VT_VALUE_DTYPE, 0)); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_TABLE_ID, 4) && + VerifyField(verifier, VT_KEY_DTYPE, 1) && + VerifyField(verifier, VT_VALUE_DTYPE, 1) && + verifier.EndTable(); + } + HashtableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HashtableOptionsBuilder { + typedef HashtableOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + 
flatbuffers::uoffset_t start_; + void add_table_id(int32_t table_id) { + fbb_.AddElement(HashtableOptions::VT_TABLE_ID, table_id, 0); + } + void add_key_dtype(tflite::TensorType key_dtype) { + fbb_.AddElement(HashtableOptions::VT_KEY_DTYPE, static_cast(key_dtype), 0); + } + void add_value_dtype(tflite::TensorType value_dtype) { + fbb_.AddElement(HashtableOptions::VT_VALUE_DTYPE, static_cast(value_dtype), 0); + } + explicit HashtableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int32_t table_id = 0, + tflite::TensorType key_dtype = tflite::TensorType_FLOAT32, + tflite::TensorType value_dtype = tflite::TensorType_FLOAT32) { + HashtableOptionsBuilder builder_(_fbb); + builder_.add_table_id(table_id); + builder_.add_value_dtype(value_dtype); + builder_.add_key_dtype(key_dtype); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HashtableFindOptionsT : public flatbuffers::NativeTable { + typedef HashtableFindOptions TableType; +}; + +struct HashtableFindOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HashtableFindOptionsT NativeTableType; + typedef HashtableFindOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + HashtableFindOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HashtableFindOptionsBuilder { + typedef HashtableFindOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HashtableFindOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableFindOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + HashtableFindOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HashtableImportOptionsT : public flatbuffers::NativeTable { + typedef HashtableImportOptions TableType; +}; + +struct HashtableImportOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HashtableImportOptionsT NativeTableType; + typedef HashtableImportOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + HashtableImportOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder 
&_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HashtableImportOptionsBuilder { + typedef HashtableImportOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HashtableImportOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableImportOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + HashtableImportOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct HashtableSizeOptionsT : public flatbuffers::NativeTable { + typedef HashtableSizeOptions TableType; +}; + +struct HashtableSizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef HashtableSizeOptionsT NativeTableType; + typedef HashtableSizeOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + HashtableSizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct HashtableSizeOptionsBuilder { + typedef HashtableSizeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit HashtableSizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateHashtableSizeOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + HashtableSizeOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct VarHandleOptionsT : public flatbuffers::NativeTable { + typedef VarHandleOptions TableType; + std::string container{}; + std::string shared_name{}; +}; + +struct VarHandleOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef VarHandleOptionsT NativeTableType; + typedef VarHandleOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_CONTAINER = 4, + VT_SHARED_NAME = 6 + }; + const flatbuffers::String *container() const { + return GetPointer(VT_CONTAINER); + } + const flatbuffers::String *shared_name() const { + return GetPointer(VT_SHARED_NAME); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_CONTAINER) && + verifier.VerifyString(container()) && + VerifyOffset(verifier, VT_SHARED_NAME) && + verifier.VerifyString(shared_name()) && + verifier.EndTable(); + } + VarHandleOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(VarHandleOptionsT *_o, const flatbuffers::resolver_function_t 
*_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct VarHandleOptionsBuilder { + typedef VarHandleOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_container(flatbuffers::Offset container) { + fbb_.AddOffset(VarHandleOptions::VT_CONTAINER, container); + } + void add_shared_name(flatbuffers::Offset shared_name) { + fbb_.AddOffset(VarHandleOptions::VT_SHARED_NAME, shared_name); + } + explicit VarHandleOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateVarHandleOptions( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset container = 0, + flatbuffers::Offset shared_name = 0) { + VarHandleOptionsBuilder builder_(_fbb); + builder_.add_shared_name(shared_name); + builder_.add_container(container); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateVarHandleOptionsDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const char *container = nullptr, + const char *shared_name = nullptr) { + auto container__ = container ? _fbb.CreateString(container) : 0; + auto shared_name__ = shared_name ? _fbb.CreateString(shared_name) : 0; + return tflite::CreateVarHandleOptions( + _fbb, + container__, + shared_name__); +} + +flatbuffers::Offset CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ReadVariableOptionsT : public flatbuffers::NativeTable { + typedef ReadVariableOptions TableType; +}; + +struct ReadVariableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ReadVariableOptionsT NativeTableType; + typedef ReadVariableOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + ReadVariableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ReadVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ReadVariableOptionsBuilder { + typedef ReadVariableOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ReadVariableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateReadVariableOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + ReadVariableOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct AssignVariableOptionsT : public flatbuffers::NativeTable { + typedef AssignVariableOptions TableType; +}; + +struct AssignVariableOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef AssignVariableOptionsT NativeTableType; + 
typedef AssignVariableOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + AssignVariableOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(AssignVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct AssignVariableOptionsBuilder { + typedef AssignVariableOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit AssignVariableOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateAssignVariableOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + AssignVariableOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct RandomOptionsT : public flatbuffers::NativeTable { + typedef RandomOptions TableType; + int64_t seed = 0; + int64_t seed2 = 0; +}; + +struct RandomOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef RandomOptionsT NativeTableType; + typedef RandomOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_SEED = 4, + VT_SEED2 = 6 + }; + int64_t seed() const { + return GetField(VT_SEED, 0); + } + int64_t seed2() const { + return GetField(VT_SEED2, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_SEED, 8) && + VerifyField(verifier, VT_SEED2, 8) && + verifier.EndTable(); + } + RandomOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(RandomOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct RandomOptionsBuilder { + typedef RandomOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_seed(int64_t seed) { + fbb_.AddElement(RandomOptions::VT_SEED, seed, 0); + } + void add_seed2(int64_t seed2) { + fbb_.AddElement(RandomOptions::VT_SEED2, seed2, 0); + } + explicit RandomOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateRandomOptions( + flatbuffers::FlatBufferBuilder &_fbb, + int64_t seed = 0, + int64_t seed2 = 0) { + RandomOptionsBuilder builder_(_fbb); + builder_.add_seed2(seed2); + builder_.add_seed(seed); + return builder_.Finish(); +} + +flatbuffers::Offset CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BucketizeOptionsT : public flatbuffers::NativeTable { + typedef BucketizeOptions TableType; + std::vector boundaries{}; +}; + 
+struct BucketizeOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BucketizeOptionsT NativeTableType; + typedef BucketizeOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_BOUNDARIES = 4 + }; + const flatbuffers::Vector *boundaries() const { + return GetPointer *>(VT_BOUNDARIES); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_BOUNDARIES) && + verifier.VerifyVector(boundaries()) && + verifier.EndTable(); + } + BucketizeOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BucketizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BucketizeOptionsBuilder { + typedef BucketizeOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_boundaries(flatbuffers::Offset> boundaries) { + fbb_.AddOffset(BucketizeOptions::VT_BOUNDARIES, boundaries); + } + explicit BucketizeOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBucketizeOptions( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> boundaries = 0) { + BucketizeOptionsBuilder builder_(_fbb); + builder_.add_boundaries(boundaries); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBucketizeOptionsDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *boundaries = nullptr) { + auto boundaries__ = boundaries ? 
_fbb.CreateVector(*boundaries) : 0; + return tflite::CreateBucketizeOptions( + _fbb, + boundaries__); +} + +flatbuffers::Offset CreateBucketizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct GeluOptionsT : public flatbuffers::NativeTable { + typedef GeluOptions TableType; + bool approximate = false; +}; + +struct GeluOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef GeluOptionsT NativeTableType; + typedef GeluOptionsBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_APPROXIMATE = 4 + }; + bool approximate() const { + return GetField(VT_APPROXIMATE, 0) != 0; + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_APPROXIMATE, 1) && + verifier.EndTable(); + } + GeluOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(GeluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct GeluOptionsBuilder { + typedef GeluOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_approximate(bool approximate) { + fbb_.AddElement(GeluOptions::VT_APPROXIMATE, static_cast(approximate), 0); + } + explicit GeluOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateGeluOptions( + flatbuffers::FlatBufferBuilder &_fbb, + bool approximate = false) { + GeluOptionsBuilder builder_(_fbb); + builder_.add_approximate(approximate); + return builder_.Finish(); +} + +flatbuffers::Offset CreateGeluOptions(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct DynamicUpdateSliceOptionsT : public flatbuffers::NativeTable { + typedef DynamicUpdateSliceOptions TableType; +}; + +struct DynamicUpdateSliceOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef DynamicUpdateSliceOptionsT NativeTableType; + typedef DynamicUpdateSliceOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + DynamicUpdateSliceOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(DynamicUpdateSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct DynamicUpdateSliceOptionsBuilder { + typedef DynamicUpdateSliceOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit DynamicUpdateSliceOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateDynamicUpdateSliceOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + 
DynamicUpdateSliceOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateDynamicUpdateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UnsortedSegmentProdOptionsT : public flatbuffers::NativeTable { + typedef UnsortedSegmentProdOptions TableType; +}; + +struct UnsortedSegmentProdOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnsortedSegmentProdOptionsT NativeTableType; + typedef UnsortedSegmentProdOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + UnsortedSegmentProdOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnsortedSegmentProdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnsortedSegmentProdOptionsBuilder { + typedef UnsortedSegmentProdOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit UnsortedSegmentProdOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnsortedSegmentProdOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + UnsortedSegmentProdOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUnsortedSegmentProdOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UnsortedSegmentMaxOptionsT : public flatbuffers::NativeTable { + typedef UnsortedSegmentMaxOptions TableType; +}; + +struct UnsortedSegmentMaxOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnsortedSegmentMaxOptionsT NativeTableType; + typedef UnsortedSegmentMaxOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + UnsortedSegmentMaxOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnsortedSegmentMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnsortedSegmentMaxOptionsBuilder { + typedef UnsortedSegmentMaxOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit UnsortedSegmentMaxOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnsortedSegmentMaxOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + UnsortedSegmentMaxOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUnsortedSegmentMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const flatbuffers::rehasher_function_t 
*_rehasher = nullptr); + +struct UnsortedSegmentSumOptionsT : public flatbuffers::NativeTable { + typedef UnsortedSegmentSumOptions TableType; +}; + +struct UnsortedSegmentSumOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnsortedSegmentSumOptionsT NativeTableType; + typedef UnsortedSegmentSumOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + UnsortedSegmentSumOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnsortedSegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnsortedSegmentSumOptionsBuilder { + typedef UnsortedSegmentSumOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit UnsortedSegmentSumOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnsortedSegmentSumOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + UnsortedSegmentSumOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUnsortedSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ATan2OptionsT : public flatbuffers::NativeTable { + typedef ATan2Options TableType; +}; + +struct ATan2Options FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ATan2OptionsT NativeTableType; + typedef ATan2OptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + ATan2OptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ATan2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ATan2OptionsBuilder { + typedef ATan2Options Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit ATan2OptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateATan2Options( + flatbuffers::FlatBufferBuilder &_fbb) { + ATan2OptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateATan2Options(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct UnsortedSegmentMinOptionsT : public flatbuffers::NativeTable { + typedef UnsortedSegmentMinOptions TableType; +}; + +struct UnsortedSegmentMinOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef UnsortedSegmentMinOptionsT NativeTableType; + typedef UnsortedSegmentMinOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + 
verifier.EndTable(); + } + UnsortedSegmentMinOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(UnsortedSegmentMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct UnsortedSegmentMinOptionsBuilder { + typedef UnsortedSegmentMinOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit UnsortedSegmentMinOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateUnsortedSegmentMinOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + UnsortedSegmentMinOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateUnsortedSegmentMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SignOptionsT : public flatbuffers::NativeTable { + typedef SignOptions TableType; +}; + +struct SignOptions FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SignOptionsT NativeTableType; + typedef SignOptionsBuilder Builder; + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + verifier.EndTable(); + } + SignOptionsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SignOptionsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SignOptionsBuilder { + typedef SignOptions Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + explicit SignOptionsBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSignOptions( + flatbuffers::FlatBufferBuilder &_fbb) { + SignOptionsBuilder builder_(_fbb); + return builder_.Finish(); +} + +flatbuffers::Offset CreateSignOptions(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + + + + + +struct OperatorCodeBuilder { + typedef OperatorCode Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_deprecated_builtin_code(int8_t deprecated_builtin_code) { + fbb_.AddElement(OperatorCode::VT_DEPRECATED_BUILTIN_CODE, deprecated_builtin_code, 0); + } + void add_custom_code(flatbuffers::Offset custom_code) { + fbb_.AddOffset(OperatorCode::VT_CUSTOM_CODE, custom_code); + } + void add_version(int32_t version) { + fbb_.AddElement(OperatorCode::VT_VERSION, version, 1); + } + void add_builtin_code(tflite::BuiltinOperator builtin_code) { + fbb_.AddElement(OperatorCode::VT_BUILTIN_CODE, static_cast(builtin_code), 0); + } + explicit OperatorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = 
flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateOperatorCode( + flatbuffers::FlatBufferBuilder &_fbb, + int8_t deprecated_builtin_code = 0, + flatbuffers::Offset custom_code = 0, + int32_t version = 1, + tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { + OperatorCodeBuilder builder_(_fbb); + builder_.add_builtin_code(builtin_code); + builder_.add_version(version); + builder_.add_custom_code(custom_code); + builder_.add_deprecated_builtin_code(deprecated_builtin_code); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateOperatorCodeDirect( + flatbuffers::FlatBufferBuilder &_fbb, + int8_t deprecated_builtin_code = 0, + const char *custom_code = nullptr, + int32_t version = 1, + tflite::BuiltinOperator builtin_code = tflite::BuiltinOperator_ADD) { + auto custom_code__ = custom_code ? _fbb.CreateString(custom_code) : 0; + return tflite::CreateOperatorCode( + _fbb, + deprecated_builtin_code, + custom_code__, + version, + builtin_code); +} + +flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct OperatorT : public flatbuffers::NativeTable { + typedef Operator TableType; + uint32_t opcode_index = 0; + std::vector inputs{}; + std::vector outputs{}; + tflite::BuiltinOptionsUnion builtin_options{}; + std::vector custom_options{}; + tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS; + std::vector mutating_variable_inputs{}; + std::vector intermediates{}; +}; + +struct Operator FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef OperatorT NativeTableType; + typedef OperatorBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_OPCODE_INDEX = 4, + VT_INPUTS = 6, + VT_OUTPUTS = 8, + VT_BUILTIN_OPTIONS_TYPE = 10, + VT_BUILTIN_OPTIONS = 12, + VT_CUSTOM_OPTIONS = 14, + VT_CUSTOM_OPTIONS_FORMAT = 16, + VT_MUTATING_VARIABLE_INPUTS = 18, + VT_INTERMEDIATES = 20 + }; + uint32_t opcode_index() const { + return GetField(VT_OPCODE_INDEX, 0); + } + const flatbuffers::Vector *inputs() const { + return GetPointer *>(VT_INPUTS); + } + const flatbuffers::Vector *outputs() const { + return GetPointer *>(VT_OUTPUTS); + } + tflite::BuiltinOptions builtin_options_type() const { + return static_cast(GetField(VT_BUILTIN_OPTIONS_TYPE, 0)); + } + const void *builtin_options() const { + return GetPointer(VT_BUILTIN_OPTIONS); + } + template const T *builtin_options_as() const; + const tflite::Conv2DOptions *builtin_options_as_Conv2DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Conv2DOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::DepthwiseConv2DOptions *builtin_options_as_DepthwiseConv2DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DepthwiseConv2DOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ConcatEmbeddingsOptions *builtin_options_as_ConcatEmbeddingsOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ConcatEmbeddingsOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LSHProjectionOptions *builtin_options_as_LSHProjectionOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LSHProjectionOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::Pool2DOptions *builtin_options_as_Pool2DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Pool2DOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SVDFOptions *builtin_options_as_SVDFOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SVDFOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::RNNOptions *builtin_options_as_RNNOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RNNOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::FullyConnectedOptions *builtin_options_as_FullyConnectedOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FullyConnectedOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SoftmaxOptions *builtin_options_as_SoftmaxOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SoftmaxOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ConcatenationOptions *builtin_options_as_ConcatenationOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ConcatenationOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::AddOptions *builtin_options_as_AddOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_AddOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::L2NormOptions *builtin_options_as_L2NormOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_L2NormOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LocalResponseNormalizationOptions *builtin_options_as_LocalResponseNormalizationOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LocalResponseNormalizationOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LSTMOptions *builtin_options_as_LSTMOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LSTMOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ResizeBilinearOptions *builtin_options_as_ResizeBilinearOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ResizeBilinearOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::CallOptions *builtin_options_as_CallOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CallOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ReshapeOptions *builtin_options_as_ReshapeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ReshapeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SkipGramOptions *builtin_options_as_SkipGramOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SkipGramOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SpaceToDepthOptions *builtin_options_as_SpaceToDepthOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SpaceToDepthOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::EmbeddingLookupSparseOptions *builtin_options_as_EmbeddingLookupSparseOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_EmbeddingLookupSparseOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::MulOptions *builtin_options_as_MulOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MulOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::PadOptions *builtin_options_as_PadOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_PadOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::GatherOptions *builtin_options_as_GatherOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GatherOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::BatchToSpaceNDOptions *builtin_options_as_BatchToSpaceNDOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BatchToSpaceNDOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SpaceToBatchNDOptions *builtin_options_as_SpaceToBatchNDOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SpaceToBatchNDOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::TransposeOptions *builtin_options_as_TransposeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_TransposeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ReducerOptions *builtin_options_as_ReducerOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ReducerOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SubOptions *builtin_options_as_SubOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SubOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::DivOptions *builtin_options_as_DivOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DivOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SqueezeOptions *builtin_options_as_SqueezeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SqueezeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SequenceRNNOptions *builtin_options_as_SequenceRNNOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SequenceRNNOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::StridedSliceOptions *builtin_options_as_StridedSliceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_StridedSliceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ExpOptions *builtin_options_as_ExpOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ExpOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::TopKV2Options *builtin_options_as_TopKV2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_TopKV2Options ? static_cast(builtin_options()) : nullptr; + } + const tflite::SplitOptions *builtin_options_as_SplitOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SplitOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LogSoftmaxOptions *builtin_options_as_LogSoftmaxOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LogSoftmaxOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::CastOptions *builtin_options_as_CastOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CastOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::DequantizeOptions *builtin_options_as_DequantizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DequantizeOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::MaximumMinimumOptions *builtin_options_as_MaximumMinimumOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MaximumMinimumOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ArgMaxOptions *builtin_options_as_ArgMaxOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ArgMaxOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LessOptions *builtin_options_as_LessOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LessOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::NegOptions *builtin_options_as_NegOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_NegOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::PadV2Options *builtin_options_as_PadV2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_PadV2Options ? static_cast(builtin_options()) : nullptr; + } + const tflite::GreaterOptions *builtin_options_as_GreaterOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GreaterOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::GreaterEqualOptions *builtin_options_as_GreaterEqualOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GreaterEqualOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LessEqualOptions *builtin_options_as_LessEqualOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LessEqualOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SelectOptions *builtin_options_as_SelectOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SelectOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SliceOptions *builtin_options_as_SliceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SliceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::TransposeConvOptions *builtin_options_as_TransposeConvOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_TransposeConvOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SparseToDenseOptions *builtin_options_as_SparseToDenseOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SparseToDenseOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::TileOptions *builtin_options_as_TileOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_TileOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ExpandDimsOptions *builtin_options_as_ExpandDimsOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ExpandDimsOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::EqualOptions *builtin_options_as_EqualOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_EqualOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::NotEqualOptions *builtin_options_as_NotEqualOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_NotEqualOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ShapeOptions *builtin_options_as_ShapeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ShapeOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::PowOptions *builtin_options_as_PowOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_PowOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ArgMinOptions *builtin_options_as_ArgMinOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ArgMinOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::FakeQuantOptions *builtin_options_as_FakeQuantOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FakeQuantOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::PackOptions *builtin_options_as_PackOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_PackOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LogicalOrOptions *builtin_options_as_LogicalOrOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LogicalOrOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::OneHotOptions *builtin_options_as_OneHotOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_OneHotOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LogicalAndOptions *builtin_options_as_LogicalAndOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LogicalAndOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LogicalNotOptions *builtin_options_as_LogicalNotOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LogicalNotOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::UnpackOptions *builtin_options_as_UnpackOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnpackOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::FloorDivOptions *builtin_options_as_FloorDivOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FloorDivOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SquareOptions *builtin_options_as_SquareOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SquareOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ZerosLikeOptions *builtin_options_as_ZerosLikeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ZerosLikeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::FillOptions *builtin_options_as_FillOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FillOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::BidirectionalSequenceLSTMOptions *builtin_options_as_BidirectionalSequenceLSTMOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::BidirectionalSequenceRNNOptions *builtin_options_as_BidirectionalSequenceRNNOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BidirectionalSequenceRNNOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::UnidirectionalSequenceLSTMOptions *builtin_options_as_UnidirectionalSequenceLSTMOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnidirectionalSequenceLSTMOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::FloorModOptions *builtin_options_as_FloorModOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_FloorModOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::RangeOptions *builtin_options_as_RangeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RangeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ResizeNearestNeighborOptions *builtin_options_as_ResizeNearestNeighborOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ResizeNearestNeighborOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::LeakyReluOptions *builtin_options_as_LeakyReluOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_LeakyReluOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SquaredDifferenceOptions *builtin_options_as_SquaredDifferenceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SquaredDifferenceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::MirrorPadOptions *builtin_options_as_MirrorPadOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MirrorPadOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::AbsOptions *builtin_options_as_AbsOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_AbsOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SplitVOptions *builtin_options_as_SplitVOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SplitVOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::UniqueOptions *builtin_options_as_UniqueOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UniqueOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ReverseV2Options *builtin_options_as_ReverseV2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_ReverseV2Options ? static_cast(builtin_options()) : nullptr; + } + const tflite::AddNOptions *builtin_options_as_AddNOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_AddNOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::GatherNdOptions *builtin_options_as_GatherNdOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GatherNdOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::CosOptions *builtin_options_as_CosOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CosOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::WhereOptions *builtin_options_as_WhereOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_WhereOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::RankOptions *builtin_options_as_RankOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RankOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ReverseSequenceOptions *builtin_options_as_ReverseSequenceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ReverseSequenceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::MatrixDiagOptions *builtin_options_as_MatrixDiagOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MatrixDiagOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::QuantizeOptions *builtin_options_as_QuantizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_QuantizeOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::MatrixSetDiagOptions *builtin_options_as_MatrixSetDiagOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_MatrixSetDiagOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::HardSwishOptions *builtin_options_as_HardSwishOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HardSwishOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::IfOptions *builtin_options_as_IfOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_IfOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::WhileOptions *builtin_options_as_WhileOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_WhileOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::DepthToSpaceOptions *builtin_options_as_DepthToSpaceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DepthToSpaceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::NonMaxSuppressionV4Options *builtin_options_as_NonMaxSuppressionV4Options() const { + return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV4Options ? static_cast(builtin_options()) : nullptr; + } + const tflite::NonMaxSuppressionV5Options *builtin_options_as_NonMaxSuppressionV5Options() const { + return builtin_options_type() == tflite::BuiltinOptions_NonMaxSuppressionV5Options ? static_cast(builtin_options()) : nullptr; + } + const tflite::ScatterNdOptions *builtin_options_as_ScatterNdOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ScatterNdOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SelectV2Options *builtin_options_as_SelectV2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_SelectV2Options ? static_cast(builtin_options()) : nullptr; + } + const tflite::DensifyOptions *builtin_options_as_DensifyOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DensifyOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::SegmentSumOptions *builtin_options_as_SegmentSumOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SegmentSumOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::BatchMatMulOptions *builtin_options_as_BatchMatMulOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BatchMatMulOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::CumsumOptions *builtin_options_as_CumsumOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CumsumOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::CallOnceOptions *builtin_options_as_CallOnceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_CallOnceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::BroadcastToOptions *builtin_options_as_BroadcastToOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BroadcastToOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::Rfft2dOptions *builtin_options_as_Rfft2dOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Rfft2dOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::Conv3DOptions *builtin_options_as_Conv3DOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_Conv3DOptions ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::HashtableOptions *builtin_options_as_HashtableOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::HashtableFindOptions *builtin_options_as_HashtableFindOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableFindOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::HashtableImportOptions *builtin_options_as_HashtableImportOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableImportOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::HashtableSizeOptions *builtin_options_as_HashtableSizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_HashtableSizeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::VarHandleOptions *builtin_options_as_VarHandleOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_VarHandleOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ReadVariableOptions *builtin_options_as_ReadVariableOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_ReadVariableOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::AssignVariableOptions *builtin_options_as_AssignVariableOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_AssignVariableOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::RandomOptions *builtin_options_as_RandomOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_RandomOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::BucketizeOptions *builtin_options_as_BucketizeOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_BucketizeOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::GeluOptions *builtin_options_as_GeluOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_GeluOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::DynamicUpdateSliceOptions *builtin_options_as_DynamicUpdateSliceOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_DynamicUpdateSliceOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::UnsortedSegmentProdOptions *builtin_options_as_UnsortedSegmentProdOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentProdOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::UnsortedSegmentMaxOptions *builtin_options_as_UnsortedSegmentMaxOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentMaxOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::UnsortedSegmentMinOptions *builtin_options_as_UnsortedSegmentMinOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentMinOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::UnsortedSegmentSumOptions *builtin_options_as_UnsortedSegmentSumOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_UnsortedSegmentSumOptions ? static_cast(builtin_options()) : nullptr; + } + const tflite::ATan2Options *builtin_options_as_ATan2Options() const { + return builtin_options_type() == tflite::BuiltinOptions_ATan2Options ? 
static_cast(builtin_options()) : nullptr; + } + const tflite::SignOptions *builtin_options_as_SignOptions() const { + return builtin_options_type() == tflite::BuiltinOptions_SignOptions ? static_cast(builtin_options()) : nullptr; + } + const flatbuffers::Vector *custom_options() const { + return GetPointer *>(VT_CUSTOM_OPTIONS); + } + tflite::CustomOptionsFormat custom_options_format() const { + return static_cast(GetField(VT_CUSTOM_OPTIONS_FORMAT, 0)); + } + const flatbuffers::Vector *mutating_variable_inputs() const { + return GetPointer *>(VT_MUTATING_VARIABLE_INPUTS); + } + const flatbuffers::Vector *intermediates() const { + return GetPointer *>(VT_INTERMEDIATES); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_OPCODE_INDEX, 4) && + VerifyOffset(verifier, VT_INPUTS) && + verifier.VerifyVector(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && + verifier.VerifyVector(outputs()) && + VerifyField(verifier, VT_BUILTIN_OPTIONS_TYPE, 1) && + VerifyOffset(verifier, VT_BUILTIN_OPTIONS) && + VerifyBuiltinOptions(verifier, builtin_options(), builtin_options_type()) && + VerifyOffset(verifier, VT_CUSTOM_OPTIONS) && + verifier.VerifyVector(custom_options()) && + VerifyField(verifier, VT_CUSTOM_OPTIONS_FORMAT, 1) && + VerifyOffset(verifier, VT_MUTATING_VARIABLE_INPUTS) && + verifier.VerifyVector(mutating_variable_inputs()) && + VerifyOffset(verifier, VT_INTERMEDIATES) && + verifier.VerifyVector(intermediates()) && + verifier.EndTable(); + } + OperatorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +template<> inline const tflite::Conv2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Conv2DOptions(); +} + +template<> inline const tflite::DepthwiseConv2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_DepthwiseConv2DOptions(); +} + +template<> inline const tflite::ConcatEmbeddingsOptions *Operator::builtin_options_as() const { + return builtin_options_as_ConcatEmbeddingsOptions(); +} + +template<> inline const tflite::LSHProjectionOptions *Operator::builtin_options_as() const { + return builtin_options_as_LSHProjectionOptions(); +} + +template<> inline const tflite::Pool2DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Pool2DOptions(); +} + +template<> inline const tflite::SVDFOptions *Operator::builtin_options_as() const { + return builtin_options_as_SVDFOptions(); +} + +template<> inline const tflite::RNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_RNNOptions(); +} + +template<> inline const tflite::FullyConnectedOptions *Operator::builtin_options_as() const { + return builtin_options_as_FullyConnectedOptions(); +} + +template<> inline const tflite::SoftmaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_SoftmaxOptions(); +} + +template<> inline const tflite::ConcatenationOptions *Operator::builtin_options_as() const { + return builtin_options_as_ConcatenationOptions(); +} + +template<> inline const tflite::AddOptions *Operator::builtin_options_as() const { + return builtin_options_as_AddOptions(); +} + +template<> inline const tflite::L2NormOptions *Operator::builtin_options_as() const { + return 
builtin_options_as_L2NormOptions(); +} + +template<> inline const tflite::LocalResponseNormalizationOptions *Operator::builtin_options_as() const { + return builtin_options_as_LocalResponseNormalizationOptions(); +} + +template<> inline const tflite::LSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_LSTMOptions(); +} + +template<> inline const tflite::ResizeBilinearOptions *Operator::builtin_options_as() const { + return builtin_options_as_ResizeBilinearOptions(); +} + +template<> inline const tflite::CallOptions *Operator::builtin_options_as() const { + return builtin_options_as_CallOptions(); +} + +template<> inline const tflite::ReshapeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReshapeOptions(); +} + +template<> inline const tflite::SkipGramOptions *Operator::builtin_options_as() const { + return builtin_options_as_SkipGramOptions(); +} + +template<> inline const tflite::SpaceToDepthOptions *Operator::builtin_options_as() const { + return builtin_options_as_SpaceToDepthOptions(); +} + +template<> inline const tflite::EmbeddingLookupSparseOptions *Operator::builtin_options_as() const { + return builtin_options_as_EmbeddingLookupSparseOptions(); +} + +template<> inline const tflite::MulOptions *Operator::builtin_options_as() const { + return builtin_options_as_MulOptions(); +} + +template<> inline const tflite::PadOptions *Operator::builtin_options_as() const { + return builtin_options_as_PadOptions(); +} + +template<> inline const tflite::GatherOptions *Operator::builtin_options_as() const { + return builtin_options_as_GatherOptions(); +} + +template<> inline const tflite::BatchToSpaceNDOptions *Operator::builtin_options_as() const { + return builtin_options_as_BatchToSpaceNDOptions(); +} + +template<> inline const tflite::SpaceToBatchNDOptions *Operator::builtin_options_as() const { + return builtin_options_as_SpaceToBatchNDOptions(); +} + +template<> inline const tflite::TransposeOptions *Operator::builtin_options_as() const { + return builtin_options_as_TransposeOptions(); +} + +template<> inline const tflite::ReducerOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReducerOptions(); +} + +template<> inline const tflite::SubOptions *Operator::builtin_options_as() const { + return builtin_options_as_SubOptions(); +} + +template<> inline const tflite::DivOptions *Operator::builtin_options_as() const { + return builtin_options_as_DivOptions(); +} + +template<> inline const tflite::SqueezeOptions *Operator::builtin_options_as() const { + return builtin_options_as_SqueezeOptions(); +} + +template<> inline const tflite::SequenceRNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_SequenceRNNOptions(); +} + +template<> inline const tflite::StridedSliceOptions *Operator::builtin_options_as() const { + return builtin_options_as_StridedSliceOptions(); +} + +template<> inline const tflite::ExpOptions *Operator::builtin_options_as() const { + return builtin_options_as_ExpOptions(); +} + +template<> inline const tflite::TopKV2Options *Operator::builtin_options_as() const { + return builtin_options_as_TopKV2Options(); +} + +template<> inline const tflite::SplitOptions *Operator::builtin_options_as() const { + return builtin_options_as_SplitOptions(); +} + +template<> inline const tflite::LogSoftmaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogSoftmaxOptions(); +} + +template<> inline const tflite::CastOptions *Operator::builtin_options_as() const { + 
return builtin_options_as_CastOptions(); +} + +template<> inline const tflite::DequantizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_DequantizeOptions(); +} + +template<> inline const tflite::MaximumMinimumOptions *Operator::builtin_options_as() const { + return builtin_options_as_MaximumMinimumOptions(); +} + +template<> inline const tflite::ArgMaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_ArgMaxOptions(); +} + +template<> inline const tflite::LessOptions *Operator::builtin_options_as() const { + return builtin_options_as_LessOptions(); +} + +template<> inline const tflite::NegOptions *Operator::builtin_options_as() const { + return builtin_options_as_NegOptions(); +} + +template<> inline const tflite::PadV2Options *Operator::builtin_options_as() const { + return builtin_options_as_PadV2Options(); +} + +template<> inline const tflite::GreaterOptions *Operator::builtin_options_as() const { + return builtin_options_as_GreaterOptions(); +} + +template<> inline const tflite::GreaterEqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_GreaterEqualOptions(); +} + +template<> inline const tflite::LessEqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_LessEqualOptions(); +} + +template<> inline const tflite::SelectOptions *Operator::builtin_options_as() const { + return builtin_options_as_SelectOptions(); +} + +template<> inline const tflite::SliceOptions *Operator::builtin_options_as() const { + return builtin_options_as_SliceOptions(); +} + +template<> inline const tflite::TransposeConvOptions *Operator::builtin_options_as() const { + return builtin_options_as_TransposeConvOptions(); +} + +template<> inline const tflite::SparseToDenseOptions *Operator::builtin_options_as() const { + return builtin_options_as_SparseToDenseOptions(); +} + +template<> inline const tflite::TileOptions *Operator::builtin_options_as() const { + return builtin_options_as_TileOptions(); +} + +template<> inline const tflite::ExpandDimsOptions *Operator::builtin_options_as() const { + return builtin_options_as_ExpandDimsOptions(); +} + +template<> inline const tflite::EqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_EqualOptions(); +} + +template<> inline const tflite::NotEqualOptions *Operator::builtin_options_as() const { + return builtin_options_as_NotEqualOptions(); +} + +template<> inline const tflite::ShapeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ShapeOptions(); +} + +template<> inline const tflite::PowOptions *Operator::builtin_options_as() const { + return builtin_options_as_PowOptions(); +} + +template<> inline const tflite::ArgMinOptions *Operator::builtin_options_as() const { + return builtin_options_as_ArgMinOptions(); +} + +template<> inline const tflite::FakeQuantOptions *Operator::builtin_options_as() const { + return builtin_options_as_FakeQuantOptions(); +} + +template<> inline const tflite::PackOptions *Operator::builtin_options_as() const { + return builtin_options_as_PackOptions(); +} + +template<> inline const tflite::LogicalOrOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalOrOptions(); +} + +template<> inline const tflite::OneHotOptions *Operator::builtin_options_as() const { + return builtin_options_as_OneHotOptions(); +} + +template<> inline const tflite::LogicalAndOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalAndOptions(); +} + +template<> 
inline const tflite::LogicalNotOptions *Operator::builtin_options_as() const { + return builtin_options_as_LogicalNotOptions(); +} + +template<> inline const tflite::UnpackOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnpackOptions(); +} + +template<> inline const tflite::FloorDivOptions *Operator::builtin_options_as() const { + return builtin_options_as_FloorDivOptions(); +} + +template<> inline const tflite::SquareOptions *Operator::builtin_options_as() const { + return builtin_options_as_SquareOptions(); +} + +template<> inline const tflite::ZerosLikeOptions *Operator::builtin_options_as() const { + return builtin_options_as_ZerosLikeOptions(); +} + +template<> inline const tflite::FillOptions *Operator::builtin_options_as() const { + return builtin_options_as_FillOptions(); +} + +template<> inline const tflite::BidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_BidirectionalSequenceLSTMOptions(); +} + +template<> inline const tflite::BidirectionalSequenceRNNOptions *Operator::builtin_options_as() const { + return builtin_options_as_BidirectionalSequenceRNNOptions(); +} + +template<> inline const tflite::UnidirectionalSequenceLSTMOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnidirectionalSequenceLSTMOptions(); +} + +template<> inline const tflite::FloorModOptions *Operator::builtin_options_as() const { + return builtin_options_as_FloorModOptions(); +} + +template<> inline const tflite::RangeOptions *Operator::builtin_options_as() const { + return builtin_options_as_RangeOptions(); +} + +template<> inline const tflite::ResizeNearestNeighborOptions *Operator::builtin_options_as() const { + return builtin_options_as_ResizeNearestNeighborOptions(); +} + +template<> inline const tflite::LeakyReluOptions *Operator::builtin_options_as() const { + return builtin_options_as_LeakyReluOptions(); +} + +template<> inline const tflite::SquaredDifferenceOptions *Operator::builtin_options_as() const { + return builtin_options_as_SquaredDifferenceOptions(); +} + +template<> inline const tflite::MirrorPadOptions *Operator::builtin_options_as() const { + return builtin_options_as_MirrorPadOptions(); +} + +template<> inline const tflite::AbsOptions *Operator::builtin_options_as() const { + return builtin_options_as_AbsOptions(); +} + +template<> inline const tflite::SplitVOptions *Operator::builtin_options_as() const { + return builtin_options_as_SplitVOptions(); +} + +template<> inline const tflite::UniqueOptions *Operator::builtin_options_as() const { + return builtin_options_as_UniqueOptions(); +} + +template<> inline const tflite::ReverseV2Options *Operator::builtin_options_as() const { + return builtin_options_as_ReverseV2Options(); +} + +template<> inline const tflite::AddNOptions *Operator::builtin_options_as() const { + return builtin_options_as_AddNOptions(); +} + +template<> inline const tflite::GatherNdOptions *Operator::builtin_options_as() const { + return builtin_options_as_GatherNdOptions(); +} + +template<> inline const tflite::CosOptions *Operator::builtin_options_as() const { + return builtin_options_as_CosOptions(); +} + +template<> inline const tflite::WhereOptions *Operator::builtin_options_as() const { + return builtin_options_as_WhereOptions(); +} + +template<> inline const tflite::RankOptions *Operator::builtin_options_as() const { + return builtin_options_as_RankOptions(); +} + +template<> inline const tflite::ReverseSequenceOptions *Operator::builtin_options_as() const { 
+ return builtin_options_as_ReverseSequenceOptions(); +} + +template<> inline const tflite::MatrixDiagOptions *Operator::builtin_options_as() const { + return builtin_options_as_MatrixDiagOptions(); +} + +template<> inline const tflite::QuantizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_QuantizeOptions(); +} + +template<> inline const tflite::MatrixSetDiagOptions *Operator::builtin_options_as() const { + return builtin_options_as_MatrixSetDiagOptions(); +} + +template<> inline const tflite::HardSwishOptions *Operator::builtin_options_as() const { + return builtin_options_as_HardSwishOptions(); +} + +template<> inline const tflite::IfOptions *Operator::builtin_options_as() const { + return builtin_options_as_IfOptions(); +} + +template<> inline const tflite::WhileOptions *Operator::builtin_options_as() const { + return builtin_options_as_WhileOptions(); +} + +template<> inline const tflite::DepthToSpaceOptions *Operator::builtin_options_as() const { + return builtin_options_as_DepthToSpaceOptions(); +} + +template<> inline const tflite::NonMaxSuppressionV4Options *Operator::builtin_options_as() const { + return builtin_options_as_NonMaxSuppressionV4Options(); +} + +template<> inline const tflite::NonMaxSuppressionV5Options *Operator::builtin_options_as() const { + return builtin_options_as_NonMaxSuppressionV5Options(); +} + +template<> inline const tflite::ScatterNdOptions *Operator::builtin_options_as() const { + return builtin_options_as_ScatterNdOptions(); +} + +template<> inline const tflite::SelectV2Options *Operator::builtin_options_as() const { + return builtin_options_as_SelectV2Options(); +} + +template<> inline const tflite::DensifyOptions *Operator::builtin_options_as() const { + return builtin_options_as_DensifyOptions(); +} + +template<> inline const tflite::SegmentSumOptions *Operator::builtin_options_as() const { + return builtin_options_as_SegmentSumOptions(); +} + +template<> inline const tflite::BatchMatMulOptions *Operator::builtin_options_as() const { + return builtin_options_as_BatchMatMulOptions(); +} + +template<> inline const tflite::CumsumOptions *Operator::builtin_options_as() const { + return builtin_options_as_CumsumOptions(); +} + +template<> inline const tflite::CallOnceOptions *Operator::builtin_options_as() const { + return builtin_options_as_CallOnceOptions(); +} + +template<> inline const tflite::BroadcastToOptions *Operator::builtin_options_as() const { + return builtin_options_as_BroadcastToOptions(); +} + +template<> inline const tflite::Rfft2dOptions *Operator::builtin_options_as() const { + return builtin_options_as_Rfft2dOptions(); +} + +template<> inline const tflite::Conv3DOptions *Operator::builtin_options_as() const { + return builtin_options_as_Conv3DOptions(); +} + +template<> inline const tflite::HashtableOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableOptions(); +} + +template<> inline const tflite::HashtableFindOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableFindOptions(); +} + +template<> inline const tflite::HashtableImportOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableImportOptions(); +} + +template<> inline const tflite::HashtableSizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_HashtableSizeOptions(); +} + +template<> inline const tflite::VarHandleOptions *Operator::builtin_options_as() const { + return builtin_options_as_VarHandleOptions(); +} + +template<> inline 
const tflite::ReadVariableOptions *Operator::builtin_options_as() const { + return builtin_options_as_ReadVariableOptions(); +} + +template<> inline const tflite::AssignVariableOptions *Operator::builtin_options_as() const { + return builtin_options_as_AssignVariableOptions(); +} + +template<> inline const tflite::RandomOptions *Operator::builtin_options_as() const { + return builtin_options_as_RandomOptions(); +} + +template<> inline const tflite::BucketizeOptions *Operator::builtin_options_as() const { + return builtin_options_as_BucketizeOptions(); +} + +template<> inline const tflite::GeluOptions *Operator::builtin_options_as() const { + return builtin_options_as_GeluOptions(); +} + +template<> inline const tflite::DynamicUpdateSliceOptions *Operator::builtin_options_as() const { + return builtin_options_as_DynamicUpdateSliceOptions(); +} + +template<> inline const tflite::UnsortedSegmentProdOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnsortedSegmentProdOptions(); +} + +template<> inline const tflite::UnsortedSegmentMaxOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnsortedSegmentMaxOptions(); +} + +template<> inline const tflite::UnsortedSegmentMinOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnsortedSegmentMinOptions(); +} + +template<> inline const tflite::UnsortedSegmentSumOptions *Operator::builtin_options_as() const { + return builtin_options_as_UnsortedSegmentSumOptions(); +} + +template<> inline const tflite::ATan2Options *Operator::builtin_options_as() const { + return builtin_options_as_ATan2Options(); +} + +template<> inline const tflite::SignOptions *Operator::builtin_options_as() const { + return builtin_options_as_SignOptions(); +} + +struct OperatorBuilder { + typedef Operator Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_opcode_index(uint32_t opcode_index) { + fbb_.AddElement(Operator::VT_OPCODE_INDEX, opcode_index, 0); + } + void add_inputs(flatbuffers::Offset> inputs) { + fbb_.AddOffset(Operator::VT_INPUTS, inputs); + } + void add_outputs(flatbuffers::Offset> outputs) { + fbb_.AddOffset(Operator::VT_OUTPUTS, outputs); + } + void add_builtin_options_type(tflite::BuiltinOptions builtin_options_type) { + fbb_.AddElement(Operator::VT_BUILTIN_OPTIONS_TYPE, static_cast(builtin_options_type), 0); + } + void add_builtin_options(flatbuffers::Offset builtin_options) { + fbb_.AddOffset(Operator::VT_BUILTIN_OPTIONS, builtin_options); + } + void add_custom_options(flatbuffers::Offset> custom_options) { + fbb_.AddOffset(Operator::VT_CUSTOM_OPTIONS, custom_options); + } + void add_custom_options_format(tflite::CustomOptionsFormat custom_options_format) { + fbb_.AddElement(Operator::VT_CUSTOM_OPTIONS_FORMAT, static_cast(custom_options_format), 0); + } + void add_mutating_variable_inputs(flatbuffers::Offset> mutating_variable_inputs) { + fbb_.AddOffset(Operator::VT_MUTATING_VARIABLE_INPUTS, mutating_variable_inputs); + } + void add_intermediates(flatbuffers::Offset> intermediates) { + fbb_.AddOffset(Operator::VT_INTERMEDIATES, intermediates); + } + explicit OperatorBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateOperator( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t opcode_index = 0, + flatbuffers::Offset> inputs = 0, + 
flatbuffers::Offset> outputs = 0, + tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, + flatbuffers::Offset builtin_options = 0, + flatbuffers::Offset> custom_options = 0, + tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, + flatbuffers::Offset> mutating_variable_inputs = 0, + flatbuffers::Offset> intermediates = 0) { + OperatorBuilder builder_(_fbb); + builder_.add_intermediates(intermediates); + builder_.add_mutating_variable_inputs(mutating_variable_inputs); + builder_.add_custom_options(custom_options); + builder_.add_builtin_options(builtin_options); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + builder_.add_opcode_index(opcode_index); + builder_.add_custom_options_format(custom_options_format); + builder_.add_builtin_options_type(builtin_options_type); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateOperatorDirect( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t opcode_index = 0, + const std::vector *inputs = nullptr, + const std::vector *outputs = nullptr, + tflite::BuiltinOptions builtin_options_type = tflite::BuiltinOptions_NONE, + flatbuffers::Offset builtin_options = 0, + const std::vector *custom_options = nullptr, + tflite::CustomOptionsFormat custom_options_format = tflite::CustomOptionsFormat_FLEXBUFFERS, + const std::vector *mutating_variable_inputs = nullptr, + const std::vector *intermediates = nullptr) { + auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; + auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; + auto custom_options__ = custom_options ? _fbb.CreateVector(*custom_options) : 0; + auto mutating_variable_inputs__ = mutating_variable_inputs ? _fbb.CreateVector(*mutating_variable_inputs) : 0; + auto intermediates__ = intermediates ? 
_fbb.CreateVector(*intermediates) : 0; + return tflite::CreateOperator( + _fbb, + opcode_index, + inputs__, + outputs__, + builtin_options_type, + builtin_options, + custom_options__, + custom_options_format, + mutating_variable_inputs__, + intermediates__); +} + +flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SubGraphT : public flatbuffers::NativeTable { + typedef SubGraph TableType; + std::vector> tensors{}; + std::vector inputs{}; + std::vector outputs{}; + std::vector> operators{}; + std::string name{}; + SubGraphT() = default; + SubGraphT(const SubGraphT &o); + SubGraphT(SubGraphT&&) FLATBUFFERS_NOEXCEPT = default; + SubGraphT &operator=(SubGraphT o) FLATBUFFERS_NOEXCEPT; +}; + +struct SubGraph FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SubGraphT NativeTableType; + typedef SubGraphBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_TENSORS = 4, + VT_INPUTS = 6, + VT_OUTPUTS = 8, + VT_OPERATORS = 10, + VT_NAME = 12 + }; + const flatbuffers::Vector> *tensors() const { + return GetPointer> *>(VT_TENSORS); + } + const flatbuffers::Vector *inputs() const { + return GetPointer *>(VT_INPUTS); + } + const flatbuffers::Vector *outputs() const { + return GetPointer *>(VT_OUTPUTS); + } + const flatbuffers::Vector> *operators() const { + return GetPointer> *>(VT_OPERATORS); + } + const flatbuffers::String *name() const { + return GetPointer(VT_NAME); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_TENSORS) && + verifier.VerifyVector(tensors()) && + verifier.VerifyVectorOfTables(tensors()) && + VerifyOffset(verifier, VT_INPUTS) && + verifier.VerifyVector(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && + verifier.VerifyVector(outputs()) && + VerifyOffset(verifier, VT_OPERATORS) && + verifier.VerifyVector(operators()) && + verifier.VerifyVectorOfTables(operators()) && + VerifyOffset(verifier, VT_NAME) && + verifier.VerifyString(name()) && + verifier.EndTable(); + } + SubGraphT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SubGraphBuilder { + typedef SubGraph Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_tensors(flatbuffers::Offset>> tensors) { + fbb_.AddOffset(SubGraph::VT_TENSORS, tensors); + } + void add_inputs(flatbuffers::Offset> inputs) { + fbb_.AddOffset(SubGraph::VT_INPUTS, inputs); + } + void add_outputs(flatbuffers::Offset> outputs) { + fbb_.AddOffset(SubGraph::VT_OUTPUTS, outputs); + } + void add_operators(flatbuffers::Offset>> operators) { + fbb_.AddOffset(SubGraph::VT_OPERATORS, operators); + } + void add_name(flatbuffers::Offset name) { + fbb_.AddOffset(SubGraph::VT_NAME, name); + } + explicit SubGraphBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSubGraph( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset>> tensors = 0, + flatbuffers::Offset> inputs = 0, + 
flatbuffers::Offset> outputs = 0, + flatbuffers::Offset>> operators = 0, + flatbuffers::Offset name = 0) { + SubGraphBuilder builder_(_fbb); + builder_.add_name(name); + builder_.add_operators(operators); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + builder_.add_tensors(tensors); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateSubGraphDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector> *tensors = nullptr, + const std::vector *inputs = nullptr, + const std::vector *outputs = nullptr, + const std::vector> *operators = nullptr, + const char *name = nullptr) { + auto tensors__ = tensors ? _fbb.CreateVector>(*tensors) : 0; + auto inputs__ = inputs ? _fbb.CreateVector(*inputs) : 0; + auto outputs__ = outputs ? _fbb.CreateVector(*outputs) : 0; + auto operators__ = operators ? _fbb.CreateVector>(*operators) : 0; + auto name__ = name ? _fbb.CreateString(name) : 0; + return tflite::CreateSubGraph( + _fbb, + tensors__, + inputs__, + outputs__, + operators__, + name__); +} + +flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct BufferT : public flatbuffers::NativeTable { + typedef Buffer TableType; + std::vector data{}; +}; + +struct Buffer FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef BufferT NativeTableType; + typedef BufferBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_DATA = 4 + }; + const flatbuffers::Vector *data() const { + return GetPointer *>(VT_DATA); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_DATA) && + verifier.VerifyVector(data()) && + verifier.EndTable(); + } + BufferT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct BufferBuilder { + typedef Buffer Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_data(flatbuffers::Offset> data) { + fbb_.AddOffset(Buffer::VT_DATA, data); + } + explicit BufferBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateBuffer( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset> data = 0) { + BufferBuilder builder_(_fbb); + builder_.add_data(data); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateBufferDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector *data = nullptr) { + if (data) { _fbb.ForceVectorAlignment(data->size(), sizeof(uint8_t), 16); } + auto data__ = data ? 
_fbb.CreateVector(*data) : 0;
+  return tflite::CreateBuffer(
+      _fbb,
+      data__);
+}
+
+flatbuffers::Offset<Buffer> CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct MetadataT : public flatbuffers::NativeTable {
+  typedef Metadata TableType;
+  std::string name{};
+  uint32_t buffer = 0;
+};
+
+struct Metadata FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef MetadataT NativeTableType;
+  typedef MetadataBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_NAME = 4,
+    VT_BUFFER = 6
+  };
+  const flatbuffers::String *name() const {
+    return GetPointer<const flatbuffers::String *>(VT_NAME);
+  }
+  uint32_t buffer() const {
+    return GetField<uint32_t>(VT_BUFFER, 0);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_NAME) &&
+           verifier.VerifyString(name()) &&
+           VerifyField<uint32_t>(verifier, VT_BUFFER, 4) &&
+           verifier.EndTable();
+  }
+  MetadataT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<Metadata> Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct MetadataBuilder {
+  typedef Metadata Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+    fbb_.AddOffset(Metadata::VT_NAME, name);
+  }
+  void add_buffer(uint32_t buffer) {
+    fbb_.AddElement<uint32_t>(Metadata::VT_BUFFER, buffer, 0);
+  }
+  explicit MetadataBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<Metadata> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<Metadata>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<Metadata> CreateMetadata(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<flatbuffers::String> name = 0,
+    uint32_t buffer = 0) {
+  MetadataBuilder builder_(_fbb);
+  builder_.add_buffer(buffer);
+  builder_.add_name(name);
+  return builder_.Finish();
+}
+
+inline flatbuffers::Offset<Metadata> CreateMetadataDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const char *name = nullptr,
+    uint32_t buffer = 0) {
+  auto name__ = name ? _fbb.CreateString(name) : 0;
+  return tflite::CreateMetadata(
+      _fbb,
+      name__,
+      buffer);
+}
+
+flatbuffers::Offset<Metadata> CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+
+struct TensorMapT : public flatbuffers::NativeTable {
+  typedef TensorMap TableType;
+  std::string name{};
+  uint32_t tensor_index = 0;
+};
+
+struct TensorMap FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
+  typedef TensorMapT NativeTableType;
+  typedef TensorMapBuilder Builder;
+  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
+    VT_NAME = 4,
+    VT_TENSOR_INDEX = 6
+  };
+  const flatbuffers::String *name() const {
+    return GetPointer<const flatbuffers::String *>(VT_NAME);
+  }
+  uint32_t tensor_index() const {
+    return GetField<uint32_t>(VT_TENSOR_INDEX, 0);
+  }
+  bool Verify(flatbuffers::Verifier &verifier) const {
+    return VerifyTableStart(verifier) &&
+           VerifyOffset(verifier, VT_NAME) &&
+           verifier.VerifyString(name()) &&
+           VerifyField<uint32_t>(verifier, VT_TENSOR_INDEX, 4) &&
+           verifier.EndTable();
+  }
+  TensorMapT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  void UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
+  static flatbuffers::Offset<TensorMap> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
+};
+
+struct TensorMapBuilder {
+  typedef TensorMap Table;
+  flatbuffers::FlatBufferBuilder &fbb_;
+  flatbuffers::uoffset_t start_;
+  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
+    fbb_.AddOffset(TensorMap::VT_NAME, name);
+  }
+  void add_tensor_index(uint32_t tensor_index) {
+    fbb_.AddElement<uint32_t>(TensorMap::VT_TENSOR_INDEX, tensor_index, 0);
+  }
+  explicit TensorMapBuilder(flatbuffers::FlatBufferBuilder &_fbb)
+        : fbb_(_fbb) {
+    start_ = fbb_.StartTable();
+  }
+  flatbuffers::Offset<TensorMap> Finish() {
+    const auto end = fbb_.EndTable(start_);
+    auto o = flatbuffers::Offset<TensorMap>(end);
+    return o;
+  }
+};
+
+inline flatbuffers::Offset<TensorMap> CreateTensorMap(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    flatbuffers::Offset<flatbuffers::String> name = 0,
+    uint32_t tensor_index = 0) {
+  TensorMapBuilder builder_(_fbb);
+  builder_.add_tensor_index(tensor_index);
+  builder_.add_name(name);
+  return builder_.Finish();
+}
+
+inline flatbuffers::Offset<TensorMap> CreateTensorMapDirect(
+    flatbuffers::FlatBufferBuilder &_fbb,
+    const char *name = nullptr,
+    uint32_t tensor_index = 0) {
+  auto name__ = name ?
_fbb.CreateString(name) : 0; + return tflite::CreateTensorMap( + _fbb, + name__, + tensor_index); +} + +flatbuffers::Offset CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct SignatureDefT : public flatbuffers::NativeTable { + typedef SignatureDef TableType; + std::vector> inputs{}; + std::vector> outputs{}; + std::string signature_key{}; + uint32_t subgraph_index = 0; + SignatureDefT() = default; + SignatureDefT(const SignatureDefT &o); + SignatureDefT(SignatureDefT&&) FLATBUFFERS_NOEXCEPT = default; + SignatureDefT &operator=(SignatureDefT o) FLATBUFFERS_NOEXCEPT; +}; + +struct SignatureDef FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef SignatureDefT NativeTableType; + typedef SignatureDefBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_INPUTS = 4, + VT_OUTPUTS = 6, + VT_SIGNATURE_KEY = 8, + VT_SUBGRAPH_INDEX = 12 + }; + const flatbuffers::Vector> *inputs() const { + return GetPointer> *>(VT_INPUTS); + } + const flatbuffers::Vector> *outputs() const { + return GetPointer> *>(VT_OUTPUTS); + } + const flatbuffers::String *signature_key() const { + return GetPointer(VT_SIGNATURE_KEY); + } + uint32_t subgraph_index() const { + return GetField(VT_SUBGRAPH_INDEX, 0); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyOffset(verifier, VT_INPUTS) && + verifier.VerifyVector(inputs()) && + verifier.VerifyVectorOfTables(inputs()) && + VerifyOffset(verifier, VT_OUTPUTS) && + verifier.VerifyVector(outputs()) && + verifier.VerifyVectorOfTables(outputs()) && + VerifyOffset(verifier, VT_SIGNATURE_KEY) && + verifier.VerifyString(signature_key()) && + VerifyField(verifier, VT_SUBGRAPH_INDEX, 4) && + verifier.EndTable(); + } + SignatureDefT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct SignatureDefBuilder { + typedef SignatureDef Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_inputs(flatbuffers::Offset>> inputs) { + fbb_.AddOffset(SignatureDef::VT_INPUTS, inputs); + } + void add_outputs(flatbuffers::Offset>> outputs) { + fbb_.AddOffset(SignatureDef::VT_OUTPUTS, outputs); + } + void add_signature_key(flatbuffers::Offset signature_key) { + fbb_.AddOffset(SignatureDef::VT_SIGNATURE_KEY, signature_key); + } + void add_subgraph_index(uint32_t subgraph_index) { + fbb_.AddElement(SignatureDef::VT_SUBGRAPH_INDEX, subgraph_index, 0); + } + explicit SignatureDefBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateSignatureDef( + flatbuffers::FlatBufferBuilder &_fbb, + flatbuffers::Offset>> inputs = 0, + flatbuffers::Offset>> outputs = 0, + flatbuffers::Offset signature_key = 0, + uint32_t subgraph_index = 0) { + SignatureDefBuilder builder_(_fbb); + builder_.add_subgraph_index(subgraph_index); + builder_.add_signature_key(signature_key); + builder_.add_outputs(outputs); + builder_.add_inputs(inputs); + return builder_.Finish(); +} + +inline 
flatbuffers::Offset CreateSignatureDefDirect( + flatbuffers::FlatBufferBuilder &_fbb, + const std::vector> *inputs = nullptr, + const std::vector> *outputs = nullptr, + const char *signature_key = nullptr, + uint32_t subgraph_index = 0) { + auto inputs__ = inputs ? _fbb.CreateVector>(*inputs) : 0; + auto outputs__ = outputs ? _fbb.CreateVector>(*outputs) : 0; + auto signature_key__ = signature_key ? _fbb.CreateString(signature_key) : 0; + return tflite::CreateSignatureDef( + _fbb, + inputs__, + outputs__, + signature_key__, + subgraph_index); +} + +flatbuffers::Offset CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +struct ModelT : public flatbuffers::NativeTable { + typedef Model TableType; + uint32_t version = 0; + std::vector> operator_codes{}; + std::vector> subgraphs{}; + std::string description{}; + std::vector> buffers{}; + std::vector metadata_buffer{}; + std::vector> metadata{}; + std::vector> signature_defs{}; + ModelT() = default; + ModelT(const ModelT &o); + ModelT(ModelT&&) FLATBUFFERS_NOEXCEPT = default; + ModelT &operator=(ModelT o) FLATBUFFERS_NOEXCEPT; +}; + +struct Model FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table { + typedef ModelT NativeTableType; + typedef ModelBuilder Builder; + enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE { + VT_VERSION = 4, + VT_OPERATOR_CODES = 6, + VT_SUBGRAPHS = 8, + VT_DESCRIPTION = 10, + VT_BUFFERS = 12, + VT_METADATA_BUFFER = 14, + VT_METADATA = 16, + VT_SIGNATURE_DEFS = 18 + }; + uint32_t version() const { + return GetField(VT_VERSION, 0); + } + const flatbuffers::Vector> *operator_codes() const { + return GetPointer> *>(VT_OPERATOR_CODES); + } + const flatbuffers::Vector> *subgraphs() const { + return GetPointer> *>(VT_SUBGRAPHS); + } + const flatbuffers::String *description() const { + return GetPointer(VT_DESCRIPTION); + } + const flatbuffers::Vector> *buffers() const { + return GetPointer> *>(VT_BUFFERS); + } + const flatbuffers::Vector *metadata_buffer() const { + return GetPointer *>(VT_METADATA_BUFFER); + } + const flatbuffers::Vector> *metadata() const { + return GetPointer> *>(VT_METADATA); + } + const flatbuffers::Vector> *signature_defs() const { + return GetPointer> *>(VT_SIGNATURE_DEFS); + } + bool Verify(flatbuffers::Verifier &verifier) const { + return VerifyTableStart(verifier) && + VerifyField(verifier, VT_VERSION, 4) && + VerifyOffset(verifier, VT_OPERATOR_CODES) && + verifier.VerifyVector(operator_codes()) && + verifier.VerifyVectorOfTables(operator_codes()) && + VerifyOffset(verifier, VT_SUBGRAPHS) && + verifier.VerifyVector(subgraphs()) && + verifier.VerifyVectorOfTables(subgraphs()) && + VerifyOffset(verifier, VT_DESCRIPTION) && + verifier.VerifyString(description()) && + VerifyOffset(verifier, VT_BUFFERS) && + verifier.VerifyVector(buffers()) && + verifier.VerifyVectorOfTables(buffers()) && + VerifyOffset(verifier, VT_METADATA_BUFFER) && + verifier.VerifyVector(metadata_buffer()) && + VerifyOffset(verifier, VT_METADATA) && + verifier.VerifyVector(metadata()) && + verifier.VerifyVectorOfTables(metadata()) && + VerifyOffset(verifier, VT_SIGNATURE_DEFS) && + verifier.VerifyVector(signature_defs()) && + verifier.VerifyVectorOfTables(signature_defs()) && + verifier.EndTable(); + } + ModelT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const; + void UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const; + static flatbuffers::Offset 
Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); +}; + +struct ModelBuilder { + typedef Model Table; + flatbuffers::FlatBufferBuilder &fbb_; + flatbuffers::uoffset_t start_; + void add_version(uint32_t version) { + fbb_.AddElement(Model::VT_VERSION, version, 0); + } + void add_operator_codes(flatbuffers::Offset>> operator_codes) { + fbb_.AddOffset(Model::VT_OPERATOR_CODES, operator_codes); + } + void add_subgraphs(flatbuffers::Offset>> subgraphs) { + fbb_.AddOffset(Model::VT_SUBGRAPHS, subgraphs); + } + void add_description(flatbuffers::Offset description) { + fbb_.AddOffset(Model::VT_DESCRIPTION, description); + } + void add_buffers(flatbuffers::Offset>> buffers) { + fbb_.AddOffset(Model::VT_BUFFERS, buffers); + } + void add_metadata_buffer(flatbuffers::Offset> metadata_buffer) { + fbb_.AddOffset(Model::VT_METADATA_BUFFER, metadata_buffer); + } + void add_metadata(flatbuffers::Offset>> metadata) { + fbb_.AddOffset(Model::VT_METADATA, metadata); + } + void add_signature_defs(flatbuffers::Offset>> signature_defs) { + fbb_.AddOffset(Model::VT_SIGNATURE_DEFS, signature_defs); + } + explicit ModelBuilder(flatbuffers::FlatBufferBuilder &_fbb) + : fbb_(_fbb) { + start_ = fbb_.StartTable(); + } + flatbuffers::Offset Finish() { + const auto end = fbb_.EndTable(start_); + auto o = flatbuffers::Offset(end); + return o; + } +}; + +inline flatbuffers::Offset CreateModel( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t version = 0, + flatbuffers::Offset>> operator_codes = 0, + flatbuffers::Offset>> subgraphs = 0, + flatbuffers::Offset description = 0, + flatbuffers::Offset>> buffers = 0, + flatbuffers::Offset> metadata_buffer = 0, + flatbuffers::Offset>> metadata = 0, + flatbuffers::Offset>> signature_defs = 0) { + ModelBuilder builder_(_fbb); + builder_.add_signature_defs(signature_defs); + builder_.add_metadata(metadata); + builder_.add_metadata_buffer(metadata_buffer); + builder_.add_buffers(buffers); + builder_.add_description(description); + builder_.add_subgraphs(subgraphs); + builder_.add_operator_codes(operator_codes); + builder_.add_version(version); + return builder_.Finish(); +} + +inline flatbuffers::Offset CreateModelDirect( + flatbuffers::FlatBufferBuilder &_fbb, + uint32_t version = 0, + const std::vector> *operator_codes = nullptr, + const std::vector> *subgraphs = nullptr, + const char *description = nullptr, + const std::vector> *buffers = nullptr, + const std::vector *metadata_buffer = nullptr, + const std::vector> *metadata = nullptr, + const std::vector> *signature_defs = nullptr) { + auto operator_codes__ = operator_codes ? _fbb.CreateVector>(*operator_codes) : 0; + auto subgraphs__ = subgraphs ? _fbb.CreateVector>(*subgraphs) : 0; + auto description__ = description ? _fbb.CreateString(description) : 0; + auto buffers__ = buffers ? _fbb.CreateVector>(*buffers) : 0; + auto metadata_buffer__ = metadata_buffer ? _fbb.CreateVector(*metadata_buffer) : 0; + auto metadata__ = metadata ? _fbb.CreateVector>(*metadata) : 0; + auto signature_defs__ = signature_defs ? 
_fbb.CreateVector>(*signature_defs) : 0; + return tflite::CreateModel( + _fbb, + version, + operator_codes__, + subgraphs__, + description__, + buffers__, + metadata_buffer__, + metadata__, + signature_defs__); +} + +flatbuffers::Offset CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr); + +inline CustomQuantizationT *CustomQuantization::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CustomQuantizationT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void CustomQuantization::UnPackTo(CustomQuantizationT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = custom(); if (_e) { _o->custom.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom.begin()); } } +} + +inline flatbuffers::Offset CustomQuantization::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCustomQuantization(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCustomQuantization(flatbuffers::FlatBufferBuilder &_fbb, const CustomQuantizationT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CustomQuantizationT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + _fbb.ForceVectorAlignment(_o->custom.size(), sizeof(uint8_t), 16); + auto _custom = _o->custom.size() ? _fbb.CreateVector(_o->custom) : 0; + return tflite::CreateCustomQuantization( + _fbb, + _custom); +} + +inline QuantizationParametersT *QuantizationParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new QuantizationParametersT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void QuantizationParameters::UnPackTo(QuantizationParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = min(); if (_e) { _o->min.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->min[_i] = _e->Get(_i); } } } + { auto _e = max(); if (_e) { _o->max.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->max[_i] = _e->Get(_i); } } } + { auto _e = scale(); if (_e) { _o->scale.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->scale[_i] = _e->Get(_i); } } } + { auto _e = zero_point(); if (_e) { _o->zero_point.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->zero_point[_i] = _e->Get(_i); } } } + { auto _e = details_type(); _o->details.type = _e; } + { auto _e = details(); if (_e) _o->details.value = tflite::QuantizationDetailsUnion::UnPack(_e, details_type(), _resolver); } + { auto _e = quantized_dimension(); _o->quantized_dimension = _e; } +} + +inline flatbuffers::Offset QuantizationParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizationParameters(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizationParameters(flatbuffers::FlatBufferBuilder &_fbb, const QuantizationParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder 
*__fbb; const QuantizationParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _min = _o->min.size() ? _fbb.CreateVector(_o->min) : 0; + auto _max = _o->max.size() ? _fbb.CreateVector(_o->max) : 0; + auto _scale = _o->scale.size() ? _fbb.CreateVector(_o->scale) : 0; + auto _zero_point = _o->zero_point.size() ? _fbb.CreateVector(_o->zero_point) : 0; + auto _details_type = _o->details.type; + auto _details = _o->details.Pack(_fbb); + auto _quantized_dimension = _o->quantized_dimension; + return tflite::CreateQuantizationParameters( + _fbb, + _min, + _max, + _scale, + _zero_point, + _details_type, + _details, + _quantized_dimension); +} + +inline Int32VectorT *Int32Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Int32VectorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Int32Vector::UnPackTo(Int32VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } } +} + +inline flatbuffers::Offset Int32Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateInt32Vector(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateInt32Vector(flatbuffers::FlatBufferBuilder &_fbb, const Int32VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Int32VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0; + return tflite::CreateInt32Vector( + _fbb, + _values); +} + +inline Uint16VectorT *Uint16Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Uint16VectorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Uint16Vector::UnPackTo(Uint16VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } } +} + +inline flatbuffers::Offset Uint16Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUint16Vector(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUint16Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint16VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint16VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint16_t), 4); + auto _values = _o->values.size() ? 
_fbb.CreateVector(_o->values) : 0; + return tflite::CreateUint16Vector( + _fbb, + _values); +} + +inline Uint8VectorT *Uint8Vector::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Uint8VectorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Uint8Vector::UnPackTo(Uint8VectorT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = values(); if (_e) { _o->values.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->values.begin()); } } +} + +inline flatbuffers::Offset Uint8Vector::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUint8Vector(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUint8Vector(flatbuffers::FlatBufferBuilder &_fbb, const Uint8VectorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Uint8VectorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + _fbb.ForceVectorAlignment(_o->values.size(), sizeof(uint8_t), 4); + auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0; + return tflite::CreateUint8Vector( + _fbb, + _values); +} + +inline DimensionMetadataT *DimensionMetadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DimensionMetadataT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void DimensionMetadata::UnPackTo(DimensionMetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = format(); _o->format = _e; } + { auto _e = dense_size(); _o->dense_size = _e; } + { auto _e = array_segments_type(); _o->array_segments.type = _e; } + { auto _e = array_segments(); if (_e) _o->array_segments.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_segments_type(), _resolver); } + { auto _e = array_indices_type(); _o->array_indices.type = _e; } + { auto _e = array_indices(); if (_e) _o->array_indices.value = tflite::SparseIndexVectorUnion::UnPack(_e, array_indices_type(), _resolver); } +} + +inline flatbuffers::Offset DimensionMetadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDimensionMetadata(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDimensionMetadata(flatbuffers::FlatBufferBuilder &_fbb, const DimensionMetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DimensionMetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _format = _o->format; + auto _dense_size = _o->dense_size; + auto _array_segments_type = _o->array_segments.type; + auto _array_segments = _o->array_segments.Pack(_fbb); + auto _array_indices_type = _o->array_indices.type; + auto _array_indices = _o->array_indices.Pack(_fbb); + return tflite::CreateDimensionMetadata( + _fbb, + _format, + _dense_size, + _array_segments_type, + _array_segments, + _array_indices_type, + _array_indices); +} + +inline SparsityParametersT::SparsityParametersT(const SparsityParametersT &o) + : traversal_order(o.traversal_order), + block_map(o.block_map) { + dim_metadata.reserve(o.dim_metadata.size()); + 
for (const auto &dim_metadata_ : o.dim_metadata) { dim_metadata.emplace_back((dim_metadata_) ? new tflite::DimensionMetadataT(*dim_metadata_) : nullptr); }
+}
+
+inline SparsityParametersT &SparsityParametersT::operator=(SparsityParametersT o) FLATBUFFERS_NOEXCEPT {
+  std::swap(traversal_order, o.traversal_order);
+  std::swap(block_map, o.block_map);
+  std::swap(dim_metadata, o.dim_metadata);
+  return *this;
+}
+
+inline SparsityParametersT *SparsityParameters::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<SparsityParametersT>(new SparsityParametersT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void SparsityParameters::UnPackTo(SparsityParametersT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = traversal_order(); if (_e) { _o->traversal_order.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->traversal_order[_i] = _e->Get(_i); } } }
+  { auto _e = block_map(); if (_e) { _o->block_map.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->block_map[_i] = _e->Get(_i); } } }
+  { auto _e = dim_metadata(); if (_e) { _o->dim_metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->dim_metadata[_i]) { _e->Get(_i)->UnPackTo(_o->dim_metadata[_i].get(), _resolver); } else { _o->dim_metadata[_i] = std::unique_ptr<tflite::DimensionMetadataT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+}
+
+inline flatbuffers::Offset<SparsityParameters> SparsityParameters::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateSparsityParameters(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<SparsityParameters> CreateSparsityParameters(flatbuffers::FlatBufferBuilder &_fbb, const SparsityParametersT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparsityParametersT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _traversal_order = _o->traversal_order.size() ? _fbb.CreateVector(_o->traversal_order) : 0;
+  auto _block_map = _o->block_map.size() ? _fbb.CreateVector(_o->block_map) : 0;
+  auto _dim_metadata = _o->dim_metadata.size() ?
_fbb.CreateVector> (_o->dim_metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateDimensionMetadata(*__va->__fbb, __va->__o->dim_metadata[i].get(), __va->__rehasher); }, &_va ) : 0; + return tflite::CreateSparsityParameters( + _fbb, + _traversal_order, + _block_map, + _dim_metadata); +} + +inline VariantSubTypeT *VariantSubType::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new VariantSubTypeT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void VariantSubType::UnPackTo(VariantSubTypeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } } + { auto _e = type(); _o->type = _e; } + { auto _e = has_rank(); _o->has_rank = _e; } +} + +inline flatbuffers::Offset VariantSubType::Pack(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateVariantSubType(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateVariantSubType(flatbuffers::FlatBufferBuilder &_fbb, const VariantSubTypeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const VariantSubTypeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; + auto _type = _o->type; + auto _has_rank = _o->has_rank; + return tflite::CreateVariantSubType( + _fbb, + _shape, + _type, + _has_rank); +} + +inline TensorT::TensorT(const TensorT &o) + : shape(o.shape), + type(o.type), + buffer(o.buffer), + name(o.name), + quantization((o.quantization) ? new tflite::QuantizationParametersT(*o.quantization) : nullptr), + is_variable(o.is_variable), + sparsity((o.sparsity) ? new tflite::SparsityParametersT(*o.sparsity) : nullptr), + shape_signature(o.shape_signature), + has_rank(o.has_rank) { + variant_tensors.reserve(o.variant_tensors.size()); + for (const auto &variant_tensors_ : o.variant_tensors) { variant_tensors.emplace_back((variant_tensors_) ? 
new tflite::VariantSubTypeT(*variant_tensors_) : nullptr); } +} + +inline TensorT &TensorT::operator=(TensorT o) FLATBUFFERS_NOEXCEPT { + std::swap(shape, o.shape); + std::swap(type, o.type); + std::swap(buffer, o.buffer); + std::swap(name, o.name); + std::swap(quantization, o.quantization); + std::swap(is_variable, o.is_variable); + std::swap(sparsity, o.sparsity); + std::swap(shape_signature, o.shape_signature); + std::swap(has_rank, o.has_rank); + std::swap(variant_tensors, o.variant_tensors); + return *this; +} + +inline TensorT *Tensor::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TensorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Tensor::UnPackTo(TensorT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = shape(); if (_e) { _o->shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape[_i] = _e->Get(_i); } } } + { auto _e = type(); _o->type = _e; } + { auto _e = buffer(); _o->buffer = _e; } + { auto _e = name(); if (_e) _o->name = _e->str(); } + { auto _e = quantization(); if (_e) { if(_o->quantization) { _e->UnPackTo(_o->quantization.get(), _resolver); } else { _o->quantization = std::unique_ptr(_e->UnPack(_resolver)); } } } + { auto _e = is_variable(); _o->is_variable = _e; } + { auto _e = sparsity(); if (_e) { if(_o->sparsity) { _e->UnPackTo(_o->sparsity.get(), _resolver); } else { _o->sparsity = std::unique_ptr(_e->UnPack(_resolver)); } } } + { auto _e = shape_signature(); if (_e) { _o->shape_signature.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->shape_signature[_i] = _e->Get(_i); } } } + { auto _e = has_rank(); _o->has_rank = _e; } + { auto _e = variant_tensors(); if (_e) { _o->variant_tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->variant_tensors[_i]) { _e->Get(_i)->UnPackTo(_o->variant_tensors[_i].get(), _resolver); } else { _o->variant_tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } +} + +inline flatbuffers::Offset Tensor::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTensor(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTensor(flatbuffers::FlatBufferBuilder &_fbb, const TensorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _shape = _o->shape.size() ? _fbb.CreateVector(_o->shape) : 0; + auto _type = _o->type; + auto _buffer = _o->buffer; + auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + auto _quantization = _o->quantization ? CreateQuantizationParameters(_fbb, _o->quantization.get(), _rehasher) : 0; + auto _is_variable = _o->is_variable; + auto _sparsity = _o->sparsity ? CreateSparsityParameters(_fbb, _o->sparsity.get(), _rehasher) : 0; + auto _shape_signature = _o->shape_signature.size() ? _fbb.CreateVector(_o->shape_signature) : 0; + auto _has_rank = _o->has_rank; + auto _variant_tensors = _o->variant_tensors.size() ? 
_fbb.CreateVector> (_o->variant_tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateVariantSubType(*__va->__fbb, __va->__o->variant_tensors[i].get(), __va->__rehasher); }, &_va ) : 0; + return tflite::CreateTensor( + _fbb, + _shape, + _type, + _buffer, + _name, + _quantization, + _is_variable, + _sparsity, + _shape_signature, + _has_rank, + _variant_tensors); +} + +inline Conv2DOptionsT *Conv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Conv2DOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Conv2DOptions::UnPackTo(Conv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } +} + +inline flatbuffers::Offset Conv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConv2DOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + return tflite::CreateConv2DOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _fused_activation_function, + _dilation_w_factor, + _dilation_h_factor); +} + +inline Conv3DOptionsT *Conv3DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Conv3DOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Conv3DOptions::UnPackTo(Conv3DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_d(); _o->stride_d = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = dilation_d_factor(); _o->dilation_d_factor = _e; } + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } +} + +inline flatbuffers::Offset Conv3DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConv3DOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConv3DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Conv3DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Conv3DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; 
(void)_va; + auto _padding = _o->padding; + auto _stride_d = _o->stride_d; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_d_factor = _o->dilation_d_factor; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + return tflite::CreateConv3DOptions( + _fbb, + _padding, + _stride_d, + _stride_w, + _stride_h, + _fused_activation_function, + _dilation_d_factor, + _dilation_w_factor, + _dilation_h_factor); +} + +inline Pool2DOptionsT *Pool2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Pool2DOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Pool2DOptions::UnPackTo(Pool2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = filter_width(); _o->filter_width = _e; } + { auto _e = filter_height(); _o->filter_height = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } +} + +inline flatbuffers::Offset Pool2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePool2DOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePool2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const Pool2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Pool2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _filter_width = _o->filter_width; + auto _filter_height = _o->filter_height; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreatePool2DOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _filter_width, + _filter_height, + _fused_activation_function); +} + +inline DepthwiseConv2DOptionsT *DepthwiseConv2DOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DepthwiseConv2DOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void DepthwiseConv2DOptions::UnPackTo(DepthwiseConv2DOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = depth_multiplier(); _o->depth_multiplier = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = dilation_w_factor(); _o->dilation_w_factor = _e; } + { auto _e = dilation_h_factor(); _o->dilation_h_factor = _e; } +} + +inline flatbuffers::Offset DepthwiseConv2DOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDepthwiseConv2DOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDepthwiseConv2DOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthwiseConv2DOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; 
+ struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthwiseConv2DOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _depth_multiplier = _o->depth_multiplier; + auto _fused_activation_function = _o->fused_activation_function; + auto _dilation_w_factor = _o->dilation_w_factor; + auto _dilation_h_factor = _o->dilation_h_factor; + return tflite::CreateDepthwiseConv2DOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _depth_multiplier, + _fused_activation_function, + _dilation_w_factor, + _dilation_h_factor); +} + +inline ConcatEmbeddingsOptionsT *ConcatEmbeddingsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ConcatEmbeddingsOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ConcatEmbeddingsOptions::UnPackTo(ConcatEmbeddingsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = num_channels(); _o->num_channels = _e; } + { auto _e = num_columns_per_channel(); if (_e) { _o->num_columns_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->num_columns_per_channel[_i] = _e->Get(_i); } } } + { auto _e = embedding_dim_per_channel(); if (_e) { _o->embedding_dim_per_channel.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->embedding_dim_per_channel[_i] = _e->Get(_i); } } } +} + +inline flatbuffers::Offset ConcatEmbeddingsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConcatEmbeddingsOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConcatEmbeddingsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatEmbeddingsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatEmbeddingsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_channels = _o->num_channels; + auto _num_columns_per_channel = _o->num_columns_per_channel.size() ? _fbb.CreateVector(_o->num_columns_per_channel) : 0; + auto _embedding_dim_per_channel = _o->embedding_dim_per_channel.size() ? 
_fbb.CreateVector(_o->embedding_dim_per_channel) : 0; + return tflite::CreateConcatEmbeddingsOptions( + _fbb, + _num_channels, + _num_columns_per_channel, + _embedding_dim_per_channel); +} + +inline LSHProjectionOptionsT *LSHProjectionOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LSHProjectionOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LSHProjectionOptions::UnPackTo(LSHProjectionOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = type(); _o->type = _e; } +} + +inline flatbuffers::Offset LSHProjectionOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLSHProjectionOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLSHProjectionOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSHProjectionOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSHProjectionOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _type = _o->type; + return tflite::CreateLSHProjectionOptions( + _fbb, + _type); +} + +inline SVDFOptionsT *SVDFOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SVDFOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SVDFOptions::UnPackTo(SVDFOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = rank(); _o->rank = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} + +inline flatbuffers::Offset SVDFOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSVDFOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSVDFOptions(flatbuffers::FlatBufferBuilder &_fbb, const SVDFOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SVDFOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _rank = _o->rank; + auto _fused_activation_function = _o->fused_activation_function; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateSVDFOptions( + _fbb, + _rank, + _fused_activation_function, + _asymmetric_quantize_inputs); +} + +inline RNNOptionsT *RNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new RNNOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void RNNOptions::UnPackTo(RNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} + +inline flatbuffers::Offset RNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRNNOptions(_fbb, _o, _rehasher); +} + +inline 
flatbuffers::Offset CreateRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const RNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateRNNOptions( + _fbb, + _fused_activation_function, + _asymmetric_quantize_inputs); +} + +inline SequenceRNNOptionsT *SequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SequenceRNNOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SequenceRNNOptions::UnPackTo(SequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = time_major(); _o->time_major = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} + +inline flatbuffers::Offset SequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSequenceRNNOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const SequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _time_major = _o->time_major; + auto _fused_activation_function = _o->fused_activation_function; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateSequenceRNNOptions( + _fbb, + _time_major, + _fused_activation_function, + _asymmetric_quantize_inputs); +} + +inline BidirectionalSequenceRNNOptionsT *BidirectionalSequenceRNNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BidirectionalSequenceRNNOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void BidirectionalSequenceRNNOptions::UnPackTo(BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = time_major(); _o->time_major = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = merge_outputs(); _o->merge_outputs = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} + +inline flatbuffers::Offset BidirectionalSequenceRNNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBidirectionalSequenceRNNOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBidirectionalSequenceRNNOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceRNNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceRNNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; 
} _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _time_major = _o->time_major; + auto _fused_activation_function = _o->fused_activation_function; + auto _merge_outputs = _o->merge_outputs; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateBidirectionalSequenceRNNOptions( + _fbb, + _time_major, + _fused_activation_function, + _merge_outputs, + _asymmetric_quantize_inputs); +} + +inline FullyConnectedOptionsT *FullyConnectedOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FullyConnectedOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void FullyConnectedOptions::UnPackTo(FullyConnectedOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = weights_format(); _o->weights_format = _e; } + { auto _e = keep_num_dims(); _o->keep_num_dims = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} + +inline flatbuffers::Offset FullyConnectedOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFullyConnectedOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFullyConnectedOptions(flatbuffers::FlatBufferBuilder &_fbb, const FullyConnectedOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FullyConnectedOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _weights_format = _o->weights_format; + auto _keep_num_dims = _o->keep_num_dims; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateFullyConnectedOptions( + _fbb, + _fused_activation_function, + _weights_format, + _keep_num_dims, + _asymmetric_quantize_inputs); +} + +inline SoftmaxOptionsT *SoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SoftmaxOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SoftmaxOptions::UnPackTo(SoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = beta(); _o->beta = _e; } +} + +inline flatbuffers::Offset SoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSoftmaxOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const SoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _beta = _o->beta; + return tflite::CreateSoftmaxOptions( + _fbb, + _beta); +} + +inline ConcatenationOptionsT *ConcatenationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ConcatenationOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void 
ConcatenationOptions::UnPackTo(ConcatenationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } +} + +inline flatbuffers::Offset ConcatenationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateConcatenationOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateConcatenationOptions(flatbuffers::FlatBufferBuilder &_fbb, const ConcatenationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ConcatenationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateConcatenationOptions( + _fbb, + _axis, + _fused_activation_function); +} + +inline AddOptionsT *AddOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new AddOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void AddOptions::UnPackTo(AddOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } +} + +inline flatbuffers::Offset AddOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAddOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAddOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _pot_scale_int16 = _o->pot_scale_int16; + return tflite::CreateAddOptions( + _fbb, + _fused_activation_function, + _pot_scale_int16); +} + +inline MulOptionsT *MulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MulOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void MulOptions::UnPackTo(MulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } +} + +inline flatbuffers::Offset MulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMulOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const MulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateMulOptions( + _fbb, + 
_fused_activation_function); +} + +inline L2NormOptionsT *L2NormOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new L2NormOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void L2NormOptions::UnPackTo(L2NormOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } +} + +inline flatbuffers::Offset L2NormOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateL2NormOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateL2NormOptions(flatbuffers::FlatBufferBuilder &_fbb, const L2NormOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const L2NormOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateL2NormOptions( + _fbb, + _fused_activation_function); +} + +inline LocalResponseNormalizationOptionsT *LocalResponseNormalizationOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LocalResponseNormalizationOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LocalResponseNormalizationOptions::UnPackTo(LocalResponseNormalizationOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = radius(); _o->radius = _e; } + { auto _e = bias(); _o->bias = _e; } + { auto _e = alpha(); _o->alpha = _e; } + { auto _e = beta(); _o->beta = _e; } +} + +inline flatbuffers::Offset LocalResponseNormalizationOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLocalResponseNormalizationOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLocalResponseNormalizationOptions(flatbuffers::FlatBufferBuilder &_fbb, const LocalResponseNormalizationOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LocalResponseNormalizationOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _radius = _o->radius; + auto _bias = _o->bias; + auto _alpha = _o->alpha; + auto _beta = _o->beta; + return tflite::CreateLocalResponseNormalizationOptions( + _fbb, + _radius, + _bias, + _alpha, + _beta); +} + +inline LSTMOptionsT *LSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LSTMOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LSTMOptions::UnPackTo(LSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = cell_clip(); _o->cell_clip = _e; } + { auto _e = proj_clip(); _o->proj_clip = _e; } + { auto _e = kernel_type(); _o->kernel_type = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} + +inline flatbuffers::Offset 
LSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLSTMOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const LSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _kernel_type = _o->kernel_type; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateLSTMOptions( + _fbb, + _fused_activation_function, + _cell_clip, + _proj_clip, + _kernel_type, + _asymmetric_quantize_inputs); +} + +inline UnidirectionalSequenceLSTMOptionsT *UnidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnidirectionalSequenceLSTMOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void UnidirectionalSequenceLSTMOptions::UnPackTo(UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = cell_clip(); _o->cell_clip = _e; } + { auto _e = proj_clip(); _o->proj_clip = _e; } + { auto _e = time_major(); _o->time_major = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } + { auto _e = diagonal_recurrent_tensors(); _o->diagonal_recurrent_tensors = _e; } +} + +inline flatbuffers::Offset UnidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUnidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _time_major = _o->time_major; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + auto _diagonal_recurrent_tensors = _o->diagonal_recurrent_tensors; + return tflite::CreateUnidirectionalSequenceLSTMOptions( + _fbb, + _fused_activation_function, + _cell_clip, + _proj_clip, + _time_major, + _asymmetric_quantize_inputs, + _diagonal_recurrent_tensors); +} + +inline BidirectionalSequenceLSTMOptionsT *BidirectionalSequenceLSTMOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BidirectionalSequenceLSTMOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void BidirectionalSequenceLSTMOptions::UnPackTo(BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = 
fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = cell_clip(); _o->cell_clip = _e; } + { auto _e = proj_clip(); _o->proj_clip = _e; } + { auto _e = merge_outputs(); _o->merge_outputs = _e; } + { auto _e = time_major(); _o->time_major = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} + +inline flatbuffers::Offset BidirectionalSequenceLSTMOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBidirectionalSequenceLSTMOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBidirectionalSequenceLSTMOptions(flatbuffers::FlatBufferBuilder &_fbb, const BidirectionalSequenceLSTMOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BidirectionalSequenceLSTMOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _cell_clip = _o->cell_clip; + auto _proj_clip = _o->proj_clip; + auto _merge_outputs = _o->merge_outputs; + auto _time_major = _o->time_major; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateBidirectionalSequenceLSTMOptions( + _fbb, + _fused_activation_function, + _cell_clip, + _proj_clip, + _merge_outputs, + _time_major, + _asymmetric_quantize_inputs); +} + +inline ResizeBilinearOptionsT *ResizeBilinearOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ResizeBilinearOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ResizeBilinearOptions::UnPackTo(ResizeBilinearOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = align_corners(); _o->align_corners = _e; } + { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } +} + +inline flatbuffers::Offset ResizeBilinearOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateResizeBilinearOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateResizeBilinearOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeBilinearOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeBilinearOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _align_corners = _o->align_corners; + auto _half_pixel_centers = _o->half_pixel_centers; + return tflite::CreateResizeBilinearOptions( + _fbb, + _align_corners, + _half_pixel_centers); +} + +inline ResizeNearestNeighborOptionsT *ResizeNearestNeighborOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ResizeNearestNeighborOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ResizeNearestNeighborOptions::UnPackTo(ResizeNearestNeighborOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = align_corners(); _o->align_corners = _e; } + { auto _e = half_pixel_centers(); _o->half_pixel_centers = _e; } +} + +inline flatbuffers::Offset 
ResizeNearestNeighborOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateResizeNearestNeighborOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateResizeNearestNeighborOptions(flatbuffers::FlatBufferBuilder &_fbb, const ResizeNearestNeighborOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ResizeNearestNeighborOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _align_corners = _o->align_corners; + auto _half_pixel_centers = _o->half_pixel_centers; + return tflite::CreateResizeNearestNeighborOptions( + _fbb, + _align_corners, + _half_pixel_centers); +} + +inline CallOptionsT *CallOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CallOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void CallOptions::UnPackTo(CallOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = subgraph(); _o->subgraph = _e; } +} + +inline flatbuffers::Offset CallOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCallOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCallOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _subgraph = _o->subgraph; + return tflite::CreateCallOptions( + _fbb, + _subgraph); +} + +inline PadOptionsT *PadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PadOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void PadOptions::UnPackTo(PadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset PadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePadOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePadOptions(flatbuffers::FlatBufferBuilder &_fbb, const PadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePadOptions( + _fbb); +} + +inline PadV2OptionsT *PadV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PadV2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void PadV2Options::UnPackTo(PadV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset PadV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePadV2Options(_fbb, _o, _rehasher); +} + +inline 
flatbuffers::Offset CreatePadV2Options(flatbuffers::FlatBufferBuilder &_fbb, const PadV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PadV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePadV2Options( + _fbb); +} + +inline ReshapeOptionsT *ReshapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReshapeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ReshapeOptions::UnPackTo(ReshapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = new_shape(); if (_e) { _o->new_shape.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->new_shape[_i] = _e->Get(_i); } } } +} + +inline flatbuffers::Offset ReshapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReshapeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReshapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReshapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReshapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _new_shape = _o->new_shape.size() ? _fbb.CreateVector(_o->new_shape) : 0; + return tflite::CreateReshapeOptions( + _fbb, + _new_shape); +} + +inline SpaceToBatchNDOptionsT *SpaceToBatchNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SpaceToBatchNDOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SpaceToBatchNDOptions::UnPackTo(SpaceToBatchNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SpaceToBatchNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSpaceToBatchNDOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSpaceToBatchNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToBatchNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToBatchNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSpaceToBatchNDOptions( + _fbb); +} + +inline BatchToSpaceNDOptionsT *BatchToSpaceNDOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BatchToSpaceNDOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void BatchToSpaceNDOptions::UnPackTo(BatchToSpaceNDOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset BatchToSpaceNDOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBatchToSpaceNDOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset 
CreateBatchToSpaceNDOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchToSpaceNDOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchToSpaceNDOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateBatchToSpaceNDOptions( + _fbb); +} + +inline SkipGramOptionsT *SkipGramOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SkipGramOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SkipGramOptions::UnPackTo(SkipGramOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = ngram_size(); _o->ngram_size = _e; } + { auto _e = max_skip_size(); _o->max_skip_size = _e; } + { auto _e = include_all_ngrams(); _o->include_all_ngrams = _e; } +} + +inline flatbuffers::Offset SkipGramOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSkipGramOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSkipGramOptions(flatbuffers::FlatBufferBuilder &_fbb, const SkipGramOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SkipGramOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _ngram_size = _o->ngram_size; + auto _max_skip_size = _o->max_skip_size; + auto _include_all_ngrams = _o->include_all_ngrams; + return tflite::CreateSkipGramOptions( + _fbb, + _ngram_size, + _max_skip_size, + _include_all_ngrams); +} + +inline SpaceToDepthOptionsT *SpaceToDepthOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SpaceToDepthOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SpaceToDepthOptions::UnPackTo(SpaceToDepthOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = block_size(); _o->block_size = _e; } +} + +inline flatbuffers::Offset SpaceToDepthOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSpaceToDepthOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSpaceToDepthOptions(flatbuffers::FlatBufferBuilder &_fbb, const SpaceToDepthOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SpaceToDepthOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _block_size = _o->block_size; + return tflite::CreateSpaceToDepthOptions( + _fbb, + _block_size); +} + +inline DepthToSpaceOptionsT *DepthToSpaceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DepthToSpaceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void DepthToSpaceOptions::UnPackTo(DepthToSpaceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = block_size(); _o->block_size = _e; } +} + +inline flatbuffers::Offset 
DepthToSpaceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDepthToSpaceOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDepthToSpaceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DepthToSpaceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DepthToSpaceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _block_size = _o->block_size; + return tflite::CreateDepthToSpaceOptions( + _fbb, + _block_size); +} + +inline SubOptionsT *SubOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SubOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SubOptions::UnPackTo(SubOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } + { auto _e = pot_scale_int16(); _o->pot_scale_int16 = _e; } +} + +inline flatbuffers::Offset SubOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSubOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSubOptions(flatbuffers::FlatBufferBuilder &_fbb, const SubOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + auto _pot_scale_int16 = _o->pot_scale_int16; + return tflite::CreateSubOptions( + _fbb, + _fused_activation_function, + _pot_scale_int16); +} + +inline DivOptionsT *DivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DivOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void DivOptions::UnPackTo(DivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } +} + +inline flatbuffers::Offset DivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDivOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const DivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateDivOptions( + _fbb, + _fused_activation_function); +} + +inline TopKV2OptionsT *TopKV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TopKV2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void TopKV2Options::UnPackTo(TopKV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline 
flatbuffers::Offset TopKV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTopKV2Options(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTopKV2Options(flatbuffers::FlatBufferBuilder &_fbb, const TopKV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TopKV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateTopKV2Options( + _fbb); +} + +inline EmbeddingLookupSparseOptionsT *EmbeddingLookupSparseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new EmbeddingLookupSparseOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void EmbeddingLookupSparseOptions::UnPackTo(EmbeddingLookupSparseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = combiner(); _o->combiner = _e; } +} + +inline flatbuffers::Offset EmbeddingLookupSparseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateEmbeddingLookupSparseOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateEmbeddingLookupSparseOptions(flatbuffers::FlatBufferBuilder &_fbb, const EmbeddingLookupSparseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EmbeddingLookupSparseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _combiner = _o->combiner; + return tflite::CreateEmbeddingLookupSparseOptions( + _fbb, + _combiner); +} + +inline GatherOptionsT *GatherOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GatherOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void GatherOptions::UnPackTo(GatherOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; } + { auto _e = batch_dims(); _o->batch_dims = _e; } +} + +inline flatbuffers::Offset GatherOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGatherOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGatherOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + auto _batch_dims = _o->batch_dims; + return tflite::CreateGatherOptions( + _fbb, + _axis, + _batch_dims); +} + +inline TransposeOptionsT *TransposeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TransposeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void TransposeOptions::UnPackTo(TransposeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset 
TransposeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTransposeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTransposeOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateTransposeOptions( + _fbb); +} + +inline ExpOptionsT *ExpOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ExpOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ExpOptions::UnPackTo(ExpOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ExpOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateExpOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateExpOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateExpOptions( + _fbb); +} + +inline CosOptionsT *CosOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CosOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void CosOptions::UnPackTo(CosOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset CosOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCosOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCosOptions(flatbuffers::FlatBufferBuilder &_fbb, const CosOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CosOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateCosOptions( + _fbb); +} + +inline ReducerOptionsT *ReducerOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReducerOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ReducerOptions::UnPackTo(ReducerOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = keep_dims(); _o->keep_dims = _e; } +} + +inline flatbuffers::Offset ReducerOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReducerOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReducerOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReducerOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReducerOptionsT* 
__o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _keep_dims = _o->keep_dims; + return tflite::CreateReducerOptions( + _fbb, + _keep_dims); +} + +inline SqueezeOptionsT *SqueezeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SqueezeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SqueezeOptions::UnPackTo(SqueezeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = squeeze_dims(); if (_e) { _o->squeeze_dims.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->squeeze_dims[_i] = _e->Get(_i); } } } +} + +inline flatbuffers::Offset SqueezeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSqueezeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSqueezeOptions(flatbuffers::FlatBufferBuilder &_fbb, const SqueezeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SqueezeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _squeeze_dims = _o->squeeze_dims.size() ? _fbb.CreateVector(_o->squeeze_dims) : 0; + return tflite::CreateSqueezeOptions( + _fbb, + _squeeze_dims); +} + +inline SplitOptionsT *SplitOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SplitOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SplitOptions::UnPackTo(SplitOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = num_splits(); _o->num_splits = _e; } +} + +inline flatbuffers::Offset SplitOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSplitOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSplitOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SplitOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_splits = _o->num_splits; + return tflite::CreateSplitOptions( + _fbb, + _num_splits); +} + +inline SplitVOptionsT *SplitVOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SplitVOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SplitVOptions::UnPackTo(SplitVOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = num_splits(); _o->num_splits = _e; } +} + +inline flatbuffers::Offset SplitVOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSplitVOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSplitVOptions(flatbuffers::FlatBufferBuilder &_fbb, const SplitVOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const 
SplitVOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num_splits = _o->num_splits; + return tflite::CreateSplitVOptions( + _fbb, + _num_splits); +} + +inline StridedSliceOptionsT *StridedSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new StridedSliceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void StridedSliceOptions::UnPackTo(StridedSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = begin_mask(); _o->begin_mask = _e; } + { auto _e = end_mask(); _o->end_mask = _e; } + { auto _e = ellipsis_mask(); _o->ellipsis_mask = _e; } + { auto _e = new_axis_mask(); _o->new_axis_mask = _e; } + { auto _e = shrink_axis_mask(); _o->shrink_axis_mask = _e; } +} + +inline flatbuffers::Offset StridedSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateStridedSliceOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateStridedSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const StridedSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const StridedSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _begin_mask = _o->begin_mask; + auto _end_mask = _o->end_mask; + auto _ellipsis_mask = _o->ellipsis_mask; + auto _new_axis_mask = _o->new_axis_mask; + auto _shrink_axis_mask = _o->shrink_axis_mask; + return tflite::CreateStridedSliceOptions( + _fbb, + _begin_mask, + _end_mask, + _ellipsis_mask, + _new_axis_mask, + _shrink_axis_mask); +} + +inline LogSoftmaxOptionsT *LogSoftmaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LogSoftmaxOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LogSoftmaxOptions::UnPackTo(LogSoftmaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LogSoftmaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogSoftmaxOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLogSoftmaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogSoftmaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogSoftmaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogSoftmaxOptions( + _fbb); +} + +inline CastOptionsT *CastOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CastOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void CastOptions::UnPackTo(CastOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = in_data_type(); _o->in_data_type = _e; } + { auto _e = out_data_type(); _o->out_data_type = _e; } +} + +inline flatbuffers::Offset CastOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT* _o, const 
flatbuffers::rehasher_function_t *_rehasher) { + return CreateCastOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCastOptions(flatbuffers::FlatBufferBuilder &_fbb, const CastOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CastOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _in_data_type = _o->in_data_type; + auto _out_data_type = _o->out_data_type; + return tflite::CreateCastOptions( + _fbb, + _in_data_type, + _out_data_type); +} + +inline DequantizeOptionsT *DequantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DequantizeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void DequantizeOptions::UnPackTo(DequantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset DequantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDequantizeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDequantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const DequantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DequantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateDequantizeOptions( + _fbb); +} + +inline MaximumMinimumOptionsT *MaximumMinimumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MaximumMinimumOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void MaximumMinimumOptions::UnPackTo(MaximumMinimumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset MaximumMinimumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMaximumMinimumOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMaximumMinimumOptions(flatbuffers::FlatBufferBuilder &_fbb, const MaximumMinimumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MaximumMinimumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateMaximumMinimumOptions( + _fbb); +} + +inline TileOptionsT *TileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TileOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void TileOptions::UnPackTo(TileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset TileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTileOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTileOptions(flatbuffers::FlatBufferBuilder &_fbb, const TileOptionsT *_o, const 
flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateTileOptions( + _fbb); +} + +inline ArgMaxOptionsT *ArgMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ArgMaxOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ArgMaxOptions::UnPackTo(ArgMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = output_type(); _o->output_type = _e; } +} + +inline flatbuffers::Offset ArgMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateArgMaxOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateArgMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _output_type = _o->output_type; + return tflite::CreateArgMaxOptions( + _fbb, + _output_type); +} + +inline ArgMinOptionsT *ArgMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ArgMinOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ArgMinOptions::UnPackTo(ArgMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = output_type(); _o->output_type = _e; } +} + +inline flatbuffers::Offset ArgMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateArgMinOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateArgMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const ArgMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ArgMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _output_type = _o->output_type; + return tflite::CreateArgMinOptions( + _fbb, + _output_type); +} + +inline GreaterOptionsT *GreaterOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GreaterOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void GreaterOptions::UnPackTo(GreaterOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset GreaterOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGreaterOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGreaterOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; 
+ return tflite::CreateGreaterOptions( + _fbb); +} + +inline GreaterEqualOptionsT *GreaterEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GreaterEqualOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void GreaterEqualOptions::UnPackTo(GreaterEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset GreaterEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGreaterEqualOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGreaterEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const GreaterEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GreaterEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateGreaterEqualOptions( + _fbb); +} + +inline LessOptionsT *LessOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LessOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LessOptions::UnPackTo(LessOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LessOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLessOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLessOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLessOptions( + _fbb); +} + +inline LessEqualOptionsT *LessEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LessEqualOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LessEqualOptions::UnPackTo(LessEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LessEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLessEqualOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLessEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const LessEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LessEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLessEqualOptions( + _fbb); +} + +inline NegOptionsT *NegOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new NegOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void NegOptions::UnPackTo(NegOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; 
+} + +inline flatbuffers::Offset NegOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNegOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNegOptions(flatbuffers::FlatBufferBuilder &_fbb, const NegOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NegOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNegOptions( + _fbb); +} + +inline SelectOptionsT *SelectOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SelectOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SelectOptions::UnPackTo(SelectOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SelectOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSelectOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSelectOptions(flatbuffers::FlatBufferBuilder &_fbb, const SelectOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSelectOptions( + _fbb); +} + +inline SliceOptionsT *SliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SliceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SliceOptions::UnPackTo(SliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSliceOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSliceOptions( + _fbb); +} + +inline TransposeConvOptionsT *TransposeConvOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TransposeConvOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void TransposeConvOptions::UnPackTo(TransposeConvOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = padding(); _o->padding = _e; } + { auto _e = stride_w(); _o->stride_w = _e; } + { auto _e = stride_h(); _o->stride_h = _e; } + { auto _e = fused_activation_function(); _o->fused_activation_function = _e; } +} + +inline flatbuffers::Offset TransposeConvOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTransposeConvOptions(_fbb, _o, _rehasher); +} + +inline 
flatbuffers::Offset CreateTransposeConvOptions(flatbuffers::FlatBufferBuilder &_fbb, const TransposeConvOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TransposeConvOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _padding = _o->padding; + auto _stride_w = _o->stride_w; + auto _stride_h = _o->stride_h; + auto _fused_activation_function = _o->fused_activation_function; + return tflite::CreateTransposeConvOptions( + _fbb, + _padding, + _stride_w, + _stride_h, + _fused_activation_function); +} + +inline ExpandDimsOptionsT *ExpandDimsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ExpandDimsOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ExpandDimsOptions::UnPackTo(ExpandDimsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ExpandDimsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateExpandDimsOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateExpandDimsOptions(flatbuffers::FlatBufferBuilder &_fbb, const ExpandDimsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ExpandDimsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateExpandDimsOptions( + _fbb); +} + +inline SparseToDenseOptionsT *SparseToDenseOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SparseToDenseOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SparseToDenseOptions::UnPackTo(SparseToDenseOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = validate_indices(); _o->validate_indices = _e; } +} + +inline flatbuffers::Offset SparseToDenseOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSparseToDenseOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSparseToDenseOptions(flatbuffers::FlatBufferBuilder &_fbb, const SparseToDenseOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SparseToDenseOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _validate_indices = _o->validate_indices; + return tflite::CreateSparseToDenseOptions( + _fbb, + _validate_indices); +} + +inline EqualOptionsT *EqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new EqualOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void EqualOptions::UnPackTo(EqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset EqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return 
CreateEqualOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const EqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateEqualOptions( + _fbb); +} + +inline NotEqualOptionsT *NotEqualOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new NotEqualOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void NotEqualOptions::UnPackTo(NotEqualOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset NotEqualOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNotEqualOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNotEqualOptions(flatbuffers::FlatBufferBuilder &_fbb, const NotEqualOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NotEqualOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNotEqualOptions( + _fbb); +} + +inline ShapeOptionsT *ShapeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ShapeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ShapeOptions::UnPackTo(ShapeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = out_type(); _o->out_type = _e; } +} + +inline flatbuffers::Offset ShapeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateShapeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateShapeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ShapeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ShapeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _out_type = _o->out_type; + return tflite::CreateShapeOptions( + _fbb, + _out_type); +} + +inline RankOptionsT *RankOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new RankOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void RankOptions::UnPackTo(RankOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset RankOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRankOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRankOptions(flatbuffers::FlatBufferBuilder &_fbb, const RankOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RankOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, 
_rehasher}; (void)_va; + return tflite::CreateRankOptions( + _fbb); +} + +inline PowOptionsT *PowOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PowOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void PowOptions::UnPackTo(PowOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset PowOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePowOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePowOptions(flatbuffers::FlatBufferBuilder &_fbb, const PowOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PowOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreatePowOptions( + _fbb); +} + +inline FakeQuantOptionsT *FakeQuantOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FakeQuantOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void FakeQuantOptions::UnPackTo(FakeQuantOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = min(); _o->min = _e; } + { auto _e = max(); _o->max = _e; } + { auto _e = num_bits(); _o->num_bits = _e; } + { auto _e = narrow_range(); _o->narrow_range = _e; } +} + +inline flatbuffers::Offset FakeQuantOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFakeQuantOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFakeQuantOptions(flatbuffers::FlatBufferBuilder &_fbb, const FakeQuantOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FakeQuantOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _min = _o->min; + auto _max = _o->max; + auto _num_bits = _o->num_bits; + auto _narrow_range = _o->narrow_range; + return tflite::CreateFakeQuantOptions( + _fbb, + _min, + _max, + _num_bits, + _narrow_range); +} + +inline PackOptionsT *PackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new PackOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void PackOptions::UnPackTo(PackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = values_count(); _o->values_count = _e; } + { auto _e = axis(); _o->axis = _e; } +} + +inline flatbuffers::Offset PackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreatePackOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreatePackOptions(flatbuffers::FlatBufferBuilder &_fbb, const PackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const PackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _values_count = _o->values_count; + auto 
_axis = _o->axis; + return tflite::CreatePackOptions( + _fbb, + _values_count, + _axis); +} + +inline LogicalOrOptionsT *LogicalOrOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LogicalOrOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LogicalOrOptions::UnPackTo(LogicalOrOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LogicalOrOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalOrOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLogicalOrOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalOrOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalOrOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalOrOptions( + _fbb); +} + +inline OneHotOptionsT *OneHotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new OneHotOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void OneHotOptions::UnPackTo(OneHotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = axis(); _o->axis = _e; } +} + +inline flatbuffers::Offset OneHotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateOneHotOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateOneHotOptions(flatbuffers::FlatBufferBuilder &_fbb, const OneHotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OneHotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _axis = _o->axis; + return tflite::CreateOneHotOptions( + _fbb, + _axis); +} + +inline AbsOptionsT *AbsOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new AbsOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void AbsOptions::UnPackTo(AbsOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset AbsOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAbsOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAbsOptions(flatbuffers::FlatBufferBuilder &_fbb, const AbsOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AbsOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateAbsOptions( + _fbb); +} + +inline HardSwishOptionsT *HardSwishOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HardSwishOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void HardSwishOptions::UnPackTo(HardSwishOptionsT *_o, const 
flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset HardSwishOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHardSwishOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHardSwishOptions(flatbuffers::FlatBufferBuilder &_fbb, const HardSwishOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HardSwishOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHardSwishOptions( + _fbb); +} + +inline LogicalAndOptionsT *LogicalAndOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LogicalAndOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LogicalAndOptions::UnPackTo(LogicalAndOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LogicalAndOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalAndOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLogicalAndOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalAndOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalAndOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalAndOptions( + _fbb); +} + +inline LogicalNotOptionsT *LogicalNotOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LogicalNotOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LogicalNotOptions::UnPackTo(LogicalNotOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset LogicalNotOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLogicalNotOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLogicalNotOptions(flatbuffers::FlatBufferBuilder &_fbb, const LogicalNotOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LogicalNotOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateLogicalNotOptions( + _fbb); +} + +inline UnpackOptionsT *UnpackOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnpackOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void UnpackOptions::UnPackTo(UnpackOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = num(); _o->num = _e; } + { auto _e = axis(); _o->axis = _e; } +} + +inline flatbuffers::Offset UnpackOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnpackOptions(_fbb, _o, 
_rehasher); +} + +inline flatbuffers::Offset CreateUnpackOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnpackOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnpackOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _num = _o->num; + auto _axis = _o->axis; + return tflite::CreateUnpackOptions( + _fbb, + _num, + _axis); +} + +inline FloorDivOptionsT *FloorDivOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FloorDivOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void FloorDivOptions::UnPackTo(FloorDivOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset FloorDivOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFloorDivOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFloorDivOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorDivOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorDivOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateFloorDivOptions( + _fbb); +} + +inline SquareOptionsT *SquareOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SquareOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SquareOptions::UnPackTo(SquareOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SquareOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSquareOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSquareOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquareOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquareOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSquareOptions( + _fbb); +} + +inline ZerosLikeOptionsT *ZerosLikeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ZerosLikeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ZerosLikeOptions::UnPackTo(ZerosLikeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ZerosLikeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateZerosLikeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateZerosLikeOptions(flatbuffers::FlatBufferBuilder &_fbb, const ZerosLikeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ZerosLikeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { 
&_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateZerosLikeOptions( + _fbb); +} + +inline FillOptionsT *FillOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FillOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void FillOptions::UnPackTo(FillOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset FillOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFillOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFillOptions(flatbuffers::FlatBufferBuilder &_fbb, const FillOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FillOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateFillOptions( + _fbb); +} + +inline FloorModOptionsT *FloorModOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new FloorModOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void FloorModOptions::UnPackTo(FloorModOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset FloorModOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateFloorModOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateFloorModOptions(flatbuffers::FlatBufferBuilder &_fbb, const FloorModOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FloorModOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateFloorModOptions( + _fbb); +} + +inline RangeOptionsT *RangeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new RangeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void RangeOptions::UnPackTo(RangeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset RangeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRangeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRangeOptions(flatbuffers::FlatBufferBuilder &_fbb, const RangeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RangeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateRangeOptions( + _fbb); +} + +inline LeakyReluOptionsT *LeakyReluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new LeakyReluOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void LeakyReluOptions::UnPackTo(LeakyReluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = alpha(); 
_o->alpha = _e; } +} + +inline flatbuffers::Offset LeakyReluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateLeakyReluOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateLeakyReluOptions(flatbuffers::FlatBufferBuilder &_fbb, const LeakyReluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const LeakyReluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _alpha = _o->alpha; + return tflite::CreateLeakyReluOptions( + _fbb, + _alpha); +} + +inline SquaredDifferenceOptionsT *SquaredDifferenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SquaredDifferenceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SquaredDifferenceOptions::UnPackTo(SquaredDifferenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SquaredDifferenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSquaredDifferenceOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSquaredDifferenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const SquaredDifferenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SquaredDifferenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSquaredDifferenceOptions( + _fbb); +} + +inline MirrorPadOptionsT *MirrorPadOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MirrorPadOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void MirrorPadOptions::UnPackTo(MirrorPadOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = mode(); _o->mode = _e; } +} + +inline flatbuffers::Offset MirrorPadOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMirrorPadOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMirrorPadOptions(flatbuffers::FlatBufferBuilder &_fbb, const MirrorPadOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MirrorPadOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _mode = _o->mode; + return tflite::CreateMirrorPadOptions( + _fbb, + _mode); +} + +inline UniqueOptionsT *UniqueOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UniqueOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void UniqueOptions::UnPackTo(UniqueOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = idx_out_type(); _o->idx_out_type = _e; } +} + +inline flatbuffers::Offset UniqueOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT* 
_o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUniqueOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUniqueOptions(flatbuffers::FlatBufferBuilder &_fbb, const UniqueOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UniqueOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _idx_out_type = _o->idx_out_type; + return tflite::CreateUniqueOptions( + _fbb, + _idx_out_type); +} + +inline ReverseV2OptionsT *ReverseV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReverseV2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ReverseV2Options::UnPackTo(ReverseV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ReverseV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReverseV2Options(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReverseV2Options(flatbuffers::FlatBufferBuilder &_fbb, const ReverseV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateReverseV2Options( + _fbb); +} + +inline AddNOptionsT *AddNOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new AddNOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void AddNOptions::UnPackTo(AddNOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset AddNOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAddNOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAddNOptions(flatbuffers::FlatBufferBuilder &_fbb, const AddNOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AddNOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateAddNOptions( + _fbb); +} + +inline GatherNdOptionsT *GatherNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GatherNdOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void GatherNdOptions::UnPackTo(GatherNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset GatherNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGatherNdOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGatherNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const GatherNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GatherNdOptionsT* 
__o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateGatherNdOptions( + _fbb); +} + +inline WhereOptionsT *WhereOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new WhereOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void WhereOptions::UnPackTo(WhereOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset WhereOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateWhereOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateWhereOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhereOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhereOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateWhereOptions( + _fbb); +} + +inline ReverseSequenceOptionsT *ReverseSequenceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReverseSequenceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ReverseSequenceOptions::UnPackTo(ReverseSequenceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = seq_dim(); _o->seq_dim = _e; } + { auto _e = batch_dim(); _o->batch_dim = _e; } +} + +inline flatbuffers::Offset ReverseSequenceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReverseSequenceOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReverseSequenceOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReverseSequenceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReverseSequenceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _seq_dim = _o->seq_dim; + auto _batch_dim = _o->batch_dim; + return tflite::CreateReverseSequenceOptions( + _fbb, + _seq_dim, + _batch_dim); +} + +inline MatrixDiagOptionsT *MatrixDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MatrixDiagOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void MatrixDiagOptions::UnPackTo(MatrixDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset MatrixDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMatrixDiagOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMatrixDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateMatrixDiagOptions( + _fbb); +} + 
+inline QuantizeOptionsT *QuantizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new QuantizeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void QuantizeOptions::UnPackTo(QuantizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset QuantizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateQuantizeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateQuantizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const QuantizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const QuantizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateQuantizeOptions( + _fbb); +} + +inline MatrixSetDiagOptionsT *MatrixSetDiagOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MatrixSetDiagOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void MatrixSetDiagOptions::UnPackTo(MatrixSetDiagOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset MatrixSetDiagOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMatrixSetDiagOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMatrixSetDiagOptions(flatbuffers::FlatBufferBuilder &_fbb, const MatrixSetDiagOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MatrixSetDiagOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateMatrixSetDiagOptions( + _fbb); +} + +inline IfOptionsT *IfOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new IfOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void IfOptions::UnPackTo(IfOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = then_subgraph_index(); _o->then_subgraph_index = _e; } + { auto _e = else_subgraph_index(); _o->else_subgraph_index = _e; } +} + +inline flatbuffers::Offset IfOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateIfOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateIfOptions(flatbuffers::FlatBufferBuilder &_fbb, const IfOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const IfOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _then_subgraph_index = _o->then_subgraph_index; + auto _else_subgraph_index = _o->else_subgraph_index; + return tflite::CreateIfOptions( + _fbb, + _then_subgraph_index, + _else_subgraph_index); +} + +inline CallOnceOptionsT *CallOnceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = 
std::unique_ptr(new CallOnceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void CallOnceOptions::UnPackTo(CallOnceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = init_subgraph_index(); _o->init_subgraph_index = _e; } +} + +inline flatbuffers::Offset CallOnceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCallOnceOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCallOnceOptions(flatbuffers::FlatBufferBuilder &_fbb, const CallOnceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CallOnceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _init_subgraph_index = _o->init_subgraph_index; + return tflite::CreateCallOnceOptions( + _fbb, + _init_subgraph_index); +} + +inline WhileOptionsT *WhileOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new WhileOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void WhileOptions::UnPackTo(WhileOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = cond_subgraph_index(); _o->cond_subgraph_index = _e; } + { auto _e = body_subgraph_index(); _o->body_subgraph_index = _e; } +} + +inline flatbuffers::Offset WhileOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateWhileOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateWhileOptions(flatbuffers::FlatBufferBuilder &_fbb, const WhileOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const WhileOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _cond_subgraph_index = _o->cond_subgraph_index; + auto _body_subgraph_index = _o->body_subgraph_index; + return tflite::CreateWhileOptions( + _fbb, + _cond_subgraph_index, + _body_subgraph_index); +} + +inline NonMaxSuppressionV4OptionsT *NonMaxSuppressionV4Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new NonMaxSuppressionV4OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void NonMaxSuppressionV4Options::UnPackTo(NonMaxSuppressionV4OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset NonMaxSuppressionV4Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNonMaxSuppressionV4Options(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNonMaxSuppressionV4Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV4OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV4OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNonMaxSuppressionV4Options( + _fbb); 
+} + +inline NonMaxSuppressionV5OptionsT *NonMaxSuppressionV5Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new NonMaxSuppressionV5OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void NonMaxSuppressionV5Options::UnPackTo(NonMaxSuppressionV5OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset NonMaxSuppressionV5Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateNonMaxSuppressionV5Options(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateNonMaxSuppressionV5Options(flatbuffers::FlatBufferBuilder &_fbb, const NonMaxSuppressionV5OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NonMaxSuppressionV5OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateNonMaxSuppressionV5Options( + _fbb); +} + +inline ScatterNdOptionsT *ScatterNdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ScatterNdOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ScatterNdOptions::UnPackTo(ScatterNdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ScatterNdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateScatterNdOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateScatterNdOptions(flatbuffers::FlatBufferBuilder &_fbb, const ScatterNdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ScatterNdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateScatterNdOptions( + _fbb); +} + +inline SelectV2OptionsT *SelectV2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SelectV2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SelectV2Options::UnPackTo(SelectV2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SelectV2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSelectV2Options(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSelectV2Options(flatbuffers::FlatBufferBuilder &_fbb, const SelectV2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SelectV2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSelectV2Options( + _fbb); +} + +inline DensifyOptionsT *DensifyOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DensifyOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void 
DensifyOptions::UnPackTo(DensifyOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset DensifyOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDensifyOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDensifyOptions(flatbuffers::FlatBufferBuilder &_fbb, const DensifyOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DensifyOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateDensifyOptions( + _fbb); +} + +inline SegmentSumOptionsT *SegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SegmentSumOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SegmentSumOptions::UnPackTo(SegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSegmentSumOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const SegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSegmentSumOptions( + _fbb); +} + +inline BatchMatMulOptionsT *BatchMatMulOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BatchMatMulOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void BatchMatMulOptions::UnPackTo(BatchMatMulOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = adj_x(); _o->adj_x = _e; } + { auto _e = adj_y(); _o->adj_y = _e; } + { auto _e = asymmetric_quantize_inputs(); _o->asymmetric_quantize_inputs = _e; } +} + +inline flatbuffers::Offset BatchMatMulOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBatchMatMulOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBatchMatMulOptions(flatbuffers::FlatBufferBuilder &_fbb, const BatchMatMulOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BatchMatMulOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _adj_x = _o->adj_x; + auto _adj_y = _o->adj_y; + auto _asymmetric_quantize_inputs = _o->asymmetric_quantize_inputs; + return tflite::CreateBatchMatMulOptions( + _fbb, + _adj_x, + _adj_y, + _asymmetric_quantize_inputs); +} + +inline CumsumOptionsT *CumsumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new CumsumOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void 
CumsumOptions::UnPackTo(CumsumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = exclusive(); _o->exclusive = _e; } + { auto _e = reverse(); _o->reverse = _e; } +} + +inline flatbuffers::Offset CumsumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateCumsumOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateCumsumOptions(flatbuffers::FlatBufferBuilder &_fbb, const CumsumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CumsumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _exclusive = _o->exclusive; + auto _reverse = _o->reverse; + return tflite::CreateCumsumOptions( + _fbb, + _exclusive, + _reverse); +} + +inline BroadcastToOptionsT *BroadcastToOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BroadcastToOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void BroadcastToOptions::UnPackTo(BroadcastToOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset BroadcastToOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBroadcastToOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBroadcastToOptions(flatbuffers::FlatBufferBuilder &_fbb, const BroadcastToOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BroadcastToOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateBroadcastToOptions( + _fbb); +} + +inline Rfft2dOptionsT *Rfft2dOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new Rfft2dOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Rfft2dOptions::UnPackTo(Rfft2dOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset Rfft2dOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRfft2dOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRfft2dOptions(flatbuffers::FlatBufferBuilder &_fbb, const Rfft2dOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const Rfft2dOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateRfft2dOptions( + _fbb); +} + +inline HashtableOptionsT *HashtableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HashtableOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void HashtableOptions::UnPackTo(HashtableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = table_id(); _o->table_id = _e; } + { auto _e = key_dtype(); _o->key_dtype = 
_e; } + { auto _e = value_dtype(); _o->value_dtype = _e; } +} + +inline flatbuffers::Offset HashtableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHashtableOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _table_id = _o->table_id; + auto _key_dtype = _o->key_dtype; + auto _value_dtype = _o->value_dtype; + return tflite::CreateHashtableOptions( + _fbb, + _table_id, + _key_dtype, + _value_dtype); +} + +inline HashtableFindOptionsT *HashtableFindOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HashtableFindOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void HashtableFindOptions::UnPackTo(HashtableFindOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset HashtableFindOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableFindOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHashtableFindOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableFindOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableFindOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableFindOptions( + _fbb); +} + +inline HashtableImportOptionsT *HashtableImportOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HashtableImportOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void HashtableImportOptions::UnPackTo(HashtableImportOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset HashtableImportOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableImportOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHashtableImportOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableImportOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableImportOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableImportOptions( + _fbb); +} + +inline HashtableSizeOptionsT *HashtableSizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new HashtableSizeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void HashtableSizeOptions::UnPackTo(HashtableSizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline 
flatbuffers::Offset HashtableSizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateHashtableSizeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateHashtableSizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const HashtableSizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HashtableSizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateHashtableSizeOptions( + _fbb); +} + +inline VarHandleOptionsT *VarHandleOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new VarHandleOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void VarHandleOptions::UnPackTo(VarHandleOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = container(); if (_e) _o->container = _e->str(); } + { auto _e = shared_name(); if (_e) _o->shared_name = _e->str(); } +} + +inline flatbuffers::Offset VarHandleOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateVarHandleOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateVarHandleOptions(flatbuffers::FlatBufferBuilder &_fbb, const VarHandleOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const VarHandleOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _container = _o->container.empty() ? 0 : _fbb.CreateString(_o->container); + auto _shared_name = _o->shared_name.empty() ? 
0 : _fbb.CreateString(_o->shared_name); + return tflite::CreateVarHandleOptions( + _fbb, + _container, + _shared_name); +} + +inline ReadVariableOptionsT *ReadVariableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ReadVariableOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ReadVariableOptions::UnPackTo(ReadVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ReadVariableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateReadVariableOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateReadVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const ReadVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ReadVariableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateReadVariableOptions( + _fbb); +} + +inline AssignVariableOptionsT *AssignVariableOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new AssignVariableOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void AssignVariableOptions::UnPackTo(AssignVariableOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset AssignVariableOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateAssignVariableOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateAssignVariableOptions(flatbuffers::FlatBufferBuilder &_fbb, const AssignVariableOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const AssignVariableOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateAssignVariableOptions( + _fbb); +} + +inline RandomOptionsT *RandomOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new RandomOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void RandomOptions::UnPackTo(RandomOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = seed(); _o->seed = _e; } + { auto _e = seed2(); _o->seed2 = _e; } +} + +inline flatbuffers::Offset RandomOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateRandomOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateRandomOptions(flatbuffers::FlatBufferBuilder &_fbb, const RandomOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const RandomOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _seed = _o->seed; + auto _seed2 = _o->seed2; + return tflite::CreateRandomOptions( + _fbb, + _seed, + _seed2); +} + +inline BucketizeOptionsT 
*BucketizeOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BucketizeOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void BucketizeOptions::UnPackTo(BucketizeOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = boundaries(); if (_e) { _o->boundaries.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->boundaries[_i] = _e->Get(_i); } } } +} + +inline flatbuffers::Offset BucketizeOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBucketizeOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBucketizeOptions(flatbuffers::FlatBufferBuilder &_fbb, const BucketizeOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BucketizeOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _boundaries = _o->boundaries.size() ? _fbb.CreateVector(_o->boundaries) : 0; + return tflite::CreateBucketizeOptions( + _fbb, + _boundaries); +} + +inline GeluOptionsT *GeluOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new GeluOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void GeluOptions::UnPackTo(GeluOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = approximate(); _o->approximate = _e; } +} + +inline flatbuffers::Offset GeluOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateGeluOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateGeluOptions(flatbuffers::FlatBufferBuilder &_fbb, const GeluOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GeluOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _approximate = _o->approximate; + return tflite::CreateGeluOptions( + _fbb, + _approximate); +} + +inline DynamicUpdateSliceOptionsT *DynamicUpdateSliceOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new DynamicUpdateSliceOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void DynamicUpdateSliceOptions::UnPackTo(DynamicUpdateSliceOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset DynamicUpdateSliceOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateDynamicUpdateSliceOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateDynamicUpdateSliceOptions(flatbuffers::FlatBufferBuilder &_fbb, const DynamicUpdateSliceOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const DynamicUpdateSliceOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return 
tflite::CreateDynamicUpdateSliceOptions( + _fbb); +} + +inline UnsortedSegmentProdOptionsT *UnsortedSegmentProdOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnsortedSegmentProdOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void UnsortedSegmentProdOptions::UnPackTo(UnsortedSegmentProdOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset UnsortedSegmentProdOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnsortedSegmentProdOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUnsortedSegmentProdOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentProdOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentProdOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateUnsortedSegmentProdOptions( + _fbb); +} + +inline UnsortedSegmentMaxOptionsT *UnsortedSegmentMaxOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnsortedSegmentMaxOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void UnsortedSegmentMaxOptions::UnPackTo(UnsortedSegmentMaxOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset UnsortedSegmentMaxOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnsortedSegmentMaxOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUnsortedSegmentMaxOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMaxOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMaxOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateUnsortedSegmentMaxOptions( + _fbb); +} + +inline UnsortedSegmentSumOptionsT *UnsortedSegmentSumOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnsortedSegmentSumOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void UnsortedSegmentSumOptions::UnPackTo(UnsortedSegmentSumOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset UnsortedSegmentSumOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnsortedSegmentSumOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUnsortedSegmentSumOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentSumOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentSumOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateUnsortedSegmentSumOptions( 
+ _fbb); +} + +inline ATan2OptionsT *ATan2Options::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new ATan2OptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void ATan2Options::UnPackTo(ATan2OptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset ATan2Options::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateATan2Options(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateATan2Options(flatbuffers::FlatBufferBuilder &_fbb, const ATan2OptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ATan2OptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateATan2Options( + _fbb); +} + +inline UnsortedSegmentMinOptionsT *UnsortedSegmentMinOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new UnsortedSegmentMinOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void UnsortedSegmentMinOptions::UnPackTo(UnsortedSegmentMinOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset UnsortedSegmentMinOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateUnsortedSegmentMinOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateUnsortedSegmentMinOptions(flatbuffers::FlatBufferBuilder &_fbb, const UnsortedSegmentMinOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const UnsortedSegmentMinOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateUnsortedSegmentMinOptions( + _fbb); +} + +inline SignOptionsT *SignOptions::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SignOptionsT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SignOptions::UnPackTo(SignOptionsT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; +} + +inline flatbuffers::Offset SignOptions::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSignOptions(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSignOptions(flatbuffers::FlatBufferBuilder &_fbb, const SignOptionsT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignOptionsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + return tflite::CreateSignOptions( + _fbb); +} + +inline OperatorCodeT *OperatorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new OperatorCodeT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void OperatorCode::UnPackTo(OperatorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { 
auto _e = deprecated_builtin_code(); _o->deprecated_builtin_code = _e; } + { auto _e = custom_code(); if (_e) _o->custom_code = _e->str(); } + { auto _e = version(); _o->version = _e; } + { auto _e = builtin_code(); _o->builtin_code = _e; } +} + +inline flatbuffers::Offset OperatorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateOperatorCode(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateOperatorCode(flatbuffers::FlatBufferBuilder &_fbb, const OperatorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _deprecated_builtin_code = _o->deprecated_builtin_code; + auto _custom_code = _o->custom_code.empty() ? 0 : _fbb.CreateString(_o->custom_code); + auto _version = _o->version; + auto _builtin_code = _o->builtin_code; + return tflite::CreateOperatorCode( + _fbb, + _deprecated_builtin_code, + _custom_code, + _version, + _builtin_code); +} + +inline OperatorT *Operator::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new OperatorT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Operator::UnPackTo(OperatorT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = opcode_index(); _o->opcode_index = _e; } + { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } } + { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } } + { auto _e = builtin_options_type(); _o->builtin_options.type = _e; } + { auto _e = builtin_options(); if (_e) _o->builtin_options.value = tflite::BuiltinOptionsUnion::UnPack(_e, builtin_options_type(), _resolver); } + { auto _e = custom_options(); if (_e) { _o->custom_options.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->custom_options.begin()); } } + { auto _e = custom_options_format(); _o->custom_options_format = _e; } + { auto _e = mutating_variable_inputs(); if (_e) { _o->mutating_variable_inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->mutating_variable_inputs[_i] = _e->Get(_i) != 0; } } } + { auto _e = intermediates(); if (_e) { _o->intermediates.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->intermediates[_i] = _e->Get(_i); } } } +} + +inline flatbuffers::Offset Operator::Pack(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateOperator(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateOperator(flatbuffers::FlatBufferBuilder &_fbb, const OperatorT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const OperatorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _opcode_index = _o->opcode_index; + auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; + auto _outputs = _o->outputs.size() ? 
_fbb.CreateVector(_o->outputs) : 0; + auto _builtin_options_type = _o->builtin_options.type; + auto _builtin_options = _o->builtin_options.Pack(_fbb); + auto _custom_options = _o->custom_options.size() ? _fbb.CreateVector(_o->custom_options) : 0; + auto _custom_options_format = _o->custom_options_format; + auto _mutating_variable_inputs = _o->mutating_variable_inputs.size() ? _fbb.CreateVector(_o->mutating_variable_inputs) : 0; + auto _intermediates = _o->intermediates.size() ? _fbb.CreateVector(_o->intermediates) : 0; + return tflite::CreateOperator( + _fbb, + _opcode_index, + _inputs, + _outputs, + _builtin_options_type, + _builtin_options, + _custom_options, + _custom_options_format, + _mutating_variable_inputs, + _intermediates); +} + +inline SubGraphT::SubGraphT(const SubGraphT &o) + : inputs(o.inputs), + outputs(o.outputs), + name(o.name) { + tensors.reserve(o.tensors.size()); + for (const auto &tensors_ : o.tensors) { tensors.emplace_back((tensors_) ? new tflite::TensorT(*tensors_) : nullptr); } + operators.reserve(o.operators.size()); + for (const auto &operators_ : o.operators) { operators.emplace_back((operators_) ? new tflite::OperatorT(*operators_) : nullptr); } +} + +inline SubGraphT &SubGraphT::operator=(SubGraphT o) FLATBUFFERS_NOEXCEPT { + std::swap(tensors, o.tensors); + std::swap(inputs, o.inputs); + std::swap(outputs, o.outputs); + std::swap(operators, o.operators); + std::swap(name, o.name); + return *this; +} + +inline SubGraphT *SubGraph::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SubGraphT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SubGraph::UnPackTo(SubGraphT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = tensors(); if (_e) { _o->tensors.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->tensors[_i]) { _e->Get(_i)->UnPackTo(_o->tensors[_i].get(), _resolver); } else { _o->tensors[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } + { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inputs[_i] = _e->Get(_i); } } } + { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->outputs[_i] = _e->Get(_i); } } } + { auto _e = operators(); if (_e) { _o->operators.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operators[_i]) { _e->Get(_i)->UnPackTo(_o->operators[_i].get(), _resolver); } else { _o->operators[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } + { auto _e = name(); if (_e) _o->name = _e->str(); } +} + +inline flatbuffers::Offset SubGraph::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSubGraph(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSubGraph(flatbuffers::FlatBufferBuilder &_fbb, const SubGraphT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SubGraphT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _tensors = _o->tensors.size() ? 
_fbb.CreateVector> (_o->tensors.size(), [](size_t i, _VectorArgs *__va) { return CreateTensor(*__va->__fbb, __va->__o->tensors[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _inputs = _o->inputs.size() ? _fbb.CreateVector(_o->inputs) : 0; + auto _outputs = _o->outputs.size() ? _fbb.CreateVector(_o->outputs) : 0; + auto _operators = _o->operators.size() ? _fbb.CreateVector> (_o->operators.size(), [](size_t i, _VectorArgs *__va) { return CreateOperator(*__va->__fbb, __va->__o->operators[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + return tflite::CreateSubGraph( + _fbb, + _tensors, + _inputs, + _outputs, + _operators, + _name); +} + +inline BufferT *Buffer::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new BufferT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Buffer::UnPackTo(BufferT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = data(); if (_e) { _o->data.resize(_e->size()); std::copy(_e->begin(), _e->end(), _o->data.begin()); } } +} + +inline flatbuffers::Offset Buffer::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BufferT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateBuffer(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateBuffer(flatbuffers::FlatBufferBuilder &_fbb, const BufferT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BufferT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + _fbb.ForceVectorAlignment(_o->data.size(), sizeof(uint8_t), 16); + auto _data = _o->data.size() ? _fbb.CreateVector(_o->data) : 0; + return tflite::CreateBuffer( + _fbb, + _data); +} + +inline MetadataT *Metadata::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new MetadataT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void Metadata::UnPackTo(MetadataT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = name(); if (_e) _o->name = _e->str(); } + { auto _e = buffer(); _o->buffer = _e; } +} + +inline flatbuffers::Offset Metadata::Pack(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateMetadata(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateMetadata(flatbuffers::FlatBufferBuilder &_fbb, const MetadataT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const MetadataT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _name = _o->name.empty() ? 
0 : _fbb.CreateString(_o->name); + auto _buffer = _o->buffer; + return tflite::CreateMetadata( + _fbb, + _name, + _buffer); +} + +inline TensorMapT *TensorMap::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new TensorMapT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void TensorMap::UnPackTo(TensorMapT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = name(); if (_e) _o->name = _e->str(); } + { auto _e = tensor_index(); _o->tensor_index = _e; } +} + +inline flatbuffers::Offset TensorMap::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateTensorMap(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateTensorMap(flatbuffers::FlatBufferBuilder &_fbb, const TensorMapT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; + struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TensorMapT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va; + auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name); + auto _tensor_index = _o->tensor_index; + return tflite::CreateTensorMap( + _fbb, + _name, + _tensor_index); +} + +inline SignatureDefT::SignatureDefT(const SignatureDefT &o) + : signature_key(o.signature_key), + subgraph_index(o.subgraph_index) { + inputs.reserve(o.inputs.size()); + for (const auto &inputs_ : o.inputs) { inputs.emplace_back((inputs_) ? new tflite::TensorMapT(*inputs_) : nullptr); } + outputs.reserve(o.outputs.size()); + for (const auto &outputs_ : o.outputs) { outputs.emplace_back((outputs_) ? new tflite::TensorMapT(*outputs_) : nullptr); } +} + +inline SignatureDefT &SignatureDefT::operator=(SignatureDefT o) FLATBUFFERS_NOEXCEPT { + std::swap(inputs, o.inputs); + std::swap(outputs, o.outputs); + std::swap(signature_key, o.signature_key); + std::swap(subgraph_index, o.subgraph_index); + return *this; +} + +inline SignatureDefT *SignatureDef::UnPack(const flatbuffers::resolver_function_t *_resolver) const { + auto _o = std::unique_ptr(new SignatureDefT()); + UnPackTo(_o.get(), _resolver); + return _o.release(); +} + +inline void SignatureDef::UnPackTo(SignatureDefT *_o, const flatbuffers::resolver_function_t *_resolver) const { + (void)_o; + (void)_resolver; + { auto _e = inputs(); if (_e) { _o->inputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->inputs[_i]) { _e->Get(_i)->UnPackTo(_o->inputs[_i].get(), _resolver); } else { _o->inputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } + { auto _e = outputs(); if (_e) { _o->outputs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->outputs[_i]) { _e->Get(_i)->UnPackTo(_o->outputs[_i].get(), _resolver); } else { _o->outputs[_i] = std::unique_ptr(_e->Get(_i)->UnPack(_resolver)); }; } } } + { auto _e = signature_key(); if (_e) _o->signature_key = _e->str(); } + { auto _e = subgraph_index(); _o->subgraph_index = _e; } +} + +inline flatbuffers::Offset SignatureDef::Pack(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT* _o, const flatbuffers::rehasher_function_t *_rehasher) { + return CreateSignatureDef(_fbb, _o, _rehasher); +} + +inline flatbuffers::Offset CreateSignatureDef(flatbuffers::FlatBufferBuilder &_fbb, const SignatureDefT *_o, const flatbuffers::rehasher_function_t *_rehasher) { + (void)_rehasher; + (void)_o; 
+  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const SignatureDefT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _inputs = _o->inputs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::TensorMap>> (_o->inputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->inputs[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _outputs = _o->outputs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::TensorMap>> (_o->outputs.size(), [](size_t i, _VectorArgs *__va) { return CreateTensorMap(*__va->__fbb, __va->__o->outputs[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _signature_key = _o->signature_key.empty() ? 0 : _fbb.CreateString(_o->signature_key);
+  auto _subgraph_index = _o->subgraph_index;
+  return tflite::CreateSignatureDef(
+      _fbb,
+      _inputs,
+      _outputs,
+      _signature_key,
+      _subgraph_index);
+}
+
+inline ModelT::ModelT(const ModelT &o)
+      : version(o.version),
+        description(o.description),
+        metadata_buffer(o.metadata_buffer) {
+  operator_codes.reserve(o.operator_codes.size());
+  for (const auto &operator_codes_ : o.operator_codes) { operator_codes.emplace_back((operator_codes_) ? new tflite::OperatorCodeT(*operator_codes_) : nullptr); }
+  subgraphs.reserve(o.subgraphs.size());
+  for (const auto &subgraphs_ : o.subgraphs) { subgraphs.emplace_back((subgraphs_) ? new tflite::SubGraphT(*subgraphs_) : nullptr); }
+  buffers.reserve(o.buffers.size());
+  for (const auto &buffers_ : o.buffers) { buffers.emplace_back((buffers_) ? new tflite::BufferT(*buffers_) : nullptr); }
+  metadata.reserve(o.metadata.size());
+  for (const auto &metadata_ : o.metadata) { metadata.emplace_back((metadata_) ? new tflite::MetadataT(*metadata_) : nullptr); }
+  signature_defs.reserve(o.signature_defs.size());
+  for (const auto &signature_defs_ : o.signature_defs) { signature_defs.emplace_back((signature_defs_) ? new tflite::SignatureDefT(*signature_defs_) : nullptr); }
+}
+
+inline ModelT &ModelT::operator=(ModelT o) FLATBUFFERS_NOEXCEPT {
+  std::swap(version, o.version);
+  std::swap(operator_codes, o.operator_codes);
+  std::swap(subgraphs, o.subgraphs);
+  std::swap(description, o.description);
+  std::swap(buffers, o.buffers);
+  std::swap(metadata_buffer, o.metadata_buffer);
+  std::swap(metadata, o.metadata);
+  std::swap(signature_defs, o.signature_defs);
+  return *this;
+}
+
+inline ModelT *Model::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
+  auto _o = std::unique_ptr<ModelT>(new ModelT());
+  UnPackTo(_o.get(), _resolver);
+  return _o.release();
+}
+
+inline void Model::UnPackTo(ModelT *_o, const flatbuffers::resolver_function_t *_resolver) const {
+  (void)_o;
+  (void)_resolver;
+  { auto _e = version(); _o->version = _e; }
+  { auto _e = operator_codes(); if (_e) { _o->operator_codes.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->operator_codes[_i]) { _e->Get(_i)->UnPackTo(_o->operator_codes[_i].get(), _resolver); } else { _o->operator_codes[_i] = std::unique_ptr<tflite::OperatorCodeT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = subgraphs(); if (_e) { _o->subgraphs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->subgraphs[_i]) { _e->Get(_i)->UnPackTo(_o->subgraphs[_i].get(), _resolver); } else { _o->subgraphs[_i] = std::unique_ptr<tflite::SubGraphT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = description(); if (_e) _o->description = _e->str(); }
+  { auto _e = buffers(); if (_e) { _o->buffers.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->buffers[_i]) { _e->Get(_i)->UnPackTo(_o->buffers[_i].get(), _resolver); } else { _o->buffers[_i] = std::unique_ptr<tflite::BufferT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = metadata_buffer(); if (_e) { _o->metadata_buffer.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metadata_buffer[_i] = _e->Get(_i); } } }
+  { auto _e = metadata(); if (_e) { _o->metadata.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->metadata[_i]) { _e->Get(_i)->UnPackTo(_o->metadata[_i].get(), _resolver); } else { _o->metadata[_i] = std::unique_ptr<tflite::MetadataT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+  { auto _e = signature_defs(); if (_e) { _o->signature_defs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { if(_o->signature_defs[_i]) { _e->Get(_i)->UnPackTo(_o->signature_defs[_i].get(), _resolver); } else { _o->signature_defs[_i] = std::unique_ptr<tflite::SignatureDefT>(_e->Get(_i)->UnPack(_resolver)); }; } } }
+}
+
+inline flatbuffers::Offset<Model> Model::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ModelT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
+  return CreateModel(_fbb, _o, _rehasher);
+}
+
+inline flatbuffers::Offset<Model> CreateModel(flatbuffers::FlatBufferBuilder &_fbb, const ModelT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
+  (void)_rehasher;
+  (void)_o;
+  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ModelT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
+  auto _version = _o->version;
+  auto _operator_codes = _o->operator_codes.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::OperatorCode>> (_o->operator_codes.size(), [](size_t i, _VectorArgs *__va) { return CreateOperatorCode(*__va->__fbb, __va->__o->operator_codes[i].get(), __va->__rehasher); }, &_va ) : 0;
+  auto _subgraphs = _o->subgraphs.size() ?
_fbb.CreateVector> (_o->subgraphs.size(), [](size_t i, _VectorArgs *__va) { return CreateSubGraph(*__va->__fbb, __va->__o->subgraphs[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _description = _o->description.empty() ? 0 : _fbb.CreateString(_o->description); + auto _buffers = _o->buffers.size() ? _fbb.CreateVector> (_o->buffers.size(), [](size_t i, _VectorArgs *__va) { return CreateBuffer(*__va->__fbb, __va->__o->buffers[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _metadata_buffer = _o->metadata_buffer.size() ? _fbb.CreateVector(_o->metadata_buffer) : 0; + auto _metadata = _o->metadata.size() ? _fbb.CreateVector> (_o->metadata.size(), [](size_t i, _VectorArgs *__va) { return CreateMetadata(*__va->__fbb, __va->__o->metadata[i].get(), __va->__rehasher); }, &_va ) : 0; + auto _signature_defs = _o->signature_defs.size() ? _fbb.CreateVector> (_o->signature_defs.size(), [](size_t i, _VectorArgs *__va) { return CreateSignatureDef(*__va->__fbb, __va->__o->signature_defs[i].get(), __va->__rehasher); }, &_va ) : 0; + return tflite::CreateModel( + _fbb, + _version, + _operator_codes, + _subgraphs, + _description, + _buffers, + _metadata_buffer, + _metadata, + _signature_defs); +} + +inline bool VerifyQuantizationDetails(flatbuffers::Verifier &verifier, const void *obj, QuantizationDetails type) { + switch (type) { + case QuantizationDetails_NONE: { + return true; + } + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return true; + } +} + +inline bool VerifyQuantizationDetailsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyQuantizationDetails( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline void *QuantizationDetailsUnion::UnPack(const void *obj, QuantizationDetails type, const flatbuffers::resolver_function_t *resolver) { + (void)resolver; + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; + } +} + +inline flatbuffers::Offset QuantizationDetailsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { + (void)_rehasher; + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(value); + return CreateCustomQuantization(_fbb, ptr, _rehasher).Union(); + } + default: return 0; + } +} + +inline QuantizationDetailsUnion::QuantizationDetailsUnion(const QuantizationDetailsUnion &u) : type(u.type), value(nullptr) { + switch (type) { + case QuantizationDetails_CustomQuantization: { + value = new tflite::CustomQuantizationT(*reinterpret_cast(u.value)); + break; + } + default: + break; + } +} + +inline void QuantizationDetailsUnion::Reset() { + switch (type) { + case QuantizationDetails_CustomQuantization: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + default: break; + } + value = nullptr; + type = QuantizationDetails_NONE; +} + +inline bool VerifySparseIndexVector(flatbuffers::Verifier &verifier, const void *obj, SparseIndexVector type) { + switch (type) { + case SparseIndexVector_NONE: { + return true; + } + case SparseIndexVector_Int32Vector: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case SparseIndexVector_Uint16Vector: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case SparseIndexVector_Uint8Vector: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return true; + } +} + +inline bool VerifySparseIndexVectorVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifySparseIndexVector( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline void *SparseIndexVectorUnion::UnPack(const void *obj, SparseIndexVector type, const flatbuffers::resolver_function_t *resolver) { + (void)resolver; + switch (type) { + case SparseIndexVector_Int32Vector: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case SparseIndexVector_Uint16Vector: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case SparseIndexVector_Uint8Vector: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; + } +} + +inline flatbuffers::Offset SparseIndexVectorUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { + (void)_rehasher; + switch (type) { + case SparseIndexVector_Int32Vector: { + auto ptr = reinterpret_cast(value); + return CreateInt32Vector(_fbb, ptr, _rehasher).Union(); + } + case SparseIndexVector_Uint16Vector: { + auto ptr = reinterpret_cast(value); + return CreateUint16Vector(_fbb, ptr, _rehasher).Union(); + } + case SparseIndexVector_Uint8Vector: { + auto ptr = reinterpret_cast(value); + return CreateUint8Vector(_fbb, ptr, _rehasher).Union(); + } + default: return 0; + } +} + +inline SparseIndexVectorUnion::SparseIndexVectorUnion(const SparseIndexVectorUnion &u) : type(u.type), value(nullptr) { + switch (type) { + case SparseIndexVector_Int32Vector: { + value = new tflite::Int32VectorT(*reinterpret_cast(u.value)); + break; + } + case SparseIndexVector_Uint16Vector: { + value = new tflite::Uint16VectorT(*reinterpret_cast(u.value)); + break; + } + case SparseIndexVector_Uint8Vector: { + value = new tflite::Uint8VectorT(*reinterpret_cast(u.value)); + break; + } + default: + break; + } +} + +inline void SparseIndexVectorUnion::Reset() { + switch (type) { + case SparseIndexVector_Int32Vector: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case SparseIndexVector_Uint16Vector: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case SparseIndexVector_Uint8Vector: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + default: break; + } + value = nullptr; + type = SparseIndexVector_NONE; +} + +inline bool VerifyBuiltinOptions(flatbuffers::Verifier &verifier, const void *obj, BuiltinOptions type) { + switch (type) { + case BuiltinOptions_NONE: { + return true; + } + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(obj); + return 
verifier.VerifyTable(ptr); + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FullyConnectedOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MulOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReducerOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SubOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } 
+ case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ArgMaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GreaterOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_PackOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_OneHotOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FloorDivOptions: { + auto ptr = 
reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SquareOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RangeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MirrorPadOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AbsOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SplitVOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UniqueOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReverseV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AddNOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GatherNdOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CosOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_WhereOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RankOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_MatrixSetDiagOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_WhileOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case 
BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SelectV2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DensifyOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SegmentSumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BatchMatMulOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CumsumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_CallOnceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BroadcastToOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_VarHandleOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ReadVariableOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_AssignVariableOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_RandomOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_BucketizeOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_GeluOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_DynamicUpdateSliceOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnsortedSegmentProdOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnsortedSegmentMaxOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnsortedSegmentMinOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_UnsortedSegmentSumOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_ATan2Options: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + case BuiltinOptions_SignOptions: { + auto ptr = reinterpret_cast(obj); + return verifier.VerifyTable(ptr); + } + default: return true; + } +} + +inline bool 
VerifyBuiltinOptionsVector(flatbuffers::Verifier &verifier, const flatbuffers::Vector> *values, const flatbuffers::Vector *types) { + if (!values || !types) return !values && !types; + if (values->size() != types->size()) return false; + for (flatbuffers::uoffset_t i = 0; i < values->size(); ++i) { + if (!VerifyBuiltinOptions( + verifier, values->Get(i), types->GetEnum(i))) { + return false; + } + } + return true; +} + +inline void *BuiltinOptionsUnion::UnPack(const void *obj, BuiltinOptions type, const flatbuffers::resolver_function_t *resolver) { + (void)resolver; + switch (type) { + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FullyConnectedOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MulOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(obj); + return 
ptr->UnPack(resolver); + } + case BuiltinOptions_ReducerOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SubOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ArgMaxOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GreaterOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr 
= reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_PackOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_OneHotOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FloorDivOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SquareOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RangeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MirrorPadOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_AbsOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SplitVOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UniqueOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ReverseV2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_AddNOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GatherNdOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_CosOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_WhereOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RankOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = 
reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_MatrixSetDiagOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_WhileOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SelectV2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DensifyOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SegmentSumOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BatchMatMulOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_CumsumOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_CallOnceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BroadcastToOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_VarHandleOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ReadVariableOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_AssignVariableOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_RandomOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_BucketizeOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_GeluOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_DynamicUpdateSliceOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnsortedSegmentProdOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnsortedSegmentMaxOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case 
BuiltinOptions_UnsortedSegmentMinOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_UnsortedSegmentSumOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_ATan2Options: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + case BuiltinOptions_SignOptions: { + auto ptr = reinterpret_cast(obj); + return ptr->UnPack(resolver); + } + default: return nullptr; + } +} + +inline flatbuffers::Offset BuiltinOptionsUnion::Pack(flatbuffers::FlatBufferBuilder &_fbb, const flatbuffers::rehasher_function_t *_rehasher) const { + (void)_rehasher; + switch (type) { + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(value); + return CreateConv2DOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(value); + return CreateDepthwiseConv2DOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(value); + return CreateConcatEmbeddingsOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(value); + return CreateLSHProjectionOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(value); + return CreatePool2DOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(value); + return CreateSVDFOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(value); + return CreateRNNOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FullyConnectedOptions: { + auto ptr = reinterpret_cast(value); + return CreateFullyConnectedOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(value); + return CreateSoftmaxOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = reinterpret_cast(value); + return CreateConcatenationOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(value); + return CreateAddOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(value); + return CreateL2NormOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(value); + return CreateLocalResponseNormalizationOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(value); + return CreateLSTMOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(value); + return CreateResizeBilinearOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(value); + return CreateCallOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(value); + return CreateReshapeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(value); + return CreateSkipGramOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(value); + return CreateSpaceToDepthOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + 
auto ptr = reinterpret_cast(value); + return CreateEmbeddingLookupSparseOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MulOptions: { + auto ptr = reinterpret_cast(value); + return CreateMulOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(value); + return CreatePadOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(value); + return CreateGatherOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(value); + return CreateBatchToSpaceNDOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(value); + return CreateSpaceToBatchNDOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(value); + return CreateTransposeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReducerOptions: { + auto ptr = reinterpret_cast(value); + return CreateReducerOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SubOptions: { + auto ptr = reinterpret_cast(value); + return CreateSubOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(value); + return CreateDivOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(value); + return CreateSqueezeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(value); + return CreateSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(value); + return CreateStridedSliceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(value); + return CreateExpOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(value); + return CreateTopKV2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(value); + return CreateSplitOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = reinterpret_cast(value); + return CreateLogSoftmaxOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(value); + return CreateCastOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateDequantizeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(value); + return CreateMaximumMinimumOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ArgMaxOptions: { + auto ptr = reinterpret_cast(value); + return CreateArgMaxOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(value); + return CreateLessOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(value); + return CreateNegOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(value); + return CreatePadV2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_GreaterOptions: { + auto ptr = reinterpret_cast(value); + return CreateGreaterOptions(_fbb, ptr, _rehasher).Union(); + } + 
case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(value); + return CreateGreaterEqualOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(value); + return CreateLessEqualOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(value); + return CreateSelectOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(value); + return CreateSliceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(value); + return CreateTransposeConvOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(value); + return CreateSparseToDenseOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(value); + return CreateTileOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(value); + return CreateExpandDimsOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(value); + return CreateEqualOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(value); + return CreateNotEqualOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(value); + return CreateShapeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(value); + return CreatePowOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(value); + return CreateArgMinOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr = reinterpret_cast(value); + return CreateFakeQuantOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_PackOptions: { + auto ptr = reinterpret_cast(value); + return CreatePackOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(value); + return CreateLogicalOrOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_OneHotOptions: { + auto ptr = reinterpret_cast(value); + return CreateOneHotOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(value); + return CreateLogicalAndOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(value); + return CreateLogicalNotOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnpackOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FloorDivOptions: { + auto ptr = reinterpret_cast(value); + return CreateFloorDivOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SquareOptions: { + auto ptr = reinterpret_cast(value); + return CreateSquareOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(value); + return CreateZerosLikeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(value); + return CreateFillOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(value); + 
return CreateBidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(value); + return CreateBidirectionalSequenceRNNOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnidirectionalSequenceLSTMOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(value); + return CreateFloorModOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_RangeOptions: { + auto ptr = reinterpret_cast(value); + return CreateRangeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(value); + return CreateResizeNearestNeighborOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(value); + return CreateLeakyReluOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(value); + return CreateSquaredDifferenceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MirrorPadOptions: { + auto ptr = reinterpret_cast(value); + return CreateMirrorPadOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_AbsOptions: { + auto ptr = reinterpret_cast(value); + return CreateAbsOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SplitVOptions: { + auto ptr = reinterpret_cast(value); + return CreateSplitVOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UniqueOptions: { + auto ptr = reinterpret_cast(value); + return CreateUniqueOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReverseV2Options: { + auto ptr = reinterpret_cast(value); + return CreateReverseV2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_AddNOptions: { + auto ptr = reinterpret_cast(value); + return CreateAddNOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_GatherNdOptions: { + auto ptr = reinterpret_cast(value); + return CreateGatherNdOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CosOptions: { + auto ptr = reinterpret_cast(value); + return CreateCosOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_WhereOptions: { + auto ptr = reinterpret_cast(value); + return CreateWhereOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_RankOptions: { + auto ptr = reinterpret_cast(value); + return CreateRankOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReverseSequenceOptions: { + auto ptr = reinterpret_cast(value); + return CreateReverseSequenceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MatrixDiagOptions: { + auto ptr = reinterpret_cast(value); + return CreateMatrixDiagOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_QuantizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateQuantizeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_MatrixSetDiagOptions: { + auto ptr = reinterpret_cast(value); + return CreateMatrixSetDiagOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HardSwishOptions: { + auto ptr = reinterpret_cast(value); + return CreateHardSwishOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_IfOptions: { + auto ptr = reinterpret_cast(value); + return CreateIfOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_WhileOptions: { + auto ptr = 
reinterpret_cast(value); + return CreateWhileOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DepthToSpaceOptions: { + auto ptr = reinterpret_cast(value); + return CreateDepthToSpaceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + auto ptr = reinterpret_cast(value); + return CreateNonMaxSuppressionV4Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + auto ptr = reinterpret_cast(value); + return CreateNonMaxSuppressionV5Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ScatterNdOptions: { + auto ptr = reinterpret_cast(value); + return CreateScatterNdOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SelectV2Options: { + auto ptr = reinterpret_cast(value); + return CreateSelectV2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_DensifyOptions: { + auto ptr = reinterpret_cast(value); + return CreateDensifyOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SegmentSumOptions: { + auto ptr = reinterpret_cast(value); + return CreateSegmentSumOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BatchMatMulOptions: { + auto ptr = reinterpret_cast(value); + return CreateBatchMatMulOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CumsumOptions: { + auto ptr = reinterpret_cast(value); + return CreateCumsumOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_CallOnceOptions: { + auto ptr = reinterpret_cast(value); + return CreateCallOnceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BroadcastToOptions: { + auto ptr = reinterpret_cast(value); + return CreateBroadcastToOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_Rfft2dOptions: { + auto ptr = reinterpret_cast(value); + return CreateRfft2dOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_Conv3DOptions: { + auto ptr = reinterpret_cast(value); + return CreateConv3DOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HashtableOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HashtableFindOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableFindOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HashtableImportOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableImportOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_HashtableSizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateHashtableSizeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_VarHandleOptions: { + auto ptr = reinterpret_cast(value); + return CreateVarHandleOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ReadVariableOptions: { + auto ptr = reinterpret_cast(value); + return CreateReadVariableOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_AssignVariableOptions: { + auto ptr = reinterpret_cast(value); + return CreateAssignVariableOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_RandomOptions: { + auto ptr = reinterpret_cast(value); + return CreateRandomOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_BucketizeOptions: { + auto ptr = reinterpret_cast(value); + return CreateBucketizeOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_GeluOptions: { + auto ptr = reinterpret_cast(value); + return CreateGeluOptions(_fbb, ptr, _rehasher).Union(); + } + case 
BuiltinOptions_DynamicUpdateSliceOptions: { + auto ptr = reinterpret_cast(value); + return CreateDynamicUpdateSliceOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UnsortedSegmentProdOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnsortedSegmentProdOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UnsortedSegmentMaxOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnsortedSegmentMaxOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UnsortedSegmentMinOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnsortedSegmentMinOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_UnsortedSegmentSumOptions: { + auto ptr = reinterpret_cast(value); + return CreateUnsortedSegmentSumOptions(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_ATan2Options: { + auto ptr = reinterpret_cast(value); + return CreateATan2Options(_fbb, ptr, _rehasher).Union(); + } + case BuiltinOptions_SignOptions: { + auto ptr = reinterpret_cast(value); + return CreateSignOptions(_fbb, ptr, _rehasher).Union(); + } + default: return 0; + } +} + +inline BuiltinOptionsUnion::BuiltinOptionsUnion(const BuiltinOptionsUnion &u) : type(u.type), value(nullptr) { + switch (type) { + case BuiltinOptions_Conv2DOptions: { + value = new tflite::Conv2DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DepthwiseConv2DOptions: { + value = new tflite::DepthwiseConv2DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + value = new tflite::ConcatEmbeddingsOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LSHProjectionOptions: { + value = new tflite::LSHProjectionOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_Pool2DOptions: { + value = new tflite::Pool2DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SVDFOptions: { + value = new tflite::SVDFOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_RNNOptions: { + value = new tflite::RNNOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FullyConnectedOptions: { + value = new tflite::FullyConnectedOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SoftmaxOptions: { + value = new tflite::SoftmaxOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ConcatenationOptions: { + value = new tflite::ConcatenationOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_AddOptions: { + value = new tflite::AddOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_L2NormOptions: { + value = new tflite::L2NormOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + value = new tflite::LocalResponseNormalizationOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LSTMOptions: { + value = new tflite::LSTMOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ResizeBilinearOptions: { + value = new tflite::ResizeBilinearOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CallOptions: { + value = new tflite::CallOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReshapeOptions: { + value = new tflite::ReshapeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SkipGramOptions: { + value = new tflite::SkipGramOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SpaceToDepthOptions: { + value 
= new tflite::SpaceToDepthOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + value = new tflite::EmbeddingLookupSparseOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_MulOptions: { + value = new tflite::MulOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_PadOptions: { + value = new tflite::PadOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GatherOptions: { + value = new tflite::GatherOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BatchToSpaceNDOptions: { + value = new tflite::BatchToSpaceNDOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SpaceToBatchNDOptions: { + value = new tflite::SpaceToBatchNDOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_TransposeOptions: { + value = new tflite::TransposeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReducerOptions: { + value = new tflite::ReducerOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SubOptions: { + value = new tflite::SubOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DivOptions: { + value = new tflite::DivOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SqueezeOptions: { + value = new tflite::SqueezeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SequenceRNNOptions: { + value = new tflite::SequenceRNNOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_StridedSliceOptions: { + value = new tflite::StridedSliceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ExpOptions: { + value = new tflite::ExpOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_TopKV2Options: { + value = new tflite::TopKV2OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SplitOptions: { + value = new tflite::SplitOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LogSoftmaxOptions: { + value = new tflite::LogSoftmaxOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CastOptions: { + value = new tflite::CastOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DequantizeOptions: { + value = new tflite::DequantizeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_MaximumMinimumOptions: { + value = new tflite::MaximumMinimumOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ArgMaxOptions: { + value = new tflite::ArgMaxOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LessOptions: { + value = new tflite::LessOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_NegOptions: { + value = new tflite::NegOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_PadV2Options: { + value = new tflite::PadV2OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GreaterOptions: { + value = new tflite::GreaterOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GreaterEqualOptions: { + value = new tflite::GreaterEqualOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LessEqualOptions: { + value = new tflite::LessEqualOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SelectOptions: { + value = new tflite::SelectOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SliceOptions: { + value = new 
tflite::SliceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_TransposeConvOptions: { + value = new tflite::TransposeConvOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SparseToDenseOptions: { + value = new tflite::SparseToDenseOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_TileOptions: { + value = new tflite::TileOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ExpandDimsOptions: { + value = new tflite::ExpandDimsOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_EqualOptions: { + value = new tflite::EqualOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_NotEqualOptions: { + value = new tflite::NotEqualOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ShapeOptions: { + value = new tflite::ShapeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_PowOptions: { + value = new tflite::PowOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ArgMinOptions: { + value = new tflite::ArgMinOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FakeQuantOptions: { + value = new tflite::FakeQuantOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_PackOptions: { + value = new tflite::PackOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LogicalOrOptions: { + value = new tflite::LogicalOrOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_OneHotOptions: { + value = new tflite::OneHotOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LogicalAndOptions: { + value = new tflite::LogicalAndOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LogicalNotOptions: { + value = new tflite::LogicalNotOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnpackOptions: { + value = new tflite::UnpackOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FloorDivOptions: { + value = new tflite::FloorDivOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SquareOptions: { + value = new tflite::SquareOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ZerosLikeOptions: { + value = new tflite::ZerosLikeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FillOptions: { + value = new tflite::FillOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + value = new tflite::BidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + value = new tflite::BidirectionalSequenceRNNOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + value = new tflite::UnidirectionalSequenceLSTMOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_FloorModOptions: { + value = new tflite::FloorModOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_RangeOptions: { + value = new tflite::RangeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + value = new tflite::ResizeNearestNeighborOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_LeakyReluOptions: { + value = new tflite::LeakyReluOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SquaredDifferenceOptions: { + value = new 
tflite::SquaredDifferenceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_MirrorPadOptions: { + value = new tflite::MirrorPadOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_AbsOptions: { + value = new tflite::AbsOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SplitVOptions: { + value = new tflite::SplitVOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UniqueOptions: { + value = new tflite::UniqueOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReverseV2Options: { + value = new tflite::ReverseV2OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_AddNOptions: { + value = new tflite::AddNOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GatherNdOptions: { + value = new tflite::GatherNdOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CosOptions: { + value = new tflite::CosOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_WhereOptions: { + value = new tflite::WhereOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_RankOptions: { + value = new tflite::RankOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReverseSequenceOptions: { + value = new tflite::ReverseSequenceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_MatrixDiagOptions: { + value = new tflite::MatrixDiagOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_QuantizeOptions: { + value = new tflite::QuantizeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_MatrixSetDiagOptions: { + value = new tflite::MatrixSetDiagOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HardSwishOptions: { + value = new tflite::HardSwishOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_IfOptions: { + value = new tflite::IfOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_WhileOptions: { + value = new tflite::WhileOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DepthToSpaceOptions: { + value = new tflite::DepthToSpaceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_NonMaxSuppressionV4Options: { + value = new tflite::NonMaxSuppressionV4OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_NonMaxSuppressionV5Options: { + value = new tflite::NonMaxSuppressionV5OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ScatterNdOptions: { + value = new tflite::ScatterNdOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SelectV2Options: { + value = new tflite::SelectV2OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DensifyOptions: { + value = new tflite::DensifyOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SegmentSumOptions: { + value = new tflite::SegmentSumOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BatchMatMulOptions: { + value = new tflite::BatchMatMulOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CumsumOptions: { + value = new tflite::CumsumOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_CallOnceOptions: { + value = new tflite::CallOnceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BroadcastToOptions: { + value = new tflite::BroadcastToOptionsT(*reinterpret_cast(u.value)); + break; + } + case 
BuiltinOptions_Rfft2dOptions: { + value = new tflite::Rfft2dOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_Conv3DOptions: { + value = new tflite::Conv3DOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableOptions: { + value = new tflite::HashtableOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableFindOptions: { + value = new tflite::HashtableFindOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableImportOptions: { + value = new tflite::HashtableImportOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_HashtableSizeOptions: { + value = new tflite::HashtableSizeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_VarHandleOptions: { + value = new tflite::VarHandleOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ReadVariableOptions: { + value = new tflite::ReadVariableOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_AssignVariableOptions: { + value = new tflite::AssignVariableOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_RandomOptions: { + value = new tflite::RandomOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_BucketizeOptions: { + value = new tflite::BucketizeOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_GeluOptions: { + value = new tflite::GeluOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_DynamicUpdateSliceOptions: { + value = new tflite::DynamicUpdateSliceOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnsortedSegmentProdOptions: { + value = new tflite::UnsortedSegmentProdOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnsortedSegmentMaxOptions: { + value = new tflite::UnsortedSegmentMaxOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnsortedSegmentMinOptions: { + value = new tflite::UnsortedSegmentMinOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_UnsortedSegmentSumOptions: { + value = new tflite::UnsortedSegmentSumOptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_ATan2Options: { + value = new tflite::ATan2OptionsT(*reinterpret_cast(u.value)); + break; + } + case BuiltinOptions_SignOptions: { + value = new tflite::SignOptionsT(*reinterpret_cast(u.value)); + break; + } + default: + break; + } +} + +inline void BuiltinOptionsUnion::Reset() { + switch (type) { + case BuiltinOptions_Conv2DOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_DepthwiseConv2DOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ConcatEmbeddingsOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LSHProjectionOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_Pool2DOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SVDFOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_RNNOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FullyConnectedOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SoftmaxOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ConcatenationOptions: { + auto ptr = 
reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_AddOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_L2NormOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LocalResponseNormalizationOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LSTMOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ResizeBilinearOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_CallOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ReshapeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SkipGramOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SpaceToDepthOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_EmbeddingLookupSparseOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_MulOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_PadOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_GatherOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_BatchToSpaceNDOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SpaceToBatchNDOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_TransposeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ReducerOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SubOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_DivOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SqueezeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SequenceRNNOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_StridedSliceOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ExpOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_TopKV2Options: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SplitOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LogSoftmaxOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_CastOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_DequantizeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_MaximumMinimumOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ArgMaxOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LessOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_NegOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_PadV2Options: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + 
case BuiltinOptions_GreaterOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_GreaterEqualOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LessEqualOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SelectOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SliceOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_TransposeConvOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SparseToDenseOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_TileOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ExpandDimsOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_EqualOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_NotEqualOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ShapeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_PowOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ArgMinOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FakeQuantOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_PackOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LogicalOrOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_OneHotOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LogicalAndOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LogicalNotOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_UnpackOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FloorDivOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SquareOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ZerosLikeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FillOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_BidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_BidirectionalSequenceRNNOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_UnidirectionalSequenceLSTMOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_FloorModOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_RangeOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_ResizeNearestNeighborOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_LeakyReluOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } + case BuiltinOptions_SquaredDifferenceOptions: { + auto ptr = reinterpret_cast(value); + delete ptr; + break; + } 
+    case BuiltinOptions_MirrorPadOptions: {
+      auto ptr = reinterpret_cast<tflite::MirrorPadOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_AbsOptions: {
+      auto ptr = reinterpret_cast<tflite::AbsOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_SplitVOptions: {
+      auto ptr = reinterpret_cast<tflite::SplitVOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_UniqueOptions: {
+      auto ptr = reinterpret_cast<tflite::UniqueOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_ReverseV2Options: {
+      auto ptr = reinterpret_cast<tflite::ReverseV2OptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_AddNOptions: {
+      auto ptr = reinterpret_cast<tflite::AddNOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_GatherNdOptions: {
+      auto ptr = reinterpret_cast<tflite::GatherNdOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_CosOptions: {
+      auto ptr = reinterpret_cast<tflite::CosOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_WhereOptions: {
+      auto ptr = reinterpret_cast<tflite::WhereOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_RankOptions: {
+      auto ptr = reinterpret_cast<tflite::RankOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_ReverseSequenceOptions: {
+      auto ptr = reinterpret_cast<tflite::ReverseSequenceOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_MatrixDiagOptions: {
+      auto ptr = reinterpret_cast<tflite::MatrixDiagOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_QuantizeOptions: {
+      auto ptr = reinterpret_cast<tflite::QuantizeOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_MatrixSetDiagOptions: {
+      auto ptr = reinterpret_cast<tflite::MatrixSetDiagOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_HardSwishOptions: {
+      auto ptr = reinterpret_cast<tflite::HardSwishOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_IfOptions: {
+      auto ptr = reinterpret_cast<tflite::IfOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_WhileOptions: {
+      auto ptr = reinterpret_cast<tflite::WhileOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_DepthToSpaceOptions: {
+      auto ptr = reinterpret_cast<tflite::DepthToSpaceOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_NonMaxSuppressionV4Options: {
+      auto ptr = reinterpret_cast<tflite::NonMaxSuppressionV4OptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_NonMaxSuppressionV5Options: {
+      auto ptr = reinterpret_cast<tflite::NonMaxSuppressionV5OptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_ScatterNdOptions: {
+      auto ptr = reinterpret_cast<tflite::ScatterNdOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_SelectV2Options: {
+      auto ptr = reinterpret_cast<tflite::SelectV2OptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_DensifyOptions: {
+      auto ptr = reinterpret_cast<tflite::DensifyOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_SegmentSumOptions: {
+      auto ptr = reinterpret_cast<tflite::SegmentSumOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_BatchMatMulOptions: {
+      auto ptr = reinterpret_cast<tflite::BatchMatMulOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_CumsumOptions: {
+      auto ptr = reinterpret_cast<tflite::CumsumOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_CallOnceOptions: {
+      auto ptr = reinterpret_cast<tflite::CallOnceOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_BroadcastToOptions: {
+      auto ptr = reinterpret_cast<tflite::BroadcastToOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_Rfft2dOptions: {
+      auto ptr = reinterpret_cast<tflite::Rfft2dOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_Conv3DOptions: {
+      auto ptr = reinterpret_cast<tflite::Conv3DOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_HashtableOptions: {
+      auto ptr = reinterpret_cast<tflite::HashtableOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_HashtableFindOptions: {
+      auto ptr = reinterpret_cast<tflite::HashtableFindOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_HashtableImportOptions: {
+      auto ptr = reinterpret_cast<tflite::HashtableImportOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_HashtableSizeOptions: {
+      auto ptr = reinterpret_cast<tflite::HashtableSizeOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_VarHandleOptions: {
+      auto ptr = reinterpret_cast<tflite::VarHandleOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_ReadVariableOptions: {
+      auto ptr = reinterpret_cast<tflite::ReadVariableOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_AssignVariableOptions: {
+      auto ptr = reinterpret_cast<tflite::AssignVariableOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_RandomOptions: {
+      auto ptr = reinterpret_cast<tflite::RandomOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_BucketizeOptions: {
+      auto ptr = reinterpret_cast<tflite::BucketizeOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_GeluOptions: {
+      auto ptr = reinterpret_cast<tflite::GeluOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_DynamicUpdateSliceOptions: {
+      auto ptr = reinterpret_cast<tflite::DynamicUpdateSliceOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_UnsortedSegmentProdOptions: {
+      auto ptr = reinterpret_cast<tflite::UnsortedSegmentProdOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_UnsortedSegmentMaxOptions: {
+      auto ptr = reinterpret_cast<tflite::UnsortedSegmentMaxOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_UnsortedSegmentMinOptions: {
+      auto ptr = reinterpret_cast<tflite::UnsortedSegmentMinOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_UnsortedSegmentSumOptions: {
+      auto ptr = reinterpret_cast<tflite::UnsortedSegmentSumOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_ATan2Options: {
+      auto ptr = reinterpret_cast<tflite::ATan2OptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    case BuiltinOptions_SignOptions: {
+      auto ptr = reinterpret_cast<tflite::SignOptionsT *>(value);
+      delete ptr;
+      break;
+    }
+    default: break;
+  }
+  value = nullptr;
+  type = BuiltinOptions_NONE;
+}
+
+inline const tflite::Model *GetModel(const void *buf) {
+  return flatbuffers::GetRoot<tflite::Model>(buf);
+}
+
+inline const tflite::Model *GetSizePrefixedModel(const void *buf) {
+  return flatbuffers::GetSizePrefixedRoot<tflite::Model>(buf);
+}
+
+inline const char *ModelIdentifier() {
+  return "TFL3";
+}
+
+inline bool ModelBufferHasIdentifier(const void *buf) {
+  return flatbuffers::BufferHasIdentifier(
+      buf, ModelIdentifier());
+}
+
+inline bool SizePrefixedModelBufferHasIdentifier(const void *buf) {
+  return flatbuffers::BufferHasIdentifier(
+      buf, ModelIdentifier(), true);
+}
+
+inline bool VerifyModelBuffer(
+    flatbuffers::Verifier &verifier) {
+  return verifier.VerifyBuffer<tflite::Model>(ModelIdentifier());
+}
+
+inline bool VerifySizePrefixedModelBuffer(
+    flatbuffers::Verifier &verifier) {
+  return verifier.VerifySizePrefixedBuffer<tflite::Model>(ModelIdentifier());
+}
+
+inline const char *ModelExtension() {
+  return "tflite";
+}
+
+inline void FinishModelBuffer(
+    flatbuffers::FlatBufferBuilder &fbb,
+    flatbuffers::Offset<tflite::Model> root) {
+  fbb.Finish(root, ModelIdentifier());
+}
+
+inline void FinishSizePrefixedModelBuffer(
+    flatbuffers::FlatBufferBuilder &fbb,
+    flatbuffers::Offset<tflite::Model> root) {
+  fbb.FinishSizePrefixed(root, ModelIdentifier());
+}
+
+inline std::unique_ptr<tflite::ModelT> UnPackModel(
+    const void *buf,
+    const flatbuffers::resolver_function_t *res = nullptr) {
+  return std::unique_ptr<tflite::ModelT>(GetModel(buf)->UnPack(res));
+}
+
+inline std::unique_ptr<tflite::ModelT> UnPackSizePrefixedModel(
+    const void *buf,
+    const flatbuffers::resolver_function_t *res = nullptr) {
+  return std::unique_ptr<tflite::ModelT>(GetSizePrefixedModel(buf)->UnPack(res));
+}
+
+}  // namespace tflite
+
+#endif  // FLATBUFFERS_GENERATED_SCHEMA_TFLITE_H_
diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/LICENSE b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/LICENSE
new file mode 100644
index 0000000..5ad4eaf
--- /dev/null
+++ b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/LICENSE
b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/LICENSE @@ -0,0 +1,13 @@ +embARC Machine Learning Inference (embARC MLI) library + +Copyright (c) 2019-2020 Synopsys, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + +1) Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + +2) Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + +3) Neither the name of the Synopsys, Inc., nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/bin/emsdp_em11d_em9d_dfss/release/libmli.a b/firmware/lib/ei-artificial_nose-arduino/src/edge-impulse-sdk/third_party/arc_mli_package/bin/emsdp_em11d_em9d_dfss/release/libmli.a new file mode 100644 index 0000000000000000000000000000000000000000..2020ff2ded08f0c8de4fa3ee2c137856792b1cfc GIT binary patch literal 2419476 zcmeFadwg6~x&OcR%p{pk(&?m4+O$pS!+x znWU*!NlPi91qxQJM~{>P9IB#L59i3yem!Z+r6QMtMLELJ3WzAEML|)k{XWmyx3!SBRop{ z8xF^sV{LJ6-wlwUyx~&|!RzoL~X?lwPzu@26}|i2Xf%f}hB;zo$>=Xg<`pod} zW;s89nq)J`r{ZJxG5V?0s+j9@(_U6-d}eyTz^u=F3Om9LEb zJ$z=L7iNDCpE)1j%W{3@){L{9pTl2Zb8~#EllAQH#A7j?%OKVt-@u^+-Fta}Q9%Qw~r>_1HW_|8C%Ig0A&;KWFP~W?cNj`%E z{au5@V_oTyR5~$|Vy4cfhBCt=U70OIJ(exevuU({B-PcI7)lPE|+P}jCN-dgK42MW=uLYGB}!bJ4z>#$^M~@u_P2|vnz8u zwr{iDZs-$g%9<{?pzC6urf(n(r5_#2c0mITVjV5B(eP+bL42nFBYGh)iMdjs-BMkd zzW!dlMl_20`bcVIxNEq#HBzJBZh~@QX`Ox_YI*s z*N+T!btf|YJzXP1LtQ?L#du@D(P_U zjuX7QBX#%K%duoMK9gwg(m%~HQig#{cFb61<@1VQ z!CqU&TMBCd1Pk`sI^J5CR|E_8+BV)+m{$Y~_G-02p8o1+fIMup(0&yyw5!C-QI&7) zHx~*<$A@@psz>OM;YvxEZL9=v9#BT|nIdmPM){g72x~5E%83_-wdccdPRO@5%y}ap zhf_u&47PY4ZewaFHPWwdvH`_yjW|K`)sakfk8b3f4H|22*j;Jc3!B>w%%QoiA(xw%IfBUD!md`!0Mk*7?FF zWSc#cau+r+>%I%0oOQmi3EF1Qq}+u~)VlA&CTpE7Y{J&r!X|CA3^^xm9m+F#bI5EY z#RSfo9Fw?Nn%q;gO<{J0&k2Q+xhHio_uLb^PUfB5Z49eZ;7m~3qN%;|tObk1b|quk8B8h&bytkxX4 z<=WWgNS5p1B1f`Zo2VSga-HwVkxaF-k|T*~A0$T-)viU3B&t1z97$9=0y&bXw(sgB zXV`aeBX4zZ=O9++!H>EAC#n7?c{v6}!6kHziGou(hD5=s93!IORE_~ra4Ks&6r92u z4h5&MMnl0Vtie!l3TrGBoWdFk1*dSGJ!^*ws&f-pbZ{B zW*5dU2sf>Sxhb-}3|g*C1V zPGODff>T)Iy5JPnxGp$_HLeRyab_Ln%r|g!927L)&|y$$qBGyRK~9cAP-sq$F;HkC z#}Ftqkz)iDn#dXeg(k52f1wGi?q6sEtM?b0!0P;kCb0T`p$Q7zq(LVsbc-fAVXfHw zw09M_J)@^qRsP5kwF=YEEjR{pfpdj&O2=3~qmCW)+( zb0&$bfpaE_tZ{QDi3;9|>V3c9Eh)zfJv`K}A{M$uMc0KLB+AZYySA)w+8HjE2)7?f 
z&pQ{A3|Do&IZKgSo>eAEg_)gks=Q6?$|zq$yA$Y#_ZM5X~Zgd3o|5N>etLXug>ogHpa@_NDzNM5LY91f_Q zw;#K+TQ_Sw9v4Ua1efC}ijxSx>M6JAlw3*l`Kc_G}3$=?F``w`x|k%jGP`+u2GkInu&g7Yt zfXO>20h4!10w(W_#LJU=LgGm7`G_OArz1e}%|?Lan~VU-Hy622caq5@=S)QJ2r|=} zd&RlNkj-W?mCe)YpoZ&%AmxUnQz_Zz1`rYj{!2-wroFW0KYW9l5UBNmY(xoc{9$x#c-O&%s!O~FTwm5Q;>bqdEUlKp;u8q&E{mxK8j zccZxdxVIT@#=X&SGw!X1n{jV8+>Cp>;bz<$4mTs)a!4rGae<7X6R4YUPmgZKJwLh` z$=u_HBy*1&lI%~pA;}Jv8a*LcdVQ(rJrk%~RUH-f%_dnz)Q9I`6zCoywg# z0g`Xl0wmw01xUU*3y^$M79ja%EI{&2Sb*f4uK>wET_Kcbw&FjklVoe^V)D*YkXPPm3YfgJ6fk)wDPZ!>QNZM#qJSx2hC(Xe1jU&=^Aj+6rzc?Y z&Q3fp)#L<`>bZ%RKs7b-tW`6U`=EgIkeu_9YJW=Kb2ZPS%6F4&Mm^!!OpXn+4dvJ{ zTaaoVHUUL_t!Vi?tVipsft(OB+tWhwRoC#M7`P5Md6?XFrMt?cpo(2(%-`CbSFc%X zZ$1x_XT4f(a?})=Ts4LG94i%Lo$C~yxpV!NE_n9NsN)iX!|b|(;KB`q3(WLy9CY|K z3~q3^Hw&x8$yzoSo=pt(IatHs0DJnbHy=O4FV}(0z|(LNBcj!KOiq8sh~tfiDZBd< znXcZE1YRI>8qrCrE0NivKZplm;}>t@)54 zA&q!Z&}DY#Zs3mGU6+x?Y-Ev+>M8&sYXzZS2sdl!g2a}VMdmJqKCR>`gihoyg!4MQ zwKmP!MQkg<<|v)hE)>d)5bG+~q)uEl6BC$o1NH>-q=;BT!RB!8v7Vv;4vnS$A=Mauc+Vh~v8<%jcB<3Yu z@p?3_52sxyl^G#k~xBA#_~?)G+_%051$i#IilLtF*pn)vF3I_86|UR^d*H$*B+AS+qIVf9iOkv z0w70G6pr|)%;^gX@7!GwX={**Mti6@*NQy`MB3a^=$jT<4D1X*m8CKK#w2MBpUh=3 z`j$tGF*oJ1rgg?STGWh^bqQyjG8t^E#e$f7J+gv0HuT~L-AMv;Ozwmo7R zV^2Xe8OKHrlXVCrT8)DpH1Uy*&ty%c`$31p#%vLd#+=X8hgqVvwKW`WZf{uzU$ATl z+^}lRg_@=@bJ5rlW-G?(yY!ei|7u`Kt%pa4%mn|7?`a-LT$hb>4fGGfLtt4s>RQk+ z5V9k~1L&GFHQWY|KV_`c=bNt$rnii4W-Q?IorPftvMIhIuQoC~f@vV^(GnRmtZ4&Q zn1P-94fu-6J;1jE58)7m2aNDE&p^fu2O(@SQ$64lnNnX-8HQmafMLI9Xn16hm#0YM zVVx<(v<4GEpGJ6!C$l+`PVsyh4+HPl&P(*bGi&|Yx$r%}0?-!TqGYTO%ZFiKGKK}` zVQ(^O**(c9+RJ>!@6nQ}@t%PghL`K%D4XglK2J-+Fzt)Q07HGQp`(Ugrt7E~-)?9# zzD-NQbYiD(HMAMuqH8OC+|XuxvzCOtL_2-V&}Mv8*H-$7p?N&ig?V~2nN9G>;8I_4 ziPoD*_Kz_Z@fZ8C{L00zcVn%$XDkc*?%<~w(hfpPV7LgwP%HG~Q~JONAM3=g&fZMV zV7iwr^%r|lD;Eck{iE0ed11B~)*y`8VwhnWMq%Y)zxQgr=}~yHSRy-`LEqT#y$o$V z*%4rZpo{N<2OB37BN2$_`cBXpT_az-8#LkoaD6Lk*kOgf#`#-NkMwUc{1@Mh=^;yK z=@})EekVjg<8zzoW2ddBBXbil9_}ZFez@P8Y|Z`bFqxtBNWW2*9moj!M$~;gp8MTy zrb%YHO~2cb5%f0HsUa(V0yS?QRsr=d`eS0_Xjfu%9G*~}j5X`FKZv%NZhJ?xMRnWT zqAjA^elOUf%XHiCL|eOVdrP#n>9&)ityQ=ER@hxNiHkXfx{Zrf4(j@rGzK z>hUYl7GX26U4ALpmchD4+v|c2CN*yRg4!|}+={C(4wNJq_jJR#W3dVJ5e!RpZc9kp$^jp6w| zZriN-K4#mj`aWvgtolA;+pPK?v29j;58F1YzK3j^Ro_Y5hV?Dg+R!ix!wkZ$dKes( zYAt#g5|(OlJq!s-wPqfMVFclr9)^mRYEeB56)e>vdKfBJsx8&SP@z(-Ne@FsO0^|= znCIK5hk3r|=wY7k*?JiA4QPvb7~46ZHSjRDZ$Mkb!`Q9?Z6Ob1dj_-xJdEua(CT>@ z+b^Ki@i4YqK&$0pY_EV;!^7B40WHkK*gj>xqVqj44JFcPnCpv*{1EQ%Wh{jK_)lAU zdIs?TEv4&s$1M%)Qcrg#J)Gt~nlPM7U61~nIw3ro>C^k#>Fx}l>NewVRG2<*{LQd+ z_h-9?GkPETt6_t!&Ipu?_p4Ke&6qnO&!`8tuuwkO7mDyO*2h;I(qL-~W;V@NJloUL z-8-mxYnbQFV;1W zMqLCty~R%NF|?Uph@%sf4Q z0QYCA^|S-n&Pz1h%h{~22&iYHrNJ-6&oMPt4B{&+t>-%%cL93(x(AJNR2iDL3zTCP zXjsoy{7gghcqmIH_m}L&eViFsMhIefI+hW`z)a%|bOf*m^)P3qB9Hw8`d%3iLm_}E z#k7V!z{9+(MN9OuVGJKHjfRHyDmq6mmlb!mp)syO4{ye<%|gE7^3E}Av{)bZ9%!R- zZ43&cOKc+_XdZ8j1+S%f8y1B@r?mw58~&6M&69CHZ{fr z?_1qFkk|;L$IzM|j$1N@hPi;DMhWa}e#~LVRe3h1vM7O@+6galFt|p~7#^tbLkiwL zpi>(&x(4=au0ev&#h$T5cgBuu;2Pq9TcC|WQ^avUx~N_ogNelmV;#p02EzSA5Qe(e zb&kQoQf34cUq^M?7#tXZf_!TAI39)mYQdiwSA%f_b{T3gZUE4buV_{$mI1QHXwWl( z1p%+;(EV2Ocq1+3Q=!M}`AqW_E%Jx;4}ga9&i9O^;YB7f7)8CP&$2WCz|XU^UcPEe zLk=)sH)&&d0*IG&ai`%63j<|c3~}7IQP!!@ZUY-}WxxXS`y5Xufm1z(OFY;GAPhc> zc^D|L7kO~?>vkUx>vpeaWGIP_fbpOoBfLn%JghST zJPtSb5)IdzZZC$kNetuhOVQoQ47~S*@8I+Ot1qJN?adlCzAfF0Hry^n^fWg@Z;y0s z?1SSDJT@uX&NWIsew!XYFl@w6aE+3kJiK~U!-b9M^*=t@acsqb3wE#UyU61YvruR0 z;f9LR$dt)T>e;4sHDzblcJ2>P6!~U)D}1w$F<+&(qURpp3~&ALJ-!NW{q^_wW_c?% z-lI*wv0kfwE_B^&Pq-}ozS*AoYiDl{ubX|i_Sj=@9$vd5(usNPzi@Ylo{xXk+IbW6 
zJzl0Y&g%>{z^5KQVf^eYOGoan*%`gRc4zGVx}Dd~^Hi*xcX-Bi^R`#KZ{CWE=KG&~ zjIsZn&+EYF`@rW1z~?oV&sPuYErRXfU&YIEJC>>}y>{M;P|fXF$}1sMdpnl-DhSox zel`4k1j_T;;nf#m>QB^aC3^Z?^=r64d^6T@@%=S7V?EElzxHOV>pAz=-Q0ZtV`uEI z(eSs#@YiJcTWa}x-CE!24qm>C8b{X7H`c&>SOf5>hff$j{+X=8KR*=mSHiB=1O5=J z4?b{eCfj{#K71HO)GVK< z`R5AO$o%jM!OB_>+aG3{Z_btM#0_O>&%}uv#={-jhKcX5M7k_Jwzc!V6E}qKW2|PP zX37X#lMYW@m#AFW344yP%H`Pr1I+H ze}7e{*7}j~cHb{IY|*wqJBcJ?dTyON{GDZ^YiilViSUyp{)*y?Wm85%fx3(_*Kd~`u+lwr&xn|>Co-CmzP@p0eWj_GXfMEN`h z{b_Dz$kZ6-^__huFXL-R?mhJVi?AXc7lc=^PVHuYwHEQgr_&G934AlKbaD%;W)WO} z6O)VmQRt;?@(4d?Gjq%2HejOV!AR(<=?x#(w)sw8vY?|}n_LlIKJmmK*dhNfDi>X$ z=R5z(&^;ZGJ^tszr>c+s`+-AW-nJa_xbX0o@4a+eq%(cNrT0e4J>ht{H@v*OEZkl` zExdTJvgYz|eC}j;`CL}lK6kP%GIt{U^3IO>(&d$Pt3&UkpIDR#typmD?so9B;?X$# zjo^pX!`hgf;W-Q~ldi10bb4jYt}xWyd#b{_XU7}8JLc3K@t&G{YB^dC!o`o>vg5UzU+h^o z<92rU#-DF4T6+GBYiHaD=UKm#VRe5=`{o=A`YSw<1%Vlw=kV&YoOW#=h#_TE0q3-)GgEi8!6k+tX7>8XCrt8Hr!PxCgc zIyGI}@#syyx=C;2*7llhUSIeE);PGVG=h0G_jWAo45mYy4ti?XB{gum3i?KEhkup+ zJtw@vyRZ|(y&eAqTZj|m&!_izG$_r-SB9!P;y2VBVx5N?(-%#aRZqq%syiYjhvMnR zbP~K>bSON-6RxP36$#W#uy}eo*c#LIGg!DWz4}l^x*Bv@`jQ1KTzuF+qdZ(Tqb%G| z&EjjP-_8cMUp4(^cIEV)?CR;a!uX!>R#$`~OSXqY6|hU-XFu(JIa66Xsdd!PT>j+? zMpj+=NM-FVt$q4@7Mf8MUN`HmP-X399#(sY77qCsn_#s+(5lY)xR2Fd;JNs*lMC;x zduF@;aJZr>G_ix#oaI|LYg=ee{WDOOqcd3L>`=IJZip?qEmBcAEj*`wJDby8Iyom? z758QnW9w$@VV>~adpz~4L+fU23kRFdkIY&4+>Db8@2EMs@Cba?)CQMyl+USqF~;gX z*jlytZ;LjqYP}@5^!(|by3&^5IorH*>Ytq6P*D+C5B>YAn?e=kktv5OYqp0k+RU?J(FD1aAtJm&QVsH7jZ_-2T;c zW$BJkISVsi#ncG;L=P-{yDt##@ZmbVJF(cmiFL#L&DcB$HKwOdjfSuX{4(*vhPyfv zkd{@gF#5n^|2Hi58^BI8*tcsAd5@3Z)zJcU_yGDv{|o$4?UFF7L*3mm8&qSu)JXBO zi=PKRybss?zicX7!1jfTA3a@mQm^BZz+Dqxd&_fpS>U;eFTaI-#($`O)7niZihh22 zHT1KN&XOa^O`*&r=!=f9Ll;d}lm}T6Fwbt<1GZI%JRJuneHC-#KZfl?=X+q#bzXG= z)9|cBg*KHHd5e9a6<4#+)$i|I%R-l5an)rkbiqo{7p{FD{Ox3+l^0)g)vC+bbZD0~ zYp%I0ysRnG6kU4dl2{}bX^ONqg&Vp@`v;QYNTg+PxWPsihnHN@9A1(bi=_s`OL~S! zQt|MT%OH4dcLx9TXW_2N%#Zj2jJF0lyE?!|Bbf7_c#hX-hi)60{_585BP#p z+DVkK+XjC}hY|z*8;4ThX?0&>WGpd~WNbCm@-!?kCb!Nnt7avv1a=@wyd}PpqLSj0 zk`jN(l#)P6X-S!Xsz2zT<`4PH{nPz3{1yI6|4jcZf0ci>e~y2yf1dxW3Gihucfqjf z4EjE&1^s(H!LrYKgQ3s(f)$@G3RdkY4px7vBpANSAFRJ~O0Z#fAlP_EX)tnoSulRv z)L{GF!4<*fpP$wl-0EH72`*ppVz9k47=J1lc{RR#__<*94}w+4gB8yQ zLq80b{V3>vA?W+BAnPy-0N>;Afiip^%5Vpi;dUs)ZBT~0i>A)~XwV<@Jr!h62Twue zr#%yV7r$N%zKvfnM_3E`SRE+s3Y5N&RasWX&Vn%bzFS)pWVlNa65` z4*qWg|95-W2mMb4eNUs~4ikcJtSfML0e2^GyP+I+zA1CQyT9DAS~pMrAS1?9LC%CQ^DaR-#+cId;mK_9+*%4jhB zRIvK#VAV6hietghv%#|Gg8m-_eaC|g`%?K|=Tu`-W*3!?^xUdszWtk%tW| zoBLkfKMX%M_k24qv?E(A*hQY(jkMU0$~Wq1I|TkXn98^4_LngH5k34mhCix@AN1Px zhlTJ9Likl7?1O@uekz18WXo8u?vsa8LO3aedxUVe5Kai;4MMm}2!B`ze@Fr`wMS_Tw1FN?`qIy$KELnz;iRh(7=Ii*$jdGzdJWNd#=%~^>~dQ`$jh!E8n;^Z;$>Ib0zzKCCuf_y>}p%^c-+$2iVIz@CZ(Hn{GBf6jHe<1pLqWK=FW%IH@D~tgz8zOp`=rqxrh#nytuQu3=mt}1M zAuk&xdW`7JM2{1_h3JnE{ZXQ~68$lvKTh-wL{AXCjp*$}-$?WhqHiL4C($<(eGAdI z68#CHKS}g1qHiPmcB1bfdN(Vr&zGem!u=)FXLj_7|R`twA8f#`oC z`in&0P4qsZ?;-l1iN2TU`-r}u=zk&l0iyR4{UxHmO!QZXevs&|68$xzzfSZ4qQ61( zH;MjNq8}pq--!NqqQ6D-!$kiF(cdQeJ47EO`u`C9U84V!=t-gv5q+5GBSb$!^rJ*S zM)c!EA0_&GL_b0F_lbUz=%w%DO(YK;y(OIzyDRa(rU zXY;Fyrpd$RXziweZx5TRo#)Wi+W8JWPwRB(v$T~CJzu-Xp~KoG4qc;N=Fqj;T8FOF zu6F2p?HY$(pj`+0^j}UJ8`8H}WcDr~@QX}vy-@FL?%tEh{;2Z9e zfX9Tn%80kgyRCS0g{#ax!^u=vGBuRx&l*i2Ru=BB$aG25@fgU*?a{gmgGAw@1h5~b z;a=y&NXqJ$#siZwTZZ&!9r3=}vuU({B-PcI7)nAnHIu^63*ncWrrMl<-}#yX8Z!ky zfuSj&kM?J>sS&f=TYP^eIXonIPG!4>MhClaK)?@YO|S5>k*?n15%>*pdW*}L8R+k^ zJ=iF|?8ObkUC9aT`>q({xA$&4+`QaBv{8RpLXKGYk+>rk2DtD@uD${2lEd(8XLuk? 
zY7pzlVKf^JkMK}q1|0I%p{%Oj>P3)|R|3at<@8@nx zjP!tMFp+M8pGh_~x40~Em!(;>^bcjSP0da5rsk;J9Fdq?nq)p&nqpE`=D4NF?Kjrc z(iClTx!`7aMUGulUS@7~RM$2;>TH|ch2~D&rA9M6xkHwt-0H3(_bjjDGN}&-i+5=? zwKzcCZTE?I{av$=lyFFlC6z$>aH^|0& zrRHY2S>77*#&FbwcbQg4JLo1y4bT*AZFl5i*c^EnHdhYOR(E#>o5Wum^#@xS$huAD zlG=3CU72XB%VwaGO18>uj+G3RG?F{kr(X9~na$DR4bIWqxveEevuTR9#N7T`Vs3ve zF_}Nd&@u9Gw|Ps9wmIfY8y9TwlVD-))cr->&DG+Z>77$@+*$>JgFDBjT<{e3_ezFOzZZ`o@>J>l3T?Hq4&w{yHr(td4{_LIytapzp4Z*(2*bd$rI zZgMn=ZgQ*(-Q;Kz-Q;KyZgOsKbd#e!bd#ewbd#esbd$UEad+wC?$XEIrH{KyA9t6& z*|9o#A)4K#Z+4fy*EQ`6!no&~Y}_Slb(^DLcC?W(0Xk}7*c{cc zoW>nhvCWR^7<&yZP4QO8rdjvwtfygfRMfCJYHHZrb;7DTJCR)~cO7sy0v=`LZm-rR z=O)Cc1DM+!br9?>FX()koNkw!6~3L#W@XtTcM_AEqjGaZZg!3eD@*6du+5IqVVfNz z#4-?%TOPWIK_!2|RL(juLDW=Z=x(#JL}2o8^^v?&a9De5E<}b!=K* zKzMkKtlZ9>9>WRD9I~bYv#cawmNgZaWhDW#eCNGCC{OJ*_Lq)3j-(Cl6jXmvLV&ft>Cmz!m^2hWm2KE54rPn+a}q z*AiDnT;7l3il%k$wOX^BWCrDVx}Bq0-mr33-mHpNb@eJ<2$z+2LD|L>x`@1$oJYQP zwcRTQeJeHv?rJ0Ks5ajTjXV3PVRIDBusP-z-R9gn88*kvqT3|L<8jCFIBUX-1_S-w z{2MP~;1vOWGF}#eR~781@!~mrX?iDykJeUW#rMnX6)`nclx6(W~azl!8CC!E)uT=C*{Dp#a=QL71l zS>2S#W=HzFN3*F6Yw8&u#H%ZeUp=Ys8s`__MpC`jRWj=WA|=c#Y`ForIHv@}YkSsJ zJ(^7}2Y4M(nUG(5RK}9)o81}fN-!+|Au|ajiC0pUk$CY{8HrbN`9)i0TLGc0%lMXN zWt??QSxMl4bdhm{#!4zK7A4s_R zxizOLW1TY=pV$gy`RtaPHK(|olrYcbb+(+JcJ**yFiyqxK_gL11GI#e~S= z9rZHq6yfmBbs2|usEhva4sjWWcUFrW-ia;aItdpCEn9Eb60SqR4G|8X>mbW_2jS4a zEZ@%)E~((YNw~CvJ4(2%pk?Lz3E^%5jpL8@Q@r0+Z0Fqy4xbw#a(jW3$gL}%_khTyR3YzgxjFtE+br8!6gYdq2NABxZMiwF2d~t zPSy|h6Ass-Y@C0Wa1R5A_k`2_a-49lEAssn;oeqo_=pp6z2OE{mhUXWMS#QKQqc0@ z{oSHJy#HKQ-<5Y%Qr`uHs|HTie#;2g2psO4(f+cQa2>$O z+Br$MwTkk6lyFH!`R*WGR>9p%xE;XB>hW#D?FCL&kEaQ@U%|ahxQ7+o9|(68xC%)- z2Vh5B9G5RCxU&d%Qo+Rt2ajQw=6fOGsubJ@2-m3Kh6vZ8;I%if{)N+}{XyOu^x^ zP{n@ms)AccxOWxYxr8f&a>(j&IpM+zuA6Xi1vf^x)xgR6-zNySUcr5laA`%p-z3}) z1@{=?_A0m^6YfC;_Zz}ZDmV|E1Q6HnaRoP%aIY)4vk7-f!JSVyc$l-aA6!MadIi@@ zxOU)V`;%J;w^G6FBHUU9cQ@hIE4XhGu1~=|O1P|odx3Bh3hoWU?NV@mBivpER}Kf` zVt=_$!POJ)fP!ly+(89*3E_?^xDOHTxPnU)?gVgPa_x8CNVqo@+-C^)u7dj#;e62U zvV0E`E~Ma|AzZbBdzo+z3ho`k#TA?%2A9|mIuzVo!mUwoO@zA^IP_1C16C0(2^^SQ z+y@Dl1`dB$O#KZLZbHG`NVwgKeE*SfUjR;4k8coezk>T7;T{4`w%z(~!W{)pwtxQ? z;a&nxw*4)FLW}G7q=K7GI3M&M$imgm5yFLlga0n>BEnTGxDOMq0XW(5Z{U8~@}L%2RgzRwYE95~r|u3r-F7Dc|87*K23tKeo4Zoi^@O@w<`QNC4# zJF3WcJ>gzZa2dkAuE_TjgnL)v?;gV8`~@ag|9hBlRlv#m!83$wQ22X|aP10ze% zg}(|YjJSTURrouHaD57YD+o8P@OK^IZc+G46K=19yP0tN72FpI_b_l+e>#u<8{r;N za8DBM7;v(A;}yc401iy9eE&?ilL~+3kcqh7P{{l>5H189bPZR&?XBk?#q@O(^{Rk#M^dTo85|Aj(?y zf`BQ;RgVRP!|jBuzRL-BK;iFd!W{%ow%-1Ma8CjU|6S$VMz|Lg{`L~?b%npL6Ygz= zzsCsYgGzwORlc7Pt^zn%MK10g!qo#O8%L#ZQXkvFT;d9UHH2HK@Yhbbbqa1R;gZ0? 
ze^)*F2$xm(n;_f{g}=Ro+oRyVPPqFN+@pkh2sms{I`6(nxJiY--x2PZ!rv6w85P&t zOA4-raIY)!Z71Bj3hoNR`Hgv<*0+~%xIG4!u6Fqt;p!Fs_7E4k`el>*K3eIHhw~TPR6y;k>xV;L0eT3Vu@OJ~@ z9s*7_UwxKvk0`jW6YjX8e2)|ERR#A`!o3ZgY@YlB;d}texT$cED6Y2(fWW0|{nit% zUcsG5xHxdK^?McJRw}rD!mR@i{=3RILAa!X`z+zIz{%F{*9f;m!97a2J&Jr^BHVon z?)QXy2sqh(M4-~S-X?*Qt+!gj9aC`22zLTF*?L<`xRb!i_J=kS?v#T27~$Y8q0;(( zif|Rc$@2XY;lc`k|4Fz;g}>(s*RJ5+Alym?=b7nTZ)+9YY{IPvPSy`1gzHoIyNGaE z1@~dXO(?i5;dUvwPY@3GFQF^A#_tyiw;wo|!d%=p33m`U`0wH#BixgUe1A;1mw=Ov zqu&tjO@%+tEa&YqMW46n+ z&{@uwaEl4I1Gst#cQN7i0|%$@t;MWIf^Y|bgCbgs$=yh}M}Ui1l-(}(67Fr_a9%SP z)8Bs(4o~jlvqH_qrO6C^Uev1p_E-8>3Es(pXK<=>uxmOI%92Gb| z@E%HYF~`SA;4o}1Z^MJJVd%32bT)iSJI3Muf3VugmBHkKcI5Es?Iw3F;i?1fXRiSy)bMp z8w3uYGH-IL2sa>b__TSG8z9`cz=Z|wPQvXFID8tt>F=wA+bwYTjBS&9j&NT9t{y6d z>$hIuuzz40bD0DV_cxHkr`ntTrh{GN8t2>n1%E?fa?1$UE^zpidz0%T+)9DN-)NZJ zO@v!3aQIYwllwa1)(adyE#Ksh5w1_*@G1Hx_dCL61rDFCZ*rAj$FR9f2pm3j-{i1< zBDYK6A_A8o++Kmhr}UftK25m$1P-6xZ*t!v+yQ~Zr}~@RPY8EV;Nk*zns7%24&MS` z`U{8ku-Puh1rFa1U~(4`?u5YMTLVmPkZ^AbT)V*i6XD(!IDE^1>F;U6`9PcVAHIFS zy{FjpHK973=-*oEnx z?ODdyD$LAS0Ok7+%L(_=dl7p>RQn7^W^LZJenOA&eU)lJjof3cYW|yTKFaE=w{d__Q9KIAq+t_#aHCj zMutZ)4TL>fB4dU%ZNLgMuyelwUs1US_;%nS9D?wG5uWB5$e7_EgiU6u2Yez^>MJV4 zFg&~s!+sC`FqN05NaJChDaEt~6F{Fvc!~#qrkdjUG9Curubl@!@a%){bK!e{1)wdw zMaftnmJh?eWDE<=!`@`nvU`$Iw3qpc-=if{<2?g03@_KiQ8v|Ae4dts--Y+VL$qK4 zeXgOShF+#?cyNgx-)?9#zD-NQ^SJHwt%f$^TXb!uj~m*IZ`P87qXTyOn4!)1sIIN_ z5kvEMs0;J-X7FeK(ALEzT5kq^*b2u@{$lY^42$ku{CfA(dV9vQ@UR^4Qw(Vbp(QX} zgkh)^`VmHq(*;*P)`{Ppy_ufDbT3=#FBXTKJAs48{!#3Kyf9k~YY@h4G0d4%j&<`)iO}6HK zc9_gidZgbd%MN4&eIx2V9?$)5H`63D-KO8|$Ow8H>eP@GKY^Mz537Ls7yS``-3{4!|}+={C(4wNW;UG zblVex&8)}wY#Xc&-QQ8$hT9mP@8h=3s_$dA&8qLCw#};VBeu<|?-AQ()%UP%v+8@u zwpsO^v~5`5QmqXQqcF@M+^UDcL8;cFhaq997T3d&pj2z-VHidbj_F~jXsH&}!%)Fe zEux2^Vx`(rJq#5p)tdA$RHRf}qKA3Dje3~pdyXFF`JSzZA>V+un1`{Q16l(QWBUfQ zMLdk{8qgN(?xXf-^H?G(_$JdEvA<|{hi z1Jh6Ts!v@cu=o-%Gedw=-4YoQXP%hrDP8l|1?u0y}9^Ar0 z`Cwlt!oye}UvWr-tu2_@G+*&-PfvI6z?Qz=C^!N=x6{xO&_U1|Y#||f5c2^&rLza7 z=bpY^)WG`4=LeVk4C;&*E||^5$N<5JH5xyW_pclJ3Z62mHr&QZ`gM|ZelUN zvki?B{V&#_c$@9`21A?ii*(&PfaQnu3r%gLU!eQ5;_FRq#Mc=-Zx2XcYic9D#^BBP zu&FT~c-YUy^6}!M*7tP{4f`91(ZOrHz*t59?2|gEl#uD(N8!N7XYmmS#(8i!C z;-#Btw7S*Jp~ z4Q#}f0SnCUb3B;@PW2cr@n9E#F!(IyVW7ZX@hTElxp5{jA?UAmH@HRGnY*MtHYm|EYHa#BR+@{A*aE+2(GBL1e zMeXv5YF5Jn;f_#8xU6`8ctZ14cq-=9-N?RFdKHU=wud62UF<6h-#_jPAJV?E@U!Fo z@Uv{i)=MWY3#?^N1*0u!FEW7DH0#W!|w1S07*%Ng`>?^hJXJ4tC z6$;c(ru{GO4>M0)gXc)i*@q`;&;HrOtxy)zhd;c*b6p_9R@ZH1n*y7ZUciS|*HC-p zGDv&khB8H7C&o=~0dRVG%(A%4ammC2$azir5;#}X5%$i+_Q1BN-{hOS8(Qm~DLW?0 z(kE_csG119S$*_djrfDTi+-DaWmUPqrMzS1$%RFHUT&CG%);fRtfT&{=!%6Gu*yYG zvK2cj7d{zTmwuu)!B#B1b)pf^JXcTf7O7mgCRVv;G_F0xoiRB9-+M z+Rl9|_O;J~aXw9pu<|J^uwX*#Sa>1pSa?>nKKqMJa5{~lpU}^iq`^;7CHV1`m-*Jz zckHfm`0DU1*tBz>ziNV~g?0@!R8M*$;5!Z%N|wU|Y}#jeS-iS~Ew7$n?NHtbT$5;o z_$5%@MhLf8!1aIsbQXv22;4~5UfrQphC4hJHJzR~#77`)MaBF$^nvt!hdwzGD*q&` zlklot>GCpICl{Pt=)>W~aGk8EyEwFB;hKpEv^|!;quw9EcCQTkXTZ8H3e+%I2b0>V z3byA|)!N-<@I%QkV~2c|%;#CS+EY{dP-srgxsbMFWZU6=oqRo%Gh;oRADUXZi|xGk z)eaV(2rl+N6sn#ZVxEPkJe7+Md#YaE0Q`irr z*ovC%Y+7IstmaQc+DK>u{w@zq;+RU8PE=%`2rpqL!cUc)T*zP#9mX1@V=Dp^p77R* zvP~zpwtS@O<^ACf%{N{1HIAR)uOx-3jGv71!FMNi7I;!B<8O3*i+|WJRKadPZA}x!<4K}hkyyS}J@RGz>EHxNjg0Jz2hnHLi!E3uS_@_S$&t^*{ z(9n~FKd`UgHG=51uydmSjkdz~I11c%fUisf|G)`M_=3|@Nt7@J!(Z#UuHhxC`w}B# ziIJrKJhs!YXw0*9epxjuVI^9Lr^H+0D=8`|E-5MTmrN-Ml$4f~`KS7W{%QV@zuZ6F zKf_<)uk_FK&+=FKXZz>)=lbXQ&k8})nG1AvwklA1v1pqKwv~a>i(qJ2OBh0{1Em)P zN>}k~oMyyA2mpIYpmYVq>h}woabXC6?@L8LuYs*I81le9tShQa5We?lu=;})cNcjc zgBa*@LL8pc_ap5Q3*ps5_#z>Ep%7jrgjWjT3xx0rA>1j1JB09ih4A@$crBFGXS_ms9ftA3 
zEG*E4EIaON`0&CmEaMn9_^lW=UJt%oua1>PlZ3kZ4PpSXDW6(rl@Wkt4tkk`87 zNjH08l!=Sian};^Q^aV{;@w3UUrOR(ODZg0eix{1@bbIkL_N(G5c2Z7@obZqKV3=G zFj)(Wm*3@Q8@!HDh5jn-NC7b=-A@`N0K6VezuCpfJ9M=vt!dh^{9Z_ulNq>v)qn`d?&6LdeS+h+a&zc$XaV z=aBeDqL&ceMD$XkBShobGJEl|m@OdWWz9s#iN>>Y_TpuD=FVQctj!h>^0IcK@ob>I zcpY~!B7dG82_Y|APBd=B?ZxZ3s}1@0+K~|QvJRpaujB4cEZ-G&B!s*SXLoz?vMX%?A+O`ERMc0Kc-$h` zi;iN-Cby?9x_EgNc0fV!$hZv-bC~W(fkm_vUyq73S+>_Mu{FH zdNa}EL~kJ)_t5Oc%RXug2zmLFfNhhPeasd-9uETS#mjE61%$k8g6M5TZzuXjqIVE| z6VW?~zM1G-h`yERPZ0e{qIVH}8_~BDeFxFIiN2HQyNLc2(R+ygG|`_S`m;puCHiwj z|0B_#C;AIS{}a()B>HZm_Yr*$(f>^Jy+q$f^!-Ht3(*e{y`ShW5&dPNze4nbM1Pg& zuMz!qq7M-L4WhqE^uH4Q5Yhie^uH7REutSL`ag*NHqqZ9`XJH&hv@GT{hvfn5`Bp1 z!$cn;`VpcZCHgU2Z$~ux{T#mn%nc6;$MynEeVyevv|Obgl3a5ij)Hp8JSw3!ZFsm*rinOe0&&(h{Q zbd^@?(6hA#4n0R}0DXEcGd5IjosNSpu}k+ohABnM2mY2m|omMp%!OA z)Zz$6E%rdvZ9)s87Ek@6?ji9ziT)DNlSKcJ=r@R-;(<(z1@o&T8sD8@E*O8Up>aJ; zXLGb+L*sg#&Sq<$G<2BjD(znky^!l!+IJlBGqoQY8n=AYVLrfj4d8<3*VRV2LH7DCxMFOD;p%c%7hVPF;_tY# z-;ZNj^>>jeVx2GQQ9y8&I@8w=m%e*P2L==jylPDnV_oIZA=cdF@aD)@$rQ_)u4ES3@P zG;&+c!(GF@y>R1?6_p1Xsi2wQcNI++T+nqf>*^nZdkYguroYuud|xWQEY$ff zU~X}|f0bL@FSKoLiZ?aOUXTiA#|ufV*Qv^U!24{)@>s0QvFhE=k{4g&i(%oH8}voq z+$?|PY}8R_UJg#X3(d{$Qlr`N5?9-Yyd3T-a?kFzL$mvJwrF**c&WIl#R2LjN2zda zwzWBGrJEcz(oK#M>Ly2y#@cFaYITsdSx!64XwfZ>a@uCudv#lzTHI?FUoYR{Uc21v zUc21vXa_q>M_brtc`aMywTwH~gykgeu4N3$E%za>y4wTRMbRFvzK3jF_O@_%DZJb) zZw+~4IO@T>Osk_Ebd#e7Xo|MBJ905>jyw#TD~D*SyE}tT;;)VRgZ%=?x=rSi+H}-i znP{uaW}uQvw#sadl?;_Mk~`L?UiVg+&C%fv&e7YsttCdYX^OVQ-2Pf(ZhtK?nLo$S zG4gP?c}tA8Ip#|n7i{nw17Wt-{YBl))#9A&4Ng*zsH7gwIo?PssR#V3&0Q8rJ)AEB z*ZoOeyxrn>@phBJN!CY1QjdtF9ufEYh%a+<@ntg3UElaJcYWi_-1UtwbFa5}yL-LG z+uiFe-Y&^kGUns$l6>1G`L;>&ZIk5N=5FVBo4cLkZIbqDleC{?u8BM68hxYdaHpFb z-gJ|rQFN1IUFar9ljtT#i*S>3d!w5i?V+0-&7qqdt)ZLTrH{KyA9t5N?k;`YUHZ7Y z^v#ad$qUi!E`77R^v&+lJ9lVxU+&U3yGsv0ITgk|-(=%nR;$|_1+$}#^g$hS)WWbi zs$n^eJE~%v9n~@R8d{p-t&UBz?%7#S!{(@{VRO{fu(|7mRdseEyHxHv;BJI;54*L= zxd}1q0OmGF9R$0p)zSGfIo&QdD||bf&C0Sx?j$BRN9E>--0U0`R+i3@VVfPJ!!|oc zh-G$;65A|aJZ6|PI^aUz)IwRDam%Xds(%ecJU z&iTrABCou2kHn_szMcC+HZ5-x_zgduRj^H*J4Tih=YEiFmRH`nmt)iNl@^s3(7C%~ zJCS!y=T477!^|OTDlp4R0%loBz$|MjFw6T%R59qCyNFhOWF>(Q%fDE-UYXj$Kl#BF>#_ z%Pid#z%=1rG3Z;dDR5UCVMn$3PH5cOPYs)6^%*wD{G!{OMKWxTnMJorj>qGU<8ju6 zr~3!`yZLt?Kke^4iSCN0XWv~OB&Y6OjByn~JoB$&_|*h=ISmiuwFZ?3$F&Lb0(?SeI>@ zvha&sfiSPd8B4B&b!VE*3tY4Sgv=zABwlk< zM&e~UWh7qn|3(h2EDQNR8)&gvey7dvs%0??_@Wg%^JZ5|Zxk9Pe_-HRz?7Y~)KpWAHi$CMU^| zkqjys<{nu}$}u`*lzfcI$h@wcqfO?53^^G>Mw2-13c$s|C&Qg1P)0b1p^T7>MFnOL zDH-4xl`=pwG-a53e5xqbAXV|ok*Z4I93KGfej!r?tk0SR|K;U+*&k#O4xw@bl&mT-F&9NuLv*7rUIcZ6^UK+EcZ zclnC`4k|djJ5}WH?rE7nysJ{=jw?948&Kpa1+2`IcR;?6K z+`|g)VZt3%aL*9#1qJsS;a*p8eZe1{+v$ zy}b_HQc3yh2v-IEW%XS~xOxS51>qtJE=9QIz{$4fvs}3ydRv-e{%?TT;Z>YaIXS4UE*&Q z;ob#K)_&I!&JXpHl`l=WYT#t+cL(7b72ICJEeB3kkFOJMEpW1W93k8W1@|Mur4`&8 zgqr}aLekEs3AabV&43+jah&g0aEl0cP{Ex?xMK?L3c|gr;Ccx6u7cZ4xH71}tX)1y zxUhn|n{aUj_pgLot>7Lf+_ojloo^W^qK(>BwAY6rl z`xN0CfRn9{2MD)Z!F`8tYZcrxgi9*8R|q$*;QmOsT?(!gPAZ7~V4s4UPq>E^Tr=U0 zD!7XX_mYD9AmL5|C)=MKBHSqjw~cUqXeXJ!J%p=Ja1RhJtl+*)xJCu{6ye$x+zG<1 zRB*p1+*$=!3gX>%=x31B)y{Q< z3mN^99#@=8xN6{J<8m$G8i2#!I#Pd0!nG^7al)-uqtQo(H}92QP?T=qG_g%sQY!i9m8mG5!FMSzo?=lTiZIu!Z7L%6jHt_(5( z)LJ$u%2!9Yw4!|H5^h3~@0Enxt>F3yw@;Dp4TL+O@b?+QO)C6-m2k%t{vILR35CBG z33pQA?{|d5K?Ek(IGO^TQe1C1zsmY)m~df*zc#`}6#gzJT!+G6if}j&%j&U}a2pid zrwEq@4(m_n(=QWltAaa3xLbgejmsAZw--1tx$^xj;r1*16+4X}zbN7Q6#iBdE(@HjA9N9J2XOG;RlYI8?N<1^jd1%E z{_Z2(g9?A&A>2XWz~n054+!@paIlJ8+^-1t5^%C{i6`|T%39u3_?t<%Qwo1e2#4EG zS-z_XR}GwOy?uyqjS7FGgj=rgcN^i>D7gCww_d@0n{WfbVSCbf_ZZ>E75;ufxLXwd 
z{zkYx3T`Isw2JF@pCaESggc<%RuXPfk?#iy_oTw#2;p8*_`8*GuPeBHgnL`TeT#6| zNoDKp8N!t*xL**i3OHH6KTWs>g}<3_@FK3aIB-xESHEf^+)9PNRfJos@b@9YZBY2j z5^exE*?hH&aJWAsw4QJeDYy~BO#&yY z?=6ITQsM85gnL2Z?;*mys_^#&;Z7>J6NEdZ;QmNBKXg7>`vu|QTjKbr08Z8}3kVlh z_*+i6Mg?~@;o22kKjBs?xElz!7C2}c*ZlG+!fgN!rZ5-x0O7L0!G9O`9m4HU1ukoI zu9a|jQV*ZEW-caoE#dY+Bfn-s@NIHi3AYvIo3Mo2N4PHlw@|`;mvE0L{Jl)LSAl~j zwHC8{B~^O8&31wJAUWOyVRDNM4sI9WOBIYiv8he2P3O{PMh(E7ZwmM}xr+>cW>g<= zUpEDOo9k_@RlW`$TP0QkET+Ga0=Z8W$URgb_g@8a?-a;Qha07_?!_#3eIo^OA1IL9 zQ6P6uf!ucs&T@5IB6Yt;v0uaJvK! zpLlC>@B&6hzIz1@pM-02_Y&?tfmxhs}P9Pxv*t zzYs1YaQNh3lRM`uJ#6-uYJtNi0-M}=!ZipSJ}KDbb`mZwaQFmaliN?Y4&WxC5IBBg z0{3m;;8tOKfq8QhwypSk3VaP2`orz^Vc^VdIl_}oa38W@0F-Zso*>-GC}YRLW-cbT zG!k|!(UxT`&9P`S+8z$;QTpF#B+}a28V<+XBh4|MVng7DRckKPH1wn|8au-CHuZ*E z^iZ6CHL%s%MH;?4mDOb<@Jr;u{w&;kjJg(d&+t%oWOx8k&eU)lJeU;r`+Yum-*kG* z=w>)p@cHz2O#?T@SLD@3;Q60=*n{tIHNu((ukkg)4BzL9VR+ALxd-@mJsg7YfDxYN z8OWI7AcRe3st0@`gYS#QFgyzg!+!o=V_u#jjfZun6w?|^0DT(aDIWZBU5b~1@i6dy z?L7EdS|5C$3*Q4Q0BzwdO7fTC@~|%%!-Dg$HyQ0S!=7Xm?eMPZ_h`w~c+Wr#!^`z> z6y5<`e4d6c9PWc>vcLlRTth=I;Ch*^;dk)5Za1_U-=-zuWw&@9 zH^V!uM+fZmF+-d2QC(Z+Udo%Es_g=QtUo4C;NiqkI{iE0ed11B~ z)*y`8VwhnW@U7|C@4XuQnzbvFO=L$i=oR}tye|oC`a6V!po{N<7oH~*BN2$_`cBXp zT_az-8()YHuOSA2>swKGCo<62IDZT3k^W7F|Kgi5J!AYD`q*jf z>B!s!jEDP4p&#z|CR=kqJ4|LMJ<@NKWd|~Xz7cgFkLP~3n`x4nZqx5}WCXnpb!y0p zpFquDyUZ$}{zZSp-=xAb5V{73ld)#q_6N}x({1mFwy17kq-bl^ZNC+5ExPSDqAjl5el6OJdb}yxjC#Bw+KhVqO0-27e;@TP1=})M z*VtCC3pSY4xa}8$t_PRj|P%&$(9wTeD$%S+K#3!?~XcHtdU7 zz7v8C`r>r7{m8TpXP|hx?T5Avil^J2w{1{7-FDoz;Wmi-`+;qP;_0^MY#UC9oO{-` zLGg6kG24dYk(c@VrY(_%XLabdCj^^WkMG$wSRJ~*qqYsVF+AVLZJSl!$84Kb-$!km zRo_Q!n^oT=R?@@{hjap&g*x6=ggTiGlyMSw!9fvM&->n=8#|*-3;MY zD+~qsMvE23iTOs-3gd))qgjU0jSz0K!YF9I(P)KHz0?!6-Ghw zjoDUMrdwx)WxBJhuuS(tD~xnwMy(7}J;#h18K(M;88c;=>NRFm%P`ev%$Om=RF5&E zN`|TaVn(G5Q@zEE3K^#QiW%iHO!X8qOc|#7DTw4;9D*C_OsC=Nb8?~xuUm_G8`<$c zwsv=~?~smJ;@3yA7~FHGD4Ya5{mcv`!S6TpSQ21ZCkucvf?$F+CEJ$Lu5B@BMl#v4e0c}SkbRy6T*Ei9u6 z=K!a-y61;%?3`~%?4B{gi*4 zU+rM~{27+MY>8&?{-zITWqc;nmgc?FwS7urnk{g9UwvVMSj zuOGB9xED$c2%WW|&7EC?Zd{GThy!niu^Cm7qoy>avR1Zl z#>LX$0FZ2>N@FuFjDV0%g%u~GNM8l?IdSC_*YD<`oZ|XHjYo1y+9?l6nxcWHfD16( zMHqS~%Xs@-;9@J@N@qeOXJ%;gAYRkd)YS(o2d>UySTXWEBeXdiO}NUzWaQ9S8QMJ1 zn{J}BC9bfs>;uqO?(o;cV1P{r_X8up(?grno!vJyp%<+sd_fk20RH(|Y~`;ki;)8S zb-l4U-8+Cdd)%CQn4lHE-HOL&ajf`Ji3y7TK61M3*w=2Z3eAfzj%`mYSkb<8(~3(MnDa*WUs5c} zMf#G`U+ym>Y=Rn5S~i*}*ff#2nbIj zU6yLvkt*L_H#urvUMVK+Eo)EQQ6`cjWkzDCER`4^Z4~#EiKfJqJL4~1nW_lY6{o*b z(JqX~D{fwM^DQ?oIGXuDq$pQ3REWr;8OwJ4F}?8dePu_Q79Cl3WHruP1$|4PZ|Tu@ z9De6xs~+ts8*RMuiRDkMc;c!j-uA?`PaHSPM~_#2c3*+05Z64hJCzw-dUW!@<$q61 z9yJpax0_Ru+k@XdBT=(5bfEequnAnUHDY#C{MM*h z4*8y~ar0Lczg3to; z^94HpCLw=U-V`xAA>RWGJyj(p!tu*8H&%(FPj59yaGeyOM+PlSqSsxeI%)H9H zrs~>w4eVlk{zuInsp52%VTk-p>~Q|pL~fUuk{*}2#@sf#4^}oMJtck0=ocSP-)EGI zyNB)`GDEXOYdR4r7k?WRId$So6(#X#CbZyyxl-h0$`>9m_lSa;%q6iZF}$p1+vtJv zlZpMNFiM*aT@kAq9VXi^ZM}Hu)Yi-u1vR6QSXF9B@+wZoU0#auL~a9KK(Vu8eokmw zv3Qq}604A_yN9C4&EE!V!m50nwDU1)=VK4@fwbowddJi`qu1GKTX7Y8khV82n_iUC z`=dzN_48QH0%6t8^QaH1o#PRzod;1LR68{}R6A9nPEWm@6vf3NalkAw3T9>A?$*mn zt6o~REy>nP;~UpaZ4~eD*UL()URqL1CQoQ8M}1j!ljS;jeIydPpg8{4NJ>0lP%h#* z)JDa*|8m{f?Lu{f_G`KLx@_9nI?Se>OZD_P)_z*kcZ4Y2rDtsu$2O=<4vSF zKabM93TggrFm3Qs#wt^x1EymGDcR0cuLsc9WN+&Bek4XZ|CuYqt0A(lJ0fIX&*k~G zXWLWSxno}^Q5M)x>l>F%Z55aM?Tc*N8|T#9{3x~A2SX{;+p#d!+p&m0Eq|Y~`ifPK zlXf{!f3k0B`^-2hQ!UCw{girW33{sRr?aS^I{7DiL>?P2UOlxj^G-^G>Zzc{$*Wa2 zzsw;$u;QWecG86Qi@>fgvAF(oLf8a%KlpzeoEIpS)jn_9SJ?g4sZYm`Wb-KP@U1e< zGEJI}O=xZK96sR@=@WhuaqIIW>XZDSU8jmZ?(uz|`gMJu)5k~Q6P>bH_PxoI4tZEB 
zopY9f^&{$ES-sL8rb6$$F!_mnx#d1@MYiGiDsCJ98~-#vmu&r0(nkzQ-*lXfAO3#j zv=8+wvhmasyHBz4g8Zjr`%d4>mNk#ktd&0Efe`OmejnkKm3$KVLDt8~G3p#PfqqQ> zFGRM!`yA<{-jpxeBz4{!@TRhjr2p~ib9@5wpY=mdU7-J>F3$J(fj#60s!wA6P+?eh zwNjujuXTNbc2&oj56wxV;ltmFj>Cten{a)w<1k$(#Ac+5Xz$)V@~VMQcD7mV%s!pj zs53itW~v{VV!AmEZ1DV=GTqH}@m(@7a9IKCtPm zdYyjY_$ViDW38J<}-%W&puQ;aUmL~%_V%Ir>=qhG>odBZ@>B})$Tm%c)+&&W2+b>TgCadZ*}JT8PB+idhm|{(qC^1 zKXvA{2?M6LeXKdL<6|uyR0rGtSuO_uEb#kl&{K>BpD%g-FNP=h(}@|0t^?zfbYj(b?_ppE*|k z6?SLWiIac5-F*W(-i+OS5d77}(ZuCdxb=0UEmL=%2qn%7iCa_KAFPdjRES4ox5Pdd zyQMao5D#8a`U0)3-L@%$M@VD0yjBp|H4%1hWXjGHk(%?ii`>r@q(`wn7b%{W{OM6= zy)K-P>vgGBSg%{PkXF$S;*lD@UiWOST(6sviYwRaW{i)E;v%_TH#?Rv>#$x|I=K{U zdpC_Pf-bD{3ICd1i!jCIM@W~6_z8hEyUUNzN*&Hi#7vP(YcfYuB2H^{6V0WWnR2b} zGCAtLtxB{_jooj)&|ZfT?u)72?X>!3uh*4~Gf5gVQG7MYhGd@_9y>vfl4J+2?;^g-7u z=t`AklJ7p;@!0A|-*x2bqt_h0_UPM=UUziWQMq1M@!5TG_>YxGccUbe1aVKeYj)Vq z99XkUSZj7~Us4wjnThyztl5p4*J91i#F|~iTC+=p%8!I~*}A_@#9` znyJwG-ASzTbx}Ol^RPZg>u{BO%xk}YR9>1WD9g0eEM72c zFV+@!$FHe;TYUMq39pwBU(FM+Mr;M7@{c-0?AMt!~6H#Dq*C*6Q|R zt!{F9yj-h$3Dz|^JvlAH{o-rsC0KzO9vU8c39DnTrX!hG(@9)!EjJg03gq>pAy?_f z!NXuChc%M2hBqE>sC^`-p@yDfdi{5?%F%5!uh>~r0H3|mB!4x#hUOtcu074JzxXox z!#Tn}=l8LS49>}H+F27FJy2m?&t6co?U2ol2Z6=I4Y>3ixbV@zI4K=HB z+iOH*XXT-SP+4(&MkpmrtjP=y-GfqX84P8%3{r_AnP+fqwA@^1mu=jl16YYf*{Z2* zd&I1oHx^Sqk=`)YB2ku6V|Lxem(yQE?HQ79cIHo6dCgu!rd{<&DUHQn#CoGu=dWV@ z&#Lh>>X>T$d943^EnS8Br5Znh^*^h|?+bOZ8b2wj>~+Ah+OgF4Ay0j8Lw#=>Lwz55 z6ZLKBRMz*jVsl(1B_h&(La-mJrf-#2v@o<~@E&_ru^{ti>pN>NuobFzvKQD_i1Kkk z?Hk`FTTs}D+h*CenLmlzX1qv=r_mN$2Itw=$ORcgu13QK#n#~y$x#&yX+Z-n*(b-+)1C(%a%>{F{=+~ z?XSYRW~KFu5o!C!upa5P{bN{}4B9^RX}{fLDOETaZASQ_W!6Z<@()RtWl6<)m}ogka1X#S}A7D^7i9BRzI#eblq(HQF*fd zqH6v$vWwTS23U|m?;dd0DMxe4X4(CYwS(oCEPq35mfw))8VhS}Ut(R?z)#wCcD`7|QbF_B3Wl-3w3i|q1*|I-h z=d>-ke*PrZ87LL9RPvivS^vd81Dp2N<<1IRW1(-XLmDGkYkW2B`jUREI{Gcw^^db^ zy=_h759$m4&+>uP)^eQEj5+@YKCo(jT-rbS-D>zesuQav+&nry+-e8PBm74V($4x6 zXAJy5$WO&1A8h zp8m!9C*_;$>Oh6YUs-eLAbp@qLcpvTV)rGZ!=< zU#y-yd9!tmoLVFO_XDiP$*213+ZqR{eqqNKZgb1b$3c4xQH+D+yUp)o=lj3nyJdY! z&wT;P|F8M&9jS4bOvUQuuEX!c>LpzH5bv z_)uo+ln&WD%IxuJRvzqhD&!qBb05+fw}_|XpC37O>@5SoHt^}DD+AQtz^N}HAMW_z z)Stg^dHXx%SLe%6Hx-Z2s+W0*qHg>;wf$A)Q_Kra{j2toBi95M%QeApc1`fgfyBZU z?wa6?&y^LIf4|~>*wRgr<7YPI55%{mlY!YVpEIHi1J*T}Z$56fC!3FZ$|bMO?77AG z$Wv#wq)X44mhFefs#~r--jDS{BYO3&*JOE?1k&*OP^&C7&$j)iH4i{~3ZLpJ3$?kt zduT3w#TiF}exK^;+yBsJD_ES}E4VmQjPZ!} z3X0pr6k2~QGB36F3NDiCkDru#1-sizvCio170ji*f|GI+B%}2~XRqM;q1{+1ycH{4 zLs)fOeVFzM(i&u9(wzy^;k;C}f6XycG)@>t#H4uQkLg7f3r6q3nqvZI(^})T(6k6) zthFYuJN%BvRzA80E1b)ou=WXF`@}WbU4lhe3A`G+1Vub9h&>svil`O^D5Gz`;N+X* zzWsuekHqDE!6}BBnBwdgq%(J>{d)y}BII7da_kkH;_MZ?PVN=l9l~D0n!|I7F%QUJ zgq6jTP(`W8!H&s`zj5(H#?b?EPo8y$@_^hgNc;Qt;Etu-&*#}MSONJ;>>HH(`LLc? zDQI2M+UMuYSytYN{eY0u9aOpQXzw4SJFMl9uY9kyk8lt63CjJ3*f-d1EXmwhDT+SP z**SQ*+!+`x%d|Rs0%=F%HC0z(74q76>haA3ujf^WD%b+;4SYE-Bx0FySZ~bahH%%? 
z*%|n^^xx8VU=`@&=_q!>(c0Ij+;vum`6l^|yvz$&h1r9}Y})&Vdo^{Qd+Ze}<@}+v zaoc6t`}VDeF2|lg>@g6z82jyegVr@vX=CcL$puXlX-+`>p)|`)Yi6jm#Shx(A8y9dkcD;X<4E8YBf_ij~?Zza9XJw%xY&%G&coHxx!@+i&iNOP$y zeGf{1_h8(ab17+VALKLozh0+wS~0@G$qrcHa+0ocbBbaUs+;QmKNx2O^?d5 zRM}T8j%Lf2un)qzFDvWH((SpYm3#yHOzk>r-1bh~x5r*KZ#s`eDV-;Aonq-Nv26l+ zz4!j5jiTKf>E3?^`$cjl$pej>}p z+llO8)V6)c4n*$x=o9W90BSp}o#s%QKf(O$?!i)KA>Q`l>1OT2v757`F<;i}Fw&rU z9b37w`kCF6+*{99n@yGZzvnFWHtOW_P50+EIS->+`st~=2S?A6#@K9{^gSZ2<3CTi z_2Kx#m9{^0@@rjxlqO-XNwQb34?Jo4f&}fCw(3OcmbHDWM>qFv&Rzp=-xIKBw{B&b z^Qp{hq#m-}Y#;OPHE{fOGKIYcetULxT&&UIl#zVCzCXG3b>68r+n2(b{3F)Rg&5`l zv~z*x06VKEO`&%^GBAHC^8Gs71Z`7Og&9)!Q`euc#pWBB= zjA=#UNvww+!yLpJ>v1dFGxu02th`|E@qabeBhBlO<_bC1KPSt-TUq`wrOABW7{+>} z{}VaZ<90Ya`RTKkS9pA(W3%46eiLIo((OdLcMlc>{36yw4LQHF#^g6Q-b3#=S)S9f zJkBmx%l4i2!?^sv9q*COXECoJy=0vwund};{!8OMbiGaL!qx9^#@+)zs_k1eH#Mdf ziE=qtDqu632<}l*{W)cH+U%VD#co>~8%E*NRr4Io4UyL`VxDpL;7b8Z@wc0k?jAlb zbZqGC<37@9x92ldzb8?@*7%Qd)hg!wNzc6BGrw}@p)0L1Px?3CJV2J|NA?_p(jqId z(mH9|iMA}Pzq#X}b^U-j2FkHj&M{7(!_G&YdU@0H3_GnnN^87K>zQ+;l^Ro@vd2$o zeNK+UmKS#FX666Id;_}wBda@D?_-{CK+lJ=daS;P{Q2e^wjCxs^WG$$JxJ2CQHuG; zII2x+4ioI(jyN;?zh6aZQdOJzb-Gl$|4w8-m zqOQVA6RiImTk$(WfZl0s-_Q$9OZqwoHg^v6 z2(bjEJcGj!p_`@`l;IPYc}89+FPs<2%gM{l%gc-AjmwMW<>wVd$43jJ6Qc3x#As1; zQnWZaIXWd;5-p8Rjh+{s7Ck?D!L88c9LI}MjfIg<7=_Wjp~8Ya;llXGBZb9x=M
  • j6PEs`3b$IS$Ok- zMdtr?K296qw*PHD4o>>t=i_A~{Pehgp+1hGP9|OfM}^~50m4^W;WsF}+zM0vFzJ_^ z$MCEFYtuhJ>irmJl_+h z7ePA5TRma#3$fqgkzeEq&-H}cJmEQ>aH}WW;t3}`;btqm0{Mv0i#TyyOW`S2_!$b@ z^5-Z#*^;lpk0bA)Fujykad@Q>@yZ!5B`mzL^sYQixI9M~67LZf5idk6E>1j0Sm5H% zbA;g$yhm8T$$5^j80SHhKE1i#J;Ea21qg)&jLvg}#dr^*c-e)svaq0;fqR5~Z|WvK z?TmMiu>5kN7n5PTM;LcFyhm8jh~OS!-7MF~gTId?~{;V{nhKxXc9zg~jCzU%~KF zhL_cDAF!|!8w8^iBsc$DGo4ByP~Eezkv@NEo# zfZ^L2zJuWpGW;Qi?__uf!#f%NFvA~Vco)MTW%y$Z-^K85hVN$h;|%X%crU}BVEF$r z{7HuIVfa%Ff12UXFuaf9&ocZuhCk2ny$t^w!~f3k7Z~2p@D~~W62k`=zK`KAGyES6 ze}&mp{ zGF;AZ1;dpLS1~+;;cABIOFHfm7WBm&_XrF6GLCzM1$_a>J;GuZ!*#|~HyW!|lZ-MS zE;gq7@MNO`7)#XCEb%L6>Ls3FpZ3a`i#+@|!Ydfwz_52lNPO=IPuM%65cYa3!rmT7 z*lRI_8$Bf^d?~~4VtAC{&olf@hM#8m*9_xTK-q)RKcC?_3}0+!jc8#?g*+H4r_y6xzodmkO$eZAc`G&gq)ZS3gWur({)xIW$A zJEDojy%u^Ee8)g%&$^K&4GHhe9qe0&7xAtg?(f%d)(zp?C4mfN@9kJ|O`g1sY_<7b zA-zyvYT?5E&VlQDt*Azi61oFqtMN$b9#K%*)j2TG+utEe(mT*G*m+J0-kWa~$)g*$ zq#9c-u{V!+eRxk$N_VoP25+9bH}>>)^z?2RTsLG_fmac3W({^I&yz7oC#W%d%Zo(? z4XV2r4Y+osO<`+x6uw%dKW$56Ymca3xUl0gvA%ab-Zh@(4s~wm^C=!&cl~;wz-D;# z+Z0GT)@{Hy3_5#+s2{?MxJ7;EKsSEZcc$y{(S!QtmVhJ~kTiQG>oyDy)i>8C>zf2lY19x70Vz38;|bR<+nyTv-&!_LbQc2Qw|%!CaHLO`W1z z8Z4sJtS;kRr4ApLZvED`_=uLsmn&-VIdgoavP8ZTSt4H!Es-xp+YVamTYXGdtY-W3 zm}R&4^683$t=nARTHg|ET`3N>t`rAbSBic0;3ny-3sLXw`#7(YMiksD9b#16C!&eSzGOfOPutdHRkf^b>&6kQT^QB?S0x2}M z2F)2Vg}ymlA4Ys6Jx8UI%PeZJFpaGNnN3v`vQ;JXH8KimpYCfI(8btI&W#>bx!H>%7qNyGGgNrWWnlc@jH7fU5s(T za~0)iRFtDpQ4V}PFQ`vZjz&c}5{hyp6y-=L+9RPTM?z7KM6evmxj|lXu8J2dZ*p$1 zyve!2@+Rj7%bRQqmN(fJEN`++k*=c8C)*V1wkgt`qeyp-BHcN`dQQ#>)^l=>qWQCWol73%f4Z1#cmdK~g68S2{68YM~68Wmc68UOGiu~h^CGypWCGu5=CGypVB?{&~ z8O(h$nEPZf_sL-Hlfm3K`oX-d`_ z6gNWbt0UV1`buHTe8tErP5O%BihaegXALd&$yVRcY-#qF)0X)PYRh~jwPnFFQBnOy zqq_ZF9}GO<>68R)Wo2IY!xWsk-(O|L{wa?u*5pL3+dm(2m8k0k-wNhlh3dpVG0H0O&x2gC zy6}za4EiTJt`c94u9+Gt#Vd1)wW^0oFE@P7HE|7o#CXaa~5@U_+2JSSjSe70y?RRi%e!#U-B{ z2ZD4d4%U_w2Wv};gZY-?V8)w~aizC{Smmuqtn$93MI%?beDKv~+pK5_naN)tFtA!tAJq6~lxS2YrYXt7jsLikIwQg-LO+Vx(AYFpBOa*=mDP z3=_mw3=>iuEG0EXQf)^`P1X8mtywSUzgm=HnPyNqiPaUW=BlezQ>y{1l9>odr3tDA zQ<#XlmXt#Q+(fWpNVjGv2o{^jzGBNsXwq+|w#=6?TjujGmdu|eTjujDmP~Oyp7dRh zi+Wm-?_bv?fBkoeoCSSrW&WHznh&3og!19hp!&|Cp@DT>!$ZA;qP~0M`t`jVhOFiJ zs*VwPQ)8faZT8MX_6CPs!1pkmyB23B;D(2m;Jsgxy;H)IIhz8yH={jK-p|p-vU^5d zgW0=lJOhk5*r{dG-6?G(-O$oT(%mt6BTQRYU}(!Svb9+om%UG?WzY>q+~ad^FY*c? 
zg6~G**{NtS&yB~kb76gS3-hcbPh?MT*YNcnYX>^l_tI_B{?0L&yw!ikMy5_QHYR$K zB+^YSPoGfq8Wk`Kws!PNZU&3c17I@!TzXWdOD?K_E#09K`2fW1=Q{H@mTrZZljhS$V+LmJ*<#-lxKln-9s)r_}8!y9D0z2J>k==%`k z?bq-=%XklJcn>n(F%9o2#(PG?`wip0q~ZOO@m|;Pio^c)jp962z06{~QVnkb`USmA6 zA62>q7+}5q@+I&Z6#1)Uyi({_m3J=VRcUz3884yX^)lW(@KodZ2;;2)Pc@$3$#`qP zD^!&CGmJ;}qvCy)@kTZ2KEZgqG`wdSZ=WXJKQrDzjlOV>zkM^{VS8-A|DDHpCp7x% z8Sh2#iWK@5Gv4dqsp{`K#*3m{s{ExHuM9j@``*fUbsFAY#+wJ8svKWoycOW7%JC@U ztRf>*4l=QE7ATf>`#3AVSN@7M5VGTy@)-dh;&X$@~VQcUnHt__7%!>eeU9=+a0)xPg%ykZUSF2<_?Pt_h@V7z%6-b0MHLc@EC@p?48UoqZ@hWB^I+o9p* zV^P6t2m3U<>5TV)hS$t^$27c48Sgm_@12Zy3Ov<(as%U?*6_A7UKI7D(zl!OiZ#41 zFrKO5eUtI(G`uGnuT8^yp79oGczx}$#`+w9{IY;MT}Plo~mE2V7wae=$SySuZQv4G`tbU zTcSz#BaC+~cvOC#?!PnM8V&Cd z_hImGYM{I`81EQ(Dm$3RcqcTxYZ#CGoa*}RJ&bos!@HUBD08aovQIExT*EuacqVwN z{5`>V3Gh_wTt8>LlqTKR7;lA!SAax7%^qts`Kx5Sv?hNSG2W;q-K!XHmxkBJc>6Tz zzMt_9YV>`a@iH2HUuL|gHToW7yyrFgo@KmK8hw9ZJnBRc1^UrA7^Sz}$iJ%W)MPwU zqi+u5B{cf3WW1C{UoYd4A6Av)CdON%;oZe}Y4E80+@F4l@ov)a4l~{z;HmoMGmN(v zJct76{+aRiYxLzJ5pTOasL^*m;~fJ}Rewpwdq%^%g7IF`@VXf9bq#MT<3&*@ReS7W zykd}`GEjeCU_4X9JH&W(nsk4}cx@VeFEidE4KIvI1ER9W3IKHBKsnB1yww_gjf~f) z(YJ*0hQL$VK?mdA3LgFi^0%4sc4_qOWW0SEefKineHwiaG2X-AK@`Z}4;k+`cxa*k z@Ar)N9C)gJNsIc3${sIk^i5&B(;9uV8IQ(KRl18AuM9j@ySNNU>8E>9O-%iF` zs^Q(sc&jzMZ!%s#cvPR^0VBN~0bWxP8y`rcr?-5TB$Oj^C|yHAtuY{omN;Vojk zj3(W8GTw2Gz5&L2PNVOGjQ5g;w~z5&)$qQ~cx0ric6*BP=pEN8-ftPN6g-vPpJBWj zjlLW@_JXHsx5bRNA3W83Xf@+Kpy3TLUIsi>dGBDn;~IURX1r%K`W|4s7d866 z!+57OyyqG3w1)S0#*4!ERP|Rl#ounl;Hm0m2IH9;ee)QvPQ$x~@!B-Jb&R)2!+SsD ztpE>I6Ywv0G2R;R;KBmDFEHK^c=#9KJ;ZpoYSMj*@pgl!>PNp~JevQi%JFx`dqATv zzr@<7imc1yG4Q+xwnfQ(tW#_rZfnVdc=DSSHpXpj$-9{GhN^@Zg^zF!hu6(`ueJ$6 z?>}=6hj+WhOFQz+GU0neyTjXK>vMSGN+G_I1y$Qrey3v@Vtkk@5mT=zZxSi zQflQPSEOWex^!#D$Xh%{Ue_3TqhsWKX^gxRW8}RuMqVK{KvVmjtNs#Wy`V2r#UkC7KTZ*284bBw%2HqYr3WT!7u#oLF|Kdu6g!p^aKg|!clJ~1;1e-uaa zxowR17hSh6-b)@Hy&=@$eV_3PuCmf0eWf1W>x@_G;n5pI9eo$zt(Fva zjw%n2-X!Yq7BODJ!=pEhI=l^xH_yYPz0eNtBaFAi!=pEnI=uTC?^+L!-c;)FPB7jY z50CbiJG?O7+DT#O==bnU53h;wMm#)v5hePchy- z9v;0p)!`jty!{^D3=i)YjQ4OHL>?EUV4dPkQVDO#G_KLgP(u96-vsV8oc-Q5@P}t zvT`Var&QOk8^Ru2!WF>X8#fFMZ0skhbkxn~2E=13f^~!XG41*sv-hwBv`tW-Ze)~lX zbrH_#Y3if=Q8?1mL>ZUia8F}a9_nc%c|j!itwvApNOyk|h38q}Mln8;`xc{TaJZ`v z@9V$;e36YCZ9Lb)ct+ZaZ?mxzKga0l?CEjOZ?&-#-(unH`AHi)@y$lh`r&@}{3aVa z@r@SFo}aL>j7M2SXzd_9^@+O9%`?^x;>k0NXVF~jCOFFxwam|0+r4=R&*ei;F3w$# znxJqFg;6TfL)}S#L_|A9wyz!RUY}ko8lt(L9;P_W$0z&94IwQ`mcjF z)N`xD*^nMsXXoWs;&|{ag!^Q?)O)jYPS0SMqxWXwc<^?@y&JOeql9Jkh+>pK=kN6R zC7zG(SiiBSso9eK%`0oNWUqN;jh5_HuPkB7{_2t81vWB`zj$SBmh2U;Y>p*6<(0Ks zvOjxeEtc$0URlzT{n0D4%ki>TW|!j+UYT8v-+N^VF^THscOKbXv^B|I^2p%Sr0lmI z*&JK;8;=bBPV#>3k+s;e7djM^aAn!@9(85e@;>g$vgLium1WEOs4L5s z_lPUYmiMqL%a%9e%BZ~g#vBq1qnjbzYK5U7-)OPII5FQyT49`!Z#2s=x)H)nRu~1% zHyW)l3Yc#stS}0eZ!}n86e!=Qx56k$zA@Vh%XI6kuuONB6_)8Zg z13a$18}7NgZ!KZ)qc+|^0?b45B(|c7pKD)d{-=h+6$a<*6vfpXSmG4vw- zJO@+35MP|dR=QJZ3eerxwcgH0sf}g5ARi^bXy9svX^u0A@`xaX zizts224{lgzz`rWw8D}zp7>-Ru)P8qMkc_?r*mz2OonA%b7ot4qZrv=>THbq%9&;5 zD;syAjVZ3i3U49P7V${##P-dU>83ui9@No9V>2>hL8_w&u#C6+LO6?M9p;$8>8(wD zQej~vXL`GJzDykHUEp9!5Ao*%6W@t1b1=n&A6c@tzw>(Z9vd4`T(=C`7`{LdCJ63S zdClR*m4>eG9U=(c_;$Q+-{#eYHq(WQyrhuz1KfN4poPJ`P+~yntPO4M>>70AY9vM+ zcr%R6sEQ=bqjRc^&2TJsn94X}a}Zv)0b!K2vVAiymIeoaWE)i)n{i4Lk*0fZ;B}&^uYi+vfroTk%#p6Cyb?Lz@Tj zdX=WGK3F+$br!>lk>?qq&Dm(eRSqU2hrY_t=7HXH6P+z_g^gt&fWC5vza9nyY&y6f z82OzZ+MMp}zM%=dXf5FjvKR#L&(C5je`Q&W6yUGxjm_!a0mLD1we7Yp$_!z|N!@l{ z$D`i*ucyYq0snqpXt0x9J%#f^WC937XRZu`1$j=0n%|N~WZ062LjxOnND(+8(qo5b z8iQTk#D~1v=U@ZGN6X?Pkeyac&QV>G~z-Gx7r^a)K}C zSDz-l7M~Qc1}-_4?j;$GO9|_oTVU;pj_dJ(6M1ctbF;(*t@!O$JU)hE#g9r%Q2h6i 
z(`CoL_PN99_9crG3*)ITnLo}uQ2paA0^d2NaCG7nQ8sb1zOY%4N~bOzN=3>V-4FzfqWLh<{_m?8NUL{H`&sEH?@o zs_rO0P+1Z=Fk@v^XvV^dhOKwKFQ=UDQ;U24W9!oN&ip1L`tt>|j-$QyY!&A7$j!;E zar|EV@F%+$%(_K1)OAfdRlYM3t4a+lo4sw)TVe&X9t}ZPGjz?ebv;x(D{SeSb;~Uk z_&Y3h{UH8t&_#9S)YRPt1Ey6|3s6)3GG19O3c!=KGhw8C_TA={6^5B(-Yn~9$&7_& zLv5G%a{BUWQFN+2)l6kwRzvyol(+HFxT%fD#}(9NivGw@j@%l%krK&BQ;AKSqY1Ut(2Fb&BXA2GZQJO$t+k_yDek2_+H`HLZ$w| zD=!@BOU0^36JM(I_y>gFGg>+k_uu2wo&U7vm9@q4akujd06A75v71>C!tA6Ss>*{N{LK%590ZH6e4SsD&>noRNe7 zE6|En@r;-^@vxW|e>^^K$`Q#~W`xY!&=X6g7qw|pPr`2(^(6Gja_SSP(Idd< znW4j%-TtCU{jX{z^^iyykNYr%yd5U{DDf|`XX)MI*?W&1{H)3znCs?kkU=F*l!mrZSX^s<6k3&IWc zZ;X&-HC%YJ*je?+)X?SItO7V)W@N-eMv!WyIZc>1!8CABl)4}*CxIUdqdp}fET9o=p4xf zhvJXIX}kz~T6L+;nhIb|Sa_Qlfi>kwYdXG(?CHyXdz$g2$DZ14dqS^Ccr2>q)3BDe zFOl|CeL;fk=}C`0{X9Oe#ImOk=Y`Bq!}25c~(e$ifQM0QrAUzA-N_uJ%SmQ7Y?CXpu{ zl_@rq-<}td;WMX~m*<7vHhv(!+-wgWw0aUs8mr784~LM4GY0z5Alae*FTa)=Zeyakz$X3 zG*cnzALEXHoW02Mj>V35oH!akr+=K7iEI2L*>kmJ&#r&0kp9sceSH3L-0d$`d;DY6 zte~^uAIVSB?BbX3kL~b}e}rv)?1}?40>eKhmVPlx{xKvL)Q}~QtXWXw_{UNB4frrQ z_QG#89%BA6b6G)6D(gosuMxRz50Zz(xRY3P>NcMrZm%1Ke>B7rbAj}aDd-+`#&wUs z4eFaejr5hBLc79e^E{0%;dy9BK6X}FXU$&Z_{X|Z0qbN%=Wp+gS>JGL@opC?8fGQ2b-Bw7(S{4KJA$26^{lW4y=-=SJcSt`YHT zR<^GY@hg{Ky-dUxE&{$}#oO?IyNEBk?Cn=CUM7m7LM&bS_GRYW`b2$W!&S4J5>1Kv zL~FfS(>1)VzsF1@T58Q2mr`raUfyiZ?%dqeyWX5lU)xBUvzH;bs%wz`TQ`Ik5B7GF zpt}eEVV$gFfT*i*VQ>B4*oxl~0(9MtpTSQ04>$ktgIha21abL-|FbXmG-ogA>m1nJ zIe>daOHfLD+w+VeLN`q>D8uUr@{GJtUN|q3my?&9mzNjK89|mt{sIcHO;c&45a`ef<$oIf6$PcZM`$e2nuJq~si>ntGbayx;{MJNd z`k#Q_z0kWSTp0g&q_FtzoWj!GxrJqS}?>2$P=FH3AcH|b39>sDWY?{~@GahP5r;yJ=1>OsUG$1v^mb&s&z(d)$pEl z5f<1q$JK#p%jdtwQj43{%p!EhzRUQbH;aDm`C!nlg_9$_)l3&F2p zxR&7y8J@+k*F#hKvsrvS!wn247^XW{?hzJEE50|2xB9V0b^n zUu5`83?E?lK8C-{@P9D;6^0Kod_Tkg$?#Vhet_ZsV)$zef1Tk68U6;t-(>h9h9745 zTMQp!_z{LP3?F9r2*Zyu{20TJGklccCm24)@V6QM4#VGN_&CE)GW1U2h2g(2{8xrwW%xCQ|Hkm&8GfDNe=z*t z4F3zuHBV$6HKe~~3D?8O+H5*GHNO%~gWJ6TMtVD1qX zQyHcwyW9iU{|wWUTJC}Ce}?JFEBCEs@`{%4q;TyhUw|1(Ta zBDqIc)G|y@7P&`Q&=W-N5tdI1dGTyxx(l&-HOZ*(;bLQk4^K8~e0Yj63mD7HwxfOJ z%pAgb&O*y8XBK++afGj9cnibc85;33EZ*zkDc?E7Bw7vXigE6y-{PuWk0)xyp5o&eZAc`xL>$~kaloOsh3GY`3*fQ zD*Gip@acArt)^pWV@KzPtyCFw!zUhQW11b`^LP$uGz$&yz7oC#W%d|B6NFEvxM;R)+`W&T^vs>Ygrs zu(Peh`lFArE?n4gnds<8VX*zagBk@Dx-mV3CR)Fx16vR~I$M(t80f&>Mza3Rkdgkr z{&dF>YH8y@$Bn%M8#^|xU5nkcqGR0#?62?a5!Qz-y`LwgZ*%%Ty(=ZY&z(w1@cXgN z^-1;j#3A;5|1BlcYD9|s}xa))DkWI6!)}Xi%VqYCuoxdq) zlr2rZVq}#jeMND_zT()khL-wdt8ZwwQuCM7miY>5%X}rZWx+C0QT;~b<|C)8qpugE6p?-{Px z*E?LXuZLvC{$ApW)y?DYEiOCQYE<8TYjW8>gL1__n{vfLvy!^irBq%g!ccs@D63o1 zkw|w^ZEb#kN>%z1$cL$4B|aDO0l|5{PUn}PW)3LSFFxpqdJ5B zxsIzuok9PM$7QQb6P`nr6Nptgfml^l5UX+mu_{TpA5EwCPZ6^jSLFm|uV|c>dc{c4 z4ENZqbOn$reF4@sluitCg%_hCMR8q5;-ICFG*~I*!4=L|idChDX2m6+90!7QDGt__ z6bGA4ii7!<;$X&Q-3A9lDOPn@)VR{KL$0(5$dxXidsl7=< zZ&D6GXj6^ZH7U%FYFIH$NO5qOkm6v)OL4H`r8rn&QXH%pDOMYdqI*fU+F%sJ1hEyv zgcJu$NllRq7DtLTRqLO%+CGH(OQ1nznnC3xR#&W=tFBf}t*SE-kV+F&4W=*=buHnY zX2G!v#SS(M>Gog$W-BRJY$E%LEhnK#zn$7LU&d^iFISe#pCw!7^DLH3aXp^&U5|@; zS~~Av*Cl^dca{80^ygx)9chyb{^wM?c7*27V^AmR@eEOY=g`o=x~}1&-a%2{y>b2e zUc6nfehBL&RUISphRHzh+U#A8?9CI-aPEkloq+ofT7vgpO!h7ePv&e2=)RBkM0u}B z8_VuTbqzKHgl4vywcKJ-uDS*LSQP=v?1Rw_y7_ z&uR1i4ku4Jx1psqJ3HJf|vu4^XLK-9SZQ7Vhl}W1xFGgo^I(6)J|m zYp58C{-L3{X09UmdWwpWy=8uOJ;6SsQg*hEqvLCPkM4wE7t%02{YWx=ooR6E`k~G) zQI7{Zt=~R7*rSGkb*9V-BKc4Qjj5h=vQ}AwJyiwqB3f^|c+o9p@VZ6N>9_@PemiJx_?@`7( z2&^gx?Md?Lqg}=-9_^O&@@O}&ibuQryu1?{9_?-N@}Af5=rKw!?`7~*?Y4~ZXt$+` z*Ufmet5H>st&B&z2UYrZF&^#GQ}MpQc(fZ%#rqcHC4p7>`yu0{fTJigx98t5-VzP( zAB;zP%T(#o9x!ivtk&?RGhUyD*Ti^38s1{Y8wHQ@!OKf~&Aj>Bq2UcO-d^y=EA)Mc 
z@%C$YpJluUHM|EI@0f=76yrUk;r)j3UefUX$#}18c(iBETYpiUr>d7(j904REnvJF z4evU}Yt!)3jJHI?yPfe?fv2*UPcdE(c&dK+RmMw$mr&Ttw;6ArCf#2!-hCS0Ym7(s zqe`~`1FW~*UIMQ{k-tjDD}{bldFL`-m4>&R@e&$dFXPPvPc@#8Fy0FARO9)bjJF27 zLPdE$!+2yrD&AKaZ&Z`+6O6Y@!+VzT_G!}nGvgi9=nLog+cyK=Bt?18W4sd@ef5m@ zB6vj#eTy0Ib?{X6cOBzJQ7%>f(u`LIo~nIsWxP5KZ!hD`15Z_suQ1*U@Koh^l=0SR zcqbVzt>OKF@kYTbR@Czu#@nsoO~M4*+t2rFcrzLAVGZvsjQ6yLx18}_)bP3)?{y7t z3*!}_{Hl7nlkrRq?=y^-)bPH_cuO?ACm3(FhW9Mvk$tPm@n^;(`&RM7xIpr@FWI-M zAC)lPeHz{@#>;4U^BM1ihIb9)y`Uby!SF*9QC9s??)J~O2hj+-ldH9oQC&K#ybU`YCgGv@lI=a+Ziv4dQ$1z&3MHc-WM3p z)bPH^cy$`ylZ@A<;XTiIi!{8yGTsUeFBcc%UOQN=;Z0?{J`Jyq@rE?K`HVNJ;a$ym zJ2bpr#@h=XM1kx05yrb$!@HC54r+LxX1s?ry!#pNn1=Tl)_G@^BD4e%_ z9|jMn2Fg2w@s5F~vV(bycS6IvhVjVHsjlDN!+57Oyqg)1GN-yO`vl{~HN1n2XM(57 z-xG|N08h2f^>fBcY0`a-@m6Sf1xN(c?6F3Zze>hSYw~vy#xpg%LyT9aN%u#L*QU|;GUF}M@WPlhAS!#T06-TGl;b?cTdmR8$asAk zeM=Z`2t1V?bTHnn;Nf2&f14R^mqy=C#@naScQ51Jr_uKi<2?)>M1lPMknxU#hb9W} ze$RN%fv4)1w5X4$?D4Wj-xS6>tA z8s5E(w_3ydCgb&kNA=15?$eAnqS5zT#=AqK?+wP=t>I0z-a8oYxJKWn8Sfd5z6TiZMUB4i zFy1K*?|H^Mt>OKh@uDz3Rs9uC@wZzsc&d7t!FZ-d-#o^{x5Jd_Uc-288s0j_TcqK= zpYc|JhpGwqm%A8m4R~;20p1rFZwNg63-BIdyjwNtKE-&u!Bh2*Uojrde^ur9JL5f| z(U)Ih?L$S@Mp(Sa0kH(g{EzM1hjZKZFnUE3I|BZ=6Yip}% zCR^KDtuxld-oJS1C5AzotiuqG%CsH4>J}@Mls`4%3gdEvz6&iXhZ1OW81<>J<5r$aMN<{O}gcIU|t^D2$*O7s&}50~-hE9E3ykeQG;w z82CiF9TxPhY6{~U))PbEqbevIN4Vb(PYCr7I^jZu9nSa=bP|WYg-u~Rqe|hZ{ARn% zPmUqO7AK$1wK*};X@|#!==rc-nXZsw@T10C@JM1GelNmrzlfnO!Z|(iL*FtS>1m>j z%W$}-vCj#IdKyVy5XpV3(bGH9-QPsvc~-a)-#5>Fi$Nb#@58$`Z~$Lq<3=0LwJ@G< zw&L4t?8MJ8dhj7{_xx5HJMk?R&YqvNu@m2H^sFE5ch7IKu@m2D;q3Vd8_RfN>)bqJ?I4~gLw|_oVwb>Kj;LjR#@g=9LwKehdUA2@dej7kb0~~bksj(!`XeIR zDYAX-VE6j;TG0^A_4F{sX+A#LM{WpdQL+>^5T;})EGQ4dXgsp}uz?3MI|he3hlU49 zE7^Tmkj(l%dLeM`$9j5+PGbFtKOtHarl9$07RXG5)Gg9AQys|b+_KH_F$C91$%33YipS`jcOZF$PENRL9 z=#|;!c-bqn%kc-V%r3|8y|RRuMD_AJk8Cd5nq)6|WN>Oy_FIo^jxGC*M+Sc{+rOxH58xlJ|XAhU{6gr(GHKN15mEIN1F(25gs1wv+U{& z-hNH4Zc9(+P$%|hk={3K86NuX*f?m}&_8V%Mx6oVOWM_GTW0%Cq}dpv5f=HwTqq&K zRGvs~+`woHvH1V&y?=aE)tNtj?wuc*OolK?CYdA#xbq{KKteJJ2|p}_nE(=g2q<8* zwS_=Hr~xNGzyv7{V3k&AV~aw!wxXq5HFno-)wQkFNrHeDf3#I`x9w-KwOws(OS`&t z`*Gp>e$Ksj=FXXWfx6w#U*G#8%z2*kzR!7{=RD{4ojc?0&I)<`x{cj~n>IE<5a`R| z8XW~)2wK7jiR>*@4(R;&dUVh0H*F*uc$Y?RB?jgpd7M_W3GY;Co=vm^bYd`bzpT;v z{Sv1$_cN7F-(Sb)hMR7rA(qOUqR~W>{K*p3yE&6ztI>LXM5Q-&lm58BMyIv=t5tdF z{IE`I`5_I@{ek;~I<4ggG`ya#=rrX6k9jWX$JHlVo$G2e<}ee~{BD}hRH^rNlb@$b zG|RbJtq9mzpQa%f`IS0Nh9TdVrqyyQXbP}?)4DC19_1R%{Q^Dwpi$51{4$N^`Ou}5 z%j?=mbDR>=5lKoHla7=IW<19r5Wp`~(;PF7@Dv}2Jr7Sq31AB7UXAbOX|Ahds;V31 z@b*%#(eRg}PSq=&H$|fBiaQ}eba6_L;13QJ&AR%+I zyuEW>UnZ}X)5rs^TG|d*w9q`-5tg>2W6{!Nm;o5qWzb-VfBgZWE#$bWNH@6hr4~d3!pW=&(*xkV9Tb-rl<*(M)%9I;hdS4M1K% z$8SNvfL3(68#MHtByUf2uD`Jvt!N|B6Vo&Rz)wiis(w{z8YQ5=ZjiPoHuNG7x=oI2 zvPc=Ck;i3gx{ib2x^JM$z=i(3Qts=duAb6)GKB!rkm=-Upul&?RQ)R7&eJO2CgXP% zNDweG$HUb$0PIWO`^FPOES53wT<^xM>}(;qxS_H1^^4;^ zdf|vVNb&jA*NEPTUk*`ueC#|-JQ|mx>OFfA?a?(i;CD>;+QhMk(?qKIyVZRB=82l0 zO^{ELO#uVgbqdppumA zMa*8Qlsnyfm-Z0ONBJR-n&*>zmBBXV9ZBwnPxBIAbZ{6)j_Og;^(z_=S!2NAXna%U=_|e%VDTUu#JgVJ$54$Fu zx_of}%Rp?L#r_r4%=(0yUL8(;)R3N~r^jM&FAF8xlAXz9(w-cDVt-XKI#d;l9;{-~ z)#3QeYr>-e3wcUQ%u@#ODg-HGd~*z~JLu1;NlY~|RhvDIVOjD2kEy0L4=irh`?(_`?R(qAK! 
z`~sh+2buCqeEN5I`DX_{-=O>*OwHfnQbLq}i(ToY`)^^CAM`7WQad87*w*uxbxTq^ z6xJ*UDyrKie~GnC`662uDvGXDRz+7vKJKlBFIiQrwIJmlDHwc|tz-R({M1TiSMms= zvp>Mz~1YBufJcaWA`L{wgB_@y=kjwYuj9@ys=@WgUP9fgMRO@a)^0q zWAokN)b=ZDcO{3F)5?Pilgg*fxy~I<_EVgfN1y&!MKpOGa87raZR5D$X>SIX5^zaJ zBVMj>AUM|{U*=;EIT<_ZtcO0=LmPjeM>f;rw5CV0U)AFf^g!9u&n>E$&K9d>r_{XQ zA(U;*OPA-O@@ApD`P}ZOxZMf+hb>RHmCA!pns>FsPpYkHz4Wr5aZ=eoa4`70T;~41 zpq;wE$xf0TYtQ!YrIa~0kIGzxGW~r_GFs_E8&vC8_p9nVDq|S_BOjal@EJGBd=I;8 z_axrrb{xE5+^9ZM<(}5$!X6$!mT{3!W!!}_T&cBf6pwzyBag*8ibXvJI@B0wLi{zU z@ke@lYSTXFx-Q~!P&0|V5t0Od+C7k?Vq;F@Ju)6*ssP^K=gP7w?Ex{F zZYyaWd0c!0yN~|9x3VH4Q)ar3v^FTlgkFYK+&{zPK8`YUf1W0P@*XWDnYVNDE$XRt z&Z2ZXH~2Zz&O6lDZ)J-zepln}M;Pn%@-qF&#cUDV6ve&ntGPTb1|b*md=X#iF+#qA z-_z}p`lyTfYcBT}?ce(Q&iqZ=4ZSl-_sOAj9R#&H=n(q2)2i;#T_3NA#y*za{wbcQ z9kGvR{G-{cdcJdOL?4-oK>%TtMxQesxmU)z8}>^yBSM?3?B+Nyo_aT}vva z$9R8iY**;(dENa0^M!1ghXk2=+)+Gbw@t|4dJS62(CeIJpw12Pnyri4htb}SD|PAp z!uXME!fmsoMM=gUPFBiEN?j8kx%_0698H0y`3=pXFk6~M>DA%gvuGBj@<(RUEK1{l z6!XeQfA*jDV~*pt6Xm{fQRwTCeX&xxEM6*?&MTGuZKe5x6GKVnnLOf{xprx7%0^@6 zxe4|mZ%Rs5xDxEHkqNFQcGoDLr;LUeFOSRjr1H)ZkIK4h1uvI{GeRN)Dl zr>HX-C5ahE)-2|KYX(!1-@}Sh#iKq&a$Yqe2PJ1a=7`i6AE<7q^C>Tf=p6Tm^VIpy z@mG5va`i@Y=OO8SqmSwmV}aT~WyVFdZ{%ZtPCWD2i#uOBf7b}AH}D@Ft2$=)`Eig@8ug~4lSY;LNi4*Y%Q25`a^1zsC$XAV z#6s<)xWd;pHJ-A?kr*F)?Am&y$M%os7gxuWDqzbe#j0y3?N&UAB-TfE-(@ZTdOW>e zn&n-H_0kpX^Op9!z*j=A$2TpUsjZlnV#QSc2DLN3UaBedy6@=Dtd}C5GM-0ehUT#n zte0y2lCm_+hCUG2O#UJz7V$*Vc&x8VMz*SJraS6!*O5C|8CFexud+C^YPutP)l~0i zigMdkSU1h*>!w8`QGXI^A#L5{-|yw?CM=<5`?a-?|CCo%c1Ny0K4; zT|f45zM4v69ThFxtI+54gvZv+yvJu^J+&;93XR{{Hg&{*G!~z{H_s8?O^?icwLNtd z+O{>k5zbzuNx}))Y@9Y^f&_DXc4&D{Znn zwR>LvF`Bn}VZA=zu9|#qta0+K&c<4Lg8Kcp-J#?zjGx;M)Oygbsr|;(+NZIWVB4up zOl{b;i2fQUKf|px;<2-Z^wf_jLdc03gJ>Ewa*-~AYcP&@u!M6P4szn{l z&gbTJwddxMj->Z6UnfHMsdE<5U*qKS;HS0^^L3$tUt8|r<%XXrmnZtXrBp8WqYOW) zC92+>CqLdtD>W*U%BD5yrInlZ8T>`%lg}`BlF9aoQ+Aysf0+0wqkCG%xh14y57txH zV&(IA-!b??(}%B_?J0MV*{Hs&YSlWXdR2YFBq|Gja8SLee$aS&9Ef$K`7TpOny-S- z*lB+DHr6TSn4M;8u)EDzK9!fYjmMc92Wt5y^`Onu4nX!%Zes%LpzIPv8~;W&@pVus z%2jLE&)W>ua$TJSRNktw)>1-vqQnwM|g$tLro3(_dQe z5b6_ct^#F0-nZgI#Zm`^ z(!ZkJTJ;yvCl7?@R!~j9d-n0ZM>4W4+TdT)1|U2544*9?Kbz^{)c%{t;$&kio>pUV zDq<0TYAo_TUFe_Hwc5lfbJG1Y^*Oq~$ew3P$)5dGre%Fxu$^Vg3(n;`A6;pAe%@Dh zFufh9{gN8D6DLD2i*`Z&P-V^G{-8NKjb)eGF1c;IRZhhHAFfRnji~dOgHra|i-mXd zdC@D{`b(X0bxE|+!EEbtb*&Y@T;=15SB?+s`E#`VImId3<^S1#I*;WYcRcGlf&L}4 z7V~2*R+?Fh?Y0}%Vw3DcC0L79V=Z=YWHQ!b2S;bZH)*-gdXJAk*2EO{yB^{dVqK=^ z9~?P|by1(V& zL#M^XbHWyy){lU0iZ+5SqJ#HXA`b@^HKGShaj6J@4QuD`$E>eaT&li=+ z=LcG=q;;3xMoeNFI$u1djq<+1<$bug+>CpZzP-_%K2Ox{tH7G92=y1O7>(9fuxW-h zSw#%Lxr8-YMLf#aWD!rz;$V!?IL6mx!8TSzYq9{=WWbhJ#9C@Ac1QV|Y?itvdtfAX zMeRIS{9)y_yy2SHZpO|OqjYVN%e%NH7DWUPk?h6QF@|$2b?wE^u@*?RB{pwcNI%Ci z=6x-%yv$?Y5U*}~r;H{06T8Rii}Jkos`=6910$?75ex5aa#r0IeLgtCd_+Z`507}? z?WMKRxCB}w&ZYKxD_~oB**;dzWH!5q&ZNe?;j)uBS4y!$#pN9ibgRHTN8|#AXVB#%9 zspAJ7fjx#Y&!No2hB8%}%EP*V=TVv8z__`xdS~)|l(|evl{6~QT}fiS#C||?*D9Lp zPlm^H*dsb!hrMa>@F=0`L57^y9ige_b!- z0j_Zf8pATuPx<-=IErFEj7V9gjFdD67NYKV8~isX)mV2e{P&8tJ3{_z;rfl`4RHT8 zhNXml55)`jIggiXdEGfkepr;hCtLpH!)pCN{;RHkg7u?$*Jh+6E_U;}QDsxztFj+L zf!`Ph{M1z5k%U=ow*OhLD|^)n?}}=PZOb2`{sb|s)-V5@QqEk+|DY;4v>Gz6`YUAy z1eqjDm;XUkY>4`d1+FOi8(U>8#40`2Nwmio7)#Q48fNvpCyIp%%6LxbN^h!-_jx6f z&tAELm7?G9;yXv!_iXaw3pj60WXbv>OfSGYbL#`C^gh)8!nytX;vUq8J-09Ve|By! 
z{r~6oVcR90+e;Taw+~^AIfR|>byDuReUOho>iPR2oZp8qZqd0to!`^BJ$-iIoIVIX zo!is-{b}tSpU&;+nE*Ps=X+7NsC!bUZ^3f_GzKak?i~ND;T-=cdNex6@8ak9F7&^2 zj-N#Dbx)#&pJ#ha&$DsPVs?(-VmP~L!Z}k@66Z-K=lC3_o!6#uNyp+^=EQu;iB*$b zx~!PJfc=}3{49SHH1hW;S*LXRGX?ryzJ6Xqb2gmUT;ds>q0Brg^E#Bdg4_QyZvR13 z`!A(T;q0-A*7LdN`I++PI;s4B!G6#^i70kU{Cyu>&|cx(tSM=Jrhn0LpW)?pqFg*J zf%R|Zw8Z4hm_HMdeWqdbwXqK8y2+k=ZqEnYo}*c(;ClV(v1B|;G@R{UJXTy(&i&Zy zF+W4m%w^6UN z)6fNHL6+@BKU1c)KKyIiM$~wD2=mFm`m8@4D|$KVUQ(vbWYr`0Zye?5{ymNJTXioB zc1<=u&qF>YyLJiZ{HnjPGSZ&s>405?i}7=Q-H&MZJQiQYd{8ed_sm)MA+>YJNZX>z zCR=pA(T-=h9qM`i2N#KRJ!YtkWa97@+i z2lLb$pJk!=g5ERG2hXtRgQIK-o%NINxoqy8e@M&D^}Dfs8qWH;|1)yt7Eo*aTw0DF zIu2t;(DbZ7N_}Edea30Vx$7!su$*>Dy3NfenQH}^h`p@m2lUvZn97{>>-MR7&9RW7 z*Eh+4-z@rwOdV80&R^F*{H@RXW6Z;x$85)azVT5nKl5i!sWj!2{3AFe9rHFrx>JLx#jeinfDo!N12lyeuC^Vh}b zCveZ-;+cTbi#!wXe$~l;ZjWOJDweYY<;_!fpdz#R6VUs6VtBrp+N9kp*((Ef>_16f zdPZ7(j#|%??Uh07KNaEaVkI80W8OPOY^Vi$Ot&}HdnK>4N}l$7cz4me;2dwt!zuIg zq1_VgLB*P$4@#_s??L?`5%sFiM)N(WqT9WE56Y`P7acA-uAT4SCk=-?yu%@N56Z5r zr8zqGo)ntHBk}{FX>S#Cbbg*Z%GUrc>_x5U=X+2gdGD#vp5#-Qdk=);*h!d^T#K3a z+L$YRJJ!7UHauFxo)16g9S-|(mK$)CyChiZ6=O)jK;r*m=7 zl+?|hx{;d~*Ls|bCo|`gdZ}y)`gOVTYIYva62X7@Rg+V36?^JtPi-m>PXgt6>c(&n z!#$F7Nv+elxDM%hTiFs$qd%!^V@v8Jr@iuv63IDN*_MoBol?c?ea~6qq)HhN)n84vTVhC8()ljTTULg5CKH%}9_;lb9#mpBn)j7AOYP(XvVG=v zB}nTI)6UIWo4&JzT`%l;)ze-V_OGhycD3tz(_YcyTAS)?_=kM0JS;tI*#8}vc60wdB0cge^w-q3X`hXreL>vXuAGdzirQT8SswW(jz6jL$E>@CYU?qq z)YN@I+GBfAN!hd8Lv|fW4^xXsoi#7R@-MFUl*REURTkUSGH5lJQ-)>Ssbzfr zY&qRkpUO)nRA2L|GsKYYGu>Z$TU`-m?B_1pe~aZ;)y3LX{~b59*;87ZwZU(QnR-^X zxWVIG0>6+SJ0D(x7EG-}ZLy=t_Ok~(*#G;Gx?II|LH}_9-P7&y0vNqLQqRoy`4onI zKB>|Z#y($Q9AmrF`+SO2#`pOs2m5@1+&YKVViWRHzf1G*e zFS_RH1y{0SoB=Lfdd-!}tcGYqQ{z=ro1@LqhUoMLrFPx6&D~u}G}<~@sm(wpD^r(G zQ>J!qZ{Dy)nY#X_-VH6v)GLv^c3mI+r~f=T((AkMAHF}krk9}C;tPN3|4q~JIY0z` z9}gdWo%A1G3W5*3@}Y}Je60!pr+>9xncBXovv+%EZx>_ju<|@EiOD-Bd8$|*%aiit zJX@YU&ynZM%gb}+<>$Hc3i3Rzajrtwc$e2z7F2uB zg~}IQh2gvM3u}kmh4o)3D2#sIQ`mA~T;a_73fl^2e`S1I;VU-V)6T-#Z9grX882*k zp)mSlVf{;mwJ#Tje^jXaxUlLch2^gl`d%&czE{MyER#CcKP1AR3bv|Cv!0djN|E_`mKBdc4z84=l-bd+Dm4Dd4zn9XKX>u94 zcJ{~fMu?3WbLo9Sm^2zL8#u3LzgaHZ)u|h8Ox7E*rj)YrJgC@Des;*cc8p4VR5gGEkJS2y{T8 zg905A=&(Rn3v`V@M+CZ7peGCT6oIZ2=z4*kD$orA-6+sefu?WfWG)+P&Jal1@NkXs zvauE;iF}$bW-c3}H^5{r8=H|Kkg~Cv0!?p~$y_#ed4@pB#y%p@vjv(au9?f0^|o8O zACIjWE*py(D9VouG|h-ImyONK5J=hBe1Tpd&{qibLV>24XXdi8MHvDq8(S>UO9XnU zKwl})%LID4KvS=rxoqsJ41tu5T`kZn1)6&D%w=Qrv_s~yv1>8}Qa08h(ANs|bprh{ zf&REaUoX&~5a>?|^csO)E6|++y-uLl3v`!2ZxHB>0)2x(ZxZOu0{tn0zEPlQ^vql~ zwk1O#Wn)_f`X+&<5jS($SWkvP%Es`b3BzS$eFlp1`vrQNKyMdlJU(Z*Y;3?lkv#f_ zVdk>29T@^C8@olIcM9~U1v-0yYh$+x`ALD^Ezo-edapp=CeWV|=-UPQ4uRe$(4Q6P z&k6LM0=-|L4+!+<1^NpDJtWX~3G^2Q`bz?RP@wM?=z9eEkU$?6=r0TO-wE_r1o~cq z{;EKKO`z`+=pzDszd%1A&|eqmqXPX6f&P1e{-!`bD9{fH^uq!@EYOb#^tS~19|Zaz z1^Q8e{lR$q*pdSw>i|5TuVCeS|@=>H?o|0d991o{nu{)IsQQlQ@y=wAu+uLb(w z1^O+4ep{e_BhbGU=ywGAcLM!;f&PO)pB3mo3iO`@`acBvU4ed2p#M{#|18k&3-o^p z^j`$}{|fX60{!0teNLdy3v~8^AM1a~2*CPZplt$e7ifn-I|VvVpj`r;FVJp*E)Zys zK#vpXLV>2oq%#-R{{mek(E2aRsrT6UuM-&W&)#IQF>NzOgV>n1X_Kb4&7Cx@Z33lf zZR0de)2|m~F0B6rx=NtwR|hf|*8c)cUuw==SpN%jK%j#H9TMoUKvxSiy`4XE*;quN zYXzF##*n#ejNYD~xooUXpy_S+najqe3Uq@&H%dMYPG=J`^GjPan$7`a)Sq$0l2PBr z5lO~bka5Hzym3U4QQyW9gXGhQfTIE68N#@?-Ky8HUKtl2owuzo#T^RZ>qz+T!seM5Ku&71o+tV!Tyke$67)^u*|TGP92 zD{zZCmo8hmrfE9euA}+~6B`yZFIcc(`-&w^O=~Etf5X?)wme z!~M+ouc3Ft4S37@jcf4Q!*m+2NL;gqjvd#m-?U-AwjE4W_v_ZAo-*7_i92 zF(MPOiBHnTQ_AfG=!m44Fb-Z&0Fyr?9MK991Z<=nJa7P>|Kw~EuD!5yg0i7U$qoD zEjgTNM$YD~efr+}h>r+}h>rEI(w5 z%<@HsZEj_&xs@$hHIWw5l51r%%rTc?ZhEc+vcui>(yo%33o#bO$oTAOp_wKnI}WEv1HZtOE6>D^CKEfNCU!(k 
z?1-AwN7TfQsEHlXTsvB3<>Fdqnc;HnZJCv8Z_BJ)ds}AZ+S@WS*WQ+yx%RfqG%44l z&9}@nDR-txxid`4oncb$j9fpr%*gd~%M26$%`owwNng{F-PfptZk9Nel_gDOWjRG< zWz~hs%5sUy%JK+jWsf&1E6X1$E6W`!E6W=yD_8rLT%C&)}5!wB&hMwM#J%ngB zfITD21_M9G>m+=ZIeMl!+d_7>+tNk0nhR+*XE&L%qvq`FR*^0;yJck9S*;_(&T1iP zc6KYtu+6I{yR~G{W+66RSO#X$Spk({XT?;8of}qEm!|7Pu+6P(&T2(z+0Euwwq&)# zG&;M#O0%=4JQ=oyCg!rU=R+AG=03siO^Uq=`6PQ{lopab56ZC3jc+nnFngkt5n`@j z_H-wMHV@70Nsop`&taws*k<7awwWeio4E>X^V(^$X!O}r#I!wTnji*EDhFE?=88-+ z?Fs-l?F+ECVcLm-Z_`H3+L=yZ*g|6 zw>UdjZ_dtDd>Rxt?XAEz>#c}w*87qa3%+TW54?HUPBW@fBDZ2lw#878Yc_$i%;uBOmh3pycv*^RysZ93GZvj*Cz z@7}zQf2L2k^xb`R!~Q}Lng#3mmvkcbVGgd+C77Nn{FMrPm?OD{Ui^k@dpED!*1w^THLSmB%a#rJWpaMT5MDFDk7;^0Y)qe}q>pSwgnkZlVE~SB zECI$7p!C^~Sj>e4&}oz9jr@elGFLbSTi2I9FBBCJkbRw&NIGw`%%mea%S<}&<41dz zz5>G1mVv=(mU-!uM@s}9nc~zkb6_eufB;!XpBILr!VE{L7sev`=uq~8AVX%?hIQL+ zShKOWbIS%g`0ehzaQjj_fs5j(C%7mG{Ts*7QQU=_i5V=ntyrS6;@u2o-fGN{7ZsJ= ze#}G&jmQilv=3uTx&*kfIhnz;+msn1yJ49jOj?%(EYnEL09ozK3}DjO%wV}K&I)DK z?5yyXZO`h4+y-cYG_*pF$ZCpxgIoGL*Rh7a{$BNSla^8+mbrxg>9HUB!(&qXYSBLR z4?Hf#^YifO(V6%p+<5%yPtNIbD!?=1=7TOW!PN=4YeAdI`>2511iAo!V!5jXTt8^H z3GPM#mjs<}g4-?N_FLfSNq?ighb?g55^zT?aAN}QQP5^~&=bSPa_I?iGaNnXYQ)i# z*k(9-Leq$Q#R5l978-G9EO65W+&jRT)f;^^*eH*llr_Vx7jX1Ms+k>w0*;>iG?Oe`PTxj-2(St0&WFpvvNInp3bP> z^%l5E0&bH9u35nKTj1#T8I0v7fg@V9_j&=h-vZYs;OP6%c;qif-sc3|gBG~^1>BPs zxF-eNNekRd0`7GS+^+@PTNb$g7I5!d;Cy)g(CB9u?lbdCoq#L1z_kfD{4|`YJbI4O zSnf;9QT z3Qg?2Pry<9nBl%7;F1>Qo)BI7WM0ykH{y<&k|Dd66+z}+C=KCr;uBH+C6lbO9=5O84&+}8!% zObgsU3%I2gxE~6*wZNJ6FaIXs`Ymw(A>j5|;PNn;H^#xe7P!j<+@lt_Mge!s0yj^< zy>5Z)5OD8U;BFLf^hzJI`n^@a`7Cf>5^%M^nbpTP1>9^4-1i0C3Jcsz09NS%;u9@1>6S~xZMKI z1wWa|J1F3M7PxN;IK=|@Jpos5fqOx~&9uOs5peS@aQ`9TR#@PiSd1Iv;Cc&Og@D^+ zfvXpA{T8^n0xoHRyIR2Qx4>->aEE~dD`(w3AmEN#;O-P~k6PfqCg7g3zaa1UDG3SpeFexCvkx8~SeE#OW9XBG#u1>7qZxRnBq`Z=@p+ouHF zSqt1A0Y}Q2t;@bF;Jg;NM+KY$oSA+n1Y8t2vwg1rBj92d<-RB2R#@OXC=Jp9;9M z7V`ci;HVLSmD7&$5tPPyqyE(_P89*CSjd|p;G!1tmI=6+g}e;{j{0FUJ9Y}VwHCN9 z3AhAsWWU&-J}lsNTHr1O;NG&ptrKwXTi^x-oC`*o)yI&4^8o~rIsW^mfKx1RPYbwui*jETa5F9B zy(8e}Ti|S%G$1Q|tpI^0oMT6&fVBH@ zyaNL6h=shP0`3tDdEXasPXPy3j($H8aHoJn73JW5BjA1poLRf1O?_mguXilul?k{H zEaXiUa5R3JmAgQ|RRL#KZ=Vow^%nBB3Aou7@(u{Nr53oO0`7VX-1h`rH*n-nvEO}J zzztZ)`*#7i&qCfg0Y~pcF)OzWlUCyE*Aa_yrwX`7EpYP%T*{){j|;d{7V>%p+|MlJ zeOAD|Wq~^);NG>s{j-3hAT{&fO9IYgf%|s>R}P$6+@BY4wHETquy`@nTMKY7B`2;L z1l)WJc?$&G3JZCk5O8ZP*7xO**duL!tDfHUhS ze=FdgvcTcjq_XPc6ma;LQy&!q?q?RbsDOJ1IJ5d)DBwP@zA$>AZj&0M4x5{H59TyUzmGAm9!I zXI5_u1l)tbnazi;7jTbR;CcmI3OF-+_X)UD7V^F(;9j?o_n3ft(?Z_!0`9B@?u>x@ zzykMY0p~*Snfb4M}-0`4GiX6@*g0*>atW_J8pz&&OmufVT9 zhYHpCbqqM;g~y`!bE;(;4#!&jA{_s17>&lUHpk5paPLlFjOI@IrQ@y>aQo^Qqjw1E zmyY|sfZG{mY`=-Te-&`9IgHUe3iV5uH@jTb6OV21Ytwvne^SIPRdETOzY{o!OkcXZ z>os|L##6xkqfX#c_umE$=VURS+r7X5LQrQz z#4Wf)+)bB=`|>5?p14HZPc9Mn-X-FSDlg4{Q!f#>{1S1AOT>NU5^*Ok5%3HL_E82LrS)syLdH&kXh%?%;E(cXUOz{`t!mHU=&iFlZoPn07BQC6 zw(1a5o6JS_6*WsjAESoPcXF;OOnQI_|pyE@r^dTXA*V zTLNyW0Y`7k)p2f&E0or+YYjMhi>{8V7jRt$9KBsv$E_4_2?LJax~t>*1>8;puGWCN zTfprz;OH&Ay1b_Z+(845-rlR@ej(tF7;yAfUmaIGQBCW9dBlLDxBcq4c>?Z91CHJT ztmC!_xMKz!y&YJ`eL=vz4BP;ep!U&dz}*j=p;@!`C5#PWF1iR9`l&CHM^li8fYZll zvgP%)jExx>fbwzZ+XC+1F2=qOo_^`L#u?KTu*{dl&Q z=pg9zH*M|jy{Vg^924NYZTQA2E3n%qN%(@=wwp00w%aFC8cCLKci5!fn|kRUq-CkI zPftrycRJ0$=W^Y4N0AKtOyH6BBHgW}$IIP)db$v49WzdbOu`h{9Ue;KJA{;W$@G0S zu8%|FX%$mI_i7k7$<)&MGJSh(1232HH1ICzBlzCeCVXCw&u-?1FKmvk=1rs@rR`nK zq&QF8x|-5_xvPoz9=r3SQrCuo_1(>so~@>v*f_iMBT^UMQ@RQ7Ho*n@a*b}%=vgX_ z?~|+fGc{VzpCNU1c6DX$pRUn*eyd8S?{Cp)J%5_iwPjm(=Kf}l*7KWGI(>gsqj^4T zVe-a4`f@3J?aY%l_Tl?(m{+@;c#7cymrD&lY2*6s{dk7~q^Vd+GtyxjDv#pcCgJO%Yx?>- 
z`?vLxREm2WBi{P{USJAAJHOZkojZFmy;teGK=-LM;hjUE2@U|KKTGtw&OXE%zkeUm zy_CZ5mb?$Ph*>RYGF-9s1yy_@I_ThsYTqPcsR5B59$OkeuMy9Cy3xv8sp zn#%i6Bd=NIy=UY#sl0cMyr|0i4+9S``{8B$$;g|j^8RS#%}{w~jlAh9?+->^tIGSm zk=LT~erM!qcD!TcX?Fb9$kXijjgc2+CFGa44ZK;XYvR3S;Gt9Fyni?FW@x-$8+hpN zIPO;lUaQ7?)4)R~&vCyr@TO_JUl@4kaX9V`1CL^n^gCnVAr^~?cUtG&)Ccuc-fI~i z)Kht{W_VCf<-L;O(HO+#{UpPKdMfY786I_r9QUIP59+DBmoq$Sk6h;;>b%Yb-g%+& zo;UDxJD$t%P#vnglNlb3F}&Op8D84n;~8Gs-e)tsw7thNytKVz8D84n(F`wb??{H1 zwl|gGk-Y`d3}S3UGedg1nudS^sZ~wm#saBDP2+|FX&O(X86n-QreSD-)TE|iV1X1> z(=e<+YE;uOs6c8^(=en!nyRLGx%FzAms_W%dAU>6G|F{LlX;r_?3QYIn*8gQB0Nog zbxSooP5yLC)jUmpbW35LCjYsm5Koie+)|LI$zN_Mz|-U>x1{hi`Nv~-%#qOzbtV$% z>m3dk(wjG8-bQi!UxVw{Z&|}TW=_99kfwocl-KnoZc1<&4V2!n;YO0z5J!4j-zGJ# z&f^8`)YaY0U-6;#8|O40zLLJ?ram=>{#)Z=)aiv@Jgz>_cv|0yGMi)?VWA)9LQ$S3 zd+biHgwYmkHs0>6kk_x<*gd#uV-o~{zAUcMQP72;C5({B-a_So&X2E0_q=}7Mxudt zY4lcNU>=gkX*HYhPL<}_L_0ty1~d1|8m-?iaXNE9Q|a{mb$o8P=|&o2sk|u~O(e;m zEJ3}SGx@a|t>;HndSf@~kNazMTD!kmm6y&B>$H|1((v3LxId`VT7E#o>-maKQ$FyR z=aPP0eWKO5u0~@HGeOPoruj^jdT%%Rd8$OSoSW5(fSvVe8gh|esncW_@_lJqEw_TE z0P8oc+oI`FuF>2t(8CWJ^_x&x;mz+x={{qFZCJ?e>v(@z0!G8G@9~i)%492+RSTr z7R9%d)0#I?^uR|&(sn4KlH?;hXr8aN1zVctK6EIc6NAm0xWGcYV^UnbpBIkuCh9bm zhx`ek39si@=``g7Z*Sk&-FX9Ak48%_tXujt8hrsHnn++P_?jb=S1#YMp`S?L#>Mfw z1sbki-cAb@zNFy(0lnddK9vS`3a3Fr=45$$=eoX3UM;7Q2VAwZ9j<7hd9))eZAZtV zrOCzt4TJRNtw_VxP<%TUOMShdcpZhL?N}IrLODS-k7tp*Amr(J0m|#n=n$a1Za}?u zhd)j_plHelT?Qu*8_CCDSrFq>ZL^s^R&eihv zbT-jpou(j%ypX&dKcCV}cXK+Z(Yy^nUO>lhLBN1kbh;Zf^qnMcPjs%ou^FvsBheGn zGyuR)NYko*RcRU}pucXAwkJ09A`iMvj%%_=8KRNLWox>QgWtMupvu67{=HJ}>!hxp z(s?q40Md}@b?XTaevQ=G#nX`Kp>;T| zzQHfxX%*w9b-0Gllc?TQz7zL$QJU5-j{Ep8yYLA< z@7arJkFL1^zYN3ICXPLvCQ{Act>)ubWYqj5r-@`|M?Z7t{5!)%lbBMJ#|FZ2??AAP z*@IWGfyhXcJ+NCEh>S{kC7)%^l6@?{J|;Wvg(h481~FYv~Y zmjzk#z*r1|*~`ZV!z146fiq8vh=EA5NypfgBudyVg$I0^#QQag_=LnlNGvu;+~Zx4 zmZ&0%ff$Iy1c`A-90=l0@W2nRAbSc~0GeD|lbrDKaaD)RJ!JPlgf*?MVVAvpT(wCu*whJ|2#rsa zAllTX*_1)3@_rm$U6Vp-_L`)9Ao3EkDw(B^QF{$TZ499#$*z|a&`QE*pp)yK@!ep!$mN2sLwst`M(=h>^r4e5FAP_mKg`kK(l)K#Hm{p^Gzvg+1kW|~>vo7|0b=><(;?2BYoc}?>9!!PQICj1}uJG>YH6hmc^6{NDyQ6gj z>b-0#>NCvjD(V#MD|ol0;BN1DSCO|M|8DPfH4L>>Jm64bcE4*9%j7-oT^))+nmyPi zts=|!kOdS=gCQne6G}m=s|UH)G`=IU5SFkhFCV`u!jih590AF3O(<6P@^RHaT*<&Rpo4=eRR`XV~wFD*pWFfD&T^fjF}V zCZf+s6(QpD1_C2R_QAZ8&!DHc13ks<%ynmZiObv4!=qF|jo09hYQFl7B-Kju;4kB{ zOJSWcPvZ6CgUSo+8`KsJC?2Wd-9<(m`XrQfpy(*|K@|VW{hDNaLNeXcJm?Oj@{F>- zzC;Aie)Z`yKR8tb znGuqEzeV}QiX(8q?jJYT`zt{=vN-$hfv{ARt-osXID^fdu$k_{r)8N}YW~c|4TOK< zHR^A#8EF{^yyCS7#yj5AJWA5>$EA1jy86?t#(PwoGYIneV?EhA)7=xn9|&CO9Ujd8 zQ%PY{R=ES=yS?_Bc&YeZ&Hl>f$&vSW(y(*V+*2(mqpq;s?Y7tmN6LE?`_`o?9MY!q24cmvj>*i z?uPDxyC^RVX7pxBM*zKSTIY9ioHu%pNcxF2QRJ+1CPvZV>uFp8O?ljR4$mRHRbwm694r|y*er5&!|p7H0; zvt$vt`Qfkm&kQUNj@q2Z9;-@5SwwC<>Kh*&^4X$)x31z?mDkSx@A|K-mxD{CWWT+B zPU-EtPbQZn7an6?k5>-ub^LVwl4Hr;uBz0WRlTc^B|l@YJ7;^ODmiWOv1D>5xK*r% zeezh;yN5-KcC)B=ulKr6(nk)Qk`=`Cfy8X@DK@)kgv~A;_0IN=dB5B#2lgZXITlaF zo_$z(EpNEywVN69GfL;Bj{2miFR@R4hb;@gP_onKkGgyxi7pF!e2+l$gH@@f#z>y6 zGIVO&kngf+WzEXKqGOd2PpLbYyj5cCYjP;r+Hvfl&lCN>JKkSg8D3gc8Cj;t%0AnC z&XptaresfL$P@AhlaA|x6$6za+cr6HoW&HhhU$f(n6i(p2#jl~j1(2gkvmHttV+%p zR!)~LICd0LS>$AyBl1a`Tyr9HYt3?H*@;`L$FrZbp{_o-^2EdwYpYT-Vxcx~Oc|15 zH8Hj<;AAa{;jq7SdC(Jf1g>lK)!c_2D?2*^KMn^EIZAen*6c6xC6p=A~BF#FXL4p%zxNMrjNzl;aIAM6OB6 zn2EJDydHtR;n8jOnxU2zspSDC$7>JI7!GxmUNtrv+Ru*q8u+0meq- z!!?HnTuK+{)A(GEPyZle$H4CxWbzp04>A|wP7ivOUr_!Iukr@nzk|6)D1QfY(dX$M z%+uM1{99bgI?BI=*$IE>7O(P4e4f6AIXm&`-|155vtuWd`TaXt;X3)@pue=SCO++; zZ(KA9tCuUWAX5&vK8my_;i$f|p{@2S(dFSq#YE92D%EoJSbr63JzSM+ zo(<%X&mNtT7@vr;PWD2=GiD#jPkDT$(eA3Lq+>ivhrKP?<;b4QYM 
[GIT binary patch data (base85-encoded payload) omitted: the encoded hunk lost its line structure and is not reproducible here]
zlixI(b99_?8K1q_ihJH)dV=vy|3|%ob7}nj}iF zt}vg4+=n5P&ti-$1Y5C~3skGHiPiTN_)J+SEM=jvAq#yzxg7IznurbEiTPPBJ(`U} z9ldM18W4@o*;zl7!Q)OkA{cRR%+56}eX#c!voi~gJLv-MS=~|$JNWo>GZ%An6Xs_1 zK0C4>wbxj4vt@T>sBvLsC>g<7NIr7*5M71YDOVRtJXmG8ijo6OQ)y15( zn1yoInzOLqTxEuNp3lo!+2R$VAb4DP<3L*=N2w3hBeuM^yiJ7`ikb8e`;EB>?NXgw zSE-id=IHafK8y?X2bhVKYTtaJu5~N}{r)9zBy)fnd^Ar4-|}yjbegDts_7OR;| z^&@#NxXsu(Cu!3?>vhbvH{u1p70{M}Rm4cyD^|hNv_iR35&Ki%re;7d?w(jOKf<~| zm)@ll|FG!>NtY|=|F~DmW3=}ha~V0hN?P3WUgsR!Z`J!f&N0-hg_}gGa1~bdKD4;i zKc{Oe*0y0~i}qC0+I;efPir&g(~QgOr_Ja=-u`jgUA1b0Y| zcq*EV`7~a;Aww=4+)%NwWaHSj%lU-5`}~79u>JR-uJ6uY){v{`CESo(*KtmT+s*aP znp3$M8*MU=I>w)knNvps`8PDzt1;>skHYqeIaOfJsRFEd1zhvq*fUDMK;{rJcL?W@ z-@;#rHSdwD_B8oCFay*tFsF{3I#$z#`E+DCW>dpZ2jJ)-u@W;1Yh4-EJkL?&dXV;4 zudyd4aRJy1v&_t2;Tcl?6*KN6$of@r?fKN1_dN8A_*h6R1Zi?UYxSw8z8PblLuZDP?o_rjaDo9BDL?+N`kWEZiEn^RTYE$P(lUy%Rz$^ZN1 z{{!-W55CpA{SeBw9rwvIN%_B5{y!@JACv!6+qDqe-J;x&V=bh6HE*+W7|(~dui_bK zmLDT`$q|%)vVB8Pw|!WZobQ3BW0`Kd=RsM=asJmIsCs8%UDZ>NG&GwYzeRWGO*>*$ z??hJX2R&l(=kNM4EY}0blF@^sw~;%I`IdRyjpOI4k_)WkFW~r@tmAz+K9zO6AIDE; z9Us8)Q(4D{aQsBp@nIaF%sRdg$0xFmlQ=$}b=-^NV_C;Xah%LL2GVKqp{(QkaonAC zd<4gPvyLCYaXjnzK^$+&I(`dTlrF%^|0>WVEuQI%Z}ljP*Lq49ukn;EZuU%Aj3}MO z>&*M3RTT>#+_@6H{P(+B<7a?9(SbX8IB;|x){X#l$@w23gVTGEVjHTSx^8vVQ;RNE z>5Fc`Xx@yGXv5PhFiP)KX$x1Rzf&a^zYMR!OYkb3u1eNEj|}P?&uy>L7c}9yPS9Kj zXs)s99qiqjT(A7Qz#a*hvE(^@~-C^SnBW@id zI+bVSI_qn%EXBU>&GmL@=Q#;ri zrL<$}(UPAa3ohFj6)I-FUCNk{DHGstkiE(+`)G5Uzs^wm_@D`_vmcZaACTHig`3>#?K&vwys{8R_eJNH!WKbzRK zw>bP}`N`#&qsU0r4n4hJIo{ap)xgVIS&#Y6H;?}2dBS6T>kjuvCvDi{BMRAq-{gi| z#kN}CLb@nL%iME>lYhKsJuBejwMjp(J@Odh1GLcJMRvGXjT$?bPfNh&L)$FJ&^zWg(!kXEO&zKA5B-pT5$Y*ppBlp#{1)Sk=p^kvo(8oujnwI+lHAW zkhiz?Fx$d&>@(7l<#&$IW~#;_kXufka(@2`_DYT?^IGK`m2;EzGZPc~$X(ssv8<|M zfs!h`3rOg^$y-*XD}8z%_g)$n*JHME@1K&?GF1pqd^x!K1^6FY!96^QdZ$m!Y}g$q z4M7(>+2%h18+m!@mtix14Ho=i$ON#zs9vny{9^b%@y6BQ6m9<2(*M;2I+pVawzGA7 zZWXRbI>7l`viON{yf^IU*7-kKc{gI zu&a1Xt6!J%ZVhsd#UiH+1HeevraZ=BjyNJfYxPX@pE02K( zphfbzDw!pWl(fxu9OJ9`-VmHOvVUpi14@g}jGWuab7dpP9dfQ2_XC@idG@YM`584( z!0&Ua-!o`N($5{tN8_^ETOI^Tnw-x!jQ6o^9QX6Ka(}Pi84?o*ycmC9IlkE%`&VsK z@54&42cER>iG`AuLT`|k;KwR2t60#p$!t6NaZH68mNdeC!?+QNiuatuIoaPhH;R9i zy&(A)`INj9R7ZX_fKexh`d>|wesE-5<=UG~dt2H5GV(9j#az6{iJdC=G~X|ISWORj zIocp;ceqsYGSXzG&Q6=;X`{|z&lJ=-`rUU*`Xw!m=4)o#K%0`UA=A`ozE(7buML-0 zEbiI<4e+)8vS-29K#Li^_C1rY4a-{i4YKX_S1o$7^I41)umyt$Gkh&FxKZ9Gz7IMy z_#x^&ENS%jaxIA%NzfVioV7;JAf3VH7Yi*(I+3eM^L|gsmQ!W>R24Rja;_~Au8l6P zxj0^fW6}=SRz#kN2dmcI$n)!Pp1REg=J6&R-*1*XP(EgsJ5fGrmbasP#4PVZxz{Ym zQBIoWy(r&jmb+1gwkF?q2xVwqvW#fm$RV?Q4CRAn89Xs^z$~9YdB0gciE_7Degfrv zX89?UzhIW1M)_W|d!f9YJw=ThHRR9|f!@M>JvLeyTq=9HHkTF#`BgJ&?X+c}W;{Qb zLrVkJUEE)5qm=>A;(SjIEgbSo@s+eTuorMXmzD1MnzfC1nmv-T)R$IqUrO70#mxdRZFGBwn9*PJHdk zYQ0EzCRXU4E;n`zyaUI1DIj(w_FK2@9EDvXZfV4?pi93(5`Kj2SjCrMGc&vjj{v6cmLQU67_!sC~ zXc*VG@PPEAV{Z-RjZwaZci~%j;J5e|zJnbUVPda3UPxqz)#my z!-k94!5SxW&WqfpbnE_{G`xh$^Ks!@FE#)p-h6%uth9)FO>Iy$x4>Asm^!@07TNR@g?3*d&k^U zIu>>SvP~^os(815HFnH*v_#bkq`hY|^!j=JBh>Ac!lnY?1)#UVH;6Tpm@UKZx}XQK zgUoyLO4a=L+td}dL=i2p)ZFMZO4*`w(&en-ueWYG=%l`K0Bd$B^M(va{k+}T?2jQ1 zPe=?l{raeX66<$ac|&S?fKwgJSBM&jPknz6mjezzG1K;6ywJbf(J( zyagX6Rqga84c}|(l0bB%Z1Xbq6X3tiKG3|j`E6c3GPM996zT*;mDbl*SYP`U=2w}g z+Jz(56_uluYuHev{e&?ovOk86{#ZDOSUvOyVu_GDp_+ZBc~g<-Og+z!_x95|+kIzZ znNQe?y}ih2A45C~>=d*|kOEM5VPmrz1v*~hXAuW9;3qbd{h}E*+eJqVoVbx|2)Z2J z7easdS68DyGW`%N_(_)t$dtR@X-_B>P>!5zvQ+u6aa!j$|Px>P9 z>z`@tn@4;^o`hcy3yxesQuAHEvNBg)BH@E*pfnLG&mfyd+#=)Z%~Zt=h6oCdyB z+uau&oT`P|st}D}tJz~woq^E4eBcZ$8#C%tu%D3zNH5qO7%l(vINEI55HasDUhL=2 z-EF~?;w=Oh^2a0``45Qi4@U5}OWIw~cNE_wVf3-3ma@+3c 
z>{Q;jwy>7Ko+Ul?iC^uthf?a4&wuSc@E3#sg!?BpV+Aa1d=|V199+gBYiO;>+ZGMN z{;i>(FT?&_OFKERC7E-Fe}CtWg5M#oG&DGM{!hUNY=4PfHh7h-YL)UTdf2(|^d#A8Sr z&hA^%@YufneV?;pcCSUl7bN}uOMD4d(f9VSf2BVPT!ImBu2K9(%gNBDAK&M*V($5k zy)NpJ7>~oGe(24+#{a9`J?{BgwYdKp*JNG48nJ(zt?$XdMw!&1_l@W!JBWyU0}?mL`uKoud%bt`&WLcMQsLOV4sqALjP_s zXc+4b`iXit`m_RTkfd#a^#t<~>yHh4374DePc7D;@&33jjdz~s{_nXa`=yxcq>$?* zWi;|qtG~!mv*$5oyzk}`@62pV3u?F?mO*D*1)c3QEFtcAUK$$Pc}AOTcMbpxcC#`X zUpb<)8TiVVuEK)lF1EC`|7a|jlEH#CL0fBuuGaXeSg;>ReQoZzSg^-FMPDQCx!DOU z800fM;?w#Av*9&93TsnTZ2i_i78a}=dYRO-hykOX1!p+*BkE(Y^gti0kQgyacGRz^ zmk}cd44A}-K`%2fVumipOvo)&w?+P33YV!}); z88KuM3kD7o;&uGaU_JN^+Z%>1B4-ehU=RE6D|y)WN{Q6f)CU%NLKF513k~LXNTgVG zyiNZIx>_@^V8F%HG*(qCMsm!WWkdC8wQhq~6u47gZEDFvgEg!0$19p|xge^_z zzykb|b?hrw2Lk+i@JvI^KfgU0KIlahzYj z!Jo8k&pNveXP4{52IlCwsm`n`yAV-*i|$N0^k!I?n_z*y1o^npQ?d9CJkw#`**vr> zzATXRe~1`uV6GMc<+kV@;HKUJj;an=t60@j3!_!*7G40J>Qx};-UB{s#SoBN^MTwV zT5HjFunX7Az{Z^dlItmG`cD9lbrN{26To8;CwA+ZHNYO$#4GCFLnQb1ruSf>8h}lN z9RDIBZ3kdU#qs_MXc}!q^Q$^Cli)gR9PSxk5xe8imlf;>7Sw%rB-X?W(;~W~Uf=&( zz^1tM&wYOV7A3vG+iOE^CsCT(`t3@;!?|(W671}P%%vV?x`cdZD^9fq9xT;@&Ef>? z#s3;j6-UHJ@sn87`{K~k@?aA#Wn5ufpp9~$;zAtdO~r_(M1CAz!|VgD8W@d$o&a`= za;Ks1F3>jcdRw5F`$&}WJn<1X0f%*e!mcL$ZJ|d?qjM3HirnV|jIRLl>Tk9ky6=NV z)Mfa=7YfMHJqHck)gW?YYQ%ZteP<9CYU=dx4Q_S^9%J1zv1TF%TvFJGw~}px4%K0! z%vY}bvJv;d?`d!?ev=j`Li>`+Waux5gOJb2?-SroSYug-{4UDV?0H(Da9+x{*Xh7l z0YQ@Z(_K7gjm%Au_wG+b=OA7b7|^!hqov>8mia=|b1`;Eq6rLL#Ke`!7}3k!{66cu zw&0s+ub^)3o~zYB27oVmF~`w-7<#(nk`0PY{2KL3sq z;;zhZ#Mrrf$$c(A_N|v=m~_A&vGeM`9>_Nud=zzNd}w6flPe;F=CKds)IJ6B(G>W#m&^NbohuVDTh_fURV(Cu+0XHQ)@fk3tonV( z+b%TYU1u=+NQcHM$L}KT$!E}h${;t%n5!~mrPZG7V4S%3=E%0#zW8ZaNih}<&h(4S z^=HoZ0raqUMBBD&fotuhv(E#Q!In*Ma~6673^ zwhRT>l4X3R9_OKV$lYP+(=pl$9FDx+Z}@f0GJQJMdH!a&l{$;^oAv|bb+{keBM_Lg z^}KR_O6~3#@It@DJF9@(A{7GP5J({}$g9ZxaKv`~=Q;O5FTiZ|6SK8BUt+clTOs-k zV}Loy-3C>l7&DkHv{!P;Az&LWcaz@uEXvHAFoJDZSPR>L#5BM*Flg8YKJz&NoC0ZJ z7;-iuw@li02lU5@i#!I+i0|Gl@mWTH3+H*;pMwt#mwo^~bQyd14bxr$o?>CFc#aq= zo&#P3*d*gVVw8A}_!Huicwf1B&agM~9Pg=6L&&zCRDi|8$an6Z3~RxF+E1Gs__k)h zSMs%CKD%F_-CA%+saSZq#W?=Fyl>?AF1&BJY~*+c#?iWWd#1cs4HaOlG4EI>Wc7$q zcB=LDd1;cI_&&e>{rkiKmleL#&lj zvH0?C+=X?A`XX?%utji8C4c4nftLa;NQ@`nE!$)_+9Bxx&r{Dtorm}yoTHs$m%I^?oG0Q{Iqs5I1Y_U_{iR;*FI}}FU1#8I z@IGRk&&J>ymyv--v}=umd|*vQ#~i=xBPZ|!!KMfT2wZR{H0e#j&I3d%FDoC{O!yp0{+qq z{&HMx^MSu?V;^9A!C!#mTUfiD{AIY5{G|sxPx2Sg6y~DjFXbk7%AzazequfCe3s9E zu8{ZN#DvZtY?j|{KTo;>hVL@2o7m9b`C77hEdA7zeteB(ctb zZs-e@{bqaWBORPn&nQEx0q%6W;@I>eYdxpW%_h) zGt2b#ZZ^yG0dF$P^bKz`%k&vD^b6#hb>5FbR%O_F3)hs`O@>;X}6v}JN z^3y1K7-}ASq>WIcZ~bx`5&_!FwUFnvaA#H zYX)hZwzcQfqu___${p}gTNotbj#{9AAyDX86Xvw;T! 
z{-;)~L@V&MQ`X=dEOd@rjANi-2J=?29Mr1N132z+tmbh5?*aM-&)U|q94x$|D~`2m1tmUSf21?|D96 zll$zIWnLlfw+rze#$WO@sRbHymvms&0shIgvx4_f_F$dhT9d7M-SafaslzhzoHjzf zzv8;fj0KTv0`(!%AL)#Bk@k3Pka=Qua4GUtp{ z=3KJMoMTqms+V&x`yA`#9JS7I?pkHeX{*e+Zk3s)tMd2d!Zo>ktJRkrHgKIRUSpO)*OC{> zx6udVKhFKX=})R&{j2|)!MpUyY(2ocgvZ0X+-%@oaEC$vgM_euOE{$1umlKGkWxaOqZH|EO$s7Vp6vU=y zqG>NM-x;od1%LUD@#AS}Lp|dF<|Pap>RIR>ffc(e!$;J$2h^c)zaNqQXp{?JV=Lhn|du{@DxB~KJl|;VOr&|IJ zr8QoM7;23`P7jYq3`S6IjKX>j!|+p+)Rh@#cEy@rXq1M|=lx3NzL^7gA%IYMe>g zZqdQlDzGyb@*wVw>-*nN+}K$>wD$bgzkV%jQ-FiP_}IJlZy7*-?PkP8wMsM$?PEa0 zgr6v_4CoU@s1#P-Cgly}9liC;yU2U^2k}3MbN`ZG#bs{8QpAFtY74J`oZPN_O=~Ht zRSN3CTT8dq6(AO|9lRNQR$^g*c>qt9`a@Q(xxo9pyDeN)`awPKhs=0*Eo?^bBf5n4 zqTW*4j-=!YP3GI%4(;FBAnOdkx9!6|=BN{^v#Gn7Ikb%&V@94f;$H&p$?%ip6?mDK zLzlQ0K@sT+cjft0;@Cpv`PfGVDUm1FH{Q zhzF#s&hKQtHTH$HPmOyYY-Q$sFECw(_p#vpz_Xw}+Th@ouy8CIa4Z`7fH~B&aV*x@ z!-j_ZhgbhyHgY2RmbwDOurTLoQQNS^=-p_VgZ6GbO-|sbWENZDf@%*GS#IZ29qOz}^ zwlc(^x>JueAZJr@NiH!g#{8cF%u6e>Fp>u}1N9=$ODs!+GZsv$hDM5V^cB$G!zn^pD8^ne!RkU!4-1`k`~!2SyWqmoo65-H~|mYnb=thA>z_Kobv(6GY3fi2nr z`@rU_|BM{nXA^$ddD=_AQ9zv@e1kjfscqgzZJ*27(zitq-G_Q%Uy$}|^qc5s8Kx`D{*bIGLg)On+nq?~lwq$0{rmO!hk8`f)u4yyJ zJ&${@I*)sC|7<=p&fm4p<6ifyqu~4}!1;$1uv;Q+{J$J|d22pLb!& z*AoR;|G8dx^g`$_Yl!~HA^Iafja=>LB>E#~;L6!+=4yu}6Z*@4y$bu`{9F<_fQ~W; z(BDpV#}%1Z%Y9Or@BRVu{k$F@mG@5SEf;VOIe-F%N5`U$*$P>pZ7MMzu3E6?tR;M{$jEMy2~+KjVLE%<~uK*XGS1q)Pi0lmMF+{ zCo|qre@TlkZ`spg_Z}CYK=!`2-m=E_ym;9A%Jk!&l8F6`K<5ChOw_Kf0;YzPTmh)M&5tOZQKvM$T@lz4hhw-;y!2s5O59Hu$yN?M>Hr^lta=itgBQ7qTS+>A}oB>wxNL2fAZ9Ji5#SWT8K-3_uOz zB0zu^h#&_{4=GQ^ZRIJYV!>ekYeAc8FW2E+ z>&dT}-;y7(ou^wp)_iD0?duKr| zE*?N`>W9@&Sv!>0&-wgcC8@zGa+{-skn={Bb%o}n3M%Z{6Y0D^AM9t#- zO#IHp@4P{md$zb2ahLZN_=SqBXsXp)TJTBLRten-y|oPXT+BJ|%=}Wp8QKENIIIYZ z?@v5QRQf^9cZ>Q++#P=c`qitAPhDcqj`+`lh*!j{290q>eS&MW&BAZAoU~}w#qZ=d zgC7_i5%(Luv>w=J+e-}}no%}?U-g++A+BM*#MCb`!~h>IXUAorTK#e4Tpc4*=CK=F~U->PaM@~=q~fw!#zZ(9l81{>RT&ER|W;C~u8=Mvx+ zu|N8v0YALx-ty8l@wEC?0WKNA4h|=gTP#BULT%}rYI|uKe7(ZrGm*CE{@5t1Z#ji5wuEEc&>)k$qb{%P%axmuC zgiwK>nW;P6vr@<_D->EIF)tn9Vvd-9$w^URk8m#%aHBr%rW@wDk4qhaWjq^&2lTs4Q6>I%2>tZGqj~!e2g}Bi;vOv4xTCb5^eI} zX|mkOa=<8aE?7L1bL5&l7JQ9!Mb0~u*KuCre%S`jSG-r=!+C7+O!866^O8p)CX7Bo z`U55Zn@hfD^2C{!XGuP3@;%81t8s3QyiVRJd8N#uf$Rc}^vccbaWudqsQJI7f@hMS zx|(<%zs=@vi}~xGt?$8j?ExQ+fR8ffPL7wwL+fxKDaPWVG5FJ>I9H!o10FiKU-PX~ zcgL&a*Mo;5(f%cV;{c^h;95Aoq)hP33i48p?*RN@v9VzT;6X;r6vz8FJ-^+ z*$j_EnSER#c~yUDhM#44U%#>bWq4o6(naVa@}2!-P~Ol!SUY1 z9sfUj?*blGb?%L?Ju^ur$z+%$6B5F0PiB${5|YUc&>~Jk!p(q)Sn$$9Ai+=rOfEp8 zhy$Xcf}motr?#TiQ*CU^Y3Vth*4mkH(Y9Q?P_d`|TC}#ct*51@_Ov~J3*YZuYwwvo zYxag%{Qtk_d!GM(2(#XI{r0-P>)r3VlLjH}@hI0*XQ4OgG;>&SA4yM@7<81S&hi;_ zmQT}JzNNnRVKGyjm#VXLn~bLg=@|$x*k%bG=J(=;J_{XIHe)}-Jn_4v!^SOEA48px zjAISK4AKFQgQl{^8e`t}_k*50uH;JStSe#LUfDYCtt+9MK7o@tKLHIqgLEHO0&?ur zz>>#-{yY2;n&!Kd@DDd1JBe!4ZpTp$VY{FN_t=WFZ z1g4bUeU;prPCF&|JbiDGTeAuA9?n!zTwHfy&ul_>8SOT*?Iy!^tAx!)cH3moT!)>@ z;W~`$JkoPy_f?*|EKDmHf>BL>owGL)T^=o^FE8n{X1>B9T1t9k0#-T9BDySkS=jdw z={N`1e{^cgq;ohoLxBz)q280eQ*>U34Rw1KJsPwKUpZ{Y_hFm72U^&T(0PHY)rHV` z?_#$pn{}AmjZ{8dk3sJx?Yn(6UWcA-5OeTZ&4 z{oTh5eB=i`Br&Q$y{3|eE!g)%d9I{0vX&+F-6R@S;QF20xTI(KI#bOaRwqMW>u#UH zU1o6m;SOUxRZ{P*A-y-QK-Kl0meByq;m!rRn(hN9e!ndy2_UGQ&M`K$T@tdeykH(^O zPW#nIL!RB@76ptWVV>Jw4jbF%%eK>)(iH3^(npl&d}$g{viZJmxA{J=pDQihG)?EU zAH$u}&A}6@Erz=D{C>*_9?D(!C$WaQJFYjc@LDGfHM&AtMqx$KLqx$Nhx+4ma$ z9%apempxzUZVql$n}arAY0!ne$XIt*PV0G3(~8gxj6RiTt8v`VT8ugtW59gxHROcm zrFi>b_$^;r4$Pp7&z3L6h!pwl8*Jb-t%r$YB&ir9Pi5fK>^H4x+=p3qzP6EeFoA}w zWfHyg_7JD{LB?pr=`?*Awq; za#cRk!m!r^{YN%bcDksD-$}T>_gJKq&x^70SOxCq-9LnTRB~u}zKO6=N92bZtLSx2jL|Pzf}V 
zXi)4f0hVIJXou=Ve9=sdzucJkqjTXeL^s-SUToM#ca%|l6c6T$J@;kf-XZ^!`2Hau z7+1?Fi}mOD@rQjf&ytN5TFlW2}U0V)gfoRss7u<0^C+oL`u(ATuv(jlm?Q#n+ArkvtWqcAn8CV z_?aUhW{Q-LOs;hEG3r~S>UkKWx zH^+~^Al;LVJuK$(Sv~XS6xk$MHIq*>Ud_x;naZqKmtb4ni#_*lTAe`a4YLr#-jWR0&%;<6jf8IhcZMk>!IyQyr`tIu8n~>`vO{xjYwdwd(kmJ;Hm*w;$)7N~8%QVifPRKL%#(c}&>%qu3%5Arh z=K$BY{Ei{$TDs()YsKW{UaH`Ir^Fzal;=eNObKm=lFxaxJHEPV}C9vpLcI+m^j- zJtMmGWqCf7uj53+Om%~?`!h0k5W8odP;CY@z=l=!efIUl>C zDxl$(!osBcPiXFw?mamT%aeX-Pka}>hhLm<49n5Qf9dS9F3`o8+bpN|@k{G9Az6Gb z^fZ3S(xl!I&4r%EFU^H6#V^f;?!zz5h0@&XD>%dKdlUD<0&MYP9u)h7=`6F>qcwq} zwwm&LluyPs57ZA{ciZvE^)bu~Vz)A@JpfC!1@}+q#=Yx?%8NbISG?WjTTk<%TX7@G zwHP_4*}kh&W<+TYb2~~0)V zvG2`C`_f=*24Uq@6xX61)neB;&aw?R^fck#$;jLz%Ma09`KJ%J9mOnP!%eXby)E|1 zY{9S#^ZZMCieNbx`7&9|*B&Z)Ib6|%{!O@|_(=O2$EK~fzCXaqvpr4SrM_7Gn3uz4 ztz@-UPP_$i*${Vr(UE1aq{HPjr#qMJLGGK8!$0)g?{8D*!&W?qd_9Yitfw(@v>)lh zC_l#HS>0ar^ITP<(YcCcDW14vUS1DYAP-}G!Pi5#u}=ILiWBnkm#C&QC1ZL+=ru!a*9*f4K!WWJG!2o>kZxg1Ri=l zo4zlUm%Qq8Pb00j5e8#6G?tSy;j))7XNR#I8rz{Uo*K{?|H1wW%*!=x$i}Ezc7VqJ znd0;*3ic0su+PFX#hnjbOLMl=@$E%Ad0m+Lof}w0al`L`Ha@Q9 zGaGkJ&>H^?td~JQhbeE<*aqr44;pUFqivA9@Ji1*vWSy5}!i+g=+vi#ej6=C{u3Wkvdt(n{}<92na- zx6M$Flx|}i516#^0K&-8+Bnr2wPC_hnNVHG{wQOvN2rbKSSm{$tG97u`=&5-KDB+l z4a@J4obkHT+J5SL+WuYWCbKk<49MZNfJeGQ=2IF-rY!shx%`>FcW!{>mA|Lg70M`~ zd$jh|r0Je6oK#46T8@Pd+HZF;s=LQ9f(5Ge;7K!4xNC#@xIesr{g=Y8RNTXCt(-Y z2~+8t2t7cna_X>-yLL-g;k4c1K?f=!a2MUZgw4-qd~hd0c1Cyln#M3HrX%eDh@J1^(UVIsx-J=dOT1&Fj$m z9q0_u`nnyVOK6==57z1QP&v>DniJ=uJ8?cb?YFdzUF>-lanjuK*Tx4ELi-uz@Do40k? z861Oo4YD7&8}k}E24iGNcfjDrdY}fH?|@C9waed7yM!E#jZw>VZ1P5eJ)0fxobN_G z<_%ZkD_xDVugk(qdLMHM@T*z3=KzzJ&Atl;e2=nH_D)+K^m=XCi-xE0Z zh;b(5Z|yYhWAo+md62cH^B_M&U-CY^A1Td)&{@rkY~Aes;P+~5v9_a=yYB1yZC5Gw z1~>a&$GDKIo@P2q)#3W)vR6UVTCn0kv+p6yf?SW2*qTD$?`{s<<7qDYH0DR10B2{Y zSskX4B<#|R+vf#GsV8t}?7jxbqjxB9n$|0343%SNVfpEDXvp#q&ARhB5n5?Ww(ctG zJ>D)(vR;b$3+hGEe1@NF;oBJuR`_oYaDFlN2+@4S>CIk&L+=w~t>`_9c@#PunCR1k zVxEKMPvX>T#l9{69(tMNkKQS*FQ*l`u0!~xUM=5ci1d_Ee1k9q@k_nrIN@)jd6GX3 z%&Tyz3u2E9V7{Yv&XJn(*`r&^T3AiRR@@9&?3o?QUQ-juEU1cHbJT@?mxr%=q8XBC zofop26)W`(XU>n8U?jR~z!~?_SHq8;puRC|;jrV|1x>8%QFUG9GvTWbjqZKXHklRm zxMNt~sv-qwZK}e2!-1AfMZQsLX_+l^TW?WW+>hJdTT%XGS>on+OrGISovqW5}Twa>$RQS9MD+A+a(l>-Vx+snCL*0?gEWNFg|DsRC(@Aq? z^iDCxn6Y1{?%X`__Y$Q?xNC=(QJN?W+KWNyqHwhLH5ok3WrosCX{Pgwx8`8JMN6+! z9j3DI@U&pnNKRQ4V}ViZ=i%qqW4VIXDBWsC*%FRlbI?=djPQsi^Eee<@C(W&xX! 
zZ>o78q%GDh3gTmJIt)@?x-sa_TQQI1ZeIA9bL>VEyxQ9MNFH_VC&ccoHv2TY!UKat^E>B?!@0Y%rctDKb^})^3Ts@y9#IKymZ?6VLLK*07!;@?qv=0*cOsvu4IOqrpkLw5MJjg<3LYHuzf>o!w zZZV^4PcB1vW2jDecqmWNep2%ST|wt#aJ{3qDW7&G8_vR4VXsHKZG3MjZ`af>a#g=Z zcgcFO_7r%THPr<^H03B8XxrLRYMQ<=~Qr}@}}C<~eok3tvB zTnifyI>7ArB<^2_d>M86P|V+s>%*bZq1ihi>-@eu4=WFs2^lw@lTYP`aOtmp%cm~x z-iCtFD;@O)s6eNsmG9!u@K=5yk_=1DWVCg^`vTiUq0 zQu`&E-1?tPgD+$@ZqV7-{fso>{#N_>v9>7YU?x&uYf>KF%?d4sc0~J`le+q)!H%Be zk#3rgX`Opk&jY1B(e4<|R}b;I(`~;wPyRg_C(q}^vtzk@E~ddS7n7Zs zi(#3@xtMQCd(*xhUxD>R_t70P63;f!*u zX?xRlL~o7m-?sGdz0v;O=EL`FoqIUCwe2wTy4dW)Q4Eh&uumQidG0csyjQ;c7%x*%=CE#P9Br=j@qTTXoZRw%P#EoGQ_ z@hh-taf(Lp9{)ns>$yI5^S}b^L_3a~e{i$!;U%$_xGO%oB3d~A2=k3)VSgC6y^HZy znu8fq_c6??9C^DNa}?Nzb~ScXy?|5BPGen6ae>S03_0RMe#}4Oe181m-cgKQ1~Gqt zd6#7|FT%Y#=IX7EU_7gSVJwgDo^baRMOtjx?2mTFa$~OH;zQF{c(Jzu<1NF?7gimc z!u}>t_rS6hPp%2?7*bE$Fk-8g9Ez`9d}Qn@S7lM_67-h6N4BneyZik}2S$6Zi(h+m z(P8XKsE4jDV1AcZ!I;Mo&b4FS6SU8H|22a)?@1c@z$pw{2JAHYK_j)(Fgk;g*$=QP zxE<>qVp)8yB8ZhfYBXa%%~haH2|IaTff%of;cTpZ1$CdW6q zE<3&{JJ_UV2N(FU4gr(xj)eWa`WnLn-@L?faF-gswFUGJv*htFsU{nB zx@^#R7|BMB?`6B<<$LeAbTDt6#>Oy?CdS6Nv}j}bd~OThPRNT#mY1C*FD2L!S6R}E zF)rLz$Z~Tk!nv?{$6`z?H>ZqMfgXo3tZ^9ca^ZWB%jKbR%fBS#VKVBUQAXv09XiQ)RGz_kohGI7_)4|-!$Aijyqcg z2J2%JZj2*MrLoLpUGY1`Q5GH05V(6t-KowP>yG6`cB%h1sw$Fw)EUd+BRJdn9Wo1~ z^{h^u?Za5HVGQTn+Sf;n-+YCCdv;(g>ljuYx!)I`m{SpD>E9qabQs?t`15a&iUF5u zyZ8qAJuKTl&&JmRs54c_e+^^N^Y`K#q=wvIl@2HR}1*e7Alq>4OpgmND8_hB>wz4R5ZoFzBGw#BvNkcf1o%u>|H0fy7g@qSm{a)1* z*x5TJGHWdaZ zx8rX5EdTt9aOlCvQ%wh&sQ<%)(Jb6~gE0!6NgW7dZRt|7*KFuBCfAZqZJXz+f-M_h z`!Sx0@sOyU)^hfUz8TE};Cv#y8-%@_=wp%H$9Vq>`VKmODefEDa-;ZrN&HTldElobc3P{6HJfyn&Fw|mwJODR5Bg25u7mqSO@XtNb`dw$GKjR> zdD{6JuPs+6+|zd6M0r3soAo^WJLy%#a|-t_Rk3pD)mK`fQ#~YY%ujuiVnvjkD^!i~ zY|L$BK#$e(a`T59phM(x6LITn#uC5RwDGy0uMY6%a{LyT1lmn|4HuxiVWCtEBL}Oq(%4d4c4CIuH z^C5)(o51xSD;UeQ-)z|7{k^SR2h!PgElCZC75#(lLigQZ>yFT>sDUQ)HBmo?e!NQ` z%a+ze@iqNe-`HHy7Q+1?6~zsu6dF@)Zu@oHK<1x;q-@53thlRsJIifhxX_@vLw zQ!xufqr~O2{U-(&`n{|ru-H@MQ(#+9WcOFJsTncH%R~O+P?1_Z45UvMqAgShc+Lfe#_a*x+A5~LM)i6u8iOfrL1{z*q3`P103FLsPiWHYMy934`D2n}#2XII zVKhedA>tOP&c@nXO7}jV?nXC_@zMGS*2+A`sO{w2JjcZxSHsNGlEx^`FbXI0f-zdk z%guUT0^!metlvrIDaz-?DPPb5Sj)2%Yk6QN*UTIXUCM1H*W_m8a~ATs4f#BT3pkrI zoc>uE$`~4xeceO(JdAvHx+$L%&cfEsrSwrcMftuIrf^H9y>vDHZYT*w*BH}ygz}Db z?kb;Yqx?S~+2wz6%rC;3v5X!U)|@&rah90>rVv9bb%bo_N2lJ&8s&GgemSiThi8Y; zlO}7Xn60C8UUIa)bPoE`InyqA9oKs15Obbq^)cVq+w`q1vhGxe+0=-a%{P9yNxg9eM zJLVX-W9~*fF0`;?s2%rXycl*#h%A|X4P?6v*WZVGS&xRAE5hhCVohU3F+be}-xb`> z3M<9f#<_K}Z$MZPG{#9*M={zowRN&qxHW=3H@s&IE9MZjQCKP5n!zu(Hqi6t)(GZ3 zxDKE>D0)9(tqAMqw6^yKZJgg8v{c(`vv0GoZqW1QR!#98!irgnzSUs`Ju?5WeI8bl z%o@ge#^SHt22G&0_@Ed3O)Ac+WHk{7_RBvXzD2s;f9;_XzxYpeRgW| z-iQO=+qI(AmwVJ^w4T$Luk4-u*qkeKdpzhHppP-CmH8A8w@t7cB_|N$XaAv%Dumgg1r#z}zR3(_}tJfFWf?dj|}5$Vi5xaU9Sl%4&8yKlc^ z(+7WaRVw&S#x`KAFiLTu4Uz4H^OUeprhfO0u9_x3pQ6X{U2bpS^aIi%Xg8G#>5#2i zxIY$qv739y-sqW)_3J@a+r-PE7u$*!15<2)Xe%QE!8Zk{lC=GFA)x-5+HX+rrh%>6Tz z4|dV;u|lSe6<$?@zSL$>KE?T+7#CcC^7-+cinI5*`}Q2=<-NiytGLKp~ z!85L~$WxM^$yiaLhvj=Abon*Duz|K84I3NbIKUzSa8N7zVeVO ztmlp5EV!9-ca?u{BK^81UxRfe3wyHF&t*41lX>=kAuiaW8!~&C|0lIQ&ZnNFQ|j(N%L?LZHO|6S@Y@`>;s2(%9sh^p zzi4WO4%&)T%~IPU9;}gM!3F9Njp-phdiqk*Rk;P}T8MOAgLLgex=z@g74Fe3y|%Fr zlZHVGzng<}jcvXwGwz_frByoZ<5TVEYkKYRiXOCt0n8pdZE^Zv;r}!IpO>8xcTIt& zQn{uYz5lt+?3K?j?8>UmI(eSTlocFTl>+SOEOnqP;eI@>C&sCO8Ya8RLYD0*yX9BL#(dQ_%-al4C4ok*G6?I%Ri^dRK5qyI)(!aTt zttxK=-H|(gmVW;^9l&ZByAG1wtjYhK3A;JS?dG7cn7M z&D+AFprVe)o7bwp-+Th!Hzy0L%h@rkm1zr+EPW0t0;9VM)`hEgkNSrC&a~$E!%oa(mDJD539taZ7n`<)S2p20mIZy!@+Qyx 
z*FY~;YdY~&D#1#v64&J2{jSR%?9VPc39b7ewU9zcW8vG++Daa_GyfaqXiHh>dziW>*y&!m4JYUZH=m`o>a6IH=RYGgRV&q+DetKDvuxqZW;eydhf0I- zB1#9=cQiD|nMe!nq@%Q~?#0TmN~EPyq~$K8Wf#(Nh+A7uzny<^+n=+IS!{R2Ve9yc zLy*w}%=ZSY{G|aU{!6q5oK91_C7WRsqmWi#WtTMGl}#ysCxW{=9TXacwBl=nquzPw zosF$%3z;!ibi|_+9rv)p6TCDjWE=LzG8?|Bd@V4(cVjRfhtLkewl{?H+&euA!pMm6 z-843RHx0Fcm?F10@Ke%MaUfv8#<|8DT-2Q@Ok_{IB zI(7*Z;-<4koOGLw)j*AzXCFnFB%8nLVddBRc8yy36s!28@5Zw?Hhp8(59TnOp7#gX zfO(Bn(-hZjvzm^et_^V=M_eZm*89T8}e6#IPJV2G_}d;niPkm3!6~= z`nF9Zn?kpSAyR&5TDx=zH7%E(W7xNcjSIQ<_D=yGg0x3PxNUp=tDXd zetc|}k4=a^&2_MVerQV{n^?V-`ZQQkcc^^UIL|~eGRylkvk$Gfm-;kw(Wlua<_Gid z!i4r#=uf>*^D5K(H0aAvp9ZJr&B#B7v-xelBfxp=&9SDJzhbh=?nCI>8lO!m4IB$H zKl8W5oUzitEYG5VTU`{Gz<#`yCpplsmG&Hg;4o$odH1EYw?}w}vj9{n=E8CW}FkTg@ z@ox>)1ZVqUu@p217O4y3%)ix7V-Q!z!tuXEs*vi@k*rwWA?NVmikukDr(F}vjFtM` zN(^m>srT9$jW^_voU6>=T!YPCytj=^*yFK%L%(K@0jZmpCw`f$XEpnd= zCoQYTZSAuzYhnHvodxL`685+@?kUz%k^K*CJ{LChJd8iUrhpBtj`C!yVT_x`GGJ5C zo=4mOcXw|`@4x4iZKqw2-Df|C@a?nRaaVPi#!~4HKaS7wF|q>`1sb=2Ejx?8LCKny zvRPCg(6`6|?-N+2fd#@vL)7=g-rp9i%-u4GSrvR^^ps&&fJ(apVwgpVXLR)5-K%`{ z8KlZKezB*CG2Dp9(6?ro%j3JUBfNc9V~phjp@{L7#jKpSf7XMMRbOl$&YUb~Wd%@t zC5_Kw#DH3B_$-Uj3b}Ul0Ff8$N8|mwX}hsjBMciyltCQ*0j$5H)#(iN!t%9n13iDs z9=>~cv+%p6b6scenpMooe^poQ1>e85c3_FZKs!*fSlT=IfZ}x}{Bvm}lWNix*(Pu?6((Mc3p1 zCgy3G_sPYr3s`{@cM!LIa)Ekzb*Q?w=GrNBp}J6YXlk`uxpKpr?hZ8+YMi82CLxp5 zDc97iQ`$Gyb*@#XtXkjO*`Q8Y0Ozumee~a&ek854ofxY+@L$K8jmvupx@>J^Q2bvz z6~6;StXtnpK7H-<-=_8mepW{|bP(CmN7ROO?cHl`S=Wh>=61FBZfx&`vYm@up2wpw z+vbU`aV(2vDOt8GdzK?BBP%m2E6bUcot2Z7o8@xmIo-}tPLDI+S>P;mdYwhi(avIL ziF1sz)H&8U-Z>$Pu=MAzb`+!A@j1oqe9Y!{?X$Z*pLMvsk7l?__GY@rJ(A^CA9lI} z4`sV6_vE;%cIUc754zk9yYk%ApKv$3XMBEClY6uM&x(6S^H1EmcC)CS0Q`Q^cIjzou?H?G4YrS=9G2N{ z`@cR7>l~L}7t8D++oj0E@W)aY|8Yrmk(qJn<b}IH8FSR_{3NAGr z&F=SX_6gaSSROCAE;SuZ?#C4Svdl|N#|$jmzO;I2cAv7_Kbv!j>G%c)ST8LdO>W4? z>z7&{%P@_7Y3XQopK;g&u799Bq6R&-_+!!MPhFBe|Ki7xhMVoLTv8hT^<%Je+U!gI zuS|pSdomM4-IpepP3~tE`(uuOKrW5x_}a&y#}6v@MVG3_fBG?e*N!Rn)tCBRJ63q9 z^#XmN*njqqd{2z};sfPU(}6yT?N1+BI*hV0Q>5YZ=ojrpzvvP4iylV5=ppor_Ml(1 z8~vgO(J$JCe$f*-d1Gx~biaz;*(moh`v18575X{hK1@F+-7)%k)%_yvlk(>P}YYLu}(=mYCdj?mYSKwm!$ef<#h^&aT!-O$$$LSOHKzJ9{ljrhkMcbA-S zdr!JOuex2Yxt*`O9dAG|MjWEtoD3wd6!%H~-F zin%W|$g!6zYzEpRHePXeCB})K)!ci*Kc=}$(0>0%bH~6hMcdbH5LMx908w z|8dP-0$c6dnw#{%Tbg?j;&S1S!s)`hA zl;(aHZt^3(2Vwq?=0?)lZSW&L_H!}PLF^lFaPKAeqr&|Pxru@2eR4k{++L(3f4^|Ik^5ocru^qWB;1tW z{3nDvM(!^P_j~02f^d6~ulzm29U}K`;a)`U2Zg(X+`ELkpWIIhH`Pb}Q^Nf;xxXab z2g&`kaKB3KuL}2jp1*?ym^D&bGq`sc-1i#X_ZZxF8{FFs?z;@` zZ3g#NgF9+)-)V4fF}UwAxHlWzw;SBI8Qg=yy$G3fEE4V|=IgeC;52wcxujZ>jS=vWjcNZxqX8Fu7Up^xvBBt>B3lp zj_)S-M8Q96;D12w34$NN7>SOj@e;>)4R7FoK<;rG9y^i}cyf=`@CN<|{yd+eC$}6V|?rkf@pZ`41#8O z>9s7>t0a1gL|036jYNkex>lm= zB)VRr8zh?6+a(`6o0=rxva@LtJzb(Nm*^`b`V$g8L!xQnU-GfDnMndJJHvW=!((S* z14aH#65TA(vn0AjqFW_;wnWd7=(!R-Pon2b^wkpGCeaHddZ9#LBhiZ_`dW!zEYa6V zG+8pq$Ih-#5^&jh4<^a7v!zLb`!AE|8zlNhiM~mqsb`RU?CeuX0xmnl9wozLXUh!~ z`LB@Zc8Ojo(W@l7L!vt+dbLE~BGFwEy+)!xBhj}?bhkvWmFRU6yrFE3?Dix9mpx@RlIS}m|1A=Ir$k33daFclljyr7 zdb>p5Ez$Q#^t}?jL!$4K==&x50g2uz(Yqx2L5bci(R(ENA&Guiq92jyy%PPXM1NMI z_eu0)68$-e{wIn4yhJ}P(O;10FG}GDd{j@|sBhmjX(O;J6XC?YM ziT;X24@va%68%+){+dL8U7`<2^uI{-HzfL-5`9pjza`QCD$)NY(Jx5!zf1JDCHgxO z{h~yFSE65%=J#KACc&z68*A7zar7!m*^iz^baNam_#3!=o1os zQlej#=+`9rb%}mMqEAWmk0km{i9RjSZ%OozCHg-k`X>_oQ;Ggx68$rY{<%b-k?6N2 z`W=b>g+#wA(Z7`FUrF?@CHg&y{*6TcR-%6=(eF$2e@gW4CHfB%eO99XDAE5V(SMTY z4}zqW?#t{~w9|P@?}X(dQ)kyhNvV_|gAYi~#iiCE7004vEf? 
z=uC;ul4z$yXG?UBMCVGhOQQ26+AYzeB-$g<`4U|q(S;J7I>>_lzvQnC+GvG}{=ej} z4FVuIDYlBY-yf#RcptZrQ1g#BvC1`DMEJ167XbD;yY)jDEpj?912JaHI zHprKtwZXsytqmF`Xl-yYL2HAM30fPhOwih(W`foRKNGYzNSdIv!PEq;4Z0?1ZE!Y0 zYlFB6S{v+5(AuDIg4PC)6SOwSoS?PA=mf0|S|@02a63V3gWw7Ja;d(rkmyfH^bCo< zQle){^i>iamgpvlZkFg-65S%vtr9(3qUT8TT#241(eowxYKd->=miqJP@=C<#%g8# z0eoz1qOww>>Bq(jmCh8}t8}H%Mar!y^k`*W3XQYIQs@$;KZPEnY)YX^mD^J2vC17O z^f)D&LXTIBJun-apcs2tlpkXc%Ena1*h8}6@Hb-*2-ngn#vYCh2ecY{Fg9EwVeFyU zSh@1~l=M_6#vTTap;Vqs@vl^jJp>z@q!@buHg=g}wD4_gvSPI0ZLCT$TIe=5MKN07 zHdd_|Eo>XBQH&O}jfE7Wg=}NBiqQhLu{y4~P`a|J;L!ysK^sgjZv5U89Z*Bp~yHcZR zpKJl@<2sF|J*x$%uPrHbgYuk4(_YR3)c0!|P5UYfAP;}nXxigg0C_0~jvm@?SO9st zR-<3r8I7iWa0QUp;~Gu-(F!2Xf758%7ghjy_aPp7Xn$4#+QR~k zrhQTcXfGQyn)WLduuA1gji!A=1!!;I*J#?qQ@|>eKWa3sJt#nX9fNe!Bj|wAq|vm` z1eY5qD>a(-dlWz)+@;a9@1g+u;!7G$`yUFRPrj$W4>>7-{&`!Y=}T1r>CMU#@d4Oe*cjz;jixo#1*}9lq|r22R{;CzR~k)gj0@OkCC@41qqV*T zu)k>jlO9@giwhf+SsG32DGH$9S70yEX|4;u__jca0 zw!QDx<=yL9!riroEniMs>XC=eRkzmHFYjN!ynWqZg1dfgq`Py#!WUck`+9o&m-n`J ztl`Hcm<7?kYE@s?n$`WwS8wR6 z6fe{o0-=Kiv|tQeI(i_TyRyBvx3haW&q-(R^1k+qLRR*rLvYeTEgcnsax$iIZTl)Z z;YQ-p0STOGO!KPs9i7WNI@k5B>DP+D7!b}#=v!|39``{!=^-XgQjomN<$pO2$w@hE z<3g~+*%jgpj|)Rnj{CSUH02Z#E9l4%Y#I4MA0s{N#OyE*t8|X^pp}nu7|2KudHE=Z zk&N_!nU8W9%196C`6vgsjP$^qksiQPr_1|A4*sbhAue&ql$AgvPMER)=&j1)3&l_4 zvGU`scOc;BY zX=y=l_Ed9rN_KhTI65WINp^aoIXXSn#GY;*qd7M{i#%lWJYH@ZMhcdWL#l2}0SZ=1 zs;I`NO-sp@V5Q_puu@VeSSc}TH9NI>Y6>#RHbz=(YnQXr zYnQXrYnQWA${`tLN?9b?=DBP%&t*eOO(X(oNY7;*vSS{GdDhcIpuAX=N1EIawqdm? zyWX5_UK-|wk&+KCGE-B^L9kMCK&;xS(^F#6cqwscytEi2WDcqZXZlfy%I1yN5k z3sU9@XnJO9r>5~Vs7WTLn(ESij zrH9v8XBJ*c)6wEcFZ0Gaxy&hExp2X&O%oIF(u=DxRoRPpYfbV|Ym$#zlYG>gLX;5kB~_|Lh1QvxI7)#aJd;SJ--c?r{}lf^7Q;RT%Mla zhUw|~ZJ3^(--hWX@tU;xhUq5pPB)2nnn}FVOyZrEUd|2E(#yGFno0RhGbuk4UDJ@N zYs5!4B{;!K2~Dt43PrF|>O!zmibSwdN`$jgzc+%FQXYbpQXGPnQW}Dlp8AIL)HkH3 zz9BvJ4e68VdOXhm4*sjp8@eSJ#m`6ru9vZ?9pTCh{f zNK=3*xzKni*+>N1kdl=oJ0&}s)zDboFg4}VEJ9Arr^ZXksK!glsm4ps6J<43h?1#F z&jXnesd`sKPpwYv*r~;qh_cZ7+VpFAVU=p1oP)T-*OeNXrVnu>t+D`=A zJePGTttb(8op~-BQrckxovN=A>{OE{$+k#|dDyA;P%;qnGO05!lT^bf5lE^Xlw_M{ zKGn)eqRlHU)z(R(%@dev@g&h^MFq`aR#ae{r37rVq5|72C19IH3H4*q=u=I^M8eHd z0vR+>J{YPnPe{F~DgfM67hr6|REdFaqQ&TwV$v@ocDjxrZhE1R1~*Z@oNbnSgly8~ zO)u(<_F;PVN6D(COJGa7wnh5!#R{ry4IMVHz(*zX)FH*Gc20 zXcoaU>5n(0^v78>4Xt;tS;>E`$I^``v$6II`!CnW@l#?;4`=zU(zL*c@Y)o@MX~yY z10MxHQmnX8L7!o{Xi@}RpSBqxT%QXXAzYte8WCK~Sn(N>>W^vl#l_;KdlXW|&qN>+ zQ*jpyEqQIsMUjd5!HXj08OMvhAWvmp^aXj2bObM?-jZ_B7tCgMNA!~Mo{NjVU^XW` zqL)5WEM~Lm7mY=ps=w$9X6pb(^wLMli>~QD@=G7-1zqQTwzN5Xr9n-?g>6caG_Ll1Q{{G%ID>w9a_Oa?!>({RBT-VRI zE(|Om;0uI$J69)G@gx=t$q0P~(1ih5JY)$lu02YuI+CNgPyn>f$?`?MHpUyc1B3OJeu*`2mIzv`hxKyFrFwD! 
z5FlmI-G!m3FvB9g3u7UDv>fn)AcLnc*jD$pukEBIhu!Td&B5B7OM|9=dQHMh2Z_aM z$=lJ=(_sBh!ulCOV)S?tCeTNCCt(MT_#I7yaXhUVxv&;6gQcqiOH@j8Hba>!1vBJD zgGyBoW{-ZO?r zyIsxV-6P>>SE(89%My-uZ<^u0E8%FDq*?mjkZ@tpv>iikN57JAv}@1|_csZ*2(($e zw5Q2fE;n1?CQ7(23tXLq>$kwQO1LO+lnyz+H%qvk7PvkM_ZV>47Lyj<{Sxk33*3_u z?gb0n3li=X3*1Qw_m%~Y_IMfV<2?)9-zD5%EpT4!g))Zc#CvAtGFifvSm2r^T%`r> z1_?La0vC~Rb1iVVQ#&Oe%YZYJmoG>-I{(tFU4B!-MSu&L$jkR7+v=Sk43fYkWf5Mrgrl>SvCT3qyjBTEdq>U6?*<9yM83?@7m;w|fHSLKIs?|2-zp2- zV-k+eyEeyB4@r67H`SxJ?qyh5Vbv`+$T~EpSgrxCRT{HznL$3*5^R z?q&9&2V;fkc{<9@@>|RiX|M~nPY~VEa74nxT_@GDGMC!K{lrE zJqz3|67E9_-0c$1gK{#<$8HH1u)uvu!cDip{hNeqv%vjO!mR+#OuzhG!u4C={v_dc zSm3f?q#MimaSPm733tE(S0mvLTi|HVw=v$gEO1LD-1`=|TO}L~0GQS9of6J#fqO*4 zRRU*LA77SmGc0i5k#LJFa3>{PhXw8z5^lf(_ZJDb(*l=^K>%ZY?6<&8lyCt%);9%;k*{OFH1Pp0{3kRM`t0J#d}=B zO}D_Ek#H>*xIam_MHaYBbjFQxaI*z&jD+j5z*R}Oehb`H5-w_iTP)#rTHrb*++)Cj zm1cJiNVumha1Thh0~WY1O1Kv-aQ`CVUa`Q@nf=E4IAwu5E#b~s;C?IN-nYR0UBdm< z0_TD78q?>1oZ;29a`sC&k0y`uxXKk0ZX9rC?St+qFveR69Gz7ohu0zD=q?2_+<=6e zYZ32m3AY3|N{1ZpKTEh37PyxrTsLrL->ugrTogDnyY5#Kj`Ww=ck6#7+_M%qH!^1| zXS#C&ucqaP(Ou~gN;{CIPTV#QAAre3n&kBq5`6XP$B7Ij#xTrS z#F)PQ7V+LG;SN}Y_gM)Svk33267E%t@D5A3GZx|fM8ci52=Bin95o`a(%Mlrgwj|p zq+iYCRF!b5MR?OBT*xB4g%U1o5niW+BRy=EkIfQpg$3>r2^Rs5@-OStuSmGf7Pz>C z+X0+eyL?N+Jq8?DY4QF+!aZvdUM3v%syC zaDTPH4N5pCGG$gDdnB9}AOx9KeqWYwss-*P30Gwi?~f$hbc^ubmvAi>I6I61cqN`i zAkc)<@=+?`=nf>a{MJgiE{pKyO1OUD%<6Z!gxd}r{-vdFqlDXI5#BBdx8EYXrzPC; z7U6wI!o3I_SZV2dL&6;c4po$f`>llgDR5@(k_PqRm3ZE_2ye86`_Ll1DH4vppJws4 zO1N>rnbq5;BwUq6cpD_#42$q~Nw_u(+|v^7W((Z6C0sXfRGzZleO1B@ScLa$3Ae){ zymJz6uLW*2j8f_%e90{oz?xcisS>S#x;pmJ*8IJ5ljkZ{K=!uz6xd&?rcgA(pti|~FR;m%s%&PccqEpUI4a83xH zS^2p~r`DSnIJ0somvE{@crzqil?CoP2{+vWw?@LXSm5rIaEpLL(WL2@MyM7xF5#Bc;GA+& z2->6L4obLFGZ>?Dp7lq^osn>JuV#$Sht?k*=fN4-ly^P9AAXWCvN`le$IXy%-cH7h zH!bM6TP570)#kW=l5jh3F~@yh!oAXEj{B>Gb9{y|<4q2F`ur$=V}4t1WsL4M&>tNa zk#N)3;@)&>Aljqj9+hy8t&GvPT7PuhOA_wPZpP^RaQ)G7?@72l|Ac&C3{`)0TtR7S zK3@HjIImpB&603C{@EPYFX6(^F-GU3>yIAqGZOC1^Ni8C>iVPO-jr|+2NGP)V}0y=1?StF$o&in7k*Qm|1O8uBjGmxjxjnnUVrp>pOSDh{)&5-VN>gmjyo;k zcHnv)I;UQLbet2n6&Ul;hHGu;e0%-TaaT&Xy|^BR&c)Xs9k))xy@hL8=sbP>(Q#jr za7%EV3Z28RKRWJ|gu5Aym(K6kA03xHJ~baI8WElQuRl8O3JJFdmoCu#0s5ok)=9X% zxbDOZ_mqTtY_7uSeggf`!+TT0oyB!0v^GtDbX?{Hkq_PLJzQ(jVp1-n1umk$U$sO@ zxdB!WuR;q?$8`hub)CR3)jj>yXt+!k2F2L8>xr9u^zg17A+C3XxJO2adtrpQQzOLv zafG;06Lq-?^TMzym&-?pTQx#lbcDDsj1c#|5#oL_Lfl_Qh#RAhto*JRA+B?TxQ9lF z`_~cT-q3JNdq@u6f{xW6y?vZfaUZ+R#P0_Yg*nlby75Ohijg0w^;};n?!z91(VBk3 zc@4ObgxlMv@b?KR`A0m5?xzME-GHUz?vQZr8E|wnmX7<9g!{mNBRfmS9hY#dKUvOn zla`MAy@YcaaA5<^;}`GfqEKSM(al?Wc=Zx4V8GFRKss)vgbNvPbW@j(dq~2~FyQD0 zFCF(y2{+e(qno{S+>a#O5(AEI{L*nBO1KpU9Nh$_i^d%!3>bp6<3;P86l>3T$A>#_Dm ze*op*{Tn6RTewuJ13dlFaW%DdQ&q!L+jx0neO+x`ZLO+?IA8o<8wyRG3S@14U0uU; z@>r2`XKUMRMWK+yqp(9fZk-PQQ1~@)MkQOXtW!o&CjI>(oD8&fO+WTt5M2fum-6)Y zuJ0x&$M`sJ1I|lexemuf1^b6LY@)ueV*4z{BlqwT~zJsApj3rsG( zt6_2|OwFBbqs_&gJYL4#z&n*sU|Vh%ey_l9H_Jg;*fTon(4`b^M@Jncob&A+wF$nh zqn7wCN9L7EN9Vw*?mBYM5bj!*=g9no($TkJWf#s!!~^;Yjjq+`%LR>dsf7P@jn@6A zDIM({9m)5nYP9a(DCord4H~Wc*DD=sH*_c8uhVGVzgEzR_d^=Z{gD@DTir)T8=$N+ zvy|0+*p7_-M$Sxwq*KbJ$0@{5S-omwKh8o!IGK2NElPsi8RSN;C>&}|){lcVk!RED zzEx`@t67aR)7a$F-%G)hd}P`X7ez~M1#XI#+>Fw&0hLE`Z&$FJA9sGV_iyNRWg!JpF)f$kGD;WPJuCO80`zK`gY?R}6nj^9Ca@0uPh{LFjl zeME_pF2vf`%zNO0!sk3ajC+!F0b%Y2#>T_xq;Pn+cPD8c&UPKMF4DV3OUrh`80fo* z?&AJD+->?h9epeHaJLc0KyM|wb6vteN;EGX=0*N9{z7~5aVFXFwd*_T>ILt=jl4R+ z`?HZ(D|jCmc_G33lYxgrLwOwkW#mm4ygwRw(**CVkvCQF{$S)a3f}LHyavJhPa{vu z$NNT}mXF^Vd0IYxYvhGkA(hK-47|%x*Tj3zz=Klbyk8r5(=^_%3_R#Nj{Bv7*QoK{ zHSnP1Iqnw*UcJV9$H0Te;kdUAJd#C9-x&iBvRFX8(>iZ`ACf0{Zzg$2p5Xl`$wTr4 
z?^KdU-yj~|8%Z9LCwQ+Xd880I?zJQj$rHR+lRRpVJk3AUdF>ILohNudF!1zzd_T!U zb%^j@N%H6$!{dE9$xGz-Xp)!6?~x=gk>A5fULwDTlDtHIhm*WSe&b1ABEPXDkMf(V zOe4kyG&8uT3O54CRT_mGFXk!@!i^Vlm3r<*GlIKLxRKFZrB=9+!CWOI+{jq2QX|~R zP_9xf+{j3-GDWy~yj8-@0sr%h&e_8Tz}%!&j#l>Ed$rp~lm6C*rKN(H9o!gDn)|Zpx1%)1%;P z3pN|&$Q)x^wQ_a$VAtwe1O$3)lSYR?yFn}XLc)73#REFKX%*D-s;<>U1Mk%6b;N)j zlErD^P54Yfb8n(EKt~3X@7pw5e_!Er@_i=g#QT%EZP;}yePSuT%QTuu3V)J<jOV zfv2NHqj|X?9mSwg&k6t08qNKYmLeWr$7-^13Mq|nlDmM?NN!+8aSQ|je7A6OOdjD$ zJ|KH8?nWAb$)$HSehzo@v}H^YX(J!rUaB-2<&`m6q$}ZfnMRXerEqT|(Pkb;W`5H~ zDzv&Tk{*;%zOoSs5hRt712p&7+JZen^D@j(K}QDbx_E$YN5;e^@jj0n@lDWaiVyzd zK@(p0AE(pg54>aU>hAVi(0VjlaiZVSr_s;_jA$Z(9mD$^Nxu@?EuH;D0+-j+C)~hQ z*)~!~g?A}<`GD@crBBenUdCyVz)rGlY+u=z^sD4F{D3Q0HlipR$d1kkC>xFk9c(l)6{3yTs%H2iqjT3Yi^Zr!&~W#EB+FSYfxld30o zmW@OJZiJc1-9Uk#VWa97dGcU-9f%2%LV;pJ}F>F-=170n}C$$jqixAxivA_`GRzrva zfg7KZ*02;}v(l0hBCsmqpX6ywhA7gP>N`-*q|g*Y`r45`!s8dA$?0qMwDKG$5h8s* zV~HGBM6(8hKW7d#o~4%}g^52NW;+kFZcxX!4xagUqc4?lV~_5e;a3Xjz0p((!3At+ zAnU*G{*CTCQ2rouRBX2o1a~t>AZFLg*r&yZU!RCCSt5f0o0bPkkw8!$GJJ|n54SkG z$CDcf-<29yE8=QV_VyeLz75I1+sKo6bLPVtAL#E62JdAxYL1ej#uUg#05O>29Dy*> zo8enzJLdOoMN0Nl&IkQ3CrYx$m!oF*;y#oQd?}_u|KfyihVP-kwLZ3A_Zv_ZRZl~X zKUzaF>hLMi>;3tYm-(YrGb0&+Ezun2W4RUKyG|T+!L`iKE<17b27h!CYg*<%T;1RE z1D~RtioBck&iNS;M@4L!DABABa-EN=*ZX5rP8{8=ZdE5^+oX>zdo3o9#=`E_~Z47cQPdrmPH~ zIJ$U{ORiQb8NoR$>}Qit9KANkqJ$;6%Ea6;HLc~K}DzE3UvRO<0O3moV5ByDR zNn~^6#L@pAo<7GpE8~H{1A$^!NG;9|4X9x@;A=uTO@MBQ<)gJ`4fx{uj=`+LyP*;8 zg+{oCIUgt~bb1oUYLJ4C2Zh>^v;u} zVT&}UVg>?=Nw|52G`Azo^bUS4!W~o>vLn1aQgH);S3N1^QK2*p_)dBpzT@_9X(d9T ztqP8WB`x2YXV>ETbZiQ3z%58}ME0$@PV|=l)k=Uacn;HxPK( zLjqBAdvln5N9B%}Ozerg;O^Lm(d z{MRG?rbb8uz6+2D9Iecx7M|(2L7vux?(6MqK z*Q0~#n^N8%-kcG5kaCbfEKRAKjKB$_Z@F7~|H$S-*F=d|m>puNX>Rh=1nw`sd$2J3 zDwbL<8G&O(g|4eo;w*HBLdN&;J@Jc{lKnwp-lHklJIYZWgCgXVcgo@cy>A?&V6kdUD>Pfe@5n&vdqvKxr1YlxGxk5jAC7zFU$+HpS5!6yA8@v?yF%WF z*|PYg2THtSJc0ZYkDBlCWESpauRQR~0}g1moQmjGjGe#pYmvF(IdPve!m7qKF`x4Q zo9_#=TIXwwRY&_VI?S@wFJ?_*Dx*0pHYw^T9CZ5b9(Ddaixz=PzGnt<%A=#;8_gPi zVf8p~p=a5Ol3^Bz`?|K@&~8(0j+MQR%eL4XXWM)%b8v-oTx_Oe(m7@Ba7Od?rrhmq z!{yCa$JOoL-p9wW#+~D0q0K;CKOA5c?9=g(H|h!HZ}o&c+dK>!+onF@p!C?(NAZ6d z`$J@gH|CjulsCj)ZyrHdOs0bs5+Q8QtA)eOVzOrwu({Sr7=+bw{N!j zy_xf4r9RfMa#L}@HM-Q_+-D2k=LrX{QA_=AI>Lbm^23209&g1ln7$6yO7Be_3hpbi zsV75sshAN5&n5rs66-x&g8^-Ux0f*Xw-_rw5)o+9UAY+Hge~{S@ zHNk(7ISF@q(4)Rh{O16ryNCZSy`8yy&G5h7p{}I& zZ)XnfznL)~y}#L^t^)smv-kG#QPpSO@Ndq%WM*=boFpfiVF+-}yz*8uKmxi&I5T+< zAt0hrtSxU+RJ1ceBmu-p1cYq@XtBt>ZN=AHJGLM7u6^p>T|H+KKr4b)-E^OJyT0~u z*KL>W*86Gq(+8jL^*d)KlK`!|_w)R>e`v_$%TkA3n-D;_n3yNxEibY*6rnEP7k1Skyo!%W`$`PFVTf45#<$`B~ zdbL%p5ABcDhip~JdHdLbIu;jGkJn`yPt@s)j$)+dk=41IPJQX*Lv{N675Y1=@2A2p zl|A2FHsZ{AbxHQbx9;59_iFQ+oS3_DV`(lM@-aEc*fn|}tc7xYLvmUCp>;EI0cE9K z#%K2`HMxh@1r+9*zM)#*Tcg;sP}~b|aQ8~O_T6BQBt%$MHtdtL z!DCWZ>2b_y^G3C*`gwc$zDu7($0U3&hOsKWVb=SU!fgJ9nfCg5OZ$g|%jV&^emQ!m zW;ph8pnl#<{o+i$gvG{GPlzdQMRN$@kxF5jth9$jwy$E-iM?spc_aColsBw917Y2^ zuw7PPm5#fY#j|RU^bmG%RIZ9|m8ZwE!7*8kWgZXPX0u0H4?sQ^r|t>cm5eZ#i8I@^ z3jMaeA46E$TUN~6b0nnAYWAv{-3v9B4fjY{^%%n(=6V;GcMb<%F2`DT%Hg4!Vddr0 zjcIY-i0`P(;-1imLMwKMnjFHkx0M$vf+w)1$_wXdHMIt!^b2qd@yfVOFxC5{>2wM zrz~E&+`o8fC)QQwZ#;CP92|5Gt1o+5_)P8PR{6BG%IA^ zr1iYbq|{%wfeT`HNH@C|%m*$?<7?8Y3u}VaJb6vS(Ph*9jTvh_%St!iNvC@hW7Fw! 
zn$Zd4bf2r(z-hMW{3XW@>)SZrjE_}z`LPUZtfl7r)<3-ub{C&- zLFTsM=*!djxtlaCyC6;^dfIFX^9D6L{rgjF7;<5+(8ZbOeDuj}ec+ss0>8r@ z>UIz{glR`Zkb#|_Ry_JR(ZPTMy42@S<708=2p@&G?{eQC;LqvSb4;Q6Nv6y(WurS0m2s)_xMc z1Kgm6fGtPbl?v#Nk%iZm_AmqCc=N+tg^uH4w>Q3&JYvR*RWn?){x|Bt(4GD@?Ek-hrF}0(* zT@l$s!bhL&Pq&|+Vgzy`_AuD|#SPEwn!3@WV<$3XBzy;xl~bn7CSPe*6G@G5gyV@a z20USTnZ4d0@7_vSBC7*VU`r`ebl2r%RgRvreb18DJa0Yc{Fl#wtBxvNjIa~ConAHs z1Sd`KR(siKAowa1p*4uo4{vbghWd!s&+#< zVLC4sJ?y}Fg;`}b23-JL#o0M<_NF{TzHml;&;!8MrFU~|RhN~r*tuFdD>?^`6p?pS zt_y-AsxX6eK@}S=+jL@o+HoE>oB}Fm*+}%?iOrp#oRp^;;2lC4t)oFRe<|ZCXZu(Mdw8rKxb87g#NTXLjM_W zE6`v|==O9OP!?-DC;&gOioJrliY}Es`y-B!d$pV0Rr8DH zZ{_nxN$1eHO`LyNSyt|EJY2wc7Cl>gIl6q$qDvX(=HtU#0nL-(XW#(oT6U+2@feR} zLRnU}vG;PL8P;g;o~M2MnCnp?-lec-WosXo-$NeT58noTY3g(f+c(~Ix%pnO=6mYA zICrFkxQB4JG{`b0exsW&=#29>@f&oe`6D)f|u+2}-_kQvtrkY9{(u}*+Ja!H+lGs@GM z@a16HqmPg4gevY^kv<@#_36`2Y2h`>+rlTG$*M;x(dAUL%f>1>4lhBwxrsRZ%AdjE zUs^c4R`=+#;t|HbJp&%rVJ77QVWt3a7qDQHGmvW!+1Ym!a?lM#7L-xt`#|NN+P@7{ zCYzoxhJA2Fg~knigwmlzCve8pQ{Jcs zJI>Ojd3@${E5c_>mlp7utY6bsR|UT;wTEOj3|sw4!si(9Il}Qd_)~@3>A@a_Fj0(Y zC+1q#rVxi^&W>^{%xD$4+jfq^GWSNb;1KljURX=8hk(;bvV(oV=gI;u7Ho9;#W+p& z@0=0K;v5;b(*18s+~N%WRDF?f+ISAO`TUlKZ8RILZ8vawehaPe5qr>l+SD!gHb@UmNM+ ziEH~_1#}74ww>@ie>R~^2_HX1c&=GCdw#FW%2;Rwdh?u*_DJKot$i95!N+KG-^$TEW9{cp@1+RWLz8g*&dcrM1#1^SJHHh+&k?d1V%lRru+3pa%?_-m zA_sfDWa>%!+ig3^Vkb@iC0OnATk^cg?P>6oX-@I zedkJi=TD6}g~ptxm*m%)$1HPxd3=8G)%aY`n)`YX&mL$y znkGAid_vCqq&FfN*-PgivGhiW>|O4+{dv6+88-C>@dvkEEWMG(6<`Y28x_zS;1i+| z_)N*~?U9P6kYE2c{EMU9zaSk#wz#QBnlu*6gjdYjlQDgZvkR|{UJe*kUlBPx%K9>)a!QL z_E|$+CeCz+?1&47H&a)qF}E=j_YY_2e*9a(zr<7qkx_>{wc-Cfm5-IaXi%iIy-h=; z^kH@_BBhi8c2^%`&guu4qxw7S=NiRF>r?H93VW1CN4?Lox#{^{ij5kGCL%Vf`d#C( z(Ok(j6Jw)6ziT`;`qhjgNz6UULOEy6sbnvMMfBe`u+Wfi<6r+`lVkQvj^$hZm7+w^ z(K#9XUeYm|xo>Z>p|@+4;q&D=Ewc`PTRlo~(d%+?X@JEm2c@`gNMh;$BAM^-IIW;g zu~S_1`>7?8!Io6!5EnhemiW#}h{dWkc0oOE*K{5iRg2=H_&0QF>bw&*2C!DrzVNtG+F?kL2ubw`$HHrM-(AN+v zUGPv-R`ogMsKFzkwWmAvZWRhT)U~iN)i#m~iL5&aUp3m(m_-yc8y%BZ>d9E9s$H%% zN7u6Zk-^C5mYi^jvxnfN(|xaJo6{BE}rqw#k$7mbT}CXb6! 
zJe0>hccPqt{(iSZS!e53hNKqSXZ#QA<(OFA9eP#bvCNoj(bc+`YBriiTSvO;^u{kC zhPgujU#a@kv(ViOo4tD3h-j4Nw)WMe`qz`MDTjW>GB%a1(qS&KN~3Dzw@u>ABnyPW z&5G{y8%#OlU?CQj`vT#FGJT_jIHoASnv|6`R;zc1#+2`+p2KhOpQ21yF?&y@JNg>C zvb0yyG>j0$?s$wKOR)lYoA0;4uk0)MatkzyJN6An$ltusC(hKIEcigo6GMqT*)P$7 z64-yNU3IZ1yx~W+$O<`dozqH?%GqB+Tf#+chdJBi2O8eVFL57u$fco^$V z@B2^6vBSPwymqxlU+^&tYC#&KT`6PT5kqS9^fFqzreq_QJ&Vz4smB|HP$n#@`U@vqAAQ!YRI{P;wK{m1z2E9n zN(H|mjpS)h^wuE*f}>$ql>u>d$$j0?lQ^?Oki`-P+c7>jRha`HfnOAxSf5!(x`S3q zb_D!;#%Zx|>5cxz3M%YV!gh8kKI?DfmlaeVfJH=MIsO{#39%BT(LNCuQ5M?WYH-0IauvWTlnC_sMj}UYC-QB}b03x$Ir&tg%X_=%K7aeD14a zA)}g8ki9a1NE1o3FtrbKxVtmTz?-`8T|DKFU!XY4?kxPblj+SrqHoKH*XB~@cVD&P z69K2VjlMYyp3`mk&Q|EIzI|yqrdR)L-9zi}wBj4L>lS2In0z>hGqz|Q8}JDM#AnnB zh65VZLQO0Hn}Fo!LGK4HIqCvB68~(rT>%~KRrIUO-y;HcLozkZ{C$%RfAe@_`QJl= zSJlhRziqI2Bi83`%H#BKIAsM|_fNTmx^LE9?y45o*8D4T^2z19<2sedn zs0aDF6$)nw3b8nmUMWDzxS7r z?8~|-`NYo;IpL#5oa_&$-S%1QzeMDQXw2{;Rh zmxFfe#8iG_l?G^Gox^7A(FZTH1@W@{XAwFlPnL9xV>WT)-OM#xz~9HuwfIa6=Z!NB znqx@HKM5vk)z*}&Jk>2rtjRhu|7ff$WewPF^Xy8sPtp8GTW+%=4d zXsMD>=sdE)u_|*0?2ElNkPVHIA6iBH0QtHb`~Z0ygS@>2*}?fj_DEv!6MVtH1DcI- zzA(>r4|o8d3$G?R74$y981%bCxeYu)GPnmkv1hjA?$8`RAJ}Tw|V4j0`q4?|r;h!neNU>A{HHp)qW$dnT>EB@cUW)jE^FhNN&ENQv z=5JWq~`jZt$oAT}9(tdYZu} z^GU$R8}T=az=zc<7iz>gR24fDaC8KAiem=-!N(-~%5z%=L)r-^A{pCqU3 zx`xw|m3`2iKE+kQHNO#1L`MZ|$fP0}Aw&^jgQ|n9Tlp!Iqc(d#5bk@X4unNhS>S6o z>J8wBGH*z;aT|wnIH4ZszRaE3*cj{_q94)i!tJmHB>#e;8;fG0JH@$OH6(AIn7Gg# z?vkMs?S#`W0ILw&F473kWn z=%8)69o~_h(~a!p0^L~ZKSj6ca&5(d&ZYw3I$z&%$rk zCi%@C?l=33{N@+oSCiLl))&2GpjPPX8F%}3!ie}lcH zk=ML6)tXbql*GMe@mZ-eh?*ktO^MA*!W)F&9F%wsPbWM=HoMFIjh*h`!SR}+uq4B8 zR;il`0=1HhxlSw66vm=mj=?+sxM|91rhdJFh0e+sHvej1z*N6}6qQ8qoZ&r7L)<66(a!6MR6F;ZzXrb< zbwsd9x!)|Ae)ClL&G*_b?Ke|Y`w0Bz`e4SkRd$+wbL1U|5K7bp)MN8mwAB#^m2DTI zEA8;0V+W+_dw5+@`uWye^h~IoDv26vQY%k(>)i@`r( z>oR`Uhqc;(Iv785Ym2_EuWPG+^g8&JLTDfio(TgNNw?KxL&qYp!B88-ePH;|+*gGU zO}^?FJ|7_)4Yfg3tJBjTR_M87mQPJSH2KExq21i)-D&#P$51mwzoSNo`_w(C5qb~5 zAKS&;d0(1*b*c%XS|q9oqFN->1o2uVA9VK!<~v3{5q*x|xvjX{^r@lS-LyU*d~-d~ zrC00S!9hiiCUA4k!RjNef*c-{>J?YR@%5`tHszX)HFa9UDn#;A$jeSUxhOT1`T?}p z5lzfF4Yz(>s;j@NZ?)kyZdmW(`P=hAzN@)EU2Aw_8uBLX47xW0kJ9nSbyfOQqdNEd zb#mVpRE-6c1i~l|INyPAS8X;7y{oKYe!b^(ckDIk*Qr0i>nO1cP2kfoaPLUB`i6o! zAH@d0!zaP9X~?R`>jL?*&|Y}T_i;1y1?qh8N%$D*hIH8RW3c0`Tpq`#3w@{qq5Lj# zL}zM{i$|T$9;)+kvDiN}codFRHbFJQLLK=Im8kQ1{0ASU12J8O9&y}U@mQlAD`9I; z&o=EOY!8Lj1}ye{+;*Uqd~uu)`3|Vn$=LjhE_C?g+FChwu4Xv=a-HW_7tMrOjEYBJ zkYlGEH5JLm*7N4b`zkgzrnD*g(vP8+7t$Q2A8?MXfY!EYf~qgX{vCYpG<&NTbqF|H z2^4{@(BVN9eA2Z<7mBCD4%{h6&tfghrfR_?>6MB5#%k`b*uYn_3NgYhi2C_-SDJ-! 
zc?A`mVp!W7=iYk-d)kKHg4}dHY0$h+S2Q(F(Vsvju-3@0i`K-?1AnI)jPCGR32V*Qo~fCIS*ksw zcn>r&|CTG4pdEPHxK4(T4m*@$!Qc@qj=Ml3`y6#4oGs*ONU zgjLzqN3{3q9^?9TNBisW^&W#sI`l8umaUfDu5?V*eFo090cWcOVk>L^IVGzSPX708 z2T1ojsA`Ys)g3;EJR8x1(h*bl5QlDh2`A>LLmejf7i6Z`D-kcVL0U_^K@C1EY#HKL z@M@=mUYa~c&?7n0S+WQhQKaBP|3m9QQT`0=m-`tD_N=Av5MSfJUFK(yvtZ8Y`4Cd= z^n7p;Z->u0kPlSaV-D$R$n=LK(+-m9^(51Xesg5>D#H3^+7G0Q&S(ebrns8jl;u5Z zC;V*C)?PFZ=HOhXzAAedqjeWB_uJs9O|Wk$zdTK+Sw4UrXxgebb3PJ?hphS{q7rKD zLX6@Od7ICwRiXW7to>gweR58d$)A}t0dKhx|F~G*u~yGm12DpV6+fS=jc)BxmIR?CXyVT zzi7Tj;F37gl5tc2p~iN^lJ%}iR`_%(G#B~k*eiVH z#TS-mDZY+4ym^*;`uXpu>m{A^`Jd6vaa^OA&o@<569Ro77(%i&#l#Tkd`<&k2=>o0 zWST6^FWs<@=pgC*RA3e4&)?6_-=)i`KiX{!b%NlD-@#+$7&20w3xsS1%^pz2c&Wa% zkD(3#c#`W|CkGSk_pk+e4EU_T6iAod#FaMdTM5Dy#2Z(-h=0nAOp_dxfhw7FyJ}Uk3YUQjjAq=73A&p zdmf9dYo_(^Ih$WWjWMq}Mhq5nM~_+h-|}n0%XH@cMLPU4{2j=Km=CuoN(o zK=(}3E3IAy*a%;kXrARYjT)V|=?^3Bl_h_;v1`k?KfGs`92!B^3^owx4x0hU!jv~~ z`j*WwLtZF{M;+gGn^dQjUFqHsQJUWYmczm*(#PgE=-bHCEF|6ZDZWK#^0d1tavZG}*vwv^m&fa#O5pFY-OxGy+w5yD`bT9d|QoMFZ>PMdU6)P9);y!H| zeA*H6Y0X@4?OFbe8$Rv+yiYsnj*L@ZBF{iHut7jRIFwn;bHQPMI1p;uj+?2jE7tY) zFZ*hBKl!n2`_zP>48>Y?J8*goi06xBk<(?tpT=@(SNz}lKTY*Rdg8Fwf#cSVmCI*# znd{vo_}a?o9R z%@^?P2Q>cO%t1dXb9*z!9Ydx!EZ`@+)sr;OZYGQ-@EmTmj3kyLW8H;#_kdE>*J*SI zA7J&NQ&siRQ)+kA2A@()b&RYqu7Hp6f2aOAb-w9C*sFt0F4R7{jUB13z8lt;AP;#61qh=0px%38;rl$_;T;j=5kP%i)9~hp$E@uqJnhA4W#^ z>4dDDwBOch=5q(6E6to})WTJ=>&d>4^srC3zv<2AMf3459wVjr7ZaH3kv&nWnbZX# zYLkWOl+P_^Pk283rkT%08yeK_Uggj(vZ)+ z&~N5*mm>Z-Y2BC$IZHkl@hpqAIfbwmX;)<28`1AO0Y77N`n%_0Lm-#?34Fnmu7E{z z@&gA;pmkE^h8#JB{F^>c%;C%t#ZE}b;45;l6)SyM>i1Cevw{17J-8#K*jg&!1J2S` zaXS&;;53O+UKeX#kXtLx76cY41APY-cQ7Xibw{Ol%0QE7xlA}gvxqYVA^&%JmY(5u zAD*LeLAwait{=23o?D!iT*V@JT9SS}EM3X-zZ|Qh`aE}hqU}YV{eMzj(v^5rL zH$uPfGX1#=yEr}hIxRk^lcguhg5RWd6zHjHHRQ7qJx9rJ3tgjx5!1%+`%WN}v?EnM zDjF`m2Gw_NW2jG42bJUNOGZI)*xtvO9Ck`>L}pUWChv3qjh}&00{n;s5OaM!B}W7p zV_Q?VwV=M9ai1-6SW3p09BD>=iTpMVnE>nuH%2J$s)5dXmhVfmsJ3uw-zGCt8`i3! 
zueq)z|LcNjr!LTz(>{f51#XXk+Y#|?hd*$<^uzrp(*e}M60ff)-RLsCEjs6*;Eibk zoE4q9;8ip)%}#4$A-luB06EkJ#zs6Zi?eJv%Y@=q4dL%gHy~%a28r!2hL|hTcRTTZ zDC3~-nYQ^MDH&aIP;nA`la=K9*Y^Rl zMsKg5?nSnEUms`P9S<jnKLVo^{S zrwvAV$Gc%avStpPN082gV-fCynCC(AGr((0bWOkgRctmJK@*sx?h##07DPvaydA5=T z%DEAGqnICq;79%!)t1L1e*^Nsjb}F!e~9zSt31&|A$Rm_)knmW(~edTl-gb6SVG_H zS;cv?KA)E|d2uZ)V{iavog#((M!Z^TKZ6#cuO&ncF$06!Ry1xo?9% z;rH$K=tPssieP4Q`WEN8&xi(u%sypo0v)`lD6>W6*uBcTsVr>hcCx=C3E1BU*>;Y` zWrWlw7JGUnaaP8}Vmxc|#4(d6E``NYjxvtLSR=<`eCHx8#xoPJI7e9AVqviWOU9gA zoHh7)u^19gSgay0H3^H&*yqJqjND!xi%U7(NPa0BwK-jGIQ6}EQa0yEN%pgD;~?yR zlb_yn-OlOr`FJJ}N<MY*n6;QKMfUed$u`tt7TMpSmLmK6s~O*@ z6FDUpP`TTAeDfdHv*;>e!;4I*h&pC1weKGc?U?;F^Pc)rSL-N~L}_m#n?WvVupL(S z3%%DPpVY}KF|UQ~eVE0oD4#Sa{QkbXDdi7MWKGZsYg|YHfRCnra*dyOdmFT4-o?Iim7#omz_XamA zFey6pGuY~@3~!FfhVkfoaeHu9DGNk&)HrI+(~obH0JH*-X&$^`N?WZ_?f_71O-iGAD(%rSFB zJWiug4Ge5c>n?acM-;YlMA5S{?q$t3b41L^Vlx(@&fTofRBem+U2t^@-35Q1azv~a zD;Fp~bWppx;)6!7nu*i8yfMnq;9g4<)_0g}*}UzocOVuaBQ7;f3yYP&^@(%fPN-4w zKJqaou7Qes8}If|^oK^IxG~xIhj~6|J@l$>$6kx`L9mDTe1z9Q4KYRw6X!EC?!)gn zzC`&TndgH-cEmU@p8tNHohosWGw8YisykEH<6p23tEmphi^$d|t?M;#C{C8*tJWQ%hQrtM{REelIdIQH ztYN{bs3*iDHMqqhhfaFme?rD7KCP9JkAS&p%uTp&LHoPkdv9bIG;XIH2xetL#Mm&O znd>a`AfUa98ZOL=6sbFUm+%c z(EBLjGkbOkVd_8d(-M=;4)!CS(^(+M$a3RLEr^Do_>0eI2R+ z+x01SOY5Ii0=;pG&nxjcjg#NMNvTVZ@saD?XNUs_*@;!KZyQOnb6%cHa#*<}kM8EV zB$DlGc-)uMab~k6(<_~|dbP1O@l0PJcDV8x0q zzE(zKQN3hp-v;h?_;sd?_#jWn8|gM#Ia5M53($bhz9@I(xg+x# z(BH}(X)7pmu(#WqMle+h0fHUE~c|?hb{C(`X-y12cr7aO_UQ>+JwAJ2b3WjovR!Z?2zVQ z4|}?avOpg}j<=?&3~`RO>OI77S3#yOk@G=(1G%2XLtDhTkXBUQ6{Hn8nm0?|N0vu$ zoh*F}S)LyDHy(+Zve^Zjnib_@4oNqR$5M-El(l4&axuqXXVV>h%EgRXIhdDjq~A$4 z!7X|Fn*4Xj=Q6LR+BhcKw-vYpmi9qpjYzuu=hKvhGUW5(OwpNNH|@d9FG2pWe%fEJ#(<0YVhgnxlJK?a+K@TRO}&2gfom zh=?=M3c6I5VK!Sq&R}mKXOYiSn>x_SSzJoaE}yHooDR(MPqA$^O=jqYf%=9$yS4)# zZzmhXSj6RwaD?l-vR;Ym8ObJtyrFgj@`n8oO|HUsF7G57+V&uNASopFt`x)816a6w5_seRFaw30id1k*E`Jq8|dm8S> z9cYcq`;iIR3oq|~EdceVRAobX0=BxL$-&cAcI>dky!|{DPp4j29tYpmRM*PWxYsRG#^G!!BpHwOL z(v3IA7hd&bpRUQ?C&kp+)95+-U-bLD^qc=*^aI25o^<~i{xjnL zMZf>Nqv8dAT2MbK30F&a?i5>UGp z(ddYlNka6-YNj?T6~<5ZgzgH2n^`G7FPC20EU0VjGqGbW+K!B(#}-h&tis{pQ9=4$#4*AG)Ay?WR zJ6E~g{pjQGK99P3B+kjY)Dlz^EKGUZL2E2GqG>57s@jPP!(D>vnP3t@IET@kZq3De z?M6wiVdt~3JL+)%{<#VD6zD_x#hJ~xU7w)a_1{7FMlFs`q=>eL~J@T`^$AcCR*ND#%i7)*!!U|JUO(6}*~cDww&4?265M4}jvO zxvr5&(I!>ah-zfwQ8w)*TPo}As zg5tTTrI1lef!p;|OL4%erT7K}I>@YA3d99@9Jlg}`3z#X)rjHN@SF9HbW+#Qr*kjt zuE~~$2!Cpw{TarB%cG^nOkfR&-TJ&1U-P+aXiyf>H!iQ^6fe~51H4q(DSLlJLZd*G7@4>lnS1#IB z*^s&inaOL63@FozdiIR%DEerI@BAoz6j|Ihqr$))1o%>j3pfMH(Qv1(X;&Nf#PHT; z$5I!?c*kSP=4}ItDbpR8Hex)yaT zWpq0MF=YqEkb4kQrti^>#*H-oZiF9E{UC(Da*9P?jk4IojcbhVkWCSSXVoU%w)G)! 
zrx5(P6CFvX*GF3$)H`m_>!XG*IcuLpcl!GfRX$P27JUmeU8OHhNiG%L7o?oaKm{4y zFvl%~%F!O9+*p{nVLioPDo~lq*me3|oQDw6qQO)kl2jh2-(uIGighLP>3##BDjDar z^|ks_b=pEi-B<>v|9V9nwSRBe2}!f4CETgql@@1yMf-_zM|_~^pW2Qf&W+elc~x=UgZPrag4yL{31Sd5zx#!aQN0g!#xJp03XHdL#mF&y+Ibu@;;=R z*=sD{&m-T5RGrc|9D4aLfHkKMU zso|aG?gZX$Y2K1JSiVfjDm}^!{kOPlJD0tkqFvn+5>Sal-88?VRii7PFJ7u-GS4Dk zN_*OZ?>>ciDD6wvgoHPwJHTx$i(V>)SU`N`TC5HC3E&^AHYZ}_rJ3zwy-?*shMDfz z=LE!=V#1W8(t~!qH(NiB4#@Jhq2JL;z6DKXu7u|P7{Ad<{&w3C)|6iXQMNbP6+++9 z;2jFBW-*~*b zo-HuX;PPW!jWNW}@!TwHE^yoeT8qhZ=Y+{?ieN9azt1x!J@+Xz5bYSbzwq2%Xmb{b z4dT`VQ*tR^kg@l-kaSS|^{zDPi)HFLM109*rIQHA-Im0djFvRTFFPB?J@=lN~Kn~%GL4J2>Gg+B@1p^!{6MUT?n=q|bt zJ*-@dxcmoZ{v(E4!$C9OltpBf5JNX(KM{V%1;{rBys&akd2Hi%T&NE!av_jSeOqCMHLyIrYdVe~)Mf~ysezA&}hIhP5M zWR$weNh6P=BUwP{aF*#qI|HE^h^)E|30*b%crKs>bq_0<-;kBRVki_;-?YohYM}xZ z%X-OEuqVg@pfjcM5cyYzcA~=_;+Uixj&t4cAR`+6d*a`L_NdXCxk^LK=>#-fbcMH< z37Qb&H}uVzs#+G+fuXn)5B#JU9cY4k)fti>I;-WR6Oey{yzbeJ8XELUM$b`;E=eUB zFVF>bHu&4Z_Mo2;J@U8@9BddMoR63|{|ngMw7$1f?{Xwr1&ptP9R!r`cL3wJ;BT~E zoEG|IA+AFGBoKR@i0{d@z;qMe!y@4*;X5kso-D?9^rtx*zl!7gKKCm2DZ1JdqJF8c z0^M4{-)>t6G^I5JC}vrhyD%@-a7irZJ){S3M4h9FhY0UhTWEMPR~!!a6ke*`c?7){?D$IYCHn?=v1F~=tCBcRih zG+Xi0^IJI17CqmAaX;^~)A+OVQS)W#kfL$^AMA5{Uh|o}Ju!JsdhT@#ueZSd0c~6+ z&1W`$w;Nw#636Xuz9wT<8*Q=ZE*cTSQWWtoS@q&@I+#d#49-wzxf zb|=wyRKOV>(Y|HPam6~y0t(Sw=vt02yb6_2rl=FO-ZZ%MEGmrOdfyD zo=xNUYt|eH)#+lCA#OPU^bMy8PyeY|4*k5*MBw4j3v;QD1FDYWWuODuoL*InmgFQAKvk=|N9WX|&uBBJi9r&hhon?99S8FELDPc8|#FL#W^!LU*lmk{`d_ zAXFGE^iw<8?l_PTDj?8aytNs0^ ze{V7+O&~^6?`@N3V2eNVOyFt8bJ*` zZ!GsSTd zHJ(biWfL}ohZAwCnFWTI2>vz7wZQ9PZjJBd)_7lG{#Q7Hb!d+^{V)v+fc@>ZEf#ui zL0rgMLyH%8dcd=0-9=ERd=vP6k_{fD-v8*eN%m*>CrMc%z%=t2(^99HautI@f<$M!3OY^|io+VVxft@{oS~l3(YHp&P?LUhC_^`9V z2d@G@3403oS@gH>S${(vVHJ&QqBfnsh4Dqte&Ijx>@2Kv6C#CNcVO&4HSVmUasSkF zG~Sz_1+A0lO_I75UcO0ddvb%;*^))hKa2XFY=qmAWHU{|@WT@^JSaG2+*5}=HHG|9 zTUAMc|LMHp7d33|oief&_)R!YQps@~bk&o$^1h*Yy#d)bZ8YLzl6T~y%`uDkm~b59 z(q8CH3Bw-&hM&OCtv;SRT_Bc?rl0(j3t&{N!|9!K=NO+69O>F!=e=9o_R#*w4` zvzQK9V=-P+B*du}-;5Rjk@A?nDP5yel;EFQ2&D;^W#wlUy36X6_z*gPA}(o&I@y=u z^eWu_^6HfaX*{`qFZzL^&Tmq#Q{=f$!srPdLBCks+XOMohAquXQ)~!P%e@(lfdHJm zkAUmLf8cS$PSjemSxFo9 z2EDH+X88~F-lSNj0)Nf?>RyV>Q5viqr4j`}K?>RkIsTGr5A*p?j#k zxBK+F89{x(CIl0%2YXSg5f;+d8Wgwu@@e!lVex9zp(1X19C6F#ydG9izXMJRvfELA zdX6orw(3t2xBLU1`8wj3(AkLc89rzz;(7 zHh|oG8=7B>i(2%1q8e+y^BP?UGr2XlQ%%A{>PEfeis%}>8z?-46QyYzC!;LvtP_-Iw%UdoS6@4BJFV4|bHB;N)T$i1p-`1; z<4WC^n(0v4Z&K*XtUE^n;F+#I4OrG=lo{2{f3cyQy1v0qp;)94W6{CX3|K$s96m&Y zlgbohkRp&araD=zE~x%SSvdl$X`dLEWaV``dA{B#Tjb+!JrZYLt$Bl5<-yj01TYf* z=3e_%VlRU|fKE6}#eR98`7ErNEZGLAIVIl`7;M_3+y;cNY2F8+o+m=b9UhfQh$z}n z8>t{N=4=m^vwzcWE?-9ehBQsTKQ#pVfL3ykLN=+URU315Zh>X86`#TW&2U>FLUkvs z7XP8|3H;hcXWOCoDL>LL&dXIj(d$tMB}4^n@L;_Hw9vnw_v>l+jIYCIq*-QKv%I}6 z%ysaWwW*`_<}O8hmU<`S9?7$Xd18$GXT-W-b1YigIc3qFPSo19^SIxFvunk9I-k$n z7ij|8TkiZc{iJA@XeV!PtNTyPL3Hz9V*Ho+_zz(GoAcwROglo;rW!?im`E$ygXJfj z?JTC178#btsr54S%TZ;Tjr)Jn6#PUS+0MecV#TLIh);zOr{VFb+?J_^k8%NWX)E5> zM0a>N-7wcS@B}kycA?$hpi|!pfzHLE6QT}8r+Mf9^JC-Ot#QpgOdOf~;v0aJbdJP3 z;>;X%`AnI6J&W2sAW&Y?a1#8+fTe4giMT#C&s`MBS^LGi(75#ki97iS*Q82N0}r&su)9;-LJ&?4N1pGpKVIJ_3G5ScEqS+LR2&=Mz}u{>PD6{K<9^U z!*{PQL5#`?Z_p=Ed}@lZj(T*fqjnc^8Q)*b$G9(}3;%dqVc)m~2l;ii$LtdA_Jd>x;RHq?3EpH$aODW0M3vzT?a4gz+o=XR z)?4R%uG$?tTglHkaFpt`_&Lvq4Sut@N_ka$laEtZK!PuEDTNe`aGb$B*)Xqf1I}DR z{=nc zqns8u6>uP4n8(7({2oFYI!qSHKhLEQR-UVxFoz2H$Lh?-zh(K`&f$@5dC_uH?tvl?@m56P$km{TNY69Y0jmKLNQYtZ`F;zBC^6 zMoF~yN=shz^fbrI-`6Evp1e084!%wFn@_lay_oX;r}-r>@7rN#r@cwU7VMDqvy)_f zrJe5U=4BmzeqPqGL)ulS=#Ch@{||bR-x0e=*1_}RvQBb~cNR5hu^weO@}mmeB#2jH 
z?Gz6oc?C8{y~sbyj(i+ML|*;!QokEgpGBQ5@sBC>O#LO7_=mZ~ONd>glO*I_MxGZE z4{0ZJfoNo2zZHAw5=|9qZl-~Y@2>wcKpvfC$)$lif1H*R0f%nF7fNmV} znpog~(i0y{T!aHWUT@;S!EsxGa3BO62$%Df)6Rt?w;3=BVZrXa%@7v3oWW+;eMPa& zfakK&W8guCxNqKOI5*C>^=HU)Q^jg`_)I0Y8PK)zm*;OU!U;cQ>puT`cX$oI|%?5-&oCQEV#WUbO$^cd45^_xavvUwXU21%i zXxZL}@o%tj;?we8YfO5V1%HnP*$NZaKW-}&WP8%OA=|RO>pbbBKY%C6PMB(Z(8T3Y zIY#~|Z%&wgnJyJet6 zzF%_5Uza@c1Cm?*hNQH;mY~X?PRoPNzg?sAA|JX#p!(iUl$zyz=mSMQbcX;RS}}X7 z__0w2d$e8J|hCILhGnAd#wFLQ+8U&1ft z@8Io$Kl%aro#>##JDVWQP45JTUn`SX+jFP)4HYUyIbPp;x?vtJw4xt#5~6B!8&<=Q1tl>C9l)J)pxC4 zA8`fohKwM2=sQDSW^+@BvBlTbWftT80I1s28*x8x(I!;Z1k_xKQ*TFg;M~4I&_J71 zvPFkeEZELmhNi5RI!=-&+k+dec4IXfQ9~D|+>Y67xl!Hs>r^wwtvdC7A4|NG62duI z@BfPp0mS3hj0VCbwH2curQbv)nwI_F^L}-wy2IzS=!!6g35b+o1HLvT!d-mP{@>3j$ z{^rJwS-Su6Au7Jk*rwpU=aBQtjP_L93h{rS`&N(CexCmSN!wGcyizAhu|;-Q7Lw0# zY+;7(NA4Pqy{MgeQ<#Bm}JPO zdU4}5TbyUYdq>FUw_*%CvCn?K&(K@2M#lHqA%6HI?R6dAMjUUx(!EW2uIW9zFl-ZY zC6(B>I5*?O+V9HK1?M90z2ZAWn|Iu+Pmo^7^8$U(8NKj>xrm*M^R#s|7xsH+{`>Va zgWUIZOp);YrFT=k1<}kKWGUDnDWd5IiKoo-!`*yu1byWA`I#}`>9hB29ggsLhIw|B zqYs~v#5se=vK-p(QHS6c42B?vPTWbUnKc)EbTQAc@^Z*AI&yapLr9{VGS4WllK?Q5N%ZVH79le%8I77&FRLiWg1z9I9?c}*1=i+-Da=p?vtzsE}GbvFZquCNyR+zFK zKhMvV`X?#KQRQViuCJiaKiZMx@^fW{DaVO=-S_)-b@zCKI^MH|=g|`cbi(~W3k$g1 zLRQgVmZ=7;v~eEBog-;`*@pwfk1N2lG=6@aoxn-rsc{+Aq^jjL-enZq<##@=gl3ezv#)hvm{~i8L9_slhvdHTb zFlE5=;Q;zh#lL6qd|{15SIpyugwr}rx=xuuS6U@x1n-f6tZoybJbN$ZTfT*rdB3G^HQEx~8#h5yU>>GIV)Mnr2RtS6b~^i?36MKX&uVIJ%O zyq!NYE~|(I5XOpg1|Xk8s+?JoEXfjHci2 zeLeMRVk=2#On1?{O2#!#7PFfYg8Ep@`azP`MQbGav*dBYTJS!Mn{Xy&d@YxeuVIoe zoX;A(5sV}Pdl8PNq)W&|OF<^+%nIj-_qUu(k1gjCfxF&BrxThDXJSgk`aITMUM7;X zn~P+EpJ5O2<)vlf5@$%~x4k?}@AcT(VVfiq5r(lWnZP;`jjHx7uK~X?#a56Be!Je} zHPm>H^NlGJ;q*tQOn~2>NSoaDNo(CboY(l>-b?Trblhaw9ViRmPc(mevP_UINc?2U zM05JubDx`8{*}EPJNvC0eE?1v!BO&^72IILt_xn7LW6~ z#f!sg!xfs+rBvypq35O?>D9X2E4!Zf%!^vO65Vq$j2n<|IJ3ALKJngT3HM)kKN=Th z1AjcJAI<+XGW^a0`7KJNla=6|1}@xG@KoQ28w*F-%*1O=48RYhs$jJ3cuSh?} zGjCQ4;i@Y5DTt}sZm~R-N&Oz_-G6Sjm1&3%qb}SQP!5NLlhZfjR=|8AVpMt}2?;j` zB6^Jw>xyQTV`04K4mX_0SINf431L zzSUPB+i$CnoCN1V+Ll!ypPQ1s=#{!DMT>5ZoIl9ES z@%9@RE5X}udhlKijjiQ=#~RD;*o%AeE4bf*8>ewOM}Ef|_#O5dz5k0Fk+*bwlzuPy zlb{XyL|3kd4|1j^3qQlB3z3J|^{92WH~G-nZaJe)wN&k*h1#Xxq(aTto?bn2H1)gR z{yq5@Gr!X&MlzWnk$-{vK_vH-Q~AaojuN=H+EKn#qP`zJm7lcSLhmQ1XP_mZr;2@w z1-j`|K+aFxMNdDl2exw+Jr7yzySDs!X*V?f4*_|-Jh6#$^^e6 zAFscJUlFAmfR(6A(i|StO7ZL-YHC|V6PL??%e1GbuqRr{aB2g$iD(_PH$jyb(&ydb z>(GIsx(5D)0=pCM(uGISa_e}HlVX2jfllu-L=Q>qZwFCebGkcRtI&6_H)S9EiIEug z7s39N+_q2J^84F;cfps4=6#96-b`P@eBZO_OT@6Z&^2oD-d12=&^ywV-fgI<(G;UJ zj=H+(@m~WPfN>2thjg9}+>f9=@)~3K5#H4&=-FG~p92T^IeLrii%**Q*`lxW#PcK@ zgLFE48|+t{lR$l6)196Otc-{OE5oq4!y}cm=8%-Kn)ryMfyqnwR2z1(WvxAHmu~i}#un)We0&i`HLtnmCG3{R4_` z<9%P?Me=8gZQ499hF~j(yEvbaKFF_Wz_j`SM^6S$uIAt7 zas>ap8FGZKW8o$#G54G=%kx}4j)%Yc$6c0xPPIf)ZGwO2&->>luZ{cXkrk8Wh^_E$ z8YFtzAV*yhmid+?M-*$z%QN>?)q5Ay`np41j3Ecth`qrthvm*C3BKkrYhRXa3>mWa z)lYkVBE1d!IwswKJ!$aGX*c3Y8LnIHTZ%VwX_2&7a$Oos4$j)K!YULL6m(9x!omjE#crBl*`0TiC zp0|hRG(l>*APcx@8L{I{)+07|>`djXxg%T_OsRQ;!A-#&mz5hXVOP0L`~Bwh_TKJP zr*;&wb8F&?^WxmednowVcqimFCtjugN01v@JDnX#&ZXAvC!Wi%8*3?+7jD0UcK*>} z<;7)qL#0mk(i+%HU$TAlnW%e5Jm{3PDJ`Mu;FHgqi|i$(PIuD#l+E`-i7B#|{^uln z=?TNU<#_;+-WsN~zSc_C(Qqn)x+se3M$jc(8y~#;J~r1u!qq1x^}$gsek=79Y;Gf^Zmz@Y#{3A^$SQ#5^rI3=-DDm zhU#?!F*n6?VAarV!ZF-6R)u<)UURpY9iX7SsWz8-njzlGVtf! 
[GIT binary patch payload: base85-encoded binary data, not human-readable; line structure lost in extraction.]
z%|o%`eJobGQ;LoJ3S)s*niYt8;2}YY9%76Ye0Q zUrKb#DGggdqr5lq+(A757Rp2P3sJC^0Vl;A{koVV(q}RDMh#md1Bfra?Eijyy_6ql zjovG@MrTW3n`;f(N5*U1p;J|%v3)_Hj4e(q2oKs8gqeNzaOLn2P4#b~9rl9I$v}PZ zi{25N7lq^2S;@nA0&z&&3*m9nO(%bn*cN)pdy5>8red>BIwGE?VchQvFtTr<*64j~ z=E=>`1#E$wl;SaK(CKK6&S2KiF8iUXR1@wx&D#fxl)0* z?IiM~>*adMVwrQ&5&ZI4DDQ7xCok|b?|D3N1}pv<@}1$h)E_Mm_xlfqWz@b9LuFfF zhs3Jbr0b(T?=j@viLmed=NCjqNDBvchuz+`Xq!|&t|RhLSAE?h!M5Nn%n`W8LcZU= zggfzO>tk58EXMPT2W}neMXdBG!~&nKVv}nS7ra)^lN|#N|Mb+Ed_Uy&Y~JInPs)ou zk*LLdD0=?ryvjk=^2dbScm4cNbyWI?9HRyv?OHaxx9==;cS6gtFgeV|(+P|DAowc<}j!`6ZFSte|vnf>-Z}j~3L9E!Y+bI?H zefE8sr{hRJ#;j~o;&1-I??hP2`968O$q?OZwpb{xwV7hP$DoT4fgHE}48`hBftu^= zX0%c#Ej4ziUxF?Uobw{5m_L(;v5$2-#S#gPcuztXSJ=ESc(n)X`6St6vUZ5p)Uo}c zn)uxV?gq(P;ghIb`=%$M1JDkNUm5Um8kmpa!=B-T(8hdh@5|*Q?2bvS!cH@Ovr{@z z&?@bhCMC}2y>$s~+B^!AggDIWwr`s6XZvO36QBD@dKhtG>?&QfL2qGP81r7QMQ%du zFtd<9ovMbN*-?S@#{*sav38uFQ0un1X3gxfM)yj7_tjj&u0XsSYm|Jldb8Z$BX8O} z|02mAw!as1S7#67-CXW<+h3>qN5OL|c9H3cRqpSie?Cf{MMSn=I;X8&-?K(N!JS)L zYF{qzk0?fCJ|bJqJP)`ryT)mZbT$BwhJRc<8?Pbbh{#l$1xBn{T+ zd1;LE>Xy0gG zc}zS$54>)sH7Vq=MU+Po_#~N7SzIoVN$eR-B7P4fQgIfMvm!*!3S#luHzl63jAJ`y zZh(vkPi>%fJ$zvBs%i48~>ft%CQS~!a^40JFrZ5!-`Jd(%<(T{}Rhn z@L=dZt*fRb$ZpBfBhmxi3lh65?_8RnxJTtxvnQH0X&KQiQ5_opTj7ro@6;};Q{$WV zrobO`&Gu~?A7#8ug(nS99BKMoKelJ(|Lziv?ZQFtiK^izyBD$dF2ymv{+Ndqx9scQ zlOM)T@~-?sRwQ3DNTbcyRVmA%c5h{Lpv=4QuU3qbmkqDoJaTAnaz6TW5yrz}Umo^k zKicn$-Wa2kNRMOJa2oQbBENh9D=%p#*7b|~V8D7u>*bajd4+vT6-yn7BYpk*XA=+o;V9V`eowX3{NC68KyBCLnr)|NsdgF` zIy7dm|C8A0K*mO&LA~=M>}XnFKrxWsu~z%Mk78~NO6}^{7uO18buYui$JuAjgJPeF z2UYW_wfk|Vf&IPG?I*5vxzgHTct%3Y%+65Df@+EXJD0UyY%wX z#v$|qe47v6qjqwOETyK8L0s)a*rxfB_|Z6Q^1g&xn`I!r8#Z}F&_6=6$!y~zE&C=o zkIovQSmgV`Lodw}S`&rO!EcTy=st~Y%*%~>yZp|k4!&M$AEDV~%mF`avbsvJMu&oL zZEmT5u^g5ME3GAViVuz%AYJ!gtDfBqJ&|pb{i;n?p8plM$IvV1Nrq&P$1laaw#4}U z&A6SZ(D>H78{C~=wN@Np*TLRUyBA?4qRdG}@6W^D0r5_H|5@~^RT{L>s$>jSCBAS< zA}qCw+Bg?ug2r09{ZcvRvDSXQT~_5VA=@t7xn179sU5oioi2=Bi)M?@64$jamXq}z zgO##1c;DP_uHv?t;sLPD^??>!S{|BBR@NwKo1C>i8D9Zeuo$8MHHsv{BOr_)NgIDO7O-B$7+jreeY?oI*TBmkXdykV+1?vrd-bxcO^_r9xD5?q9L?9Q?byce z%pn#)2=h zpx8*37!zGwN2@cMuRowuNQziSGm<1>G^t{hY~vWRxhV4$cQ*_99I!j2|8nha6?XQu zIh3)xA6yMhitB*WpeFp z9H&pozTxcXPQ~U@+p}zLUM5{f_LkNqnqKPt1ojqR zA))ei!h+{3(|iWXM{9OhnrtFCveh6@KDN8gm8k7H4k>QAaO`n&dpS`#15D+7i1xuJsCz&0uIj1Qz*8NO(CTtYaa%ro z40l9{AD5Gtw&52o`zAUME79X6Uh-AH6TqF2+|?gF7^8b6c|vkOwRZX`x<3q?s>F|% zaIXTs;5P14_z&uQ)e*t_=E0lrwbTy6UsZfn@+tnRW7(DqEvQ5FRX@50eYkZW_f>JW z=Q6%3_bcjl?k7KWk~8!a`4wZ}r@qhk>xnb1weZlvH}PeatWDNvN*aYXGknn<(BVqU zX`hqYmu@foY^A;2FIU>D#LK-Q#P?}#wmZMeb|XG)jlt)as4b=QT51OKD?2p5l-f%k zJGFP&79KH|WQtv7`{hEcT=YZXgQ~5ZH43kk?7FQ^tormT{%yL)~AvTr2}|4E@-crH&1#4jv*iPcslk= zF;YGQXx^%^V|DH-K4?V7yk>m=qqy<-x_uEkUpajbyc0J|N5({dQZb|;&+)#)X>KdoOPHP@Wv_< zWiei$EVASIKt<@lStNofw^;}aMH#?Y%-f&1)XLeyI0r7DV*K(4~g$2R*Sh!M=_tM)w#GYk?7g_zZxFubM+aRFA_vtR|#c}$p?DE z2vq@)_Pm(sOh(^yRSO%cPV=?Z*)(nRZ-1y|6RI? 
zm~(2Nx3%1V05gjg=c?h=fx|fG>itC->h>})3_fFu(^C*qfj909X6hGIU<>l1`T@rZ z-$VW`Ukz3baJZ{=r=IF_>+sd4(&3qa5&NLI%MYwTxF!UY1ijUmwL~Nc z5hHa#WN^OH2BbAWa1beyhiKQva`sfkRfn__QJ4>Kz8?sZGk6YOmoz?K0|be<4g?92 zBt(#ic}u&MG=VSy;^R$3PKYEC=YjmV%-;u+gx5RwXx4R{=X@z3Mnri!cS|G+W?2cO zNt1;{=|}o2{UU0Gxmmdjd-eodv_)^O3g>8dpWPF9S84NQ1Fge4E3x}~R-H{nZF2%C zsi}cy1&ZP=;Ekfw{Qkdfh|YL_4lxo<@QxS7uavDgtKpmN1HK}GxHXUu#oK^>KulvU z5D>A~yyncwl?A@NPET*DnbG43tX49+U5JJpYFgj)aCdrl$4<>NKIZW$3h@+1FrmMP zG|kNRq(j5aBVt+4ZQK&SfV21Xt-)`NF>?i;Vh#K~*W<@iA5*F^Q}8$OMq{4Wm^&N4 zxV*O(cjxs@>UqC$Y_C%Gm3T^n+4MpEf(`er`(^o8;y?8}1g7*5IBfw&3d)z78Z15R zF>B8j=*#CMFycM>-yn*o#HxXsr~@j3^FWEC*o30#D3Y_)>WjnNJw&${b15=$X8 zL96alN`avOYNFu-=*D^O2NAOh)kkGCzAXAqp6dV0!2h%4<|gXNu7%)~LEurHd{;EE zH$}wgZN1ozE!da|8Jl+zKZ3YC>kN#9tnFQy$Nos2bU}Fy5!Mrk${)e_tu<~#e0hqZ zEn0{+;P>Ho`~ff$c#?C#jv7KER$5>q)ek;(DOn%hJ>Nln7;ONZo$bUnvigXn2V1T4 z_(GuvC$~@FliTMUO31!p>A`t!_5r&;ytK(~FS}QCg=z-S3&fNtn3E}O2fK~z_{#`w zl-wtlvQGfFz`Cwebj;>)0y|>03)_G#WA}<$!)haP5D{0w$m4S}Gq;M?rS6;DZZ4nc zu;VZ2OR=A4HpD!U1CVaOK_dP_tk)VjC-FsaWX|}sN9N~`ztmz(7|?zB&UGqu-vgM( zXW$>2Ya*SHI2lR8lg{43n=AryzUd#~O!;D%F-xHegTxW2`J00yl z=)RKbyfZ(tOOlm0aFU{$aOw7ssHVW8NxLvX4jVyMkl!S8X0Id<2rqKT3TU-)Nkff3^-b z&NkRK+K$W0KYW~CKe~66c^o?LJV-Hh*KP5-v`po$6hkqftB-3qk`hu5MSlGJ;gn1z z&peF!9fCFMsW$`bFq_#(P4zb?DEY3!o8W;4k-(Sq#IHb7?i18W@YJ{C2l0#xzKIL? zu8q1w`Jkyj^9#yv7%P+Ycf`C?ZvHAdW2wu8T{7JR|dqaCr|0Ec`F zhshqHMp!Y%6-kV-`NwD0X_Bl{sO0d!ipQW%JL=o&&G4USugA->S+oBJ{uKlUsjfb` z5C7qQEAmbwP@w?x{KhqBwlc|VG!COrDq>9xBlQqs^~6aED@;ZU_CacH#3w!v5 zDrkVNybfP?LUA}OTSZ*YxGroJ zaednmTZJ;*whT^SOqKge5?+OPCPWs3rmqWl=Pvjr**6LiOGvgyI^shu63AQai_oKb zBI|^8W!rPfzwz2{`5Qxj0a$ZI`LC2?j8ClpF}!n)wp1IN<4&Yq`=DV|&dkK+9l{pU zEqu8STjtYkp+CcWIqSTmz6v_~?Xr&Y z*l%p-x%c1)ESzEa|I2k?Q_yZ@@Q2e^TsFBkT?wQNV8`L*(4xU^_#Lrc2oPFqU#ykz zxQ&H&NnE&P%bac;)a~o(?=HBY922?}JtoWGlHL$AQRoJ#nyY}=Eb5v-<(9$yYt7%wh#V1KW+JRI11p`FyQf=o_gsD7Nj#oVg74-gT6AraqDGdFqtAI; z!;c?XEPT#NpmpZbGmH8XiBp(c=7o>>fxejboVAt~yvu*Jj^j@CRXONqZpT_$@GgtB zv@k~xUgp1AhifUS?>XUTHq+Obj;EX>`kJ$-3#A-a;J1vq&SaWukM9Ziozo_)Qv+>^ zxAzoeMuFc{_Jq5YXyiajs_(6o>Y&53(5wz(3RNXO{mZKVCzy;-=!`avZd?&ujg_Dd zrDyu@R#WwtxZcw@v3OJJarlw2=0%{=EcY3)J>lmRHE`I$89PnzlD-CyXKLj67gYV7 zFlSCRhOrh`C|9d7E5nAiG?b|<^{49hn(twKr)sDX6=ELRU3eE}_e45p-%r1*9BbBk zSmfhf!n52>&u^pRwB`s7dYI`6?u4gVtaR1H^|LT8c3N`|;c<4Z1Gnx3{+%A-et4ca zXAmA|F{cn7XECR+6Igs=1p9Z<_Qv(yiqq!uoLfi_a-WzvwmhP!djhSoRl2IJIJ+nG zuGR)?O89=^SzCTKHL!flKKNtd3oV9kcwBVy`rw+asliHx9=NFPQrqLzXYcD-i+eiY z4?cmj1wmt6dT(ksJ;8Zc9RQx-rbCVEu{uDS*#S&xo-(`fLCz0EbO3YK;&+=Vrn9H2 zuRX3Uav?UMnWMWO?%`~@qfVc)E#8>A1-52p?Ctof-Z5tjd(Pjt*$%L!=iU+LhnOcZ@f&!dWpryY%-ThD`?_LM~~>jV5-l_Kb_E zzEt>oYcX$*{c5%F)N$T9{JRQgdK;l{=ugHg;roVPw|MGRO7+G7y`sbyHU{9+#ayov z#i5+UJK@ijD{^tR#M44+keEk$WcSk>%T-X#$SaEU#x}l)JM}Ba2Hu--of^M*bJOGz z-zznRDZW5kDNmxbMEw%4&4z!O9M zrQ~%=o*(Yld>b>5dW)+gOSJzj@aC^A>e3POr({9Q z|J1G_eod_R1-ahT9u{-KF#pY(bL1NEYaU5Ys-N_0&d=GZa|CR<#xjZTB>Z(p(v$m@ zz?GvPu#H~HS*6p5zb0zUHR758b`P-qZR`biWp5aVxkWj{zq92pv%WRw9tpV~Yu*x# z=40O7>D{5|=#F6Y-7ljaQ=F5|r_2PyW!Z+#Q_2UkM8>L?@x7YJ#0@r`OHIee+$8Gf=D29@Q zxk0;~A!S=bqjMTGtFMH{n5%jVdOY=oCT9ob_4dTS2FAD+U#^bw%GaG-foare5F2vF-=oX$oH7o1-Br z!aT1@&LOVN{^wiy(NMmzkn(zhX9T`A^uDn!M3KI~{?ES#YXf>+BYl$Xpp8JQ#di?H zW*o+OE8m%Jt^M{(ya85s&Ox-a2VROIt|=hKD)1uUJHQv=n-uT9BfC`l?n2&`{JnS& zuk$|pyYc$0-CtVB^Z161^rr{wtAphj^_a;ZS142?O0kHt=rzulHsSjV-g6M&lKdyC z@5P)Di=R!m`H%3Yga61iTkxA=&MNX2|KS`D!O0h923y#A2jPcet&U6X^SbC?EYm5R zSM_qN^I4@ZV&DnwmAuESm)IHjHe_9a5@K1bx|09Mx=Q{NY`A+j`48*)t@uNcDvGU)Hf~RPf8J<2Z~xvtE2F zU`E&$x9}d^#j*7E=v7)W?_pcpB^}Cpu>M@g<~_<*j!p8mp}fZs-`h>zGeh#8895#B 
z&40N)0-IvO;god)yvGnR9zKuWON`alA-rdTh!O0zc-5!X1$+c`xnuO*hgtUdz`d}q zamE~dcXE5Fuea^6=r?z8ej@l!e2vhMYMn)b{}h@9JvP@Nw;|^_)JWbl zGS4{$-Dm7sA~YxVko%DD4DuY8o*4Uw!gof{cZTtuLuh4BoZ8N(@g3gv`{X-BBmCR( zo&DFWTD^7)-%-QmfluQ(1NHnj;W=Ev_TQ4{Ec~5$4%_(;mFKYRza7t6^KZ&?$YaP~ zCV{_H!k62D*daN^p1?s^#p$t!O^eTrufxi5lFLx9`D8vr-6mIfb}ilJIQ+V!6pVw= z0Dpn?oYl0N))X*XSvuDI7}RUTHDPJ-8X_W}Q?CL3@3Zuuv7_K&{nPb7x<#)67F^g< zu=1$aT!o)>m^~$t)_;XJK+BzRTl^?_i$0*!9ECo-oxFZb(;bsBg8WmS$Yurvf#SxOQ8dE5flvRRg+=8gA9A1BPPZrn{vcqih#8 z)BtW8^#f0_>PH<$)^9+E+1e;LFrH>4Cc8T`vmMbijZ- z7j_8OOBObig_*upe}SEtWJ9^EI#GX71U4G)0nT>>{RRDn^7xy>^cTCmhv+WAHze6j z);GOHcS*9FilpvRl-&V;sHM9AQ~Sww(=gq|!uIZ72fK-1Mc7Qb@nx2mHq){CLDcPU z;{Q~BqTx00gsm0ZE#2r->J427^>#zo`IfYqhUzRnP0zCS9GIT}z+Z1}5$Uljf( zrKokx&ld8^*4>$bij=#0GlH$cN9w$`JkXnRAW_x(cw$jsqGemdLij@~g)g)MUeK$s z;10r!3oF+_VK;#RPiT|yg#KbF{h(Wt_7vz9eo*Ww5U7Bk^m+I})5NXznUooSM+MVU@^M}lo_kzJx=t-eeSqWD+5*|i6|~Wf#)aOCu`Th-pqKj< zwfxOWZ*6Qca;lluozk3QnaCymgvnh`jt8TuDB!?#~E!Uq&1vQIIBZLrE;QjGA+ zF(Yy~X6S#U8@?~=hW`cK2z2O1@GH6z`l@b(J9Q&+L^t%W#SGs!fD-*-Yzuz#ag@{W z|9Si|?~&M}r~}ZWaqE{id-IE19)HApvH1Nho4iMgd!E|ptu21_$vW@FSuD+$@HFn$ z)_ZGbp;XF#=6wlmgEt@fgUSKs^@Mg%=D#TO&&vEEnQuq_IYA#(zT{EnKH)tw>v^2( z!m8J0$anc_fH}?6*h>37j=9yiF7u89$XB7{kPARQ!P=a=&U<9$TJOb~;QNj*BLA>A zf96BpTIBQ8XOVx4ttvFdxes{rXW#FwosH+!cI5B#9+_S1y@-6j`W*5# z-uyZDdTZzK*PloJ9`BJktGySIuTk#y=FeT_t)08ldvWdx?~&5w-ixJop>%hm9haf? zmZI&Jpyd{$-4>zMfTKw0jw-cZ$e6*8AL&__rqyJ!Jj z27LwBi7R>vE%io9k@6(o|AfaicOhE{cDRjaTZD7A?0VK)JCD~E;~ZO9p8xe;Z@xIc z1n0lxJu>f$-i!0_mF6Gt*3RGWJu<(|dvX3gZ~pDA-rCz+yhm=YM7J~qix#JHvHnd<@vjCewU=HLGP6Ga?nkZUIDsM z(knslkn}3hPfPl4(Ay=w8uT_v-vfH9r0)gYAn6*=^^&dy9hdZdpj{zlGTZBZ(m{)U zfONp3*O2yG^n;{*7X1+Eh$7nbVT;b-d)AW1@(`rgkq!X|26{c|uyy|iix%hWNc)t0 z-m{T3)^;PkiFD9<_7Tzn>-=Vm7Qg!_>9F-%k6E;M_Hoi7>;5gIgBFeQ=D((^qll4x z4t)qXySdMMZlBxkSs?lo*8rpMB93d&Z+0S%dbfLaDUN0wr+M6ibN9OE++2n8D{&xg z#&H_-=^D^0-E&G;;~wO1G7tJR^EgWHanEVS^Q9~5@qVTaGTkcEZ8F_1)2ErvDP6Hc z<{M?&B-5QT-6hlAGTp-z&o#^ZGcpBoQ2g#*nYPHZRi?lbis#y7e!ol)$P|7u@%$HM zO3uZ4pnms6G2bgRRo%~dfba<^PvP2lxlYxSp6t0*@%G@nyzF>tDr3U^kSKL3$FeJI zdae9MmET=_uZVqalL{Y57SAJ!l7QYQ?q#g*J#S*cWbE`)@4Z;U?}`2~(G@bLW8BRW z-#?I;?(&(_`K;(05p%&gJj49+{N0F=eL?13;Y11IsSB_s;DR@BoWOAshv`*H`jEbb z1M8|T!1|U8p7I`95XXVBRk9W7HXPe=JdI-qjz%09%OyLJ?!vJf#~vKbIG(`)o>H*=xEO7Y~)7bu$CFnbXro6#< z(N`&t@T@p5`dTP4f%m(7i3P4mVj8YZ06hWEX1PK*f{Co?l#g@B7v94>N6z^uoLt&= z4)U*XN@?N((&Ib_ntolzAxNJ9&A0<;+Sx}XZGz@nWjs$T#Suxr1-e7h=Rq^V$$Kt> zrU!{M$1OeBq{#>Ab0tk)m5?;~)ue8oCC4fh6nWM}L6K{D1x3C!K~Use1%e{)8ZRhv zuY5s~f8_~^9BiDR$iv18id@ViDDtr}f+8o&6_ns*f+9E51Vw%}T2SO@If5cj8`X_g z-A}GoIQiy&a<#(Ar}vSoA#Ed9Lz*C0!+ran6=`!TJas(Z+Ah+Yt&|+N-dZ^2=JyJt z&ELYiItr&WcNEUJc_7OyoPKj}VbSRW^e2<=2|s=!yBlk=CH}W+`IHUF9cp<7Fj0r} z>7y>8{hd3g8GEoU+N&RD$1{5~%)0vQfRZ@jd{*LC(&g;P41rs@hG&wkv&`5~>`L1F z|7hC`7zSyZfsg)9w%JD5X6(}&Pja@~NVZwRVcBN9%d*WL8__m9C2g~dc+ai2S=We` z87;DYSZ1`yXqnL>qh&^mjFuTKGFoP|$Y`0-BBNzSi;R{T@gKj9Wwwcym$cAmnbAU{ zWhN{%SZ1`)XqnMMvn;aShtBQ^>slT+g0{u=yk=Qo|60pysMXa497;+rZ7yNO1oL3mCF1L$ z8#^0dJK=ww`0wAm5vls1NF4&pdrPt7#=h%pRcEc*X1H9O=2|3s>vzF89^7lI)LB1Nc3v3ze zWs#?DM!pI*%$>0G@U*%NcI-0PS+H+rE`fao`)1}M*j2EBZ>xkoRRMdd9ClP0>?i~F z6Rh1iA9)jVu47i?HO!d23T)05Z=&=vc0mM2H*o@Md=UFYo_r4NPi$@NT4))u>?X!6 zX&0T8c2QrFUGxTM@_pJx_n_v-F`H~P>>|oK@)6W+z%IJ%aoP6Ihjr2}x`K0TSKBVS zU)lm!aef2rqV=$g*1;}X3%lrH*hLS)E_x7l(HhuA55O*Z59Pd1K64GvT*RD~TA9C& z`~}Q?f&D1-^N*0PgI#p5%-=wMGwdSh=E6QOQ07grkzCU5C_|p#-?lx?=1vMYpKm$ru`?=3rldH~8K-bwwywmqmf zfEIP5K4IG))Hy&4+k?6Y>L+OGD0q*cQC`{}@aXnykAU~HH*SXQv1yp?aWAfYqU|vU zw#V!dY>zq8_Lx1)_L!4od(5_Mj~cw6I(scs*a-K@{QWY0K&ESC`k+i7Vv748mie_Z 
zT_@A^GTk84k!*r{aLu*}R!f`Uhp-90mSWokYgP7RSJ2D`&l5HR`?F}ruvucA+c?xF z(9I&Ar%eDqg|G=;^O}}T;8&)*!p3y;xmn`71QHWnx;c^W6yGdjPOx=g>e9H^YwOA` zJ&{d)mpUwUTI#q7*aHWVQa`4?d;|yeY#{MU?!|A{;;6v^f69XUalp?~vIZ&qFAE;R zf$_9pEe?#YlJ!V2-WJs1*oXt(rUe+6LdR!$*}qs$_BmU3W?!^*XZF#}_-)?9zH95w z?9;aH%)V~x&Kv`^@_vpDTX*J|v2|yTC0lpq7+WIm=h(A#XO2l*cjkDtb!U#{dGdaa zbLh1~|0NHA?ks5X2kN~nGkFJSah|+~x-id^KS4(p^&Z>>mA?_FnIKS0Tc`}b1j;r_jpc7;<*_mZy^ zPCb2q@(uZYlxxWEL(QqFeh*sv_n@P`fccHzrv{&8j_RhanyjZj>a_IK^Qhf9@Oz=B z`etPvhkPlDAJV^rmlo@pnb~{atzuF*>mmyRJb~qs5lpP!+$9H!%`*1#6672G;pK=z6d)W?jJk z9_NA6KZn&a-U7VFY5T`d;hgOs2j3L>i-~iK zscS_nJ*RK*o?pWUEA+FsaBd;*k$TiQDwIi z_0AdQ>!rRmX9QpG9Qb;nZ&l(qrO)`D3j8+oF6eKkSCrv=6nh$krnM%@K?jjK$DfZG zN&3+Z$geH%nV#}w$DdGb{iseAGBjZOM=hVJkg*X<4^s4DK2u=)ZT*Nk41K1UPhjas zA?QawVe#;}Fgv{ht*Fkoq&DF z)<4+4Y@I{&y`ZIAuwUAGiRiP^|Hl3d-9qRe?C;Py1kJHP{ei#Bv4SxoXi;bA8axjj zV;b(6=92z8+%t_nyM4gr?1s&g*E6oSa7O93A&V0edvbf3j_GlW)YGFe-9NU6XY+e< z#M$vZqmh=j7WD92^U4HVZ*KMSuGT{QKix{*1m^^%l+qf$sF?6#wXv^2p0`w5X&Yl= zg_BEL7KyaEZLyWMK+eAcPt|wmmx6Z+o~gpgr;iOv{22HReN$gah0GuCfo-YA-WZVi zKNxiwzet%sH!Jf@hKZflLdxMa>d!{>Ns(VA%Y5fBnXfMRkM>Ee>9f!R4`J5SzaA^F z0rI}k+;H+ib0k@R5*(efzR>*U-(>vxgvL#_qz~MyriRDq8zoL)Vqf~%Xw!FvOl4LR+r$whfo$Z54J2%k@= zQ$e}0VpN{5E0TxRP?n522DwUUOR0SnQX4wpEa)V~9gx~f&&-Zrfj8woGl71*55HX4 zUAq=M3m%z?W3c;>4by**DhP!8`Fa<~&{-LVQ~u;$2CUIg#p2<^gt3%2Y%jB~(w ziTx{V{1UHskk3Snd6ulJaUT)+M202nYTQSe*M+QwEUR%JV?Ly0;t=`Qm=7ECT|(B? zxWUUZT|RR>o-c3(4Hx)T0p%ZfR;kNx&H%;@b4)rp6ZUCb+s1ctuL6P99voM=E$dc=*pas}!?Oe@&hZ9xhVwV)AD4Wb)=t zc*6va3wW=`$}OJGbri|d$?H3jGRi=nE^uJr=>i9a^ih>Oocy~oxLAw&9f8;5Z{LNma3z!Skobt4m_eY`6Ko*4ws zj2HzNrSMIgIhZHsg3XX?@k-1|^mpbC^2*aY-NiTW#+p?YE~FT|vRLxUV%Q4B;FZOt ztA_ByN#|o@Gp+e{;D^4_3GuQY0rrS5fEHhiy~LDkZ?yS5^BIu3u5MRInONdA7qAr8 zUF%fvr%^14D`-x1g$+iIv%w9)Tg;MqUZVuH@uGISP>UkeZWQPeT+asI@q)*Bx$+aE zqBoyIM09%T-oh!(ZxqgGegS$VxDR;Fw9;1ak|?;!{tU(G|xBM&Q1*!IpHi$~-jRnRD+Si5fhN%9eDgy`$Uj1Fjn z%gih@z3(4)QWwm&biqi??1D&kahK2rZ4P2^Y5-oY_A;u((=+8k1W2(`62%v-1SXKRD(mvw`jAhH7cpmj$$E(dr)-%_AF zou(J@pMvn+Kz5GPThdi?{bL^A-t?Bo3$RWsv@&pj_qTNUp^XIwN504BQI0o^igT@+ zeCSPzZh8vzZbyM`=3#sX$}}PyJ;4cjc_qebusm3j*p@uD<=$JiK84YckI{hfjj

    hV=eiPw2{YFc3k(G z>9o3Dmm_O%jpU4ZYg=QUXC*rOczlV$eWp3O7M-=^^BoeM-P^$ZrU%j4i+V!})~pcN z?A~0WvU^h&4U8&c%8AR~SH2MA>b1lL=*ULmKtj<*YbM4Od|t8YCoY16>lj<_Ava)c zX)T(C%vOE9s*TOY+7x1q3ShIf9_`O0HoFb{F;Dh$N>CvIk=oYKe_o_C;yJ|!fQKzJ zGq|=!@Fh3!!^OjG#quX<1$}nJ{bgXDe>;pbVz$MIEeVV>Vok+}KNU0H?%xa~Dx&Ma zry=4F^eO%WkBa}#hxR_#}vZF;uSyBJ;PmOyznFxCLSYwqwgPMi&9O#n9-9YIg(uUvI zkF$BoGsqu6-o{QhA-@z}B|E=U??KI|UnSBTRxH-3?C0vkxwLR^ zzycAar#1x*i|vx??{!?kTdLyO4aP388Vj@JmT>9)+<9^6Q;Xfyq67n>kZA*-$OZw%By@d1<@2n*){qq7J zE^*faA1?j;5+5$^Ut_&f&;lQ>TkjG0aEZ+p_;88wfE7M~;~>@)We{Z}1{GxxWg|A# zB{3AlucG|oJjw-sev80wbFAzGA36X(ew&S_>7PH1 zgZ}tt9K=73>j zPsw~-ru8yykm**LZjPE zlw6A`xfN4#DW=h>h)GQC*%zGJvd=m3uuhzcjn~+egyH@dC&szNXK*aLhGDp^Z)V3R zhQ%YKkB4{-6P}iA#3L{V$BIV~!>t?BF$QOeK9Gdr7X1Sl?l*)tr&Bv1`h}5wQs$v; zVPP=~m(hXr2#yzVAhuib8q(u9PT)9&;}OIhHsiqfDtR2~795y?v*1Y_HohC^P!`_$Nl+Hvnh0usPcyfsna7T%gDaCj?GTcW_T1=R>DOHf3Cy97lPc&4C; z0?!cCZcyohA__cBptgwuhhJhpG2rll?t_n>Y1>{aO%VM}uPL$L@Rhdiv(C2?_e~F~ zh5EKp+oi{FZUD9I9D>>=r?dPX9<)DWW0@(IAN4YHnoC$|Q20?J(?|EA<*&j6@_M$l ze+~5_SAsj%C`mimc*WX+8N=2VOq)sD?4Y#Go`^3g`y|^eAp82F-2b4`0K7T4yif?0|LVZpX4{-3ooPs_%9+h`F=Nu%;BOvOx>Y z{Wk8|i|hX%jyqGjgpEdTW{tGfI+JWP8#7I>XU(U0IceL~z$ZbQjy?){ONp7bBUb+d zzKa@oJDJi)L9b;Vyqk}}XORalrie%(rI(Z5O?o-$-4xhq_%dwFG<_kR!dq&Xqqacb zEvE0L+=SM9i+#5KNqZjZy7HkjQ4eUJJqwkyPrrt^)}P0sn6(PTFf6dnxPdkI4E=}# z1TE)@;`oNLmNfP+0TLH>7chhPzeZf|5PNM{vsw4*6B-{Bw$WoJA4Up$z^OiJ%y2j# z!o4krs`y^k#q80uow5SC5xYe2xKVyF95zo|s0tq1;l zA?&v<%uT+rGdg{}Rv9VN?(>YETWP9Wx+49#Ye9DfY8pCV2jDKu6C6EP?d}S8!iU?h zJ%C(B`SP%Ws9TmVCHjP?!(Ac!ZyUL=viHn&fJHPFNYln zy!1?5*MMP}3C#4&k6?RT2R7!KSDAg)Ys|jlP0R*ndiEt?sow*R`l8pIa{-v@^T61^ z2Al)z`kYgU-VsB@eR29=vk+JF1`t;QSA%ksmvYVwbO&Lx5KBXw#j;mA9gJHO-A8+c zyqVbQyU?n{R#)N6Q8t-4-{tQgi0kzw*(;DSQ+e$zoP*UnV6WgSLND&eUL>v3Hm3$C5o3*eB z9uhV|zuU(6RO9?mn*e@$VHdy_fX)mWy8SSlV2-p2#9YrLn?PWGq)lL9u2(D<_@0|9 zRxq7&bHz%TUnSGKWx86X_sH~KrntXG=4)kopG@zU=>sxdBh%>AeCX;e(A9f{u6`Hf zoUN-@jfky&RAoPQ`E0!$(S5ru!*(ndQ3err?YH8tA?rMS$Cwwf0Bt?$&|u{C4%|B( z{co1|HUUH>g60IiQ+&6G;cXOpSU)4L)UmwygZ|a;rp~_^bS)0*nrm^?K~JZiO1*U( zj@>x+;W&u%nEeiX(UAqqaje9FIUEZx4;^@;BMXT6Cia@RYvQlLSxYd^fW1C~cPC@7 zse7X=C@=dM%fWuP8oxoB{n65C``zrPmQLI6X8&Cx?_s~Llr;OhA!&|-5=nD>%#$?7 z&1^|?JXt!eHO?%Z)*644XPO-_DY)Ln>r}V%<&H$6}qd%8*pCG>Y!L^{4_j7TRC8HMX!BBPK_Br*!=Bx0eE!lI7>9R-Rp z>rsNL#xG?HY6Yk)K`jU664XjinSxpcDnn3rgGv|FYEXq!oA(nH4j=qJqO_53Au=23 zK4P^Cr=H$Rd^WBhAUYf8_Y#|p{N6PpE!}IOvQHndP}!yXEL3(9s(Nzi3x!FjYA0eN z9EF1sd0Uqh*y@QMtPQG)Ih|Ha{(D(A&T3YswMsSRm8DCnx9E~-awQgfooeUrS(mi^ z{15dAFTc3Q2Q}x8dXQ)GBTn~Eq-7P|gWc)z&rHtg`cMtmbpH(dVQAD}_pnUv> znZ4yA^bSjZyZ}_vaR2`MPjUvG|B04Ijo|0QEhi(EZKU!ftkZ1IrTbxQLgQw{5mTH$ zgNdQMOdas4#ua)+E@#%I>KWR`#!55Qj0Vo;8qf)hX{slIwSS`1{Mc(qnVahGQK2^# zxB0V`w6oCaW*@`s2@55IC-nj;N=gwJOzW!Mm;MiSa9$j91 zO%Gy$Y0Cpj?m5)|fu87$_s3M2^gTltgZ_oNY(P|<0`~bNu+Jyp7k&de_HpQTuR*sv z2Hg%Q`Pql@YzNR+kS9Q48MCVb%C#dv&TWRiItd6YN9>@~)+Y9}Hvq9(YI~Opu_K8` zU#)ntzhy~+80_Du>Hy};+*YQcIn0xUOS3&H}IUL-*C6TfqQ=K zlD^N^aPB(qk^ax)$b*m0$1~7(I<+^D$GpCIxDNfQQ#*nDRd0fLE|EWp{1vY`5BikI zpF;k!SK;%}Z91`+2HqDkf!VV1g5O3I7xLo$Z}GYhm;~t`m3(|Eo`(+9sgcKbV1-&s zKkC#d3l2k9guW-PAA+7}5E~8s=p(eAjf1`czH=QuaRWNiNBpXIm#rgR#`&Q-QZsZU z=t%=u=p-F!&M?0zbtHicy9vK)Gkm8vEx+j;_)VpbbOrCn`4v}XO5Z&5&}GyCzoYlC zehJ^u)@9xUHns{l=#nwn@kf(z(EQe}VK`_Z4?hPEn)(KH5X3AjzoW!KqrGMc*_nic zw%XmyvgCWGMp^Fy4!TyzW3I23D54%fc~1F08~VyTq|_6rH&o$RfpgR|*5IhaaTy2V zH6`dT3$Ed~j^iU7=#xaX!4^6)i{GE$hW*0Q4`iQ_J}mYpOIPT`8hf^WVD&@D^$C2I zebmw+IyKRMr4NgJ8+}>a!@dvQOwb$`&>^5JbZQ(ol=Y-VorTWv$vEgsz(KDC4qBrm zmpJHgJ&@Ljd}sGSRwEtN14#`WbdJD5yL%v`aW=OHQX4qvF+Kd&JmV2KXn~e)%@a82 z@jR2b46Z93(RE!1%95n*_Lc3clspR>>MVW)#86|KhM2q=;f*D+3>^K9!lqX 
z&J6eOSh86avbpLO@6LVLBcdnCw=*1#JTm^oipzJx$MgF}Bh!aN-;Thsz_&9L`9>d5 zdGPNwNRMJG{tnr3i)U@(S$~Ncs8A6+?qRmC@&kqmk`^`b1rXO z;VbtW;f`27_Q!SW3mfU5q0~)4l2K|?`lj_7(=_ZlkG1wg65WQ_EqqTFIysD4L_x%F zgPl6A!`GGPOV!W&A*Xf9Rd{an=+yW1N?(Oi(o_|%FtZyT?tAF$gKutun(6j5EjdEDI$RkG?{oyd+L}AI3I( zz~C=~IORCbMdgZ`(ddyVR&Dz=uWu840QedqmulRN%!eW8gj}j|?`A$|W>|8m#+_h3 z01S@ADIXN~18XPb5>PS$$WWg-AJ5E>FN?a}O` zx5%cUIOP?vqakyWVtOOk@6oB)f6%s@^AGD|<-RtMF+kkZF5cs?V_T1?Y$wPVITzW& zDU15Z7}jUulv7M&&{`HaWk`itqJ@%h%Ay5N5lZ7W5KB}y!bgBAl$nGyi4ZsC+U z^A(mGEkYdaLU=RehPt89>psIIA!qNC~(TtPk(37Dzf;l zz$xc@^n#u`^xVq&Ly_C(=9>Qqj(fY)@~r1~f0AEaEl$Q09tWN<*{g0W8uY4L>rvW> zr{P}pe2m^f%+n|P)h)!+frK({v|{evSFUK|m2C~>p#$2Vlo3gIXU_n)9oM2*Sf?D| zxrlWd1zwwkbsE>B{26%dTi~_A4uVIuM($N3{Nb>Nepl2mu~MD?E~w%1V25V$+u?Rl zR5#R?C!_weYTe0d-QoFJ1^DSVfeHjSB@S|BZqHNJ&hn=E>XYQ8>z^Q=#|XD6(W!nt z%``%L6=SOAMEi)nOXzt9&%|C5HNeJn5)tj_3gxNbx7b_gurBy0R-_~!t&CJ+f1>W0 z%Ya=Z9|fOERp2+i5I+K|VVPMFHf9xsyJtSp;PHVY>iue#`K$88LXW;g@XJ9=@fV!K z`J+px{tsRl0ROWw#UWsd!@v|rfGG|_e;?p$Hm2C-Yrqr_@HJwJOMxT4Y3bX=(7OjQ z#Vc_BUgYn!c$bXN=O5OvBRAGkH?VJZd!TfJv2YMmyiQFyGXzl_L1Z2H;ouNVF_Fh& zHH2JO$l&}0&Z;l*f~TNHC5U4sV~Vp;+fgof1X1H`)T#*ls0f(il*}{me7ux?1|$Fb z8D|PHkD+h|=9fcH!&pxg0#!_n3+p7!xcM!P{4|Mtw8p-`6z6-i_HfB)VjW%PIgI;L zGNPu4ac?@jh@nmPDoS=d9|*?dLomeO5@X&*IldmZM||aHU>qw)z&KXkf^n=s3{BTE zlmm%NE#_Xu1MH*b!HB13Rjx1A21Yw}-(u{{(yd0sHe&xR8{61UzsCsU{gW`h0t5Uh z7+>l_HAxs>p(A}RjBj~x(I<{utQLCpnD0Kq4oHRm1 z7@=4dnWNDfg|$jF-0fSAm35woFGIA8V^o2EfO|($CnD*MuHH-2=MB5d5zJU1B8sFR;J20Rs%|uNxTP zsMwp>#9ldxea`W~|H|?8kFY<#p!{m#IxsF9|BI1=IRc%~XG|B$2>S_=I&us8H&u)S z*fbacFK}GI7m@CiBOn`gL=f@iSBU+^+{^axk477TJ2lLhy5n5jl6)hNfNu@M`u?#P z1MSK$fUo4PbUrx?$0ugt%*^_^qRgn+N{nx2pg&d2y$LJ2QdBW!>+X!;SCjUm>rcgg zbm0W_KLvZy=~|}Zg@jAATteSZL*LIu-=~}__YLj)mZbasz z0(t(;zs~q!s;=qLrse*XLM}w7y&8&6e}8Oc|v5(at6<7lBRIaGqFL-HrxE8!}xPJ3r z2Kw40Cv+O@2q61gWKxX%ZH3wv!Cn|s*N37-ua2$Mmuk_lW7cxr16^eW`Wv*I`2TEJ zhCS?{m(2Yz1O3gX%zCN;t(E8TIdt+--(pWVODPYP9IhMaYagfI!Ux0XYmwx>rlbQa zj5(jyzF=Kg2f^Oo2eword~X%-*;PcR=X*laN4*f7-rnJyuwo2sBE(bk+^<8g_`c54`(RYVtnyD!!rDGiK9U$pp}q7IetuMX>nEkWn3ORe0CvkiN3egk`P9u#|VzN7@P z7pLw{SAHgXC~-7Pdzbb3M#nYk0lv|1Xj|&7{Wx29Sp4s~6v6)>Wt3^H>06)4h;$_N z)8|sqPlHxJ4VB-37a{Y_#XZK}*Ahx#`OJHI_LUz1UtEMp2zTdnJpJQG)bKm*Z4EDN z!tTq$XI=J`0Yt@;K57VuL}cW7YgKwgM*d};r6251r#)~E_Q1P?`*R{L@-MIl7GZzR zXR$85<4G&F@Lk_urgw$^5~a^mNYj(A2FlXLuR8=AUTtz=&&zSj?D$W^Q*=-Klv3m< z_m#)Ef~0{6l<#rX*X>Zxmit~Jo*R3Jyb~M0ZcoTm)ZkaNJ%N^(8tlwVZLSXgMM`xz z-62CC;!CzJ*?%b|OXK)|9;Mn0a7}e9$R_LqznUR$q@>1OCRDFFZM6#X7J*v-y zAA}w^)%W*l@M{Fs$UQSLt0~qLQj`MU+bQLdGR&@S3>aAPE)gq_v?vdmOQ655FqilZ z1%4C18oYB{bs$aIqra=&ZB~X1-I$gc#E2TZ6#aiWck@&pG^2W1YFDskY-*s&zciSs zRG-}$S+1-xs{@L*@XVf2yHfM!s&I~Ga;`YuWbF!i|Czha?({8>{q6QD^O>;0aRnra zS&Vba!)I<__iK#%Mr*&%PVDzgnbVI{_?Mu234Q@v>N3yoRFu9l@C(eF>O~4{QpN`G zkIr3+YdzBbUDp1gdB7<3;5yeQdmqo`?SX8AtgG3hWQ+62lmFy3Dj9qZGS9V+&*S%g ziSsqPt$jtor^LRZ)bYs2IKz|IJMlbaXWmXsC@Q;(uK@p%f7RKY{^bhy5k`9)m-tp;ZytYWSy0hV;STP!3LH*x0=@kh;x|X}br8Fl+Xu`I{L{C+ zg?rMwQ_X^4kK5x*Ny+Fw)Ocs#p{C5f)jb9N^0byFqJzeEcVSLKd)y;dm4r1V@Ha#} zF^-ZQuQi<}*Z)Z|l?tVNw3!3{S!v>0x(~a^f0T9{lCBoNn}7EFGOpR>>E#}yU&juk z3pU)h&OjWtAW)I^B1&J?#MQaZ)c>UH(9W@a|117epF;!MFXjpUjaJyZ8+⁡I~K( zlqR@i%@)*YGtSlF+(Guu$lYpJpe8oEk!Z{=y~a_B`6nk(E@H44P0BMpzJp2}zU+xT zSK!rNU-5MPk^uHM4LqsjHZMaTdkwyVlV}?&u9YaBi2eW0;ceKFRLg2wbN#BI4bb;q z?}&fmz_qXcGrq5oHmxlfW-#qsM+7&TBJgR}?>aIUVV`i{p_mUl1F~fyDeE^Fl#vVN zKH*vo^F2mksG_#5ALrMZN@&V&5h9f@C{3M%jbVh9tFKm?1fkcDZsXh zPQ(7#(^8zF>Ai=8(^A@Ix{kG}g%`9Y#L8IfM%0?+6K$1ISN>KdOMfm7*{xCsU@S$| zTOF$JC$Wi{*dHe?X=E= zKN{he;DPMd%6&_l)hn=Wh_yE*-+Ikc2ybhm8Epfdy~fG5 
z@oj%>(WzbdU2ET=>8F>`2Q2q)9oQezZkJIgE7$Pnn?(DdZLklKY?<*nXqo&Ah^UQ= zy#a1;+>;YqQkEA#>6oJIUBvd{Gpq~iw;lV^7^+w=w3y zC(7{dDAun^?sp;fyZj3Z_o6}sZESK|vc1@vLMN15h+~Ib$o?8LGq#E^@=s`kTiX}+ zqJ8<^pJz10_>5>@{gQqW{ddh2YmeOvh##Kd3eUs*1F6BAFSuZT8QAlgHWDT>{Kn#Vn)QQS z_e6W}$Ax%)Pc)kE;d?^ScRPd8CiZ^UR|5^e(mYK4RgF7e);LDTw>LC8KKQsQzUZ3OHfvSN6>`Oe*egr& zan2=#Ji!`rD0A}f*<~j0ZukEfe5ZL2e}!crKeq0*@_aw_5SGa1?aANP_UVo5negU| z_$EJZC{nm*uRGmPMq$t17t=$^>V_lfSxUai_2}>I@zi_FOOUF!Z)jXwpqHglvtvdyDQM%3#TDmSJcKd^y~)&*}qbGS~lTPW(M7x83L`-)fi>KZkrdVjkR~ zJykbSFP%IGiCt8mT;2g`E!Rf65YM2j4ku>OwhTUJ{-%z&&OqQ3ycOnWi*!_Z`_ zB|^mjrIf_0(1%pnhw^5!PF5fK?DlKh!S?PtqjYd3AHKVjWUt|55?+z@MED55V-lP#Eec^~`I z9LnL)+n$lyk*z5Bwd5&67K5*_Eq!=yustogYqv4q_w!K=N)>l17QE+H`CAE{u4OP0MTGy4_Cvi*B@ zSx1t)6Xp9TgTW07>}R_Cc@u2eYmbfO?|BD(ZTOz~X3iDLU&?Ok#N&D{p~vtGx9uP` z^je+R(;D(QzFFAhb?MxB))DiVl)p{u8rOau9w%LTfTcBpA2iH!LTFzqOl3zKBB z5_{a(TCo~7H);=UCC@}%gp8(SHqh@p{<{$IJrbj2UIuyl` zE#&gC-&rmnL+|jmCSuPK>!S6*bvy{J2xqR|7kQE3o?$Vjn^lSZ!&^ zWV?MSliwb-O4z2vU=Fv3m;A@e<8ii(`}dQ_eV-b z$GNx%Gkz_5_mv^`Zbp*5TRFt0wfw1f40wR^+@6TS`5^EH)>xLOv%Bn4Ido&o=Nr9w zh4xLmupMs-zrcX)Dt$i656ti@T0Xomw)F~)D|7v6K41F#CmA=L6*CFLB zKXZ*n8x1z$9${l3>SF8Ns&91h3(J7C@RSwxz;?ClR?5b&uV1t4(%}B|(VKUo?RR5! zJH`r{KFwwoHpcXp!?aiDR8)G8m;`R7sArpbdlNxk8EUg+*E;XPQtG)#0a(qGY zME2^UeyBNpgsl5OeX-{+^U_AQ`jcg&YeB>SczzxBt8atC^n&%a zeaiZF;Z3aX8nI)&3+pa|w+^+>hxycp+oRywvX%>Jl^1qs)SUC;$KWOA*xWeaCr<2O znJ)-GF*&&9E3TFDk5BP_yH((&7-FB^CFI~>tD&!;)w*e)?y>sHcoCDZ`wH#3A#E1v z*FUMv>^{Q&!M3yd2jURYFZ%iWhm1pfnok4%jO4ztQ1%VVC$_;xl$-4!?NHHXY^@Rf z!5DXZ#%?qEg!wFe!Xvk*^b3dI@w^S(1fLr&15L>B3$TCey4!0;rUsiohfQkvglQKJ z@d+2wCwzLqC(P^Y%VOLP*rYXsKH-pwdFqL6!Y4fI^b@c>;S(<187-nsdU|K5=v-&8 zsLJvQ7iwefkTxmpHp~B;=b#hF{f2$LaaabELrjEK+HI?veIlvi*6- zfc7MTYvKRRjyu(mxSrMLGTZ92gUaEfjx4#SfC|hf(I74{ z@0GG|$eseTlJVXxK^NAz+@FYfo>zWRNsQ9*_@cgK^xHD5N^jxcdL{nNlZ;am__t)7 zk|znLM5KqVjpJ&uEnF=&vG)R`t>VeXda;QF@g11OHMn{T5TG_@>0HW|xDG409SDze zgKMV%we+=h+m^zw2z&?89mBCiX{IN>LcOX~2hB0nL5C-`?`LKB5Z{dM8NGA457_A~ zss1HOYG_gJa^KS2dwq_KF9n{Nlo@<(Osc;Y=gV@J2C}uSSRdqtrf4J)!Wx{hX-`Ij zXO;3uN}#+uI%R!o?_!7Y@s09uf;-$e8*r}y-n=4`g7q6mBc|IKM6XIk+Ne0!5{u9{ z&%)aQlRykZ4bn@*9yD3lgnnQXOkf;3;XmQ|exw4EuuDl}z6MwbVkd}+`0soUslYZg zYWjz~j+hJAtj*g!PhZFT_AK$Ob0OWW=~sAukEV-tUyu@ekq7L>TlhU-Ce*V?iIEWL z9z}h#tRD5L)XNlaqchOzNR5nkGp6s1q^n#71xPFn$cH)0BM+!c%;iBv-3HtPR>GXq zgf$VdU-O(stci%#D7o)C*B2?ij20yR;c2Xocmc9^{lx;HSrlI(ycI8_= zuf22Te$5E)Q;gsNuHUH`I(IqvvVOGO@VDtk=mqR()S+8zyKt?TPJM8m?>B%&_+bny z0nWn8XvLh{y9=wOJOWe^FbY`3^BY(vM18aq|D6p-#Q(2}|6dpXUqTD`H@^s4ee`8< z=BW7piunJk`2WY^|1t6ZTlmlQx>L<>;>naVRgKd&V`gjYYNHX}Tl^$el<|ZVjo6a= z!7I2Tr{QZ&A>zl0Ia7PJO;~SpbtBd}jC}*~Q|#ZMV71ekU3#AK67r74;EWn`oycLu z25a?7?wt1hUq>FRJq+YuL>{X^4CKFwJXVAl$iIv{R*D$N!$*yk zEeG?jAYYY~e-(MG05ov_A0v+)J_qu!rRN&qgUVxawbFrgL{Zjn@tUt)`!^_Dq=j9sCMg$p}0@2J6@r8oogZO-_YHjabHZ+kJEtm3jHfUk z7GoNCq_tR2Bu~q(C$=Dw?VVbhcnkLd^9a4;3&cQhPT7aFPhAn{$(Z}C>Mf4=(T)Au!;($9`3muyca!WVA*%U3hIBsqEwym*Kp`w&Owtl785u8@h#&;H_fABG*@6TQP zUzrV}{MH>maWHaJll$0W-c|Qxb+||KWkDPFC!me@YftU`RT*l&nenvUUvHQZ52k+b z@iNS8+H?)~kJMjBv%QF6#P47pqcu)}(eV~eJ$=c(hWdEHPrR73cU9cS>yvpsPdv-} zU4A8-_a#5a`^7Vu5yW>XX=_!)F%Ds@6EpIm_5NN5#|fi`{8kwz-%3f(nht*HnF1f& zA^caV^;q#daXIB@ACuEuhu=fp`4K2jJlk7{(3jg(XQrwHQIT2B)*OJ*LyPNZug|meLKDkt9UI7CsO?7)nQYEw+0$Y zihCkPdly#jU@bVF$;SYC2cHx3k+BvY_M5;tUi&t9dEwO3tD>yLzXJ#MogwFdiN$k9 zxK|sz58r&?ne|EZV@dQz&^Ld>a_9R@Y6FO-ce=?-#BV8YI-Y8rxX11{d3rZy4}mx0 z_eHz1J=HVtTNqpIzC=EvhJS`Jae8-k;GBcw5btwOpK|(6$N{{&6VKkWp8KsT(wcq6&?BTTkwiP~vBIRfhc=?hvaP$I?^2-!OsrO{!8$0I$(=r#cr?3La zvPdFj>Z0UtiaKl6fmU~WR;hAuu~@NGepR$0C4y*a&St0%REQqTC-#f4d-Lu8pSX9A 
zj`F(iy&sJv5QBgjF(83u@r*_qaS>>Q06U4SjD&42#(-@c5eFxL!4??ING7qkSRQUR zj={z@wn*b7#En}gB3Yc)?Q)w`840k7FRk0UOY}YNIy7Ud~$Q{bT0Ydq4ZR?fu)oz4!0-H7!8BLwU3Li%aOUH+40B(29m+(g*U=`}5NK zTsr8(3!riM!jW?N+R#4jUZM6UM&OKlvc0@xm+tNQqxHJI!5S~uyPG=$@$;^42i3QQ zqMf;O`|o`32HpLaJ9*SOlK|EoZKJcd3lrjM)C$942^%_sY~ zJ-jluKm4D9E!eRo3)IWHcXkY`JvCl6mNnkUht?xPH$o+Zu5FZ_30+f_`R#?o-v!aovFi2JiiBtKAPzPz}Xq`8{SSSN^Nttvu1J@>a^Vzw5$^?s-?H z_;o#z@S^X!ykgt^E_~&r%M*D(_OFHCae2kWV-9P1fxW@EUD=J4eaoenkbcgkmy-Ub zODi7W@3^#L4t~R>6}Rz(ORpf^>(Yu3{j5tXZuB!Qy^8d4mtIZ!X_r>~>8D&;ajBnl zX~nBP=F)3OKjG4fZ~eGSEAI7Cmu9pRd)v0xTGEfXwBl!f-K7;*`%#xxyzRp-y^b_% zjC!@TiF+Tne9y$a_tTXsH*xRrl2&@X>et%z2BqIQIe|QbQTPfGEoukUWK+GxfJ=7yM=lmXi#O0h%JpTFpio>t6 z)sJ$29l!35kGUfjDyICXJ3g)>vF)F5dAvT%`6u1+DR+F@9kF;Z=bzD$>z;M_UUxj< zj^A*{-*Lxpy5n;?a^1IF{@d>O9e2bo&)omJ?)bbr@_n~7)UPe8MGjn3|HiSR>7l}L z>=85`({KaH1tb&r47S3O3tTP}crY?!(cVaC(L;=P`rF^sxwC6!>e}KLJ}yoDS+O^2 z)r=)36rkSKs#(3N*-6~`JW;Vk`6KHEyQ>#fq$@96_hO}}r#&Q!IYIh4enj2{F}U4O z6rMxP-%}mUN+s*t@W3+XM0uxNxmL`_UAd_9H03YxJLbwny+0w3_fa#^;mq~od>VOX zB)WSa=A0R^=Ka?PG?F#4HPSWm#R&{&aP>v<6RvS-amdv!EzY^ZrNvQ~y0kd$^)4+AoOEe%W^nGz{l&4b zb!l<(1uiWPpKxh${(6_zj4;!sHB$uRnYp4v%^<|NF!$8V5{%ijW}INPtz#kx`?lOf z#r18ui)3q7E}GGTwTh3?4tlt`ze#Hb4A$9l&5%J8L!&)pW>2Evs=j7ct~KwYS(kS( zY0b*Ps7-6;=04^Q2J7Fpm3 zDT^#{l$1plc$}0)7I=b`MHVCW_Fajip7SB{mNJ?A{sS;r zU!W|9D?nV7eO1Jo%dXj&&wIqfB?~FZh=WU_%gWFb_g>vw#J&Yk<&J7+o-Rae1vpTh6r&Al_V#)fI z>0)Ws(&*1d>(j=5K8x!5*EdL?_CadNr|8oTIDOjf#j}tnuq(yy|G+2WN!*?nPr~Zc z%Do$nKJ5ndY2S1DG<0ie>C;~N6n)x(LTS^sT|t{RF}9faNl$!=K5c9!k~%i_lm`Oi z!ZGLC{C)Gk?mskRWh_vHyr<~(?aTavkkzn#J@(TX<7ywCylVFebRVmtE77DCgcc7J zSMMvl1DSYnb!%YRKwEsD_uuxm)g`0Sr9EG~5)?eNW|2Ppr2CB4tR1b{#x~}^?u7Fo zw%V`qU0Lm!^DsthW;}p*SuI=n?u7L;MjIxrSua{HbAGqguC?QdW6C*7yEf#sYvruS zq!nxDDD9fGX8)+`iI6AH<#M!aM!SaAP1?30v}n?*wHvM47p2Yd_azQJv}4bz=!6Iw z2&X+WnzA_?7Z%69Sa=F5LswR|Jmz^{LYIIJVIew%g@p?oy&e=oA#S0WP^CF{mdmQE}gE18`1OD9VwW>M_G z>_H<2Di{CYbny>z7yn>+@ej&ZqOD_0<^l9DplQwD);+u9C%kd>)^5JKrV9-q|LfbP z`v(d^R;Zacc{e4<57DMouI%%B1A#3)x>E+(QrUx?+u1Fz)H|xv1?P#kwsi0$x#CPI zQCZP*p%a55SgYZ!KWLMnBpXt7D3n`|IIKIsH*xx&IY-!jkNHzdZ3}xY3Q- ze6&1qC;TFZM;9x{WNB~A^T|u%OiY7ZUi(a<0_5_-Z$VO; z%L~N?5Aa-GST679<-5sW%FFK}|88Dh=q?xY@_Wgnamw9K_%0Xn@;&6=cKJ6__d;vn zB+TE64hzh)Ca@A3Z$;yF54tQc6B<{e!$Jdi%}R7r;595*NBtG(x7xV|&E5i5tBoLg zELeuNt&M#wKJG1Vad-5(iEzn$bX99o^+n^nWrb-^s}e>F{`Bhu8DzzMtdLjw<7C4z z!IqybkWP#FCDAbN`NVSMt47{D$Lh6M!OdgE+NhcPC)6Q^X4vSo;-$u?$Lh4+K&Peo z_ubA*c{(l4+SB8IUVKNa5S`ZB#`pOYvg*@9O|C94SXuX-Fj_4I#z&e`Ef;CH;bemHvKj+BmdYHBb2Fd-*+RPLg`V3jjUU>x?041=FO^L3 z(`@PVog?3Pa|z#wZQ1M(^)9CGYLcC*^WEe~E#uEbOXz!NnZ4j|tLx!Ud))i8V3!}M zy*d0PM$)=@J;v`d>wDDT^jvpv?u+a@ku^-*FsVy%s3+#y$jmSv%6dw~99!;V{VJ!KU zKZL+9CN`Mm4|)3TAbO~~;62j4i!X?`yu@8IDG-l~r^hqJ8tKwPy==5l?cQlPh%`~# zyO#Sz8G)xnQfT%QAL1W*H?vNpb7xVtH~WUQNQ~DZF>XgvC*ajh3T4;G_W!@JN2^oHinX<6~!jp_yI16p>K*Kog_92k9oKOpa4 zqhk+_k^|@I1GtB-m5xYt#24qnv$$)%J^($>u%2tqxqJPqWm%bQ(8X+Mn7P*J0yYFn zqnW47dVj8W(&_`!hpv}Q7{9Gr-|hRL&M9tpDV+5%IL&YuiTYob&!_ld)~R$mGN|lNPKxZ#9sNJ zg|f8u49k7!tBhw|4Q)J_u=wKWB7{_L^@%*^YO5>k|A0E2yGOcG>hldeqiS}xT)M(+ zdP`oqLwGdo#(S^Uvl-h)T6M|48^rCQvZ{F+(;EA)8@c|@t^_o8u9J5PTRPJSkSwb* zr(3LmzRII-pkJhaYud5FD0ez2^wfF!MN4l-%za(oqhBnC=6beq4f@3)c-C+??Telf z{i0|&M|-d2XixW4Pnquz*!P5v0=f@<)y)shm(wrKZ0YEeT?qOITmP)ld8$u!4?};d zqqYqE`_Em89y`Bg?3U3vrVo{h{?Iv|SL9&&NIdNu0rhh){p@r?MH%REjL|)a`>39W z-`WaCkL4lpBlCu6z_~J%IlX`PSKJaMXr|iftdDVeg}DZJE|3d}q-)(XcBU zp+D{%URk}k;LjXyGj*Xo@i_AY@ha$$bs5sMA?cd!GoV4yu&y_JZdU)1AQ6^zO^NX4 zsJpVdkbZA@P<*e7^Y(tyeGi4j^9&CZ?U6Tp&&@3+W*lYN`{^s~^`iZ)qGiVur4Mhp zNF+t*_4*u7>v&ivA z^@Zq?Smq~-3IQCC4p!T>yW04ouXRr!<20)?dm~S0b_cshoqLNiVV!56xm!|M%7rPR 
zoush0z;`ue7bx2lDMPYq^Y4(~4TZCdH0Ljp2X`7flM5|hb7k*RhTWVwe~I)?mwul# zc(SJaL(*L?{cF;lF8vYdluJwMy~Cv?ExgaAB}Lrs(vmKMUu*7Zq>fHcWTcT!CuOCS zRO`T@>X(q~++M%v>qV z<2~r2deBAfy-XLijdP+?aNfWuK_9gcUDQH!Q47&KU4!0fAv&go=$(Z9ys$_3(6@%K zK|i$+-PFQi@;w~UK`li0bj>hzT1cadT8O?1>^Ot>%+cUIU*qte$)}{-;b`!lIU2lY zjt1|U<4&EY{wTa>$_?H#M}zmw(cnFE_2?JBs58>FowfBTG(*dyo_Af5H)>sLbwh@W!n?DxwMJYYR$x%w zbt5{X4T0y-7ac@TljW{jg{x=OV>>D5nvi;O6X_0qhbz;_yL&_GX(F@b^diSyxu_xK z%5|^xu3Qw;P5EAa>s*;ADoy?vzqPJRqaAzX+dYSd~JYgC)j?&=v%;OdD>hpR%-NY6uDZS*`) zuSSBMncOHb+SgvAyIAeMMWbqkOKY?(b!m;l>s?x-GwISAwaqTA(R{5-Ym_f=X^sAb zON%SiyR^8)OqaI0i!}O*-jLN@*tFGMq|tBm8mj}|-_#M8f^%q<783Vjb~Jq~uEw{T za&bG#O^SLZXO)_MrotSrI#X_rm1m}!W7U}{<~ZX_ z#2nLHG1;V!kt#E(ySabZq_&X?nbduxCYjU@QWH(;2vJa1<}#`h(~t&{g#Oan+obr>gRh>&9ioMZ`fGW>$DvE;2F?6Nk(HGw_hb zPqI7YWc)_&NcHnYFS5Tqf%J0)em;?|lSP|9ei2Dq$(ga4O`WANFMX-tCg`O*4_;ze2i9vk8HhpClk71?zz5w5YwZ?dQkt>ga*?SMDW`uIRy*j#`7 z&^Y92Ld30Qud*E3HF1g2PmIHIT%{#72Ta`8q7PRg#Xp>0O4+?>WZg}Vq<41RjE-Uj z8j9P`%HQWEr=LK=dzrtFbRS?i6dp|pE&0*p<^Dciw18MW!gx6HDLRUYnMkU;=#=-w zUTN$mygIw%wH?~Ej;q@@dE=if8(7LLan+uiI`G}`u*gp=Bj533VJVD@yq@H%zLl&W z!#<{$2s}gVS<8zXs+Snazv0HjG%YcB$i4W>Y;Nl_F-^~o)7|BQ`c!GZrD>? z9SAIoO$r@Cwkj_(=_ZhxC;t0xWZ#X?A=Cdtrj6cge|7oblz}_ox2w)oZD_l1<5nR$ zC+oh6=fpSgVtEWdo5P`e+@A@c?a6275Sof_d#BJ4B=PcjkvpD3DwD6RsP8Tv$j9q@ zX_v5nenZVEwXKuo&$DJ={D6FVevQQE5^-k74F+FD>l9^vtnQ0}tyj}q-B&wQ-4}27 z`|$Yf^M?`#M4*5HknfVv+nRpvqy|<7Fy#cNseo2ks z?KL7(y5=X~{-8}upbcnRkDmsbi3R1EBmPgHSW!IAiwvCREv5|q%Z59m`vT1yD+a{1 zH*Qb0{HM|$6=)}}oq zf%2Y-Uy=DQxEVSB3iJXup~+c}ydNz|;|=Hp zh(z0XjP^J}d)&sl{IC#{u#wi@{Wjz$w?@VQ@Fa7MZsn%z!W)Ejxi?t@I zpGC)frq3@2$GBt>KIC52?4HBqi&URpZ)})V!M@b%9IX9rXc;^d{YI0X&H8i~b$-T} z+T#^%ZmXKxbKd;w`~NnVR@vow%7_6PNc>6d%(gVqE@z%eFXg^pkN$MhYtirG_t%`R z!H0m@phU1v8efRD$kpX7B8vd?JpUhjVkmlx;Vi_H_YOlFS-*~~%vh9gM6+zv8xIwH zR{3M(+n7^=bsqTF<)P!`TiIg;Yi(XD4EY}Oz2>~xd&IzIh}yi_TOfN5#_j#g{vvMA zX~jc1avnTWl=i*pN}K(KZ|j)-MV-A?t3mE->X`jSjlDJ)^QNne5J*uvMGSo?w2zY|^kjV-J9xT0_GHM#fc* zwGB@kt7&-RX<|7R&K=j7;5mug@g1@4`g8x`&S$&m+wP?QwD*4Mqt>0sw7;7?|Djnm z2`f_lE!i2gr}tjb86?WTC{q+$Ijf(@V4kelrKKUt^rklRr65`E75ZeIt!r8$nw-Vg zQRW$ZC(}~3AaBJftFiUelBvq%XM(lKnLBl)H}x#j+O^4PUml<)_%+Fy*|!ia1bxct zD2wYZR_;1XqxR|^w}+9V{C-tK&mUC|R?Y1{#M_2N`(>K_8yo zf1paM7}j)LwSNheTs5=jsj7MX2bV#C4YSsI>J>)G>{iA{U{TGied*0(u(7CELlnx= zOE1CQgvpyGe&{sj^tRfYLrQ1MfBC=*o#mM+CG&}6xS|@{iZSE77mAX`CRe;wv>D03 z{W4PrKEhh}A+&?m3+5%b@oG2`2=BzmzQa!kvU5?ghLv=kyg#48BlI!+lMaWphRV)J zX3oKf^rQ8fkzQUeE?M6_y>>}RyjlD)J0r_+Ol;Gqe0qzmDn3vWQLf1G*d81oD}wZ?YlDUFG;(UtxnO}YXg)61i!u0v;y0!!)g0K5OousF|=6_cjvORESU)PEA z>d0G!UW@HBWsJ(GS34FhgXO-|->Y|mMhwRdnbk`>oOctPw*}72ufFF$=hFX{Jaxp~ zMQ@3h6MxbcZ9Y~z+e_ytZe}_ys`LKnCGp=GYYgoyhF9Cs^AEnhaNSHB%XSl^BRfMj z%scUU#o*QX4$7HH*;!d$w@stS@?^vNQsfO!hVQBVPV%kDYlDs-c9Tz{Pk^_Y^Lur@ zcyP?}(KPwnp;qqsSzUin=N-Q{yuk5#Gh!XDHY3^bd^5rwzc+Fx$LkI6cYNFMeaE*A z&#$rXVz^L^r*6vz$~SW4*K?QU?USSU>^%i< zf3j%CTZOloeu6?+*DgfX!dxD`m9kslG+FMaT@Nx-up^Ipb5ko)mx_M*G5j<%C7@Ue z!NhHqYp=EP6@8t(>h8eQGqkftL0H^_eh{~plpJoqzD;dXTIs>Ty%Jv>yIOd`mIu=> zbFQaMd9`VY>cx|@LL615Hjk*xPkyGtm8nlEE4|OrexFIYGSS*n)vHO)TEP+e629a3 z?04NRdIJZUGT}giwd9pB%jg+q&W9G^;^kpO2h1Nz8~QLaS$Wvd4YP~N4L#M_v}ld@ zHRXm5eLKI1MuXKhE&2^c?Kg_HnLU_w%EQ}~uC-~63BJdaQ+F1-fA2QdC8hWTSIrrI z9DLb%!>2)Dx)eIUeQ?&v1N(y2qEOY+ld7s)nBbq@8m%9;0?|HDEl6P69I`-rs>S zE4s*lm*-L1yIR!_5dHo5ZZQ#r3O|MNe z9|RH~bZMrMCfUTaiuM-NgI83@dO8650&zK61NTA`MT>6G`b5!k;?Nm%H^Qq|h*z^Z zVxC}DFuZy%E86qsH?RNF$0o{sHmae*lITZT%)Kbl$9n>)`RE-IK$w#j#CQ zK3)!|%kkx1dOqbptIG1>hW__*X_fsu@>&J!TCIe2^kO$MFX!>-nor@;e|S+mx+;%H zL+e@I{4#x&<;}rZM)Vu{31x;aJHDA2sq-twjp3zdDbp;-*fHzj^W=jy>G6ywlYfIe 
zGvwzOPbU8sd3HdbW6YcU+vHhAT*nwRc}c_AJ$;V*oBTWE*%yJUYM*DMXfaUyYAqjq zm-BAD9@INuBHtRBoLgJJPdY2i`rxvzr{xUNFM5 zZ|45ggKxvL7l>!~uU|=7GG5iF^eS_{fBkBAyu}?G>Mw+9>r0=(x8itFRj6l6T;r)A)@dHF}S&=#~9$uFWec$nL);vasp>MbR@gB2w*Vxjy#s2Iw z*|wr>uCjH69a|n&d(w7uwDMM7?QPF*33S7E*CI_n=FX{aXw$1!xV-w#*4-Ll%?0M9 zy@%=J8gGt0cW+>+t1nuB4nAkgHw2QdTr@+u9kdUByqd zj$PlijFD?^<9Ee*$juQNZ_G*BrVJ1gnyjc3z`Zb16F+(=)qchc7{p3ULI zO*sZ5MB;qji-0b;@6azM)#)e_wXxcuTSme|gL{Z^z!sSI4~aKf8ah zW7dxQI>zt#)qUu?i^jzVCQJU9te+fN8AZlgi(N(xIjcWZRP#U|l2k?7LVkyY6)u2H zH5LTp`V1=5Jwz~4q$Df~6e%ficN_JQva-`RD08g#4T`y>p1s(7ze+ttPLd5bE59Ng z+mV98Iiz>;I@&l-n(%Ha;-{0X5|(Ob$6#S8{{ykXwYYugnmschMqKipFs z{mWQ!-P*E~?Ecmcd?je2;fo!`H56ht4{Cexxq2@WF|lp+H$_u`IbtJ1gh5r2CjjW@Pwou*mMgo(t>{ zq^3bojMhJeOhG%r_uwmM;A|zar{E&#qNiB1W?o{C4>v5)Oaw0x5}#%} zjoh#|Y~}{DC+-bX##+S#zZF}Z>de=I1{=0FoQ)@sO^m~1#zvqC8-NAK1DlWsHnpr_ zogz8mEe#D|bv8Wl4Ew|5vUQD)Z}in(r-{{juJdoqKJbOE4^ow5e);h~E#CWKpXrH? zfZjuGN)Mga`|BOdxiNRQ>Dr~(r`z|=AZv5q1J2bAt8D)lttDqpoDZW<$wm|{dy(})|~#&as(N6z)L$unN9zVE+w zwr#1LIs8nr`k?pabIjv~$(o0~X|IZRB z(r8`GXzjr-Z&JdH5;H;+9m0-LGe*q#2)ps=g-Q!lThG$HH4eG+yp~_saX3)yfn%X} z&^Qc40#h^&H3|a}Z;GB#MA`b|>L-oP{!H6(jZ5WqmVO&^JT+M7#^*7xY)=OlYlQy6 z;tK757^4g97&Sf&;t|(E2kd>&uYU9%<5kCB=?$7KeSi?Iy-063seC*=k#YN3<*!#h zCuefBZ1zgMl0IvkntGN7?L7>wx^Zu4+Q^CAm;n1$_cde1$cY%&W)I?(+HsxkWF3g7 zGvj(D^8Q(+=vd6SW=tdZZ|Yyo{()nQaM$hFUVuGM6LvoaZyjAgQ_t-U29x*kXKLm> z&Nx3dd2VT=-*{e(L^Fr71xRg9V<~=f*rdrc3WBJO3+< zd)>*0-VE^i z%T!*hOt&!d6U_C4;tMY;E|ig_OdDKJjBRFbBq`Ge;Uj?`c1Efr-jS5-_alC5;djLc z!r}wiPe+FeX1uC5ThXbxn!QG8<(&RZP#)dYnNsr2K_A_TJiBW%^}%?u6uO)lG+y7F z_sK_-+Rx$nEzQTrg^W%7N&AmgHMG1$`yOKEJqX=MDuT?Z<$%6h{cFA*p39o3Oy4j3 zyWz^nNiQ5IkWbNzkgT#7`&gB| zi!8j?$_isiWR;d592RYv^S|)3Jcx6K4;35y8F>ImMCVMU|mgBWRt=@ih@ek8qs{ z3CDBxK5TeS2Rvtc>V+K7*@9(WFtN50+N<*nmr#tLKGquM-Q(FZ;;quorX7&$_L;G@ zF`%&mJ$7?#i7m6X&&N%BvY$8gHU(J6vbP@!D*1q3G|F%ppo*6eI!~M`-a-UcRIl$*V>=Pszo;mz@BvUo7<@A(H)!deoRdag& zaDwK>Kd6$}E`&VbG&dHbih0eo;KmYj7OzB_^@A9M6^_|Zr*L0-{U!ZInn_;_N z`n?A(jQRe@+RHC2{ELsr501@T-x=yU5qRv1c2t#_ioBhBVZ!d*J@5Y={7vQgKkf(1 z-9}2zT2d`FmQ=H_q`LE!S>sQ8M?0?@|Ce6Yit2R}PgyopJ6U0A&#=6jznl$KNzGGc zAFtKQoo5&i#P_s&_%E70{1Pmv*vId?$?W0T%_9x1P)0(4y_A(Gcvb>HeoJ8c z1J>TFIQG~6U*LVT(YL#H#e81nueN*kdCwQE$$$bKDE5!}#nq9()4NmbeQzJQIkV#I z^@HPO4;3k8x9UZ!ys;T#{3bSTIn}+aW9{IyLG9WHcTD#m^TrGYkb(4K`GYl+|ERZo zpm;z#+n09t#fAuO$U8HCuotf>C+>MW^D=WY>ug%Stj}*Ri$n(krCo2PmYr)KSeYr! z;BV{g==@r0Q(&C;eQb^^GSkjB%&nezVg0W4!OW3e-RsAnK{t*M-S8`3EUYma-S2PQ z%3S45k9(6RoIUv@b8Ud}TiD>ROv?w>usZtES!>nxSm?dB$gA&zKaGcBU3y*zn(EsI zZ#{F%SuAAd!z<=r#ZGC6H#-y8oY;p~OJK5B%q;y?|LL};?|NEmI5@$vs@d#gXR)`! 
z28fYdeD%PuQ;C-cV;d5+v%Iw3#ikFQ)h@OoLTHk8b9>AjrghBazLt85S^MB?2_J&j z^_JzjI8_82V1q@q$j>c z?DTs4zb2qZ%jwYO*u6>+xdt>hFm>?4`ugrSc6Bf&BGsN(KInyt3s%Fc`~0E68pcJW z`WEj;o$se!Y{kx@$xuvu)xtozhrJkIz#hM-yujFt1T{PD5!RW?(HQVa_-5Tb-h6bzir{6ytj3PJJI;dK@qRdsy zt5}gI6*Y+UV}@V4Drnax<1Kx7T{-%g#oni(^fYVf5&Eb}Ph;&dIlORqad=_>;_#w= z=w|pvXoX+Pjo6bF)y_F^sAkTAeMQsW@{c`Ki*)0?R-REPTKTW3_o0>lXYt=aD<6zc zoMer}__s7u&nkEr&%z-|=ipkK0_q91+=O$?zjJwS=4YKj_fVId&9{7(dHXuvm$ib* z?~;Aogdl6;6)M-h(3Im>7*AeJIWqn9)znAMpLvk)9~U(1=7$Q)&ap22_KU4h=7Gti zd;YL$&hSh0!5=YypQc~ZRdZNb*S9=n`y`)+W{HOAi>qGgfCkQW9*O;U_QcG6siXcM zmuyRoJ6D3#ytLLkmI=J>=H9VRZeC*K=F{@#UagU@TJV3pV&~l-jm*2PvPypQUy+wD z3z)g~Q{?5#0vEf3PF`-~sm#cWr*gfSYZtq@_U0?jwf|9U0?Eox<8ehSm9b;a?btKB zgE%OOl{JCL(Fc-qBE$eFU(hFS+skHHd4w`MFteH&&dMWYd2B6%IU1QYGp*v3NJg%D z%(N~Z3USTbxSqUbP|di%;8;#P6Uo-uEd+F3FE&A%Ifs~WS*>cu#fp;i%&?mR%W4Wj z-})~lKP|3#+h4W&(d|vUR@Hr>>`)Cd?uMVok=D*%6)Opht1X?p%t^wF~@CctCFAEiABE)lN<_k^rzfqWs?n|_7nO>c#(|lPtuyHG`z}$#T zI#ynw`Dn6NkFC|xS62AR^l!X5%%xskx(NU8u@eTT*JZ|zKdZeaR~3}L8i|F4e;ArO z@s?2K%$9*5Trn6;moV=I;3#P*x5&}0krjz%+t6aK^-8^Ub9$20D~5*{yMe46l`%Aq zaW^eki|r-&*UTf#ddaVnPGx1D;YB_0ftF-L{fQ~)NuNbe`b^QZP%Jzt3=ZtkROiH( zj(w1-`Y_1eKlfhjv!KP?yD_>x@qXvrxJuS7ksL6+rl8at{q^-kZ@1syi{m?z{#dcU zJ=HqC6Yrzmuoo_9)bskVQz!}-HqstJ=B#n1Jy?aqX+QByWB?x4S0{J=wl;~Urw8jY z(}Oi0?ULU%o{^wu+egpMwh=sZk7=ic$cz@jI~VmYW*=HyJJWlZ{q3Qmt3%$DiQ&ZN z&Uw85OULNP#GUX+`ft3x!OCRBx3s@(55rI4lAH(3tS1q-TjtY~k z`vzL2G;xU9wq$)64uy!Ngr~w|UcRk;GXA)C9IMQ8T~C{TVjhwv3L^i+L*)Mn^Z&T{ zCt4yU#7*R%NQ(Ru`;mVlBJxkHMgED3$UpHB`6mJ+|Bsme=kH4rWtpfv*r29;SvMzR zKlZot1;Qc$za+ICZS-b(Jz4#1Dj5gsgt$t{nkW5a?5V=_!oi3h^Y6nNKUtlQfo&qc zy~GAA_*d2r##YuA)Fl0b{xiCl|4gBm5su0x+U6_R`zPy;cnz`4e6t(+&PJjD-HG)- zF&dkm!p0Gvf|=2@>Y+YjBq=uCm+>e-QetpRo+AIHyckM~XLmmHp^fM-!rbXl0_Nr|@DcfzZp$+C+`OdFWDuoE-&&y!c& zKB6tn7kW%dnwxy!){f6~^Hm}Hhc)R5CirH5jKY&)j`wmDv z>`Z@$|6k|T_#JBOz?SX9ys`%Wk!!%XX)3Neq__TdL&NivlQXLpJV2kOuo*?(2(rtC zg~jnI6TPZ0)+-z3g3@6j9xSA@;a~N(Ua~q{r^>e{ z@h;z5N=qC0b81D=Y$V!>P4Kdj;g*LEzYK@Mc3@-R{SRXh*+bN`@OE#{g2EqiC($agmNT@h!+Bkuo|aaOWlh8#BR@K)v&{w{l(y~0{q;BZ!c??az& z)bsn;o0PBv{$Z+*Jx*IvyPr+l;b9hMrEXtBwRY^@vSi$i!dYn_g|ospvu3=~H?vkK zyKB2}R{nIOuEGY~e4}tyYJ{^=m#&%^D;)DYI4ha^`?5GI+%3>*a8{5RVjq3%)4sD| z_S%Bh*#Y)>7HQ??2_UUh9QXA7P}8V+>Q4Gey{&H*_6qTMj;Rl`_4Lg;r#Wp3buU6g z`eo}i%*Gxz?^#9D!DZp+@f#2>OZ8LHZH!UkRT(+T zGct^X2hw?=Cih)#^(Nl7*KC^^Hpvm4)FCwuXAq5NQ$_|N*#)_agR(rSC)-& zq_SQ!Zarez=!y1z<)PDHsu0RiHl7x{a-OSqv~hznxfn9iEd>166KVt6 z{lO6|m2zRJY~|gIZ-Qi_+=F)R-#ZdhhHv#U1?4dtXNx_I;=zFBOL+jPpn2Xat+_Jd z;XwDoRsjhMyC$q7bdC7@+<86Q4X=*1gTZ0%qxZ?4``;wI#r}*dTPxWOM zJB7Ga^ZM8Q%)I3}VP@=BADVs>-Iy3@>NEB{L~?|0(T|266!R>gYq{V5LClo3q8(N= z_gKsn`b{(huRgR%eTRC;hVTLPwUN0n_1m_Bx8;txUTEO{HA zktvU->2Jm={Zdhvz5-T?`dmGf-`|IAf9Eh#=hdq#_d_fld}EJJv2r&_EpD>#aaC~qvOfPNKqWTJ=t^iUOoSzGL2J%k>bYb z0@v4zQWaQnB>nwSt$J+_T=cbKP^(odMp_Cx!qr;uzKixC%lph)h8mYBV^^3h{}tGn z*cz3G-ltpl&k;k19j(bHc{=kCSSS}-zQz5(-ZbTki~<^` zNh@N?vo5VjD(qlQIT{hLOl(>{SWmmOe6*f&X+>&z(xuU+fMsIqE3(TIE-gQ=$6fj( z-tVYO|AzDtmzGc6V=j%R1}qbMA9eBKKKJ3!+9)857u?brAuuIDy`H)L1GS5Mm zR`i|&E{$!`qWvzds6YE$S~0nT&KCxbE*dxOuvTg3|01*i=w-J0LMQ@3W9LyG61qUp z`Mg+c6%5 zdu;vbs?(vg^F?2$wDW%vM|a)`QK6>`!GYBD)d%(G|CNM@da|lr%+0$yjuhv_RKMilMxu6FN$2p`)Y^I%=PS0m3)nVEB-teE24Q(&st)p3A6mi0Ov=~Y`UglYI7@^JUU0$P$(MwD%dq0gljq2Q_ir7kZn0LN**-sQz1;6}|!mlyXCr)qY2aTd5(^R+H7t^$|+TBb>Oo(&fcD;nK|& zE-$VM2XBtJyf`l0y*cFa;>K`(Ec|TyiBmHx5a-M0#lVUNZx;ldd-_0?iuCH1go9(Dj;8zHP5JdCfM=Y}U$)^O}i}gnbsSYR)Ig!}WAO zlh=#}r>f_E=6Ra&cy=Y{O3xYku{lo`LOiG+k-o-6x@xn1CC2m-3QsRe2 zCMAx@O7LNEL~%4M&K?$56i2H9twfxW;~{ZI>K&xb7OhUoUQN}B z9H2c_``?UHBED*64&tujs%W7+aaM6vyO+x6ty-IluYO;+B^420vV{v!gKXWBHEaC0 
zz%6+PnS$hwMV)6!3AZFsxY?8zWJ)4M3D0S&7@cDn!T6t55a|nylE!>}`u>`+!~{!3 zj(XOPZRujatribi5<ak+` zb97Vary^(2O$~K=&z&&SNV|BSM?H~OL^&`q>t)FFZF*bkN(Q&snEt} zQ9`7jk~NRC>C$Um3bbOI<2A0NC9a^60_pxRx|zpA$++TaDVEm1hV7w9Cu#p4R?bCT z(i|0a-E020nSV52lx#Ks+s!{(LP~@zU(_W%Vo_HT9O+~ox}=&v_3=b*>ZDXYl(!Pm zB3b=RDp`BLPl6IISySNNWOZR-LtQeEj7=&O9>{(y7Se$}y!Qgz8sba5=T?=pJ>OBe ztPeefG$4WPlBST;PCoEuN|XmJX6NfksOdW^xCebf%$Brgk>Y#HR`sm*+sfKG(-uvJ z&+X>w;>Cm4`)yuj_lr(PaRh3=hqqj2?ud+v=XH?(UY-tPRon|4q~5)h=j%JRk-KhD{yy?=<>hyfe1A;LA%plB>6qda}Y;!+IEhxLkRf@kA7{ zLa;oDn^dr<3mx4CdhcRoa$y(Oyp*jQ?Tvu8bN|H~+d(_N4W5K%W&Vym^3QCTyD@~$ z2BeeE*QO`AAe4mHWYfyn)I<=!0F_JETsgZX&3q+^ zXeD|pBmeHJZm&cJZsh%Mq_(oZO;6O!8Hn$XCKnrh4H3*Y8hs6ON#bm0rgeIB($v@B zFDH<{9`8Pa&IT$gIEJpqpo-MK2G`;{1*f{;)!r{BSsW3?qp45XH#%D| zkd*vEG09l9U}M-0hQQ4Q*G({ZA`zWK7WY7(Z}3Ep>AdP`|IZxsE1h2%olwAvp-ktH z9qhexg24}|Jun(S#BXbAo%>1rke>)YDod%zEA*>kk^^Htg}``S+&9kY|CwaZHC=@}RDNg4A9%m)Y@G>ah#yF-^bZtNZpz9Zg2?UrVn!}2 zyeSXN5Ty9ONanr`++p5**HQoT6KSX2?mBSj!A8N-awGIF-O0#zEt|CI$-vdeL-Rk> zIlYtVCxabgv;okRdT`_lY<&zah}vi_OzcY6jjnHTMEVSUZVY@2{Y)gjlGtRP^e&a_ z8m_FPSuOn#vHgSSYIZqHkvom9W)!`cy|1O2tgZ%JkwwE_3WFnZ+3!xth}WqNSb2=n z)l`K-%lH<(&kYk3?83^gzJPU;Qwc1RC3tqYdvZ%;@b|FU^*I#yj+k^fO>S zm&VhF>cJVQ?fe2eRPnTpL}odOw_hOk*Ku*Ce@<15-6ortrhGoKJeovjS{@2w_o?Ry zEsWi%;UP}niyi()?*+jf!78C;EqtskmNvQ?MJSja^Wx7L+t*gZD@2z^uy{Pis@$TE zl%~X|BB?u=(}X$lB{*}WbDO~&VPya}yEia5C7CoddJ{C@64nfrXUdhdocJ{cIlJ3VM=f5#s3Wt*FglcQ!GU9h>04=Ws?Ym!M02Zm~vMB-eCUBR$(k zzLHCry9Dx?~E~3Y4os4xB@{e54p%?ogr9Kg!$S^1) ztcdiTBYHK;+O>|QEQ=>%>bbINE!BKN*nr8U02UO<3C>KO7z$HzzF{aRE8OK4S_tkhZxFGGJ6{*=b=$oDH7uvfLo#k}8Il8saP#d?P`*nKI z9IlAPyb(HLJ`M67Ied{HEYX`91P>_CNSGb>m94R%NP5n+puFt##@HeYMFelni@C31 zwI@B{P*_sBsyUYXnYAHpUxM5iI($K$#&cc!Xq*wl4}~*g=*8lU%x*FKus1Ay zVo&!7&FUWDjKsxX+hrB*a7MB;^=kG$fw|m6*NS$<-wZ9z>{)8`OWAkH@x$!((kDB* zRb5xl^2h&poh#C<@JHOd{VpDs(mQ^QAnrmPd869S%6zSV(bwuo{8D>*_5QDX#!-rx zy6ZJ3OUjt#8Ns+m#QO5kDd?SjRZb_>S-G}Z<&sc#QN9`}OUjjAPI|ZPLML($v48v~Sbz*m8ppVx*7wL+iVZWU?k{ z$|YqD#%x*=*kIhIjWpA@X-PVR_*&~)NkOS&o+Sxsu-4YOUg?-EH?b(IZMmebTx;q} zA{(r+_gQM|SL&XU$}muM*!IrMX9?#UB|W&L(?T_O@R|*K>bX*E7pv z-rdDHXnYs;;<6j(hpuN4I*mo>dKMAGY!SMiMd)@Gq0d<~`~}KyCw&iT%3CNy*Ru#c z&m#0Zi~2WkUTM|SZ}lebuj6KWTz|LA-{X!Q?zqJrx4PrK_PAd7D7xO@m~b>WCLDLT za)V<+o_D}MxdM#(H_*YXcrqWOUbs8AgOhVJdYCoDkh>MhrFQA7kQnB#aKDHiW-S;5 zcTj&hYujKHeGIEzi%W7}>M+`vw^$YZ$dV9JR8(bA6?E9kXYpdPE%2 zT5fCfZ4Fe;0@vQw453T8?x54vs3(cOW_o^IWu}>#q!KTqX4bf1bugCE>u}xG>iyM} zuiPF7ZN|>I`EGvOD7%Ma2S1G{tT~%OF{S)MGq%yq z_wmyxPI2tyhb2`rqY|u<3t)|CbhTF+Dy+%W6Uogy+fb;VL5E`VhI(U}%B0I1%C5~+ zHoLr~`V2Z0Ti=WbW|RdkuhEj7K@zS^qp9AdHOgkXv_@ZzOKVj6F0Ij8>Czg-_!F3S z)##46v_^f%r42W5qf%TVJG#X^vZG8~g|y)S;x>$EGvdUBvZM7UO4r(QaV^SCol8o) zQT(pbjvHK5+Hs7Rm3AE9C0oZl>$K92Q;GXhzfygEO681jrB|pt?nXVFK>CiVGwbmh z1UCe95#q@adHPH6Xp0gOIb$(Grkt?|Aydy-e2{5pEIP>aGZq`9@{C0WnQ_MAg51R& zEH20<=_(B_$YxUGOiG9&B_?&%nX%@04<%zvs)JOqNo^rjWKvs6Re>mSFGr0<*1rdZ zRYKW*p`&m-D8v$rC?doXjZ|tq!XyVms6`bKR*6O^<@yUjmR z_xx1J!qUV@v2pE=fHQI!>DzDDA&k8&j$P4p(jh$IbO>2Q5u}C=QRKQ&h$64&eBrSM zzXG0!N-x(ZWcDF{RLny>U6ANDd69|Uz3F9myBs8!VV5d>LYnmDJ}*|EaK7+OgDLVe zFh%~tGu|%JCgAac&cR`dJnsFPeLK972y`2a+}`j=gX^KSv;4{b-iNbcOC>!(FE}1L z4v}X)iPv0(xvrPBw9zlHFWA&(dF>fuJ+)(BB|SthYf5-8`z7nOSB{t6MctR9BF*8y zV*Tb>4Jp#x5PAjq!=sll{_yR*hy3Atjd$PM$b6S%(DF21pmi?SE&O~-vhHED3X1MD zg2j=HA4A)K_Xv^AM{qZ!QRrnKc><}w5Iw#r&l99@<0Aig2|}Fj2q(Ou_K{d}?!=lT zIN^$GTiqf0Axa2)t)x|-5={%(Gl1(ZlL4G@^?T^>iuAgjWCX{0Jtg*u_b|J zeyKMunk-yh*E)amu8;VN-_ZKSu{XzV?ZAi7OEO*%xES4BGfI<^6J@oRA+1tg7{@ zFF`Hu;vMjG4>&lP(v&==kmfg@Cu$WsHM|9qjY!?Ed6`IeFX8`=w0glQq&+Vp14rAi z5XsKM=a6=vAga|W{Ng?0Z)O%vGB_dEgA*d2{cEH|cf|J@>Fwt?hVVf2N{&Fu3p$Ng 
zb$a5!#vS|b&5v-OtWlJ|r+mfCOIAPWuYF}1?fNHoAC62E$_gGoheI3xL^1!SNB0*_ zuiZB}ZO+(UCK8wF=`Us^2am+}hi)|f7i$+N79e{Op@I~2!vppr)mEgh5BcIGalVcz z-m$;hd$a3@U2q@yZ6ubUivS^g0W$pg$nfVQ!=L{?t+SKXNzpp^bYFw+rsXDd70l$< z++?vqh@V|VluluTJTck&dJh?yuY9~EW9?(VKBl~I3N#Fk&RvWV?b!y#Rn3y@pg$8R z2R8(-Z|zNVJ>_S9P}A;@;d<5i0n+&Oy>F+Yc!~FCUaKka19*xLXFdWQYA|l1-ydeY z$gYk^_$`^fnjybo<^}B0EmjEhnuin8a$e65*|Cc*@vb)HUgynkcF!BRw8}2eQ`Rv1 z0<>DFDD{6@JEtIx4?y}iyocyr(eyokqF*y>09*ILbX%sjjY2PS+_T`o^QlTpXkx;* z+r62wCc#(7^i|S4)$}WIv2+LZId5d{CVB|s&#!XPO0YU<-rUfUK?q@78ibICJghzH zhksO6j~DQ~o);K>FH|+O{4rJ@^T0JTm?6UljrGStS%dT(;Ox^?GZh73c>hPxE#n02 zvz~+E+ggXH53(@<#M!ZFNN-o%9_00*Oz_e%=vHWM6|YP~Lixf26|ZDV&OIPI`bk-Q zkeHYD!3X&w_#o;#aZmLMJ(SUWeMdZ9)_?c4>)SduZk;$_qnDtK05b$%M}q{C_2AC; z?L@PgILy`AnQ>#k_@j7Web1>~Y2FRJr{Rx^P%QqKtb3BUsgF)&NDSvXm@Pr z70xPo89&crg5+Y)g9-AG>f1D&+yikq z+U|R`Av6P)+aiJO*1NHmy`cLF5e>b?Yvwn*AMWbXI{s^U>NLy}w+)Efj=}`_4ZTDZ zkbF##iZ#CIMLgBChGGXnH|X4w;<1d%ti0&Gh8!k{;av%>Y~YKA_pQ*lA!Y!4%;Z;- ze^=3A9iLo7{vxpq;3s?BfGc@v|n-Hv`@ zYw0>OM&l!Vo_m;bJEm>gjCJC0slMR>ZeFnR9D9Gm)A5H_9kY8i5f9rfR~p5m3%r}~ z?_42F5PH$@=`1G5C_X(5&Qr^6ju*qPk)<>uOA($^V?V!Pe#leA$LEWWBd2LJJRJTl z9^P{syt^zJ4U2a_X)r-5-ij^N3N1S1XJg1~1*b^gl4(kwk@S!H!U6fU!vXnU1)kvt zOThurw~5Qze%>6I=ukk043`KxK5ov!!_^8!;=MKA%!*1P!dQF4uxY8fbV)sBeT)UQ zqNjTC%aziylTu`|vigz^Wb(We;eNiGS1(Oz) zLUEr}eNiwgfZ0kL>aMkEW^GZmDW~pi_T}DERyIZ0J;3E32h#)OkMY`_pY({{4)RAs zi*P$01_uNb%|e6x0SXAnAF{*iIUxJJ;RCYl>px&E{mi~MH6ves&S+HPLx~?Sv(TRZDBfsRv%;P}lay&E3Dp{2_dW8A zUgAjF{EOt-uUwbqk*CNr1Ah*lZ0esTf1|KK9It$d{0+kTaQyOR^7yD@`D>qNBx7Jg zaKCY)sSM}e6_0mwz*+JaBO#40bN)Q}ci2yHz2UQOkT*Qu%>i$b$7dd_4}1P?^4!Z@ z%SdB=ys!8sM%prD-r~Q@4c14yV%!=V4|t|qCnM_?Kb=3UqsqijwLS({uYZIWYb`Ba zEPOr3i?8SY)Potk0kdNlL~d^(Hu@npdAY4(wyQCsTU`?T*{ zwhH>`3R?$#HFeaU!P@l9H8$>LNbPOUcLdhLU9UwV&3=^YheGO`U`=}F0+(0c*}7Z6 z;ec~1HWu|vAIH-r30F@vVC!uSETtY&TT@T85@bbh&)qAGj|yEcnxeeKmah*W@ip~D zd%<|NK5@C+T3j?5tWM7~^_hi&2Dxttq&d+Fvj?btt9h>OEqY(4ca0`XSRa})jf+hx zBZd)cM^mP;v(47KmOZ&CJ5Cw2tnaMAhR>9N=W2E&OUW~iu4Sc|KO-qdE4vtGBZu)J ziR=yFdn`tlICXGJ4&S44P^cZl>9WBI<~Vtj$c@4)yS_M>r>7jXp^M~x5(Q*j> z_+91}gXv-CjQ&TMQ@*F2$LKjjw2AyPw1K%pw3?kew8uCS`@;#L%(m2<2Ge8dn9U!r z&Wz3YaQbo1dRlt~=5%L0jl>=c>Z0qc1@%a*pTITw^3PARpSF0)<6`;t(`T+|KaD+P zQ)b`cwC0}^q^-2GGBnn^4Tt5h7Ww}t>V{;hl6huPHzW%cfB1CN4QosN<$=qko%^>WYh*Y5 zZO|^BL|XPV(lXgMKjTjx5TbBazWKR54e?hC$8TA_rRIT^@kx=ky2sfqeuMd6kxus! zh3rogH4m&AIHDb5^<$|YKEQl9BN_W9vwnNgvf2ruWzhih;l_|Gz3%?;%GzRNc1IJ* zc_DD4dkv-uZCc$|2P&~YG{3L9H-$7Z)CX>AUoA4YXnErGBr?@+5mz+&-}W>#Y;<_c ztNdHb4*8}2ZM#>(GxBNpyu ze|h~NJIRG#$Q;(XrK=k>jvEH>d_dYLY2sA>%LU2!{=~+ulV90?uUDHa{Q0(`2Urai z26iOA4xfW(P0itRFh@+x zVj8dm5RR?E1Q>q=&Y>vJ3!Mb{D>sb$f&dK&H z0?Dt`0tw1luMmsTV@+Y;13v7XOEzoAMyB zwvoL#Y!1oZysxRgk-yDS9Ls!c4%QeZhCk~H6SKcxK9jfdu3&TgMQ;uGjl$-bRnizz zdu8cBRB$;)$9u`If7oZ@0(S(|uBt;XoktR(xCz}Jv8r|bOlU&yD|*U(hglb8zfWtV z-tbI)lZ({PtYGjKIA+xQx}T!<>t3#na@VM?u15~I%e1Gvx9&G`u4k8N`zxMn=~jQy z?hdO%)Mc?cL~%yG<>>5xxp?{c?J{@L{lu$FM!$&uy*M+iB;ox!HRast)+v+=6StM} zDd(E;$9|zxw-o@|D}qCF4gE>|qp+?c79ovWX&+dc`5ozR40^`+?G6Sy77 z_bqNm&u_u)P_IdwRt0iKRfCBPJZ5Cfn%Mi(A}58}p=U^EAiNIln8WMXH`eo4nBK_7 z+DB6G@5Jl)=M>rk#TD0>jT=0jLlLuoiTGnOlE}|6d&x>@fY!9~_#NbQE~HIZbcpzsh#)dy*2YzK2^ zRD1?~{*-esbVf3f)CsspedpB7<(M4k#aozv6O#wWZEt3;nw5Hl2a_OjXr6BM${Zrc z@*2e#8q2y6ouS`a7zj@*{ua0#W5NN(d3o8?vzqz!wc9%B^9O@iglPW)ty>%pEKPzn zXgp%+diXRRwn^b|D7J8A<(y%V1BlD1*sI`jAe$+yl1^bBx`SE6zgsm2ZNpqv^fQN_ zrCgF?Gnh^`JX$l)&+JLE_G3M3u{hSLU*k!zAXR3uIM%C7 zF{n)ph2HQc(ULj;Ni2>I&N<$LFN>Ymx9NIeaX9Ral=8yjaM&C>m9HDY;^YkN{#i72XzS)n{N60{o&9c^d28*NA;ckddDzuj;7B_RPsaK=tBX5{#-@|>! 
z-}^UZ=&f;6Gu+Q&aiANJ%yS}p*RVL)~g7qc-RKfpNm$NC)}vKQLbZ`>N(@Uo9+-mhV-~9oGJOj8ff1h7S&T!5hABHDtN76Ji5157T!1OW#GwF;VgDSB!Z zTeQ^RDaXE-W{?|IkWduH!7dqV)v|Mz|0{y}EF z?|RnlUGKZ@doOB_p!>e$aP2ORUfjiTb<*80W3+J~7VOv?t?lf_T^v^@&8Lcr`^A~H zW12d%R=@Rq2;cfnpDH>YqjN=czs6Om`!&?>=c@DS^r@n&25HQHc>wdP)cqP)4eBR~ za=@M+7AAaK`+K=}a5~3?wKsmYn%)P&xg^8A z7>!8}W|!j3dKhk$yqkS_?I$<2eWyWbk)stmWtt1;D30Q2#k~xrlYeky(Eyz$%B5L# zc$kmo-ZZ2BbI%X!X|7B2@cI7hiroGyar4BN<>G;LH%BXWDbb0THPd&_D=g`3bO&6nIe}qv)xKnm$dzQHa*G2x?(=u=czonz46Z<+f zP0#tz@41q*28QFF3>J#gx3YVe;;xLRFy0&j}BqIu!hFpm$3!?qwp;$Z+}DkoFiz(_((ACNW@o!^?{wNYW$bO zm*XpiKL0SfE#uP52b%|QqO%%f0Up6 zM+U#)ps1g|NlyFcNyhsbx;tAzt|UG#t<=V)>Uj|wmlpAH27RAgNn-tPPZ1xJ@NuY0 z^Koe)iL(IueHyB}iP|c^>PA?@9U1g}bM20dN{lP3wEmz@yCZ|&f$*$cSoaa#nW5f$ zF!}Wn6_a1QH#m7N+WG~>RTZA%F}ZZFV{&6O^w|jA7rHxk5^u14zTtaN59lSoCxh}p z@7_?}NPZ-(vA7+BVayeUAK~;{GI%*Ku1J*;V-9jNDhEI70GVU`72}Iuv|_JdOu=u- zpt!hjm{#w~2;=+eA$-lmcvnUcI;M^r@+iL;pQQB!^L~6YRUJ2& z^n~upz&#msUk1M`L)911Gpi=Po>eh%a67&ZyWLg#irjk(RvVP}n~+98*O}w7ccC+{ zMCrba-+B0b88}CFw)-;NM{`3-+^2{$GW^^n*5wt9e{fek`dHinAH{fp&SPr#W#FU^ z`rJ!$X@17}OVjsdR3r+hkH$FHbYDge?#TG)k*AxWt6#}IiqYg(uiafiV+S z2WB0KR_s>hg&e*w#s}8WogF#tSTB{=;Kq(|u0HxUGQFe4d$K*~|IwQ%C53YWk~~P~ zXTRhQ|1yI6GS<<384Ddd!}%E3FAHJiG}NTbVO13p#wDeWoN!)oW52unUd3Jh3+zV! zsUx4(eW(1*)5)6pvyxB6_BV}Em@;9OFC?LVT_m~teSR04TyQ1!zCSt&z1Huz$if&9 zbEyZj`8;YOI|yC74fm!o^Botbv-J~Fc<-O~%(3r^J1%esJ?h?g$3-`E-f+hS*)PB2 z0%Q7o+_86@?$~n{dJE;p>5hwnL%lDi?zqtMh51WvfZuV!^9GB^$REGs0(p!o_||-Y z^)^fSEGt{;}TV$aH*Y36u zWP!WhA&W%%ZVL-p1aY?oPFOF^g6SFH%LDsm};-jN9@8xxP=6fwv`I+vuFqiFd_1$>3JMg_y zlBKKPN%vZ0ufcr}#_t2+1dN+nlPs+@`Ksx@oiFpYWHtF}b{F3=a;ye*-@ z_ayo*kSF+np)L7>8*NF2davD02}RCf^lhNT_LJL~FH37jm|Vy*Tas_gq3-~-TeKqH zKqL0M?xr!fY~POXv-W(o(YhV^mDY|Bt^{M`W%yQ)X!|e6x37ziALp7EkYMS1fKipX|1ETMwnb=p>;#*zf5{Z{nR4pnYw?e zeuL_SHV35s3u_Ym4n)$kJz9Tu0^g{jyGx*V>{ku&$1lAzvIlxd@06Ud(Z_W??{v-! zF<*27dLH_MEWVJ?vhhoAdJen8OCrPKtJl!o3-g0l7UfjDA@jaw@|P=`{Ilh%P<_pO zzYlLz%gpUBRNRUicL2UUJU=$9lVMfso9Jhr>Kqk_7C~wyzH(JN|Iz3Rc^IvcFJQPE z32R(9Baqo|(3)UM*O)uET+p?ej~c7^9njwUu(tewtF*UxcwTkna(|Rzv{UhY-y4n9 z=qmZ0sniGgqjWcsTh)JjV}CqzRq%V!6FXnk#!S(`74q+LU{A<#K_^-mEzczq<+&L9 zEHirB&Xng+g4%UszO(5BxSsbuoX1GF>mT(LMsW6~hU{fA?f@ivnM?K(pmI>Sr*O;J zWkug^oQrbcOnD%Qoj9CBoQP9t*q_LyUdfM=l_zp}$x=PjV*b?zj0)8|hzoI=b>}y! zjH7+ZZvV5jRL&~IeRzf=s#+0Q5Lr@s6-BqAHSMdH+|;>tN!yB8_o|JnyH+e&zHaS? 
zFK;ch4HLaQACq>xfv@j;F}-{V-`An9=V*Rgf1Okx-PWL4pqpZ3`NPfi;7_N|#t{v<^wkM1QZ`LoU@P)YLIY6g|my|%{e zWz3UgL?sL5Mu~j33P30Eog4161>Y@v%a;7Q4&;zdW9TjGKr0K?b!V1l^x1kAcuKUg zP%YqITWx8y5;#tjkthm_({y#y4I>m_({y#!CLm*B}MDJUYRq;1;ukC;{A9&Oy%L( zj%N$X*(A&WjcoS0828-rMC4baUkNlZ(qAmeaY2053I0$-FGZC+wchK{X=G>-EbNO} zd2kFowG?T<)53C+io6gvOI|82gBR|A7xd2u`iGOMpoesmo-hDAH0l*yo}a@0{Ajds zm}ubaD1AL+2|VGLFHz%!2Ty@og*%POf2Z7slZdc=F9~8f`6kJ*yIF|-?PE@O#g;Ar zHJa=2Cu|O)avnC%5mM}+PU9T`b-csR%8~B?jUst^4CvD__+jos%f?hxUUO7bT`RAc zRtsAto-+`iTnqZd(;dja6*LI@^dkJVGti((%frRCVj27M!Lzkt?th69#@@$N5jeKM z0edCIiC0nGM00?H!14YuWnq$C&UWC5KOH~Az|ZhC{3+NEWB8V-hNH*mToS$kc*ygQ zZpC+h*r86tUfAu95&T?}n+-n;()PmsdK7+sHzmtF$$qjx^Qv4pN|w>QDi1{=%jiCp zOLIKHr6uHNIprevsa$IEsr)7Pshl7!xNvz7!_6kLRKXf9Fjo=2ra;(PkNe|B`&7O& z%BOM$@RNUQI^|ROAHdIVrF<&uuv259^RifFW{=8TcvPOlDw+WgwsmL9Lwlg%!kaSG zQUg3l!%IVgA$+@pbiQ*1{3Cp$Q>!G2&@E~tfxw=iNg}{M@DpxNAjt#v1WghFo^m|I ztdoR7__Ut#@oW=lFYd2)HT%Y~hR_0>TS|rsqR{5TbxDhAJ(qJ~tSpGYsZHhbn36r4 z%VAnM)&Z>>tGxwy1KjcBltQ4i_~nQzR3aVhd@A~;eFg4>O8Cp=gL&nCHS|fRDpmoJ}E4BS}5apS8C96_bb5lEDQQ`>+G<8e=ijd*pzj9T1B)z>`$UIu&qc&v4M z#WXyppB?X<64~LLTED{%+_QU!@ZsvD754~P>0Wl+Un%KpKJ-#fSeZvY$CU|wZo4ep zhjW?m`y-w?^V;e%`fhGb;(qYlT)USs+@@juF|FkZE}SVpTdMe1%7^^5xznQfYE;Y# zfBH|bjvpSir;D#0yboc?*SU*%pYj|4JkjBX`xFa^3 z%i=lO`o%Z1H@lw+b3TLiu&MA1-fj2ql_=Hb+sozh+$$7oihlJU9>H^8-A@uH5{m|v z#KQ?z;*>7$`B~?}!R9TE=V>)B9!yE(C41@nYB(84_OU{dcn#UdULD~zC4_=&v+j4Vnt2&^t%zzPO!V!C_RVIQ zu`%-Kh*xhTympsK$w!ZERRU)F*3Im`-ssu;GHg;RY9-I!(`Xa?MrSGT?0tamyFSGK zU&H_Rw6Arq3j1TN{Qtqd}U1kp>}z z_FP~Dx-b$LUm}w|0|<~1mMv@~UnN<#b@zbf7$k%TNwyVRvgOt}DChY3J~_uu)r^uy z$#GIO#~;~%K#C(f&Q~Y6t)Ejlu^Tsa8nvI=x~G2d`Tf?~dl)b{N&Dq{p7RIHyVt(0 zz1F+l`+DER=|=3ZhaWk4{Ym(akHdF-48G%|@Es@NJMMw+xC^no`{6rIz{kA@7Q`K} z9&U#faSQy%cf%un>&Ol0yMXPm<>I@T*K3InZF@u=s?j4G!%$0COLz9ruRE8Z4?e71 z^im*xUaJ$%!7PKaz&o(3y$mh(ZQug!6U^JT)AS`ral9kLmt2iw%;yac!k7F2e97D4 zOGfPNjlhPRfF(C=F@4D@zP8_U1FmPBxQemuM`lwi# z*!Cl4(~7a}_iO;HVr=`RJyXTlDqE(Cu~oin6=SRX)+)wU`K?uqt+HLJ7+dAHRx!5y z9@>;tjBUS%I*N+1?f1||rDAORJw1R`jBURs30TG0_Ir*3Rx!5y9@^4WjBUT?IA9fH zt88*A#k)RsPk4-( zYb$z1j4&g5D+U~KM)cx7Shf+-JMNKQX?n|-_9Ai^5xsaGS1qK{f(GS@MnCA4Mg%IJ z|1K=d;MR^tdY$ondhVfB1LI`WEj;zL&St{&oR<&S))HoPE24DQsdie2XURxnM$DGi z+UP}xltAw~HFCs|;F!_4<>RyHtwx`gLW<@wqIqkx&>#D4vOoIlavi-v+%S3<(!W5O zf#YoKyG0L@(D?_lZYONol8*}sXyE{%H-lrLZE4>GP7QlZoUx7#4AtRmj?8m%Js{Up?Gb848>bZeKq*2p9(~7@T-$A-I z^*c_^cyDhH>EjH~?Kw8%x!R1l)~S8%Jw&=Z{WAw9#Rp6t)5+gKnm_eB_Rsj--bwz5 zDbJl0A29h`ZAM({XTM|njPDzm6dy48nUlZc?rFYb(~QrZu%}J^cH~a!*)ZdMwTD_} zJlA`uX~uH{hw5iMH~BksTsO5(t7p8ox95f#&yDv~&v>r(P;|y~nQ^_+7c&v#n~v+1 zz8JIp_l@z*jO%5*FXMXk$#K2Ni#U5|2{I_ucN~wLbvlfXV9Pjbw;4Iu6Pk-SUJd%5 zF!zKqbAGYR6_&j^)3$gx?7V5R-uDl`Mel2#R_|l$gx<#^>U~W!>V50~lX@RCdTEIW zn0lWT+3-Ir2A6syeJMkHJ}eUN!6E@mgy65=^qDXQH^6pS3KcN&AZdPyJ_fzO4^i`#3*OKhez>WAHfX>z+HNJ&iyOhqhvku4DZ;AhUU>$Z}*=p9{%3Nc` zLCQC5C;REB8Q$!JT%>b333lZHLZ-d+jkCLmRpt z+R*j58@AC+(4*R*M?nXyZZY%f*F%qjmUrE1=ukI6hpL7S6@?B}ozM_}TW72t%+#3P z=v3NLztom)hqmN1eMc`tU#fO2L!HuO)hW=6V~~XrM~m3ob{JhL*IWA^+dZT;knL}1 zfWA}@eW?!m5@h!qW6+m$=u4jzC*}S}C7-uTeF<^7ri|V$^(7^vBO)8mQZjnG)R&Zu z-Y)ee6&u?w^`$FPrf-+}(z__PY`csHhOS4CmW=g%%Y?V!n#(3k-gC)>cL0Vy zFR$MN*f8M);MYu;yz#sVlXt#q!aab`nK1eCSra}A*rmK`Qb%%?spp?$dnsd1+NF+! 
zYa}e?4;h<++9Ls@=!|v*`OW%0&6xRf7=K=$w`3eSa}al1j<^jOv!Q%pfDvmt@CC@h z3$2)qL_H)~=o-C<21S$%^o!@~N}ypd<1J)a#CMD%CUhbe*I+DeIJvL{n#BdisW9di z@vN5!Gadx{1TRw$f>yB$&+Zj(sUt}BHb!}5zs+Mv>5!VUDapD*kea2GJ}%x>=awF> zgS?D=g0;@~aThd)*-iL9Y8fB$m~wpic-BXRm4;!zD%+C_xr%Vkag8xG*n_8IRpOiH zeavI-Ck)`;%3AC%#xZmauGW&uULWp3?61^vq_!0&T!!x~A8^zWroK_$n_bW6Ar7~} z{9R-IHu5(ek1HU*+d7d0Q_Bs-;3E@=$F-?=+(jPbW>N7gtSwHv|BZeXkE_;@l;07* zf;|qHOU^g1gG^4(?38$1DT}A$alw05y@2a^U2{4fx801#MH|i&uRiZQ<kRntEU69;&J5GWSqTJvU&+nxGxy z!8JX#Ph9iU_a`&%yxoKFn`=yq#u%g24qS7zVt8H4CXb;4HwSmHLhUtad;w< z_R&%vt3Lg3+; z%o{>GKK%;J9zv;es()d!w3&m*o{9V+3uWZ&j`PeRl7d}77m>EiByyLwd8mDu5@Bn= zj!gQHj#s84sJNmbT9&asLB#rm+7U6^kBHeZ!q}1R*ES8+YjIKBb;pIG3m#vkQ*W4LXJ|lZ9Pm-iv zW(=&Pac2B0X&~etL>VdDx69n0w2AFfj#9R7JX1bj(sC(BDSJ1rfgGi5+_*-w1!YM4~65~W(+Ou zG=6EPsi|}r{{TBpP2O*_)7Wb>`RAV@#)RJazlf1{wt{zB-gy}-8?s_$E&u$jPU)LR zo?_*j&k6mPj9A%QeX@oj610ls^nUqu|4IAIADQ-ktU4f+73SX7h>bZJ4TgH{GtihIqg7(cD}PP-R6tN?G}ev29?YeD)(%-D@+|U039?Sbxazk zIiFAC{>+R(5<Mm$y9C+auYJn)uM4`Es9c5F&Tf96M7rX~rAb+f zLeV%Y?x+DVNI&h2mpaKq)CmHl zDTqD#&qk|CTlW8wXw`V=8KLOhOgh0!dn#eK8Qi=5{CoRSjMsS>^a*(tVSi^`YUmXB ze$X<+yO5?uK+|B6t<=^8Dg#eOS|f{56}DNRPU6brl-vU?QP8SYk0!#PJ#|J=SY$be zio%1oJk!29hAO9l1|MP)K_6yK#HRlDM$o4vuF~WZ(2*sQ{%HD*z!VjmstK1Xm=T*= z(fjpuY-$B!P#K?U#ik+_wWP9kX~~N5ijw8{TRL9hu0Y=IrN}i38j%|q)<`3ALJp^l z(<|A44J2Lx8ler8XboF|ZmD!^s?msknz5-{{sXb8sFz@_BN>}o;u9xA|80>Tt``nt zFJn_@t2HE~KgY1qhofTn=+XIlI)*Q&PU+Sv{`A)gOREk*tH!#Sq*JeVNZG(zH_)du z*UftFqcPXbvI61J3UWP%8k2{OKH0xU7|?I)BAs?DAIVS=`*pem6~o3?Avxe zwo~Hwu1WX}F9rB5aCuD?aM^>{Q;ExOCZ6ql8oBLVr6Tx~yxX**GCf#SHW>JaPM;>A z|00GI(HE{za@KjB_1aVn>0l%Hg(u#px;A=(<>F|E{(jV^! zUZNKIkE?P*b|2Q?ocUtno3Kyd+h2_e@h?L;F%#F;P!?SqM%1Ng z!y&el_d_#(N?i?@2SJ;kU_wB>LT7&l7@ugBA(C6f~8 z${Jv~nT&J4P7v$Ei0XIsUfE%%ut)y~VN8L-affwPvKv6gz_!jUYh6-$B z!WTKgT1$TK8q_yDsms{nA<{O)+b44%2W96~^d5!gn*+L(B|J>`|p79jU=fp?AmyPW5J3B7Upc{WU2 zUFJL^rv3Bg*@L%way`PIB{<93LUq$*MW5kboFGt)lz25J)1Lig=8 zu*#Wp{Ufp3e^CfchyCtncDwcl_z~v2^z-LiW$X1LBK;Te+qPqVe<9~v)luSnOFiL$ z5FwsNWV*x$8KE8zuKNrP0WJhsX9oBXe$xNu!Pe3ePd(^kWogX5q7!lIT1Wi5F5p8* z4?f#@rok71e*?7~AR`BSWphQmboI#=i3@#=E)BR4BrYUTquB4oUS-gR@vao?G9?wY zpbfpb?urw@i)SCr4Xt-Nbw z@-}e9$}8>o0<%hMYc$}?6Cym{BeeOm5z{@Z;Ez#Z?5n6S_LOLP=C2z`FIFU&YdVmd z5KTjYT$5hp(&iiMko!CXf1_%QNqcXL?T@>20?OX2ti6B38e^udSInOya)va|eDTZV z(xeCsSGUXD({}J~5xi_GjSG)o3_W5GU(R1MTHX1{_P}mE&>C&L7zU=nQWuSSkdt~o z_fb42^HSesBX6_^e&VoXKjfsIk9|%)?xlD`Uh~(G{cd&WhHZ}%w#K+Il`_zlWiJ!H zp+vB&pvRZUHC_*wdwhwrj*moBW6sd5LHo$yE_<1gPwc)Iwl5Jq6dKkJ1^S{zTC2mZ z>DiZBiWp*paVThrTZ{uC-E$}~${qR9tVmL1m5qvcs1B9J4rsb& zFCFnVjK)LHdKk%zYejD9dT*O?piK8{G&ZcSH(1x}E(0~Jn9(5GgezKu^Th2k+jMhT zRr@yHpLcF;D~MD@)T>$hTZ`+>M3Yn{s!EtYLOgYP2apmv{607>-G^x$A_n8U8L+1C48)u^iC#RqZpp z_dP!MfU14wKPcRTxW6CoyI?yMI;!1PmM_j@e7XYyIg7>0CaqBewXdxVk49PpdD`pY z!_KC%`#g__HzK~BRneZdwIq72Hz6_s zzV&L;P%-&*E9#B5BYsA+xtHr3 zn{%xC*zJgmM7%oskOE!f-qIAb6nU)^_yGGVL;`D%phNgC``Q&~NbY=E?9{P=1o>sqYRzIy?#=#re&-d9U;4*7Z zt#;Vu^&HQi;C}b=jT_J4JKQ00(tu|6y^gRw2ySaV0}_juHBak80aSHbzqI!>M|zv? 
z3Mc(H&sLRB_hNk53fs%=z7u&GD4Pa4sKu84coz7f z6L{lHVW-yP4}H=NnFuvJ&DvB^eJAHp)tT$<$^3rSF?|5F=s8!L?Wikneg}6!N_-&R zqN593`|a1Fm=2tAmqEtva7k0_guEP?!{n}tT*R^pQS7JmTW(+zPx&UW=p z`Z~s&jCOqR7~AA|alzI+_dA}E4#sxGDhK^`?Lz41UH`VVq9GMFzpcyX>@b$Mm;j zocQe=Utigp>_4A-o5peC+Ia_SinXvwld=Ni$s2?XGC1psp++Fa-WtyovMkXS$1zRU zCMoe?{A;^+;$2g(Q)4^Es34~cau95$a)d$0P&Xa*ik6Mw@Ef4fvqQ=MlQ{5=Ks##T zE{J|z))fO>_BHVjiSKnL>>qsk|2EcKwZ7N7*9EVh)n~+|I{4h>Ih~ry1ppe1_5U)) zg;-(rnKUfjXN(cwYti0EDR%%!^gU7p)c-B$Wi#mq6%L)&i z^bh^pR`N3uY{$mipg=raRRZVt;QWF?X#QFks@(em!_o(t3bosMuMg+5xVe`nH+Y8{GPo8-GJ{Jg zYSRSyyccr59J6WYzT#KkOIV{OzHJ0v5``kb+SwKk<;DYjlla#2vq3E|7qY(x$(snH zzXwS;@fk zYd}99k@s7?N79ceya#rZy}99U6PItaYl3*&mbf;B^SmL~F5k&s+XKm}G}no^HGYLl zGiFWZJNO3f^q+w?KJhtxXBgD+4e*_Rzd!v=cv4e+W$B>DqLArtv-pmz?KFip&IvzH z9x;_Rwv>r;o4^H{Alqe;-&7hk-|Mhu8mqk{jM>uaFm`tNy`Zlgp9NSGnLMLXj>?jH zj*FUC^5Kbjy-~oZTV~9Y<0I#EWAJh(~Ji9X`1|Z(mY#ZF%6&C ziaDl@Z0cAvppTq$jsd^i!dq)JBoE=7V;oE8JQC%YLDN;dh3yy{w)p}!+n>+54g7s# zJr-QxihKtxS)tk^#s8%EO7by2+j>uGI=-^tx9}CL4OiIkyJq4m;U7>o245LA`O2H% zE5oSJ>oEDsu;MFaKd5mocuT}!gJM6ZUtLCT$ydJK!5Ig4IriilF*SKQ~+ zmgC*zO5{658hDVQxXw{jr#%KfBQ(Q-+yqYY9uf4To*rzsXkTEwKyCs4v%iaD5+}vo09jul1ttLIUe~eSg-c7gq5E07GP^EA&+eMZQKU31=r>D{6^Mb zRQ%>=6a418M(InQ&+r@6wdHJEIQcyIqtu1WK1yAvjC~}(nbODK#&0I|aSFeYeN?)T z^=@n3fkw$XZHnJuyufd$_vA=^Gtq}1&Y|ve?-YLXvkt{?{?z0*0n()vir=_km7Cx= zJAuCw^l3VtvuhfjGidP~OXE>Ir(5zIRU`1X@SL3{&smw~IajCfoL^7y9Q52CFu-%> zXbT{fi!{$k{+>K%WU{UUdHe&V>!98tCGw#zR1vhkN%5S(pyVIZ>N?Z$oVnmRs24bu z&i*SZsFsU)qc-xJ6-Ld^Ch;2${N^R4*YI1h)`^evrsX$(I+fpK%quz0uhvY==gjUV|E|tdHDo`&s925|_bg{zt=v4x_$%IzFR+md}9x ze1^|hde;;_(;@vUKIGh<%wx!5bnulI!6{g8wI|Ds9OUzmS3w{zD|v+@x&U@LYAt=R zyE&uByN*E%v1%fdvn0^Ehnh(w20I%#1$+l3gFUk5`CRcE@P?}RBDY6Ky(EH4kq1tJ z!*oUTC~aqJu2E+D1iI-3+>6?)I8Qx*Fs?YznYK}p?^2(;IZc=SUQwEa-4r}J8}?Cn zI8xZV;0hc0kRg?hQe&*=dV{9{a>Mu`$|>#@-0E<*U~x)c|G;zo737*#ygx- z-yx=Z$0YmLqPfwi_%p=;F63_e!J;{`?)t$z(B=<1y*Rq?81vqP?B#Y;pLtoqIRZl3>8%5Yf*IHr6&_l81m;I2T;6Z}@3)zZZ z+x}v&D`eQA%a}Td0lOvEf#P{`jML*=?Jli|!LHAKJ*7Y6nN=K~X@+`adEUb1(?e6VX zpSQ+SxD$O&_t&)71;0oe#Uy*yqj@jCK%DqD%nxl<|C&L|llxC$2HuNH-GMeLd>g;( z1ip*?{Z`^VIlde_H8%?;&5f)xAJP$rVa?0r@8Mj4zNhDa-^H=F=Ha#U7n$EPX%1$# zLE@WUW?w%5o^x&UyD%2}tvO2DlJMP@E$N53x20v@*MOgt^ramsG?0v@?MSF;n<4M# zWZI7wdx%3lUp8fE`bGHNDd6G+y~SD$5|4}yj4A6}-+3ZPyHV{yCEs5L&82+rFy(vq z{PJa#?^$2>2ieZWZ+Xh`{B-~EjaL5~avw?hBG38M${Vq*%=K#DV13L!JMVU&zHWT0*=N!M(j4mzAixyI~^ z^&OthME258KQQxV*%5QYc$-|gw}lj%8MHK+K`R+{OsTD#bJLPW?Z}fztoN+=kxozN zHxqhY!v%UemB$ml&am!TDSe%Nj|kS*4dlq2ZHSrih-G~RRg-fy;w*ffW=`9u;NjFf zi$(o~dv(?yKUWtWRhNUX0(MT(|;T>IY*GI5%uqYi}t`t#~adT{u5nR*!2%$ z59wvGx?mXB^udqm!QY!gmlbxW`Z2Q%U!rN&Sy348#&E3i+j_M3d2D8n>p!ISVz=Wt zyssbaOL-pczp_6adVFqt@GrWXLk>|H91T{6vxC@=i`~qBtf^?*rr}2pwhpd0kh$mj z_Ts|o=HgA&cNCwhzP-5cI%MssUN`I-yt7m<)*D1%K73iss}<9>?(8$G?^?UI^)hQ0 z4RhW$3O(}MvUE+ZkMvT!afF$)$$!V`#;0iIF6AWD}4+p2~}$+j{9(YGUNC#jZ*K~n+fbT3~i>zuL>@Rj-)a$%Mc0|^R*gq#qI}U6!y1Vq7wiSgg&kEH2bD}6XH0$+{ zP0TfNFIGdYZR>KuGJj`a&?XRF<3XJKn79F%Em}htY#d2nV2%S;BLRI~ydY(@JqcGZ z=~%zKH`y9GZd>I3qUSwGTVtN*y56hX*-#n4i$VX`U$#nJDbx?C&97|2tBj`BP>XH7 z`#bV{Y?hRqM@^LzGN54qvi> zMW3S*;{Hi3M@u@D{8D?!8k<|^UOV|-UDws0B2_Q@)spyOeJa?V4;iV8-?DvIDd9r5@cP&E1j^9Jw)cr{MThO10Zy9fbOp|~AuGmsz)+YHC&PR&7U+Hk~w?}>) zjl-gZuZXn!AQ^a$H8cfup$Fpv-=S;`)wihckiJKJN2Tyt{om`lmZaU2afSQ_Rx4*e zAwQ(Q$;+NC7f$~1oOQ4Gp6z?i5~t_+Ih|3x>{;(l_igfXUK&?8}L=FJ9*?@purFBKrc_OX{wigA9Kg+aQAEppw-^~UF8?Xf@ZWzAdo z4C2>1<+_jnbP9O$Iw;utIo-+4X9E)d)P57+JV#poQ0O zL@V(B0NZZ^tlHaU4J-@BezSD@Eg5jD1FC!+&9qy)VTF~{UH&0u3O?S z-Xi-Y24gq(0-F(MkbJ~nDnbu**dsYrMGtY+wuayELe6zi#z%bH?k4ow@2TgCK9r(| 
zhwl?3Hh(~bQq;8~{h$rWM|iJ1XTM-0jAz(+Kh|Y8=C7OApv_iT-P?@&;9HjW;M{n( z)&3uE)0>GWEg|EiwRY$i*pmIhr^Fp^7Ih!a3UC4Xaf{UIF2Rmfdx+nu+JA&tmod;0 zwvFYhHpo}_dez3U<^5J{39sun+g`_Y{pP*>=DinqFLTf1I{Hiehl;T`>dVL#KngXK zAm(h}H4l^uvje?@etptg>}Qg58nS;GbF3rhE!T}|Lp&CIUA%Up4Kf+-nK;($6L2sC zu1Lc#Yuk}=ANV5QQ_A)&q@NOBN)1F`gW3hh@E)@-;-bwT8q^*}{!Pqy(2p;*X8TfX z(c{pK9w9xnN8X6?Ol#;(8|IWcF>*@}+-dfiV?kRvetUr{7A{=?F7ci1h*dE%@aTsb zcr<|6j^0DSl^$Lr@ujL)jfohwAyyUmlzHz+%)~)kVa9cT_-l0Lvv;-xjWiAS0S8I{ zPr&XvfrFIWv5uT062JB)jQDTIG1zs0tLi!nXFrQ$GNU?hgx`<(Q+_(rK=I03lNg!cnZns683$4$5gaIXm`0Ut8qqkwx%_!!`W zCVU+50TVt6xZ8wZ0NiE5rvN`@!Y=~;k_n#%{HO`P40yi@p8<^e^78x70`4^7bAUTc z_*K9O6Fv|4VH180@IDhZ0Pi*73xM~S@Fl>=VJV+~88C8xOZY9oJ5Bg)z{n#k+us5F zkO{vF_(2nXAMgVv{2^fE7M9n41o(avz6yAo310)e)r3C=jNHTW`cDAgXTm!0Y6qOD}v`1~C_#_n9cXD8ni!F9;RyDR<9aj{#qv(L6YgqeG{qSvvP zFy7sP9K1Ld`{YVAZjyZ{ug!j#$EXa4T`pM(!pKCX758c$)J7 zVdTuB_KoM+5Aql}H*il@kAhu*ZHEY#$$PST38T^v-syatFlymZi^s7o$z$Zyz_V<9 zgqg_#Gj^CGff0TRZ8&xuQ6nZt@+e@89O`6@yPo7%;urV3j`7;1wHw?O(0P_p=fU3P zF!wGev3FVNJkWJyE$f?eR*JQ-2hgHGYXU6^jlTIcYkdBib-vsh*35;hB6WuC9s4^! zN?dci@~Jx!bBJkm4C1?@qg#;ecro<-b}9Y4m+5KUgLIQ|etchH^}rq_7m!DCPrb4V z@EWco%4zHqpTjj)SdE;yxQEAFf9$Ju%(clrTgP0t?7wx)XNw#y&b7_&%WS75g6p4S zVYQPsSTJdZ1(TLoFlmehllEA!^)Aw;%r?Gm9F6 zY2!Q2XSBVV0iVl&&t|}9GT@gpVCx;Hr?l}MFJ`oz%79Hk=(ThOg6|xa52Q$FoMVjO?LTIy{fQ zYuo*4z3ESIn*M|=GglV&SI(kGAsNn@k}E5_W=FkVhs`%fl|Nz4g&#?e!ihwtH{qKd zGx`#i!DfOLa2{> ziMWp_Ya4tCjFslz9&4`<^Q>}@5cc6o-$MJrLg`x=gKb~=4wP?!wtsotWA5pJZHCA6 zy-MEze*d1wm|OY|`c0q07>-$S#&7NwYRB*2@)*7c`ZU^M*MwdGpGJ&>XZ2`Vl^cZJ0g;SkkMT;6p%W)f!~-tZp85 zqyEP1!8=h=rms!}{O}NL#<;AD((b8i>pc^7J|^ei%FMo%S+!O#qX*$G+qmL!78mMHbLHXc5Iga?~?!Tl>b@H!`Zxa&EA&^`#qa+ zE+@VLHfUE+%uieTapo?ZNn7r5d?$|6mU|rEi{rHA9>?2poVMKK_#PamE%!LS565Yn zJ&y0jv1NH@PqyHgws7cT*o*G>Y{l`OjN@%MP9ck|ZRZ2%zot6}cVwLP!A89S)rb3R z+pSj3FB&jHYX|K^j=_&nX$uzjYH~+d-K(!5y6+5p09O$O(gX>m(N|b=Ki;qfcQX?{ zd$A|+GHm;6OOqj5;NOMa9^QmisP9nqHmvj9FBXT5|1zxfm*5k?p0bq}V7WJ>o!_{= zW%x99mldP+6s-Lp!7BhiN$p8k{6EB#+AnOvo7&*}tWI2ZREUj3?}O$6m%C|KyaK;- zisJx3TcLKptrfm3$T+$3yco8*m;4v8RudlgMOgF85K;nbewMQ4*U_5qcG{#h-|ke_ zd>5?wi($>5Ev@-|u;w>NYrfxhgD)G_{HJVs={iAc{_)PW0pa<}+^FXoERpR+*AizD zfsqRKFHB*bldkVc48|}zUPj*0R?mJyo|ttV?39o+Vc(HH{to)Pc~06@YIN*1@_SoD zZtktkXFK&|+WEI5mP3SE!_RqR%ZB`^2rM|X0Kz8zzi+$y@ek`ErvyvM;mBSF47+4AU_6U&TgL_?xSt!Ive)V%4j^=&A3yH-;zR4 zeh8ag??HZ#9b*(FqbYT4L{5CWGu4g%H@x0f%mUElF z9=@A1+*`!!q0_?_xv*nyC-&FO@jg1YlXk?Si7{Y2L-H|KD2X@}&U4ZMIeGVI|7!QT zm+J5n#P{cFj%coylyj34vZKzWKpY8T8X`ZCeu9oj{$nf^4gx3S-c!}bEDMiOBjT>9e>_LO0rp=j zTmvoJwdI9--0mi<_Yit~vJJbblfz63JfX-^fK~H!K=Aa&GiP#GX276PZh?*envj+ro zeET}~!)Ji;pG8_gc^}`MY}M!;X!jWU2v+2;OwR%P0ndTvmYxIJXIv3|KJj!7U|tWL zC`Jyp&BO=5Snp$q4H(4wZZF0${D|#p4=>Ku;~v<&?&1A>uY%j9Zb92S+o@OZ8K7qv zPwU!t<~WA!r+txnU+)IMcwhg*G=0-{$#)h;johzv`fN)@RtK*2&==?r*+TFKGJ-R` z!iHkS@H=o_(ne~*AKr#Z%dB!J2+vZtL;`cI`j{F6Z}C| zm;=>G`yKEN?7Mvsv_?JyGG}iK?xT(F0N*9)T2(FZgwGhV;eONqVBKrLMrZmVGOpvj zY*)|!eXkL$HRp6|#vfU@NS_34aKO>-5vOGjMxWr5Sj#=%7&{j-TkxG6NBLf-<#dvpXIO+L$f>^hG&jRUl=$AeR&`jG2Nt4Fp zJ%y?=r_9%&hlk;30G=^74D7cOf6~XO^a(%7#QAbNXqb!Fa@_zwS=CqZLCAcC*VoE5 zwO@NMqaFT<>xFF3L1j_)eShZ?;L#JnqkgZybPyRoh(Yv4sQwA~BIu>{hlaGSEf@v* zBP907kpY(}hFw;F#I6~L41CVQ3Hl;Dg;fKvJNx-8+Na@*pq+gDfTZyfH%5V9d?$So zddA$VzL7rj8Mp_&hynN_bcI{0MmTT_`|711BlF%16S$Re-Q?&0zSppBE^sS6fm<4F zyV%D|eQYP#G*(?c=Sz-?(=hQ1{2w&P#1Zq{1ded+sp~CYgTybh&GI!!`~qJl9@2jy z#}GJY(zNh;(5-&2q5pYat>@=)Nwat@bV1?<=bz_E*IL7Q-oon#*78_B5A=uh$`#bJ zC$|Stu>f>z(zOA*w1==wJ|FrGp5qEAoFfhbw_WJh0(E{DaI%);0=_r~Imp6I=qC*x&L;ju9U5;hKSN8v`nk*B8pJKhyPCzMMjx~VMm;0xte1q(07gAL37-Xwcq0j)1B`lj5`Gmh>fuTFJYYl~ 
zNcc6tsD~$E12CfMBzyrd?50>#e&l^%9!UjXCaf6?*!~vbuxfvsa7eYkLpWlT^O|=F zmnxWR7T3sjoWBvZAtddmwq@DrX`5e*s&DIH4R3*!8aR5Oq;ecNBgc_T5`UHWTRM&$ zleKw}Gm&$09Qh~ZA7R#gShOCtNvzk|Sf`ParM*H{deaQ#o~+H5mEJT1c`9qg2D}&F zwRE$xWM{F;9KHvck82&W@*7HN!zL|c-M0wa#~p3z9>4<*S@W$7&xKW7)_e;YUvQO) zYFwx4z6Ed`5Q|DsRf7Hod^>Bj+1aN%XNvhZ0XThU;#Ww(Dew8Z)(IYRDg9U*3n{uI{8H~{P^MIA--%{nG-u7%!i9TRUYnE0${e!HwEXSEamEtu=Vg1KHSnCr-b zxxTFDaNSw$T#pvab!x#}zZT4OZNaAqOCF?ZsaWkNGp;$F0UyhNk7mG0!sc^>OT` z;QbkJA_Lx&0q-DeK4&{%HK&OEF~@pK#?>3FR9a)Q~cew|Sf95LYeL*n1I_BY!DPtFkkO`Gi*+gb5+ z;0KGj>nLk5C3EO$HrKAFciz>TLZl6{2s+}yi(<#X_U~^y54{Gqkue{}KP%3xQ3Bc= ztfsWy_9G)yIF2Vk+hL}s_HHL4iqMW#O6x160%s}D!}`N^Xvmt(42ffDy@kfiUq37= zGV&YMxa9RcZgoAZ#G{nh;fZ2KC`QgP%U?Tw1NiO7ugoru-wL*4*Rjmhir*zXkNYAT zGd9&j5^4_EJe6gm`G^e9KZ>@yyVea>bz_iOU$shyJq}U7Rc_dOWgPhY9gQjuJf_8V z_U+QcsLvF+OVdM}5mS!HZOyXhTG8N)1ZPC}XKlJ!hK?*rdT=A|Zqv?o?b^Yb3NOHp z3!5IYKh>fAfM^=|?xD+F%_Gi{cVIt;ov;eg)z>#(gf@W6P;+Wx;BKE_2ZVjdjriJ> zHU>*ODlG(2X(5D43t?7TpxtaNkt%K17`eSC@y)~)SQ24>jJ^dcBJxw!T!IA=5zI9P z?2(8mUWJ(ARjsq`u_VD1YzJ1(Zp@(nt=)n!JU)`titR7hx z?bJKyL$kwCrOD%9VqM!mCYL|$Ki4c~Yb-}n)}5%%PjUHHcR_{Idj z5fT4Y*e8M*hvfCt?V5Z!@VDl|oM6|~^nY<^Tq@IM3mZ>LTG%pb{Rq@2Tl z#0Pm;xA_m_cO3Cj<6XQK`lk-tknTYHH_F-ryyhFo9=heZuOtG`A&OFez09WBpTj-4 zu2;u?X4pV%IGDP@1+ifXG&Q5o68hj&-RFdr435cD|o$>4Fd{ph) zSn8;3=d*J_sc~M8uV&=PHoOlpv8=!4dBRt(T@v?j2H><;r25-(P(Uv%tl`EdG>ySBZl9G}7#C!{D6g9gBs)sr^z6IF1 zVwlvdWNU2Sd+?BLg75AQ#J!V>->ym8R=N&w6YAL6B=wT?4b%(t4fJbSS0+E$(@lze z9BqsKQ_oTw@0eHXUd-#s@voa`Ix@`tZXPw6mVDu~A+_@tT-;0QV z&XpSu^{` zwrC{fX^W(Zo`T(`Ja_5o`Rbg?g6(x@!w6IdrL(s`_(8mEpn3;5N8Ge_K<4?9#)Klj~y$4mlu-DGIfv#ZErH6e1&`iPl zfzXMIN>JBF)(|Ym{YwU1tmnwzwdU_S^B43L`Pv7+G@)PWsOJ+?^?X2GSs|2|vl28l zlMcke!#;*oP~Xu8`a009hx)a>iDii}=&Pt3y~=M-QBs8+SpuC6DEykE*$@fgq-hRpN6iw@wb6~+~`j)1V{a!hDX&U*UNpt(XQeTp^ zu;0shx1J&OB(t4(BHJ^bC-G>S=MmSeXAlQ1xFrK7&Q5(EaT@xy$rC2}N4&S5u{s0h zda>HMjtGB-hGYF9zRmG~NyAB-Nwc?iv?ZL#m%zE;_X|d4hd7m_{s7*VK zx#2j&PRF=e@5?ymn1jkoT5tx(Yl{nS25q|;b)9d9{CqQL*cTvKf1w`nu%v-;^V+kx zmb8PFZX=aH^09Y>TbWIM`t%Pcwv`X=cdXq80^ zmyB1KHB(JmXVy&(CyPiYNZ%waj3kRlLlE=nkhCzAtRRiSu~X7QL;~vpFKFYq_#Nq? zC2vmRW3tXokw+uV!&;CsCgp;wIDVz$yNU(|pV9V#2EOkk4ZI*_OpZ~?q=OUkApIPE zXv+`T_zdC(XnHQ`A?YCW7%7)=thpLE{%C`o+3)4NSTN_yf;o?cRq7hfuhq_Zw_xIf z1rtv!nD}GeOT4n$iEkE6JhWiquLTpgt$R;rwBvg~*FgV4%c*ZcA6uusnf2Ri*G>?KUY_XDPX0y3;g|8|o875Oxa{wW((jom%c zlHh!u7O;aNds@y2Y*fRHFFZPH_ov&<_`=CHsuwWFv`}egz5?gXlkHUYT4kp?E`AhK z*$QT|Q#njKRr5ryf~9jmW2b7KWT)DlG!AdR_{-$}DOM`R7v6C(>S4xp7)Ad2VU1eC`w)xx5buMPioeWT&KN>k zr%HK0ex;QQzqDG7;g?n`t}|M##_&t46|GgYTJ__XRx3Z=d&%~+2Fo1!UXQ#I&1E*x ztl2%c8!vWMb;EXL-*})6`N8$;>IT6tSB7DUaSi(Rt{Yjr)vEXjTa&c7u#RUFY)yZ< z1C}b!rjZ3BdWe>)Mrl(y2L5=q%Q;AERZ4?p3pNz$CB6V6XHdP1`boQCH$VkE=o$9^ zx97tSlb?!=*;98IJom=*u-36u>^0n`!1wVMZOJM-%GN-sV{O%{%<^O21=~=np#*k}l+#@}{*mu(5c~)M}r(mRD`Ha~0NK(>E5pf7U8GB51?HN!f zJf*k0?iolKy7L+JWWGV@uO@~fTSr{@T0qY*;=unU+BwKk?B@=& zxk`;f&@cGi`n@N49HEb_-%I-El0LP5$@5(S`q%osq?az~ckA~`z8z54!VR<4BP~t5NcM$P_se^)B}gkp0q*NvYEBm zJj>^JWxv&WQ)BaT$E&o1bbkPQT>>!zqE z9ES&OO)^$SeI?^YDq3V_$t=ZY@;ajqY&~xRI7DVf_0>q z#iLv`_C#RMw#490t|G3G`#fi`D)dM*GLB(24UrP0%KWQRSViaHU$bl#zTl7T`#M%2 zewFJ-RGq{c+BkaKh_WBBCavbg!C)LRCqJKw zKFc-cDCl$Y$y`xbUv*erb+&@TPOdG!#g6lwXI-u~8&{hi$NyTrCV?0|9Tp7)a1EXWy!QsqV;JkrTtBOE>|Sz*ZmlyN$LC%duj)V3ycO2G@?pBlmR6+9DaUH)cpcF0X-<5hx=K7Shq0o0yc#nWK z`14s4+Kvc1a6CvD#alU7v?VxveZGHA$fs8g++P?S*kbb?Ml1v=!?_jUDV`^f zkMp=pUuw}GS2(%!MZ__HJ}o$fIpds?M}StK{Rg8;@mFf59NxE5k0CYg{bK3M! 
zGcoNOp$(9>KBX5cJ0NQ;T7DAB0iZALie!g-X&+<8HN=EqJOuc^P4R!!;AY*9a@gx$ zl({jpQ9mFLIo#$(Kj`>p(2((-tj^`nCNThHM*b6j{G$BVc_m}6DjfDl4&($g;u=Cv z+9u)}b|lv&h1c!bH2hGqt+QcxOW(TTqjmMeTgcAE?Zat((R)}{;POgMw7uD^{fGT% ztj=ZbNAnJQ{%tFEtBJ>&r_XCQ3)JfrG6>{7RydS3iaC(F;$28Xuzry8;*$en7x;(2I?{6*}7Q(lJ{2C1zxJ8;9Xmb`cKRg+horbzjmN* z<>2gLzp;61b+dHf{`?D& zsv0){?QDQs%TDJ|}I=o@zmW%LRVV~DnKK%23`rA{u=Ja#a7+Slx!K+>? z$VnaWv_mG8^jqlYQ*x+G>Q_$IXrzxJM>DccQ@_G~81hy=NAll*aRXt=rvpYIVe)ZT z8T1jrQWl7)_U&wk4#sx!f5c-`hsw`eF%G$`wwvuGsO?v>ymx^sn9K#u^pvc?ae40o z{N3oTh#{t~4l#8x#MBK(a{_}|IicY!S);aig9kPL1m;@2-|RE07JI^U^cClD55Ld4&blW< zPLcOp?Nj8GN$1cP?Db$i#@ET~7lFnzC($A$o2WhlzZO~J4Xk{#n3BN&{(vjUu59Y!#FeClE60f|h&OXD z+1vo0i_8ZCaiTi*q$C#t2WGp1Vj1{xv8yy$>{LNOD2R zit&Pyir#V@10Mz!08bW_R03W~yqFqmX&}~MP{tY{Pe9b`_G%AyTu#iJ{p#tr>Q|?6 z0%O@=;RN|*r8er2^BNuCJ_z8*LpE1LFD$&iwg_tjwIXZRgYV&Q{2qM&dUXx$q83aZ zm|UMZN9Nuo)LU4L*q(ZEJhFOqp79%0QvS2KEs5NbS%|hHN9uB5Ula1Q3d0F$#Ij?~ zlCgG$89eFxI6o0<_a_{|-F7}6tkafpuUk099f~2?C`}#(4gp@pJPZB!l~Hr} zmG(k#1agXY=(`_CPLz(SlN9oYl3Z`IsO;tlkm6|RamVb5Q7Zs4%U2_DX}FMa`aJgnNf ztz3UOSbxxiQCoM#_*bxQ7GT|=hH4x>sSU2v3?`ryeHN8!RtA?60-zjVB;=OulgPOtuJtw3mvM@Tn0babi%y-yJ@Lif!OS-NBwAlsvQ3N@^&{f9o zOXl!-c}vHi04;@0uH6g%0Xk8Ocy;_%@SAmfa|^y1Hn@P>k)tKEa>B>wl`j(><`nUw zcGdeG?!*J&AD+6Bt9S?V!8h=^Ql?(j3%&+lPYvin&G@>)8bO^4W6v0zWY&n3at?J}nJPs~+r$(@J8IaO9ZgqRWa zC55VTo&(ELX|9q#OHoPoM8}P}|0vd?FJgr)q4F!6wa(F2*uzS)eR8!QLgb1Q`WJQE z-0=E>TLOa)tIdA#Sl^HOL_Q)15LNe7-<79++Nobi9%)9j?P0{&G0wK>qUF6{hJ?*q z{mskqq(rk@`Y(Q+uoj*iPCI-`hiZQ7>ylRO>n!>BJaK zJ#f$iuW&Uz_j;%i_WJ3gURBKN?V4VCho-VAPtJq(4sVFT@wNAa*d#7xSm;FvSELl|$vpVA8)iff}H7k0>asLy-<`E{~_ZlfWy1_hJz zUVyX&I;kDTTUjOJXOA_W0svEyvTUWs6qqs)YyQ`g+vd{Y_$x@e94d zco^5f8?h>Z?@J-Wz+N09ix{mVi1zX$r;?b+dVu2uj^RU5Sq*U9f#dxd$IR zV`genR(kb)%&1hKah@5MkZpyLzZtE})$}fK^Qqm)m{9f0J*zX$U3|E{<4^V>BU{u! zE(E>w-(mOBt6zkli5Z%dWyZr#UZd($@R=P)q@5oAindl0)g3zW zEg=5cs77oAV<4)Uu_h1$QH6M}D&%aaJr6tgtDr{bVCO!I7-4wKkh8*Io{HLP^a+`} zs}V1L-BsX?8(4sdlk1{b5e{(vB=Rg@qf9NeJi4(3<}&6K$qUPzpREZvfDg@nqj9DL5kYI`s3!kaz6>3S*t;Tq;vz7^Ci z735xS>_ueVvk1L>e&l}7sDBYn!WZrbtqa&&gSXh$qwXHBk5wA}kGJsd;%;VUt?k;m z26QomZ-&MVd8drImSX}uo3&&U;M>T%%3RCHQy}a14xs);lKTuWGRUn zu4U}i{_M96$ehb{`(hQwtas$sSl`Or%O5cJvUWlnQn{B)5v`4<9BY)hmr>IcV`hHW z`pMt5Jn6H(=bky&PV$>n?&YzbiQLN*-$<#|y*!EB%Sn}c*&GY;s_j*0v6*{0e0*~5 zWy}@$x|w^q9r+Z*=j2`<*z8`KL{5di{kakBc#r6?+d*as_DTJM<1KS97hAcPQLmJ_ zm(6~p^C;jvuU#-lAN#>Jt{3)O>Lbt@oyIRa+>p}@n=Ahp~vznW>v? zVgDAbWBhh>fc19{vA=80W5UfDFzgWWI<_-@oc9oBJiG-<9LHJ(hrm0O3;=zVc7R0~ zZyoK}(>{@p8MKot9u^IGK-`Pe4BA z@YHD&j%fosGdZyjK6WR7P`;_s2;WgIy?I6Au(06&B| z=PAhd#5b9{d1bGwunOy~s#mTp=$KrG`S3lMoarKTeq4h!Dd!7TQVUjjngx9Dd=K(8 zM{9M`UWrGVfjvo}_prZX4A?i|0rE7LPR`S8VZo$4&DtB9#H~qrnwj@Q_M15odQ#9= z@GkHIKlZHi*~q=2v`8~gv-M==X}0jI_a5NN0sKwmXX7v3=@2qqvvj$~NRnkh$ZJFwaZ=2=XvjG?HGWh`AEWh;4{?!VHs2 z$tO?}Kz`=JDwP>oQZ;#f&@Sm*zM@~Xuq{z;K#l<1OXF|VR;3RZ6|wJSQ8*;ZzEnN=*P>kv1Am-)=SoSBG$UH4mCb+>4rgR!X-xt~O;~@@Uu+1nSzrrtX(^c2==L zwHtX%dpoM#NSO9!RJ)Nd?a|QbBusldv^)s|8>ziXcmrVQfD+>{Cxn*c*Yl05+8E$>YN-fhB1 z0q-*5V}N&>@NvL9O!y?=hfMedzz>@6DZt1*A)omoU}W!;@M*xvz9-?A0dF(mGk~|6 z@L9lHO!yq&`%L&%z{mwC?>!F~xd0_>0KVIVF91gFK-qo?Ffs^A_%dK*7L@Q?fRSrZ z!fyjc)+Y(S1NaUTeitw*D#`XMfZI&?eZU({_(QUn`DpL!9+S#$qGa?kzZm{k^D7=71RQDM8MI4QS)J z66WKr97jwW{+16cxE7FXj7tom&Y9ljZ-NoC;@qZQIzw%*5pb&ie@?8#4ZQ zXV!Kehm4|ssz={#=$EUcq8520{y+BK20V)EJR3fD`^F? 
zjq$D!zXmMZIN*dB1Of{qVD$q?fbj}rhuDBgOblt=L{6QCL?j`t+me)!T?zlh!8VSG zA#dE4B#!GgPC{Det&_U=yPq>NyEA8JumtY=e%E(>bAk4G&fMq!oM+BCbIv`p9T-pC zi1g!q)M;r0;@_hnZ=&%q-aWAUDNV%K3=WEoko!t5-qZ&S@Hw=)5r@vx2`sH#;8ZNn z3;Cs$3s+fuN)N{q8#K=!!kR77DhBdG`vtRT1p{R_wHA!eyR&Ki0%^sa-C3L)n)s~2 zg&^%;h=7kd%D+M5nCuD71&;GEW5pl}UT9iTh=BW; zP9wjM{Rwk$UbqSEfSy-0M^X5g-{gGEzru6aPePklv82l=CZMCrjhofV{0AEUHsI`zLr$cLs=-_&I4 zvzknOnc}1{Fis$zz#TXojQy1tN~q6k_sAA#@>!iswnK9#o1)2NYc!c`kQN`=Ce58} zmL?z9$;Wi^Gdj6nCqG59n)jnR_rp5*kWSvOllST5y*fEgvKr)0`nozFAkJjw zr160^b{!AS4eXP|{?iKj@*?)B{7c6vuqIbx{VNN+$#WCLoBTd_lW$8%cyHtgv83|d zMVwr4RPfUaMI}*UNv>1g{-xgwWY=i#1w!7(*pK+06g@-ofsT5-5vaubzofD!#dlD7 z|L4Oyg37b`={ExW{a^540p9=NUBJFL@gc9m3h#MZT?AY5r9BIt>yJ0$JwUi~{&PXR z1HktMz_p>AVbFMzZNC3v^doeC;@*lZoprRB=8qw3;ZQc@aqBQxvUu|CoUB8-6oKE`HbmDc=ucj}Du3t^R2l_JI+BS_7^sDK_*`!}hUjbden*J&1 z`qlJLLf5aOKk9pM?6Y{=LA)M~W%Kc#S7O9#jWz7>E}R+f0UrqQM5Wf(%kWORyfAA{ za-m-Wr*~4ih&^a;fl)L9X8w7DP34W?qKSv&Sm*AhmGr=N2E-iOOlf}ek&=cLAOoQJ(E*kj4xvC(|v zpBSEV9>*&pqV*f!$R>w_iMm4SZUa@}X^i!Ba2op)fu-g9^P<(s!hs-GJ7@(RoW_4_nYHVk`7Z5Sg>`0}#>ptw z(^XF61o&#PD-G-SqsnMKJ(5N9IcrX1pH2A=*$=UYMCCO0+oh#`a2nIs3rlH59bBo% z+byt%16Y6HI&tNeU!b)Ju>JTkPB=pA&r%WRG+yrcRma-<$xW-DxugG@HFZR%C3BEZ zFkCrnIgQ7UgwvSn*^1NnG&qeDTftGVbyOMF-^*zIJ%aUjg-e9is;oJU zDUIMXR_imh7LV^FlE0C08uK)&oW?$IJ6drX(>;yT*auGIL>8^XVf`spPH6KwPUFbo zy~JsZ(-p3v?%1>7G^YNWpm$(uA0EKi5_?LRp4a$Iyhp=0cRb3!*mXbln>!W`7W?Cl z9IV1;g*Nf?-%ji~zreD!Wy#M~WdH<3twcG1lxF-l^sCwfx<^ zKQ!{tN(bzfYNtldSFEnL!p5c6^*M*}nyYdc^ELinYYt=W`*j+pF~(}N zCo{!qTyYf+W8x|{<1n74<1l`S*Il=s!#I${J1uRUUP)hy!`L?n{spu=tm4xXNn>$M zY8=MgV(U4KmwPZ@+k_IP_X}7@KHUSK_!T&eWr|beFjmsW-vc2G*c9TG)o~c_ZX4jw z^pD~&PUBn#j$}QD@o+GaHr}p?!&q%sGOf{r!BGApYGcbi z@i|HxTdumN*6Pvj)SgQ3me2-`Z85c0PwkW5|9k|G@%O=Fyrz-ka4v!D*MQ1nO#H>E zu@J4;V{M;olh#h?U60GpvWhrI;}In;yf-47WptBm+-4Du+iY5!zZ#b@KNCS?f~#;D z&(7pB_C@PXUX9B*5QQzX;4+>$`+iFznYva4_z!$0cb{{>veuw}Y7=b<(Sdj^iS z_)+3A4xX{4$G=l${v1UcqIo;k>uyx)hratU0i*TtAzuHQos(sm=K8b-uW=L8Isn#) zQ~boVMxgN%)7pW?PfTkH8b2|uHE8_A&5$*I;$@IEe&Q97HGblikTrf{TI108iCZCS z{KT{-qVW^cT8YL_9D}U!6L&+__=$TUYy8B!AZz@@amX4!@m|OpKk+`u8b9%V$QnQK zA;{S4b0hV`!;rxP$mOGu!GFT#ryyew5SRNQ&sF7TAY%srcRvOh-*e&eambCTd;&7~ zB)R*ukg>vv^ICa@M5E-Q${ReY-p{nr4xcDzD_-p{#dLpjrE*%Gvl< zEAE#iM$>q3QsrrU<(2gKbjahDDcGBVyWws{o(9?b36cXybLEx%b3=W!yNX3FQVCVUBN!Zc$(>n#esh`#tH=92FKt8gA4k2R~D z$6vtO@ENoRt?z1mn(R28TcmbbG-EdQBh#3-&_ivO(y!^%o+fNXes1Ivy)VDR6 z`n)ESEzo4L5t^)pOEyIBM&Wkr++sSpT_?Bc7P)Szo9;%{X817F1!7<=+S zj9P6R{K-uT-qozdMH$l{~0q)TTvz~@@GFP`Z8b$sQS&%|HE`V#ZH z8CGuheCqi`{12Vj!&OJ83P1UL$n$uLg@#THevhx+x&w3ABEPbB+pc3w#!l{hVoav{ zgng0bs6iUP;k~=;Q`c2#|Md~PuMc9(Rrr5_=@{?W`TnqvW;%WoYiG)O?kUWN#ogV> z4Nq_W`KCy@wuY;)>|TBMw!&CUY{exB{XTA|_vX-4Fw5K%+So?33Ghjejrcx=lOIPM z3w?Hmy|(x=-EE->yA~5OV{FdQ(twA}!TTt1g~TF(ou0N+17PWSl<(Kxyc6G|s~Z5r z^gt#(% zMu=~)>}eV=VVrbyCic4wo8P!dC*Yah}G`%Co?29I=m*`#d|Hql&QBJhGx`&k;q;)g4)rEK=UPbLe#hAOPU0dcM(fKX+k6o%4?5?SBfEVG z`>S0lw;b7Y7x?7}dxm6{i|#DRDzE1`l1mkyyz?Xn75Oca1B(0($+8mXyCkc;cb7>n z)A1v#e9G^WyUL{pEciQ_dq7rsji=7;$NO&Z^MLPzI6sK5a|-r15Vzel?ml%A=j^YX%$-g~1ScScG-!+r;D<3rdxMbDx1W2^(4x(;Io^lYpVP_Lpe z_!*?I!f#CBQkYb}@4*j+jW(m4UxObiAGE~<xW2zI) zAJvs6QypqD)vXwwQujCmtmXq_9)50$+KA?l+K(nT>*U3%j5>#(LY3#3+N;u5lXUKX zBEH4-yz94))^vjKDpXD&|T+>Q5k3uMhE5mPaf&&l*0Xq5NMWOL5$Dh@nm zFCJPxNSt6328j)f_=yki{6umY-p$a-o;NYy90=e9pZ}t5HMoh#Cn-f2F={*q798Tb z$2&!w4_YR}zJ(O7;%2;a!aJ?{1+ZC7c#jl;*OU#uX9E2<-D&aD5_9$FdlnIh?N;mXFw?4$&JuvLv*#5jN=B`HIVq)TH=|%OpH| zXhmi@G2-)hC=SgE@n3evvoogalwR1FP)$AO#d%QoLO6jHkK(>oy>L&;jbC|+B>OY#z<9$rE zh13UAHV?LvIE%R*Dl4qfaTaqfBiKKcuO0il$QBaEd&>5~?o~kt57N7Mcj%JiQ$N6X 
zcM|qQT_)WZcSXu3)l8bU8~b&@-$Hw}!0GuuWQ2viMWe;z=j;gV=X$L3 zf$zfWsJThZDFhrSELv9`yu@Cv86q^Qe4rkC(Y(r;hNt(BzcOO4-pP zdAtr&b~McmO!ch!qx#omJ}2Qe`bZwN7w)bLkGCh2@TkpcVNhGtWNM?DOl?;TF9nX?*f!m8bYc*se1=p5mWyo?>plaN?P4I@PFNCz-#Y zv*2Dohtcx?2v0GX#PmGHF(KzEjzgz1en+)Id!ggx z?k%uqw|r0%zvcas#4WH}H(xG^-;6QvEb1p12XlM14_LB;nhn|yU9&-lNLD$F50k8N z7#}5B9bY}AxO3aoPqI25dq$CY9LGpj?bLCS)v??OlGU-~vkHdWyOWAL_j^il=Q8|( zt60iaxXmrNipic$Q^#|Pox6#T@v33#lrbOnSpvgt-j%6u!9KXkh>I9DuJUxjJER9- z;9{I0xiJkIZc#c-!e)7&J0Ci*!BL@e_;}!PQ>+n0#EvQ%r4u%15(xe5}RCbM&sF z5WEHVNfZXP4cP4ro?J3!(c%MynkaJaQGS!tPQymu7DHy6-&7JC8lc_CeGPMy+ zrnaN`r8cFxQ(MzyYKNLk?N^hjZEJpa>D-YYj4ff~vCcqqC(OM*2A*Pkdy8x_AG4bA z6k`mk+B(itJkcwYt&3pXx!?bH4#)G0V^mk~fTOq>Lrr4x&{ly;)URgIK0b`ou$Rvf z7T1AnGie>@AbXwf;u{+?d@~FDw$u4Hv-T@o#K9-WaUKu+xrlWjXZaWS8j!M!@5ynl z(s(tNgW}3QzF)^nJXvgY-(6S(x)Zzj{?JML_wIO3T>p98ZUf6v4aU;gbyWA;*l+l0 zev;GtQnZtgR)H=oBzM9OWUK)xd+nHkKUm?R#Pc0{{fKYlh`=LEe7<~d9`@kTUc4hJ zkMISo@va9R?HT-G@Q=NK+`q~kbpgM`992lCZt%Mtzr+V}gw8PH_X2)tEr?Fz@ULFs zmw)vNzZG&c;lwC<<&e1g^Yq5#rbwCD+K;k_4_{AHNBK0m|5Oi+;19Mnmd4qqhiDXk z|NUU4Wz+6&+D;?zNN|qq*#4XCj|P4)d4Nv&29My(^##Y>32-hGa}@Hsymp`*t2Z*f zOXWWzWA%pIeef+2Cm=@Fa!>+|9G~yJY@5dBCc_;Gxg}Ka0xM;`ewz*_hF$-TQMEa!&TDZz5?7!XC zSh|Y6ihhRr3%m(Ph@8VMVMVq0!RL;R3MIKK$vSBWjdys6K zOXV!(&(|`AlaOpA-0@y3#Z#(q3QxWARg7+Sko%=M`nt&Ck1CBaC$_&keUeQb(|M+V*E^2&pMA*ZQP`=y z9fx^;{3`a!GW5qqSWA3BVhm?)LXDgcev7F7a&`SHm-4LkQR<7-2kAcL zpZgc?!+L{fO~FukX&9{^YW+-fr@qU}UG2LRujU8$dU^er-`lB=lj>9qo7cIy-yUio z)bB+<6eqPMi+ek%)zQ7H*^6FfzKVSfij|1SD?;O7JKhAZ@8o10D~VM4_a+Jk8^PJt zj9uJ(-YUmKd4sd?e&CxO#O#|-yDTM|w-zxF9r>aGDQ};{p7&XYl=qmU#ogf{>oR$e z_v_C-w1MZOB91o&eO_LxHM7yPcViSwJ^Tuuc)KI={yF>UiRUnI?V*Q(tBH7~64@%Z z?+KRoIQAK%Pj6zrTzOK0LU*VkQyCS75qcd$2b(W(`%x?uGuaV(mr(@fUd4TPDaC><-Q4(MCW6X`js)g!D>2>lv7z8CV2!pddv_|o%qI{J78^>Os)YW{r1x4hs+&sg5i(YNt@ zoy6Y-DE(Zs16aqxGdCHdy1+R&>hCxQCn@LPiaa5*ti<29u(k3qk7tMj zB4McfHH_Amc^#?!jl$O4ssHo(G{4tNb+7eB!QW1d4L+@;o%j*2;A}hPgMTYVmHD?~ zR2g%y6rRt)RCnS9*8CDLuqG2PZ~~l*>Avwr>epzS;}XqS{iX9(IUTdW3%m^NA}i@Z z&pjRscJxQWogX;9`r+M)@t7!SZRb+X*FPjX@JIWr9~#eM6=cldY%`S^obuh{?WLoWZwHo$p~E4Y4Qic^kfI!M_u;L269oV)i1=)*_|1%gHTIZ|>VP7T;)T z#;L2jk_Xq#y_0d=z<5OLf=^gU+)pjxbRLr zSm?)kq;1poM!AVK`SCU6fZbE=%l23MpZ6^-oy-;|R`xUvb?vO|BrahGMuoQ}9ErI} z;`L=^3AQ-M;vw+*{?yLc*fWmpV+Qy4cmqkC3t1+6cNCX_{PE8Q{1EE_%ebUWTUdB(=Jbl|N|{i5W;p#^73U}K9gZiHRSXTh^&WWSM<{Qmv0 z6#cLCt|9x)`C2(gYnsnjW+(9d2C}ubMn5=O{rr53gY)@fyj-N%fqecv4J0ZkO#a+0 zS7hb+tnl{oF`3HY8=*c=-z)$gHYm;KTOsiIPT4)mRfZGE(@z(`<`Qq~6xh-!co*FX zKHtt!{yp+2|B&qTH#(ewIgTmQ_F(RE$TMzKO$dCx_MDEzu`$kPPppWxUq(B@Sw8Uj zBF9)0Gw}H`*fdHLrH9g4r1JSPN~gx>TLwPY88~YsD}27>hw?{$2&egxxgS36Fu0Q) z)Qvj~_b6?|(@1?TwJ(4v% z246n^WxQ=*omop8=kEo#dTzz*I75|~is$w;rdq3d8_dE zPSe#r_%`vJ9q+ExFgH^yZU#~O5l$~CDg|9m1bE@3&PU7yI?tBfroe5>1m#m)C#he1q2%J}a zIL*?FQ@R)zjYToUE5Xbwdq#OmJfl7FJuEMeF>f~WcG44qc|4wwCk_JSKBhe*eiM`N zjQD*MI5|D~?~668w#nz&@&&ZcG$1@57;$a!_h};EF4^z@S-#y2OYfT_SA+ky4|`b} zeNoTT?MSh5=FfM8n1b0rP_gHWw=YD`X$&-as{QkP)qzLqmX=TUH2LPTYV6w>6?hWN zlR4~LWWVLg*mp;DvazvZU$oX0j2C)w8f(D6Tskq|TXsA1Iq!h&OwI@254d)t3v#DO zS@?bv_{aMkw#m!rI~Ru{EEj!fa6bBm)8%8BSAuJI>Xp5dVEfSK!8)Xc?q3vE}D^y=)<-ZE#ut5 zD|vpB^U-ID(bkH2zVLl0^mBz{cgmH$n4w^e3LLxC&!^3%J=Eao-aS5dYA*6R7J1D@ zUSF*PzpmF={@rN#O*a*kc3 z^FY~YAC+;2DlSDU&=;__V#E5nts-@5Ug>}D4Jv)_GwK=n%|hR+QTkp7=gdXl zStf93y3{af1K z`DXQhL4y4?t$(WLBj75adZvA0WRK~63f1>=)Hi?Mf%htB;Er)K`(-2U<2?%L%fWjd zfKKD<%ytpo$nB^92+Q5x7>Ifn%9zsz?&7UPwU=NzkJgf{M62n@f^ioUSKDhsIB1J;qeB|9{M5}7sj*3Ox=xkaN3zWbqH*p3Etpb$u)s0#PyxO%0ef1Q_cD0BZ!LXoZjG8oW$lo2!o@L;SWy)2Tcl6#Yn0xJ z%RC1Hv!_v-H^&b6Z?UvlZ#=)LeZ#7aotsy+u8VcIZQszfZq?c?o43`ru3Fo+ebxG& 
zhV3;oT36k-Zu7d%4Qr7EHZ1(wwwhI4-JO)?Vc|E{^lVl=3=hA#M~h%scuEW9GS<>M z>^y9&@2Tf$TD5u0i2Q|n!q?=luBYyr{LSo{c}@Omdup%A-;ACa*W@o%o~eo$R^4q( zmFEckrOI=J{!-;KLVu}x7@@yZd5+K@#iw<&YiXm`>X*v(TK!VlUc29#YxY}n9r05A zTx*)C&0niuL;a4hZBU;{#XCa3hWZ_$Uqk(l*srdBN9fm3za#W(sNWI#HPmm)ZQauE(G=^o_vmKtsyO`&$<=b$Mo7zZKx_Vqe`w=vvoszs~if|PmwFqkSX%?+w0`%E!%3>ZIY+2 z-O{-(EKgqoamAW0`m>=MPrr376`Nwc%HNt9`0XKO^OjD!)73(M zwztIavp%-9mDJWQ(zb4HY2R?)=5+{ZL0e1bww6vjQ5PVWAL5djZP#Qho93__DaV#$ z&vE2r%PLDIsneQyXmutp3Z*Uemi=5+~#m))NiOxy< z0Zk8STqkfIGwOC6k=)KFZSLGJ*xjDP4!8G-EO*i8v)v`1%W=zxobKS`qudn-UGB;Q zqurq|=h~M`?#h?l6|cC1ue#-P?vmHsML%|XUw3=XyK{fycD~_uTyQhCQi+29y&~~= zoqVmhYyT#H++LJG=)~7&GaB9RN%p*)krswvJC1z}edV&vzIfDirsLGd&{xje z?QdLfUs?Py^p&?9_DR>$SM=@FG4cA_$1$6Ihx;1pMgQ#dE6?cRKRfLn>7yQ=`Z2)U z%CmW7V*uN~4`&F#x^=eaekC8*_KdcNr4c-uncn6_J_eh&%#LJ_@X@SqOTTbjZ(EAG zk4pCYZ6i&DU~3{d*P9M*^NwDx?QL^ie>&c>*(AId|9bn$ zw?2k`ve$0E{d)V!(vM-RwBKQ$e7$2O=f^NsdPlN3AEv7_K)RXC(WtuceK=1KXutZOXIu z39JQt9%})g!&<;0tOY!dwSa?I3pjwafG<0{Tv2z)%kH9A+}>B+o^$Tp*WAt@yB)8) znOh!x*VS$=x!TP;u6FaP!Cv*|ZC4BS+T*;c&;Ow~|38$^<=0#*i?8Oh`D!}F=zU+3s z;&!}>{_Gh|?cCr%IWjmvoP}ZC$%#_$+w2L?z3CZ0UQ$DK7@8S1Z zRSp94hAMx6XU6-g{3h0?f|wJN-<813Q{@JP)2hlv7$1I1mCpflPL)ppQ;haWesd9K zgDSsEX;9?_aQ}uXS0Jv-s=OEO3}KPqR=8KFasuwu&e?H9f@?MY=Y2_$X-ty$xFSD8 z@-vG3BFSG+C-0ylhe-ZQMP5eoVMX3W@>7c3Px1jpK1uRd6!}e(pHO56 z(x3O|id;$ZenoB~`DsONBl+`+e2C;fQ{-n!eoT?il1vfeqB6<*oFdD}d)^lnxryXQ z6?rwuUsmKe$%hoVpX8&8e2V1HD)L(-A5mn6GR@nk$X=4as>mUd|4Namp7Oq?$SX*G zQju4ad`yvfVn3skAJNGV>*T#U`5~RWM<@5`j{Cw1ql3>D&_}mn!bzQE7Yw$z<55e01)oNDe6O5zIHW^fZy|SKMFJxt}H3 zr?@wdPUDx6EGzC8b?$GHJXvvHnVZJ1CV7(Leo5#47ReJ8_cpB0Xz6JunHmwU%R2Y> zNS>g$_h6o?;dhZ-thh7GO*D50$>SCGeLDC3BvXUM<@KcTMI;w0?oa95`$@h*aS!ID z@f9SGQ`|jRQ`h44lI&I7c_g|&dRGVg{fD|+%+B<8=@Eerpu6mF(O-5lZo13P&_H#U zouL8hE<4^P>n}S)1JYe~h6bX$>NEciE|ObeA0z zsrt*#P@uZY&aeoeyX*{;aouHSUY$neg@V^zc6NhKqx*$|ju#NR%g*RjS^BcG;=jpD*YO1bv~PFB0_I z1bwlfFA?;mf_}T8Hw*e5f_|r<-zDfYGD%-{c6VBVWM`ic^yPxSLeTFK^iK-?-le_g1%bNTLgWLpsyA5RzY7U=<5ajK0$92^bLZ3zo2gv^maktB{)nJ|M$q>O`ez0GQ9*x9(Dw`a0YN_~=#LBfAwmC~pnqP_pAhuJ zg8l_Te^Sto2>MY$|DvG(nV|ov$f__5Kza!{>E$DwE=+6rJ-wOKQ3Hsj)`bk0m zuAqNU(EmZuPYL?>1^u+3|3J_af}RxgK|vo9^fQ9~oS;81=r0KRi-P_`L4QfmeH_> z|19X21pViN{tH3>rJ%nh=)V&5w*~!Q1pOUB|5ri(H$nfkpua2VzY+A`3i|H^{j#9{ zUeNzt(Emfw-xKsd2>SmP^#2s}_XYirg8nB#|1UxRK+ykN(60#khk|aL@MHcj=>eGk z3%Xs<9fF=E=-GmvBj`>+A0_B6K_4yXxq?1M(A|PQR?t0yo+s$}f?go##zhv)|AqVN zqK#UpnEwm+)kUDxeRZ)YrE7~yiUa2Vf`4_9Duq`Uvr@Xc=#|pd#j%vGE~2G$b+Ij_ ztBZ0eU0u9O>FOe1N>>*HQ@Xlnn9|k7#gwitLZ)LO`MR~J)L zy1M9^($&S;l&&t~rgU|&H>InK!YN%{JWlEAB6CVt7o$_Ux@eu!XA1S#Am}#=`b~m< zv!Krs^jiddwxHiC=n+AW3VNfU&k^({L7yw=^8|gqpf3>gg@V3F&~Fp;#e%*>nxd99 z{n%K6RB6z?Qnf)JC)FDC8~7<7&B=!T+Mp@;wXq`UW`jOnnr+aFrKmxlAT=5E5^26c zpC~Od=#!)+27R*BY|v%tE`#oqK4H-P(me(}Agwg$L1~piFO}99^fGCkK`)ou40?sM z(V$O}HXHP*QinmGCUqP1N@=@6pDyh%=vC4K2EAH}8}yK*pMly~jijGp+E}ghSp&XK z+HcUq(&GlbUiv(A9CuO&R=@x7ONw5iz<&Rseg@)!V?w#X|B|jBVUezXLXxhxc%